hip_filename (stringlengths 5–84) | hip_content (stringlengths 79–9.69M) | cuda_filename (stringlengths 4–83) | cuda_content (stringlengths 19–9.69M) |
---|---|---|---|
21740db472db1187edbd0b327f10a048c61259a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THHUNN/THHUNN.h>
#include <TH/THHalf.h>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <THH/THHAtomics.cuh>
#include <THHUNN/common.h>
#include <THH/THHDeviceTensor.cuh>
#include <THH/THHDeviceTensorUtils.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHApply.cuh>
#include <c10/macros/Macros.h>
#include <thrust/functional.h>
template <typename Dtype>
__global__ void SpatialClassNLLCriterion_updateOutput_no_reduce_kernel(
int64_t nthreads,
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<THCIndex_t, 3> target,
THCDeviceTensor<Dtype, 3> output,
Dtype *weights,
int64_t ignore_index) {
int64_t batch_size = input.getSize(0);
int64_t H = input.getSize(2);
int64_t W = input.getSize(3);
CUDA_KERNEL_LOOP(index, nthreads) {
const int64_t b = index % batch_size;
const int64_t h = (index / batch_size) % H;
const int64_t w = (index / (batch_size * H)) % W;
int64_t cur_target = target[b][h][w];
if (cur_target == ignore_index) {
output[b][h][w] = ScalarConvert<int, Dtype>::to(0);
continue;
}
Dtype value = input[b][cur_target][h][w];
Dtype weight =
weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1);
output[b][h][w] = -value * weight;
}
}
template <typename Dtype>
__global__ void SpatialClassNLLCriterion_updateGradInput_no_reduce_kernel(
int64_t nthreads,
THCDeviceTensor<THCIndex_t, 3> target,
THCDeviceTensor<Dtype, 3> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
Dtype *weights,
int64_t ignore_index) {
int64_t batch_size = target.getSize(0);
int64_t H = target.getSize(1);
int64_t W = target.getSize(2);
CUDA_KERNEL_LOOP(index, nthreads) {
const int64_t b = index % batch_size;
const int64_t h = (index / batch_size) % H;
const int64_t w = (index / (batch_size * H)) % W;
int64_t cur_target = target[b][h][w];
if (cur_target == ignore_index) {
continue;
}
Dtype value =
-(weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1));
gradInput[b][cur_target][h][w] = value * gradOutput[b][h][w];
}
}
template <typename T, typename AccumT>
#if defined(__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel(
T *output,
T *total_weight,
T *input,
THCIndex_t *target,
T *weights,
int size_average,
int batch_size,
int n_classes,
int map_nelem,
int blocks_per_sample,
int64_t ignore_index)
{
__shared__ AccumT partial_sums[CUDA_NUM_THREADS];
int i, t;
T cur_weight;
AccumT input_sum = 0;
AccumT acc_weight = 0;
int sample = blockIdx.x / blocks_per_sample;
int toffset = sample * map_nelem;
int ioffset = sample * map_nelem * n_classes;
int step = blockDim.x * blocks_per_sample;
for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
i < map_nelem;
i += step) {
t = target[toffset + i];
if (t != ignore_index) {
assert(t >= 0 && t < n_classes);
cur_weight = weights ? weights[t] : ScalarConvert<int, T>::to(1);
input_sum -= input[ioffset + i + map_nelem * t] * cur_weight;
acc_weight += cur_weight;
}
}
input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<AccumT>(), AccumT(0));
__syncthreads();
acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<AccumT>(), AccumT(0));
if (threadIdx.x == 0) {
atomicAdd(total_weight, ScalarConvert<AccumT, T>::to(acc_weight));
atomicAdd(output, ScalarConvert<AccumT, T>::to(input_sum));
}
}
template<typename T>
__global__ void cunn_SpatialClassNLLCriterion_sizeAverage_kernel(
T *output,
T *total_weight)
{
if (*total_weight > 0)
*output = THCNumerics<T>::div(*output, *total_weight);
}
template<typename T>
__global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel(
T *gradInput,
T *gradOutput,
THCIndex_t *target,
T *weights,
T *total_weight,
int size_average,
int batch_size,
int n_classes,
int map_nelem,
int blocks_per_sample,
int64_t ignore_index)
{
if (*total_weight <= 0)
return;
int i, t;
T norm = size_average ? (ScalarConvert<int, T>::to(1) / *total_weight) : ScalarConvert<int, T>::to(1);
int sample = blockIdx.x / blocks_per_sample;
int step = blockDim.x * blocks_per_sample;
int toffset = sample * map_nelem;
int ioffset = sample * map_nelem * n_classes;
for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
i < map_nelem;
i += step) {
t = (int)target[toffset + i];
if (t != ignore_index) {
assert(t >= 0 && t < n_classes);
gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : ScalarConvert<int, T>::to(1)) * norm * gradOutput[0];
}
}
}
#include <THHUNN/generic/SpatialClassNLLCriterion.hip>
#include <THH/THHGenerateFloatTypes.h>
| 21740db472db1187edbd0b327f10a048c61259a2.cu | #include <THCUNN/THCUNN.h>
#include <TH/THHalf.h>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <THC/THCAtomics.cuh>
#include <THCUNN/common.h>
#include <THC/THCDeviceTensor.cuh>
#include <THC/THCDeviceTensorUtils.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCApply.cuh>
#include <c10/macros/Macros.h>
#include <thrust/functional.h>
template <typename Dtype>
__global__ void SpatialClassNLLCriterion_updateOutput_no_reduce_kernel(
int64_t nthreads,
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<THCIndex_t, 3> target,
THCDeviceTensor<Dtype, 3> output,
Dtype *weights,
int64_t ignore_index) {
int64_t batch_size = input.getSize(0);
int64_t H = input.getSize(2);
int64_t W = input.getSize(3);
CUDA_KERNEL_LOOP(index, nthreads) {
const int64_t b = index % batch_size;
const int64_t h = (index / batch_size) % H;
const int64_t w = (index / (batch_size * H)) % W;
int64_t cur_target = target[b][h][w];
if (cur_target == ignore_index) {
output[b][h][w] = ScalarConvert<int, Dtype>::to(0);
continue;
}
Dtype value = input[b][cur_target][h][w];
Dtype weight =
weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1);
output[b][h][w] = -value * weight;
}
}
template <typename Dtype>
__global__ void SpatialClassNLLCriterion_updateGradInput_no_reduce_kernel(
int64_t nthreads,
THCDeviceTensor<THCIndex_t, 3> target,
THCDeviceTensor<Dtype, 3> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
Dtype *weights,
int64_t ignore_index) {
int64_t batch_size = target.getSize(0);
int64_t H = target.getSize(1);
int64_t W = target.getSize(2);
CUDA_KERNEL_LOOP(index, nthreads) {
const int64_t b = index % batch_size;
const int64_t h = (index / batch_size) % H;
const int64_t w = (index / (batch_size * H)) % W;
int64_t cur_target = target[b][h][w];
if (cur_target == ignore_index) {
continue;
}
Dtype value =
-(weights ? weights[cur_target] : ScalarConvert<int, Dtype>::to(1));
gradInput[b][cur_target][h][w] = value * gradOutput[b][h][w];
}
}
template <typename T, typename AccumT>
#if defined(__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel(
T *output,
T *total_weight,
T *input,
THCIndex_t *target,
T *weights,
int size_average,
int batch_size,
int n_classes,
int map_nelem,
int blocks_per_sample,
int64_t ignore_index)
{
__shared__ AccumT partial_sums[CUDA_NUM_THREADS];
int i, t;
T cur_weight;
AccumT input_sum = 0;
AccumT acc_weight = 0;
int sample = blockIdx.x / blocks_per_sample;
int toffset = sample * map_nelem;
int ioffset = sample * map_nelem * n_classes;
int step = blockDim.x * blocks_per_sample;
for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
i < map_nelem;
i += step) {
t = target[toffset + i];
if (t != ignore_index) {
assert(t >= 0 && t < n_classes);
cur_weight = weights ? weights[t] : ScalarConvert<int, T>::to(1);
input_sum -= input[ioffset + i + map_nelem * t] * cur_weight;
acc_weight += cur_weight;
}
}
input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<AccumT>(), AccumT(0));
__syncthreads();
acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<AccumT>(), AccumT(0));
if (threadIdx.x == 0) {
atomicAdd(total_weight, ScalarConvert<AccumT, T>::to(acc_weight));
atomicAdd(output, ScalarConvert<AccumT, T>::to(input_sum));
}
}
template<typename T>
__global__ void cunn_SpatialClassNLLCriterion_sizeAverage_kernel(
T *output,
T *total_weight)
{
if (*total_weight > 0)
*output = THCNumerics<T>::div(*output, *total_weight);
}
template<typename T>
__global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel(
T *gradInput,
T *gradOutput,
THCIndex_t *target,
T *weights,
T *total_weight,
int size_average,
int batch_size,
int n_classes,
int map_nelem,
int blocks_per_sample,
int64_t ignore_index)
{
if (*total_weight <= 0)
return;
int i, t;
T norm = size_average ? (ScalarConvert<int, T>::to(1) / *total_weight) : ScalarConvert<int, T>::to(1);
int sample = blockIdx.x / blocks_per_sample;
int step = blockDim.x * blocks_per_sample;
int toffset = sample * map_nelem;
int ioffset = sample * map_nelem * n_classes;
for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
i < map_nelem;
i += step) {
t = (int)target[toffset + i];
if (t != ignore_index) {
assert(t >= 0 && t < n_classes);
gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : ScalarConvert<int, T>::to(1)) * norm * gradOutput[0];
}
}
}
#include <THCUNN/generic/SpatialClassNLLCriterion.cu>
#include <THC/THCGenerateFloatTypes.h>
|
066acdeb4bbcbdd76451f68ab887ab7b8cda30ee.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sd_t_s1_5_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int h1d = 1;
int h2d = 1;
int h3d = 1;
int p4d = 1;
int p5d = 1;
int p6d = 1;
int p4ld_t2 = 1;
int h1ld_t2 = 1;
int h3ld_v2 = 1;
int h2ld_v2 = 1;
int p6ld_v2 = 1;
int p5ld_v2 = 1;
int h3ld_t3 = 1;
int h2ld_t3 = 1;
int h1ld_t3 = 1;
int p6ld_t3 = 1;
int p5ld_t3 = 1;
int p4ld_t3 = 1;
double *t3d = NULL;
hipMalloc(&t3d, XSIZE*YSIZE);
double *t2_d = NULL;
hipMalloc(&t2_d, XSIZE*YSIZE);
double *v2_d = NULL;
hipMalloc(&v2_d, XSIZE*YSIZE);
int p4 = 1;
int total_x = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
sd_t_s1_5_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3d,t2_d,v2_d,p4,total_x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
sd_t_s1_5_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3d,t2_d,v2_d,p4,total_x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
sd_t_s1_5_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3d,t2_d,v2_d,p4,total_x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 066acdeb4bbcbdd76451f68ab887ab7b8cda30ee.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sd_t_s1_5_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int h1d = 1;
int h2d = 1;
int h3d = 1;
int p4d = 1;
int p5d = 1;
int p6d = 1;
int p4ld_t2 = 1;
int h1ld_t2 = 1;
int h3ld_v2 = 1;
int h2ld_v2 = 1;
int p6ld_v2 = 1;
int p5ld_v2 = 1;
int h3ld_t3 = 1;
int h2ld_t3 = 1;
int h1ld_t3 = 1;
int p6ld_t3 = 1;
int p5ld_t3 = 1;
int p4ld_t3 = 1;
double *t3d = NULL;
cudaMalloc(&t3d, XSIZE*YSIZE);
double *t2_d = NULL;
cudaMalloc(&t2_d, XSIZE*YSIZE);
double *v2_d = NULL;
cudaMalloc(&v2_d, XSIZE*YSIZE);
int p4 = 1;
int total_x = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sd_t_s1_5_kernel<<<gridBlock,threadBlock>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3d,t2_d,v2_d,p4,total_x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sd_t_s1_5_kernel<<<gridBlock,threadBlock>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3d,t2_d,v2_d,p4,total_x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sd_t_s1_5_kernel<<<gridBlock,threadBlock>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3d,t2_d,v2_d,p4,total_x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
3183ad40f09c398a71a717c323b314871cb85ab3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#define L2(x1, y1, x2, y2) ((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2))
// Each point (thread) computes its distance to each centroid
// and adds its x and y values to the sum of its closest
// centroid, as well as incrementing that centroid's count of assigned points.
__global__ void assign_clusters(
const float* px,
const float* py,
int N,
const float* mx,
const float* my,
float* sx,
float* sy,
int k,
int* c
) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= N) {
return;
}
// Make global loads once.
const float x = px[index];
const float y = py[index];
float best_distance = FLT_MAX;
int best_cluster = 0;
for (int cluster = 0; cluster < k; ++cluster) {
const float distance = L2(x, y, mx[cluster], my[cluster]);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
}
}
atomicAdd(&sx[best_cluster], x);
atomicAdd(&sy[best_cluster], y);
atomicAdd(&c [best_cluster], 1);
}
// Each thread is one cluster, which just recomputes its coordinates as the mean
// of all points assigned to it.
__global__ void compute_new_means(
float* mx, float* my, const float* sx, const float* sy, const int* c
) {
const int cluster = threadIdx.x;
const int count = max(1, c[cluster]); // turn 0/0 to 0/1
mx[cluster] = sx[cluster] / count;
my[cluster] = sy[cluster] / count;
}
// k-means clustering
void kmeans(int N, int K, int M, size_t num_cpus, size_t num_gpus) {
std::vector<float> h_px, h_py, h_mx, h_my, mx, my;
std::vector<int> c(K), best_ks(N);
std::vector<float> sx(K), sy(K);
float *d_px, *d_py, *d_mx, *d_my, *d_sx, *d_sy, *d_c;
// Randomly generate N points
for(int i=0; i<N; ++i) {
h_px.push_back(rand()%1000 - 500);
h_py.push_back(rand()%1000 - 500);
if(i < K) {
mx.push_back(h_px.back());
my.push_back(h_py.back());
h_mx.push_back(h_px.back());
h_my.push_back(h_py.back());
}
}
tf::Taskflow taskflow;
tf::Executor executor(num_cpus, num_gpus);
// cpu version
auto init = taskflow.emplace([&](){
for(int i=0; i<K; ++i) {
mx[i] = h_px[i];
my[i] = h_py[i];
}
}).name("init");
// clear the storage
auto clean_up = taskflow.emplace([&](){
for(int k=0; k<K; ++k) {
sx[k] = 0.0f;
sy[k] = 0.0f;
c [k] = 0;
}
}).name("clean_up");
tf::Task pf;
// update cluster
pf = taskflow.parallel_for(0, N, 1, [&](int i){
float x = h_px[i];
float y = h_py[i];
float best_d = std::numeric_limits<float>::max();
int best_k = 0;
for (int k = 0; k < K; ++k) {
const float d = L2(x, y, mx[k], my[k]);
if (d < best_d) {
best_d = d;
best_k = k;
}
}
best_ks[i] = best_k;
});
auto update_cluster = taskflow.emplace([&](){
for(int i=0; i<N; i++) {
sx[best_ks[i]] += h_px[i];
sy[best_ks[i]] += h_py[i];
c [best_ks[i]] += 1;
}
for(int k=0; k<K; ++k) {
auto count = max(1, c[k]); // turn 0/0 to 0/1
mx[k] = sx[k] / count;
my[k] = sy[k] / count;
}
}).name("update_cluster");
auto condition = taskflow.emplace([m=0, M]() mutable {
return (m++ < M) ? 0 : 1;
}).name("converged?");
init.precede(clean_up);
clean_up.precede(pf);
pf.precede(update_cluster);
condition.precede(clean_up)
.succeed(update_cluster);
// gpu version
auto allocate_px = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_px, N*sizeof(float)) == hipSuccess);
}).name("allocate_px");
auto allocate_py = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_py, N*sizeof(float)) == hipSuccess);
}).name("allocate_py");
auto allocate_mx = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_mx, K*sizeof(float)) == hipSuccess);
}).name("allocate_mx");
auto allocate_my = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_my, K*sizeof(float)) == hipSuccess);
}).name("allocate_my");
auto allocate_sx = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_sx, K*sizeof(float)) == hipSuccess);
}).name("allocate_sx");
auto allocate_sy = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_sy, K*sizeof(float)) == hipSuccess);
}).name("allocate_sy");
auto allocate_c = taskflow.emplace([&](){
REQUIRE(hipMalloc(&d_c, K*sizeof(float)) == hipSuccess);
}).name("allocate_c");
auto h2d = taskflow.emplace([&](tf::cudaFlow& cf){
cf.copy(d_px, h_px.data(), N).name("h2d_px");
cf.copy(d_py, h_py.data(), N).name("h2d_py");
cf.copy(d_mx, h_mx.data(), K).name("h2d_mx");
cf.copy(d_my, h_my.data(), K).name("h2d_my");
}).name("h2d");
auto kmeans = taskflow.emplace([&](tf::cudaFlow& cf){
auto zero_c = cf.zero(d_c, K).name("zero_c");
auto zero_sx = cf.zero(d_sx, K).name("zero_sx");
auto zero_sy = cf.zero(d_sy, K).name("zero_sy");
auto cluster = cf.kernel(
(N+1024-1) / 1024, 1024, 0,
assign_clusters, d_px, d_py, N, d_mx, d_my, d_sx, d_sy, K, d_c
).name("cluster");
auto new_centroid = cf.kernel(
1, K, 0,
compute_new_means, d_mx, d_my, d_sx, d_sy, d_c
).name("new_centroid");
cluster.precede(new_centroid)
.succeed(zero_c, zero_sx, zero_sy);
}).name("update_means");
auto gpu_condition = taskflow.emplace([i=0, M] () mutable {
return i++ < M ? 0 : 1;
}).name("converged?");
auto stop = taskflow.emplace([&](tf::cudaFlow& cf){
cf.copy(h_mx.data(), d_mx, K).name("d2h_mx");
cf.copy(h_my.data(), d_my, K).name("d2h_my");
}).name("stop");
auto free = taskflow.emplace([&](){
REQUIRE(hipFree(d_px)==hipSuccess);
REQUIRE(hipFree(d_py)==hipSuccess);
REQUIRE(hipFree(d_mx)==hipSuccess);
REQUIRE(hipFree(d_my)==hipSuccess);
REQUIRE(hipFree(d_sx)==hipSuccess);
REQUIRE(hipFree(d_sy)==hipSuccess);
REQUIRE(hipFree(d_c )==hipSuccess);
}).name("free");
// build up the dependency
h2d.succeed(allocate_px, allocate_py, allocate_mx, allocate_my);
kmeans.succeed(allocate_sx, allocate_sy, allocate_c, h2d)
.precede(gpu_condition);
gpu_condition.precede(kmeans, stop);
stop.precede(free);
executor.run(taskflow).wait();
//taskflow.dump(std::cout);
for(int k=0; k<K; k++) {
REQUIRE(::fabs(h_mx[k] - mx[k]) < 1.0f);
REQUIRE(::fabs(h_my[k] - my[k]) < 1.0f);
}
}
TEST_CASE("kmeans.10.1C1G") {
kmeans(10, 2, 10, 1, 1);
}
TEST_CASE("kmeans.10.1C2G") {
kmeans(10, 2, 10, 1, 2);
}
TEST_CASE("kmeans.10.1C3G") {
kmeans(10, 2, 10, 1, 3);
}
TEST_CASE("kmeans.10.1C4G") {
kmeans(10, 2, 10, 1, 4);
}
TEST_CASE("kmeans.10.2C1G") {
kmeans(10, 2, 10, 2, 1);
}
TEST_CASE("kmeans.10.2C2G") {
kmeans(10, 2, 10, 2, 2);
}
TEST_CASE("kmeans.10.2C3G") {
kmeans(10, 2, 10, 2, 3);
}
TEST_CASE("kmeans.10.2C4G") {
kmeans(10, 2, 10, 2, 4);
}
TEST_CASE("kmeans.10.4C1G") {
kmeans(10, 2, 10, 4, 1);
}
TEST_CASE("kmeans.10.4C2G") {
kmeans(10, 2, 10, 4, 2);
}
TEST_CASE("kmeans.10.4C3G") {
kmeans(10, 2, 10, 4, 3);
}
TEST_CASE("kmeans.10.4C4G") {
kmeans(10, 2, 10, 4, 4);
}
TEST_CASE("kmeans.100.1C1G") {
kmeans(100, 4, 100, 1, 1);
}
TEST_CASE("kmeans.100.2C2G") {
kmeans(100, 4, 100, 2, 2);
}
TEST_CASE("kmeans.100.3C3G") {
kmeans(100, 4, 100, 3, 3);
}
TEST_CASE("kmeans.100.4C4G") {
kmeans(100, 4, 100, 4, 4);
}
TEST_CASE("kmeans.1000.1C1G") {
kmeans(1000, 8, 1000, 1, 1);
}
TEST_CASE("kmeans.1000.2C2G") {
kmeans(1000, 8, 1000, 2, 2);
}
TEST_CASE("kmeans.1000.4C4G") {
kmeans(1000, 8, 1000, 4, 4);
}
TEST_CASE("kmeans.1000.8C8G") {
kmeans(1000, 8, 1000, 8, 8);
}
TEST_CASE("kmeans.1000.16C16G") {
kmeans(1000, 8, 1000, 16, 16);
}
| 3183ad40f09c398a71a717c323b314871cb85ab3.cu | #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest.h>
#include <taskflow/taskflow.hpp>
#define L2(x1, y1, x2, y2) ((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2))
// Each point (thread) computes its distance to each centroid
// and adds its x and y values to the sum of its closest
// centroid, as well as incrementing that centroid's count of assigned points.
__global__ void assign_clusters(
const float* px,
const float* py,
int N,
const float* mx,
const float* my,
float* sx,
float* sy,
int k,
int* c
) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= N) {
return;
}
// Make global loads once.
const float x = px[index];
const float y = py[index];
float best_distance = FLT_MAX;
int best_cluster = 0;
for (int cluster = 0; cluster < k; ++cluster) {
const float distance = L2(x, y, mx[cluster], my[cluster]);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
}
}
atomicAdd(&sx[best_cluster], x);
atomicAdd(&sy[best_cluster], y);
atomicAdd(&c [best_cluster], 1);
}
// Each thread is one cluster, which just recomputes its coordinates as the mean
// of all points assigned to it.
__global__ void compute_new_means(
float* mx, float* my, const float* sx, const float* sy, const int* c
) {
const int cluster = threadIdx.x;
const int count = max(1, c[cluster]); // turn 0/0 to 0/1
mx[cluster] = sx[cluster] / count;
my[cluster] = sy[cluster] / count;
}
// k-means clustering
void kmeans(int N, int K, int M, size_t num_cpus, size_t num_gpus) {
std::vector<float> h_px, h_py, h_mx, h_my, mx, my;
std::vector<int> c(K), best_ks(N);
std::vector<float> sx(K), sy(K);
float *d_px, *d_py, *d_mx, *d_my, *d_sx, *d_sy, *d_c;
// Randomly generate N points
for(int i=0; i<N; ++i) {
h_px.push_back(rand()%1000 - 500);
h_py.push_back(rand()%1000 - 500);
if(i < K) {
mx.push_back(h_px.back());
my.push_back(h_py.back());
h_mx.push_back(h_px.back());
h_my.push_back(h_py.back());
}
}
tf::Taskflow taskflow;
tf::Executor executor(num_cpus, num_gpus);
// cpu version
auto init = taskflow.emplace([&](){
for(int i=0; i<K; ++i) {
mx[i] = h_px[i];
my[i] = h_py[i];
}
}).name("init");
// clear the storage
auto clean_up = taskflow.emplace([&](){
for(int k=0; k<K; ++k) {
sx[k] = 0.0f;
sy[k] = 0.0f;
c [k] = 0;
}
}).name("clean_up");
tf::Task pf;
// update cluster
pf = taskflow.parallel_for(0, N, 1, [&](int i){
float x = h_px[i];
float y = h_py[i];
float best_d = std::numeric_limits<float>::max();
int best_k = 0;
for (int k = 0; k < K; ++k) {
const float d = L2(x, y, mx[k], my[k]);
if (d < best_d) {
best_d = d;
best_k = k;
}
}
best_ks[i] = best_k;
});
auto update_cluster = taskflow.emplace([&](){
for(int i=0; i<N; i++) {
sx[best_ks[i]] += h_px[i];
sy[best_ks[i]] += h_py[i];
c [best_ks[i]] += 1;
}
for(int k=0; k<K; ++k) {
auto count = max(1, c[k]); // turn 0/0 to 0/1
mx[k] = sx[k] / count;
my[k] = sy[k] / count;
}
}).name("update_cluster");
auto condition = taskflow.emplace([m=0, M]() mutable {
return (m++ < M) ? 0 : 1;
}).name("converged?");
init.precede(clean_up);
clean_up.precede(pf);
pf.precede(update_cluster);
condition.precede(clean_up)
.succeed(update_cluster);
// gpu version
auto allocate_px = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_px, N*sizeof(float)) == cudaSuccess);
}).name("allocate_px");
auto allocate_py = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_py, N*sizeof(float)) == cudaSuccess);
}).name("allocate_py");
auto allocate_mx = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_mx, K*sizeof(float)) == cudaSuccess);
}).name("allocate_mx");
auto allocate_my = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_my, K*sizeof(float)) == cudaSuccess);
}).name("allocate_my");
auto allocate_sx = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_sx, K*sizeof(float)) == cudaSuccess);
}).name("allocate_sx");
auto allocate_sy = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_sy, K*sizeof(float)) == cudaSuccess);
}).name("allocate_sy");
auto allocate_c = taskflow.emplace([&](){
REQUIRE(cudaMalloc(&d_c, K*sizeof(float)) == cudaSuccess);
}).name("allocate_c");
auto h2d = taskflow.emplace([&](tf::cudaFlow& cf){
cf.copy(d_px, h_px.data(), N).name("h2d_px");
cf.copy(d_py, h_py.data(), N).name("h2d_py");
cf.copy(d_mx, h_mx.data(), K).name("h2d_mx");
cf.copy(d_my, h_my.data(), K).name("h2d_my");
}).name("h2d");
auto kmeans = taskflow.emplace([&](tf::cudaFlow& cf){
auto zero_c = cf.zero(d_c, K).name("zero_c");
auto zero_sx = cf.zero(d_sx, K).name("zero_sx");
auto zero_sy = cf.zero(d_sy, K).name("zero_sy");
auto cluster = cf.kernel(
(N+1024-1) / 1024, 1024, 0,
assign_clusters, d_px, d_py, N, d_mx, d_my, d_sx, d_sy, K, d_c
).name("cluster");
auto new_centroid = cf.kernel(
1, K, 0,
compute_new_means, d_mx, d_my, d_sx, d_sy, d_c
).name("new_centroid");
cluster.precede(new_centroid)
.succeed(zero_c, zero_sx, zero_sy);
}).name("update_means");
auto gpu_condition = taskflow.emplace([i=0, M] () mutable {
return i++ < M ? 0 : 1;
}).name("converged?");
auto stop = taskflow.emplace([&](tf::cudaFlow& cf){
cf.copy(h_mx.data(), d_mx, K).name("d2h_mx");
cf.copy(h_my.data(), d_my, K).name("d2h_my");
}).name("stop");
auto free = taskflow.emplace([&](){
REQUIRE(cudaFree(d_px)==cudaSuccess);
REQUIRE(cudaFree(d_py)==cudaSuccess);
REQUIRE(cudaFree(d_mx)==cudaSuccess);
REQUIRE(cudaFree(d_my)==cudaSuccess);
REQUIRE(cudaFree(d_sx)==cudaSuccess);
REQUIRE(cudaFree(d_sy)==cudaSuccess);
REQUIRE(cudaFree(d_c )==cudaSuccess);
}).name("free");
// build up the dependency
h2d.succeed(allocate_px, allocate_py, allocate_mx, allocate_my);
kmeans.succeed(allocate_sx, allocate_sy, allocate_c, h2d)
.precede(gpu_condition);
gpu_condition.precede(kmeans, stop);
stop.precede(free);
executor.run(taskflow).wait();
//taskflow.dump(std::cout);
for(int k=0; k<K; k++) {
REQUIRE(std::fabs(h_mx[k] - mx[k]) < 1.0f);
REQUIRE(std::fabs(h_my[k] - my[k]) < 1.0f);
}
}
TEST_CASE("kmeans.10.1C1G") {
kmeans(10, 2, 10, 1, 1);
}
TEST_CASE("kmeans.10.1C2G") {
kmeans(10, 2, 10, 1, 2);
}
TEST_CASE("kmeans.10.1C3G") {
kmeans(10, 2, 10, 1, 3);
}
TEST_CASE("kmeans.10.1C4G") {
kmeans(10, 2, 10, 1, 4);
}
TEST_CASE("kmeans.10.2C1G") {
kmeans(10, 2, 10, 2, 1);
}
TEST_CASE("kmeans.10.2C2G") {
kmeans(10, 2, 10, 2, 2);
}
TEST_CASE("kmeans.10.2C3G") {
kmeans(10, 2, 10, 2, 3);
}
TEST_CASE("kmeans.10.2C4G") {
kmeans(10, 2, 10, 2, 4);
}
TEST_CASE("kmeans.10.4C1G") {
kmeans(10, 2, 10, 4, 1);
}
TEST_CASE("kmeans.10.4C2G") {
kmeans(10, 2, 10, 4, 2);
}
TEST_CASE("kmeans.10.4C3G") {
kmeans(10, 2, 10, 4, 3);
}
TEST_CASE("kmeans.10.4C4G") {
kmeans(10, 2, 10, 4, 4);
}
TEST_CASE("kmeans.100.1C1G") {
kmeans(100, 4, 100, 1, 1);
}
TEST_CASE("kmeans.100.2C2G") {
kmeans(100, 4, 100, 2, 2);
}
TEST_CASE("kmeans.100.3C3G") {
kmeans(100, 4, 100, 3, 3);
}
TEST_CASE("kmeans.100.4C4G") {
kmeans(100, 4, 100, 4, 4);
}
TEST_CASE("kmeans.1000.1C1G") {
kmeans(1000, 8, 1000, 1, 1);
}
TEST_CASE("kmeans.1000.2C2G") {
kmeans(1000, 8, 1000, 2, 2);
}
TEST_CASE("kmeans.1000.4C4G") {
kmeans(1000, 8, 1000, 4, 4);
}
TEST_CASE("kmeans.1000.8C8G") {
kmeans(1000, 8, 1000, 8, 8);
}
TEST_CASE("kmeans.1000.16C16G") {
kmeans(1000, 8, 1000, 16, 16);
}
|
b20ae5c3f5f2b75c5c529ca8247d8415c1065b2e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#define INDEX_NUM 3
#define INDEX_SUM 0
#define INDEX_MAX 1
#define INDEX_MIN 2
#define NUM_MAX 1024
#define ITEMS_NUM (1 << 20)
#define BLOCK_SIZE 256
using namespace std;
// TODO-1 => ./task_no_atomic
// 1 thread does all compute, no atomic/sync
// thread.0 of block.0 computes everything
__global__ void kernel_no_atomics(int *data, int *results)
{
if (threadIdx.x || blockIdx.x)
return;
for (int i = 0; i != ITEMS_NUM; ++i) {
results[INDEX_SUM] += data[i];
results[INDEX_MAX] = (data[i] > results[INDEX_MAX]) ?
data[i] : results[INDEX_MAX];
results[INDEX_MIN] = (data[i] < results[INDEX_MIN]) ?
data[i] : results[INDEX_MIN];
}
}
// TODO-2 => ./task_partial_atomic
// ITEMS_NUM / 256 threads, ITEMS_NUM / 256 * 3 atomic calls
// thread.0 of each block does partial compute, then uses atomics to compute
__global__ void kernel_partial_atomics(int *data, int *results)
{
if (threadIdx.x)
return;
int start = blockIdx.x * BLOCK_SIZE;
int localRes[INDEX_NUM] = {0};
localRes[INDEX_MIN] = NUM_MAX;
for (int i = 0; i != BLOCK_SIZE; ++i) {
localRes[INDEX_SUM] += data[start + i];
localRes[INDEX_MAX] = (data[start + i] > localRes[INDEX_MAX]) ?
data[start + i] : localRes[INDEX_MAX];
localRes[INDEX_MIN] = (data[start + i] < localRes[INDEX_MIN]) ?
data[start + i] : localRes[INDEX_MIN];
}
atomicAdd(results + INDEX_SUM, localRes[INDEX_SUM]);
atomicMax(results + INDEX_MAX, localRes[INDEX_MAX]);
atomicMin(results + INDEX_MIN, localRes[INDEX_MIN]);
}
// TODO-3 => ./task_full_atomic
// ITEMS_NUM threads do compute, ITEMS_NUM * 3 atomic calls
// all threads use atomics to compute
__global__ void kernel_full_atomics(int *data, int *results)
{
int pos = blockIdx.x * blockDim.x + threadIdx.x;
atomicAdd(results + INDEX_SUM, data[pos]);
atomicMax(results + INDEX_MAX, data[pos]);
atomicMin(results + INDEX_MIN, data[pos]);
}
int main(void)
{
int expResults[INDEX_NUM];
int *data = NULL;
hipMallocManaged(&data, ITEMS_NUM * sizeof(int));
if (data == 0) {
cout << "[HOST] Couldn't allocate memory\n";
return 1;
}
// generate data and expected result
expResults[INDEX_SUM] = 0;
expResults[INDEX_MAX] = 0;
expResults[INDEX_MIN] = NUM_MAX;
for(int i = 0; i < ITEMS_NUM; i++) {
// each generated number is lower than NUM_MAX as value
data[i] = rand() % NUM_MAX;
expResults[INDEX_SUM] += data[i];
expResults[INDEX_MAX] = (data[i] > expResults[INDEX_MAX]) ?
data[i] : expResults[INDEX_MAX];
expResults[INDEX_MIN] = (data[i] < expResults[INDEX_MIN]) ?
data[i] : expResults[INDEX_MIN];
}
int *results = NULL;
hipMallocManaged(&results, INDEX_NUM * sizeof(int));
if (results == 0) {
cout << "[HOST] Couldn't allocate memory\n";
return 1;
}
// compute 10 times the results
for(int i = 0; i < 10; i++) {
// init
results[INDEX_SUM] = 0;
results[INDEX_MAX] = 0;
results[INDEX_MIN] = NUM_MAX;
#ifdef NO_ATOMIC
hipLaunchKernelGGL(( kernel_no_atomics), dim3(1) , dim3(1) , 0, 0, data, results);
#elif defined(PARTIAL_ATOMIC)
hipLaunchKernelGGL(( kernel_partial_atomics), dim3(ITEMS_NUM / 256) , dim3(1) , 0, 0, data, results);
#elif defined(FULL_ATOMIC)
hipLaunchKernelGGL(( kernel_full_atomics), dim3(ITEMS_NUM / 256) , dim3(256) , 0, 0, data, results);
#endif
hipDeviceSynchronize();
}
cout << "SUM: " << results[INDEX_SUM] << endl;
if(results[INDEX_SUM] != expResults[INDEX_SUM]) {
cout << "Failed, SUM should be " << expResults[INDEX_SUM] << endl;
}
cout << "MAX: " << results[INDEX_MAX] << endl;
if(results[INDEX_MAX] != expResults[INDEX_MAX]) {
cout << "Failed, MAX should be " << expResults[INDEX_MAX] << endl;
}
cout << "MIN: " << results[INDEX_MIN] << endl;
if(results[INDEX_MIN] != expResults[INDEX_MIN]) {
cout << "Failed, MIN should be " << expResults[INDEX_MIN] << endl;
}
hipFree(results);
return 0;
} | b20ae5c3f5f2b75c5c529ca8247d8415c1065b2e.cu | #include <iostream>
#define INDEX_NUM 3
#define INDEX_SUM 0
#define INDEX_MAX 1
#define INDEX_MIN 2
#define NUM_MAX 1024
#define ITEMS_NUM (1 << 20)
#define BLOCK_SIZE 256
using namespace std;
// TODO-1 => ./task_no_atomic
// 1 thread does all compute, no atomic/sync
// thread.0 of block.0 computes everything
__global__ void kernel_no_atomics(int *data, int *results)
{
if (threadIdx.x || blockIdx.x)
return;
for (int i = 0; i != ITEMS_NUM; ++i) {
results[INDEX_SUM] += data[i];
results[INDEX_MAX] = (data[i] > results[INDEX_MAX]) ?
data[i] : results[INDEX_MAX];
results[INDEX_MIN] = (data[i] < results[INDEX_MIN]) ?
data[i] : results[INDEX_MIN];
}
}
// TODO-2 => ./task_partial_atomic
// ITEMS_NUM / 256 threads, ITEMS_NUM / 256 * 3 atomic calls
// thread.0 of each block does partial compute, then uses atomics to compute
__global__ void kernel_partial_atomics(int *data, int *results)
{
if (threadIdx.x)
return;
int start = blockIdx.x * BLOCK_SIZE;
int localRes[INDEX_NUM] = {0};
localRes[INDEX_MIN] = NUM_MAX;
for (int i = 0; i != BLOCK_SIZE; ++i) {
localRes[INDEX_SUM] += data[start + i];
localRes[INDEX_MAX] = (data[start + i] > localRes[INDEX_MAX]) ?
data[start + i] : localRes[INDEX_MAX];
localRes[INDEX_MIN] = (data[start + i] < localRes[INDEX_MIN]) ?
data[start + i] : localRes[INDEX_MIN];
}
atomicAdd(results + INDEX_SUM, localRes[INDEX_SUM]);
atomicMax(results + INDEX_MAX, localRes[INDEX_MAX]);
atomicMin(results + INDEX_MIN, localRes[INDEX_MIN]);
}
// TODO-3 => ./task_full_atomic
// ITEMS_NUM threads do compute, ITEMS_NUM * 3 atomic calls
// all threads use atomics to compute
__global__ void kernel_full_atomics(int *data, int *results)
{
int pos = blockIdx.x * blockDim.x + threadIdx.x;
atomicAdd(results + INDEX_SUM, data[pos]);
atomicMax(results + INDEX_MAX, data[pos]);
atomicMin(results + INDEX_MIN, data[pos]);
}
int main(void)
{
int expResults[INDEX_NUM];
int *data = NULL;
cudaMallocManaged(&data, ITEMS_NUM * sizeof(int));
if (data == 0) {
cout << "[HOST] Couldn't allocate memory\n";
return 1;
}
// generate data and expected result
expResults[INDEX_SUM] = 0;
expResults[INDEX_MAX] = 0;
expResults[INDEX_MIN] = NUM_MAX;
for(int i = 0; i < ITEMS_NUM; i++) {
// each generated number is lower than NUM_MAX as value
data[i] = rand() % NUM_MAX;
expResults[INDEX_SUM] += data[i];
expResults[INDEX_MAX] = (data[i] > expResults[INDEX_MAX]) ?
data[i] : expResults[INDEX_MAX];
expResults[INDEX_MIN] = (data[i] < expResults[INDEX_MIN]) ?
data[i] : expResults[INDEX_MIN];
}
int *results = NULL;
cudaMallocManaged(&results, INDEX_NUM * sizeof(int));
if (results == 0) {
cout << "[HOST] Couldn't allocate memory\n";
return 1;
}
// compute 10 times the results
for(int i = 0; i < 10; i++) {
// init
results[INDEX_SUM] = 0;
results[INDEX_MAX] = 0;
results[INDEX_MIN] = NUM_MAX;
#ifdef NO_ATOMIC
kernel_no_atomics<<< 1 , 1 >>> (data, results);
#elif defined(PARTIAL_ATOMIC)
kernel_partial_atomics<<< ITEMS_NUM / 256 , 1 >>> (data, results);
#elif defined(FULL_ATOMIC)
kernel_full_atomics<<< ITEMS_NUM / 256 , 256 >>> (data, results);
#endif
cudaDeviceSynchronize();
}
cout << "SUM: " << results[INDEX_SUM] << endl;
if(results[INDEX_SUM] != expResults[INDEX_SUM]) {
cout << "Failed, SUM should be " << expResults[INDEX_SUM] << endl;
}
cout << "MAX: " << results[INDEX_MAX] << endl;
if(results[INDEX_MAX] != expResults[INDEX_MAX]) {
cout << "Failed, MAX should be " << expResults[INDEX_MAX] << endl;
}
cout << "MIN: " << results[INDEX_MIN] << endl;
if(results[INDEX_MIN] != expResults[INDEX_MIN]) {
cout << "Failed, MIN should be " << expResults[INDEX_MIN] << endl;
}
cudaFree(results);
return 0;
} |
245a601d14da80996338564e4cb788e196752668.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
typedef struct test_struct {
void * r;
int y;
double x;
long z;
char str[348];
struct test_struct * next;
struct test_struct * prev;
} test_struct;
__global__ void fun(int *z){
void * voidp = NULL;
char zeroes[sizeof(test_struct)];
memset(zeroes, 0, sizeof(zeroes));
const void *s1 = &voidp;
const void *s2 = zeroes;
size_t n = sizeof(voidp);
const unsigned char *us1 = (const unsigned char *) s1;
const unsigned char *us2 = (const unsigned char *) s2;
int k;
while (n-- != 0) {
if (*us1 != *us2) {
if(*us1 < *us2){k = -1;}
else k = 1;
}
printf("%d\n", k);
us1++;
us2++;
}
printf("%d\n", 100);
//return 100;
// printf("%d\n", (int)(&x) & -7);
}
int main(void)
{
int z;
int *dev_z;
hipMalloc((void**)&dev_z, sizeof(int));
hipLaunchKernelGGL(( fun), dim3(1),dim3(1), 0, 0, dev_z);
hipMemcpy(&z, dev_z, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_z);
return 0;
}
// compiles successfully;
| 245a601d14da80996338564e4cb788e196752668.cu | #include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
typedef struct test_struct {
void * r;
int y;
double x;
long z;
char str[348];
struct test_struct * next;
struct test_struct * prev;
} test_struct;
__global__ void fun(int *z){
void * voidp = NULL;
char zeroes[sizeof(test_struct)];
memset(zeroes, 0, sizeof(zeroes));
const void *s1 = &voidp;
const void *s2 = zeroes;
size_t n = sizeof(voidp);
const unsigned char *us1 = (const unsigned char *) s1;
const unsigned char *us2 = (const unsigned char *) s2;
int k;
while (n-- != 0) {
if (*us1 != *us2) {
if(*us1 < *us2){k = -1;}
else k = 1;
}
printf("%d\n", k);
us1++;
us2++;
}
printf("%d\n", 100);
//return 100;
// printf("%d\n", (int)(&x) & -7);
}
int main(void)
{
int z;
int *dev_z;
cudaMalloc((void**)&dev_z, sizeof(int));
fun<<<1,1>>>(dev_z);
cudaMemcpy(&z, dev_z, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_z);
return 0;
}
// compiles successfully;
|
33631e01fdd7a72c145f162b552e1d85a83e6f63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b){
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main(){
const int arraySize = 3000;
int i = 0;
int a[arraySize];
int b[arraySize];
int c[arraySize] = { 0 };
for(i = 0; i < arraySize; i++){
a[i] = (int)(rand()% 100);
b[i] = (int)(rand()% 100);
}
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
for(i = 0;i < arraySize; i++){
printf("%d + %d = %d\t",a[i],b[i],c[i]);
}
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size){
hipError_t cudaStatus;
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
} | 33631e01fdd7a72c145f162b552e1d85a83e6f63.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b){
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main(){
const int arraySize = 3000;
int i = 0;
int a[arraySize];
int b[arraySize];
int c[arraySize] = { 0 };
for(i = 0; i < arraySize; i++){
a[i] = (int)(rand()% 100);
b[i] = (int)(rand()% 100);
}
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
for(i = 0;i < arraySize; i++){
printf("%d + %d = %d\t",a[i],b[i],c[i]);
}
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size){
cudaError_t cudaStatus;
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
} |
82e9cb360f0c3563d12f4be26e3d308248ebd050.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (C) 2020 ByteDance Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <cmath>
#include "cuda/attention.h"
namespace effectivetransformer {
namespace cuda {
// Reduce code comes from Nvidia's DeepLearningExamples
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v1/fastertransformer/cuda/open_attention.cu#L29-L101
/**
* Multi-head attention open sourced
*/
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
val = warpReduceMax(val); // get max in each warp
if(lane == 0) // record in-warp max by warp Idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : 0;
val = warpReduceMax(val);
return val;
}
__inline__ __device__
int target_index(int id1, int id2, int id3, int id4,
int dim_1, int dim_2, int dim_3, int dim_4)
{
return id1 * (dim_2 * dim_3 * dim_4) +
id3 * (dim_2 * dim_4) + id2 * dim_4 + id4;
}
/// ***************************** add bias & pad *****************************
template<typename T>
__global__
void add_QKV_bias_padding(
T* Q, const T* bias_Q,
T* K, const T* bias_K,
T* V, const T* bias_V,
T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = batch_idx[blockIdx.x];
int seq_id = word_idx[blockIdx.x];
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id,
batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x;
T* src_ptr = (T*)Q;
T* dst_ptr = (T*)q_buf_;
const T* bias_ptr = (const T*)bias_Q;
dst_ptr[target_id] = src_ptr[tid] + __ldg(&bias_ptr[bias_id]);
src_ptr = (T*)K;
dst_ptr = (T*)k_buf_;
bias_ptr = (const T*)bias_K;
dst_ptr[target_id] = src_ptr[tid] + __ldg(&bias_ptr[bias_id]);
src_ptr = (T*)V;
dst_ptr = (T*)v_buf_;
bias_ptr = (const T*)bias_V;
dst_ptr[target_id] = src_ptr[tid] + __ldg(&bias_ptr[bias_id]);
}
template <>
__global__
void add_QKV_bias_padding(
__half* Q, const __half* bias_Q,
__half* K, const __half* bias_K,
__half* V, const __half* bias_V,
__half* q_buf_, __half* k_buf_, __half* v_buf_,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = batch_idx[blockIdx.x];
int seq_id = word_idx[blockIdx.x];
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id,
batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x;
half2* src_ptr = (half2*)Q;
half2* dst_ptr = (half2*)q_buf_;
const half2* bias_ptr = (const half2*)bias_Q;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)K;
dst_ptr = (half2*)k_buf_;
bias_ptr = (const half2*)bias_K;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)V;
dst_ptr = (half2*)v_buf_;
bias_ptr = (const half2*)bias_V;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
}
template<typename T>
void add_QKV_bias_padding_kernelLauncher(
T* Q, const T* bias_Q,
T* K, const T* bias_K,
T* V, const T* bias_V,
T* q_buf_, T* k_buf_, T* v_buf_,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const hipStream_t stream)
{
dim3 grid;
dim3 block;
grid.x = valid_word_num;
block.x = head_num * size_per_head;
hipLaunchKernelGGL(( add_QKV_bias_padding<float>), dim3(grid), dim3(block), 0, stream,
Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_, v_buf_,
batch_size, seq_len, head_num, size_per_head, batch_idx, word_idx);
}
template<>
void add_QKV_bias_padding_kernelLauncher(
__half* Q, const __half* bias_Q,
__half* K, const __half* bias_K,
__half* V, const __half* bias_V,
__half* q_buf_, __half* k_buf_, __half* v_buf_,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const hipStream_t stream)
{
dim3 grid;
dim3 block;
grid.x = valid_word_num;
block.x = head_num * size_per_head / 2;
hipLaunchKernelGGL(( add_QKV_bias_padding<__half>), dim3(grid), dim3(block), 0, stream,
Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_, v_buf_,
batch_size, seq_len, head_num, size_per_head / 2, batch_idx, word_idx);
}
template void add_QKV_bias_padding_kernelLauncher<__half>(
__half* Q, const __half* bias_Q,
__half* K, const __half* bias_K,
__half* V, const __half* bias_V,
__half* q_buf_, __half* k_buf_, __half* v_buf_,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const hipStream_t stream);
template void add_QKV_bias_padding_kernelLauncher<float>(
float* Q, const float* bias_Q,
float* K, const float* bias_K,
float* V, const float* bias_V,
float* q_buf_, float* k_buf_, float* v_buf_,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const hipStream_t stream);
/// *********************************** fin ***********************************
/// ************************** softmax for attention **************************
// softmax kernel code is copied from Nvidia's DeepLearningExamples :
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v1/fastertransformer/cuda/open_attention.cu#L189-L268
template <typename T>
__global__
void softmax_kernel(T* qk_buf_, const T* attr_mask,
const int batch_size, const int head_num, const int seq_len, const T scaler)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
int mask_offset = batch_id * seq_len * seq_len;
__shared__ float s_sum, s_max;
for(int i = 0; i < seq_len; ++i)
{
float qk = threadIdx.x < seq_len
? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len
? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len
? (float)(qk * (float)scaler + mask_val): -1e-20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
qk = threadIdx.x < seq_len ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk / s_sum);
qk_offset += seq_len;
mask_offset += seq_len;
}
}
template <typename T>
__global__
void softmax_kernel_v2(T* qk_buf_, const T* attr_mask,
const int batch_size, const int head_num,
const int seq_len, const float scaler)
{
int batch_id = blockIdx.x / head_num / seq_len;
int seq_id = blockIdx.x % seq_len;
int qk_offset = blockIdx.x * seq_len;
int mask_offset = batch_id * seq_len * seq_len + seq_id * seq_len;
__shared__ float s_sum, s_max;
float qk = threadIdx.x < seq_len
? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len
? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len
? (float)(qk * (float)scaler + mask_val) : -1e-20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
float qk_tmp = threadIdx.x < seq_len ? __expf((float)(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum);
}
template <typename T>
void softmax_kernel_kernelLauncher(
T* qk_buf_, const T* attr_mask,
const int batch_size, const int head_num, const int seq_len,
const T scaler,
const hipStream_t stream) {
dim3 grid;
dim3 block;
if(seq_len <= 32)
block.x = 32;
else if(seq_len > 32 && seq_len <= 64)
block.x = 64;
else if(seq_len > 64 && seq_len <= 128)
block.x = 128;
else if(seq_len > 128 && seq_len <= 256)
block.x = 256;
else if(seq_len > 256 && seq_len <= 512)
block.x = 512;
else
block.x = 1024;
if(batch_size * head_num <= 120)
{
grid.x = batch_size * head_num * seq_len;
hipLaunchKernelGGL(( softmax_kernel_v2<T>), dim3(grid), dim3(block), 0, stream,
qk_buf_, attr_mask, batch_size, head_num, seq_len, scaler);
}
else
{
grid.x = batch_size * head_num;
hipLaunchKernelGGL(( softmax_kernel<T>), dim3(grid), dim3(block), 0, stream,
qk_buf_, attr_mask, batch_size, head_num, seq_len, scaler);
}
}
template void softmax_kernel_kernelLauncher<float>(
float* qk_buf_, const float* attr_mask,
const int batch_size, const int head_num, const int seq_len,
const float scaler,
const hipStream_t stream);
template void softmax_kernel_kernelLauncher<__half>(
__half* qk_buf_, const __half* attr_mask,
const int batch_size, const int head_num, const int seq_len,
const __half scaler,
const hipStream_t stream);
/// *********************************** fin ***********************************
/// ****************** transpose & rm padding for attention *******************
template<typename T>
__global__
void transpose_rm_padding(
T* src, T* dst,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx)
{
int head_id = threadIdx.y;
int tid = threadIdx.x;
int batch_id = batch_idx[blockIdx.x];
int word_id = word_idx[blockIdx.x];
int src_offset = batch_id * head_num * seq_len * size_per_head +
head_id * seq_len * size_per_head +
word_id * size_per_head +
tid;
int dst_offset = blockIdx.x * head_num * size_per_head +
head_id * size_per_head +
tid;
T* src_ptr = (T*)src;
T* dst_ptr = (T*)dst;
dst_ptr[dst_offset] = src_ptr[src_offset];
}
template<>
__global__
void transpose_rm_padding(
__half* src, __half* dst,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx)
{
// if (threadIdx.y == (head_num - 1) && threadIdx.x >= size_per_head)
// return;
int head_id = threadIdx.y;
int tid = threadIdx.x;
int batch_id = batch_idx[blockIdx.x];
int word_id = word_idx[blockIdx.x];
int src_offset = batch_id * head_num * seq_len * size_per_head +
head_id * seq_len * size_per_head +
word_id * size_per_head +
tid;
int dst_offset = blockIdx.x * head_num * size_per_head +
head_id * size_per_head +
tid;
half2* src_ptr = (half2*)src;
half2* dst_ptr = (half2*)dst;
dst_ptr[dst_offset] = src_ptr[src_offset];
}
template <typename T>
void transpose_rm_padding_kernelLauncher(
T* src, T* dst,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const hipStream_t stream)
{
dim3 grid(valid_word_num);
dim3 block(size_per_head, head_num);
hipLaunchKernelGGL(( transpose_rm_padding<float>), dim3(grid), dim3(block), 0, stream,
src, dst,
batch_size, seq_len, head_num, size_per_head,
batch_idx, word_idx);
}
template <>
void transpose_rm_padding_kernelLauncher<__half>(
__half* src, __half* dst,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const hipStream_t stream)
{
dim3 grid(valid_word_num);
dim3 block(size_per_head / 2, head_num);
hipLaunchKernelGGL(( transpose_rm_padding<__half>), dim3(grid), dim3(block), 0, stream,
src, dst,
batch_size, seq_len, head_num, size_per_head / 2,
batch_idx, word_idx);
}
template void transpose_rm_padding_kernelLauncher<float>(
float* src, float* dst,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const hipStream_t stream);
template void transpose_rm_padding_kernelLauncher<__half>(
__half* src, __half* dst,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const hipStream_t stream);
/// *********************************** fin ***********************************
}//namespace cuda
}//namespace effectivetransformer
| 82e9cb360f0c3563d12f4be26e3d308248ebd050.cu | /*
* Copyright (C) 2020 ByteDance Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cmath>
#include "cuda/attention.h"
namespace effectivetransformer {
namespace cuda {
// Reduce code comes from Nvidia's DeepLearningExamples
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v1/fastertransformer/cuda/open_attention.cu#L29-L101
/**
 * Multi-head attention open sourced
*/
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
  val = warpReduceMax(val); // get max in each warp
  if(lane == 0) // record in-warp max by warp idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : 0;
val = warpReduceMax(val);
return val;
}
__inline__ __device__
int target_index(int id1, int id2, int id3, int id4,
int dim_1, int dim_2, int dim_3, int dim_4)
{
return id1 * (dim_2 * dim_3 * dim_4) +
id3 * (dim_2 * dim_4) + id2 * dim_4 + id4;
}
/// ***************************** add bias & pad *****************************
template<typename T>
__global__
void add_QKV_bias_padding(
T* Q, const T* bias_Q,
T* K, const T* bias_K,
T* V, const T* bias_V,
T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = batch_idx[blockIdx.x];
int seq_id = word_idx[blockIdx.x];
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id,
batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x;
T* src_ptr = (T*)Q;
T* dst_ptr = (T*)q_buf_;
const T* bias_ptr = (const T*)bias_Q;
dst_ptr[target_id] = src_ptr[tid] + __ldg(&bias_ptr[bias_id]);
src_ptr = (T*)K;
dst_ptr = (T*)k_buf_;
bias_ptr = (const T*)bias_K;
dst_ptr[target_id] = src_ptr[tid] + __ldg(&bias_ptr[bias_id]);
src_ptr = (T*)V;
dst_ptr = (T*)v_buf_;
bias_ptr = (const T*)bias_V;
dst_ptr[target_id] = src_ptr[tid] + __ldg(&bias_ptr[bias_id]);
}
template <>
__global__
void add_QKV_bias_padding(
__half* Q, const __half* bias_Q,
__half* K, const __half* bias_K,
__half* V, const __half* bias_V,
__half* q_buf_, __half* k_buf_, __half* v_buf_,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = batch_idx[blockIdx.x];
int seq_id = word_idx[blockIdx.x];
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id,
batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x;
half2* src_ptr = (half2*)Q;
half2* dst_ptr = (half2*)q_buf_;
const half2* bias_ptr = (const half2*)bias_Q;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)K;
dst_ptr = (half2*)k_buf_;
bias_ptr = (const half2*)bias_K;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)V;
dst_ptr = (half2*)v_buf_;
bias_ptr = (const half2*)bias_V;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
}
template<typename T>
void add_QKV_bias_padding_kernelLauncher(
T* Q, const T* bias_Q,
T* K, const T* bias_K,
T* V, const T* bias_V,
T* q_buf_, T* k_buf_, T* v_buf_,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const cudaStream_t stream)
{
dim3 grid;
dim3 block;
grid.x = valid_word_num;
block.x = head_num * size_per_head;
add_QKV_bias_padding<float><<<grid, block, 0, stream>>>(
Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_, v_buf_,
batch_size, seq_len, head_num, size_per_head, batch_idx, word_idx);
}
template<>
void add_QKV_bias_padding_kernelLauncher(
__half* Q, const __half* bias_Q,
__half* K, const __half* bias_K,
__half* V, const __half* bias_V,
__half* q_buf_, __half* k_buf_, __half* v_buf_,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const cudaStream_t stream)
{
dim3 grid;
dim3 block;
grid.x = valid_word_num;
block.x = head_num * size_per_head / 2;
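  // __half data is read and written as half2 (two values per thread), so both
  // block.x and the size_per_head passed to the kernel are halved.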
add_QKV_bias_padding<__half><<<grid, block, 0, stream>>>(
Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_, v_buf_,
batch_size, seq_len, head_num, size_per_head / 2, batch_idx, word_idx);
}
template void add_QKV_bias_padding_kernelLauncher<__half>(
__half* Q, const __half* bias_Q,
__half* K, const __half* bias_K,
__half* V, const __half* bias_V,
__half* q_buf_, __half* k_buf_, __half* v_buf_,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const cudaStream_t stream);
template void add_QKV_bias_padding_kernelLauncher<float>(
float* Q, const float* bias_Q,
float* K, const float* bias_K,
float* V, const float* bias_V,
float* q_buf_, float* k_buf_, float* v_buf_,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const cudaStream_t stream);
/// *********************************** fin ***********************************
/// ************************** softmax for attention **************************
// softmax kernel code is copied from Nvidia's DeepLearningExamples :
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v1/fastertransformer/cuda/open_attention.cu#L189-L268
template <typename T>
__global__
void softmax_kernel(T* qk_buf_, const T* attr_mask,
const int batch_size, const int head_num, const int seq_len, const T scaler)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
int mask_offset = batch_id * seq_len * seq_len;
__shared__ float s_sum, s_max;
for(int i = 0; i < seq_len; ++i)
{
float qk = threadIdx.x < seq_len
? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len
? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len
? (float)(qk * (float)scaler + mask_val): -1e-20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
qk = threadIdx.x < seq_len ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk / s_sum);
qk_offset += seq_len;
mask_offset += seq_len;
}
}
template <typename T>
__global__
void softmax_kernel_v2(T* qk_buf_, const T* attr_mask,
const int batch_size, const int head_num,
const int seq_len, const float scaler)
{
int batch_id = blockIdx.x / head_num / seq_len;
int seq_id = blockIdx.x % seq_len;
int qk_offset = blockIdx.x * seq_len;
int mask_offset = batch_id * seq_len * seq_len + seq_id * seq_len;
__shared__ float s_sum, s_max;
float qk = threadIdx.x < seq_len
? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len
? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len
? (float)(qk * (float)scaler + mask_val) : -1e-20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
float qk_tmp = threadIdx.x < seq_len ? __expf((float)(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum);
}
template <typename T>
void softmax_kernel_kernelLauncher(
T* qk_buf_, const T* attr_mask,
const int batch_size, const int head_num, const int seq_len,
const T scaler,
const cudaStream_t stream) {
dim3 grid;
dim3 block;
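  // Pick a power-of-two block size in [32, 1024]: the smallest one that is
  // >= seq_len (capped at 1024), so each thread covers one column of a
  // seq_len-wide row of attention scores.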
if(seq_len <= 32)
block.x = 32;
else if(seq_len > 32 && seq_len <= 64)
block.x = 64;
else if(seq_len > 64 && seq_len <= 128)
block.x = 128;
else if(seq_len > 128 && seq_len <= 256)
block.x = 256;
else if(seq_len > 256 && seq_len <= 512)
block.x = 512;
else
block.x = 1024;
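  // Dispatch: when batch_size * head_num is small (<= 120), launch one block
  // per (batch, head, row) with softmax_kernel_v2, presumably to keep enough
  // blocks in flight; otherwise launch one block per (batch, head) and let
  // softmax_kernel loop over the seq_len rows itself.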
if(batch_size * head_num <= 120)
{
grid.x = batch_size * head_num * seq_len;
softmax_kernel_v2<T><<<grid, block, 0, stream>>>(
qk_buf_, attr_mask, batch_size, head_num, seq_len, scaler);
}
else
{
grid.x = batch_size * head_num;
softmax_kernel<T><<<grid, block, 0, stream>>>(
qk_buf_, attr_mask, batch_size, head_num, seq_len, scaler);
}
}
template void softmax_kernel_kernelLauncher<float>(
float* qk_buf_, const float* attr_mask,
const int batch_size, const int head_num, const int seq_len,
const float scaler,
const cudaStream_t stream);
template void softmax_kernel_kernelLauncher<__half>(
__half* qk_buf_, const __half* attr_mask,
const int batch_size, const int head_num, const int seq_len,
const __half scaler,
const cudaStream_t stream);
/// *********************************** fin ***********************************
/// ****************** transpose & rm padding for attention *******************
template<typename T>
__global__
void transpose_rm_padding(
T* src, T* dst,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx)
{
int head_id = threadIdx.y;
int tid = threadIdx.x;
int batch_id = batch_idx[blockIdx.x];
int word_id = word_idx[blockIdx.x];
int src_offset = batch_id * head_num * seq_len * size_per_head +
head_id * seq_len * size_per_head +
word_id * size_per_head +
tid;
int dst_offset = blockIdx.x * head_num * size_per_head +
head_id * size_per_head +
tid;
T* src_ptr = (T*)src;
T* dst_ptr = (T*)dst;
dst_ptr[dst_offset] = src_ptr[src_offset];
}
template<>
__global__
void transpose_rm_padding(
__half* src, __half* dst,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx)
{
// if (threadIdx.y == (head_num - 1) && threadIdx.x >= size_per_head)
// return;
int head_id = threadIdx.y;
int tid = threadIdx.x;
int batch_id = batch_idx[blockIdx.x];
int word_id = word_idx[blockIdx.x];
int src_offset = batch_id * head_num * seq_len * size_per_head +
head_id * seq_len * size_per_head +
word_id * size_per_head +
tid;
int dst_offset = blockIdx.x * head_num * size_per_head +
head_id * size_per_head +
tid;
half2* src_ptr = (half2*)src;
half2* dst_ptr = (half2*)dst;
dst_ptr[dst_offset] = src_ptr[src_offset];
}
template <typename T>
void transpose_rm_padding_kernelLauncher(
T* src, T* dst,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const cudaStream_t stream)
{
dim3 grid(valid_word_num);
dim3 block(size_per_head, head_num);
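  // One block per valid word; the block holds size_per_head x head_num
  // threads, so that product must stay within the usual 1024 threads-per-block
  // limit.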
transpose_rm_padding<float><<<grid, block, 0, stream>>>(
src, dst,
batch_size, seq_len, head_num, size_per_head,
batch_idx, word_idx);
}
template <>
void transpose_rm_padding_kernelLauncher<__half>(
__half* src, __half* dst,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const cudaStream_t stream)
{
dim3 grid(valid_word_num);
dim3 block(size_per_head / 2, head_num);
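  // __half elements are moved as half2 (two values per thread), so both the
  // block width and the size_per_head passed to the kernel are halved.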
transpose_rm_padding<__half><<<grid, block, 0, stream>>>(
src, dst,
batch_size, seq_len, head_num, size_per_head / 2,
batch_idx, word_idx);
}
template void transpose_rm_padding_kernelLauncher<float>(
float* src, float* dst,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const cudaStream_t stream);
template void transpose_rm_padding_kernelLauncher<__half>(
__half* src, __half* dst,
const int valid_word_num,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
const int* batch_idx, const int* word_idx,
const cudaStream_t stream);
/// *********************************** fin ***********************************
}//namespace cuda
}//namespace effectivetransformer
|
4b8ddb42f7a4ed568bd25c4c85dfde9519ea0d64.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by Francisco José González García.
// Copyright (c) 2020 Universidad de Granada. All rights reserved.
//
#include <iostream>
#include <vector>
#include "matrices_test.h"
#include "tests.h"
#include <omp.h>
#include <CSR.h>
#include "readers.h"
int main(int argc, char **argv) {
if (argc < 5) {
        cout << "Usage: test [mtx] [rhs] [x0] [srjSch]";
abort();
}
string m = argv[1];
string rhs = argv[2];
string x0 = argv[3];
string srjSch = argv[4];
    cout << "Reading matrix...." << endl;
fstream fs(m);
CSR matriz(fs);
auto _b = rhsVector_reader<double>(rhs, matriz.getFilas());
int devicesCount;
hipGetDeviceCount(&devicesCount);
int max_threads = omp_get_max_threads();
const int n = 1;
vector<vector<double>> ejecuciones(n);
for (int iter = 0; iter < n; iter++) {
// Tests JACOBI ***************
// ejecuciones[iter].push_back(jacobi_secuencial(matriz, _b, x0));
// for (int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex) {
// ejecuciones[iter].push_back(test_jacobi_CUDA(matriz, _b, x0));
// }
// for (int i = 2; i <= max_threads; i = i << 1) {
// omp_set_num_threads(i);
        // cout << "CPU cores: " << omp_get_max_threads() << endl;
// ejecuciones[iter].push_back(test_jacobi_OMP(matriz, _b, x0));
// }
// Tests SRJ *****************
ejecuciones[iter].push_back(srj_secuencial(matriz, _b, x0, srjSch));
for (int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex) {
ejecuciones[iter].push_back(srj_CUDA(matriz, _b, x0, srjSch));
}
for (int i = 2; i <= max_threads; i = i << 1) {
omp_set_num_threads(i);
ejecuciones[iter].push_back(srj_OMP(matriz, _b, x0, srjSch));
}
// Tests SOR ******************
// double omega = 1.1;
// ejecuciones[iter].push_back(SOR(matriz, _b, x0, omega));
// for (int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex) {
// ejecuciones[iter].push_back(SOR_CUDA(matriz, _b, x0, omega));
// }
// for (int i = 2; i <= max_threads; i = i << 1) {
// omp_set_num_threads(i);
// ejecuciones[iter].push_back(SOR_OMP(matriz, _b, x0, omega));
// }
//// Tests Gauss Seidel ******************
// omega = 1;
// ejecuciones[iter].push_back(SOR(matriz, _b, x0, omega));
// for (int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex) {
// ejecuciones[iter].push_back(SOR_CUDA(matriz, _b, x0, omega));
// }
// for (int i = 2; i <= max_threads; i = i << 1) {
// omp_set_num_threads(i);
// ejecuciones[iter].push_back(SOR_OMP(matriz, _b, x0, omega));
// }
}
    cout << "Times: ";
vector<double> tiempo_medio(ejecuciones[0].size());
for (int i = 0; i < tiempo_medio.size(); i++) {
for (int j = 0; j < n; j++) {
tiempo_medio[i] += ejecuciones[j][i];
// cout << ejecuciones[j][i] << " ";
}
tiempo_medio[i] /= n;
cout << tiempo_medio[i] << " ";
}
}
| 4b8ddb42f7a4ed568bd25c4c85dfde9519ea0d64.cu | //
// Created by Francisco José González García.
// Copyright (c) 2020 Universidad de Granada. All rights reserved.
//
#include <iostream>
#include <vector>
#include "matrices_test.h"
#include "tests.h"
#include <omp.h>
#include <CSR.h>
#include "readers.h"
int main(int argc, char **argv) {
if (argc < 5) {
        cout << "Usage: test [mtx] [rhs] [x0] [srjSch]";
abort();
}
string m = argv[1];
string rhs = argv[2];
string x0 = argv[3];
string srjSch = argv[4];
cout << "Leyendo matriz...." << endl;
fstream fs(m);
CSR matriz(fs);
auto _b = rhsVector_reader<double>(rhs, matriz.getFilas());
int devicesCount;
cudaGetDeviceCount(&devicesCount);
int max_threads = omp_get_max_threads();
const int n = 1;
vector<vector<double>> ejecuciones(n);
for (int iter = 0; iter < n; iter++) {
// Tests JACOBI ***************
// ejecuciones[iter].push_back(jacobi_secuencial(matriz, _b, x0));
// for (int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex) {
// ejecuciones[iter].push_back(test_jacobi_CUDA(matriz, _b, x0));
// }
// for (int i = 2; i <= max_threads; i = i << 1) {
// omp_set_num_threads(i);
// cout << "Nucleos CPU: " << omp_get_max_threads() << endl;
// ejecuciones[iter].push_back(test_jacobi_OMP(matriz, _b, x0));
// }
// Tests SRJ *****************
ejecuciones[iter].push_back(srj_secuencial(matriz, _b, x0, srjSch));
for (int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex) {
ejecuciones[iter].push_back(srj_CUDA(matriz, _b, x0, srjSch));
}
for (int i = 2; i <= max_threads; i = i << 1) {
omp_set_num_threads(i);
ejecuciones[iter].push_back(srj_OMP(matriz, _b, x0, srjSch));
}
// Tests SOR ******************
// double omega = 1.1;
// ejecuciones[iter].push_back(SOR(matriz, _b, x0, omega));
// for (int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex) {
// ejecuciones[iter].push_back(SOR_CUDA(matriz, _b, x0, omega));
// }
// for (int i = 2; i <= max_threads; i = i << 1) {
// omp_set_num_threads(i);
// ejecuciones[iter].push_back(SOR_OMP(matriz, _b, x0, omega));
// }
//// Tests Gauss Seidel ******************
// omega = 1;
// ejecuciones[iter].push_back(SOR(matriz, _b, x0, omega));
// for (int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex) {
// ejecuciones[iter].push_back(SOR_CUDA(matriz, _b, x0, omega));
// }
// for (int i = 2; i <= max_threads; i = i << 1) {
// omp_set_num_threads(i);
// ejecuciones[iter].push_back(SOR_OMP(matriz, _b, x0, omega));
// }
}
    cout << "Times: ";
vector<double> tiempo_medio(ejecuciones[0].size());
for (int i = 0; i < tiempo_medio.size(); i++) {
for (int j = 0; j < n; j++) {
tiempo_medio[i] += ejecuciones[j][i];
// cout << ejecuciones[j][i] << " ";
}
tiempo_medio[i] /= n;
cout << tiempo_medio[i] << " ";
}
}
|
6fdd3e0b9fff46ba5d180ae50c0486336481face.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void vec_set (size_t n, double *result, double value)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = value;
}
}
//=== Vector arithmetic ======================================================
extern "C"
__global__ void vec_add (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y[id];
}
}
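// Illustrative host-side launch for these element-wise kernels (a sketch only;
// n and the d_* device buffers are placeholder names, not part of this file):
//   int threads = 256;
//   int blocks = (int)((n + threads - 1) / threads);
//   hipLaunchKernelGGL(vec_add, dim3(blocks), dim3(threads), 0, 0,
//                      n, d_result, d_x, d_y);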
extern "C"
__global__ void vec_sub (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y[id];
}
}
extern "C"
__global__ void vec_mul (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y[id];
}
}
extern "C"
__global__ void vec_div (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y[id];
}
}
extern "C"
__global__ void vec_negate (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = -x[id];
}
}
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
__global__ void vec_addScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y;
}
}
extern "C"
__global__ void vec_subScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y;
}
}
extern "C"
__global__ void vec_mulScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y;
}
}
extern "C"
__global__ void vec_divScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y;
}
}
extern "C"
__global__ void vec_scalarAdd (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x + y[id];
}
}
extern "C"
__global__ void vec_scalarSub (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x - y[id];
}
}
extern "C"
__global__ void vec_scalarMul (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x * y[id];
}
}
extern "C"
__global__ void vec_scalarDiv (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x / y[id];
}
}
//=== Vector comparison ======================================================
extern "C"
__global__ void vec_lt (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_lte (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_eq (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gte (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gt (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_ne (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y[id])?1.0f:0.0f;
}
}
//=== Vector-and-scalar comparison ===========================================
extern "C"
__global__ void vec_ltScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_lteScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_eqScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gteScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gtScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_neScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y)?1.0f:0.0f;
}
}
//=== Vector math (one argument) =============================================
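// Note: the kernels below take double buffers but call the single-precision
// math intrinsics (acosf, expf, sqrtf, ...), so each result is computed in
// float precision and then widened back to double.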
// Calculate the arc cosine of the input argument.
extern "C"
__global__ void vec_acos (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = acosf(x[id]);
}
}
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
__global__ void vec_acosh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = acoshf(x[id]);
}
}
// Calculate the arc sine of the input argument.
extern "C"
__global__ void vec_asin (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = asinf(x[id]);
}
}
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
__global__ void vec_asinh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = asinhf(x[id]);
}
}
// Calculate the arc tangent of the input argument.
extern "C"
__global__ void vec_atan (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = atanf(x[id]);
}
}
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
__global__ void vec_atanh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = atanhf(x[id]);
}
}
// Calculate the cube root of the input argument.
extern "C"
__global__ void vec_cbrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cbrtf(x[id]);
}
}
// Calculate ceiling of the input argument.
extern "C"
__global__ void vec_ceil (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = ceilf(x[id]);
}
}
// Calculate the cosine of the input argument.
extern "C"
__global__ void vec_cos (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cosf(x[id]);
}
}
// Calculate the hyperbolic cosine of the input argument.
extern "C"
__global__ void vec_cosh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = coshf(x[id]);
}
}
// Calculate the cosine of the input argument p .
extern "C"
__global__ void vec_cospi (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cospif(x[id]);
}
}
// Calculate the complementary error function of the input argument.
extern "C"
__global__ void vec_erfc (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfcf(x[id]);
}
}
// Calculate the inverse complementary error function of the input argument.
extern "C"
__global__ void vec_erfcinv (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfcinvf(y[id]);
}
}
// Calculate the scaled complementary error function of the input argument.
extern "C"
__global__ void vec_erfcx (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfcxf(x[id]);
}
}
// Calculate the error function of the input argument.
extern "C"
__global__ void vec_erf (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erff(x[id]);
}
}
// Calculate the inverse error function of the input argument.
extern "C"
__global__ void vec_erfinv (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfinvf(y[id]);
}
}
// Calculate the base 10 exponential of the input argument.
extern "C"
__global__ void vec_exp10 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = exp10f(x[id]);
}
}
// Calculate the base 2 exponential of the input argument.
extern "C"
__global__ void vec_exp2 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = exp2f(x[id]);
}
}
// Calculate the base e exponential of the input argument.
extern "C"
__global__ void vec_exp (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = expf(x[id]);
}
}
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
__global__ void vec_expm1 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = expm1f(x[id]);
}
}
// Calculate the absolute value of its argument.
extern "C"
__global__ void vec_fabs (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fabsf(x[id]);
}
}
// Calculate the largest integer less than or equal to x.
extern "C"
__global__ void vec_floor (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = floorf(x[id]);
}
}
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
__global__ void vec_j0 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = j0f(x[id]);
}
}
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
__global__ void vec_j1 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = j1f(x[id]);
}
}
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
__global__ void vec_lgamma (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = lgammaf(x[id]);
}
}
// Calculate the base 10 logarithm of the input argument.
extern "C"
__global__ void vec_log10 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log10f(x[id]);
}
}
// Calculate the value of log_e(1 + x).
extern "C"
__global__ void vec_log1p (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log1pf(x[id]);
}
}
// Calculate the base 2 logarithm of the input argument.
extern "C"
__global__ void vec_log2 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log2f(x[id]);
}
}
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
__global__ void vec_logb (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = logbf(x[id]);
}
}
// Calculate the natural logarithm of the input argument.
extern "C"
__global__ void vec_log (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = logf(x[id]);
}
}
// Calculate the standard normal cumulative distribution function.
extern "C"
__global__ void vec_normcdf (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = normcdff(y[id]);
}
}
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
__global__ void vec_normcdfinv (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = normcdfinvf(y[id]);
}
}
// Calculate reciprocal cube root function.
extern "C"
__global__ void vec_rcbrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = rcbrtf(x[id]);
}
}
// Round input to nearest integer value in floating-point.
extern "C"
__global__ void vec_rint (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = rintf(x[id]);
}
}
// Round to nearest integer value in floating-point.
extern "C"
__global__ void vec_round (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = roundf(x[id]);
}
}
// Calculate the reciprocal of the square root of the input argument.
extern "C"
__global__ void vec_rsqrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = rsqrtf(x[id]);
}
}
// Calculate the sine of the input argument.
extern "C"
__global__ void vec_sin (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sinf(x[id]);
}
}
// Calculate the hyperbolic sine of the input argument.
extern "C"
__global__ void vec_sinh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sinhf(x[id]);
}
}
// Calculate the sine of the input argument p .
extern "C"
__global__ void vec_sinpi (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sinpif(x[id]);
}
}
// Calculate the square root of the input argument.
extern "C"
__global__ void vec_sqrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sqrtf(x[id]);
}
}
// Calculate the tangent of the input argument.
extern "C"
__global__ void vec_tan (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = tanf(x[id]);
}
}
// Calculate the hyperbolic tangent of the input argument.
extern "C"
__global__ void vec_tanh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = tanhf(x[id]);
}
}
// Calculate the gamma function of the input argument.
extern "C"
__global__ void vec_tgamma (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = tgammaf(x[id]);
}
}
// Truncate input argument to the integral part.
extern "C"
__global__ void vec_trunc (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = truncf(x[id]);
}
}
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
__global__ void vec_y0 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = y0f(x[id]);
}
}
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
__global__ void vec_y1 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = y1f(x[id]);
}
}
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
__global__ void vec_copysign (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = copysignf(x[id], y[id]);
}
}
// Compute the positive difference between x and y.
extern "C"
__global__ void vec_fdim (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fdimf(x[id], y[id]);
}
}
// Divide two floating point values.
extern "C"
__global__ void vec_fdivide (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fdividef(x[id], y[id]);
}
}
// Determine the maximum numeric value of the arguments.
extern "C"
__global__ void vec_fmax (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fmaxf(x[id], y[id]);
}
}
// Determine the minimum numeric value of the arguments.
extern "C"
__global__ void vec_fmin (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fminf(x[id], y[id]);
}
}
// Calculate the floating-point remainder of x / y.
extern "C"
__global__ void vec_fmod (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fmodf(x[id], y[id]);
}
}
// Calculate the square root of the sum of squares of two arguments.
extern "C"
__global__ void vec_hypot (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = hypotf(x[id], y[id]);
}
}
// Return next representable single-precision floating-point value after argument.
extern "C"
__global__ void vec_nextafter (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = nextafterf(x[id], y[id]);
}
}
// Calculate the value of first argument to the power of second argument.
extern "C"
__global__ void vec_pow (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = powf(x[id], y[id]);
}
}
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_remainder (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = remainderf(x[id], y[id]);
}
}
| 6fdd3e0b9fff46ba5d180ae50c0486336481face.cu |
extern "C"
__global__ void vec_set (size_t n, double *result, double value)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = value;
}
}
//=== Vector arithmetic ======================================================
extern "C"
__global__ void vec_add (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y[id];
}
}
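// Illustrative host-side launch for these element-wise kernels (a sketch only;
// n and the d_* device buffers are placeholder names, not part of this file):
//   int threads = 256;
//   int blocks = (int)((n + threads - 1) / threads);
//   vec_add<<<blocks, threads>>>(n, d_result, d_x, d_y);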
extern "C"
__global__ void vec_sub (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y[id];
}
}
extern "C"
__global__ void vec_mul (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y[id];
}
}
extern "C"
__global__ void vec_div (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y[id];
}
}
extern "C"
__global__ void vec_negate (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = -x[id];
}
}
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
__global__ void vec_addScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y;
}
}
extern "C"
__global__ void vec_subScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y;
}
}
extern "C"
__global__ void vec_mulScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y;
}
}
extern "C"
__global__ void vec_divScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y;
}
}
extern "C"
__global__ void vec_scalarAdd (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x + y[id];
}
}
extern "C"
__global__ void vec_scalarSub (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x - y[id];
}
}
extern "C"
__global__ void vec_scalarMul (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x * y[id];
}
}
extern "C"
__global__ void vec_scalarDiv (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x / y[id];
}
}
//=== Vector comparison ======================================================
extern "C"
__global__ void vec_lt (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_lte (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_eq (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gte (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gt (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_ne (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y[id])?1.0f:0.0f;
}
}
//=== Vector-and-scalar comparison ===========================================
extern "C"
__global__ void vec_ltScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_lteScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_eqScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gteScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gtScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_neScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y)?1.0f:0.0f;
}
}
//=== Vector math (one argument) =============================================
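// Note: the kernels below take double buffers but call the single-precision
// math intrinsics (acosf, expf, sqrtf, ...), so each result is computed in
// float precision and then widened back to double.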
// Calculate the arc cosine of the input argument.
extern "C"
__global__ void vec_acos (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = acosf(x[id]);
}
}
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
__global__ void vec_acosh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = acoshf(x[id]);
}
}
// Calculate the arc sine of the input argument.
extern "C"
__global__ void vec_asin (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = asinf(x[id]);
}
}
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
__global__ void vec_asinh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = asinhf(x[id]);
}
}
// Calculate the arc tangent of the input argument.
extern "C"
__global__ void vec_atan (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = atanf(x[id]);
}
}
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
__global__ void vec_atanh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = atanhf(x[id]);
}
}
// Calculate the cube root of the input argument.
extern "C"
__global__ void vec_cbrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cbrtf(x[id]);
}
}
// Calculate ceiling of the input argument.
extern "C"
__global__ void vec_ceil (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = ceilf(x[id]);
}
}
// Calculate the cosine of the input argument.
extern "C"
__global__ void vec_cos (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cosf(x[id]);
}
}
// Calculate the hyperbolic cosine of the input argument.
extern "C"
__global__ void vec_cosh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = coshf(x[id]);
}
}
// Calculate the cosine of the input argument × π.
extern "C"
__global__ void vec_cospi (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cospif(x[id]);
}
}
// Calculate the complementary error function of the input argument.
extern "C"
__global__ void vec_erfc (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfcf(x[id]);
}
}
// Calculate the inverse complementary error function of the input argument.
extern "C"
__global__ void vec_erfcinv (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfcinvf(y[id]);
}
}
// Calculate the scaled complementary error function of the input argument.
extern "C"
__global__ void vec_erfcx (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfcxf(x[id]);
}
}
// Calculate the error function of the input argument.
extern "C"
__global__ void vec_erf (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erff(x[id]);
}
}
// Calculate the inverse error function of the input argument.
extern "C"
__global__ void vec_erfinv (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfinvf(y[id]);
}
}
// Calculate the base 10 exponential of the input argument.
extern "C"
__global__ void vec_exp10 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = exp10f(x[id]);
}
}
// Calculate the base 2 exponential of the input argument.
extern "C"
__global__ void vec_exp2 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = exp2f(x[id]);
}
}
// Calculate the base e exponential of the input argument.
extern "C"
__global__ void vec_exp (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = expf(x[id]);
}
}
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
__global__ void vec_expm1 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = expm1f(x[id]);
}
}
// Calculate the absolute value of its argument.
extern "C"
__global__ void vec_fabs (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fabsf(x[id]);
}
}
// Calculate the largest integer less than or equal to x.
extern "C"
__global__ void vec_floor (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = floorf(x[id]);
}
}
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
__global__ void vec_j0 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = j0f(x[id]);
}
}
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
__global__ void vec_j1 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = j1f(x[id]);
}
}
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
__global__ void vec_lgamma (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = lgammaf(x[id]);
}
}
// Calculate the base 10 logarithm of the input argument.
extern "C"
__global__ void vec_log10 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log10f(x[id]);
}
}
// Calculate the value of log_e(1 + x).
extern "C"
__global__ void vec_log1p (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log1pf(x[id]);
}
}
// Calculate the base 2 logarithm of the input argument.
extern "C"
__global__ void vec_log2 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log2f(x[id]);
}
}
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
__global__ void vec_logb (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = logbf(x[id]);
}
}
// Calculate the natural logarithm of the input argument.
extern "C"
__global__ void vec_log (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = logf(x[id]);
}
}
// Calculate the standard normal cumulative distribution function.
extern "C"
__global__ void vec_normcdf (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = normcdff(y[id]);
}
}
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
__global__ void vec_normcdfinv (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = normcdfinvf(y[id]);
}
}
// Calculate reciprocal cube root function.
extern "C"
__global__ void vec_rcbrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = rcbrtf(x[id]);
}
}
// Round input to nearest integer value in floating-point.
extern "C"
__global__ void vec_rint (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = rintf(x[id]);
}
}
// Round to nearest integer value in floating-point.
extern "C"
__global__ void vec_round (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = roundf(x[id]);
}
}
// Calculate the reciprocal of the square root of the input argument.
extern "C"
__global__ void vec_rsqrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = rsqrtf(x[id]);
}
}
// Calculate the sine of the input argument.
extern "C"
__global__ void vec_sin (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sinf(x[id]);
}
}
// Calculate the hyperbolic sine of the input argument.
extern "C"
__global__ void vec_sinh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sinhf(x[id]);
}
}
// Calculate the sine of the input argument × π.
extern "C"
__global__ void vec_sinpi (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sinpif(x[id]);
}
}
// Calculate the square root of the input argument.
extern "C"
__global__ void vec_sqrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sqrtf(x[id]);
}
}
// Calculate the tangent of the input argument.
extern "C"
__global__ void vec_tan (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = tanf(x[id]);
}
}
// Calculate the hyperbolic tangent of the input argument.
extern "C"
__global__ void vec_tanh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = tanhf(x[id]);
}
}
// Calculate the gamma function of the input argument.
extern "C"
__global__ void vec_tgamma (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = tgammaf(x[id]);
}
}
// Truncate input argument to the integral part.
extern "C"
__global__ void vec_trunc (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = truncf(x[id]);
}
}
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
__global__ void vec_y0 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = y0f(x[id]);
}
}
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
__global__ void vec_y1 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = y1f(x[id]);
}
}
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
__global__ void vec_copysign (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = copysignf(x[id], y[id]);
}
}
// Compute the positive difference between x and y.
extern "C"
__global__ void vec_fdim (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fdimf(x[id], y[id]);
}
}
// Divide two floating point values.
extern "C"
__global__ void vec_fdivide (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fdividef(x[id], y[id]);
}
}
// Determine the maximum numeric value of the arguments.
extern "C"
__global__ void vec_fmax (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fmaxf(x[id], y[id]);
}
}
// Determine the minimum numeric value of the arguments.
extern "C"
__global__ void vec_fmin (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fminf(x[id], y[id]);
}
}
// Calculate the floating-point remainder of x / y.
extern "C"
__global__ void vec_fmod (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fmodf(x[id], y[id]);
}
}
// Calculate the square root of the sum of squares of two arguments.
extern "C"
__global__ void vec_hypot (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = hypotf(x[id], y[id]);
}
}
// Return next representable single-precision floating-point value after argument.
extern "C"
__global__ void vec_nextafter (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = nextafterf(x[id], y[id]);
}
}
// Calculate the value of first argument to the power of second argument.
extern "C"
__global__ void vec_pow (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
        result[id] = pow(x[id], y[id]);
}
}
// Compute the double-precision floating-point remainder.
extern "C"
__global__ void vec_remainder (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
        result[id] = remainder(x[id], y[id]);
}
}
|
09e0bf25b3ee77d677ecc3154ff7f4c64e1674b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#define CUDPP_STATIC_LIB
#include <stdio.h>
// #include <cutil_inline.h>
#include "/Volumes/Macintosh HD/Developer/NVIDIA/CUDA-7.5/samples/common/inc/helper_cuda.h"
#include <stdio.h> /* defines printf for tests */
#include <time.h> /* defines time_t for timings in the test */
#include <math.h>
#ifdef linux
# include <endian.h> /* attempt to define endianness */
#endif
#include "Kmer.h"
#include "Graph.h"
#include "cudpp.h"
#include "utils.h"
#include "common.h"
// idea ! have all the cuda code as macro to be used in cpu code as well cuda code
/*
one read per block
|R| threads
copy each Ri to shared mem
|lmers|=|R|
l-1 dummy entries or l-1 thread stall/branch
max overhead 256 bytes per read.
1) use dummy entries
2) stall extra threads.
*/
//A=65=41=0100-0001
//C=67=43=0100-0011
//T=84=54=0101-0100
//G=71=47=0100-1111
//0A0CT00G
//00013002
//0T0GA00C
//03020001
__device__ __constant__ KEY_T lmerMask[] ={
0x0000000000000003, 0x000000000000000F, 0x000000000000003F, 0x00000000000000FF, // 0 1 2 3
0x00000000000003FF, 0x0000000000000FFF, 0x0000000000003FFF, 0x000000000000FFFF, // 4 5 6 7
0x000000000003FFFF, 0x00000000000FFFFF, 0x00000000003FFFFF, 0x0000000000FFFFFF, // 8 9 10 11
0x0000000003FFFFFF, 0x000000000FFFFFFF, 0x000000003FFFFFFF, 0x00000000FFFFFFFF, // 12 13 14 15
0x00000003FFFFFFFF, 0x0000000FFFFFFFFF, 0x0000003FFFFFFFFF, 0x000000FFFFFFFFFF, // 16 17 18 19
0x000003FFFFFFFFFF, 0x00000FFFFFFFFFFF, 0x00003FFFFFFFFFFF, 0x0000FFFFFFFFFFFF, // 20 21 22 23
0x0003FFFFFFFFFFFF, 0x000FFFFFFFFFFFFF, 0x003FFFFFFFFFFFFF, 0x00FFFFFFFFFFFFFF, // 24 25 26 27
0x03FFFFFFFFFFFFFF, 0x0FFFFFFFFFFFFFFF, 0x3FFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF // 28 29 30 31
};
__device__ __constant__ unsigned char shifter[4] [4]=
{
{0,0,0,0},
{1,4,16,64},
{2,8,32,128},
{3,12,48,192},
};
__device__ __constant__ char codeF[]={0,0,0,1,3,0,0,2};
__device__ __constant__ char codeR[]={0,3,0,2,0,0,0,1};
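// How the constant tables above are used by the kernels below (derived from the code):
//  - codeF maps a base character, indexed by its low 3 ASCII bits (ch & 0x07), to a
//    2-bit code: A -> 0, C -> 1, G -> 2, T -> 3; codeR maps it to the complement base's code.
//  - shifter[c][p] is simply c << (2*p), i.e. a 2-bit code pre-shifted into slot p of a byte.
//  - lmerMask[k] keeps the low 2*(k+1) bits, i.e. exactly a (k+1)-base l-mer.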
__global__ void encodeLmerDevice( char * buffer,
const unsigned int buffSize,
const unsigned int readLength,
KEY_PTR lmers,
const unsigned int lmerLength
)
{
extern __shared__ char dnaRead[]; // changing name from "read"
const unsigned int tid=threadIdx.x;
printf("tid, buffer = %u, %s\n ",tid, buffer);
const unsigned int rOffset=(blockDim.x*blockDim.y*gridDim.x*blockIdx.y) +(blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y);
KEY_T lmer=0;
printf("tid, offset = %u, %u, %c\n",tid, rOffset,buffer[rOffset+tid]);
dnaRead[tid]=buffer[rOffset+tid];
printf("dnaRead = %c \n",dnaRead[tid]);
// __syncthreads();
/*
* dnaRead[tid]=codeF[dnaRead[tid] & 0x07];
* __syncthreads();
* for(unsigned int i =0; i< lmerLength; i++)
* {
* lmer = lmer<<2;
 * lmer = lmer | ((KEY_T)dnaRead[(tid+i)%blockDim.x]); //wrapping for dummy entries
* }
*/
for (unsigned int i = 0; i < 8; i++) //calculate lmer
{
printf("calc lmer ");
lmer= (lmer<< 8) | ((KEY_T)(shifter[codeF[dnaRead[threadIdx.x+i*4]& 0x07]][3] |
shifter[codeF[dnaRead[threadIdx.x+i*4+1]& 0x07]][2] |
shifter[codeF[dnaRead[threadIdx.x+i*4+2]& 0x07]][1] |
codeF[dnaRead[threadIdx.x+i*4+3] & 0x07]) ) ;
}
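    // The loop above packs the 32 bases starting at this thread's offset into one
    // 64-bit word (2 bits per base, earliest base in the most significant bits).
    // The shift below drops the trailing (32 - lmerLength) bases and the mask keeps
    // 2*lmerLength bits, leaving just the l-mer that starts at this thread's position.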
lmer = (lmer >> ((32 - lmerLength) << 1)) & lmerMask[lmerLength-1];
lmers[rOffset+tid]=lmer;
printf("GPU lmer: %llu\n",lmers[rOffset+tid]);
}
__global__ void encodeLmerComplementDevice( char * buffer,
const unsigned int buffSize,
const unsigned int readLength,
KEY_PTR lmers,
const unsigned int lmerLength
){
extern __shared__ char dnaRead[];//have to fix it
const unsigned int tid=threadIdx.x;
const unsigned int rOffset=(blockDim.x*blockDim.y*gridDim.x*blockIdx.y) +(blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y);
KEY_T lmer=0;
KEY_T temp=0;
dnaRead[tid]=buffer[rOffset+tid];
__syncthreads();
dnaRead[tid]=codeR[dnaRead[tid] & 0x07];
__syncthreads();
for(unsigned int i =0; i< lmerLength; i++){
temp=((KEY_T)dnaRead[(tid+i)%blockDim.x]);
lmer = (temp<<(i<<1)) | lmer;
}
lmers[rOffset+tid]=lmer;
}
__global__ void computeKmerDevice( KEY_PTR lmers,
KEY_PTR pkmers,
KEY_PTR skmers,
KEY_T validBitMask
){
const unsigned int tid=(blockDim.x*blockDim.y*gridDim.x*blockIdx.y) +(blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x;
KEY_T lmer;
//fetch lmer
lmer=lmers[tid];
//find prefix
pkmers[tid]=LMER_PREFIX(lmer,validBitMask);
//find suffix
skmers[tid] = LMER_SUFFIX(lmer,validBitMask);
}
extern "C"
void encodeLmer(
char * d_buffer,
const unsigned int bufferSize,
const unsigned int readLength,
KEY_PTR d_lmers,
const unsigned int lmerLength,
const unsigned int entriesCount
)
{
dim3 grid, block;
// printf("d_buffer before GPU: %s\n", d_buffer);
// printf("d_lmers before GPU: %llu\n", *d_lmers);
getOptimalLaunchConfigCustomized(entriesCount,&grid,&block,readLength);
hipLaunchKernelGGL(( encodeLmerDevice), dim3(grid),dim3(block),readLength+31, 0, d_buffer,bufferSize,readLength,d_lmers,lmerLength);
printf("d_lmers after GPU: %llu\n", *d_lmers);
CheckCUDAError();
}
extern "C"
void encodeLmerComplement(
char * d_buffer,
const unsigned int bufferSize,
const unsigned int readLength,
KEY_PTR d_lmers,
const unsigned int lmerLength,
const unsigned int entriesCount
)
{
dim3 grid, block;
getOptimalLaunchConfigCustomized(entriesCount,&grid,&block,readLength);
hipLaunchKernelGGL(( encodeLmerComplementDevice), dim3(grid),dim3(block),readLength, 0, d_buffer,bufferSize,readLength,d_lmers,lmerLength);
CheckCUDAError();
}
extern "C"
void computeKmer( KEY_PTR d_lmers,
KEY_PTR d_pkmers,
KEY_PTR d_skmers,
KEY_T validBitMask,
const unsigned int readLength,
const unsigned int entriesCount
){
dim3 grid, block;
getOptimalLaunchConfigCustomized(entriesCount,&grid,&block,readLength);
hipLaunchKernelGGL(( computeKmerDevice), dim3(grid),dim3(block), 0, 0, d_lmers,d_pkmers,d_skmers,validBitMask);
CheckCUDAError();
}
| 09e0bf25b3ee77d677ecc3154ff7f4c64e1674b8.cu | #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#define CUDPP_STATIC_LIB
#include <stdio.h>
// #include <cutil_inline.h>
#include "/Volumes/Macintosh HD/Developer/NVIDIA/CUDA-7.5/samples/common/inc/helper_cuda.h"
#include <stdio.h> /* defines printf for tests */
#include <time.h> /* defines time_t for timings in the test */
#include <math.h>
#ifdef linux
# include <endian.h> /* attempt to define endianness */
#endif
#include "Kmer.h"
#include "Graph.h"
#include "cudpp.h"
#include "utils.h"
#include "common.h"
// idea ! have all the cuda code as macro to be used in cpu code as well cuda code
/*
one read per block
|R| threads
copy each Ri to shared mem
|lmers|=|R|
l-1 dummy entries or l-1 thread stall/branch
max overhead 256 bytes per read.
1) use dummy entries
2) stall extra threads.
*/
//A=65=41=0100-0001
//C=67=43=0100-0011
//T=84=54=0101-0100
//G=71=47=0100-1111
//0A0CT00G
//00013002
//0T0GA00C
//03020001
__device__ __constant__ KEY_T lmerMask[] ={
0x0000000000000003, 0x000000000000000F, 0x000000000000003F, 0x00000000000000FF, // 0 1 2 3
0x00000000000003FF, 0x0000000000000FFF, 0x0000000000003FFF, 0x000000000000FFFF, // 4 5 6 7
0x000000000003FFFF, 0x00000000000FFFFF, 0x00000000003FFFFF, 0x0000000000FFFFFF, // 8 9 10 11
0x0000000003FFFFFF, 0x000000000FFFFFFF, 0x000000003FFFFFFF, 0x00000000FFFFFFFF, // 12 13 14 15
0x00000003FFFFFFFF, 0x0000000FFFFFFFFF, 0x0000003FFFFFFFFF, 0x000000FFFFFFFFFF, // 16 17 18 19
0x000003FFFFFFFFFF, 0x00000FFFFFFFFFFF, 0x00003FFFFFFFFFFF, 0x0000FFFFFFFFFFFF, // 20 21 22 23
0x0003FFFFFFFFFFFF, 0x000FFFFFFFFFFFFF, 0x003FFFFFFFFFFFFF, 0x00FFFFFFFFFFFFFF, // 24 25 26 27
0x03FFFFFFFFFFFFFF, 0x0FFFFFFFFFFFFFFF, 0x3FFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF // 28 29 30 31
};
__device__ __constant__ unsigned char shifter[4] [4]=
{
{0,0,0,0},
{1,4,16,64},
{2,8,32,128},
{3,12,48,192},
};
__device__ __constant__ char codeF[]={0,0,0,1,3,0,0,2};
__device__ __constant__ char codeR[]={0,3,0,2,0,0,0,1};
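// How the constant tables above are used by the kernels below (derived from the code):
//  - codeF maps a base character, indexed by its low 3 ASCII bits (ch & 0x07), to a
//    2-bit code: A -> 0, C -> 1, G -> 2, T -> 3; codeR maps it to the complement base's code.
//  - shifter[c][p] is simply c << (2*p), i.e. a 2-bit code pre-shifted into slot p of a byte.
//  - lmerMask[k] keeps the low 2*(k+1) bits, i.e. exactly a (k+1)-base l-mer.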
__global__ void encodeLmerDevice( char * buffer,
const unsigned int buffSize,
const unsigned int readLength,
KEY_PTR lmers,
const unsigned int lmerLength
)
{
extern __shared__ char dnaRead[]; // changing name from "read"
const unsigned int tid=threadIdx.x;
printf("tid, buffer = %u, %s\n ",tid, buffer);
const unsigned int rOffset=(blockDim.x*blockDim.y*gridDim.x*blockIdx.y) +(blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y);
KEY_T lmer=0;
printf("tid, offset = %u, %u, %c\n",tid, rOffset,buffer[rOffset+tid]);
dnaRead[tid]=buffer[rOffset+tid];
printf("dnaRead = %c \n",dnaRead[tid]);
// __syncthreads();
/*
* dnaRead[tid]=codeF[dnaRead[tid] & 0x07];
* __syncthreads();
* for(unsigned int i =0; i< lmerLength; i++)
* {
* lmer = lmer<<2;
 * lmer = lmer | ((KEY_T)dnaRead[(tid+i)%blockDim.x]); //wrapping for dummy entries
* }
*/
for (unsigned int i = 0; i < 8; i++) //calculate lmer
{
printf("calc lmer ");
lmer= (lmer<< 8) | ((KEY_T)(shifter[codeF[dnaRead[threadIdx.x+i*4]& 0x07]][3] |
shifter[codeF[dnaRead[threadIdx.x+i*4+1]& 0x07]][2] |
shifter[codeF[dnaRead[threadIdx.x+i*4+2]& 0x07]][1] |
codeF[dnaRead[threadIdx.x+i*4+3] & 0x07]) ) ;
}
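    // The loop above packs the 32 bases starting at this thread's offset into one
    // 64-bit word (2 bits per base, earliest base in the most significant bits).
    // The shift below drops the trailing (32 - lmerLength) bases and the mask keeps
    // 2*lmerLength bits, leaving just the l-mer that starts at this thread's position.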
lmer = (lmer >> ((32 - lmerLength) << 1)) & lmerMask[lmerLength-1];
lmers[rOffset+tid]=lmer;
printf("GPU lmer: %llu\n",lmers[rOffset+tid]);
}
__global__ void encodeLmerComplementDevice( char * buffer,
const unsigned int buffSize,
const unsigned int readLength,
KEY_PTR lmers,
const unsigned int lmerLength
){
extern __shared__ char dnaRead[];//have to fix it
const unsigned int tid=threadIdx.x;
const unsigned int rOffset=(blockDim.x*blockDim.y*gridDim.x*blockIdx.y) +(blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y);
KEY_T lmer=0;
KEY_T temp=0;
dnaRead[tid]=buffer[rOffset+tid];
__syncthreads();
dnaRead[tid]=codeR[dnaRead[tid] & 0x07];
__syncthreads();
for(unsigned int i =0; i< lmerLength; i++){
temp=((KEY_T)dnaRead[(tid+i)%blockDim.x]);
lmer = (temp<<(i<<1)) | lmer;
}
lmers[rOffset+tid]=lmer;
}
__global__ void computeKmerDevice( KEY_PTR lmers,
KEY_PTR pkmers,
KEY_PTR skmers,
KEY_T validBitMask
){
const unsigned int tid=(blockDim.x*blockDim.y*gridDim.x*blockIdx.y) +(blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x;
KEY_T lmer;
//fetch lmer
lmer=lmers[tid];
//find prefix
pkmers[tid]=LMER_PREFIX(lmer,validBitMask);
//find suffix
skmers[tid] = LMER_SUFFIX(lmer,validBitMask);
}
extern "C"
void encodeLmer(
char * d_buffer,
const unsigned int bufferSize,
const unsigned int readLength,
KEY_PTR d_lmers,
const unsigned int lmerLength,
const unsigned int entriesCount
)
{
dim3 grid, block;
// printf("d_buffer before GPU: %s\n", d_buffer);
// printf("d_lmers before GPU: %llu\n", *d_lmers);
getOptimalLaunchConfigCustomized(entriesCount,&grid,&block,readLength);
encodeLmerDevice<<<grid,block,readLength+31>>>(d_buffer,bufferSize,readLength,d_lmers,lmerLength);
printf("d_lmers after GPU: %llu\n", *d_lmers);
CheckCUDAError();
}
extern "C"
void encodeLmerComplement(
char * d_buffer,
const unsigned int bufferSize,
const unsigned int readLength,
KEY_PTR d_lmers,
const unsigned int lmerLength,
const unsigned int entriesCount
)
{
dim3 grid, block;
getOptimalLaunchConfigCustomized(entriesCount,&grid,&block,readLength);
encodeLmerComplementDevice<<<grid,block,readLength>>>(d_buffer,bufferSize,readLength,d_lmers,lmerLength);
CheckCUDAError();
}
extern "C"
void computeKmer( KEY_PTR d_lmers,
KEY_PTR d_pkmers,
KEY_PTR d_skmers,
KEY_T validBitMask,
const unsigned int readLength,
const unsigned int entriesCount
){
dim3 grid, block;
getOptimalLaunchConfigCustomized(entriesCount,&grid,&block,readLength);
computeKmerDevice<<<grid,block>>>(d_lmers,d_pkmers,d_skmers,validBitMask);
CheckCUDAError();
}
|
ea25d93e134ac188d796917d44794db0fb28286e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2021-2022, The Neko Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the authors nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "inhom_dirichlet_kernel.h"
#include <device/device_config.h>
#include <device/cuda/check.h>
extern "C" {
/**
* Fortran wrapper for device inhom_dirichlet apply vector
*/
void cuda_inhom_dirichlet_apply_vector(void *msk, void *x, void *y, void *z,
void *bla_x, void *bla_y, void *bla_z, int *m) {
const dim3 nthrds(1024, 1, 1);
const dim3 nblcks(((*m)+1024 - 1)/ 1024, 1, 1);
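    /* Grid sizing is a ceiling division, so nblcks.x * nthrds.x >= *m masked entries;
       the launch below is enqueued on glb_cmd_queue, which appears to be the device
       layer's global command-queue stream. */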
hipLaunchKernelGGL(( inhom_dirichlet_apply_vector_kernel<real>)
, dim3(nblcks), dim3(nthrds), 0, (hipStream_t) glb_cmd_queue, (int *) msk,
(real *) x,
(real *) y,
(real *) z,
(real *) bla_x,
(real *) bla_y,
(real *) bla_z,
*m);
CUDA_CHECK(hipGetLastError());
}
/**
* Fortran wrapper for device inhom_dirichlet apply scalar
*/
void cuda_inhom_dirichlet_apply_scalar(void *msk, void *x,
void *bla_x, int *m) {
const dim3 nthrds(1024, 1, 1);
const dim3 nblcks(((*m)+1024 - 1)/ 1024, 1, 1);
hipLaunchKernelGGL(( inhom_dirichlet_apply_scalar_kernel<real>)
, dim3(nblcks), dim3(nthrds), 0, 0, (int *) msk,
(real *) x,
(real *) bla_x,
*m);
CUDA_CHECK(hipGetLastError());
}
}
| ea25d93e134ac188d796917d44794db0fb28286e.cu | /*
Copyright (c) 2021-2022, The Neko Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the authors nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "inhom_dirichlet_kernel.h"
#include <device/device_config.h>
#include <device/cuda/check.h>
extern "C" {
/**
* Fortran wrapper for device inhom_dirichlet apply vector
*/
void cuda_inhom_dirichlet_apply_vector(void *msk, void *x, void *y, void *z,
void *bla_x, void *bla_y, void *bla_z, int *m) {
const dim3 nthrds(1024, 1, 1);
const dim3 nblcks(((*m)+1024 - 1)/ 1024, 1, 1);
inhom_dirichlet_apply_vector_kernel<real>
<<<nblcks, nthrds, 0, (cudaStream_t) glb_cmd_queue>>>((int *) msk,
(real *) x,
(real *) y,
(real *) z,
(real *) bla_x,
(real *) bla_y,
(real *) bla_z,
*m);
CUDA_CHECK(cudaGetLastError());
}
/**
* Fortran wrapper for device inhom_dirichlet apply scalar
*/
void cuda_inhom_dirichlet_apply_scalar(void *msk, void *x,
void *bla_x, int *m) {
const dim3 nthrds(1024, 1, 1);
const dim3 nblcks(((*m)+1024 - 1)/ 1024, 1, 1);
inhom_dirichlet_apply_scalar_kernel<real>
<<<nblcks, nthrds>>>((int *) msk,
(real *) x,
(real *) bla_x,
*m);
CUDA_CHECK(cudaGetLastError());
}
}
|
5a0a713f7736e1de750ef29ba8d7dfbac6f8492b.hip | // !!! This is a file automatically generated by hipify!!!
/*
multiply.cu -- Matrix multiplication testbench - by Cody Rivera
*/
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include "hip/hip_runtime.h"
#include "hipblas.h"
#include "multiply.cuh"
#include "kernels_hip.cuh"
#define EPS 10e-3
#define MAX_TILES 255
/*
Helper functions
*/
#include "parameters.cuh"
// Based on https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
bool approxEqual(double A, double B,
double maxRelDiff = EPS)
{
// Calculate the difference.
double diff = fabs(A - B);
A = fabs(A);
B = fabs(B);
// Find the largest
double largest = (B > A) ? B : A;
if (diff <= largest * maxRelDiff)
return true;
return false;
}
template<typename FloatType>
bool matrixCompare(const FloatType* A, const FloatType* B,
unsigned int m, unsigned int n,
unsigned int& iFail, unsigned int& jFail)
{
FloatType aVal, bVal;
bool b = true;
// Cache-friendly comparison pattern
for (unsigned int j = 0; j < n && b; j++)
{
for (unsigned int i = 0; i < m && b; i++)
{
aVal = A[i + (j * m)];
bVal = B[i + (j * m)];
if (!approxEqual(aVal, bVal, EPS))
{
iFail = i;
jFail = j;
b = false;
}
}
}
return b;
}
template<typename FloatType>
void reportTestSuccess(const char* testName, double GFLOPs, double totalGFLOPs)
{
printf("%s succeeded: %g GFLOPs, %g GFLOPs acc. for transfers\n", testName, GFLOPs, totalGFLOPs);
}
template<typename FloatType>
void reportTestFailure(const char* testName,
const FloatType* orig, const FloatType* cand,
unsigned int leadDim,
unsigned int iFail, unsigned int jFail)
{
double oVal = (double)orig[iFail + (jFail * leadDim)];
double cVal = (double)cand[iFail + (jFail * leadDim)];
fprintf(stderr, "%s failed: Original[%u, %u] = %.6f != Candidate[%u, %u] = %.6f\n",
testName, iFail, jFail, oVal, iFail, jFail, cVal);
}
template<typename FloatType>
double getGFLOPs(double time, unsigned int m, unsigned int n, unsigned int k)
{
double instCount = ((double) m * (double) n * (double) k) / 10e9;
double timeSeconds = time / 1000;
return instCount / timeSeconds;
}
/*
floatTSM2 and doubleTSM2 -- Wrappers around the kernels that select
the optimal kernel.
Currently only optimal for Nvidia V100
Parameter Choice for V100:
t1 := 128
Single Precision: n ~= t2, t3 := 32
Double Precision: n ~= t2, t3 := 16 if m < 10240, and t3 := 12 otherwise
*/
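// The dispatch helpers below pick a kernel instantiation from these parameters at run
// time: t2 is chosen to cover n (2, 4, 6, 8, then 16/32), and for doubles t3 switches
// from 16 down to 12 once m reaches the threshold tested in the branches below.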
void floatTSM2(const float* devA, const float* devB, float* devC,
const unsigned int m, const unsigned int n,
const unsigned int k)
{
int blocks = (m / FLOAT_T1) + 1;
blocks = (blocks > 65536) ? 65536 : blocks;
if (n <= 2)
{
hipLaunchKernelGGL(( floatTSM2Kernel<FLOAT_T1, 2, 32>), dim3(blocks), dim3(FLOAT_T1), 0, 0, devA, devB, devC, m, n, k);
}
else if (n <= 4)
{
hipLaunchKernelGGL(( floatTSM2Kernel<FLOAT_T1, 4, 32>), dim3(blocks), dim3(FLOAT_T1), 0, 0, devA, devB, devC, m, n, k);
}
else if (n <= 6)
{
hipLaunchKernelGGL(( floatTSM2Kernel<FLOAT_T1, 6, 32>), dim3(blocks), dim3(FLOAT_T1), 0, 0, devA, devB, devC, m, n, k);
}
else if (n <= 8)
{
hipLaunchKernelGGL(( floatTSM2Kernel<FLOAT_T1, 8, 32>), dim3(blocks), dim3(FLOAT_T1), 0, 0, devA, devB, devC, m, n, k);
}
else
{
hipLaunchKernelGGL(( floatTSM2Kernel<FLOAT_T1, 16, 32>), dim3(blocks), dim3(FLOAT_T1), 0, 0, devA, devB, devC, m, n, k);
}
// Since CUBLAS starts beating TSM2 at 16, there is no need to include another kernel
}
void doubleTSM2(const double* devA, const double* devB, double* devC,
const unsigned int m, const unsigned int n,
const unsigned int k)
{
int blocks = (m / DOUBLE_T1) + 1;
blocks = (blocks > 65536) ? 65536 : blocks;
if (n <= 2)
{
if (m < 20480)
{
hipLaunchKernelGGL(( doubleTSM2Kernel<DOUBLE_T1, 2, 16>), dim3(blocks), dim3(DOUBLE_T1), 0, 0, devA, devB, devC, m, n, k);
}
else
{
hipLaunchKernelGGL(( doubleTSM2Kernel<DOUBLE_T1, 2, 12>), dim3(blocks), dim3(DOUBLE_T1), 0, 0, devA, devB, devC, m, n, k);
}
}
else if (n <= 4)
{
if (m < 20480)
{
hipLaunchKernelGGL(( doubleTSM2Kernel<DOUBLE_T1, 4, 16>), dim3(blocks), dim3(DOUBLE_T1), 0, 0, devA, devB, devC, m, n, k);
}
else
{
hipLaunchKernelGGL(( doubleTSM2Kernel<DOUBLE_T1, 4, 12>), dim3(blocks), dim3(DOUBLE_T1), 0, 0, devA, devB, devC, m, n, k);
}
}
else if (n <= 6)
{
if (m < 20480)
{
hipLaunchKernelGGL(( doubleTSM2Kernel<DOUBLE_T1, 6, 16>), dim3(blocks), dim3(DOUBLE_T1), 0, 0, devA, devB, devC, m, n, k);
}
else
{
hipLaunchKernelGGL(( doubleTSM2Kernel<DOUBLE_T1, 6, 12>), dim3(blocks), dim3(DOUBLE_T1), 0, 0, devA, devB, devC, m, n, k);
}
}
else if (n <= 8)
{
if (m < 20480)
{
hipLaunchKernelGGL(( doubleTSM2Kernel<DOUBLE_T1, 8, 16>), dim3(blocks), dim3(DOUBLE_T1), 0, 0, devA, devB, devC, m, n, k);
}
else
{
hipLaunchKernelGGL(( doubleTSM2Kernel<DOUBLE_T1, 8, 12>), dim3(blocks), dim3(DOUBLE_T1), 0, 0, devA, devB, devC, m, n, k);
}
}
else if (n <= 16)
{
if (m < 20480)
{
hipLaunchKernelGGL(( doubleTSM2Kernel<DOUBLE_T1, 16, 16>), dim3(blocks), dim3(DOUBLE_T1), 0, 0, devA, devB, devC, m, n, k);
}
else
{
hipLaunchKernelGGL(( doubleTSM2Kernel<DOUBLE_T1, 16, 12>), dim3(blocks), dim3(DOUBLE_T1), 0, 0, devA, devB, devC, m, n, k);
}
}
else
{
hipLaunchKernelGGL(( doubleTSM2Kernel<DOUBLE_T1, 32, 12>), dim3(blocks), dim3(DOUBLE_T1), 0, 0, devA, devB, devC, m, n, k);
}
}
/*
Executes the kernels
*/
template<>
bool runKernels(const float* A, const float* B, float* C,
const unsigned int m, const unsigned int n,
const unsigned int k)
{
// Candidate for C -- Used by GPU kernels
float* candC;
// Device memory
float* devA, * devB, * devC;
// Events used for timing
hipEvent_t start, end, startTotal, endTotal;
float time, timeTotal;
printf("Multiplying matrix A[%u, %u] by matrix B[%u, %u]\n\n", m, k, k, n);
// Allocates new memory
candC = (float*)malloc(m * n * sizeof(float));
if (candC == NULL)
{
fprintf(stderr, "Not enough memory\n");
return false;
}
cudaErrchk(hipMalloc((float**)&devA, m * k * sizeof(float)));
cudaErrchk(hipMalloc((float**)&devB, k * n * sizeof(float)));
cudaErrchk(hipMalloc((float**)&devC, m * n * sizeof(float)));
// Inits CUDA events
cudaErrchk(hipEventCreate(&start));
cudaErrchk(hipEventCreate(&end));
cudaErrchk(hipEventCreate(&startTotal));
cudaErrchk(hipEventCreate(&endTotal));
// Runs CUBLAS call
hipblasHandle_t handle;
cublasErrchk(hipblasCreate(&handle));
float one = 1;
float zero = 0;
cudaErrchk(hipEventRecord(startTotal));
// Cuda Memory Copy
cudaErrchk(hipMemcpy(devA, A, m * k * sizeof(float), hipMemcpyHostToDevice));
cudaErrchk(hipMemcpy(devB, B, k * n * sizeof(float), hipMemcpyHostToDevice));
cudaErrchk(hipEventRecord(start));
cublasErrchk(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
m, n, k,
&one, devA, HIP_R_32F, m, devB, HIP_R_32F, k,
&zero, devC, HIP_R_32F, m, HIP_R_32F, HIPBLAS_GEMM_DEFAULT));
cudaErrchk(hipEventRecord(end));
// Copies result back
cudaErrchk(hipMemcpy(C, devC, m * n * sizeof(float), hipMemcpyDeviceToHost));
cudaErrchk(hipEventRecord(endTotal));
cudaErrchk(hipDeviceSynchronize());
cudaErrchk(hipEventElapsedTime(&time, start, end));
cudaErrchk(hipEventElapsedTime(&timeTotal, startTotal, endTotal));
printf("cublas time consume:%f,\t total time: %f\n", time, timeTotal);
reportTestSuccess<float>("CUBLAS Test", getGFLOPs<float>(time, m, n, k), getGFLOPs<float>(timeTotal, m, n, k));
cublasErrchk(hipblasDestroy(handle));
// Runs kernels
// Failure flag
bool status;
// Failure indices
unsigned int iFail, jFail;
// Clear result matrix
cudaErrchk(hipMemset(devC, 0, m * n * sizeof(float)));
cudaErrchk(hipEventRecord(startTotal));
// Cuda Memory Copy
cudaErrchk(hipMemcpy(devA, A, m * k * sizeof(float), hipMemcpyHostToDevice));
cudaErrchk(hipMemcpy(devB, B, k * n * sizeof(float), hipMemcpyHostToDevice));
int blocks = (k / FLOAT_T1) + 1;
blocks = (blocks > 65536) ? 65536 : blocks;
cudaErrchk(hipEventRecord(start));
#ifdef SINGLE_PARAM
hipLaunchKernelGGL(( floatTSM2Kernel<FLOAT_T1, FLOAT_T2, FLOAT_T3>), dim3(blocks), dim3(FLOAT_T1), 0, 0, devA, devB, devC, m, n, k);
#else
floatTSM2(devA, devB, devC, m, n, k);
#endif
cudaErrchk(hipGetLastError());
cudaErrchk(hipEventRecord(end));
// Copies result back
cudaErrchk(hipMemcpy(candC, devC, m * n * sizeof(float), hipMemcpyDeviceToHost));
cudaErrchk(hipEventRecord(endTotal));
cudaErrchk(hipDeviceSynchronize());
cudaErrchk(hipEventElapsedTime(&time, start, end));
cudaErrchk(hipEventElapsedTime(&timeTotal, startTotal, endTotal));
printf("tsm time consume: %f,\t total time: %f\n", time, timeTotal);
status = matrixCompare<float>(C, candC, m, n, iFail, jFail);
if (status)
{
reportTestSuccess<float>("TSM2 Kernel Test",
getGFLOPs<float>(time, m, n, k),
getGFLOPs<float>(timeTotal, m, n, k));
}
else
{
reportTestFailure<float>("TSM2 Kernel Test", C, candC, m, iFail, jFail);
}
cudaErrchk(hipEventDestroy(start));
cudaErrchk(hipEventDestroy(end));
cudaErrchk(hipEventDestroy(startTotal));
cudaErrchk(hipEventDestroy(endTotal));
free(candC);
cudaErrchk(hipFree(devA));
cudaErrchk(hipFree(devB));
cudaErrchk(hipFree(devC));
return true;
}
template<>
bool runKernels(const double* A, const double* B, double* C,
const unsigned int m, const unsigned int n,
const unsigned int k)
{
// Candidate for C -- Used by GPU kernels
double* candC;
// Device memory
double* devA, * devB, * devC;
// Events used for timing
hipEvent_t start, end, startTotal, endTotal;
float time, timeTotal;
printf("Multiplying matrix A[%u, %u] by matrix B[%u, %u]\n\n", m, k, k, n);
// Allocates new memory
candC = (double*)malloc(m * n * sizeof(double));
if (candC == NULL)
{
fprintf(stderr, "Not enough memory\n");
return false;
}
cudaErrchk(hipMalloc((double**)&devA, m * k * sizeof(double)));
cudaErrchk(hipMalloc((double**)&devB, k * n * sizeof(double)));
cudaErrchk(hipMalloc((double**)&devC, m * n * sizeof(double)));
// Inits CUDA events
cudaErrchk(hipEventCreate(&start));
cudaErrchk(hipEventCreate(&end));
cudaErrchk(hipEventCreate(&startTotal));
cudaErrchk(hipEventCreate(&endTotal));
// Runs CUBLAS call
hipblasHandle_t handle;
cublasErrchk(hipblasCreate(&handle));
double one = 1;
double zero = 0;
cudaErrchk(hipEventRecord(startTotal));
// Cuda Memory Copy
cudaErrchk(hipMemcpy(devA, A, m * k * sizeof(double), hipMemcpyHostToDevice));
cudaErrchk(hipMemcpy(devB, B, k * n * sizeof(double), hipMemcpyHostToDevice));
cudaErrchk(hipEventRecord(start));
cublasErrchk(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
m, n, k,
&one, devA, HIP_R_64F, m, devB, HIP_R_64F, k,
&zero, devC, HIP_R_64F, m, HIP_R_64F, HIPBLAS_GEMM_DEFAULT));
cudaErrchk(hipEventRecord(end));
// Copies result back
cudaErrchk(hipMemcpy(C, devC, m * n * sizeof(double), hipMemcpyDeviceToHost));
cudaErrchk(hipEventRecord(endTotal));
cudaErrchk(hipDeviceSynchronize());
cudaErrchk(hipEventElapsedTime(&time, start, end));
cudaErrchk(hipEventElapsedTime(&timeTotal, startTotal, endTotal));
reportTestSuccess<double>("CUBLAS Test", getGFLOPs<double>(time, m, n, k), getGFLOPs<double>(timeTotal, m, n, k));
cublasErrchk(hipblasDestroy(handle));
// Runs kernel
// Failure flag
bool status;
// Failure indices
unsigned int iFail, jFail;
// If a TSM
// Clear result matrix
cudaErrchk(hipMemset(devC, 0, m * n * sizeof(double)));
cudaErrchk(hipEventRecord(startTotal));
// Cuda Memory Copy
cudaErrchk(hipMemcpy(devA, A, m * k * sizeof(double), hipMemcpyHostToDevice));
cudaErrchk(hipMemcpy(devB, B, k * n * sizeof(double), hipMemcpyHostToDevice));
int blocks = (k / DOUBLE_T1) + 1;
blocks = (blocks > 65536) ? 65536 : blocks;
cudaErrchk(hipEventRecord(start));
#ifdef SINGLE_PARAM
hipLaunchKernelGGL(( doubleTSM2Kernel<DOUBLE_T1, DOUBLE_T2, DOUBLE_T3>), dim3(blocks), dim3(DOUBLE_T1), 0, 0, devA, devB, devC, m, n, k);
#else
doubleTSM2(devA, devB, devC, m, n, k);
#endif
cudaErrchk(hipGetLastError());
cudaErrchk(hipEventRecord(end));
// Copies result back
cudaErrchk(hipMemcpy(candC, devC, m * n * sizeof(double), hipMemcpyDeviceToHost));
cudaErrchk(hipEventRecord(endTotal));
cudaErrchk(hipDeviceSynchronize());
cudaErrchk(hipEventElapsedTime(&time, start, end));
cudaErrchk(hipEventElapsedTime(&timeTotal, startTotal, endTotal));
status = matrixCompare<double>(C, candC, m, n, iFail, jFail);
if (status)
{
reportTestSuccess<double>("TSM2 Kernel Test",
getGFLOPs<double>(time, m, n, k),
getGFLOPs<double>(timeTotal, m, n, k));
}
else
{
reportTestFailure<double>("TSM2 Kernel Test", C, candC, m, iFail, jFail);
}
cudaErrchk(hipEventDestroy(start));
cudaErrchk(hipEventDestroy(end));
cudaErrchk(hipEventDestroy(startTotal));
cudaErrchk(hipEventDestroy(endTotal));
free(candC);
cudaErrchk(hipFree(devA));
cudaErrchk(hipFree(devB));
cudaErrchk(hipFree(devC));
return true;
} | 5a0a713f7736e1de750ef29ba8d7dfbac6f8492b.cu | /*
multiply.cu -- Matrix multiplication testbench - by Cody Rivera
*/
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include "cuda_runtime.h"
#include "cublas_v2.h"
#include "multiply.cuh"
#include "kernels.cuh"
#define EPS 10e-3
#define MAX_TILES 255
/*
Helper functions
*/
#include "parameters.cuh"
// Based on https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
bool approxEqual(double A, double B,
double maxRelDiff = EPS)
{
// Calculate the difference.
double diff = fabs(A - B);
A = fabs(A);
B = fabs(B);
// Find the largest
double largest = (B > A) ? B : A;
if (diff <= largest * maxRelDiff)
return true;
return false;
}
template<typename FloatType>
bool matrixCompare(const FloatType* A, const FloatType* B,
unsigned int m, unsigned int n,
unsigned int& iFail, unsigned int& jFail)
{
FloatType aVal, bVal;
bool b = true;
// Cache-friendly comparison pattern
for (unsigned int j = 0; j < n && b; j++)
{
for (unsigned int i = 0; i < m && b; i++)
{
aVal = A[i + (j * m)];
bVal = B[i + (j * m)];
if (!approxEqual(aVal, bVal, EPS))
{
iFail = i;
jFail = j;
b = false;
}
}
}
return b;
}
template<typename FloatType>
void reportTestSuccess(const char* testName, double GFLOPs, double totalGFLOPs)
{
printf("%s succeeded: %g GFLOPs, %g GFLOPs acc. for transfers\n", testName, GFLOPs, totalGFLOPs);
}
template<typename FloatType>
void reportTestFailure(const char* testName,
const FloatType* orig, const FloatType* cand,
unsigned int leadDim,
unsigned int iFail, unsigned int jFail)
{
double oVal = (double)orig[iFail + (jFail * leadDim)];
double cVal = (double)cand[iFail + (jFail * leadDim)];
fprintf(stderr, "%s failed: Original[%u, %u] = %.6f != Candidate[%u, %u] = %.6f\n",
testName, iFail, jFail, oVal, iFail, jFail, cVal);
}
template<typename FloatType>
double getGFLOPs(double time, unsigned int m, unsigned int n, unsigned int k)
{
double instCount = ((double) m * (double) n * (double) k) / 10e9;
double timeSeconds = time / 1000;
return instCount / timeSeconds;
}
/*
floatTSM2 and doubleTSM2 -- Wrappers around the kernels that select
the optimal kernel.
Currently only optimal for Nvidia V100
Parameter Choice for V100:
t1 := 128
Single Precision: n ~= t2, t3 := 32
Double Precision: n ~= t2, t3 := 16 if m < 10240, and t3 := 12 otherwise
*/
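// The dispatch helpers below pick a kernel instantiation from these parameters at run
// time: t2 is chosen to cover n (2, 4, 6, 8, then 16/32), and for doubles t3 switches
// from 16 down to 12 once m reaches the threshold tested in the branches below.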
void floatTSM2(const float* devA, const float* devB, float* devC,
const unsigned int m, const unsigned int n,
const unsigned int k)
{
int blocks = (m / FLOAT_T1) + 1;
blocks = (blocks > 65536) ? 65536 : blocks;
if (n <= 2)
{
floatTSM2Kernel<FLOAT_T1, 2, 32><<<blocks, FLOAT_T1>>>(devA, devB, devC, m, n, k);
}
else if (n <= 4)
{
floatTSM2Kernel<FLOAT_T1, 4, 32><<<blocks, FLOAT_T1>>>(devA, devB, devC, m, n, k);
}
else if (n <= 6)
{
floatTSM2Kernel<FLOAT_T1, 6, 32><<<blocks, FLOAT_T1>>>(devA, devB, devC, m, n, k);
}
else if (n <= 8)
{
floatTSM2Kernel<FLOAT_T1, 8, 32><<<blocks, FLOAT_T1>>>(devA, devB, devC, m, n, k);
}
else
{
floatTSM2Kernel<FLOAT_T1, 16, 32><<<blocks, FLOAT_T1>>>(devA, devB, devC, m, n, k);
}
// Since CUBLAS starts beating TSM2 at 16, there is no need to include another kernel
}
void doubleTSM2(const double* devA, const double* devB, double* devC,
const unsigned int m, const unsigned int n,
const unsigned int k)
{
int blocks = (m / DOUBLE_T1) + 1;
blocks = (blocks > 65536) ? 65536 : blocks;
if (n <= 2)
{
if (m < 20480)
{
doubleTSM2Kernel<DOUBLE_T1, 2, 16><<<blocks, DOUBLE_T1>>>(devA, devB, devC, m, n, k);
}
else
{
doubleTSM2Kernel<DOUBLE_T1, 2, 12><<<blocks, DOUBLE_T1>>>(devA, devB, devC, m, n, k);
}
}
else if (n <= 4)
{
if (m < 20480)
{
doubleTSM2Kernel<DOUBLE_T1, 4, 16><<<blocks, DOUBLE_T1>>>(devA, devB, devC, m, n, k);
}
else
{
doubleTSM2Kernel<DOUBLE_T1, 4, 12><<<blocks, DOUBLE_T1>>>(devA, devB, devC, m, n, k);
}
}
else if (n <= 6)
{
if (m < 20480)
{
doubleTSM2Kernel<DOUBLE_T1, 6, 16><<<blocks, DOUBLE_T1>>>(devA, devB, devC, m, n, k);
}
else
{
doubleTSM2Kernel<DOUBLE_T1, 6, 12><<<blocks, DOUBLE_T1>>>(devA, devB, devC, m, n, k);
}
}
else if (n <= 8)
{
if (m < 20480)
{
doubleTSM2Kernel<DOUBLE_T1, 8, 16><<<blocks, DOUBLE_T1>>>(devA, devB, devC, m, n, k);
}
else
{
doubleTSM2Kernel<DOUBLE_T1, 8, 12><<<blocks, DOUBLE_T1>>>(devA, devB, devC, m, n, k);
}
}
else if (n <= 16)
{
if (m < 20480)
{
doubleTSM2Kernel<DOUBLE_T1, 16, 16><<<blocks, DOUBLE_T1>>>(devA, devB, devC, m, n, k);
}
else
{
doubleTSM2Kernel<DOUBLE_T1, 16, 12><<<blocks, DOUBLE_T1>>>(devA, devB, devC, m, n, k);
}
}
else
{
doubleTSM2Kernel<DOUBLE_T1, 32, 12><<<blocks, DOUBLE_T1>>>(devA, devB, devC, m, n, k);
}
}
/*
Executes the kernels
*/
template<>
bool runKernels(const float* A, const float* B, float* C,
const unsigned int m, const unsigned int n,
const unsigned int k)
{
// Candidate for C -- Used by GPU kernels
float* candC;
// Device memory
float* devA, * devB, * devC;
// Events used for timing
cudaEvent_t start, end, startTotal, endTotal;
float time, timeTotal;
printf("Multiplying matrix A[%u, %u] by matrix B[%u, %u]\n\n", m, k, k, n);
// Allocates new memory
candC = (float*)malloc(m * n * sizeof(float));
if (candC == NULL)
{
fprintf(stderr, "Not enough memory\n");
return false;
}
cudaErrchk(cudaMalloc((float**)&devA, m * k * sizeof(float)));
cudaErrchk(cudaMalloc((float**)&devB, k * n * sizeof(float)));
cudaErrchk(cudaMalloc((float**)&devC, m * n * sizeof(float)));
// Inits CUDA events
cudaErrchk(cudaEventCreate(&start));
cudaErrchk(cudaEventCreate(&end));
cudaErrchk(cudaEventCreate(&startTotal));
cudaErrchk(cudaEventCreate(&endTotal));
// Runs CUBLAS call
cublasHandle_t handle;
cublasErrchk(cublasCreate(&handle));
float one = 1;
float zero = 0;
cudaErrchk(cudaEventRecord(startTotal));
// Cuda Memory Copy
cudaErrchk(cudaMemcpy(devA, A, m * k * sizeof(float), cudaMemcpyHostToDevice));
cudaErrchk(cudaMemcpy(devB, B, k * n * sizeof(float), cudaMemcpyHostToDevice));
cudaErrchk(cudaEventRecord(start));
cublasErrchk(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N,
m, n, k,
&one, devA, CUDA_R_32F, m, devB, CUDA_R_32F, k,
&zero, devC, CUDA_R_32F, m, CUDA_R_32F, CUBLAS_GEMM_DEFAULT));
cudaErrchk(cudaEventRecord(end));
// Copies result back
cudaErrchk(cudaMemcpy(C, devC, m * n * sizeof(float), cudaMemcpyDeviceToHost));
cudaErrchk(cudaEventRecord(endTotal));
cudaErrchk(cudaDeviceSynchronize());
cudaErrchk(cudaEventElapsedTime(&time, start, end));
cudaErrchk(cudaEventElapsedTime(&timeTotal, startTotal, endTotal));
printf("cublas time consume:%f,\t total time: %f\n", time, timeTotal);
reportTestSuccess<float>("CUBLAS Test", getGFLOPs<float>(time, m, n, k), getGFLOPs<float>(timeTotal, m, n, k));
cublasErrchk(cublasDestroy(handle));
// Runs kernels
// Failure flag
bool status;
// Failure indices
unsigned int iFail, jFail;
// Clear result matrix
cudaErrchk(cudaMemset(devC, 0, m * n * sizeof(float)));
cudaErrchk(cudaEventRecord(startTotal));
// Cuda Memory Copy
cudaErrchk(cudaMemcpy(devA, A, m * k * sizeof(float), cudaMemcpyHostToDevice));
cudaErrchk(cudaMemcpy(devB, B, k * n * sizeof(float), cudaMemcpyHostToDevice));
int blocks = (k / FLOAT_T1) + 1;
blocks = (blocks > 65536) ? 65536 : blocks;
cudaErrchk(cudaEventRecord(start));
#ifdef SINGLE_PARAM
floatTSM2Kernel<FLOAT_T1, FLOAT_T2, FLOAT_T3><<<blocks, FLOAT_T1>>>(devA, devB, devC, m, n, k);
#else
floatTSM2(devA, devB, devC, m, n, k);
#endif
cudaErrchk(cudaGetLastError());
cudaErrchk(cudaEventRecord(end));
// Copies result back
cudaErrchk(cudaMemcpy(candC, devC, m * n * sizeof(float), cudaMemcpyDeviceToHost));
cudaErrchk(cudaEventRecord(endTotal));
cudaErrchk(cudaDeviceSynchronize());
cudaErrchk(cudaEventElapsedTime(&time, start, end));
cudaErrchk(cudaEventElapsedTime(&timeTotal, startTotal, endTotal));
printf("tsm time consume: %f,\t total time: %f\n", time, timeTotal);
status = matrixCompare<float>(C, candC, m, n, iFail, jFail);
if (status)
{
reportTestSuccess<float>("TSM2 Kernel Test",
getGFLOPs<float>(time, m, n, k),
getGFLOPs<float>(timeTotal, m, n, k));
}
else
{
reportTestFailure<float>("TSM2 Kernel Test", C, candC, m, iFail, jFail);
}
cudaErrchk(cudaEventDestroy(start));
cudaErrchk(cudaEventDestroy(end));
cudaErrchk(cudaEventDestroy(startTotal));
cudaErrchk(cudaEventDestroy(endTotal));
free(candC);
cudaErrchk(cudaFree(devA));
cudaErrchk(cudaFree(devB));
cudaErrchk(cudaFree(devC));
return true;
}
template<>
bool runKernels(const double* A, const double* B, double* C,
const unsigned int m, const unsigned int n,
const unsigned int k)
{
// Candidate for C -- Used by GPU kernels
double* candC;
// Device memory
double* devA, * devB, * devC;
// Events used for timing
cudaEvent_t start, end, startTotal, endTotal;
float time, timeTotal;
printf("Multiplying matrix A[%u, %u] by matrix B[%u, %u]\n\n", m, k, k, n);
// Allocates new memory
candC = (double*)malloc(m * n * sizeof(double));
if (candC == NULL)
{
fprintf(stderr, "Not enough memory\n");
return false;
}
cudaErrchk(cudaMalloc((double**)&devA, m * k * sizeof(double)));
cudaErrchk(cudaMalloc((double**)&devB, k * n * sizeof(double)));
cudaErrchk(cudaMalloc((double**)&devC, m * n * sizeof(double)));
// Inits CUDA events
cudaErrchk(cudaEventCreate(&start));
cudaErrchk(cudaEventCreate(&end));
cudaErrchk(cudaEventCreate(&startTotal));
cudaErrchk(cudaEventCreate(&endTotal));
// Runs CUBLAS call
cublasHandle_t handle;
cublasErrchk(cublasCreate(&handle));
double one = 1;
double zero = 0;
cudaErrchk(cudaEventRecord(startTotal));
// Cuda Memory Copy
cudaErrchk(cudaMemcpy(devA, A, m * k * sizeof(double), cudaMemcpyHostToDevice));
cudaErrchk(cudaMemcpy(devB, B, k * n * sizeof(double), cudaMemcpyHostToDevice));
cudaErrchk(cudaEventRecord(start));
cublasErrchk(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N,
m, n, k,
&one, devA, CUDA_R_64F, m, devB, CUDA_R_64F, k,
&zero, devC, CUDA_R_64F, m, CUDA_R_64F, CUBLAS_GEMM_DEFAULT));
cudaErrchk(cudaEventRecord(end));
// Copies result back
cudaErrchk(cudaMemcpy(C, devC, m * n * sizeof(double), cudaMemcpyDeviceToHost));
cudaErrchk(cudaEventRecord(endTotal));
cudaErrchk(cudaDeviceSynchronize());
cudaErrchk(cudaEventElapsedTime(&time, start, end));
cudaErrchk(cudaEventElapsedTime(&timeTotal, startTotal, endTotal));
reportTestSuccess<double>("CUBLAS Test", getGFLOPs<double>(time, m, n, k), getGFLOPs<double>(timeTotal, m, n, k));
cublasErrchk(cublasDestroy(handle));
// Runs kernel
// Failure flag
bool status;
// Failure indices
unsigned int iFail, jFail;
// If a TSM
// Clear result matrix
cudaErrchk(cudaMemset(devC, 0, m * n * sizeof(double)));
cudaErrchk(cudaEventRecord(startTotal));
// Cuda Memory Copy
cudaErrchk(cudaMemcpy(devA, A, m * k * sizeof(double), cudaMemcpyHostToDevice));
cudaErrchk(cudaMemcpy(devB, B, k * n * sizeof(double), cudaMemcpyHostToDevice));
int blocks = (k / DOUBLE_T1) + 1;
blocks = (blocks > 65536) ? 65536 : blocks;
cudaErrchk(cudaEventRecord(start));
#ifdef SINGLE_PARAM
doubleTSM2Kernel<DOUBLE_T1, DOUBLE_T2, DOUBLE_T3><<<blocks, DOUBLE_T1>>>(devA, devB, devC, m, n, k);
#else
doubleTSM2(devA, devB, devC, m, n, k);
#endif
cudaErrchk(cudaGetLastError());
cudaErrchk(cudaEventRecord(end));
// Copies result back
cudaErrchk(cudaMemcpy(candC, devC, m * n * sizeof(double), cudaMemcpyDeviceToHost));
cudaErrchk(cudaEventRecord(endTotal));
cudaErrchk(cudaDeviceSynchronize());
cudaErrchk(cudaEventElapsedTime(&time, start, end));
cudaErrchk(cudaEventElapsedTime(&timeTotal, startTotal, endTotal));
status = matrixCompare<double>(C, candC, m, n, iFail, jFail);
if (status)
{
reportTestSuccess<double>("TSM2 Kernel Test",
getGFLOPs<double>(time, m, n, k),
getGFLOPs<double>(timeTotal, m, n, k));
}
else
{
reportTestFailure<double>("TSM2 Kernel Test", C, candC, m, iFail, jFail);
}
cudaErrchk(cudaEventDestroy(start));
cudaErrchk(cudaEventDestroy(end));
cudaErrchk(cudaEventDestroy(startTotal));
cudaErrchk(cudaEventDestroy(endTotal));
free(candC);
cudaErrchk(cudaFree(devA));
cudaErrchk(cudaFree(devB));
cudaErrchk(cudaFree(devC));
return true;
} |
b1a993eb1929220167c3c9174654b5162878dfb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Based on CUDA SDK template from NVIDIA
// sgm algorithm adapted from http://lunokhod.org/?p=1403
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h>
#include <limits.h>
#include <limits>
#include <algorithm>
// includes, project
#include <cutil_inline.h>
#define MMAX_BRIGHTNESS 255
#define PENALTY1 15
#define PENALTY2 100
#define COSTS(i,j,d) costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define ACCUMULATED_COSTS(i,j,d) accumulated_costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define LEFT_IMAGE(i,j) left_image[(i)+(j)*nx]
#define RIGHT_IMAGE(i,j) right_image[(i)+(j)*nx]
#define DISP_IMAGE(i,j) disp_image[(i)+(j)*nx]
#define MMAX(a,b) (((a)>(b))?(a):(b))
#define MMIN(a,b) (((a)<(b))?(a):(b))
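/* Cost-volume layout implied by the two macros above: element (i,j,d) of the
   costs / accumulated_costs arrays lives at i*disp_range + j*nx*disp_range + d,
   i.e. row j varies slowest, column i next, and disparity d is contiguous. */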
/* function headers */
void determine_costs(const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range);
void evaluate_path( const int *prior, const int* local,
int path_intensity_gradient, int *curr_cost,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range ) ;
void d_iterate_direction ( dim3 block, dim3 grid, const int* d_costs,
const int *d_left_image, int *d_accumulated_costs,
const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range ) ;
int find_min_index( const int *v, const int dist_range ) ;
void create_disparity_view( const int *accumulated_costs , int * disp_image,
                            const int nx, const int ny, const int disp_range ) ;
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range );
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range );
void usage(char *command);
__device__ int d_find_min_index ( const int *v, const int disp_range );
__device__ void d_evaluate_path ( int *prior, int *local,
int path_intensity_gradient, int *curr_cost,
int nx, int ny, int disp_range );
/* device functions and kernels */
// __global__ void d_determine_costs ( int *left_image, int *right_image, int *costs,
// int nx, int ny, int disp_range )
// {
// int col_size = ceil((float) disp_range / blockDim.y);
// int x = (ceil((float)blockIdx.x / col_size) * blockDim.x) + threadIdx.x;
// int y = blockIdx.y;
// int d = ((blockIdx.x % col_size) * blockDim.y) + threadIdx.y;
// if ( (y < ny) && (d < disp_range) && (x < nx))
// {
// COSTS(x,y,d) = 255u;
// if (x >= d)
// COSTS(x,y,d) = abs( LEFT_IMAGE(x,y) - RIGHT_IMAGE(x-d,y) );
// }
// }
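/* Thread mapping for the active kernel below: blockIdx.y selects the image row,
   while blockIdx.x is split via row_size = ceil(nx / blockDim.x) into an x-tile
   (blockIdx.x % row_size) and a disparity tile (blockIdx.x / row_size), so one
   1-D grid dimension covers both the column range and the disparity range. */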
__global__ void d_determine_costs ( int *left_image, int *right_image, int *costs,
int nx, int ny, int disp_range )
{
int row_size = ceil((float) nx / blockDim.x);
int x = ((blockIdx.x % row_size) * blockDim.x) + threadIdx.x;
int y = blockIdx.y;
int d = ((blockIdx.x / row_size) * blockDim.y) + threadIdx.y;
if ( (y < ny) && (d < disp_range) && (x < nx))
{
COSTS(x,y,d) = 255u;
if (x >= d)
COSTS(x,y,d) = abs( LEFT_IMAGE(x,y) - RIGHT_IMAGE(x-d,y) );
}
}
__global__ void d_iterate_direction_dirxpos ( const int dirx, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = 0;
int y = blockIdx.y;
int d = threadIdx.z;
if ( (y < ny) && (d < disp_range) )
{
ACCUMULATED_COSTS(0,y,d) += COSTS(0,y,d);
__syncthreads();
for (x = 1; x < nx; x++)
{
d_evaluate_path( &ACCUMULATED_COSTS(x-dirx,y,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x-dirx,y)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
}
__global__ void d_iterate_direction_dirypos ( const int diry, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = blockIdx.y;
int y = 0;
int d = threadIdx.z;
if ( (x < nx) && (d < disp_range) )
{
      ACCUMULATED_COSTS(x,0,d) += COSTS(x,0,d);
__syncthreads();
for (y = 1; y < ny; y++)
{
d_evaluate_path( &ACCUMULATED_COSTS(x,y-diry,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x,y-diry)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
// const int WIDTH = nx;
// const int HEIGHT = ny;
// for ( int i = 0; i < WIDTH; i++ ) {
// for ( int j = 0; j < HEIGHT; j++ ) {
// if(j==0) {
// for ( int d = 0; d < disp_range; d++ ) {
// ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
// }
// }
// else {
// evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
// &COSTS(i,j,0),
// abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
// &ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
// }
// }
// }
}
__global__ void d_iterate_direction_dirxneg ( const int dirx, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = nx-1;
int y = blockIdx.y;
int d = threadIdx.z;
if ( (y < ny) && (d < disp_range) )
{
ACCUMULATED_COSTS(nx-1,y,d) += COSTS(nx-1,y,d);
__syncthreads();
for (x = nx-2; x >= 0; x--)
{
d_evaluate_path( &ACCUMULATED_COSTS(x-dirx,y,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x-dirx,y)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
// const int WIDTH = nx;
// const int HEIGHT = ny;
// for ( int j = 0; j < HEIGHT; j++ ) {
// for ( int i = WIDTH-1; i >= 0; i-- ) {
// if(i==WIDTH-1) {
// for ( int d = 0; d < disp_range; d++ ) {
// ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
// }
// }
// else {
// evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
// &COSTS(i,j,0),
// abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
// &ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
// }
// }
// }
}
__global__ void d_iterate_direction_diryneg ( const int diry, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = blockIdx.y;
int y = ny-1;
int d = threadIdx.z;
if ( (x < nx) && (d < disp_range) )
{
      ACCUMULATED_COSTS(x,ny-1,d) += COSTS(x,ny-1,d);
__syncthreads();
for (y = ny-2; y >= 0; y--)
{
d_evaluate_path( &ACCUMULATED_COSTS(x,y-diry,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x,y-diry)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
// const int WIDTH = nx;
// const int HEIGHT = ny;
// for ( int i = 0; i < WIDTH; i++ ) {
// for ( int j = HEIGHT-1; j >= 0; j-- ) {
// if(j==HEIGHT-1) {
// for ( int d = 0; d < disp_range; d++ ) {
// ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
// }
// }
// else {
// evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
// &COSTS(i,j,0),
// abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
// &ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
// }
// }
// }
}
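/* d_evaluate_path applies the usual SGM path recurrence with one thread per disparity:
   L(p,d) = C(p,d) + min( L(p-r,d), L(p-r,d+/-1) + PENALTY1, min_k L(p-r,k) + P2 ) - min_k L(p-r,k),
   where P2 = MMAX(PENALTY1, PENALTY2 / path_intensity_gradient) when the gradient is non-zero. */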
__device__ void d_evaluate_path ( int *prior, int *local,
int path_intensity_gradient, int *curr_cost,
int nx, int ny, int disp_range )
{
//memcpy(curr_cost, local, sizeof(int)*disp_range);
int d = threadIdx.z;
curr_cost[d] = local[threadIdx.z];
__syncthreads();
int e_smooth = INT_MAX;
for ( int d_p = 0; d_p < disp_range; d_p++ )
{
if ( d_p - d == 0 ) {
      // No penalty
      e_smooth = MMIN(e_smooth,prior[d_p]);
    } else if ( abs(d_p - d) == 1 ) {
      // Small penalty
      e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
    } else {
      // Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
__syncthreads();
curr_cost[d] += e_smooth;
int min = INT_MAX;
for ( int d1 = 0; d1 < disp_range; d1++ ) {
if (prior[d1]<min) min=prior[d1];
}
__syncthreads();
curr_cost[d] -= min;
}
__global__ void d_inplace_sum_views ( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int pos = (blockIdx.x * blockDim.x) + threadIdx.x;
int size = nx * ny * disp_range;
if ( pos < size )
im1[pos] += im2[pos];
}
__global__ void d_create_disparity_view ( int *accumulated_costs , int * disp_image,
int nx, int ny, int disp_range )
{
int pos = (blockIdx.x * blockDim.x) + threadIdx.x;
int size = nx * ny ;
if ( pos < size )
disp_image[pos] = 4 * d_find_min_index(&accumulated_costs[pos * disp_range], disp_range);
}
__device__ int d_find_min_index ( const int *v, const int disp_range )
{
int min = INT_MAX;
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
/* functions code */
void determine_costs ( const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range )
{
std::fill(costs, costs+nx*ny*disp_range, 255u);
for ( int j = 0; j < ny; j++ ) {
for ( int d = 0; d < disp_range; d++ ) {
for ( int i = d; i < nx; i++ ) {
COSTS(i,j,d) = abs( LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j) );
}
}
}
}
void iterate_direction_dirxpos ( const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = 0; i < WIDTH; i++ ) {
if(i==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)) ,
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
}
}
}
void iterate_direction_dirypos ( const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_dirxneg ( const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_diryneg ( const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
}
}
/*
 * d_iterate_direction: runs the per-direction cost aggregation
 * (the iterate_direction_* sweeps) on the GPU
*/
void d_iterate_direction ( int* d_costs,
const int *d_left_image, int *d_accumulated_costs,
const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
dim3 block1d(1);
dim3 grid1d(1);
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
//iterate_direction_dirxpos(dirx,left_image,costs,accumulated_costs, nx,
// ny, disp_range);
block1d.z = disp_range;
grid1d.y = ny;
hipMemcpy(d_accumulated_costs, accumulated_costs, nx*ny*disp_range*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( d_iterate_direction_dirxpos) , dim3(grid1d), dim3(block1d) , 0, 0, dirx, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
hipMemcpy(accumulated_costs, d_accumulated_costs,
                 nx*ny*disp_range*sizeof(int), hipMemcpyDeviceToHost);
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
// iterate_direction_dirypos(diry,left_image,costs,accumulated_costs, nx,
// ny, disp_range);
block1d.z = disp_range;
grid1d.y = nx;
hipMemcpy(d_accumulated_costs, accumulated_costs, nx*ny*disp_range*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( d_iterate_direction_dirypos) , dim3(grid1d), dim3(block1d) , 0, 0, diry, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
hipMemcpy(accumulated_costs, d_accumulated_costs,
                 nx*ny*disp_range*sizeof(int), hipMemcpyDeviceToHost);
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
//iterate_direction_dirxneg(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
block1d.z = disp_range;
grid1d.y = ny;
hipMemcpy(d_accumulated_costs, accumulated_costs,
nx*ny*disp_range*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( d_iterate_direction_dirxneg) , dim3(grid1d), dim3(block1d) , 0, 0, dirx, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
hipMemcpy(accumulated_costs, d_accumulated_costs,
nx*ny*disp_range*sizeof(int), hipMemcpyDeviceToHost);
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
//iterate_direction_diryneg(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
block1d.z = disp_range;
grid1d.y = nx;
hipMemcpy(d_accumulated_costs, accumulated_costs,
nx*ny*disp_range*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( d_iterate_direction_diryneg) , dim3(grid1d), dim3(block1d) , 0, 0, diry, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
hipMemcpy(accumulated_costs, d_accumulated_costs,
nx*ny*disp_range*sizeof(int),
hipMemcpyDeviceToHost);
}
}
void iterate_direction ( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
iterate_direction_dirxpos(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
iterate_direction_dirypos(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
iterate_direction_dirxneg(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
iterate_direction_diryneg(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
}
// ADD two cost images
void inplace_sum_views ( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int *im1_init = im1;
while ( im1 != (im1_init + (nx*ny*disp_range)) ) {
*im1 += *im2;
im1++;
im2++;
}
}
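// Winner-take-all helper: return the index of the smallest aggregated cost over the disparity range.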
int find_min_index ( const int *v, const int disp_range )
{
int min = std::numeric_limits<int>::max();
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
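/*
 * SGM path-cost update: for each disparity d,
 *   L(p,d) = C(p,d) + min( L(p-r,d), L(p-r,d±1) + P1, min_k L(p-r,k) + P2' ) - min_k L(p-r,k)
 * where `prior` holds L(p-r,·), `local` holds C(p,·), P1 = PENALTY1 and
 * P2' = max(PENALTY1, PENALTY2 / |I(p) - I(p-r)|) (PENALTY2 when the intensity gradient is zero).
 * Subtracting min_k L(p-r,k) keeps the accumulated costs from growing without bound.
 */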
void evaluate_path ( const int *prior, const int *local,
int path_intensity_gradient, int *curr_cost ,
const int nx, const int ny, const int disp_range )
{
memcpy(curr_cost, local, sizeof(int)*disp_range);
for ( int d = 0; d < disp_range; d++ ) {
int e_smooth = std::numeric_limits<int>::max();
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
      // No penalty
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
      // Small penalty
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
      // Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
}
int min = std::numeric_limits<int>::max();
for ( int d = 0; d < disp_range; d++ ) {
if (prior[d]<min) min=prior[d];
}
for ( int d = 0; d < disp_range; d++ ) {
curr_cost[d]-=min;
}
}
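// Winner-take-all disparity selection per pixel; the result is multiplied by 4,
// presumably to spread the disparity values over the 8-bit range of the PGM output.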
void create_disparity_view ( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range )
{
for ( int j = 0; j < ny; j++ ) {
for ( int i = 0; i < nx; i++ ) {
DISP_IMAGE(i,j) =
4 * find_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
}
}
/*
* Links:
* http://www.dlr.de/rmc/rm/en/desktopdefault.aspx/tabid-9389/16104_read-39811/
* http://lunokhod.org/?p=1356
*/
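/*
 * Host pipeline: matching costs (absolute difference) -> directional cost aggregation
 * along the four axis-aligned paths (left/right/up/down; diagonals are not used here)
 * -> element-wise sum of the per-direction aggregations -> winner-take-all disparity selection.
 */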
// sgm code to run on the host
void sgmHost ( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range )
{
const int nx = w;
const int ny = h;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
free(costs);
free(dir_accumulated_costs);
create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
free(accumulated_costs);
}
// sgm code to run on the GPU
void sgmDevice ( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range )
{
int nx = w;
int ny = h;
int image_size = nx * ny * sizeof(int); // size in bytes
int costs_size = disp_range * image_size;
int image_dim = nx * ny;
int costs_dim = disp_range * nx * ny;
// hipError_t error;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(costs_dim, sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
//determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
/* launching the determine_costs() kernel */
int *d_left_image;
int *d_right_image;
int *d_costs;
//error = hipMalloc ((void **) &d_left_image, image_size)
hipMalloc((void **) &d_left_image, image_size);
hipMalloc((void **) &d_right_image, image_size);
hipMalloc((void **) &d_costs, costs_size);
hipMemset(d_costs, 0, costs_size);
hipMemcpy(d_left_image, h_leftIm, image_size, hipMemcpyHostToDevice);
hipMemcpy(d_right_image, h_rightIm, image_size, hipMemcpyHostToDevice);
int block_x = 32;
int block_y = (disp_range >= 16) ? 16 : disp_range; // 32 * 16 = 512
int z_blocks = (disp_range % block_y)
? ceil((float) disp_range / block_y) + 1
: ceil((float) disp_range / block_y);
int grid_x = ceil((float) nx / block_x) * z_blocks;
int grid_y = ny;
dim3 block(block_x, block_y);
dim3 grid(grid_x, grid_y);
hipLaunchKernelGGL(( d_determine_costs) , dim3(grid), dim3(block) , 0, 0, d_left_image, d_right_image, d_costs,
nx, ny, disp_range);
/* copying costs to host memory */
//hipMemcpy(costs, d_costs, costs_size, hipMemcpyDeviceToHost);
//hipMemcpy(d_costs, costs, costs_size, hipMemcpyHostToDevice);///////////////delete this
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
/* allocating space for dir_accumulated_costs and accumulated_costs on device */
int *d_accumulated_costs;
hipMalloc((void **) &d_accumulated_costs, costs_size);
hipMemset( d_accumulated_costs, 0, costs_size);
int *d_dir_accumulated_costs;
hipMalloc((void **) &d_dir_accumulated_costs, costs_size);
//
dim3 block1d(1);
dim3 grid1d(1);
if (costs_dim >= 512)
{
block1d.x = 512;
grid1d.x = ceil((float) costs_dim/512);
}
else
{ //not likely to happen
block1d.x = costs_dim;
grid1d.x = 1;
}
//geometry for d_iterate_direction kernels
// dim3 block1d_dir(1);
// dim3 grid1d_dir(1);
// block1d_dir.z = disp_range;
// grid1d_dir.y = ny;
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
// std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
// hipMemcpy(d_dir_accumulated_costs, dir_accumulated_costs, nx*ny*disp_range*sizeof(int), hipMemcpyHostToDevice);
// nx*ny*disp_range*sizeof(int), hipMemcpyDeviceToHost);
// iterate_direction ( dirx, diry, h_leftIm,
// costs, dir_accumulated_costs,
// nx, ny, disp_range );
hipMemset( d_dir_accumulated_costs, 0, costs_size);
d_iterate_direction ( d_costs,
d_left_image, d_dir_accumulated_costs,
dirx, diry, h_leftIm,
costs, dir_accumulated_costs,
nx, ny, disp_range );
inplace_sum_views ( accumulated_costs, dir_accumulated_costs,
nx, ny, disp_range );
// hipMemcpy(d_dir_accumulated_costs, dir_accumulated_costs, costs_size, hipMemcpyHostToDevice);
// hipMemcpy(d_accumulated_costs, accumulated_costs, costs_size, hipMemcpyHostToDevice);
//d_inplace_sum_views <<< grid1d, block1d >>> ( d_accumulated_costs, d_dir_accumulated_costs,
//nx, ny, disp_range);
// hipMemcpy(dir_accumulated_costs, d_dir_accumulated_costs, costs_size, hipMemcpyDeviceToHost);
// hipMemcpy(accumulated_costs, d_accumulated_costs, costs_size,
// hipMemcpyDeviceToHost);
// hipMemcpy(dir_accumulated_costs, d_dir_accumulated_costs, costs_size, hipMemcpyDeviceToHost);
// hipMemcpy(accumulated_costs, d_accumulated_costs, costs_size, hipMemcpyDeviceToHost);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
// std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
// hipMemcpy(d_dir_accumulated_costs, dir_accumulated_costs, nx*ny*disp_range*sizeof(int),
// hipMemcpyHostToDevice);
// iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
hipMemset( d_dir_accumulated_costs, 0, costs_size);
d_iterate_direction ( d_costs,
d_left_image, d_dir_accumulated_costs,
dirx, diry, h_leftIm,
costs, dir_accumulated_costs,
nx, ny, disp_range );
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny,
disp_range);
// hipMemcpy(d_dir_accumulated_costs, dir_accumulated_costs, costs_size, hipMemcpyHostToDevice);
// hipMemcpy(d_accumulated_costs, accumulated_costs, costs_size, hipMemcpyHostToDevice);
// d_inplace_sum_views <<< grid1d, block1d >>> ( d_accumulated_costs, d_dir_accumulated_costs,
// nx, ny, disp_range);
hipMemcpy(dir_accumulated_costs, d_dir_accumulated_costs, costs_size, hipMemcpyDeviceToHost);
hipMemcpy(accumulated_costs, d_accumulated_costs, costs_size, hipMemcpyDeviceToHost);
}
free(costs);
free(dir_accumulated_costs);
// device memory mgmt
hipFree(d_dir_accumulated_costs);
hipFree(d_left_image);
hipFree(d_right_image);
hipFree(d_costs);
if (image_dim >= 512)
{
block1d.x = 512;
grid1d.x = ceil((float) image_dim/512);
}
else
{ //not likely to happen
block1d.x = image_dim;
grid1d.x = 1;
}
grid1d.y = grid1d.z = 1;
block1d.y = block1d.z = 1;
int *d_disp_im;
hipMalloc((void **) &d_disp_im, image_size);
// hipMemcpy(d_accumulated_costs, accumulated_costs, costs_size, hipMemcpyHostToDevice);
// create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
// hipMemcpy(d_accumulated_costs, accumulated_costs, costs_size,
// hipMemcpyHostToDevice);
//
hipLaunchKernelGGL(( d_create_disparity_view) , dim3(grid1d), dim3(block1d) , 0, 0, d_accumulated_costs, d_disp_im, nx, ny, disp_range );
hipMemcpy(h_dispIm, d_disp_im, image_size, hipMemcpyDeviceToHost);
hipFree(d_disp_im);
hipFree(d_accumulated_costs);
free(accumulated_costs);
}
// print command line format
void usage ( char *command )
{
printf("Usage: %s [-h] [-d device] [-l leftimage] [-r rightimage] [-o dev_dispimage] [-t host_dispimage] [-p disprange] \n",command);
}
// main
int
main ( int argc, char** argv )
{
// default command line options
int deviceId = 0;
int disp_range = 32;
char *leftIn =(char *)"lbull.pgm",
*rightIn =(char *)"rbull.pgm",
*fileOut =(char *)"d_dbull.pgm",
*referenceOut=(char *)"h_dbull.pgm";
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:l:o:r:t:p:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'l': // left image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
leftIn = strdup(optarg);
break;
case 'r': // right image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
rightIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 't': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'p': // disp_range
if(sscanf(optarg,"%d",&disp_range)==0)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
if(optind < argc) {
fprintf(stderr,"Error in arguments\n");
usage(argv[0]);
exit(1);
}
// select cuda device
cutilSafeCall( hipSetDevice( deviceId ) );
// create events to measure host sgm time and device sgm time
hipEvent_t startH, stopH, startD, stopD;
hipEventCreate(&startH);
hipEventCreate(&stopH);
hipEventCreate(&startD);
hipEventCreate(&stopD);
// allocate host memory
int* h_ldata=NULL;
int* h_rdata=NULL;
unsigned int h,w;
//load left pgm
if (cutLoadPGMi(leftIn, (unsigned int **)&h_ldata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", leftIn);
exit(1);
}
//load right pgm
if (cutLoadPGMi(rightIn, (unsigned int **)&h_rdata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", rightIn);
exit(1);
}
// allocate mem for the result on host side
int* h_odata = (int*) malloc( h*w*sizeof(int));
int* reference = (int*) malloc( h*w*sizeof(int));
// sgm at host
hipEventRecord( startH, 0 );
sgmHost(h_ldata, h_rdata, reference, w, h, disp_range);
hipEventRecord( stopH, 0 );
hipEventSynchronize( stopH );
// sgm at GPU
hipEventRecord( startD, 0 );
sgmDevice(h_ldata, h_rdata, h_odata, w, h, disp_range);
hipEventRecord( stopD, 0 );
hipEventSynchronize( stopD );
  // check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
hipEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
hipEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
// save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_ldata);
cutFree( h_rdata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
| b1a993eb1929220167c3c9174654b5162878dfb7.cu |
// Based on CUDA SDK template from NVIDIA
// sgm algorithm adapted from http://lunokhod.org/?p=1403
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h>
#include <limits.h>
#include <limits>
#include <algorithm>
// includes, project
#include <cutil_inline.h>
#define MMAX_BRIGHTNESS 255
#define PENALTY1 15
#define PENALTY2 100
#define COSTS(i,j,d) costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define ACCUMULATED_COSTS(i,j,d) accumulated_costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define LEFT_IMAGE(i,j) left_image[(i)+(j)*nx]
#define RIGHT_IMAGE(i,j) right_image[(i)+(j)*nx]
#define DISP_IMAGE(i,j) disp_image[(i)+(j)*nx]
#define MMAX(a,b) (((a)>(b))?(a):(b))
#define MMIN(a,b) (((a)<(b))?(a):(b))
/* function headers */
void determine_costs(const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range);
void evaluate_path( const int *prior, const int* local,
int path_intensity_gradient, int *curr_cost,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range ) ;
void d_iterate_direction ( dim3 block, dim3 grid, const int* d_costs,
const int *d_left_image, int *d_accumulated_costs,
const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range ) ;
int find_min_index( const int *v, const int dist_range ) ;
void create_disparity_view( const int *accumulated_costs , int * disp_image, int nx, int ny) ;
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range );
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range );
void usage(char *command);
__device__ int d_find_min_index ( const int *v, const int disp_range );
__device__ void d_evaluate_path ( int *prior, int *local,
int path_intensity_gradient, int *curr_cost,
int nx, int ny, int disp_range );
/* device functions and kernels */
// __global__ void d_determine_costs ( int *left_image, int *right_image, int *costs,
// int nx, int ny, int disp_range )
// {
// int col_size = ceil((float) disp_range / blockDim.y);
// int x = (ceil((float)blockIdx.x / col_size) * blockDim.x) + threadIdx.x;
// int y = blockIdx.y;
// int d = ((blockIdx.x % col_size) * blockDim.y) + threadIdx.y;
// if ( (y < ny) && (d < disp_range) && (x < nx))
// {
// COSTS(x,y,d) = 255u;
// if (x >= d)
// COSTS(x,y,d) = abs( LEFT_IMAGE(x,y) - RIGHT_IMAGE(x-d,y) );
// }
// }
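/*
 * One thread per (x, y, d) triple: blocks of (block_x, block_y) threads cover x and d,
 * disparity sub-blocks are packed along grid.x and image rows along grid.y. Each thread
 * writes the absolute-difference matching cost COSTS(x,y,d) = |L(x,y) - R(x-d,y)|
 * (the cost stays at 255 when x < d).
 */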
__global__ void d_determine_costs ( int *left_image, int *right_image, int *costs,
int nx, int ny, int disp_range )
{
int row_size = ceil((float) nx / blockDim.x);
int x = ((blockIdx.x % row_size) * blockDim.x) + threadIdx.x;
int y = blockIdx.y;
int d = ((blockIdx.x / row_size) * blockDim.y) + threadIdx.y;
if ( (y < ny) && (d < disp_range) && (x < nx))
{
COSTS(x,y,d) = 255u;
if (x >= d)
COSTS(x,y,d) = abs( LEFT_IMAGE(x,y) - RIGHT_IMAGE(x-d,y) );
}
}
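/*
 * Left-to-right aggregation: one block per image row (blockIdx.y), one thread per
 * disparity (threadIdx.z). The block seeds column 0 with the raw costs and then sweeps
 * x sequentially, updating the path cost via d_evaluate_path at every column.
 */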
__global__ void d_iterate_direction_dirxpos ( const int dirx, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = 0;
int y = blockIdx.y;
int d = threadIdx.z;
if ( (y < ny) && (d < disp_range) )
{
ACCUMULATED_COSTS(0,y,d) += COSTS(0,y,d);
__syncthreads();
for (x = 1; x < nx; x++)
{
d_evaluate_path( &ACCUMULATED_COSTS(x-dirx,y,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x-dirx,y)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
}
__global__ void d_iterate_direction_dirypos ( const int diry, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = blockIdx.y;
int y = 0;
int d = threadIdx.z;
if ( (x < nx) && (d < disp_range) )
{
    ACCUMULATED_COSTS(x,0,d) += COSTS(x,0,d);
__syncthreads();
for (y = 1; y < ny; y++)
{
d_evaluate_path( &ACCUMULATED_COSTS(x,y-diry,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x,y-diry)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
// const int WIDTH = nx;
// const int HEIGHT = ny;
// for ( int i = 0; i < WIDTH; i++ ) {
// for ( int j = 0; j < HEIGHT; j++ ) {
// if(j==0) {
// for ( int d = 0; d < disp_range; d++ ) {
// ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
// }
// }
// else {
// evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
// &COSTS(i,j,0),
// abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
// &ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
// }
// }
// }
}
__global__ void d_iterate_direction_dirxneg ( const int dirx, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = nx-1;
int y = blockIdx.y;
int d = threadIdx.z;
if ( (y < ny) && (d < disp_range) )
{
ACCUMULATED_COSTS(nx-1,y,d) += COSTS(nx-1,y,d);
__syncthreads();
for (x = nx-2; x >= 0; x--)
{
d_evaluate_path( &ACCUMULATED_COSTS(x-dirx,y,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x-dirx,y)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
// const int WIDTH = nx;
// const int HEIGHT = ny;
// for ( int j = 0; j < HEIGHT; j++ ) {
// for ( int i = WIDTH-1; i >= 0; i-- ) {
// if(i==WIDTH-1) {
// for ( int d = 0; d < disp_range; d++ ) {
// ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
// }
// }
// else {
// evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
// &COSTS(i,j,0),
// abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
// &ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
// }
// }
// }
}
__global__ void d_iterate_direction_diryneg ( const int diry, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = blockIdx.y;
int y = ny-1;
int d = threadIdx.z;
if ( (x < nx) && (d < disp_range) )
{
    ACCUMULATED_COSTS(x,ny-1,d) += COSTS(x,ny-1,d);
__syncthreads();
for (y = ny-2; y >= 0; y--)
{
d_evaluate_path( &ACCUMULATED_COSTS(x,y-diry,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x,y-diry)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
// const int WIDTH = nx;
// const int HEIGHT = ny;
// for ( int i = 0; i < WIDTH; i++ ) {
// for ( int j = HEIGHT-1; j >= 0; j-- ) {
// if(j==HEIGHT-1) {
// for ( int d = 0; d < disp_range; d++ ) {
// ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
// }
// }
// else {
// evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
// &COSTS(i,j,0),
// abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
// &ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
// }
// }
// }
}
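/*
 * Device counterpart of evaluate_path: each thread owns one disparity d (threadIdx.z)
 * and applies the smoothness-penalised update (no penalty for d_p == d, PENALTY1 for
 * |d_p - d| == 1, max(PENALTY1, PENALTY2/gradient) otherwise), then subtracts the
 * minimum of the prior column to keep the accumulated costs bounded.
 */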
__device__ void d_evaluate_path ( int *prior, int *local,
int path_intensity_gradient, int *curr_cost,
int nx, int ny, int disp_range )
{
//memcpy(curr_cost, local, sizeof(int)*disp_range);
int d = threadIdx.z;
curr_cost[d] = local[threadIdx.z];
__syncthreads();
int e_smooth = INT_MAX;
for ( int d_p = 0; d_p < disp_range; d_p++ )
{
if ( d_p - d == 0 ) {
      // No penalty
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
      // Small penalty
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
      // Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
__syncthreads();
curr_cost[d] += e_smooth;
int min = INT_MAX;
for ( int d1 = 0; d1 < disp_range; d1++ ) {
if (prior[d1]<min) min=prior[d1];
}
__syncthreads();
curr_cost[d] -= min;
}
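// Element-wise sum of two cost volumes: one thread per entry of the nx*ny*disp_range array.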
__global__ void d_inplace_sum_views ( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int pos = (blockIdx.x * blockDim.x) + threadIdx.x;
int size = nx * ny * disp_range;
if ( pos < size )
im1[pos] += im2[pos];
}
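// One thread per pixel: winner-take-all over the disparity axis via d_find_min_index,
// scaled by 4 like the host version.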
__global__ void d_create_disparity_view ( int *accumulated_costs , int * disp_image,
int nx, int ny, int disp_range )
{
int pos = (blockIdx.x * blockDim.x) + threadIdx.x;
int size = nx * ny ;
if ( pos < size )
disp_image[pos] = 4 * d_find_min_index(&accumulated_costs[pos * disp_range], disp_range);
}
__device__ int d_find_min_index ( const int *v, const int disp_range )
{
int min = INT_MAX;
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
/* functions code */
void determine_costs ( const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range )
{
std::fill(costs, costs+nx*ny*disp_range, 255u);
for ( int j = 0; j < ny; j++ ) {
for ( int d = 0; d < disp_range; d++ ) {
for ( int i = d; i < nx; i++ ) {
COSTS(i,j,d) = abs( LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j) );
}
}
}
}
void iterate_direction_dirxpos ( const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = 0; i < WIDTH; i++ ) {
if(i==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)) ,
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
}
}
}
void iterate_direction_dirypos ( const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_dirxneg ( const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_diryneg ( const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
}
}
/*
*d_iterate_direction: computes iterate_direction_dirxpos() using the
*the GPU
*/
void d_iterate_direction ( int* d_costs,
const int *d_left_image, int *d_accumulated_costs,
const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
dim3 block1d(1);
dim3 grid1d(1);
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
//iterate_direction_dirxpos(dirx,left_image,costs,accumulated_costs, nx,
// ny, disp_range);
block1d.z = disp_range;
grid1d.y = ny;
cudaMemcpy(d_accumulated_costs, accumulated_costs, nx*ny*disp_range*sizeof(int), cudaMemcpyHostToDevice);
d_iterate_direction_dirxpos <<< grid1d, block1d >>> ( dirx, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
    cudaMemcpy(accumulated_costs, d_accumulated_costs,
               nx*ny*disp_range*sizeof(int), cudaMemcpyDeviceToHost);
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
// iterate_direction_dirypos(diry,left_image,costs,accumulated_costs, nx,
// ny, disp_range);
block1d.z = disp_range;
grid1d.y = nx;
cudaMemcpy(d_accumulated_costs, accumulated_costs, nx*ny*disp_range*sizeof(int), cudaMemcpyHostToDevice);
d_iterate_direction_dirypos <<< grid1d, block1d >>> ( diry, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
    cudaMemcpy(accumulated_costs, d_accumulated_costs,
               nx*ny*disp_range*sizeof(int), cudaMemcpyDeviceToHost);
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
//iterate_direction_dirxneg(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
block1d.z = disp_range;
grid1d.y = ny;
cudaMemcpy(d_accumulated_costs, accumulated_costs,
nx*ny*disp_range*sizeof(int), cudaMemcpyHostToDevice);
d_iterate_direction_dirxneg <<< grid1d, block1d >>> ( dirx, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
cudaMemcpy(accumulated_costs, d_accumulated_costs,
nx*ny*disp_range*sizeof(int), cudaMemcpyDeviceToHost);
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
//iterate_direction_diryneg(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
block1d.z = disp_range;
grid1d.y = nx;
cudaMemcpy(d_accumulated_costs, accumulated_costs,
nx*ny*disp_range*sizeof(int), cudaMemcpyHostToDevice);
d_iterate_direction_diryneg <<< grid1d, block1d >>> ( diry, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
cudaMemcpy(accumulated_costs, d_accumulated_costs,
nx*ny*disp_range*sizeof(int),
cudaMemcpyDeviceToHost);
}
}
void iterate_direction ( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
iterate_direction_dirxpos(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
iterate_direction_dirypos(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
iterate_direction_dirxneg(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
iterate_direction_diryneg(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
}
// ADD two cost images
void inplace_sum_views ( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int *im1_init = im1;
while ( im1 != (im1_init + (nx*ny*disp_range)) ) {
*im1 += *im2;
im1++;
im2++;
}
}
int find_min_index ( const int *v, const int disp_range )
{
int min = std::numeric_limits<int>::max();
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
void evaluate_path ( const int *prior, const int *local,
int path_intensity_gradient, int *curr_cost ,
const int nx, const int ny, const int disp_range )
{
memcpy(curr_cost, local, sizeof(int)*disp_range);
for ( int d = 0; d < disp_range; d++ ) {
int e_smooth = std::numeric_limits<int>::max();
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
      // No penalty
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
      // Small penalty
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
      // Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
}
int min = std::numeric_limits<int>::max();
for ( int d = 0; d < disp_range; d++ ) {
if (prior[d]<min) min=prior[d];
}
for ( int d = 0; d < disp_range; d++ ) {
curr_cost[d]-=min;
}
}
void create_disparity_view ( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range )
{
for ( int j = 0; j < ny; j++ ) {
for ( int i = 0; i < nx; i++ ) {
DISP_IMAGE(i,j) =
4 * find_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
}
}
/*
* Links:
* http://www.dlr.de/rmc/rm/en/desktopdefault.aspx/tabid-9389/16104_read-39811/
* http://lunokhod.org/?p=1356
*/
// sgm code to run on the host
void sgmHost ( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range )
{
const int nx = w;
const int ny = h;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
free(costs);
free(dir_accumulated_costs);
create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
free(accumulated_costs);
}
// sgm code to run on the GPU
void sgmDevice ( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range )
{
int nx = w;
int ny = h;
int image_size = nx * ny * sizeof(int); // size in bytes
int costs_size = disp_range * image_size;
int image_dim = nx * ny;
int costs_dim = disp_range * nx * ny;
// cudaError error;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(costs_dim, sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
//determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
/* launching the determine_costs() kernel */
int *d_left_image;
int *d_right_image;
int *d_costs;
//error = cudaMalloc ((void **) &d_left_image, image_size)
cudaMalloc((void **) &d_left_image, image_size);
cudaMalloc((void **) &d_right_image, image_size);
cudaMalloc((void **) &d_costs, costs_size);
cudaMemset(d_costs, 0, costs_size);
cudaMemcpy(d_left_image, h_leftIm, image_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_right_image, h_rightIm, image_size, cudaMemcpyHostToDevice);
int block_x = 32;
int block_y = (disp_range >= 16) ? 16 : disp_range; // 32 * 16 = 512
int z_blocks = (disp_range % block_y)
? ceil((float) disp_range / block_y) + 1
: ceil((float) disp_range / block_y);
int grid_x = ceil((float) nx / block_x) * z_blocks;
int grid_y = ny;
dim3 block(block_x, block_y);
dim3 grid(grid_x, grid_y);
d_determine_costs <<< grid, block >>> (d_left_image, d_right_image, d_costs,
nx, ny, disp_range);
/* copying costs to host memory */
//cudaMemcpy(costs, d_costs, costs_size, cudaMemcpyDeviceToHost);
//cudaMemcpy(d_costs, costs, costs_size, cudaMemcpyHostToDevice);///////////////delete this
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
/* allocating space for dir_accumulated_costs and accumulated_costs on device */
int *d_accumulated_costs;
cudaMalloc((void **) &d_accumulated_costs, costs_size);
cudaMemset( d_accumulated_costs, 0, costs_size);
int *d_dir_accumulated_costs;
cudaMalloc((void **) &d_dir_accumulated_costs, costs_size);
//
dim3 block1d(1);
dim3 grid1d(1);
if (costs_dim >= 512)
{
block1d.x = 512;
grid1d.x = ceil((float) costs_dim/512);
}
else
{ //not likely to happen
block1d.x = costs_dim;
grid1d.x = 1;
}
//geometry for d_iterate_direction kernels
// dim3 block1d_dir(1);
// dim3 grid1d_dir(1);
// block1d_dir.z = disp_range;
// grid1d_dir.y = ny;
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
// std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
// cudaMemcpy(d_dir_accumulated_costs, dir_accumulated_costs, nx*ny*disp_range*sizeof(int), cudaMemcpyHostToDevice);
// nx*ny*disp_range*sizeof(int), cudaMemcpyDeviceToHost);
// iterate_direction ( dirx, diry, h_leftIm,
// costs, dir_accumulated_costs,
// nx, ny, disp_range );
cudaMemset( d_dir_accumulated_costs, 0, costs_size);
d_iterate_direction ( d_costs,
d_left_image, d_dir_accumulated_costs,
dirx, diry, h_leftIm,
costs, dir_accumulated_costs,
nx, ny, disp_range );
inplace_sum_views ( accumulated_costs, dir_accumulated_costs,
nx, ny, disp_range );
// cudaMemcpy(d_dir_accumulated_costs, dir_accumulated_costs, costs_size, cudaMemcpyHostToDevice);
// cudaMemcpy(d_accumulated_costs, accumulated_costs, costs_size, cudaMemcpyHostToDevice);
//d_inplace_sum_views <<< grid1d, block1d >>> ( d_accumulated_costs, d_dir_accumulated_costs,
//nx, ny, disp_range);
// cudaMemcpy(dir_accumulated_costs, d_dir_accumulated_costs, costs_size, cudaMemcpyDeviceToHost);
// cudaMemcpy(accumulated_costs, d_accumulated_costs, costs_size,
// cudaMemcpyDeviceToHost);
// cudaMemcpy(dir_accumulated_costs, d_dir_accumulated_costs, costs_size, cudaMemcpyDeviceToHost);
// cudaMemcpy(accumulated_costs, d_accumulated_costs, costs_size, cudaMemcpyDeviceToHost);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
// std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
// cudaMemcpy(d_dir_accumulated_costs, dir_accumulated_costs, nx*ny*disp_range*sizeof(int),
// cudaMemcpyHostToDevice);
// iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
cudaMemset( d_dir_accumulated_costs, 0, costs_size);
d_iterate_direction ( d_costs,
d_left_image, d_dir_accumulated_costs,
dirx, diry, h_leftIm,
costs, dir_accumulated_costs,
nx, ny, disp_range );
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny,
disp_range);
// cudaMemcpy(d_dir_accumulated_costs, dir_accumulated_costs, costs_size, cudaMemcpyHostToDevice);
// cudaMemcpy(d_accumulated_costs, accumulated_costs, costs_size, cudaMemcpyHostToDevice);
// d_inplace_sum_views <<< grid1d, block1d >>> ( d_accumulated_costs, d_dir_accumulated_costs,
// nx, ny, disp_range);
cudaMemcpy(dir_accumulated_costs, d_dir_accumulated_costs, costs_size, cudaMemcpyDeviceToHost);
cudaMemcpy(accumulated_costs, d_accumulated_costs, costs_size, cudaMemcpyDeviceToHost);
}
free(costs);
free(dir_accumulated_costs);
// device memory mgmt
cudaFree(d_dir_accumulated_costs);
cudaFree(d_left_image);
cudaFree(d_right_image);
cudaFree(d_costs);
if (image_dim >= 512)
{
block1d.x = 512;
grid1d.x = ceil((float) image_dim/512);
}
else
{ //not likely to happen
block1d.x = image_dim;
grid1d.x = 1;
}
grid1d.y = grid1d.z = 1;
block1d.y = block1d.z = 1;
int *d_disp_im;
cudaMalloc((void **) &d_disp_im, image_size);
// cudaMemcpy(d_accumulated_costs, accumulated_costs, costs_size, cudaMemcpyHostToDevice);
// create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
// cudaMemcpy(d_accumulated_costs, accumulated_costs, costs_size,
// cudaMemcpyHostToDevice);
//
d_create_disparity_view <<< grid1d, block1d >>> ( d_accumulated_costs, d_disp_im, nx, ny, disp_range );
cudaMemcpy(h_dispIm, d_disp_im, image_size, cudaMemcpyDeviceToHost);
cudaFree(d_disp_im);
cudaFree(d_accumulated_costs);
free(accumulated_costs);
}
// print command line format
void usage ( char *command )
{
printf("Usage: %s [-h] [-d device] [-l leftimage] [-r rightimage] [-o dev_dispimage] [-t host_dispimage] [-p disprange] \n",command);
}
// main
int
main ( int argc, char** argv )
{
// default command line options
int deviceId = 0;
int disp_range = 32;
char *leftIn =(char *)"lbull.pgm",
*rightIn =(char *)"rbull.pgm",
*fileOut =(char *)"d_dbull.pgm",
*referenceOut=(char *)"h_dbull.pgm";
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:l:o:r:t:p:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'l': // left image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
leftIn = strdup(optarg);
break;
case 'r': // right image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
rightIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 't': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'p': // disp_range
if(sscanf(optarg,"%d",&disp_range)==0)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
if(optind < argc) {
fprintf(stderr,"Error in arguments\n");
usage(argv[0]);
exit(1);
}
// select cuda device
cutilSafeCall( cudaSetDevice( deviceId ) );
// create events to measure host sgm time and device sgm time
cudaEvent_t startH, stopH, startD, stopD;
cudaEventCreate(&startH);
cudaEventCreate(&stopH);
cudaEventCreate(&startD);
cudaEventCreate(&stopD);
// allocate host memory
int* h_ldata=NULL;
int* h_rdata=NULL;
unsigned int h,w;
//load left pgm
if (cutLoadPGMi(leftIn, (unsigned int **)&h_ldata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", leftIn);
exit(1);
}
//load right pgm
if (cutLoadPGMi(rightIn, (unsigned int **)&h_rdata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", rightIn);
exit(1);
}
// allocate mem for the result on host side
int* h_odata = (int*) malloc( h*w*sizeof(int));
int* reference = (int*) malloc( h*w*sizeof(int));
// sgm at host
cudaEventRecord( startH, 0 );
sgmHost(h_ldata, h_rdata, reference, w, h, disp_range);
cudaEventRecord( stopH, 0 );
cudaEventSynchronize( stopH );
// sgm at GPU
cudaEventRecord( startD, 0 );
sgmDevice(h_ldata, h_rdata, h_odata, w, h, disp_range);
cudaEventRecord( stopD, 0 );
cudaEventSynchronize( stopD );
  // check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
cudaEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
cudaEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
// save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_ldata);
cutFree( h_rdata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
|
b26e1d0a092c62a28675609e82dbc8efe5458b5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
if (index >= n) return;
bools[index] = idata[index] != 0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
if (index >= n) return;
if (bools[index]) {
odata[indices[index]] = idata[index];
}
}
}
}
| b26e1d0a092c62a28675609e82dbc8efe5458b5e.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
if (index >= n) return;
bools[index] = idata[index] != 0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
if (index >= n) return;
if (bools[index]) {
odata[indices[index]] = idata[index];
}
}
}
}
|
402346c4a2c17ae69325598af4f453a09208fc9d.hip | // !!! This is a file automatically generated by hipify!!!
/*
Little game (Manna sandpile):
1) Let h[i] be the number of grains at site i, 0<i<N-1.
2) If h[i]>1, site i is "active".
3) At time t, an "active" site "topples" completely, throwing each of its grains at random, with equal probability, to the left or to the right (so the total number of grains is conserved).
4) Sites topple synchronously. Hence, at time (t+1), an active site i will have h[i]=0 only if its neighbours did not throw grains at it at time t.
5) The activity A is defined as the number of active sites, i.e. the number of sites that want to topple.
Note that if the grain density, [Sum_i h[i]/N], is very low, the activity quickly falls to zero. If the density is high, on the other hand, the activity never ceases, since there will always be active sites. In between there is a "critical" density, for which the activity decays as a power law (but large systems and long times are needed to see it well defined).
*/
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "helper_cuda.h"
#include "hiprand/hiprand.h"
#include "hiprand/hiprand_kernel.h"
#include <iostream>
#include <fstream>
#include <cstring>
#include <array>
#include <vector>
#include <cstdlib>
#include <random>
#include <cassert>
// number of sites
#ifdef NSLOTS
#define N NSLOTS
#else
#define N (1024*1024)
#endif
#define SIZE (N * 4)
#define BLOCK_SIZE 256
#define DENSITY 0.8924
// number of temporal steps
#define NSTEPS 10000
using namespace std;
typedef int * Manna_Array;
//fastest prng is XORWOW, default.
//~ #define hiprandState_t hiprandStatePhilox4_32_10_t //not so slow
//~ #define hiprandState_t hiprandStateMRG32k3a_t //slowest by far
__device__ hiprandState_t seed[1];
__device__ hiprandState_t rand_state[N];
__global__ void seedinit(int first_num){ //60ms, not top priority
hiprand_init(first_num,0,0,seed);
for(int i=0; i<N; i++) //must do it sequentially because of race conditions in hiprand(seed)
hiprand_init(hiprand(seed),0,0,&rand_state[i]);
}
__device__ static inline bool randbool(hiprandState_t *rand_state){
//~ return 1; //trick to fix behaviour
return 1&hiprand(rand_state);
}
// INITIAL CONDITION ---------------------------------------------------------------
/*
To generate a sufficiently uniform initial condition with a density
as close as possible (exact when N->infinity) to the real number DENSITY, we can do the following:
*/
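// Each site gets h[i] = floor((i+1)*DENSITY) - floor(i*DENSITY) grains (0 or 1 here, since
// DENSITY < 1), so the running total after site i is floor((i+1)*DENSITY) and the overall
// density approaches DENSITY as N grows.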
__global__ void inicializacion(Manna_Array __restrict__ h)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
h[i] = (int)((i+1)*DENSITY)-(int)(i*DENSITY);
}
__device__ void imprimir_array(Manna_Array __restrict__ h)
{
int nrogranitos=0;
int nrogranitos_activos=0;
  // this draws the grains at each site and counts them
for(int i = 0; i < 10; ++i) {
printf("%d ",h[i]);
nrogranitos += h[i];
nrogranitos_activos += (h[i]>1);
}
printf("\nHay %d granos en total\n",nrogranitos);
printf("De ellos %d activos\n",nrogranitos_activos);
printf("densidad obtenida es %f, mientras que la deseada era %f\n\n",nrogranitos*1.0/N,DENSITY);
}
__global__ void desestabilizacion_inicial(Manna_Array __restrict__ h, Manna_Array __restrict__ dh)
{
unsigned int gtid = blockIdx.x*blockDim.x + threadIdx.x;
if (h[gtid]) {
int k = (gtid+2*randbool(&rand_state[gtid])-1+N)%N;
//~ int k = (gtid+2*((gtid%3)%2)-1+N)%N; //trick to fix behavior
atomicAdd(&dh[k], 1);
h[gtid] = 0;
}
}
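// Toppling step (rule 3): each thread owns one site; an active site (h > 1) sends every
// grain to a random neighbour via atomicAdd on dh, while an inactive site just carries its
// grains over. h is cleared so h and dh can be swapped afterwards; thread 0 also resets the
// activity counter for this step.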
__global__ void descargar(Manna_Array __restrict__ h, Manna_Array __restrict__ dh, unsigned int * __restrict__ slots_activos)
{
unsigned int gtid = blockIdx.x*blockDim.x + threadIdx.x;
  //~ unsigned int tid = threadIdx.x; // thread id within the block
  //~ unsigned int lane = tid & CUDA_WARP_MASK; // thread id within the warp, aka lane
  //~ uint warp = tid / CUDA_WARP_SIZE; // warp within the block
  //~ uint gwarp = gtid / CUDA_WARP_SIZE; // global warp identifier
  //~ uint bid = blockIdx.x; // block identifier
hiprandState_t *thread_state = &rand_state[gtid]; //doesn't get better if I use a local copy and then copy back
if (h[gtid] > 1) {
for (int j = 0; j < h[gtid]; ++j) {
int k = (gtid+2*randbool(thread_state)-1+N)%N;
atomicAdd(&dh[k], 1);
}
} else atomicAdd(&dh[gtid], h[gtid]);
h[gtid] = 0;
if(gtid==0) *slots_activos=0;
}
__global__ void old_actualizar(Manna_Array __restrict__ h, Manna_Array __restrict__ dh)
{
unsigned int gtid = blockIdx.x*blockDim.x + threadIdx.x;
h[gtid]+=dh[gtid];
dh[gtid]=0; //zeroes dh array
}
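// After the h/dh swap, count the active sites (h > 1) into *slots_activos with atomicAdd.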
__global__ void actualizar(Manna_Array __restrict__ h, Manna_Array __restrict__ dh, unsigned int * __restrict__ slots_activos)
{
unsigned int gtid = blockIdx.x*blockDim.x + threadIdx.x;
if(h[gtid]>1)
atomicAdd(slots_activos, 1);
}
__device__ unsigned int slots_activos;
/*
Dynamic parallelism:
When a parent thread block launches a child grid, the child is not guaranteed to begin
execution until the parent thread block reaches an explicit synchronization point (e.g.
hipDeviceSynchronize()).
*/
__global__ void run_manna(unsigned int *activity, Manna_Array h, Manna_Array dh) {
int t = 0;
do {
hipLaunchKernelGGL(( descargar), dim3(N/BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, h,dh,&activity[t]);
Manna_Array tmp = h;
h = dh;
dh = tmp;
hipLaunchKernelGGL(( actualizar), dim3(N/BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, h,dh,&activity[t]);
//~ hipDeviceSynchronize();
//~ activity[t] = slots_activos;
++t;
    //~ } while(slots_activos > 0 && t < NSTEPS); // if the activity decays to zero, this stops evolving...
  } while(t < NSTEPS); // now it runs all NSTEPS no matter what
}
__device__ Manna_Array h,dh;
__device__ unsigned int *activity;
unsigned int activity_host[NSTEPS];
//===================================================================
int main(){
ios::sync_with_stdio(0); cin.tie(0);
assert(N%BLOCK_SIZE==0);
//random initialization
hipLaunchKernelGGL(( seedinit), dim3(1),dim3(1), 0, 0, time(NULL)); //initialize a state per thread with some random seed
getLastCudaError("seedinit failed");
//slots
checkCudaErrors(hipMalloc(&h, N*sizeof(int)));
checkCudaErrors(hipMalloc(&dh, N*sizeof(int)));
checkCudaErrors(hipMalloc(&activity, NSTEPS*sizeof(unsigned int)));
checkCudaErrors(hipMemset(dh, 0, N*sizeof(int)));
//initialize slots
cout << "estado inicial estable de la pila de arena...";
hipLaunchKernelGGL(( inicializacion), dim3(N/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, h);
getLastCudaError("inicializacion failed");
cout << "LISTO\n";
#ifdef DEBUG
imprimir_array(h);
#endif
//create some chaos among slots
cout << "estado inicial desestabilizado de la pila de arena...";
hipLaunchKernelGGL(( desestabilizacion_inicial), dim3(N/BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, h,dh);
getLastCudaError("desestabilizacion failed");
hipLaunchKernelGGL(( old_actualizar), dim3(N/BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, h,dh);
getLastCudaError("actualizar failed");
cout << "LISTO\n";
#ifdef DEBUG
imprimir_array(h);
#endif
cout << "evolucion de la pila de arena..."; cout.flush();
ofstream activity_out("activity.dat");
hipLaunchKernelGGL(( run_manna), dim3(1),dim3(1), 0, 0, activity,h,dh);
checkCudaErrors(hipMemcpy(activity_host, activity, sizeof(activity_host), hipMemcpyDeviceToHost));
bool timeout = true;
for (int i = 0; i < NSTEPS; i++) {
activity_out << activity_host[i] << "\n";
if (!activity_host[i]) { timeout = false; break; }
}
cout << "LISTO: " << ((timeout)?("se acabo el tiempo\n\n"):("la actividad decayo a cero\n\n")); cout.flush();
//free everything
hipFree(h);
hipFree(dh);
hipFree(activity);
return 0;
}
| 402346c4a2c17ae69325598af4f453a09208fc9d.cu | /*
Little game (Manna sandpile):
1) Let h[i] be the number of grains at site i, 0<i<N-1.
2) If h[i]>1, site i is "active".
3) At time t, an "active" site "topples" completely, throwing each of its grains at random, with equal probability, to the left or to the right (so the total number of grains is conserved).
4) Sites topple synchronously. Hence, at time (t+1), an active site i will have h[i]=0 only if its neighbours did not throw grains at it at time t.
5) The activity A is defined as the number of active sites, i.e. the number of sites that want to topple.
Note that if the grain density, [Sum_i h[i]/N], is very low, the activity quickly falls to zero. If the density is high, on the other hand, the activity never ceases, since there will always be active sites. In between there is a "critical" density, for which the activity decays as a power law (but large systems and long times are needed to see it well defined).
*/
#include <cuda.h>
#include "cuda_runtime.h"
#include "helper_cuda.h"
#include "curand.h"
#include "curand_kernel.h"
#include <iostream>
#include <fstream>
#include <cstring>
#include <array>
#include <vector>
#include <cstdlib>
#include <random>
#include <cassert>
// number of sites
#ifdef NSLOTS
#define N NSLOTS
#else
#define N (1024*1024)
#endif
#define SIZE (N * 4)
#define BLOCK_SIZE 256
#define DENSITY 0.8924
// number of temporal steps
#define NSTEPS 10000
using namespace std;
typedef int * Manna_Array;
//fastest prng is XORWOW, default.
//~ #define curandState curandStatePhilox4_32_10_t //not so slow
//~ #define curandState curandStateMRG32k3a_t //slowest by far
__device__ curandState seed[1];
__device__ curandState rand_state[N];
__global__ void seedinit(int first_num){ //60ms, not top priority
curand_init(first_num,0,0,seed);
for(int i=0; i<N; i++) //must do it sequentially because of race conditions in curand(seed)
curand_init(curand(seed),0,0,&rand_state[i]);
}
__device__ static inline bool randbool(curandState *rand_state){
//~ return 1; //trick to fix behaviour
return 1&curand(rand_state);
}
// INITIAL CONDITION ---------------------------------------------------------------
/*
To generate a sufficiently uniform initial condition with a density
as close as possible (exact when N->infinity) to the real number DENSITY, we can do the following:
*/
__global__ void inicializacion(Manna_Array __restrict__ h)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
h[i] = (int)((i+1)*DENSITY)-(int)(i*DENSITY);
}
__device__ void imprimir_array(Manna_Array __restrict__ h)
{
int nrogranitos=0;
int nrogranitos_activos=0;
  // this draws the grains at each site and counts them
for(int i = 0; i < 10; ++i) {
printf("%d ",h[i]);
nrogranitos += h[i];
nrogranitos_activos += (h[i]>1);
}
printf("\nHay %d granos en total\n",nrogranitos);
printf("De ellos %d activos\n",nrogranitos_activos);
printf("densidad obtenida es %f, mientras que la deseada era %f\n\n",nrogranitos*1.0/N,DENSITY);
}
__global__ void desestabilizacion_inicial(Manna_Array __restrict__ h, Manna_Array __restrict__ dh)
{
unsigned int gtid = blockIdx.x*blockDim.x + threadIdx.x;
if (h[gtid]) {
int k = (gtid+2*randbool(&rand_state[gtid])-1+N)%N;
//~ int k = (gtid+2*((gtid%3)%2)-1+N)%N; //trick to fix behavior
atomicAdd(&dh[k], 1);
h[gtid] = 0;
}
}
__global__ void descargar(Manna_Array __restrict__ h, Manna_Array __restrict__ dh, unsigned int * __restrict__ slots_activos)
{
unsigned int gtid = blockIdx.x*blockDim.x + threadIdx.x;
  //~ unsigned int tid = threadIdx.x; // thread id within the block
  //~ unsigned int lane = tid & CUDA_WARP_MASK; // thread id within the warp, aka lane
  //~ uint warp = tid / CUDA_WARP_SIZE; // warp within the block
  //~ uint gwarp = gtid / CUDA_WARP_SIZE; // global warp identifier
  //~ uint bid = blockIdx.x; // block identifier
curandState *thread_state = &rand_state[gtid]; //doesn't get better if I use a local copy and then copy back
if (h[gtid] > 1) {
for (int j = 0; j < h[gtid]; ++j) {
int k = (gtid+2*randbool(thread_state)-1+N)%N;
atomicAdd(&dh[k], 1);
}
} else atomicAdd(&dh[gtid], h[gtid]);
h[gtid] = 0;
if(gtid==0) *slots_activos=0;
}
__global__ void old_actualizar(Manna_Array __restrict__ h, Manna_Array __restrict__ dh)
{
unsigned int gtid = blockIdx.x*blockDim.x + threadIdx.x;
h[gtid]+=dh[gtid];
dh[gtid]=0; //zeroes dh array
}
__global__ void actualizar(Manna_Array __restrict__ h, Manna_Array __restrict__ dh, unsigned int * __restrict__ slots_activos)
{
unsigned int gtid = blockIdx.x*blockDim.x + threadIdx.x;
if(h[gtid]>1)
atomicAdd(slots_activos, 1);
}
__device__ unsigned int slots_activos;
/*
Dynamic parallelism:
When a parent thread block launches a child grid, the child is not guaranteed to begin
execution until the parent thread block reaches an explicit synchronization point (e.g.
cudaDeviceSynchronize()).
*/
__global__ void run_manna(unsigned int *activity, Manna_Array h, Manna_Array dh) {
int t = 0;
do {
descargar<<< N/BLOCK_SIZE, BLOCK_SIZE >>>(h,dh,&activity[t]);
Manna_Array tmp = h;
h = dh;
dh = tmp;
actualizar<<< N/BLOCK_SIZE, BLOCK_SIZE >>>(h,dh,&activity[t]);
//~ cudaDeviceSynchronize();
//~ activity[t] = slots_activos;
++t;
    //~ } while(slots_activos > 0 && t < NSTEPS); // if the activity decays to zero, this stops evolving...
  } while(t < NSTEPS); // now it runs all NSTEPS no matter what
}
__device__ Manna_Array h,dh;
__device__ unsigned int *activity;
unsigned int activity_host[NSTEPS];
//===================================================================
int main(){
ios::sync_with_stdio(0); cin.tie(0);
assert(N%BLOCK_SIZE==0);
//random initialization
seedinit<<<1,1>>>(time(NULL)); //initialize a state per thread with some random seed
getLastCudaError("seedinit failed");
//slots
checkCudaErrors(cudaMalloc(&h, N*sizeof(int)));
checkCudaErrors(cudaMalloc(&dh, N*sizeof(int)));
checkCudaErrors(cudaMalloc(&activity, NSTEPS*sizeof(unsigned int)));
checkCudaErrors(cudaMemset(dh, 0, N*sizeof(int)));
//initialize slots
cout << "stable initial state of the sandpile...";
inicializacion<<<N/BLOCK_SIZE, BLOCK_SIZE>>>(h);
getLastCudaError("inicializacion failed");
cout << "DONE\n";
#ifdef DEBUG
imprimir_array<<<1,1>>>(h); checkCudaErrors(cudaDeviceSynchronize());
#endif
//create some chaos among slots
cout << "destabilized initial state of the sandpile...";
desestabilizacion_inicial<<< N/BLOCK_SIZE, BLOCK_SIZE >>>(h,dh);
getLastCudaError("desestabilizacion failed");
old_actualizar<<< N/BLOCK_SIZE, BLOCK_SIZE >>>(h,dh);
getLastCudaError("actualizar failed");
cout << "DONE\n";
#ifdef DEBUG
imprimir_array<<<1,1>>>(h); checkCudaErrors(cudaDeviceSynchronize());
#endif
cout << "evolution of the sandpile..."; cout.flush();
ofstream activity_out("activity.dat");
run_manna<<<1,1>>>(activity,h,dh);
checkCudaErrors(cudaMemcpy(activity_host, activity, sizeof(activity_host), cudaMemcpyDeviceToHost));
bool timeout = true;
for (int i = 0; i < NSTEPS; i++) {
activity_out << activity_host[i] << "\n";
if (!activity_host[i]) { timeout = false; break; }
}
cout << "DONE: " << ((timeout)?("time ran out\n\n"):("activity decayed to zero\n\n")); cout.flush();
//free everything
cudaFree(h);
cudaFree(dh);
cudaFree(activity);
return 0;
}
|
6ebea078332d3cb9e5fb47624da6e2dea4f9b053.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GPUGDScheduler.h"
#include <fstream>
#include <ctime>
extern char* LOG_DIRECTORY;
extern idx_t SERVER_RANK;
GPUGDSchedulerImp::GPUGDSchedulerImp() : sparse_data(NULL), sparse_testdata(NULL), model(NULL), para(NULL), gpuEV(NULL){
//idx_t n_gpus = 4;
idx_t n_gpus = 2;
gpuEV.resize(n_gpus);
for(idx_t i = 0; i < n_gpus; ++i){
gpuEV[i] = new GPUGD(myInterface);
}
for(idx_t i = 0; i < n_gpus; ++i){
gpuEV[i]->ForkAndSpin();
}
RegisterMessageProcessor(GPUGD_AccuracyMessage::type, &GPUGD_Accuracy, 100);
RegisterMessageProcessor(GPUGD_TrainMessage::type, &GPUGD_Train, 100);
RegisterMessageProcessor(GPUGD_DataLoadMessage::type, &GPUGD_DataLoad, 100);
RegisterMessageProcessor(DieMessage::type, &newDieHandler, 100);
}
GPUGDSchedulerImp::~GPUGDSchedulerImp() {
ofDebug << "Release memory" << endl;
ofDebug.close();
hipblasDestroy(d_handle);
for(idx_t i = 0; i < n_gpus; ++i){
delete gpuEV[i];
for(idx_t j = 0; j < model->num_layers-1; ++j)
hipFree((*gpu_d_weight[i])[j]);
}
}
void GPUGDSchedulerImp::RunGPUGD(SparseData* _sparse_data, SparseData* _sparse_testdata, FCModel* _model, Para* _para, idx_t _n_ranks, idx_t _rank, idx_t _n_gpus) {
rank = _rank;
n_ranks = _n_ranks;
n_gpus = _n_gpus;
ofDebug.open(string(LOG_DIRECTORY) + "GPUGDScheduler.log.rank" + std::to_string(rank), ios::out);
sparse_data = _sparse_data;
sparse_testdata = _sparse_testdata;
model = _model;
para = _para;
gpu_model.resize(n_gpus);
gpu_d_weight.resize(n_gpus);
for(idx_t i = 0; i < n_gpus; ++i){
hipSetDevice(i);
gpu_model[i] = gpu_model[i].deep_copy(*model);
gpu_d_weight[i] = new vector<val_t*>();
gpu_d_weight[i]->resize(model->num_layers-1);
for(idx_t j = 0; j < model->num_layers-1; ++j){
hipMalloc(&(*gpu_d_weight[i])[j], sizeof(val_t)*model->num_units[j]*model->num_units[j+1]);
hipMemcpy((*gpu_d_weight[i])[j], model->weight[j], sizeof(val_t)
*model->num_units[j]*model->num_units[j+1], hipMemcpyHostToDevice);
}
}
// Enable Peer-to-Peer
for(idx_t i = 0; i < n_gpus; ++i){
hipSetDevice(i);
for(idx_t k = 0; k < n_gpus; ++k){
if(k==i) continue;
hipDeviceEnablePeerAccess(k,0);
}
}
hipblasCreate(&d_handle);
ofDebug << "Start GPUGD" << endl;
ofDebug << "num_blocks: " << para->num_blocks
<< "num_threads: " << para->num_threads
<< ", num_tuples: " << sparse_data->num_tuples
<< ", num_testtuples: " << sparse_testdata->num_tuples
<< ", feature_size: " << sparse_data->feature_size
<< ", num_classes: " << sparse_data->num_classes
<< ", batch_size: " << para->batch_size
<< ", num_batches: " << para->num_batches
<< ", num_testbatches: " << para->num_testbatches
<< ", num_microbatches: 1" //<< para->num_mbatches
<< ", tuples_in_last_batch: " << para->tuples_last_batch
<< ", init_stepsize: " << para->init_stepsize
<< ", decay: " << para->decay<< endl;
for(idx_t i = 1; i < model->num_layers-1; i++)
ofDebug << "hidden layer " << i << ": " << model->num_units[i] << endl;
ofDebug.flush();
// init gpuEV.proc_data as sparse_data
for(idx_t i = 0; i < n_gpus; ++i)
gpuEV[i]->Init(sparse_data, sparse_testdata->data_max_row_nnz,
sparse_testdata->label_max_row_nnz, &gpu_model[i], gpu_d_weight[i], para, i, rank);
task_idx = 0;
start_idx = 0;
for(idx_t i = 0; i < n_gpus; ++i){
accu_send.push_back(0);
model_send.push_back(0);
train_batches.push_back(0);
gpu_p.push_back(0.);
batch_size.push_back(para->batch_size);
}
max_gpu_bsize = para->batch_size;
s_merg_iter = 0, merg_iter = 0;
train_tuples = 0, train_idx = 0;
test_batches = 0;
sche_data_timer.resize(n_gpus);
sche_testaccu_timer.resize(n_gpus);
sche_train_timer.resize(n_gpus);
for(idx_t i = 0; i < n_gpus; ++i){
sche_data_time.push_back(0.); pure_data_time.push_back(0.);
sche_testaccu_time.push_back(0.); pure_testaccu_time.push_back(0.);
sche_train_time.push_back(0.); pure_train_time.push_back(0.);
}
task_testaccu_time = 0.; task_train_time = 0.; task_sampleaccu_time = 0.;
for(idx_t i = 0; i < 3; ++i) mpi_time.push_back(0.);
transfer_time = 0.; localavg_time = 0.;
test_accuracy = 0.; //sample_accuracy = 0.;
test_loss = 0.; //sample_loss = 0.;
p_test_loss = 0.;
incloss_cnt = 0;
train_iter = 0;
sample_tuples = 0;
task_idx = 1;
l2_flag = 1;
task_testaccu_timer.Restart();
for(idx_t i = 0; i < n_gpus; ++i){
GPUGD_RunDataLoadMessage_Factory(*gpuEV[i], i, sparse_testdata, start_idx, para->batch_size, l2_flag, task_idx);
start_idx += para->batch_size;
sche_data_timer[i].Restart();
++accu_send[i];
}
}
MESSAGE_HANDLER_DEFINITION_BEGIN(GPUGDSchedulerImp, GPUGD_Accuracy, GPUGD_AccuracyMessage) {
evProc.sche_data_time[msg.gpu_idx] += evProc.sche_data_timer[msg.gpu_idx].GetTime();
evProc.pure_data_time[msg.gpu_idx] += msg.time;
GPUGD_RunAccuracyMessage_Factory(*evProc.gpuEV[msg.gpu_idx], msg.gpu_idx, msg.start_idx, evProc.para->batch_size, evProc.task_idx);
++evProc.test_batches;
evProc.sche_testaccu_timer[msg.gpu_idx].Restart();
}MESSAGE_HANDLER_DEFINITION_END
MESSAGE_HANDLER_DEFINITION_BEGIN(GPUGDSchedulerImp, GPUGD_Train, GPUGD_TrainMessage) {
if(evProc.task_idx == 2 || evProc.task_idx == 3){
evProc.sche_data_time[msg.gpu_idx] += evProc.sche_data_timer[msg.gpu_idx].GetTime();
evProc.pure_data_time[msg.gpu_idx] += msg.time;
++evProc.model_send[msg.gpu_idx];
// warmup+adaptive+time-based decay
val_t t_decay = 0.f;
if(evProc.train_tuples/evProc.sparse_data->num_tuples <= 10)
t_decay = 1.f;
else
t_decay = 1./(1+evProc.para->decay*(
evProc.train_tuples/evProc.sparse_data->num_tuples-10));
val_t lr = evProc.para->init_stepsize*t_decay
/evProc.para->batch_size*evProc.batch_size[msg.gpu_idx];
if(evProc.train_tuples/evProc.sparse_data->num_tuples <= 10){
// gradual warmup
lr = evProc.para->init_stepsize/1024*32/100 +
((evProc.para->init_stepsize/1024*32 -
evProc.para->init_stepsize/1024*32/100)*evProc.train_tuples
/(10*evProc.sparse_data->num_tuples));
}/**/
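// Schedule implemented above (summary; "epoch" = train_tuples/num_tuples, integer division):
// epoch <= 10: linear warmup from init_stepsize*(32/1024)/100 up to init_stepsize*(32/1024);
// epoch > 10: lr = init_stepsize * (batch_size[gpu]/batch_size) / (1 + decay*(epoch-10)).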
GPUGD_RunTrainMessage_Factory(*evProc.gpuEV[msg.gpu_idx], msg.gpu_idx, msg.start_idx, evProc.batch_size[msg.gpu_idx], evProc.task_idx, lr, evProc.model);
++evProc.model_send[msg.gpu_idx];
evProc.sche_train_timer[msg.gpu_idx].Restart();
}
}MESSAGE_HANDLER_DEFINITION_END
MESSAGE_HANDLER_DEFINITION_BEGIN(GPUGDSchedulerImp, GPUGD_DataLoad, GPUGD_DataLoadMessage) {
// msg: gpu_idx, start_idx, (task_idx,) val, loss, time
evProc.l2_flag = msg.flag;
if(evProc.task_idx == 1){
evProc.test_accuracy += msg.val;
evProc.test_loss += msg.loss;
evProc.sche_testaccu_time[msg.gpu_idx] += evProc.sche_testaccu_timer[msg.gpu_idx].GetTime();
evProc.pure_testaccu_time[msg.gpu_idx] += msg.time;
--evProc.accu_send[msg.gpu_idx];
bool isDone = true;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
if(evProc.accu_send[i] > 0) isDone = false;
}
if(evProc.start_idx + evProc.para->batch_size >=
evProc.sparse_testdata->num_tuples && isDone){
std::cerr<<"start_idx: "<<evProc.start_idx << std::endl;
evProc.test_accuracy /= (evProc.start_idx+1.);
evProc.test_loss /= (evProc.start_idx+1.);
evProc.test_batches = 0;
std::cerr<<"testing_accu: " << evProc.test_accuracy
<< ", testing_loss: " << evProc.test_loss
<< ", train_tuples: " << evProc.train_tuples << std::endl;
evProc.task_testaccu_time += evProc.task_testaccu_timer.GetTime();
evProc.ofDebug << "testing_accuracy: " << std::setprecision(6) << evProc.test_accuracy
<< " testing_loss: " << std::setprecision(6) << evProc.test_loss
<< " train_tuples: " << evProc.train_tuples
<< " task_testaccu_time: " << evProc.task_testaccu_time;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
evProc.ofDebug << " pure_data_time(GPU" << i
<< "): " << evProc.pure_data_time[i]
<< " pure_testaccu_time(GPU" << i
<< "): " << evProc.pure_testaccu_time[i];
}
evProc.ofDebug << endl;
evProc.ofDebug.flush();
if (evProc.train_tuples/evProc.sparse_data->num_tuples > evProc.para->train_iter){
evProc.ofDebug << "Total task_testaccu_time: " << evProc.task_testaccu_time
<< " total task_train_time: " << evProc.task_train_time
<< " total task_sampleaccu_time: " << evProc.task_sampleaccu_time
<< " total iterations: " << evProc.para->train_iter;
evProc.ofDebug.flush();
DieMessage_Factory(evProc.myInterface);
}
evProc.test_accuracy = 0.;
evProc.p_test_loss = evProc.test_loss;
evProc.test_loss = 0.;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
evProc.accu_send[i] = 0;
}
evProc.task_idx = 2;
evProc.start_idx = evProc.train_idx;
evProc.task_train_timer.Restart();
if(evProc.train_tuples/evProc.sparse_data->num_tuples == 11
&& evProc.s_merg_iter == 0){
evProc.max_gpu_bsize = 1024;
for(idx_t i = 0; i<evProc.n_gpus; ++i) evProc.batch_size[i] = 1024;
std::cerr<<"start adaptive bsize." <<std::endl;
}
for(idx_t i = 0; i < evProc.n_gpus; ++i){
if(evProc.start_idx + evProc.batch_size[i] >= evProc.sparse_data->num_tuples){
evProc.start_idx = 0;
}
GPUGD_RunDataLoadMessage_Factory(*evProc.gpuEV[i], i, evProc.sparse_data, evProc.start_idx, evProc.batch_size[i], evProc.l2_flag, evProc.task_idx);
evProc.start_idx += evProc.batch_size[i];
evProc.train_tuples += evProc.batch_size[i];
++evProc.train_batches[i];
evProc.sche_data_timer[i].Restart();
++evProc.model_send[i];
}
} else if(evProc.start_idx + evProc.para->batch_size < evProc.sparse_testdata->num_tuples){
GPUGD_RunDataLoadMessage_Factory(*evProc.gpuEV[msg.gpu_idx], msg.gpu_idx, evProc.sparse_testdata, evProc.start_idx, evProc.para->batch_size, evProc.l2_flag, evProc.task_idx);
evProc.start_idx += evProc.para->batch_size;
evProc.sche_data_timer[msg.gpu_idx].Restart();
++evProc.accu_send[msg.gpu_idx];
}
} else if(evProc.task_idx == 2){
evProc.sche_train_time[msg.gpu_idx] += evProc.sche_train_timer[msg.gpu_idx].GetTime();
evProc.pure_train_time[msg.gpu_idx] += msg.time;
evProc.sample_tuples += evProc.batch_size[msg.gpu_idx];
evProc.model_send[msg.gpu_idx]-=3;
bool isDone = true;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
if(evProc.model_send[i] > 0) isDone = false;
}
if(evProc.sample_tuples-evProc.max_gpu_bsize>=evProc.para->sample_tuples && isDone){
evProc.task_train_time += evProc.task_train_timer.GetTime();
evProc.ofDebug << "iteration: " << evProc.train_tuples/evProc.sparse_data->num_tuples
<< " merg_iter: " << evProc.merg_iter
<< " task_train_time: " << evProc.task_train_time;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
evProc.ofDebug << " pure_data_time(GPU" << i
<< "): " << evProc.pure_data_time[i]
<< " pure_train_time(GPU" << i
<< "): " << evProc.pure_train_time[i];
}
evProc.ofDebug << endl;
evProc.ofDebug.flush();
for(idx_t i = 0; i < evProc.n_gpus; ++i){
evProc.model_send[i] = 0;
}
val_t t_decay = 1./(1+evProc.para->decay*(
evProc.train_tuples/evProc.sparse_data->num_tuples-10));
std::cerr<<"decay: "<<t_decay<<std::endl;
evProc.localavg_timer.Restart();
/////////////////average_gpu_peer_b/////////////////
std::cerr<<"gpu_idx:"<<msg.gpu_idx<<std::endl;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
std::cerr<<"gpu"<<i<<"_";
hipLaunchKernelGGL(( myprint_kernel), dim3(1),dim3(1), 0, 0, (*evProc.gpu_d_weight[i])[0],1);
hipDeviceSynchronize();
}
idx_t tot_batches = 0;
for(idx_t i = 0; i < evProc.n_gpus; ++i) tot_batches+=evProc.train_batches[i];
for(idx_t i = 0; i < evProc.n_gpus; ++i)
evProc.gpu_p[i] = 1.0f*evProc.train_batches[i]/tot_batches;
std::cerr<<"GPU_Train: ";
for(idx_t i = 0; i < evProc.n_gpus; ++i) std::cerr<<","<<evProc.gpu_p[i];
std::cerr<<std::endl;
for(idx_t i = 0; i < evProc.n_gpus; ++i) std::cerr<<","<<evProc.train_batches[i];
std::cerr<<std::endl;
// adaptive batch size
idx_t t_iter = evProc.train_tuples/evProc.sparse_data->num_tuples;
idx_t bsize_min = 32, bsize_max = 1024;
val_t mu = 1.f/evProc.n_gpus;
val_t p_tuples = 0.f;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
p_tuples += evProc.gpu_p[i]*evProc.batch_size[i];
}
idx_t tmp_b = 4;
if(evProc.train_tuples/evProc.sparse_data->num_tuples > 10){
for(idx_t i = 0; i < evProc.n_gpus; ++i){
if(evProc.gpu_p[i]<mu && evProc.batch_size[i]>bsize_min)
//evProc.batch_size[i]/=tmp_b;
evProc.batch_size[i]-=32;
else if(evProc.gpu_p[i]>mu && evProc.batch_size[i]<bsize_max)
//evProc.batch_size[i]*=tmp_b;
evProc.batch_size[i]+=32;
}/**/
val_t max_p = 0., min_p = 1.;
idx_t max_gidx = -1, min_gidx = -1;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
if(max_p < evProc.gpu_p[i]){
max_p = evProc.gpu_p[i];
max_gidx = i;
}
if(min_p > evProc.gpu_p[i]){
min_p = evProc.gpu_p[i];
min_gidx = i;
}
}
max_p/=10, min_p/=10;
std::cerr<<"l2_flag: " << evProc.l2_flag << std::endl;
if(evProc.l2_flag == 1){
evProc.gpu_p[max_gidx] += max_p;
evProc.gpu_p[min_gidx] -= min_p;
}/**/
std::cerr<<"max_weight "<< evProc.gpu_p[max_gidx]<<" on GPU" << max_gidx<<std::endl;
std::cerr<<"min_weight "<< evProc.gpu_p[min_gidx]<<" on GPU" << min_gidx<<std::endl;
} /**/
if(evProc.train_tuples/evProc.sparse_data->num_tuples <= 10){
for(idx_t i = 0; i < evProc.n_gpus; ++i)
evProc.batch_size[i] = 32;
}/**/
evProc.max_gpu_bsize = evProc.batch_size[0];
for(idx_t i = 1; i < evProc.n_gpus; ++i){
evProc.max_gpu_bsize = evProc.batch_size[i] > evProc.max_gpu_bsize?
evProc.batch_size[i] : evProc.max_gpu_bsize;
}
for(idx_t i = 0; i < evProc.n_gpus; ++i) std::cerr<<","<<evProc.batch_size[i];
std::cerr<<", max_gpu_bsize: "<< evProc.max_gpu_bsize << std::endl;
evProc.localavg_timer.Restart();
#pragma omp parallel num_threads(evProc.n_gpus)
{
idx_t i = omp_get_thread_num();
hipSetDevice(i);
hipMemcpy(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->gpu_weight+i, &evProc.gpu_p[i],
sizeof(val_t), hipMemcpyHostToDevice);
for(idx_t j = 0; j < evProc.model->num_layers-1; ++j){
hipLaunchKernelGGL(( mul_glb_weight), dim3(evProc.para->num_blocks),
dim3(evProc.para->num_threads), 0, 0,
(*evProc.gpu_d_weight[i])[j],
((GPUGDImp*)(evProc.gpuEV[i]->evProc))->gpu_weight+i,//evProc.gpu_weight,
evProc.model->num_units[j]*evProc.model->num_units[j+1],
evProc.para->num_blocks*evProc.para->num_threads);
hipDeviceSynchronize();
#pragma omp barrier
/////////// b_2gpus ///////////
idx_t n_elements = evProc.model->num_units[j]*evProc.model->num_units[j+1];
idx_t chunks = 8;
idx_t stream_size = n_elements/chunks;
idx_t m_streamsize = stream_size/2;
val_t t_gweight = (1.);
for(idx_t ts = 0; ts < chunks; ++ts){
idx_t dest_gpu = i, src_gpu= (i+1)%2;
idx_t data_idx= ts*stream_size+i*m_streamsize;
hipMemcpyPeerAsync(
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))
->d_glb_weight[src_gpu]))[j]+data_idx, i,
(*evProc.gpu_d_weight[src_gpu])[j]+data_idx, src_gpu,
sizeof(val_t)*m_streamsize, ((GPUGDImp*)(evProc.gpuEV[i]->evProc))
->gpu_streams[ts%4]);
hipblasSetStream(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->d_handle,
((GPUGDImp*)(evProc.gpuEV[i]->evProc))
->gpu_streams[ts%4]);
hipblasSaxpy(((GPUGDImp*)(evProc.gpuEV[i]->evProc))
->d_handle, m_streamsize, &t_gweight,
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))
->d_glb_weight[src_gpu]))[j]+data_idx, 1,
(*evProc.gpu_d_weight[i])[j]+data_idx, 1);
hipMemcpyPeerAsync(
(*evProc.gpu_d_weight[src_gpu])[j]+data_idx, src_gpu,
(*evProc.gpu_d_weight[i])[j]+data_idx, i,
sizeof(val_t)*m_streamsize, ((GPUGDImp*)(evProc.gpuEV[i]->evProc))
->gpu_streams[ts%4]);
}
hipDeviceSynchronize();
/////////// e_2gpus ///////////
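// Sketch of the 2-GPU exchange above: each GPU's weights were first scaled by its share
// gpu_p[i] (mul_glb_weight); then, for its assigned half of every chunk, GPU i pulls the
// peer's scaled half, accumulates it with axpy (weighted sum), and pushes the result back,
// so both devices end up with the same weighted-average weights for that half.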
/////////// GD momentum
hipLaunchKernelGGL(( mu_weight), dim3(evProc.para->num_blocks),dim3(evProc.para->num_threads), 0, 0,
(*evProc.gpu_d_weight[i])[j],
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->d_glb_weight[i]))[j],
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->d_glb_weight[evProc.n_gpus]))[j],
evProc.model->num_units[j]*evProc.model->num_units[j+1],
evProc.para->num_blocks*evProc.para->num_threads);
hipDeviceSynchronize();
hipLaunchKernelGGL(( copy_weight), dim3(evProc.para->num_blocks),dim3(evProc.para->num_threads), 0, 0,
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->d_glb_weight[evProc.n_gpus]))[j],
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->d_glb_weight[i]))[j],
evProc.model->num_units[j]*evProc.model->num_units[j+1],
evProc.para->num_blocks*evProc.para->num_threads);
hipDeviceSynchronize();
hipLaunchKernelGGL(( copy_weight), dim3(evProc.para->num_blocks),dim3(evProc.para->num_threads), 0, 0,
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->d_glb_weight[i]))[j],
(*evProc.gpu_d_weight[i])[j],
evProc.model->num_units[j]*evProc.model->num_units[j+1],
evProc.para->num_blocks*evProc.para->num_threads);
hipDeviceSynchronize();
/////////// e_momentum
} // end of #layers
//hipDeviceSynchronize();
}
evProc.localavg_time += evProc.localavg_timer.GetTime();
for(idx_t i = 0; i < evProc.n_gpus; ++i){
std::cerr<<"gpu"<<i<<"_";
hipLaunchKernelGGL(( myprint_kernel), dim3(1),dim3(1), 0, 0, (*evProc.gpu_d_weight[i])[0],1);
hipDeviceSynchronize();
}
/////////////////average_gpu_peer_e/////////////////
evProc.ofDebug << " localavg_time: " << evProc.localavg_time;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
evProc.ofDebug << " pure_data_time(GPU" << i
<< "): " << evProc.pure_data_time[i]
<< " pure_train_time(GPU" << i
<< "): " << evProc.pure_train_time[i];
}
for(idx_t i = 0; i < evProc.n_gpus; ++i){
evProc.ofDebug << " new_bsize(GPU" << i
<< "): " <<evProc.batch_size[i];
}
evProc.ofDebug << endl;
if(evProc.train_tuples/evProc.sparse_data->num_tuples == 10){
std::cerr<<"save model"<<std::endl;
std::ofstream oFile("amazon-sgdm-warmup");
for(idx_t k = 0; k < evProc.model->num_layers-1; ++k){
hipMemcpy(evProc.model->weight[k],
(*evProc.gpu_d_weight[msg.gpu_idx])[k],
sizeof(val_t)*evProc.model->num_units[k]*evProc.model->num_units[k+1],
hipMemcpyDeviceToHost);
for(idx_t j = 0; j < evProc.model->num_units[k]*evProc.model->num_units[k+1]; ++j){
oFile <<std::scientific << evProc.model->weight[k][j] << ",";
}
oFile << std::endl;
}
oFile.close();
}/**/
////////////////////////////////
evProc.sample_tuples = 0;
for(idx_t i = 0; i < evProc.n_gpus; ++i) evProc.train_batches[i] = 0;
++evProc.s_merg_iter;
std::cerr<<"s_merg_iter: "<< evProc.s_merg_iter;
if(evProc.s_merg_iter==evProc.para->sampletest_tuples){
evProc.s_merg_iter = 0;
++evProc.merg_iter;
}
std::cerr<<", merg_iter: " << evProc.merg_iter << std::endl;
if(evProc.s_merg_iter == 0){
// compute testing accuracy
evProc.train_idx = evProc.start_idx;
evProc.start_idx = 0;
evProc.task_idx = 1;
evProc.l2_flag = 1;
evProc.task_testaccu_timer.Restart();
for(idx_t i = 0; i < evProc.n_gpus; ++i){
GPUGD_RunDataLoadMessage_Factory(*evProc.gpuEV[i], i, evProc.sparse_testdata, evProc.start_idx, evProc.para->batch_size, evProc.l2_flag, evProc.task_idx);
evProc.start_idx += evProc.para->batch_size;
evProc.sche_data_timer[i].Restart();
++evProc.accu_send[i];
}
} else {
evProc.task_train_timer.Restart();
for(idx_t i = 0; i < evProc.n_gpus; ++i){
if(evProc.start_idx + evProc.batch_size[i] >= evProc.sparse_data->num_tuples){
evProc.start_idx = 0;
}
GPUGD_RunDataLoadMessage_Factory(*evProc.gpuEV[i], i, evProc.sparse_data, evProc.start_idx, evProc.batch_size[i], evProc.l2_flag, evProc.task_idx);
evProc.start_idx += evProc.batch_size[i];
evProc.train_tuples += evProc.batch_size[i];
++evProc.train_batches[i];
evProc.sche_data_timer[i].Restart();
++evProc.model_send[i];
}
}
} else if(evProc.sample_tuples - evProc.max_gpu_bsize < evProc.para->sample_tuples){
if(evProc.start_idx + evProc.batch_size[msg.gpu_idx] >= evProc.sparse_data->num_tuples){
evProc.start_idx = 0;
}
GPUGD_RunDataLoadMessage_Factory(*evProc.gpuEV[msg.gpu_idx], msg.gpu_idx, evProc.sparse_data, evProc.start_idx, evProc.batch_size[msg.gpu_idx], evProc.l2_flag, evProc.task_idx);
evProc.start_idx += evProc.batch_size[msg.gpu_idx];
evProc.train_tuples += evProc.batch_size[msg.gpu_idx];
++evProc.train_batches[msg.gpu_idx];
evProc.sche_data_timer[msg.gpu_idx].Restart();
++evProc.model_send[msg.gpu_idx];
}
} else if (evProc.task_idx == 3){
evProc.model_send[msg.gpu_idx]-=3;
}
}MESSAGE_HANDLER_DEFINITION_END
MESSAGE_HANDLER_DEFINITION_BEGIN(GPUGDSchedulerImp, newDieHandler, DieMessage)
for(idx_t i = 0; i < evProc.n_gpus; ++i){
DieMessage_Factory(*evProc.gpuEV[i]);
evProc.gpuEV[i]->Join();
}
return true;
}
| 6ebea078332d3cb9e5fb47624da6e2dea4f9b053.cu | #include "GPUGDScheduler.h"
#include <fstream>
#include <ctime>
extern char* LOG_DIRECTORY;
extern idx_t SERVER_RANK;
GPUGDSchedulerImp::GPUGDSchedulerImp() : sparse_data(NULL), sparse_testdata(NULL), model(NULL), para(NULL), gpuEV(NULL){
//idx_t n_gpus = 4;
idx_t n_gpus = 2;
gpuEV.resize(n_gpus);
for(idx_t i = 0; i < n_gpus; ++i){
gpuEV[i] = new GPUGD(myInterface);
}
for(idx_t i = 0; i < n_gpus; ++i){
gpuEV[i]->ForkAndSpin();
}
RegisterMessageProcessor(GPUGD_AccuracyMessage::type, &GPUGD_Accuracy, 100);
RegisterMessageProcessor(GPUGD_TrainMessage::type, &GPUGD_Train, 100);
RegisterMessageProcessor(GPUGD_DataLoadMessage::type, &GPUGD_DataLoad, 100);
RegisterMessageProcessor(DieMessage::type, &newDieHandler, 100);
}
GPUGDSchedulerImp::~GPUGDSchedulerImp() {
ofDebug << "Release memory" << endl;
ofDebug.close();
cublasDestroy(d_handle);
for(idx_t i = 0; i < n_gpus; ++i){
delete gpuEV[i];
for(idx_t j = 0; j < model->num_layers-1; ++j)
cudaFree((*gpu_d_weight[i])[j]);
}
}
void GPUGDSchedulerImp::RunGPUGD(SparseData* _sparse_data, SparseData* _sparse_testdata, FCModel* _model, Para* _para, idx_t _n_ranks, idx_t _rank, idx_t _n_gpus) {
rank = _rank;
n_ranks = _n_ranks;
n_gpus = _n_gpus;
ofDebug.open(string(LOG_DIRECTORY) + "GPUGDScheduler.log.rank" + std::to_string(rank), ios::out);
sparse_data = _sparse_data;
sparse_testdata = _sparse_testdata;
model = _model;
para = _para;
gpu_model.resize(n_gpus);
gpu_d_weight.resize(n_gpus);
for(idx_t i = 0; i < n_gpus; ++i){
cudaSetDevice(i);
gpu_model[i] = gpu_model[i].deep_copy(*model);
gpu_d_weight[i] = new vector<val_t*>();
gpu_d_weight[i]->resize(model->num_layers-1);
for(idx_t j = 0; j < model->num_layers-1; ++j){
cudaMalloc(&(*gpu_d_weight[i])[j], sizeof(val_t)*model->num_units[j]*model->num_units[j+1]);
cudaMemcpy((*gpu_d_weight[i])[j], model->weight[j], sizeof(val_t)
*model->num_units[j]*model->num_units[j+1], cudaMemcpyHostToDevice);
}
}
// Enable Peer-to-Peer
for(idx_t i = 0; i < n_gpus; ++i){
cudaSetDevice(i);
for(idx_t k = 0; k < n_gpus; ++k){
if(k==i) continue;
cudaDeviceEnablePeerAccess(k,0);
}
}
cublasCreate(&d_handle);
ofDebug << "Start GPUGD" << endl;
ofDebug << "num_blocks: " << para->num_blocks
<< "num_threads: " << para->num_threads
<< ", num_tuples: " << sparse_data->num_tuples
<< ", num_testtuples: " << sparse_testdata->num_tuples
<< ", feature_size: " << sparse_data->feature_size
<< ", num_classes: " << sparse_data->num_classes
<< ", batch_size: " << para->batch_size
<< ", num_batches: " << para->num_batches
<< ", num_testbatches: " << para->num_testbatches
<< ", num_microbatches: 1" //<< para->num_mbatches
<< ", tuples_in_last_batch: " << para->tuples_last_batch
<< ", init_stepsize: " << para->init_stepsize
<< ", decay: " << para->decay<< endl;
for(idx_t i = 1; i < model->num_layers-1; i++)
ofDebug << "hidden layer " << i << ": " << model->num_units[i] << endl;
ofDebug.flush();
// init gpuEV.proc_data as sparse_data
for(idx_t i = 0; i < n_gpus; ++i)
gpuEV[i]->Init(sparse_data, sparse_testdata->data_max_row_nnz,
sparse_testdata->label_max_row_nnz, &gpu_model[i], gpu_d_weight[i], para, i, rank);
task_idx = 0;
start_idx = 0;
for(idx_t i = 0; i < n_gpus; ++i){
accu_send.push_back(0);
model_send.push_back(0);
train_batches.push_back(0);
gpu_p.push_back(0.);
batch_size.push_back(para->batch_size);
}
max_gpu_bsize = para->batch_size;
s_merg_iter = 0, merg_iter = 0;
train_tuples = 0, train_idx = 0;
test_batches = 0;
sche_data_timer.resize(n_gpus);
sche_testaccu_timer.resize(n_gpus);
sche_train_timer.resize(n_gpus);
for(idx_t i = 0; i < n_gpus; ++i){
sche_data_time.push_back(0.); pure_data_time.push_back(0.);
sche_testaccu_time.push_back(0.); pure_testaccu_time.push_back(0.);
sche_train_time.push_back(0.); pure_train_time.push_back(0.);
}
task_testaccu_time = 0.; task_train_time = 0.; task_sampleaccu_time = 0.;
for(idx_t i = 0; i < 3; ++i) mpi_time.push_back(0.);
transfer_time = 0.; localavg_time = 0.;
test_accuracy = 0.; //sample_accuracy = 0.;
test_loss = 0.; //sample_loss = 0.;
p_test_loss = 0.;
incloss_cnt = 0;
train_iter = 0;
sample_tuples = 0;
task_idx = 1;
l2_flag = 1;
task_testaccu_timer.Restart();
for(idx_t i = 0; i < n_gpus; ++i){
GPUGD_RunDataLoadMessage_Factory(*gpuEV[i], i, sparse_testdata, start_idx, para->batch_size, l2_flag, task_idx);
start_idx += para->batch_size;
sche_data_timer[i].Restart();
++accu_send[i];
}
}
MESSAGE_HANDLER_DEFINITION_BEGIN(GPUGDSchedulerImp, GPUGD_Accuracy, GPUGD_AccuracyMessage) {
evProc.sche_data_time[msg.gpu_idx] += evProc.sche_data_timer[msg.gpu_idx].GetTime();
evProc.pure_data_time[msg.gpu_idx] += msg.time;
GPUGD_RunAccuracyMessage_Factory(*evProc.gpuEV[msg.gpu_idx], msg.gpu_idx, msg.start_idx, evProc.para->batch_size, evProc.task_idx);
++evProc.test_batches;
evProc.sche_testaccu_timer[msg.gpu_idx].Restart();
}MESSAGE_HANDLER_DEFINITION_END
MESSAGE_HANDLER_DEFINITION_BEGIN(GPUGDSchedulerImp, GPUGD_Train, GPUGD_TrainMessage) {
if(evProc.task_idx == 2 || evProc.task_idx == 3){
evProc.sche_data_time[msg.gpu_idx] += evProc.sche_data_timer[msg.gpu_idx].GetTime();
evProc.pure_data_time[msg.gpu_idx] += msg.time;
++evProc.model_send[msg.gpu_idx];
// warmup+adaptive+time-based decay
val_t t_decay = 0.f;
if(evProc.train_tuples/evProc.sparse_data->num_tuples <= 10)
t_decay = 1.f;
else
t_decay = 1./(1+evProc.para->decay*(
evProc.train_tuples/evProc.sparse_data->num_tuples-10));
val_t lr = evProc.para->init_stepsize*t_decay
/evProc.para->batch_size*evProc.batch_size[msg.gpu_idx];
if(evProc.train_tuples/evProc.sparse_data->num_tuples <= 10){
// gradual warmup
lr = evProc.para->init_stepsize/1024*32/100 +
((evProc.para->init_stepsize/1024*32 -
evProc.para->init_stepsize/1024*32/100)*evProc.train_tuples
/(10*evProc.sparse_data->num_tuples));
}/**/
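// Schedule implemented above (summary; "epoch" = train_tuples/num_tuples, integer division):
// epoch <= 10: linear warmup from init_stepsize*(32/1024)/100 up to init_stepsize*(32/1024);
// epoch > 10: lr = init_stepsize * (batch_size[gpu]/batch_size) / (1 + decay*(epoch-10)).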
GPUGD_RunTrainMessage_Factory(*evProc.gpuEV[msg.gpu_idx], msg.gpu_idx, msg.start_idx, evProc.batch_size[msg.gpu_idx], evProc.task_idx, lr, evProc.model);
++evProc.model_send[msg.gpu_idx];
evProc.sche_train_timer[msg.gpu_idx].Restart();
}
}MESSAGE_HANDLER_DEFINITION_END
MESSAGE_HANDLER_DEFINITION_BEGIN(GPUGDSchedulerImp, GPUGD_DataLoad, GPUGD_DataLoadMessage) {
// msg: gpu_idx, start_idx, (task_idx,) val, loss, time
evProc.l2_flag = msg.flag;
if(evProc.task_idx == 1){
evProc.test_accuracy += msg.val;
evProc.test_loss += msg.loss;
evProc.sche_testaccu_time[msg.gpu_idx] += evProc.sche_testaccu_timer[msg.gpu_idx].GetTime();
evProc.pure_testaccu_time[msg.gpu_idx] += msg.time;
--evProc.accu_send[msg.gpu_idx];
bool isDone = true;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
if(evProc.accu_send[i] > 0) isDone = false;
}
if(evProc.start_idx + evProc.para->batch_size >=
evProc.sparse_testdata->num_tuples && isDone){
std::cerr<<"start_idx: "<<evProc.start_idx << std::endl;
evProc.test_accuracy /= (evProc.start_idx+1.);
evProc.test_loss /= (evProc.start_idx+1.);
evProc.test_batches = 0;
std::cerr<<"testing_accu: " << evProc.test_accuracy
<< ", testing_loss: " << evProc.test_loss
<< ", train_tuples: " << evProc.train_tuples << std::endl;
evProc.task_testaccu_time += evProc.task_testaccu_timer.GetTime();
evProc.ofDebug << "testing_accuracy: " << std::setprecision(6) << evProc.test_accuracy
<< " testing_loss: " << std::setprecision(6) << evProc.test_loss
<< " train_tuples: " << evProc.train_tuples
<< " task_testaccu_time: " << evProc.task_testaccu_time;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
evProc.ofDebug << " pure_data_time(GPU" << i
<< "): " << evProc.pure_data_time[i]
<< " pure_testaccu_time(GPU" << i
<< "): " << evProc.pure_testaccu_time[i];
}
evProc.ofDebug << endl;
evProc.ofDebug.flush();
if (evProc.train_tuples/evProc.sparse_data->num_tuples > evProc.para->train_iter){
evProc.ofDebug << "Total task_testaccu_time: " << evProc.task_testaccu_time
<< " total task_train_time: " << evProc.task_train_time
<< " total task_sampleaccu_time: " << evProc.task_sampleaccu_time
<< " total iterations: " << evProc.para->train_iter;
evProc.ofDebug.flush();
DieMessage_Factory(evProc.myInterface);
}
evProc.test_accuracy = 0.;
evProc.p_test_loss = evProc.test_loss;
evProc.test_loss = 0.;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
evProc.accu_send[i] = 0;
}
evProc.task_idx = 2;
evProc.start_idx = evProc.train_idx;
evProc.task_train_timer.Restart();
if(evProc.train_tuples/evProc.sparse_data->num_tuples == 11
&& evProc.s_merg_iter == 0){
evProc.max_gpu_bsize = 1024;
for(idx_t i = 0; i<evProc.n_gpus; ++i) evProc.batch_size[i] = 1024;
std::cerr<<"start adaptive bsize." <<std::endl;
}
for(idx_t i = 0; i < evProc.n_gpus; ++i){
if(evProc.start_idx + evProc.batch_size[i] >= evProc.sparse_data->num_tuples){
evProc.start_idx = 0;
}
GPUGD_RunDataLoadMessage_Factory(*evProc.gpuEV[i], i, evProc.sparse_data, evProc.start_idx, evProc.batch_size[i], evProc.l2_flag, evProc.task_idx);
evProc.start_idx += evProc.batch_size[i];
evProc.train_tuples += evProc.batch_size[i];
++evProc.train_batches[i];
evProc.sche_data_timer[i].Restart();
++evProc.model_send[i];
}
} else if(evProc.start_idx + evProc.para->batch_size < evProc.sparse_testdata->num_tuples){
GPUGD_RunDataLoadMessage_Factory(*evProc.gpuEV[msg.gpu_idx], msg.gpu_idx, evProc.sparse_testdata, evProc.start_idx, evProc.para->batch_size, evProc.l2_flag, evProc.task_idx);
evProc.start_idx += evProc.para->batch_size;
evProc.sche_data_timer[msg.gpu_idx].Restart();
++evProc.accu_send[msg.gpu_idx];
}
} else if(evProc.task_idx == 2){
evProc.sche_train_time[msg.gpu_idx] += evProc.sche_train_timer[msg.gpu_idx].GetTime();
evProc.pure_train_time[msg.gpu_idx] += msg.time;
evProc.sample_tuples += evProc.batch_size[msg.gpu_idx];
evProc.model_send[msg.gpu_idx]-=3;
bool isDone = true;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
if(evProc.model_send[i] > 0) isDone = false;
}
if(evProc.sample_tuples-evProc.max_gpu_bsize>=evProc.para->sample_tuples && isDone){
evProc.task_train_time += evProc.task_train_timer.GetTime();
evProc.ofDebug << "iteration: " << evProc.train_tuples/evProc.sparse_data->num_tuples
<< " merg_iter: " << evProc.merg_iter
<< " task_train_time: " << evProc.task_train_time;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
evProc.ofDebug << " pure_data_time(GPU" << i
<< "): " << evProc.pure_data_time[i]
<< " pure_train_time(GPU" << i
<< "): " << evProc.pure_train_time[i];
}
evProc.ofDebug << endl;
evProc.ofDebug.flush();
for(idx_t i = 0; i < evProc.n_gpus; ++i){
evProc.model_send[i] = 0;
}
val_t t_decay = 1./(1+evProc.para->decay*(
evProc.train_tuples/evProc.sparse_data->num_tuples-10));
std::cerr<<"decay: "<<t_decay<<std::endl;
evProc.localavg_timer.Restart();
/////////////////average_gpu_peer_b/////////////////
std::cerr<<"gpu_idx:"<<msg.gpu_idx<<std::endl;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
std::cerr<<"gpu"<<i<<"_";
myprint_kernel<<<1,1>>>((*evProc.gpu_d_weight[i])[0],1);
cudaDeviceSynchronize();
}
idx_t tot_batches = 0;
for(idx_t i = 0; i < evProc.n_gpus; ++i) tot_batches+=evProc.train_batches[i];
for(idx_t i = 0; i < evProc.n_gpus; ++i)
evProc.gpu_p[i] = 1.0f*evProc.train_batches[i]/tot_batches;
std::cerr<<"GPU_Train: ";
for(idx_t i = 0; i < evProc.n_gpus; ++i) std::cerr<<","<<evProc.gpu_p[i];
std::cerr<<std::endl;
for(idx_t i = 0; i < evProc.n_gpus; ++i) std::cerr<<","<<evProc.train_batches[i];
std::cerr<<std::endl;
// adaptive batch size
idx_t t_iter = evProc.train_tuples/evProc.sparse_data->num_tuples;
idx_t bsize_min = 32, bsize_max = 1024;
val_t mu = 1.f/evProc.n_gpus;
val_t p_tuples = 0.f;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
p_tuples += evProc.gpu_p[i]*evProc.batch_size[i];
}
idx_t tmp_b = 4;
if(evProc.train_tuples/evProc.sparse_data->num_tuples > 10){
for(idx_t i = 0; i < evProc.n_gpus; ++i){
if(evProc.gpu_p[i]<mu && evProc.batch_size[i]>bsize_min)
//evProc.batch_size[i]/=tmp_b;
evProc.batch_size[i]-=32;
else if(evProc.gpu_p[i]>mu && evProc.batch_size[i]<bsize_max)
//evProc.batch_size[i]*=tmp_b;
evProc.batch_size[i]+=32;
}/**/
val_t max_p = 0., min_p = 1.;
idx_t max_gidx = -1, min_gidx = -1;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
if(max_p < evProc.gpu_p[i]){
max_p = evProc.gpu_p[i];
max_gidx = i;
}
if(min_p > evProc.gpu_p[i]){
min_p = evProc.gpu_p[i];
min_gidx = i;
}
}
max_p/=10, min_p/=10;
std::cerr<<"l2_flag: " << evProc.l2_flag << std::endl;
if(evProc.l2_flag == 1){
evProc.gpu_p[max_gidx] += max_p;
evProc.gpu_p[min_gidx] -= min_p;
}/**/
std::cerr<<"max_weight "<< evProc.gpu_p[max_gidx]<<" on GPU" << max_gidx<<std::endl;
std::cerr<<"min_weight "<< evProc.gpu_p[min_gidx]<<" on GPU" << min_gidx<<std::endl;
} /**/
if(evProc.train_tuples/evProc.sparse_data->num_tuples <= 10){
for(idx_t i = 0; i < evProc.n_gpus; ++i)
evProc.batch_size[i] = 32;
}/**/
evProc.max_gpu_bsize = evProc.batch_size[0];
for(idx_t i = 1; i < evProc.n_gpus; ++i){
evProc.max_gpu_bsize = evProc.batch_size[i] > evProc.max_gpu_bsize?
evProc.batch_size[i] : evProc.max_gpu_bsize;
}
for(idx_t i = 0; i < evProc.n_gpus; ++i) std::cerr<<","<<evProc.batch_size[i];
std::cerr<<", max_gpu_bsize: "<< evProc.max_gpu_bsize << std::endl;
evProc.localavg_timer.Restart();
#pragma omp parallel num_threads(evProc.n_gpus)
{
idx_t i = omp_get_thread_num();
cudaSetDevice(i);
cudaMemcpy(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->gpu_weight+i, &evProc.gpu_p[i],
sizeof(val_t), cudaMemcpyHostToDevice);
for(idx_t j = 0; j < evProc.model->num_layers-1; ++j){
mul_glb_weight<<<evProc.para->num_blocks,
evProc.para->num_threads>>>(
(*evProc.gpu_d_weight[i])[j],
((GPUGDImp*)(evProc.gpuEV[i]->evProc))->gpu_weight+i,//evProc.gpu_weight,
evProc.model->num_units[j]*evProc.model->num_units[j+1],
evProc.para->num_blocks*evProc.para->num_threads);
cudaDeviceSynchronize();
#pragma omp barrier
/////////// b_2gpus ///////////
idx_t n_elements = evProc.model->num_units[j]*evProc.model->num_units[j+1];
idx_t chunks = 8;
idx_t stream_size = n_elements/chunks;
idx_t m_streamsize = stream_size/2;
val_t t_gweight = (1.);
for(idx_t ts = 0; ts < chunks; ++ts){
idx_t dest_gpu = i, src_gpu= (i+1)%2;
idx_t data_idx= ts*stream_size+i*m_streamsize;
cudaMemcpyPeerAsync(
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))
->d_glb_weight[src_gpu]))[j]+data_idx, i,
(*evProc.gpu_d_weight[src_gpu])[j]+data_idx, src_gpu,
sizeof(val_t)*m_streamsize, ((GPUGDImp*)(evProc.gpuEV[i]->evProc))
->gpu_streams[ts%4]);
cublasSetStream(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->d_handle,
((GPUGDImp*)(evProc.gpuEV[i]->evProc))
->gpu_streams[ts%4]);
cublasSaxpy(((GPUGDImp*)(evProc.gpuEV[i]->evProc))
->d_handle, m_streamsize, &t_gweight,
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))
->d_glb_weight[src_gpu]))[j]+data_idx, 1,
(*evProc.gpu_d_weight[i])[j]+data_idx, 1);
cudaMemcpyPeerAsync(
(*evProc.gpu_d_weight[src_gpu])[j]+data_idx, src_gpu,
(*evProc.gpu_d_weight[i])[j]+data_idx, i,
sizeof(val_t)*m_streamsize, ((GPUGDImp*)(evProc.gpuEV[i]->evProc))
->gpu_streams[ts%4]);
}
cudaDeviceSynchronize();
/////////// e_2gpus ///////////
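// Sketch of the 2-GPU exchange above: each GPU's weights were first scaled by its share
// gpu_p[i] (mul_glb_weight); then, for its assigned half of every chunk, GPU i pulls the
// peer's scaled half, accumulates it with axpy (weighted sum), and pushes the result back,
// so both devices end up with the same weighted-average weights for that half.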
/////////// GD momentum
mu_weight<<<evProc.para->num_blocks,evProc.para->num_threads>>>(
(*evProc.gpu_d_weight[i])[j],
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->d_glb_weight[i]))[j],
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->d_glb_weight[evProc.n_gpus]))[j],
evProc.model->num_units[j]*evProc.model->num_units[j+1],
evProc.para->num_blocks*evProc.para->num_threads);
cudaDeviceSynchronize();
copy_weight<<<evProc.para->num_blocks,evProc.para->num_threads>>>(
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->d_glb_weight[evProc.n_gpus]))[j],
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->d_glb_weight[i]))[j],
evProc.model->num_units[j]*evProc.model->num_units[j+1],
evProc.para->num_blocks*evProc.para->num_threads);
cudaDeviceSynchronize();
copy_weight<<<evProc.para->num_blocks,evProc.para->num_threads>>>(
(*(((GPUGDImp*)(evProc.gpuEV[i]->evProc))->d_glb_weight[i]))[j],
(*evProc.gpu_d_weight[i])[j],
evProc.model->num_units[j]*evProc.model->num_units[j+1],
evProc.para->num_blocks*evProc.para->num_threads);
cudaDeviceSynchronize();
/////////// e_momentum
} // end of #layers
//cudaDeviceSynchronize();
}
evProc.localavg_time += evProc.localavg_timer.GetTime();
for(idx_t i = 0; i < evProc.n_gpus; ++i){
std::cerr<<"gpu"<<i<<"_";
myprint_kernel<<<1,1>>>((*evProc.gpu_d_weight[i])[0],1);
cudaDeviceSynchronize();
}
/////////////////average_gpu_peer_e/////////////////
evProc.ofDebug << " localavg_time: " << evProc.localavg_time;
for(idx_t i = 0; i < evProc.n_gpus; ++i){
evProc.ofDebug << " pure_data_time(GPU" << i
<< "): " << evProc.pure_data_time[i]
<< " pure_train_time(GPU" << i
<< "): " << evProc.pure_train_time[i];
}
for(idx_t i = 0; i < evProc.n_gpus; ++i){
evProc.ofDebug << " new_bsize(GPU" << i
<< "): " <<evProc.batch_size[i];
}
evProc.ofDebug << endl;
if(evProc.train_tuples/evProc.sparse_data->num_tuples == 10){
std::cerr<<"save model"<<std::endl;
std::ofstream oFile("amazon-sgdm-warmup");
for(idx_t k = 0; k < evProc.model->num_layers-1; ++k){
cudaMemcpy(evProc.model->weight[k],
(*evProc.gpu_d_weight[msg.gpu_idx])[k],
sizeof(val_t)*evProc.model->num_units[k]*evProc.model->num_units[k+1],
cudaMemcpyDeviceToHost);
for(idx_t j = 0; j < evProc.model->num_units[k]*evProc.model->num_units[k+1]; ++j){
oFile <<std::scientific << evProc.model->weight[k][j] << ",";
}
oFile << std::endl;
}
oFile.close();
}/**/
////////////////////////////////
evProc.sample_tuples = 0;
for(idx_t i = 0; i < evProc.n_gpus; ++i) evProc.train_batches[i] = 0;
++evProc.s_merg_iter;
std::cerr<<"s_merg_iter: "<< evProc.s_merg_iter;
if(evProc.s_merg_iter==evProc.para->sampletest_tuples){
evProc.s_merg_iter = 0;
++evProc.merg_iter;
}
std::cerr<<", merg_iter: " << evProc.merg_iter << std::endl;
if(evProc.s_merg_iter == 0){
// compute testing accuracy
evProc.train_idx = evProc.start_idx;
evProc.start_idx = 0;
evProc.task_idx = 1;
evProc.l2_flag = 1;
evProc.task_testaccu_timer.Restart();
for(idx_t i = 0; i < evProc.n_gpus; ++i){
GPUGD_RunDataLoadMessage_Factory(*evProc.gpuEV[i], i, evProc.sparse_testdata, evProc.start_idx, evProc.para->batch_size, evProc.l2_flag, evProc.task_idx);
evProc.start_idx += evProc.para->batch_size;
evProc.sche_data_timer[i].Restart();
++evProc.accu_send[i];
}
} else {
evProc.task_train_timer.Restart();
for(idx_t i = 0; i < evProc.n_gpus; ++i){
if(evProc.start_idx + evProc.batch_size[i] >= evProc.sparse_data->num_tuples){
evProc.start_idx = 0;
}
GPUGD_RunDataLoadMessage_Factory(*evProc.gpuEV[i], i, evProc.sparse_data, evProc.start_idx, evProc.batch_size[i], evProc.l2_flag, evProc.task_idx);
evProc.start_idx += evProc.batch_size[i];
evProc.train_tuples += evProc.batch_size[i];
++evProc.train_batches[i];
evProc.sche_data_timer[i].Restart();
++evProc.model_send[i];
}
}
} else if(evProc.sample_tuples - evProc.max_gpu_bsize < evProc.para->sample_tuples){
if(evProc.start_idx + evProc.batch_size[msg.gpu_idx] >= evProc.sparse_data->num_tuples){
evProc.start_idx = 0;
}
GPUGD_RunDataLoadMessage_Factory(*evProc.gpuEV[msg.gpu_idx], msg.gpu_idx, evProc.sparse_data, evProc.start_idx, evProc.batch_size[msg.gpu_idx], evProc.l2_flag, evProc.task_idx);
evProc.start_idx += evProc.batch_size[msg.gpu_idx];
evProc.train_tuples += evProc.batch_size[msg.gpu_idx];
++evProc.train_batches[msg.gpu_idx];
evProc.sche_data_timer[msg.gpu_idx].Restart();
++evProc.model_send[msg.gpu_idx];
}
} else if (evProc.task_idx == 3){
evProc.model_send[msg.gpu_idx]-=3;
}
}MESSAGE_HANDLER_DEFINITION_END
MESSAGE_HANDLER_DEFINITION_BEGIN(GPUGDSchedulerImp, newDieHandler, DieMessage)
for(idx_t i = 0; i < evProc.n_gpus; ++i){
DieMessage_Factory(*evProc.gpuEV[i]);
evProc.gpuEV[i]->Join();
}
return true;
}
|
b5094616b5b7b9d00ae23314d4cfa5dfc4cee5bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layer_params.h"
void ConvLayerParams::initializeValues(cudnnHandle_t cudnn_handle, ConvDescriptor *user_params, cudnnDataType_t data_type,
int batch_size, cudnnTensorFormat_t tensor_format, size_t data_type_size, LayerDimension &output_size,
UpdateRule update_rule) {
// create tensor, filter, conv descriptor
checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&output_tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&bias_desc));
checkCUDNN(cudnnCreateFilterDescriptor(&filter_desc));
checkCUDNN(cudnnCreateConvolutionDescriptor(&conv_desc));
C_in = user_params->input_channels;
C_out = user_params->output_channels;
filter_h = user_params->kernel_h;
filter_w = user_params->kernel_w;
kernel_size = C_out * C_in * filter_h * filter_w;
this->data_type = data_type;
this->activation_mode = user_params->activation_mode;
checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type,
batch_size, user_params->input_channels, user_params->input_h, user_params->input_w));
checkCUDNN(cudnnSetFilter4dDescriptor(filter_desc, data_type, tensor_format,
user_params->output_channels, user_params->input_channels, user_params->kernel_h, user_params->kernel_w));
int dilation_h = 1, dilation_w = 1;
checkCUDNN(cudnnSetConvolution2dDescriptor(conv_desc, user_params->pad_h, user_params->pad_w,
user_params->stride_y, user_params->stride_x,
dilation_h, dilation_w,
CUDNN_CROSS_CORRELATION, data_type));
int output_batch_size, output_channels, output_h, output_w;
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(conv_desc, input_tensor, filter_desc,
&output_batch_size, &output_channels, &output_h, &output_w));
checkCUDNN(cudnnSetTensor4dDescriptor(output_tensor, tensor_format, data_type,
output_batch_size, output_channels, output_h, output_w));
checkCUDNN(cudnnSetTensor4dDescriptor(bias_desc, tensor_format, data_type,
1, output_channels, 1, 1));
fwd_req_count = 10;
fwd_perf = (cudnnConvolutionFwdAlgoPerf_t *)malloc(fwd_req_count * sizeof(cudnnConvolutionFwdAlgoPerf_t));
checkCUDNN(cudnnFindConvolutionForwardAlgorithm(cudnn_handle,
input_tensor, filter_desc, conv_desc, output_tensor,
fwd_req_count, &fwd_ret_count, fwd_perf));
// std::cout << "Printing forward conv algo perf\n";
// std::cout << "CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM: " << CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM << std::endl;
// for (int i = 0; i < fwd_ret_count; i++) {
// std::cout << i << std::endl;
// std::cout << "algo: " << fwd_perf[i].algo << std::endl;
// std::cout << "status: " << cudnnGetErrorString(fwd_perf[i].status) << std::endl;
// std::cout << "time(ms): " << fwd_perf[i].time << std::endl;
// std::cout << "memory(MB): " << fwd_perf[i].memory * 1.0 / 1024 / 1024 << std::endl;
// std::cout << "mathType: " << fwd_perf[i].mathType << std::endl;
// std::cout << std::endl;
// }
bwd_filter_req_count = 10;
bwd_filter_perf = (cudnnConvolutionBwdFilterAlgoPerf_t *)malloc(bwd_filter_req_count * sizeof(cudnnConvolutionBwdFilterAlgoPerf_t));
checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithm(cudnn_handle,
input_tensor, output_tensor, conv_desc, filter_desc,
bwd_filter_req_count, &bwd_filter_ret_count, bwd_filter_perf));
// std::cout << "Printing bwdfilter conv algo perf\n";
// std::cout << "CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 " << CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 << std::endl;
// for (int i = 0; i < bwd_filter_ret_count; i++) {
// std::cout << i << std::endl;
// std::cout << "algo: " << bwd_filter_perf[i].algo << std::endl;
// std::cout << "status: " << cudnnGetErrorString(bwd_filter_perf[i].status) << std::endl;
// std::cout << "time(ms): " << bwd_filter_perf[i].time << std::endl;
// std::cout << "memory(MB): " << bwd_filter_perf[i].memory * 1.0 / 1024 / 1024 << std::endl;
// std::cout << "mathType: " << bwd_filter_perf[i].mathType << std::endl;
// std::cout << std::endl;
// }
bwd_data_req_count = 10;
bwd_data_perf = (cudnnConvolutionBwdDataAlgoPerf_t *)malloc(bwd_data_req_count * sizeof(cudnnConvolutionBwdDataAlgoPerf_t));
checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithm(cudnn_handle,
filter_desc, output_tensor, conv_desc, input_tensor,
bwd_data_req_count, &bwd_data_ret_count, bwd_data_perf));
// std::cout << "Printing bwddata conv algo perf\n";
// for (int i = 0; i < bwd_data_ret_count; i++) {
// std::cout << i << std::endl;
// std::cout << "algo: " << bwd_data_perf[i].algo << std::endl;
// std::cout << "status: " << cudnnGetErrorString(bwd_data_perf[i].status) << std::endl;
// std::cout << "time(ms): " << bwd_data_perf[i].time << std::endl;
// std::cout << "memory(MB): " << bwd_data_perf[i].memory * 1.0 / 1024 / 1024 << std::endl;
// std::cout << "mathType: " << bwd_data_perf[i].mathType << std::endl;
// std::cout << std::endl;
// }
this->update_rule = update_rule;
cudnnActivationMode_t mode;
if (activation_mode == SIGMOID)
mode = CUDNN_ACTIVATION_SIGMOID;
else if (activation_mode == RELU)
mode = CUDNN_ACTIVATION_RELU;
else if (activation_mode == TANH)
mode = CUDNN_ACTIVATION_TANH;
else if (activation_mode == CLIPPED_RELU)
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
else if (activation_mode == ELU)
mode = CUDNN_ACTIVATION_ELU;
if (activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnCreateActivationDescriptor(&actv_desc));
checkCUDNN(cudnnSetActivationDescriptor(actv_desc, mode, CUDNN_PROPAGATE_NAN, user_params->actv_coef));
}
output_size.N = output_batch_size, output_size.C = output_channels, output_size.H = output_h, output_size.W = output_w;
}
void ConvLayerParams::allocateSpace(hiprandGenerator_t curand_gen, cudnnDataType_t data_type, size_t data_type_size,
float std_dev, size_t &free_bytes, bool alloc_derivative) {
if (kernel_size % 2 != 0)
kernel_size += 1;
checkCudaErrors(hipMalloc(&W, kernel_size * data_type_size));
checkCudaErrors(hipMalloc(&b, C_out * data_type_size));
if (alloc_derivative) {
checkCudaErrors(hipMalloc(&dW, kernel_size * data_type_size));
checkCudaErrors(hipMalloc(&db, C_out * data_type_size));
}
if (data_type == CUDNN_DATA_FLOAT) {
checkCURAND(hiprandGenerateNormal(curand_gen, (float *)W, kernel_size, 0, std_dev));
hipLaunchKernelGGL(( fillValue<float>), dim3(ceil(1.0 * C_out / BW)), dim3(BW), 0, 0, (float *)b, C_out, 0);
}
else {
checkCURAND(hiprandGenerateNormalDouble(curand_gen, (double *)W, kernel_size, 0, std_dev));
hipLaunchKernelGGL(( fillValue<double>), dim3(ceil(1.0 * C_out / BW)), dim3(BW), 0, 0, (double *)b, C_out, 0);
}
free_bytes = free_bytes - 2 * (kernel_size + C_out) * data_type_size;
}
void ConvLayerParams::cnmemAllocDerivatives(size_t data_type_size, hipStream_t stream) {
checkCNMEM(cnmemMalloc(&dW, kernel_size * data_type_size, stream));
checkCNMEM(cnmemMalloc(&db, C_out * data_type_size, stream));
}
bool ConvLayerParams::cnmemAllocDerivativesCheck(size_t data_type_size, hipStream_t stream,
size_t &max_consume, size_t free_bytes, bool &out_of_memory) {
checkCNMEMSim(cnmemMalloc(&dW, kernel_size * data_type_size, stream),
kernel_size * data_type_size, max_consume, free_bytes, return false, out_of_memory);
checkCNMEMSim(cnmemMalloc(&db, C_out * data_type_size, stream),
C_out * data_type_size, max_consume, free_bytes, return false, out_of_memory);
return true;
}
void ConvLayerParams::stepParams(hipblasHandle_t cublas_handle, double learning_rate) {
float Salpha = -learning_rate;
double Dalpha = -learning_rate;
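// Plain SGD step via axpy with alpha = -learning_rate: W <- W - lr*dW and b <- b - lr*db.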
if (update_rule == SGD) {
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(hipblasSaxpy(cublas_handle, kernel_size,
&Salpha,
(float *)dW, 1,
(float *)W, 1));
checkCUBLAS(hipblasSaxpy(cublas_handle, C_out,
&Salpha,
(float *)db, 1,
(float *)b, 1));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(hipblasDaxpy(cublas_handle, kernel_size,
&Dalpha,
(double *)dW, 1,
(double *)W, 1));
checkCUBLAS(hipblasDaxpy(cublas_handle, C_out,
&Dalpha,
(double *)db, 1,
(double *)b, 1));
}
}
}
void ConvLayerParams::cnmemFreeDerivatives(hipStream_t stream) {
checkCNMEM(cnmemFree(dW, stream));
checkCNMEM(cnmemFree(db, stream));
}
size_t ConvLayerParams::getWorkspaceSize(size_t &free_bytes, ConvLayerParams::ConvDirection conv_direction, vDNNConvAlgo vdnn_conv_algo) {
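// Selection policy implemented below (summary): vDNN_PERFORMANCE_OPTIMAL takes the fastest
// profiled algorithm (index 0) and calls outOfMemory() if its workspace exceeds free_bytes;
// vDNN_MEMORY_OPTIMAL scans for the fixed choices IMPLICIT_GEMM (forward) and ALGO_1
// (backward filter/data) that succeeded and whose workspace fits, otherwise it errors out.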
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (conv_direction == FWD) {
if (fwd_perf[0].memory > free_bytes)
outOfMemory();
fwd_algo = fwd_perf[0].algo;
return fwd_perf[0].memory;
}
else if (conv_direction == BWD_FILTER) {
if (bwd_filter_perf[0].memory > free_bytes)
outOfMemory();
bwd_filter_algo = bwd_filter_perf[0].algo;
return bwd_filter_perf[0].memory;
}
else if (conv_direction == BWD_DATA) {
if (bwd_data_perf[0].memory > free_bytes)
outOfMemory();
bwd_data_algo = bwd_data_perf[0].algo;
return bwd_data_perf[0].memory;
}
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (conv_direction == FWD) {
for (int i = 0; i < fwd_ret_count; i++) {
if (fwd_perf[i].algo == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM && fwd_perf[i].status == CUDNN_STATUS_SUCCESS &&
fwd_perf[i].memory < free_bytes) {
fwd_algo = fwd_perf[i].algo;
return fwd_perf[i].memory;
}
}
}
else if (conv_direction == BWD_FILTER) {
for (int i = 0; i < bwd_filter_ret_count; i++) {
if (bwd_filter_perf[i].algo == CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 && bwd_filter_perf[i].status == CUDNN_STATUS_SUCCESS &&
bwd_filter_perf[i].memory < free_bytes) {
bwd_filter_algo = bwd_filter_perf[i].algo;
// std::cout << "Free bytes " << free_bytes << std::endl;
// std::cout << "bwd_filter_perf[i].memory " << bwd_filter_perf[i].memory << std::endl;
return bwd_filter_perf[i].memory;
}
}
}
else if (conv_direction == BWD_DATA) {
for (int i = 0; i < bwd_data_ret_count; i++) {
if (bwd_data_perf[i].algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 && bwd_data_perf[i].status == CUDNN_STATUS_SUCCESS &&
bwd_data_perf[i].memory < free_bytes) {
bwd_data_algo = bwd_data_perf[i].algo;
return bwd_data_perf[i].memory;
}
}
}
std::cout << "Error in getWorkspaceSize" << std::endl;
exit(0);
}
return 0;
}
workspaceStatus_t ConvLayerParams::getWorkspaceSize(size_t &free_bytes, ConvLayerParams::ConvDirection conv_direction, vDNNConvAlgoPref algo_pref,
bool hard_pref, size_t &workspace_size) {
if (hard_pref) {
if (algo_pref == PREFER_PERFORMANCE_OPTIMAL) {
if (conv_direction == FWD) {
if (fwd_perf[0].memory > free_bytes && fwd_perf[0].status == CUDNN_STATUS_SUCCESS)
return WORKSPACE_STATUS_OUT_OF_MEMORY;
fwd_algo = fwd_perf[0].algo;
fwd_workspace_size = fwd_perf[0].memory;
workspace_size = fwd_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
else if (conv_direction == BWD_FILTER) {
if (bwd_filter_perf[0].memory > free_bytes && bwd_filter_perf[0].status == CUDNN_STATUS_SUCCESS)
return WORKSPACE_STATUS_OUT_OF_MEMORY;
bwd_filter_algo = bwd_filter_perf[0].algo;
bwd_filter_workspace_size = bwd_filter_perf[0].memory;
workspace_size = bwd_filter_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
else if (conv_direction == BWD_DATA) {
if (bwd_data_perf[0].memory > free_bytes && bwd_data_perf[0].status == CUDNN_STATUS_SUCCESS)
return WORKSPACE_STATUS_OUT_OF_MEMORY;
bwd_data_algo = bwd_data_perf[0].algo;
bwd_data_workspace_size = bwd_data_perf[0].memory;
workspace_size = bwd_data_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
}
else if (algo_pref == PREFER_MEMORY_OPTIMAL) {
if (conv_direction == FWD) {
for (int i = 0; i < fwd_ret_count; i++) {
if (fwd_perf[i].algo == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM)
if (fwd_perf[i].memory < free_bytes && fwd_perf[i].status == CUDNN_STATUS_SUCCESS) {
fwd_algo = fwd_perf[i].algo;
fwd_workspace_size = fwd_perf[i].memory;
workspace_size = fwd_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
else
return WORKSPACE_STATUS_OUT_OF_MEMORY;
}
}
else if (conv_direction == BWD_FILTER) {
for (int i = 0; i < bwd_filter_ret_count; i++) {
if (bwd_filter_perf[i].algo == CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1)
if (bwd_filter_perf[i].memory < free_bytes && bwd_filter_perf[i].status == CUDNN_STATUS_SUCCESS) {
bwd_filter_algo = bwd_filter_perf[i].algo;
// std::cout << "Free bytes " << free_bytes << std::endl;
// std::cout << "bwd_filter_perf[i].memory " << bwd_filter_perf[i].memory << std::endl;
bwd_filter_workspace_size = bwd_filter_perf[i].memory;
workspace_size = bwd_filter_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
else
return WORKSPACE_STATUS_OUT_OF_MEMORY;
}
}
else if (conv_direction == BWD_DATA) {
for (int i = 0; i < bwd_data_ret_count; i++) {
if (bwd_data_perf[i].algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_1)
if (bwd_data_perf[i].memory < free_bytes && bwd_data_perf[i].status == CUDNN_STATUS_SUCCESS) {
bwd_data_algo = bwd_data_perf[i].algo;
bwd_data_workspace_size = bwd_data_perf[i].memory;
workspace_size = bwd_data_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
else
return WORKSPACE_STATUS_OUT_OF_MEMORY;
}
}
}
}
else {
// only performance optimal is possible
if (algo_pref == PREFER_PERFORMANCE_OPTIMAL) {
if (conv_direction == FWD) {
for (int i = 0; i < fwd_ret_count; i++) {
if (fwd_perf[i].memory < free_bytes && fwd_perf[i].status == CUDNN_STATUS_SUCCESS) {
fwd_algo = fwd_perf[i].algo;
fwd_workspace_size = fwd_perf[i].memory;
workspace_size = fwd_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
}
}
else if (conv_direction == BWD_FILTER) {
for (int i = 0; i < bwd_filter_ret_count; i++) {
if (bwd_filter_perf[i].memory < free_bytes && bwd_filter_perf[i].status == CUDNN_STATUS_SUCCESS) {
bwd_filter_algo = bwd_filter_perf[i].algo;
// std::cout << "Free bytes " << free_bytes << std::endl;
// std::cout << "bwd_filter_perf[i].memory " << bwd_filter_perf[i].memory << std::endl;
bwd_filter_workspace_size = bwd_filter_perf[i].memory;
workspace_size = bwd_filter_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
}
}
else if (conv_direction == BWD_DATA) {
for (int i = 0; i < bwd_data_ret_count; i++) {
if (bwd_data_perf[i].memory < free_bytes && bwd_data_perf[i].status == CUDNN_STATUS_SUCCESS) {
bwd_data_algo = bwd_data_perf[i].algo;
bwd_data_workspace_size = bwd_data_perf[i].memory;
workspace_size = bwd_data_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
}
}
}
}
return WORKSPACE_STATUS_OUT_OF_MEMORY;
}
void FCLayerParams::initializeValues(FCDescriptor *user_params, int batch_size, cudnnTensorFormat_t tensor_format, cudnnDataType_t data_type,
LayerDimension &output_size, UpdateRule update_rule) {
C_in = user_params->input_channels;
C_out = user_params->output_channels;
weight_matrix_size = C_in * C_out;
this->data_type = data_type;
this->activation_mode = user_params->activation_mode;
this->update_rule = update_rule;
cudnnActivationMode_t mode;
if (activation_mode == SIGMOID)
mode = CUDNN_ACTIVATION_SIGMOID;
else if (activation_mode == RELU)
mode = CUDNN_ACTIVATION_RELU;
else if (activation_mode == TANH)
mode = CUDNN_ACTIVATION_TANH;
else if (activation_mode == CLIPPED_RELU)
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
else if (activation_mode == ELU)
mode = CUDNN_ACTIVATION_ELU;
if (activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnCreateActivationDescriptor(&actv_desc));
checkCUDNN(cudnnSetActivationDescriptor(actv_desc, mode, CUDNN_PROPAGATE_NAN, user_params->actv_coef));
checkCUDNN(cudnnCreateTensorDescriptor(&output_tensor));
checkCUDNN(cudnnSetTensor4dDescriptor(output_tensor, tensor_format, data_type,
batch_size, user_params->output_channels, 1, 1));
}
output_size.N = batch_size, output_size.C = C_out, output_size.H = output_size.W = 1;
}
void FCLayerParams::allocateSpace(hiprandGenerator_t curand_gen, cudnnDataType_t data_type, size_t data_type_size,
float std_dev, size_t &free_bytes, bool alloc_derivative) {
int wt_alloc_size = weight_matrix_size;
if (wt_alloc_size % 2 != 0)
wt_alloc_size += 1;
checkCudaErrors(hipMalloc(&W, wt_alloc_size * data_type_size));
checkCudaErrors(hipMalloc(&b, C_out * data_type_size));
if (alloc_derivative) {
checkCudaErrors(hipMalloc(&dW, wt_alloc_size * data_type_size));
checkCudaErrors(hipMalloc(&db, C_out * data_type_size));
}
if (data_type == CUDNN_DATA_FLOAT) {
checkCURAND(hiprandGenerateNormal(curand_gen, (float *)W, wt_alloc_size, 0, std_dev));
hipLaunchKernelGGL(( fillValue<float>), dim3(ceil(1.0 * C_out / BW)), dim3(BW), 0, 0, (float *)b, C_out, 0);
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCURAND(hiprandGenerateNormalDouble(curand_gen, (double *)W, wt_alloc_size, 0, std_dev));
hipLaunchKernelGGL(( fillValue<double>), dim3(ceil(1.0 * C_out / BW)), dim3(BW), 0, 0, (double *)b, C_out, 0);
}
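// note: the accounting below always reserves room for W, b and their gradients
// (the factor 2), even when alloc_derivative is false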
free_bytes = free_bytes - 2 * (C_in * C_out + C_out) * data_type_size;
}
void FCLayerParams::cnmemAllocDerivatives(size_t data_type_size, hipStream_t stream) {
checkCNMEM(cnmemMalloc(&dW, weight_matrix_size * data_type_size, stream));
checkCNMEM(cnmemMalloc(&db, C_out * data_type_size, stream));
}
bool FCLayerParams::cnmemAllocDerivativesCheck(size_t data_type_size, hipStream_t stream,
size_t &max_consume, size_t free_bytes, bool &out_of_memory) {
checkCNMEMSim(cnmemMalloc(&dW, weight_matrix_size * data_type_size, stream),
weight_matrix_size * data_type_size, max_consume, free_bytes, return false, out_of_memory);
checkCNMEMSim(cnmemMalloc(&db, C_out * data_type_size, stream),
C_out * data_type_size, max_consume, free_bytes, return false, out_of_memory);
return true;
}
void FCLayerParams::stepParams(hipblasHandle_t cublas_handle, double learning_rate) {
float Salpha = -learning_rate;
double Dalpha = -learning_rate;
// {
// float *db_h = (float *)malloc(C_out * sizeof(float));
// checkCudaErrors(hipMemcpy(db_h, db, C_out * sizeof(float), hipMemcpyDeviceToHost));
// for (int i = 0; i < C_out; i++) {
// std::cout << db_h[i] << ' ';
// }
// std::cout << "\n";
// int n;
// std::cin >> n;
// }
if (update_rule == SGD) {
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(hipblasSaxpy(cublas_handle, weight_matrix_size,
&Salpha,
(float *)dW, 1,
(float *)W, 1));
checkCUBLAS(hipblasSaxpy(cublas_handle, C_out,
&Salpha,
(float *)db, 1,
(float *)b, 1));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(hipblasDaxpy(cublas_handle, weight_matrix_size,
&Dalpha,
(double *)dW, 1,
(double *)W, 1));
checkCUBLAS(hipblasDaxpy(cublas_handle, C_out,
&Dalpha,
(double *)db, 1,
(double *)b, 1));
}
}
// {
// float *db_h = (float *)malloc(C_out * sizeof(float));
// checkCudaErrors(hipMemcpy(db_h, b, C_out * sizeof(float), hipMemcpyDeviceToHost));
// for (int i = 0; i < C_out; i++) {
// std::cout << db_h[i] << ' ';
// }
// std::cout << "\n";
// int n;
// std::cin >> n;
// }
}
void FCLayerParams::cnmemFreeDerivatives(hipStream_t stream) {
checkCNMEM(cnmemFree(dW, stream));
checkCNMEM(cnmemFree(db, stream));
}
void DropoutLayerParams::initializeValues(cudnnHandle_t cudnn_handle, DropoutDescriptor *user_params, cudnnDataType_t data_type, int batch_size,
cudnnTensorFormat_t tensor_format, LayerDimension &output_size) {
checkCUDNN(cudnnCreateDropoutDescriptor(&dropout_desc));
checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type,
batch_size, user_params->channels, user_params->h, user_params->w));
checkCUDNN(cudnnDropoutGetStatesSize(cudnn_handle, &state_size));
checkCUDNN(cudnnDropoutGetReserveSpaceSize(input_tensor, &reserved_space_size));
output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w;
}
void DropoutLayerParams::allocateSpace(size_t &free_bytes, cudnnHandle_t cudnn_handle, DropoutDescriptor *user_params, long long seed) {
checkCudaErrors(hipMalloc(&state, state_size));
checkCudaErrors(hipMalloc(&reserved_space, reserved_space_size));
checkCUDNN(cudnnSetDropoutDescriptor(dropout_desc, cudnn_handle, user_params->dropout_value, state, state_size, seed));
free_bytes = free_bytes - (state_size + reserved_space_size);
}
void BatchNormLayerParams::initializeValues(BatchNormDescriptor *user_params, cudnnDataType_t data_type, cudnnTensorFormat_t tensor_format,
int batch_size, LayerDimension &output_size, UpdateRule update_rule) {
checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&sbmv_desc));
c = user_params->channels, h = user_params->h, w = user_params->w;
if (user_params->mode == BATCHNORM_PER_ACTIVATION) {
mode = CUDNN_BATCHNORM_PER_ACTIVATION;
checkCUDNN(cudnnSetTensor4dDescriptor(sbmv_desc, tensor_format, data_type,
1, user_params->channels, user_params->h, user_params->w));
sbmv_size = c * h * w;
}
else if (user_params->mode == BATCHNORM_SPATIAL) {
mode = CUDNN_BATCHNORM_SPATIAL;
checkCUDNN(cudnnSetTensor4dDescriptor(sbmv_desc, tensor_format, data_type,
1, user_params->channels, 1, 1));
sbmv_size = c;
}
checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type,
batch_size, user_params->channels, user_params->h, user_params->w));
factor = user_params->factor;
epsilon = user_params->epsilon;
this->update_rule = update_rule;
this->data_type = data_type;
if (mode == CUDNN_BATCHNORM_PER_ACTIVATION)
allocation_size = c * h * w;
else
allocation_size = c;
output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w;
}
void BatchNormLayerParams::allocateSpace(cudnnDataType_t data_type, size_t data_type_size, size_t &free_bytes, bool alloc_derivative) {
size_t allocation_size_bytes = allocation_size * data_type_size;
checkCudaErrors(hipMalloc(&scale, allocation_size_bytes));
checkCudaErrors(hipMalloc(&bias, allocation_size_bytes));
if (alloc_derivative) {
checkCudaErrors(hipMalloc(&dscale, allocation_size_bytes));
checkCudaErrors(hipMalloc(&dbias, allocation_size_bytes));
}
checkCudaErrors(hipMalloc(&running_mean, allocation_size_bytes));
checkCudaErrors(hipMalloc(&running_variance, allocation_size_bytes));
checkCudaErrors(hipMalloc(&result_save_mean, allocation_size_bytes));
checkCudaErrors(hipMalloc(&result_save_inv_var, allocation_size_bytes));
if (data_type == CUDNN_DATA_FLOAT) {
hipLaunchKernelGGL(( fillValue<float>), dim3(ceil(1.0 * allocation_size / BW)), dim3(BW), 0, 0, (float *)scale, allocation_size, 1);
hipLaunchKernelGGL(( fillValue<float>), dim3(ceil(1.0 * allocation_size / BW)), dim3(BW), 0, 0, (float *)bias, allocation_size, 1);
}
else if (data_type == CUDNN_DATA_DOUBLE) {
hipLaunchKernelGGL(( fillValue<double>), dim3(ceil(1.0 * allocation_size / BW)), dim3(BW), 0, 0, (double *)scale, allocation_size, 1);
hipLaunchKernelGGL(( fillValue<double>), dim3(ceil(1.0 * allocation_size / BW)), dim3(BW), 0, 0, (double *)bias, allocation_size, 1);
}
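// note: only the six always-allocated buffers (scale, bias, running mean/variance,
// saved mean/inverse variance) are counted below; dscale/dbias are not included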
free_bytes = free_bytes - 6 * allocation_size_bytes;
}
void BatchNormLayerParams::cnmemAllocDerivatives(size_t data_type_size, hipStream_t stream) {
checkCNMEM(cnmemMalloc(&dscale, allocation_size * data_type_size, stream));
checkCNMEM(cnmemMalloc(&dbias, allocation_size * data_type_size, stream));
}
bool BatchNormLayerParams::cnmemAllocDerivativesCheck(size_t data_type_size, hipStream_t stream,
size_t &max_consume, size_t free_bytes, bool &out_of_memory) {
checkCNMEMSim(cnmemMalloc(&dscale, allocation_size * data_type_size, stream),
allocation_size * data_type_size, max_consume, free_bytes, return false, out_of_memory);
checkCNMEMSim(cnmemMalloc(&dbias, allocation_size * data_type_size, stream),
allocation_size * data_type_size, max_consume, free_bytes, return false, out_of_memory);
return true;
}
void BatchNormLayerParams::stepParams(hipblasHandle_t cublas_handle, double learning_rate) {
float Salpha = -learning_rate;
double Dalpha = -learning_rate;
if (update_rule == SGD) {
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(hipblasSaxpy(cublas_handle, sbmv_size,
&Salpha,
(float *)dscale, 1,
(float *)scale, 1));
checkCUBLAS(hipblasSaxpy(cublas_handle, sbmv_size,
&Salpha,
(float *)dbias, 1,
(float *)bias, 1));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(hipblasDaxpy(cublas_handle, sbmv_size,
&Dalpha,
(double *)dscale, 1,
(double *)scale, 1));
checkCUBLAS(hipblasDaxpy(cublas_handle, sbmv_size,
&Dalpha,
(double *)dbias, 1,
(double *)bias, 1));
}
}
}
void BatchNormLayerParams::cnmemFreeDerivatives(hipStream_t stream) {
checkCNMEM(cnmemFree(dscale, stream));
checkCNMEM(cnmemFree(dbias, stream));
}
void PoolingLayerParams::initializeValues(PoolingDescriptor *user_params, cudnnDataType_t data_type, cudnnTensorFormat_t tensor_format,
int batch_size, LayerDimension &output_size) {
checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&output_tensor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type,
batch_size, user_params->input_channels, user_params->input_h, user_params->input_w));
checkCUDNN(cudnnCreatePoolingDescriptor(&pool_desc));
cudnnPoolingMode_t mode;
if (user_params->mode == POOLING_MAX)
mode = CUDNN_POOLING_MAX;
else if (user_params->mode == POOLING_AVERAGE_COUNT_INCLUDE_PADDING)
mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
else if (user_params->mode == POOLING_AVERAGE_COUNT_EXCLUDE_PADDING)
mode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
checkCUDNN(cudnnSetPooling2dDescriptor(pool_desc, mode, CUDNN_PROPAGATE_NAN,
user_params->kernel_h, user_params->kernel_w,
user_params->pad_h, user_params->pad_w,
user_params->stride_y, user_params->stride_x));
int output_batch_size, output_channels, output_h, output_w;
checkCUDNN(cudnnGetPooling2dForwardOutputDim(pool_desc, input_tensor,
&output_batch_size, &output_channels, &output_h, &output_w));
checkCUDNN(cudnnSetTensor4dDescriptor(output_tensor, tensor_format, data_type,
output_batch_size, output_channels, output_h, output_w));
output_size.N = output_batch_size, output_size.C = output_channels, output_size.H = output_h, output_size.W = output_w;
}
void PoolingLayerParams::allocateSpace(size_t &free_bytes) {
}
void ActivationLayerParams::initializeValues(ActivationDescriptor *user_params, cudnnDataType_t data_type,
cudnnTensorFormat_t tensor_format, int batch_size, LayerDimension &output_size) {
checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type,
batch_size, user_params->channels, user_params->h, user_params->w));
cudnnActivationMode_t mode;
if (user_params->mode == SIGMOID)
mode = CUDNN_ACTIVATION_SIGMOID;
else if (user_params->mode == RELU)
mode = CUDNN_ACTIVATION_RELU;
else if (user_params->mode == TANH)
mode = CUDNN_ACTIVATION_TANH;
else if (user_params->mode == CLIPPED_RELU)
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
else if (user_params->mode == ELU)
mode = CUDNN_ACTIVATION_ELU;
checkCUDNN(cudnnCreateActivationDescriptor(&actv_desc));
checkCUDNN(cudnnSetActivationDescriptor(actv_desc, mode, CUDNN_PROPAGATE_NAN, user_params->coef));
output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w;
}
void ActivationLayerParams::allocateSpace(size_t &free_bytes) {
}
void SoftmaxLayerParams::initializeValues(SoftmaxDescriptor *user_params, cudnnDataType_t data_type,
cudnnTensorFormat_t tensor_format, int batch_size, LayerDimension &output_size) {
if (user_params->algo == SOFTMAX_FAST)
algo = CUDNN_SOFTMAX_FAST;
else if (user_params->algo == SOFTMAX_ACCURATE)
algo = CUDNN_SOFTMAX_ACCURATE;
if (user_params->mode == SOFTMAX_MODE_INSTANCE)
mode = CUDNN_SOFTMAX_MODE_INSTANCE;
else if (user_params->mode == SOFTMAX_MODE_CHANNEL) {
mode = CUDNN_SOFTMAX_MODE_CHANNEL;
}
checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type,
batch_size, user_params->channels, user_params->h, user_params->w));
output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w;
}
void SoftmaxLayerParams::allocateSpace(size_t &free_bytes) {
} | b5094616b5b7b9d00ae23314d4cfa5dfc4cee5bb.cu | #include "layer_params.h"
void ConvLayerParams::initializeValues(cudnnHandle_t cudnn_handle, ConvDescriptor *user_params, cudnnDataType_t data_type,
int batch_size, cudnnTensorFormat_t tensor_format, size_t data_type_size, LayerDimension &output_size,
UpdateRule update_rule) {
// create tensor, filter, conv descriptor
checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&output_tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&bias_desc));
checkCUDNN(cudnnCreateFilterDescriptor(&filter_desc));
checkCUDNN(cudnnCreateConvolutionDescriptor(&conv_desc));
C_in = user_params->input_channels;
C_out = user_params->output_channels;
filter_h = user_params->kernel_h;
filter_w = user_params->kernel_w;
kernel_size = C_out * C_in * filter_h * filter_w;
this->data_type = data_type;
this->activation_mode = user_params->activation_mode;
checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type,
batch_size, user_params->input_channels, user_params->input_h, user_params->input_w));
checkCUDNN(cudnnSetFilter4dDescriptor(filter_desc, data_type, tensor_format,
user_params->output_channels, user_params->input_channels, user_params->kernel_h, user_params->kernel_w));
int dilation_h = 1, dilation_w = 1;
checkCUDNN(cudnnSetConvolution2dDescriptor(conv_desc, user_params->pad_h, user_params->pad_w,
user_params->stride_y, user_params->stride_x,
dilation_h, dilation_w,
CUDNN_CROSS_CORRELATION, data_type));
int output_batch_size, output_channels, output_h, output_w;
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(conv_desc, input_tensor, filter_desc,
&output_batch_size, &output_channels, &output_h, &output_w));
checkCUDNN(cudnnSetTensor4dDescriptor(output_tensor, tensor_format, data_type,
output_batch_size, output_channels, output_h, output_w));
checkCUDNN(cudnnSetTensor4dDescriptor(bias_desc, tensor_format, data_type,
1, output_channels, 1, 1));
fwd_req_count = 10;
fwd_perf = (cudnnConvolutionFwdAlgoPerf_t *)malloc(fwd_req_count * sizeof(cudnnConvolutionFwdAlgoPerf_t));
checkCUDNN(cudnnFindConvolutionForwardAlgorithm(cudnn_handle,
input_tensor, filter_desc, conv_desc, output_tensor,
fwd_req_count, &fwd_ret_count, fwd_perf));
// std::cout << "Printing forward conv algo perf\n";
// std::cout << "CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM: " << CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM << std::endl;
// for (int i = 0; i < fwd_ret_count; i++) {
// std::cout << i << std::endl;
// std::cout << "algo: " << fwd_perf[i].algo << std::endl;
// std::cout << "status: " << cudnnGetErrorString(fwd_perf[i].status) << std::endl;
// std::cout << "time(ms): " << fwd_perf[i].time << std::endl;
// std::cout << "memory(MB): " << fwd_perf[i].memory * 1.0 / 1024 / 1024 << std::endl;
// std::cout << "mathType: " << fwd_perf[i].mathType << std::endl;
// std::cout << std::endl;
// }
bwd_filter_req_count = 10;
bwd_filter_perf = (cudnnConvolutionBwdFilterAlgoPerf_t *)malloc(bwd_filter_req_count * sizeof(cudnnConvolutionBwdFilterAlgoPerf_t));
checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithm(cudnn_handle,
input_tensor, output_tensor, conv_desc, filter_desc,
bwd_filter_req_count, &bwd_filter_ret_count, bwd_filter_perf));
// std::cout << "Printing bwdfilter conv algo perf\n";
// std::cout << "CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 " << CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 << std::endl;
// for (int i = 0; i < bwd_filter_ret_count; i++) {
// std::cout << i << std::endl;
// std::cout << "algo: " << bwd_filter_perf[i].algo << std::endl;
// std::cout << "status: " << cudnnGetErrorString(bwd_filter_perf[i].status) << std::endl;
// std::cout << "time(ms): " << bwd_filter_perf[i].time << std::endl;
// std::cout << "memory(MB): " << bwd_filter_perf[i].memory * 1.0 / 1024 / 1024 << std::endl;
// std::cout << "mathType: " << bwd_filter_perf[i].mathType << std::endl;
// std::cout << std::endl;
// }
bwd_data_req_count = 10;
bwd_data_perf = (cudnnConvolutionBwdDataAlgoPerf_t *)malloc(bwd_data_req_count * sizeof(cudnnConvolutionBwdDataAlgoPerf_t));
checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithm(cudnn_handle,
filter_desc, output_tensor, conv_desc, input_tensor,
bwd_data_req_count, &bwd_data_ret_count, bwd_data_perf));
// std::cout << "Printing bwddata conv algo perf\n";
// for (int i = 0; i < bwd_data_ret_count; i++) {
// std::cout << i << std::endl;
// std::cout << "algo: " << bwd_data_perf[i].algo << std::endl;
// std::cout << "status: " << cudnnGetErrorString(bwd_data_perf[i].status) << std::endl;
// std::cout << "time(ms): " << bwd_data_perf[i].time << std::endl;
// std::cout << "memory(MB): " << bwd_data_perf[i].memory * 1.0 / 1024 / 1024 << std::endl;
// std::cout << "mathType: " << bwd_data_perf[i].mathType << std::endl;
// std::cout << std::endl;
// }
this->update_rule = update_rule;
cudnnActivationMode_t mode;
if (activation_mode == SIGMOID)
mode = CUDNN_ACTIVATION_SIGMOID;
else if (activation_mode == RELU)
mode = CUDNN_ACTIVATION_RELU;
else if (activation_mode == TANH)
mode = CUDNN_ACTIVATION_TANH;
else if (activation_mode == CLIPPED_RELU)
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
else if (activation_mode == ELU)
mode = CUDNN_ACTIVATION_ELU;
if (activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnCreateActivationDescriptor(&actv_desc));
checkCUDNN(cudnnSetActivationDescriptor(actv_desc, mode, CUDNN_PROPAGATE_NAN, user_params->actv_coef));
}
output_size.N = output_batch_size, output_size.C = output_channels, output_size.H = output_h, output_size.W = output_w;
}
void ConvLayerParams::allocateSpace(curandGenerator_t curand_gen, cudnnDataType_t data_type, size_t data_type_size,
float std_dev, size_t &free_bytes, bool alloc_derivative) {
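// round kernel_size up to an even value: curandGenerateNormal(Double) below generates
// values in pairs, so (presumably) an even number of outputs is required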
if (kernel_size % 2 != 0)
kernel_size += 1;
checkCudaErrors(cudaMalloc(&W, kernel_size * data_type_size));
checkCudaErrors(cudaMalloc(&b, C_out * data_type_size));
if (alloc_derivative) {
checkCudaErrors(cudaMalloc(&dW, kernel_size * data_type_size));
checkCudaErrors(cudaMalloc(&db, C_out * data_type_size));
}
if (data_type == CUDNN_DATA_FLOAT) {
checkCURAND(curandGenerateNormal(curand_gen, (float *)W, kernel_size, 0, std_dev));
fillValue<float><<<ceil(1.0 * C_out / BW), BW>>>((float *)b, C_out, 0);
}
else {
checkCURAND(curandGenerateNormalDouble(curand_gen, (double *)W, kernel_size, 0, std_dev));
fillValue<double><<<ceil(1.0 * C_out / BW), BW>>>((double *)b, C_out, 0);
}
free_bytes = free_bytes - 2 * (kernel_size + C_out) * data_type_size;
}
void ConvLayerParams::cnmemAllocDerivatives(size_t data_type_size, cudaStream_t stream) {
checkCNMEM(cnmemMalloc(&dW, kernel_size * data_type_size, stream));
checkCNMEM(cnmemMalloc(&db, C_out * data_type_size, stream));
}
bool ConvLayerParams::cnmemAllocDerivativesCheck(size_t data_type_size, cudaStream_t stream,
size_t &max_consume, size_t free_bytes, bool &out_of_memory) {
checkCNMEMSim(cnmemMalloc(&dW, kernel_size * data_type_size, stream),
kernel_size * data_type_size, max_consume, free_bytes, return false, out_of_memory);
checkCNMEMSim(cnmemMalloc(&db, C_out * data_type_size, stream),
C_out * data_type_size, max_consume, free_bytes, return false, out_of_memory);
return true;
}
void ConvLayerParams::stepParams(cublasHandle_t cublas_handle, double learning_rate) {
float Salpha = -learning_rate;
double Dalpha = -learning_rate;
if (update_rule == SGD) {
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(cublasSaxpy(cublas_handle, kernel_size,
&Salpha,
(float *)dW, 1,
(float *)W, 1));
checkCUBLAS(cublasSaxpy(cublas_handle, C_out,
&Salpha,
(float *)db, 1,
(float *)b, 1));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(cublasDaxpy(cublas_handle, kernel_size,
&Dalpha,
(double *)dW, 1,
(double *)W, 1));
checkCUBLAS(cublasDaxpy(cublas_handle, C_out,
&Dalpha,
(double *)db, 1,
(double *)b, 1));
}
}
}
void ConvLayerParams::cnmemFreeDerivatives(cudaStream_t stream) {
checkCNMEM(cnmemFree(dW, stream));
checkCNMEM(cnmemFree(db, stream));
}
size_t ConvLayerParams::getWorkspaceSize(size_t &free_bytes, ConvLayerParams::ConvDirection conv_direction, vDNNConvAlgo vdnn_conv_algo) {
if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) {
if (conv_direction == FWD) {
if (fwd_perf[0].memory > free_bytes)
outOfMemory();
fwd_algo = fwd_perf[0].algo;
return fwd_perf[0].memory;
}
else if (conv_direction == BWD_FILTER) {
if (bwd_filter_perf[0].memory > free_bytes)
outOfMemory();
bwd_filter_algo = bwd_filter_perf[0].algo;
return bwd_filter_perf[0].memory;
}
else if (conv_direction == BWD_DATA) {
if (bwd_data_perf[0].memory > free_bytes)
outOfMemory();
bwd_data_algo = bwd_data_perf[0].algo;
return bwd_data_perf[0].memory;
}
}
else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) {
if (conv_direction == FWD) {
for (int i = 0; i < fwd_ret_count; i++) {
if (fwd_perf[i].algo == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM && fwd_perf[i].status == CUDNN_STATUS_SUCCESS &&
fwd_perf[i].memory < free_bytes) {
fwd_algo = fwd_perf[i].algo;
return fwd_perf[i].memory;
}
}
}
else if (conv_direction == BWD_FILTER) {
for (int i = 0; i < bwd_filter_ret_count; i++) {
if (bwd_filter_perf[i].algo == CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 && bwd_filter_perf[i].status == CUDNN_STATUS_SUCCESS &&
bwd_filter_perf[i].memory < free_bytes) {
bwd_filter_algo = bwd_filter_perf[i].algo;
// std::cout << "Free bytes " << free_bytes << std::endl;
// std::cout << "bwd_filter_perf[i].memory " << bwd_filter_perf[i].memory << std::endl;
return bwd_filter_perf[i].memory;
}
}
}
else if (conv_direction == BWD_DATA) {
for (int i = 0; i < bwd_data_ret_count; i++) {
if (bwd_data_perf[i].algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 && bwd_data_perf[i].status == CUDNN_STATUS_SUCCESS &&
bwd_data_perf[i].memory < free_bytes) {
bwd_data_algo = bwd_data_perf[i].algo;
return bwd_data_perf[i].memory;
}
}
}
std::cout << "Error in getWorkspaceSize" << std::endl;
exit(0);
}
return 0;
}
workspaceStatus_t ConvLayerParams::getWorkspaceSize(size_t &free_bytes, ConvLayerParams::ConvDirection conv_direction, vDNNConvAlgoPref algo_pref,
bool hard_pref, size_t &workspace_size) {
if (hard_pref) {
if (algo_pref == PREFER_PERFORMANCE_OPTIMAL) {
if (conv_direction == FWD) {
if (fwd_perf[0].memory > free_bytes && fwd_perf[0].status == CUDNN_STATUS_SUCCESS)
return WORKSPACE_STATUS_OUT_OF_MEMORY;
fwd_algo = fwd_perf[0].algo;
fwd_workspace_size = fwd_perf[0].memory;
workspace_size = fwd_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
else if (conv_direction == BWD_FILTER) {
if (bwd_filter_perf[0].memory > free_bytes && bwd_filter_perf[0].status == CUDNN_STATUS_SUCCESS)
return WORKSPACE_STATUS_OUT_OF_MEMORY;
bwd_filter_algo = bwd_filter_perf[0].algo;
bwd_filter_workspace_size = bwd_filter_perf[0].memory;
workspace_size = bwd_filter_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
else if (conv_direction == BWD_DATA) {
if (bwd_data_perf[0].memory > free_bytes && bwd_data_perf[0].status == CUDNN_STATUS_SUCCESS)
return WORKSPACE_STATUS_OUT_OF_MEMORY;
bwd_data_algo = bwd_data_perf[0].algo;
bwd_data_workspace_size = bwd_data_perf[0].memory;
workspace_size = bwd_data_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
}
else if (algo_pref == PREFER_MEMORY_OPTIMAL) {
if (conv_direction == FWD) {
for (int i = 0; i < fwd_ret_count; i++) {
if (fwd_perf[i].algo == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM)
if (fwd_perf[i].memory < free_bytes && fwd_perf[i].status == CUDNN_STATUS_SUCCESS) {
fwd_algo = fwd_perf[i].algo;
fwd_workspace_size = fwd_perf[i].memory;
workspace_size = fwd_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
else
return WORKSPACE_STATUS_OUT_OF_MEMORY;
}
}
else if (conv_direction == BWD_FILTER) {
for (int i = 0; i < bwd_filter_ret_count; i++) {
if (bwd_filter_perf[i].algo == CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1)
if (bwd_filter_perf[i].memory < free_bytes && bwd_filter_perf[i].status == CUDNN_STATUS_SUCCESS) {
bwd_filter_algo = bwd_filter_perf[i].algo;
// std::cout << "Free bytes " << free_bytes << std::endl;
// std::cout << "bwd_filter_perf[i].memory " << bwd_filter_perf[i].memory << std::endl;
bwd_filter_workspace_size = bwd_filter_perf[i].memory;
workspace_size = bwd_filter_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
else
return WORKSPACE_STATUS_OUT_OF_MEMORY;
}
}
else if (conv_direction == BWD_DATA) {
for (int i = 0; i < bwd_data_ret_count; i++) {
if (bwd_data_perf[i].algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_1)
if (bwd_data_perf[i].memory < free_bytes && bwd_data_perf[i].status == CUDNN_STATUS_SUCCESS) {
bwd_data_algo = bwd_data_perf[i].algo;
bwd_data_workspace_size = bwd_data_perf[i].memory;
workspace_size = bwd_data_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
else
return WORKSPACE_STATUS_OUT_OF_MEMORY;
}
}
}
}
else {
// soft preference: only the performance-optimal preference is handled here; pick the fastest algorithm that fits in free memory
if (algo_pref == PREFER_PERFORMANCE_OPTIMAL) {
if (conv_direction == FWD) {
for (int i = 0; i < fwd_ret_count; i++) {
if (fwd_perf[i].memory < free_bytes && fwd_perf[i].status == CUDNN_STATUS_SUCCESS) {
fwd_algo = fwd_perf[i].algo;
fwd_workspace_size = fwd_perf[i].memory;
workspace_size = fwd_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
}
}
else if (conv_direction == BWD_FILTER) {
for (int i = 0; i < bwd_filter_ret_count; i++) {
if (bwd_filter_perf[i].memory < free_bytes && bwd_filter_perf[i].status == CUDNN_STATUS_SUCCESS) {
bwd_filter_algo = bwd_filter_perf[i].algo;
// std::cout << "Free bytes " << free_bytes << std::endl;
// std::cout << "bwd_filter_perf[i].memory " << bwd_filter_perf[i].memory << std::endl;
bwd_filter_workspace_size = bwd_filter_perf[i].memory;
workspace_size = bwd_filter_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
}
}
else if (conv_direction == BWD_DATA) {
for (int i = 0; i < bwd_data_ret_count; i++) {
if (bwd_data_perf[i].memory < free_bytes && bwd_data_perf[i].status == CUDNN_STATUS_SUCCESS) {
bwd_data_algo = bwd_data_perf[i].algo;
bwd_data_workspace_size = bwd_data_perf[i].memory;
workspace_size = bwd_data_workspace_size;
return WORKSPACE_STATUS_SUCCESS;
}
}
}
}
}
return WORKSPACE_STATUS_OUT_OF_MEMORY;
}
void FCLayerParams::initializeValues(FCDescriptor *user_params, int batch_size, cudnnTensorFormat_t tensor_format, cudnnDataType_t data_type,
LayerDimension &output_size, UpdateRule update_rule) {
C_in = user_params->input_channels;
C_out = user_params->output_channels;
weight_matrix_size = C_in * C_out;
this->data_type = data_type;
this->activation_mode = user_params->activation_mode;
this->update_rule = update_rule;
cudnnActivationMode_t mode;
if (activation_mode == SIGMOID)
mode = CUDNN_ACTIVATION_SIGMOID;
else if (activation_mode == RELU)
mode = CUDNN_ACTIVATION_RELU;
else if (activation_mode == TANH)
mode = CUDNN_ACTIVATION_TANH;
else if (activation_mode == CLIPPED_RELU)
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
else if (activation_mode == ELU)
mode = CUDNN_ACTIVATION_ELU;
if (activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnCreateActivationDescriptor(&actv_desc));
checkCUDNN(cudnnSetActivationDescriptor(actv_desc, mode, CUDNN_PROPAGATE_NAN, user_params->actv_coef));
checkCUDNN(cudnnCreateTensorDescriptor(&output_tensor));
checkCUDNN(cudnnSetTensor4dDescriptor(output_tensor, tensor_format, data_type,
batch_size, user_params->output_channels, 1, 1));
}
output_size.N = batch_size, output_size.C = C_out, output_size.H = output_size.W = 1;
}
void FCLayerParams::allocateSpace(curandGenerator_t curand_gen, cudnnDataType_t data_type, size_t data_type_size,
float std_dev, size_t &free_bytes, bool alloc_derivative) {
int wt_alloc_size = weight_matrix_size;
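// round the count up to an even value: curandGenerateNormal(Double) below generates
// values in pairs, so (presumably) an even number of outputs is required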
if (wt_alloc_size % 2 != 0)
wt_alloc_size += 1;
checkCudaErrors(cudaMalloc(&W, wt_alloc_size * data_type_size));
checkCudaErrors(cudaMalloc(&b, C_out * data_type_size));
if (alloc_derivative) {
checkCudaErrors(cudaMalloc(&dW, wt_alloc_size * data_type_size));
checkCudaErrors(cudaMalloc(&db, C_out * data_type_size));
}
if (data_type == CUDNN_DATA_FLOAT) {
checkCURAND(curandGenerateNormal(curand_gen, (float *)W, wt_alloc_size, 0, std_dev));
fillValue<float><<<ceil(1.0 * C_out / BW), BW>>>((float *)b, C_out, 0);
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCURAND(curandGenerateNormalDouble(curand_gen, (double *)W, wt_alloc_size, 0, std_dev));
fillValue<double><<<ceil(1.0 * C_out / BW), BW>>>((double *)b, C_out, 0);
}
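// note: the accounting below always reserves room for W, b and their gradients
// (the factor 2), even when alloc_derivative is false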
free_bytes = free_bytes - 2 * (C_in * C_out + C_out) * data_type_size;
}
void FCLayerParams::cnmemAllocDerivatives(size_t data_type_size, cudaStream_t stream) {
checkCNMEM(cnmemMalloc(&dW, weight_matrix_size * data_type_size, stream));
checkCNMEM(cnmemMalloc(&db, C_out * data_type_size, stream));
}
bool FCLayerParams::cnmemAllocDerivativesCheck(size_t data_type_size, cudaStream_t stream,
size_t &max_consume, size_t free_bytes, bool &out_of_memory) {
checkCNMEMSim(cnmemMalloc(&dW, weight_matrix_size * data_type_size, stream),
weight_matrix_size * data_type_size, max_consume, free_bytes, return false, out_of_memory);
checkCNMEMSim(cnmemMalloc(&db, C_out * data_type_size, stream),
C_out * data_type_size, max_consume, free_bytes, return false, out_of_memory);
return true;
}
void FCLayerParams::stepParams(cublasHandle_t cublas_handle, double learning_rate) {
float Salpha = -learning_rate;
double Dalpha = -learning_rate;
// {
// float *db_h = (float *)malloc(C_out * sizeof(float));
// checkCudaErrors(cudaMemcpy(db_h, db, C_out * sizeof(float), cudaMemcpyDeviceToHost));
// for (int i = 0; i < C_out; i++) {
// std::cout << db_h[i] << ' ';
// }
// std::cout << "\n";
// int n;
// std::cin >> n;
// }
if (update_rule == SGD) {
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(cublasSaxpy(cublas_handle, weight_matrix_size,
&Salpha,
(float *)dW, 1,
(float *)W, 1));
checkCUBLAS(cublasSaxpy(cublas_handle, C_out,
&Salpha,
(float *)db, 1,
(float *)b, 1));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(cublasDaxpy(cublas_handle, weight_matrix_size,
&Dalpha,
(double *)dW, 1,
(double *)W, 1));
checkCUBLAS(cublasDaxpy(cublas_handle, C_out,
&Dalpha,
(double *)db, 1,
(double *)b, 1));
}
}
// {
// float *db_h = (float *)malloc(C_out * sizeof(float));
// checkCudaErrors(cudaMemcpy(db_h, b, C_out * sizeof(float), cudaMemcpyDeviceToHost));
// for (int i = 0; i < C_out; i++) {
// std::cout << db_h[i] << ' ';
// }
// std::cout << "\n";
// int n;
// std::cin >> n;
// }
}
void FCLayerParams::cnmemFreeDerivatives(cudaStream_t stream) {
checkCNMEM(cnmemFree(dW, stream));
checkCNMEM(cnmemFree(db, stream));
}
void DropoutLayerParams::initializeValues(cudnnHandle_t cudnn_handle, DropoutDescriptor *user_params, cudnnDataType_t data_type, int batch_size,
cudnnTensorFormat_t tensor_format, LayerDimension &output_size) {
checkCUDNN(cudnnCreateDropoutDescriptor(&dropout_desc));
checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type,
batch_size, user_params->channels, user_params->h, user_params->w));
checkCUDNN(cudnnDropoutGetStatesSize(cudnn_handle, &state_size));
checkCUDNN(cudnnDropoutGetReserveSpaceSize(input_tensor, &reserved_space_size));
output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w;
}
void DropoutLayerParams::allocateSpace(size_t &free_bytes, cudnnHandle_t cudnn_handle, DropoutDescriptor *user_params, long long seed) {
checkCudaErrors(cudaMalloc(&state, state_size));
checkCudaErrors(cudaMalloc(&reserved_space, reserved_space_size));
checkCUDNN(cudnnSetDropoutDescriptor(dropout_desc, cudnn_handle, user_params->dropout_value, state, state_size, seed));
free_bytes = free_bytes - (state_size + reserved_space_size);
}
void BatchNormLayerParams::initializeValues(BatchNormDescriptor *user_params, cudnnDataType_t data_type, cudnnTensorFormat_t tensor_format,
int batch_size, LayerDimension &output_size, UpdateRule update_rule) {
checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&sbmv_desc));
c = user_params->channels, h = user_params->h, w = user_params->w;
if (user_params->mode == BATCHNORM_PER_ACTIVATION) {
mode = CUDNN_BATCHNORM_PER_ACTIVATION;
checkCUDNN(cudnnSetTensor4dDescriptor(sbmv_desc, tensor_format, data_type,
1, user_params->channels, user_params->h, user_params->w));
sbmv_size = c * h * w;
}
else if (user_params->mode == BATCHNORM_SPATIAL) {
mode = CUDNN_BATCHNORM_SPATIAL;
checkCUDNN(cudnnSetTensor4dDescriptor(sbmv_desc, tensor_format, data_type,
1, user_params->channels, 1, 1));
sbmv_size = c;
}
checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type,
batch_size, user_params->channels, user_params->h, user_params->w));
factor = user_params->factor;
epsilon = user_params->epsilon;
this->update_rule = update_rule;
this->data_type = data_type;
if (mode == CUDNN_BATCHNORM_PER_ACTIVATION)
allocation_size = c * h * w;
else
allocation_size = c;
output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w;
}
void BatchNormLayerParams::allocateSpace(cudnnDataType_t data_type, size_t data_type_size, size_t &free_bytes, bool alloc_derivative) {
size_t allocation_size_bytes = allocation_size * data_type_size;
checkCudaErrors(cudaMalloc(&scale, allocation_size_bytes));
checkCudaErrors(cudaMalloc(&bias, allocation_size_bytes));
if (alloc_derivative) {
checkCudaErrors(cudaMalloc(&dscale, allocation_size_bytes));
checkCudaErrors(cudaMalloc(&dbias, allocation_size_bytes));
}
checkCudaErrors(cudaMalloc(&running_mean, allocation_size_bytes));
checkCudaErrors(cudaMalloc(&running_variance, allocation_size_bytes));
checkCudaErrors(cudaMalloc(&result_save_mean, allocation_size_bytes));
checkCudaErrors(cudaMalloc(&result_save_inv_var, allocation_size_bytes));
if (data_type == CUDNN_DATA_FLOAT) {
fillValue<float><<<ceil(1.0 * allocation_size / BW), BW>>>((float *)scale, allocation_size, 1);
fillValue<float><<<ceil(1.0 * allocation_size / BW), BW>>>((float *)bias, allocation_size, 1);
}
else if (data_type == CUDNN_DATA_DOUBLE) {
fillValue<double><<<ceil(1.0 * allocation_size / BW), BW>>>((double *)scale, allocation_size, 1);
fillValue<double><<<ceil(1.0 * allocation_size / BW), BW>>>((double *)bias, allocation_size, 1);
}
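// note: only the six always-allocated buffers (scale, bias, running mean/variance,
// saved mean/inverse variance) are counted below; dscale/dbias are not included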
free_bytes = free_bytes - 6 * allocation_size_bytes;
}
void BatchNormLayerParams::cnmemAllocDerivatives(size_t data_type_size, cudaStream_t stream) {
checkCNMEM(cnmemMalloc(&dscale, allocation_size * data_type_size, stream));
checkCNMEM(cnmemMalloc(&dbias, allocation_size * data_type_size, stream));
}
bool BatchNormLayerParams::cnmemAllocDerivativesCheck(size_t data_type_size, cudaStream_t stream,
size_t &max_consume, size_t free_bytes, bool &out_of_memory) {
checkCNMEMSim(cnmemMalloc(&dscale, allocation_size * data_type_size, stream),
allocation_size * data_type_size, max_consume, free_bytes, return false, out_of_memory);
checkCNMEMSim(cnmemMalloc(&dbias, allocation_size * data_type_size, stream),
allocation_size * data_type_size, max_consume, free_bytes, return false, out_of_memory);
return true;
}
void BatchNormLayerParams::stepParams(cublasHandle_t cublas_handle, double learning_rate) {
float Salpha = -learning_rate;
double Dalpha = -learning_rate;
if (update_rule == SGD) {
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(cublasSaxpy(cublas_handle, sbmv_size,
&Salpha,
(float *)dscale, 1,
(float *)scale, 1));
checkCUBLAS(cublasSaxpy(cublas_handle, sbmv_size,
&Salpha,
(float *)dbias, 1,
(float *)bias, 1));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(cublasDaxpy(cublas_handle, sbmv_size,
&Dalpha,
(double *)dscale, 1,
(double *)scale, 1));
checkCUBLAS(cublasDaxpy(cublas_handle, sbmv_size,
&Dalpha,
(double *)dbias, 1,
(double *)bias, 1));
}
}
}
void BatchNormLayerParams::cnmemFreeDerivatives(cudaStream_t stream) {
checkCNMEM(cnmemFree(dscale, stream));
checkCNMEM(cnmemFree(dbias, stream));
}
void PoolingLayerParams::initializeValues(PoolingDescriptor *user_params, cudnnDataType_t data_type, cudnnTensorFormat_t tensor_format,
int batch_size, LayerDimension &output_size) {
checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&output_tensor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type,
batch_size, user_params->input_channels, user_params->input_h, user_params->input_w));
checkCUDNN(cudnnCreatePoolingDescriptor(&pool_desc));
cudnnPoolingMode_t mode;
if (user_params->mode == POOLING_MAX)
mode = CUDNN_POOLING_MAX;
else if (user_params->mode == POOLING_AVERAGE_COUNT_INCLUDE_PADDING)
mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
else if (user_params->mode == POOLING_AVERAGE_COUNT_EXCLUDE_PADDING)
mode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
checkCUDNN(cudnnSetPooling2dDescriptor(pool_desc, mode, CUDNN_PROPAGATE_NAN,
user_params->kernel_h, user_params->kernel_w,
user_params->pad_h, user_params->pad_w,
user_params->stride_y, user_params->stride_x));
int output_batch_size, output_channels, output_h, output_w;
checkCUDNN(cudnnGetPooling2dForwardOutputDim(pool_desc, input_tensor,
&output_batch_size, &output_channels, &output_h, &output_w));
checkCUDNN(cudnnSetTensor4dDescriptor(output_tensor, tensor_format, data_type,
output_batch_size, output_channels, output_h, output_w));
output_size.N = output_batch_size, output_size.C = output_channels, output_size.H = output_h, output_size.W = output_w;
}
void PoolingLayerParams::allocateSpace(size_t &free_bytes) {
}
void ActivationLayerParams::initializeValues(ActivationDescriptor *user_params, cudnnDataType_t data_type,
cudnnTensorFormat_t tensor_format, int batch_size, LayerDimension &output_size) {
checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type,
batch_size, user_params->channels, user_params->h, user_params->w));
cudnnActivationMode_t mode;
if (user_params->mode == SIGMOID)
mode = CUDNN_ACTIVATION_SIGMOID;
else if (user_params->mode == RELU)
mode = CUDNN_ACTIVATION_RELU;
else if (user_params->mode == TANH)
mode = CUDNN_ACTIVATION_TANH;
else if (user_params->mode == CLIPPED_RELU)
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
else if (user_params->mode == ELU)
mode = CUDNN_ACTIVATION_ELU;
checkCUDNN(cudnnCreateActivationDescriptor(&actv_desc));
checkCUDNN(cudnnSetActivationDescriptor(actv_desc, mode, CUDNN_PROPAGATE_NAN, user_params->coef));
output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w;
}
void ActivationLayerParams::allocateSpace(size_t &free_bytes) {
}
void SoftmaxLayerParams::initializeValues(SoftmaxDescriptor *user_params, cudnnDataType_t data_type,
cudnnTensorFormat_t tensor_format, int batch_size, LayerDimension &output_size) {
if (user_params->algo == SOFTMAX_FAST)
algo = CUDNN_SOFTMAX_FAST;
else if (user_params->algo == SOFTMAX_ACCURATE)
algo = CUDNN_SOFTMAX_ACCURATE;
if (user_params->mode == SOFTMAX_MODE_INSTANCE)
mode = CUDNN_SOFTMAX_MODE_INSTANCE;
else if (user_params->mode == SOFTMAX_MODE_CHANNEL) {
mode = CUDNN_SOFTMAX_MODE_CHANNEL;
}
checkCUDNN(cudnnCreateTensorDescriptor(&input_tensor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_tensor, tensor_format, data_type,
batch_size, user_params->channels, user_params->h, user_params->w));
output_size.N = batch_size, output_size.C = user_params->channels, output_size.H = user_params->h, output_size.W = user_params->w;
}
void SoftmaxLayerParams::allocateSpace(size_t &free_bytes) {
} |
5734e597381ea664b6d3caa904e22bdd8f65758d.hip | // !!! This is a file automatically generated by hipify!!!
#include "G4HepEmMaterialData.hh"
#include <hip/hip_runtime.h>
#include "G4HepEmCuUtils.hh"
#include <iostream>
void CopyMaterialDataToGPU(struct G4HepEmMaterialData* onCPU, struct G4HepEmMaterialData** onGPU) {
// clean away previous (if any)
FreeMaterialDataOnGPU ( onGPU );
//
int numMatData = onCPU->fNumMaterialData;
// allocate array of G4HepEmMatData structures on _d (its pointer address will be on _h)
struct G4HepEmMatData* arrayHto_d;
gpuErrchk ( hipMalloc ( &arrayHto_d, sizeof( struct G4HepEmMatData )*numMatData ) );
// fill in the structures on _d by copying the G4HepEmMatData one-by-one such that:
// - for each G4HepEmMatData struct, first allocate the int and double arrays
// on the device and set the corresponding pointers (kept on _h) as the struct
// members (other members can simply be set by value, such as fNumOfElement
// or fDensity)
// - copy this struct from the _h to _d: the result of the copy will contain
// pointers to device memory residing on the device
struct G4HepEmMatData* dataHtoD_h = new G4HepEmMatData;
for (int imd=0; imd<numMatData; ++imd) {
struct G4HepEmMatData& mData_h = onCPU->fMaterialData[imd];
int numElem = mData_h.fNumOfElement;
dataHtoD_h->fG4MatIndex = mData_h.fG4MatIndex;
dataHtoD_h->fNumOfElement = mData_h.fNumOfElement;
dataHtoD_h->fDensity = mData_h.fDensity;
dataHtoD_h->fDensityCorFactor = mData_h.fDensityCorFactor;
dataHtoD_h->fElectronDensity = mData_h.fElectronDensity;
dataHtoD_h->fRadiationLength = mData_h.fRadiationLength;
//
gpuErrchk ( hipMalloc ( &(dataHtoD_h->fElementVect), sizeof( int )*numElem ) );
gpuErrchk ( hipMemcpy ( dataHtoD_h->fElementVect, mData_h.fElementVect, sizeof( int )*numElem, hipMemcpyHostToDevice ) );
//
gpuErrchk ( hipMalloc ( &(dataHtoD_h->fNumOfAtomsPerVolumeVect), sizeof( double)*numElem ) );
gpuErrchk ( hipMemcpy ( dataHtoD_h->fNumOfAtomsPerVolumeVect, mData_h.fNumOfAtomsPerVolumeVect, sizeof( double )*numElem, hipMemcpyHostToDevice ) );
//
// copy this G4HepEmMatData structure to _d
gpuErrchk ( hipMemcpy ( &(arrayHto_d[imd]), dataHtoD_h, sizeof( struct G4HepEmMatData ), hipMemcpyHostToDevice ) );
}
// now create a helper G4HepEmMaterialData and set only its fNumMaterialData and
// `struct G4HepEmMatData* fMaterialData` array member, then copy this
// structure from _h to the corresponding structure on _d
struct G4HepEmMaterialData* matData_h = new G4HepEmMaterialData;
matData_h->fNumMaterialData = numMatData;
matData_h->fMaterialData = arrayHto_d;
gpuErrchk ( hipMalloc ( onGPU, sizeof( struct G4HepEmMaterialData ) ) );
gpuErrchk ( hipMemcpy ( *onGPU, matData_h, sizeof( struct G4HepEmMaterialData ), hipMemcpyHostToDevice ) );
// delete all helper objects allocated
delete dataHtoD_h;
delete matData_h;
}
//
void FreeMaterialDataOnGPU ( struct G4HepEmMaterialData** onGPU) {
if ( *onGPU ) {
// NOTE:
// - (*onGPU) is a pointer to device memory while onGPU (i.e. struct G4HepEmMaterialData**)
// is the address of this pointer that is located on the host memory
// - in order to be able to free dynamically allocated array members, such as
// the `G4HepEmMaterialData::fMaterialData` member, which is of type
// `struct G4HepEmMatData*`, first we need to copy the address of that
// pointer from the device to the host. Then we can call hipFree from
// the host on the device pointer just copied. The same applies if we want to
// access any struct of struct member pointers.
//
// So first copy the struct G4HepEmMaterialData* from _d to _h in order
// to have (1) _h side access to the `struct G4HepEmMatData*` array pointer
// member and to the (2) fNumMaterialData (int) member.
struct G4HepEmMaterialData* matData_h = new G4HepEmMaterialData;
gpuErrchk ( hipMemcpy ( matData_h, *onGPU, sizeof( struct G4HepEmMaterialData ), hipMemcpyDeviceToHost ) );
int mumMaterialData = matData_h->fNumMaterialData;
// Then copy each of the struct G4HepEmMatData structures of the array
// from _d to _h in order to have their int* and double* pointer members
// on the host, then free the pointed device memory by using these _h side
// pointer addresses to _d side memory locations.
struct G4HepEmMatData* mData_h = new G4HepEmMatData;
for (int imd=0; imd<mumMaterialData; ++imd) {
gpuErrchk ( hipMemcpy ( mData_h, &(matData_h->fMaterialData[imd]), sizeof( struct G4HepEmMatData ), hipMemcpyDeviceToHost ) );
hipFree ( mData_h->fElementVect );
hipFree ( mData_h->fNumOfAtomsPerVolumeVect );
}
// Then, at the end, free the whole `struct G4HepEmMatData* fMaterialData`
// array (after all dynamically allocated memory is freed) by using the
// _h side address of the _d side memory pointer.
hipFree ( matData_h->fMaterialData );
// }
// At the very end, we can free the whole struct.
hipFree( *onGPU );
*onGPU = nullptr;
// free auxiliary objects
delete matData_h;
delete mData_h;
}
}
// The other way would be to flatten the data structures:
// 1. Make a single array of all fElementVect entries and another of all
// fNumOfAtomsPerVolumeVect entries.
// 2. Keep track in the material data (with a single int instead of the arrays)
// of the index where the data related to the given material starts in
// these common, flat arrays.
// This will be used in case of more important data structures such as lambda
// and loss tables.
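// For illustration only (a hypothetical sketch, not part of this implementation),
// the flattened members could look like:
//   int* fFlatElementVect; // all materials' element indices, concatenated
//   double* fFlatNumOfAtomsPerVolumeVect; // same layout as fFlatElementVect
//   int* fElementStart; // per material: offset of its first entry in the flat arrays
// A single hipMalloc/hipMemcpy per array would then replace the per-material
// allocations and copies done above.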
| 5734e597381ea664b6d3caa904e22bdd8f65758d.cu |
#include "G4HepEmMaterialData.hh"
#include <cuda_runtime.h>
#include "G4HepEmCuUtils.hh"
#include <iostream>
void CopyMaterialDataToGPU(struct G4HepEmMaterialData* onCPU, struct G4HepEmMaterialData** onGPU) {
// clean away previous (if any)
FreeMaterialDataOnGPU ( onGPU );
//
int numMatData = onCPU->fNumMaterialData;
// allocate array of G4HepEmMatData structures on _d (its pointer address will be on _h)
struct G4HepEmMatData* arrayHto_d;
gpuErrchk ( cudaMalloc ( &arrayHto_d, sizeof( struct G4HepEmMatData )*numMatData ) );
// fill in the structures on _d by copying the G4HepEmMatData one-by-one such that:
// - for each G4HepEmMatData struct, first allocate the int and double arrays
// on the device and set the corresponding pointers (kept on _h) as the struct
// members (other members can simply be set by value, such as fNumOfElement
// or fDensity)
// - copy this struct from the _h to _d: the result of the copy will contain
// pointers to device memory residing on the device
struct G4HepEmMatData* dataHtoD_h = new G4HepEmMatData;
for (int imd=0; imd<numMatData; ++imd) {
struct G4HepEmMatData& mData_h = onCPU->fMaterialData[imd];
int numElem = mData_h.fNumOfElement;
dataHtoD_h->fG4MatIndex = mData_h.fG4MatIndex;
dataHtoD_h->fNumOfElement = mData_h.fNumOfElement;
dataHtoD_h->fDensity = mData_h.fDensity;
dataHtoD_h->fDensityCorFactor = mData_h.fDensityCorFactor;
dataHtoD_h->fElectronDensity = mData_h.fElectronDensity;
dataHtoD_h->fRadiationLength = mData_h.fRadiationLength;
//
gpuErrchk ( cudaMalloc ( &(dataHtoD_h->fElementVect), sizeof( int )*numElem ) );
gpuErrchk ( cudaMemcpy ( dataHtoD_h->fElementVect, mData_h.fElementVect, sizeof( int )*numElem, cudaMemcpyHostToDevice ) );
//
gpuErrchk ( cudaMalloc ( &(dataHtoD_h->fNumOfAtomsPerVolumeVect), sizeof( double)*numElem ) );
gpuErrchk ( cudaMemcpy ( dataHtoD_h->fNumOfAtomsPerVolumeVect, mData_h.fNumOfAtomsPerVolumeVect, sizeof( double )*numElem, cudaMemcpyHostToDevice ) );
//
// copy this G4HepEmMatData structure to _d
gpuErrchk ( cudaMemcpy ( &(arrayHto_d[imd]), dataHtoD_h, sizeof( struct G4HepEmMatData ), cudaMemcpyHostToDevice ) );
}
// now create a helper G4HepEmMaterialData and set only its fNumMaterialData and
// `struct G4HepEmMatData* fMaterialData` array member, then copy this
// structure from _h to the corresponding structure on _d
struct G4HepEmMaterialData* matData_h = new G4HepEmMaterialData;
matData_h->fNumMaterialData = numMatData;
matData_h->fMaterialData = arrayHto_d;
gpuErrchk ( cudaMalloc ( onGPU, sizeof( struct G4HepEmMaterialData ) ) );
gpuErrchk ( cudaMemcpy ( *onGPU, matData_h, sizeof( struct G4HepEmMaterialData ), cudaMemcpyHostToDevice ) );
// delete all helper objects allocated
delete dataHtoD_h;
delete matData_h;
}
//
void FreeMaterialDataOnGPU ( struct G4HepEmMaterialData** onGPU) {
if ( *onGPU ) {
// NOTE:
// - (*onGPU) is a pointer to device memory while onGPU (i.e. struct G4HepEmMaterialData**)
// is the address of this pointer that is located on the host memory
// - in order to be able to free dynamically allocated array members, such as
// the `G4HepEmMaterialData::fMaterialData` member, which is of type
// `struct G4HepEmMatData*`, first we need to copy the address of that
// pointer from the device to the host. Then we can call cudaFree from
// the host on the device pointer just copied. The same applies if we want to
// access any struct of struct member pointers.
//
// So first copy the struct G4HepEmMaterialData* from _d to _h in order
// to have (1) _h side access to the `struct G4HepEmMatData*` array pointer
// member and to the (2) fNumMaterialData (int) member.
struct G4HepEmMaterialData* matData_h = new G4HepEmMaterialData;
gpuErrchk ( cudaMemcpy ( matData_h, *onGPU, sizeof( struct G4HepEmMaterialData ), cudaMemcpyDeviceToHost ) );
int mumMaterialData = matData_h->fNumMaterialData;
// Then copy each of the struct G4HepEmMatData structures of the array
// from _d to _h in order to have their int* and double* pointer members
// on the host, then free the pointed device memory by using these _h side
// pointer addresses to _d side memory locations.
struct G4HepEmMatData* mData_h = new G4HepEmMatData;
for (int imd=0; imd<mumMaterialData; ++imd) {
gpuErrchk ( cudaMemcpy ( mData_h, &(matData_h->fMaterialData[imd]), sizeof( struct G4HepEmMatData ), cudaMemcpyDeviceToHost ) );
cudaFree ( mData_h->fElementVect );
cudaFree ( mData_h->fNumOfAtomsPerVolumeVect );
}
// Then, at the end, free the whole `struct G4HepEmMatData* fMaterialData`
// array (after all dynamically allocated memory is freed) by using the
// _h side address of the _d side memory pointer.
cudaFree ( matData_h->fMaterialData );
// }
// At the very end, we can free the whole struct.
cudaFree( *onGPU );
*onGPU = nullptr;
// free auxiliary objects
delete matData_h;
delete mData_h;
}
}
// The other way would be to flatten the data structures:
// 1. Make a single array of all fElementVect entries and another of all
// fNumOfAtomsPerVolumeVect entries.
// 2. Keep track in the material data (with a single int instead of the arrays)
// of the index where the data related to the given material starts in
// these common, flat arrays.
// This will be used in case of more important data structures such as lambda
// and loss tables.
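// For illustration only (a hypothetical sketch, not part of this implementation),
// the flattened members could look like:
//   int* fFlatElementVect; // all materials' element indices, concatenated
//   double* fFlatNumOfAtomsPerVolumeVect; // same layout as fFlatElementVect
//   int* fElementStart; // per material: offset of its first entry in the flat arrays
// A single cudaMalloc/cudaMemcpy per array would then replace the per-material
// allocations and copies done above.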
|
4072733f8bd9e9f4e88c02b6471e27115db593d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Fprop2(const float* layer1, const float* syn2, float* out)
{
int i = blockDim.y*blockIdx.y + threadIdx.y; //10
int j = blockIdx.x; //Data.count
//int k = threadIdx.x; //256
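// each thread computes one element out[j*10 + i] of the (count x 10) output:
// the dot product of row j of layer1 (count x 256) with column i of syn2 (256 x 10)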
float x = 0.0;
for (int k=0; k < 256; ++k)
x += layer1[j*256 + k] * syn2[k*10 + i];
out[j*10 + i] = x;
} | 4072733f8bd9e9f4e88c02b6471e27115db593d8.cu | #include "includes.h"
__global__ void Fprop2(const float* layer1, const float* syn2, float* out)
{
int i = blockDim.y*blockIdx.y + threadIdx.y; //10
int j = blockIdx.x; //Data.count
//int k = threadIdx.x; //256
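// each thread computes one element out[j*10 + i] of the (count x 10) output:
// the dot product of row j of layer1 (count x 256) with column i of syn2 (256 x 10)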
float x = 0.0;
for (int k=0; k < 256; ++k)
x += layer1[j*256 + k] * syn2[k*10 + i];
out[j*10 + i] = x;
} |
282c56978be712946778d5f66148c25e8b86743d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[256,1,1] --blockDim=[128,1,1]
#include "common.h"
__global__ void
classifyVoxel(uint *voxelVerts, uint *voxelOccupied, uchar *volume,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask, uint numVoxels,
float3 voxelSize, float isoValue)
{
uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
uint i = __mul24(blockId, blockDim.x) + threadIdx.x;
uint3 gridPos = calcGridPos(i, gridSizeShift, gridSizeMask);
// read field values at neighbouring grid vertices
#if SAMPLE_VOLUME
float field[8];
field[0] = sampleVolume(volume, gridPos, gridSize);
field[1] = sampleVolume(volume, gridPos + make_uint3(1, 0, 0), gridSize);
field[2] = sampleVolume(volume, gridPos + make_uint3(1, 1, 0), gridSize);
field[3] = sampleVolume(volume, gridPos + make_uint3(0, 1, 0), gridSize);
field[4] = sampleVolume(volume, gridPos + make_uint3(0, 0, 1), gridSize);
field[5] = sampleVolume(volume, gridPos + make_uint3(1, 0, 1), gridSize);
field[6] = sampleVolume(volume, gridPos + make_uint3(1, 1, 1), gridSize);
field[7] = sampleVolume(volume, gridPos + make_uint3(0, 1, 1), gridSize);
#else
float3 p;
p.x = -1.0f + (gridPos.x * voxelSize.x);
p.y = -1.0f + (gridPos.y * voxelSize.y);
p.z = -1.0f + (gridPos.z * voxelSize.z);
float field[8];
field[0] = fieldFunc(p);
field[1] = fieldFunc(p + make_float3(voxelSize.x, 0, 0));
field[2] = fieldFunc(p + make_float3(voxelSize.x, voxelSize.y, 0));
field[3] = fieldFunc(p + make_float3(0, voxelSize.y, 0));
field[4] = fieldFunc(p + make_float3(0, 0, voxelSize.z));
field[5] = fieldFunc(p + make_float3(voxelSize.x, 0, voxelSize.z));
field[6] = fieldFunc(p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z));
field[7] = fieldFunc(p + make_float3(0, voxelSize.y, voxelSize.z));
#endif
// calculate flag indicating if each vertex is inside or outside isosurface
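// each of the 8 bits of cubeindex corresponds to one cube corner;
// bit k is set when field[k] lies below the isovalue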
uint cubeindex;
cubeindex = uint(field[0] < isoValue);
cubeindex += uint(field[1] < isoValue)*2;
cubeindex += uint(field[2] < isoValue)*4;
cubeindex += uint(field[3] < isoValue)*8;
cubeindex += uint(field[4] < isoValue)*16;
cubeindex += uint(field[5] < isoValue)*32;
cubeindex += uint(field[6] < isoValue)*64;
cubeindex += uint(field[7] < isoValue)*128;
// read number of vertices from texture
uint numVerts = tex1Dfetch(numVertsTex, cubeindex);
if (i < numVoxels)
{
voxelVerts[i] = numVerts;
voxelOccupied[i] = (numVerts > 0);
}
}
| 282c56978be712946778d5f66148c25e8b86743d.cu | //pass
//--gridDim=[256,1,1] --blockDim=[128,1,1]
#include "common.h"
__global__ void
classifyVoxel(uint *voxelVerts, uint *voxelOccupied, uchar *volume,
uint3 gridSize, uint3 gridSizeShift, uint3 gridSizeMask, uint numVoxels,
float3 voxelSize, float isoValue)
{
uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
uint i = __mul24(blockId, blockDim.x) + threadIdx.x;
uint3 gridPos = calcGridPos(i, gridSizeShift, gridSizeMask);
// read field values at neighbouring grid vertices
#if SAMPLE_VOLUME
float field[8];
field[0] = sampleVolume(volume, gridPos, gridSize);
field[1] = sampleVolume(volume, gridPos + make_uint3(1, 0, 0), gridSize);
field[2] = sampleVolume(volume, gridPos + make_uint3(1, 1, 0), gridSize);
field[3] = sampleVolume(volume, gridPos + make_uint3(0, 1, 0), gridSize);
field[4] = sampleVolume(volume, gridPos + make_uint3(0, 0, 1), gridSize);
field[5] = sampleVolume(volume, gridPos + make_uint3(1, 0, 1), gridSize);
field[6] = sampleVolume(volume, gridPos + make_uint3(1, 1, 1), gridSize);
field[7] = sampleVolume(volume, gridPos + make_uint3(0, 1, 1), gridSize);
#else
float3 p;
p.x = -1.0f + (gridPos.x * voxelSize.x);
p.y = -1.0f + (gridPos.y * voxelSize.y);
p.z = -1.0f + (gridPos.z * voxelSize.z);
float field[8];
field[0] = fieldFunc(p);
field[1] = fieldFunc(p + make_float3(voxelSize.x, 0, 0));
field[2] = fieldFunc(p + make_float3(voxelSize.x, voxelSize.y, 0));
field[3] = fieldFunc(p + make_float3(0, voxelSize.y, 0));
field[4] = fieldFunc(p + make_float3(0, 0, voxelSize.z));
field[5] = fieldFunc(p + make_float3(voxelSize.x, 0, voxelSize.z));
field[6] = fieldFunc(p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z));
field[7] = fieldFunc(p + make_float3(0, voxelSize.y, voxelSize.z));
#endif
// calculate flag indicating if each vertex is inside or outside isosurface
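// each of the 8 bits of cubeindex corresponds to one cube corner;
// bit k is set when field[k] lies below the isovalue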
uint cubeindex;
cubeindex = uint(field[0] < isoValue);
cubeindex += uint(field[1] < isoValue)*2;
cubeindex += uint(field[2] < isoValue)*4;
cubeindex += uint(field[3] < isoValue)*8;
cubeindex += uint(field[4] < isoValue)*16;
cubeindex += uint(field[5] < isoValue)*32;
cubeindex += uint(field[6] < isoValue)*64;
cubeindex += uint(field[7] < isoValue)*128;
// read number of vertices from texture
uint numVerts = tex1Dfetch(numVertsTex, cubeindex);
if (i < numVoxels)
{
voxelVerts[i] = numVerts;
voxelOccupied[i] = (numVerts > 0);
}
}
|
d81f57b8c2dd087124208a4b324e7d839deb29f1.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h> // CUDA API
#include <cutil_inline.h> // macros to check calls
#include <stdio.h>
#include <stdlib.h>
#include "../../common/colorstuff.h" // conversion from [0,1] to a color
#include "../../common/sdlstuff.h" // graphics
// Length of the color scale in pixels
// Careful! It is not divisible by the block size
#define GRADIENT_SIZE 719
// size of the scale in bytes
#define GRADIENT_BYTES (GRADIENT_SIZE * sizeof(rgba))
// length of the thread block
#define BLOCK_SIZE 32
// height of the image (so it is easy to see)
// the generated scale is drawn in full once per row
#define IMAGE_HEIGHT 32
// size of the image
#define IMAGE_SIZE (GRADIENT_SIZE * IMAGE_HEIGHT)
#define DIV_CEIL(n, m) ((n) + (m) - 1) / (m)
// kernel that paints blocks in different colors
__global__ void gradient_1d(rgba * image, size_t n) {
// index of the thread in the vector
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
// normalized position of the block in the grid [0..1]
float norm_pos = blockIdx.x / (gridDim.x - 1.0f);
// convert to a color scale
rgba color = color1(norm_pos);
// write to memory
image[idx] = color;
}
}
int main(int argc, char ** argv) {
// allocate host memory for the image
rgba * host_gradient = (rgba *) malloc(GRADIENT_BYTES);
// allocate memory on the device for the image and initialize it with zeros
rgba * device_gradient;
// MISSING: allocate memory on the device
cutilSafeCall(hipMalloc(&device_gradient, GRADIENT_BYTES));
// MISSING: initialize the device memory to 0
cutilSafeCall(hipMemset(device_gradient, 0, GRADIENT_BYTES));
// run the kernel
dim3 block(BLOCK_SIZE); // block
// MISSING: define the grid size according to the block size
dim3 grid(DIV_CEIL(GRADIENT_SIZE, BLOCK_SIZE));
// MISSING: launch the kernel
hipLaunchKernelGGL(( gradient_1d), dim3(grid), dim3(block), 0, 0, device_gradient, GRADIENT_SIZE);
// check for errors
cutilCheckMsg("Fallo al lanzar el kernel:");
// MISSING: wait for the kernel to finish
cutilSafeCall(hipDeviceSynchronize());
// FALTA: copiar la imagen al host
cutilSafeCall(hipMemcpy(host_gradient, device_gradient, GRADIENT_BYTES, hipMemcpyDefault));
// inicializar gráficos, dibujar en pantalla
sdls_init(GRADIENT_SIZE, IMAGE_HEIGHT);
for (uint i = 0; i < IMAGE_HEIGHT; ++i) {
sdls_blitrectangle_rgba(0, i, GRADIENT_SIZE, 1, host_gradient);
}
sdls_draw();
// esperar input para salir
printf("<ENTER> para salir\n");
getchar();
// limpieza de memoria
free(host_gradient);
// FALTA: liberar memoria de la placa
cutilSafeCall(hipFree(device_gradient));
return 0;
}
| d81f57b8c2dd087124208a4b324e7d839deb29f1.cu | #include <cuda.h> // API de CUDA
#include <cutil_inline.h> // macros para verificar llamadas
#include <stdio.h>
#include <stdlib.h>
#include "../../common/colorstuff.h" // conversión de [0,1] a color
#include "../../common/sdlstuff.h" // gráficos
// Longitud de la escala de colores en pixels
// Ojo! No es divisible por el tamaño de bloque
#define GRADIENT_SIZE 719
// tamaño de la escala en bytes
#define GRADIENT_BYTES (GRADIENT_SIZE * sizeof(rgba))
// longitud del bloque de threads
#define BLOCK_SIZE 32
// altura de la imagen (para que sea fácil de ver)
// la escala generada se dibuja completa una vez por fila
#define IMAGE_HEIGHT 32
// tamaño de la imagen
#define IMAGE_SIZE (GRADIENT_SIZE * IMAGE_HEIGHT)
#define DIV_CEIL(n, m) (((n) + (m) - 1) / (m))
// kernel que pinta bloques de distintos colores
__global__ void gradient_1d(rgba * image, size_t n) {
// indice del thread en el vector
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
// posicion normalizada del bloque en la grilla [0..1]
float norm_pos = blockIdx.x / (gridDim.x - 1.0f);
// convertir a una escala de colores
rgba color = color1(norm_pos);
// escribir a memoria
image[idx] = color;
}
}
int main(int argc, char ** argv) {
// pedir memoria en el host para la imagen
rgba * host_gradient = (rgba *) malloc(GRADIENT_BYTES);
// pedir memoria en la placa para la imagen e inicializarla con ceros
rgba * device_gradient;
// FALTA: pedir memoria en la placa
cutilSafeCall(cudaMalloc(&device_gradient, GRADIENT_BYTES));
// FALTA: inicializar a 0 la memoria en la placa
cutilSafeCall(cudaMemset(device_gradient, 0, GRADIENT_BYTES));
// correr kernel
dim3 block(BLOCK_SIZE); // bloque
// FALTA: definir el tamaño del grid de acuerdo al tamaño del bloque
dim3 grid(DIV_CEIL(GRADIENT_SIZE, BLOCK_SIZE));
// FALTA: llamar al kernel
gradient_1d<<<grid, block>>>(device_gradient, GRADIENT_SIZE);
// verificar errores
cutilCheckMsg("Fallo al lanzar el kernel:");
// FALTA: esperar a que el kernel termine
cutilSafeCall(cudaDeviceSynchronize());
// FALTA: copiar la imagen al host
cutilSafeCall(cudaMemcpy(host_gradient, device_gradient, GRADIENT_BYTES, cudaMemcpyDefault));
// inicializar gráficos, dibujar en pantalla
sdls_init(GRADIENT_SIZE, IMAGE_HEIGHT);
for (uint i = 0; i < IMAGE_HEIGHT; ++i) {
sdls_blitrectangle_rgba(0, i, GRADIENT_SIZE, 1, host_gradient);
}
sdls_draw();
// esperar input para salir
printf("<ENTER> para salir\n");
getchar();
// limpieza de memoria
free(host_gradient);
// FALTA: liberar memoria de la placa
cutilSafeCall(cudaFree(device_gradient));
return 0;
}
|
4b3d5bb27ca5b3eb8ba48510d084ef4789dd2063.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/hip_fp16.h>
#include "PoolExecution.hpp"
#include <float.h>
#include "MNNCUDADefine.hpp"
namespace MNN {
namespace CUDA {
#define HALF_MIN half(-65504)
#define HALF2_MIN half2(-65504, -65504)
#define MNN_CUDA_HALF2_MAX(a, b) \
do { \
(a).x = __hgt((a).x, (b).x) ? (a).x : (b).x; \
(a).y = __hgt((a).y, (b).y) ? (a).y : (b).y; \
} while (0)
__global__ void maxpool_halfC16(const half* uInput, half* uOutput,
int bc,
int ih, int iw,
int oh, int ow,
int padX, int padY,
int kernelX, int kernelY,
int strideX, int strideY
) {
int total = bc * oh * ow * 8;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int x = i % ow;
int tmp = i / ow;
int y = tmp % oh;
int z = tmp / oh;
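// zC indexes the (batch, 16-channel pack) pair; zR is the half2 lane (two channels) inside the pack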
int zC = z / 8;
int zR = z % 8;
int ix = x * strideX - padX;
int iy = y * strideY - padY;
int sx = max(0, -ix);
int sy = max(0, -iy);
int ex = min(kernelX, iw - ix);
int ey = min(kernelY, ih - iy);
float div = (float)(ey-sy)* (float)(ex-sx);
half2 sumValue = HALF2_MIN;
for (int fy=sy; fy<ey; ++fy) {
for (int fx=sx; fx<ex; ++fx) {
int currentX = ix + fx;
int currentY = iy + fy;
const half2* input = (const half2*)(uInput
+ zR * 2
+ currentX * 16
+ currentY * iw * 16
+ zC * iw * ih * 16
);
half2 inputV = *input;
MNN_CUDA_HALF2_MAX(sumValue, inputV);
}
}
half2* dst = (half2*)(uOutput
+ zC * ow * oh * 16
+ y * ow * 16
+ x * 16
+ zR * 2
);
*dst = sumValue;
}
}
__global__ void avgpool_halfC16(const half* uInput, half* uOutput,
int bc,
int ih, int iw,
int oh, int ow,
int padX, int padY,
int kernelX, int kernelY,
int strideX, int strideY
) {
int total = bc * oh * ow * 8;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int x = i % ow;
int tmp = i / ow;
int y = tmp % oh;
int z = tmp / oh;
int zC = z / 8;
int zR = z % 8;
int ix = x * strideX - padX;
int iy = y * strideY - padY;
int sx = max(0, -ix);
int sy = max(0, -iy);
int ex = min(kernelX, iw - ix);
int ey = min(kernelY, ih - iy);
float div = (float)(ey-sy)* (float)(ex-sx);
half2 sumValue = half2(0.0f, 0.0f);
half2 mulValue = half2(1.0f / div, 1.0f/div);
for (int fy=sy; fy<ey; ++fy) {
for (int fx=sx; fx<ex; ++fx) {
int currentX = ix + fx;
int currentY = iy + fy;
const half2* input = (const half2*)(uInput
+ zR * 2
+ currentX * 16
+ currentY * iw * 16
+ zC * iw * ih * 16
);
sumValue = __hadd2(sumValue, (*input) * mulValue);
}
}
half2* dst = (half2*)(uOutput
+ zC * ow * oh * 16
+ y * ow * 16
+ x * 16
+ zR * 2
);
*dst = sumValue;
}
}
__global__ void maxpool_floatC16(const float* uInput, float* uOutput,
int bc,
int ih, int iw,
int oh, int ow,
int padX, int padY,
int kernelX, int kernelY,
int strideX, int strideY
) {
int total = bc * oh * ow * PACK_NUMBER;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int x = i % ow;
int tmp = i / ow;
int y = tmp % oh;
int z = tmp / oh;
int zC = z / PACK_NUMBER;
int zR = z % PACK_NUMBER;
int ix = x * strideX - padX;
int iy = y * strideY - padY;
int sx = max(0, -ix);
int sy = max(0, -iy);
int ex = min(kernelX, iw - ix);
int ey = min(kernelY, ih - iy);
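// clamp the pooling window to the input: fx runs over [sx, ex), fy over [sy, ey)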
float maxValue = -FLT_MAX;
for (int fy=sy; fy<ey; ++fy) {
for (int fx=sx; fx<ex; ++fx) {
int currentX = ix + fx;
int currentY = iy + fy;
const float* input = (const float*)(uInput
+ zR
+ currentX * PACK_NUMBER
+ currentY * iw * PACK_NUMBER
+ zC * iw * ih * PACK_NUMBER
);
maxValue = max(maxValue, *input);
}
}
float* dst = (float*)(uOutput
+ zC * ow * oh * PACK_NUMBER
+ y * ow * PACK_NUMBER
+ x * PACK_NUMBER
+ zR
);
*dst = maxValue;
}
}
__global__ void avgpool_floatC16(const float* uInput, float* uOutput,
int bc,
int ih, int iw,
int oh, int ow,
int padX, int padY,
int kernelX, int kernelY,
int strideX, int strideY
) {
int total = bc * oh * ow * PACK_NUMBER;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int x = i % ow;
int tmp = i / ow;
int y = tmp % oh;
int z = tmp / oh;
int zC = z / PACK_NUMBER;
int zR = z % PACK_NUMBER;
int ix = x * strideX - padX;
int iy = y * strideY - padY;
int sx = max(0, -ix);
int sy = max(0, -iy);
int ex = min(kernelX, iw - ix);
int ey = min(kernelY, ih - iy);
float div = (float)(ey-sy)* (float)(ex-sx);
float sumValue = 0.0f;
float mulValue = 1.0f/div;
for (int fy=sy; fy<ey; ++fy) {
for (int fx=sx; fx<ex; ++fx) {
int currentX = ix + fx;
int currentY = iy + fy;
const float* input = (const float*)(uInput
+ zR
+ currentX * PACK_NUMBER
+ currentY * iw * PACK_NUMBER
+ zC * iw * ih * PACK_NUMBER
);
sumValue = sumValue + (*input) * mulValue;
}
}
float* dst = (float*)(uOutput
+ zC * ow * oh * 16
+ y * ow * 16
+ x * 16
+ zR
);
*dst = sumValue;
}
}
ErrorCode PoolExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto layer = mParameter;
int strideWidth = layer->strideX();
int strideHeight = layer->strideY();
int padWidth = layer->padX();
int padHeight = layer->padY();
// edit const if global
auto input = inputs[0];
auto output = outputs[0];
int kernelWidth = ::min(layer->kernelX(), input->width());
int kernelHeight = ::min(layer->kernelY(), input->height());
if (layer->isGlobal()) {
kernelWidth = input->width();
kernelHeight = input->height();
strideWidth = input->width();
strideHeight = input->height();
padWidth = 0;
padHeight = 0;
}
if (layer->padType() == PoolPadType_SAME) {
int padNeededWidth = (output->width() - 1) * strideWidth + kernelWidth - input->width();
int padNeededHeight = (output->height() - 1) * strideHeight + kernelHeight - input->height();
padWidth = padNeededWidth > 0 ? padNeededWidth / 2 : 0;
padHeight = padNeededHeight > 0 ? padNeededHeight / 2 : 0;
} else if (layer->padType() == PoolPadType_VALID) {
padWidth = padHeight = 0;
}
mPoolType = layer->type();
auto padType = layer->padType();
if (layer->pads() != nullptr && padType == PoolPadType_CAFFE) {
padType = PoolPadType_VALID;
}
mPadType = padType;
mPaddings = {padWidth, padHeight};
mStrides = {strideWidth, strideHeight};
mKernels = {kernelWidth, kernelHeight};
return NO_ERROR;
}
ErrorCode PoolExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto iw = inputs[0]->width();
auto ih = inputs[0]->height();
auto bc = inputs[0]->batch() * UP_DIV(inputs[0]->channel(), PACK_NUMBER);
auto ow = outputs[0]->width();
auto oh = outputs[0]->height();
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto& prop = runtime->prop();
int threads_num = prop.maxThreadsPerBlock;
int block_num = prop.multiProcessorCount;
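// one block per SM with the device's maximum thread count; the kernels cover the
// whole output tensor with grid-stride loops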
if (static_cast<CUDABackend*>(backend())->useFp16()) {
auto inputPtr = (const half*)inputs[0]->deviceId();
auto outputPtr = (half*)outputs[0]->deviceId();
switch (mPoolType) {
case PoolType_AVEPOOL:
hipLaunchKernelGGL(( avgpool_halfC16), dim3(block_num), dim3(threads_num), 0, 0, inputPtr, outputPtr,
bc,
ih, iw,
oh, ow,
mPaddings[0], mPaddings[1],
mKernels[0], mKernels[1],
mStrides[0], mStrides[1]
);
return NO_ERROR;
case PoolType_MAXPOOL:
hipLaunchKernelGGL(( maxpool_halfC16), dim3(block_num), dim3(threads_num), 0, 0, inputPtr, outputPtr,
bc,
ih, iw,
oh, ow,
mPaddings[0], mPaddings[1],
mKernels[0], mKernels[1],
mStrides[0], mStrides[1]
);
return NO_ERROR;
}
return NO_ERROR;
}
auto inputPtr = (const float*)inputs[0]->deviceId();
auto outputPtr = (float*)outputs[0]->deviceId();
switch (mPoolType) {
case PoolType_AVEPOOL:
hipLaunchKernelGGL(( avgpool_floatC16), dim3(block_num), dim3(threads_num), 0, 0, inputPtr, outputPtr,
bc,
ih, iw,
oh, ow,
mPaddings[0], mPaddings[1],
mKernels[0], mKernels[1],
mStrides[0], mStrides[1]
);
return NO_ERROR;
case PoolType_MAXPOOL:
hipLaunchKernelGGL(( maxpool_floatC16), dim3(block_num), dim3(threads_num), 0, 0, inputPtr, outputPtr,
bc,
ih, iw,
oh, ow,
mPaddings[0], mPaddings[1],
mKernels[0], mKernels[1],
mStrides[0], mStrides[1]
);
return NO_ERROR;
}
return NOT_SUPPORT;
}
class PoolCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
return new PoolExecution(op->main_as_Pool(), backend);
}
};
static CUDACreatorRegister<PoolCreator> __init(OpType_Pooling);
};
}; | 4b3d5bb27ca5b3eb8ba48510d084ef4789dd2063.cu | #include <cuda_fp16.h>
#include "PoolExecution.hpp"
#include <float.h>
#include "MNNCUDADefine.hpp"
namespace MNN {
namespace CUDA {
#define HALF_MIN half(-65504)
#define HALF2_MIN half2(-65504, -65504)
#define MNN_CUDA_HALF2_MAX(a, b) \
do { \
(a).x = __hgt((a).x, (b).x) ? (a).x : (b).x; \
(a).y = __hgt((a).y, (b).y) ? (a).y : (b).y; \
} while (0)
__global__ void maxpool_halfC16(const half* uInput, half* uOutput,
int bc,
int ih, int iw,
int oh, int ow,
int padX, int padY,
int kernelX, int kernelY,
int strideX, int strideY
) {
int total = bc * oh * ow * 8;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int x = i % ow;
int tmp = i / ow;
int y = tmp % oh;
int z = tmp / oh;
int zC = z / 8;
int zR = z % 8;
int ix = x * strideX - padX;
int iy = y * strideY - padY;
int sx = max(0, -ix);
int sy = max(0, -iy);
int ex = min(kernelX, iw - ix);
int ey = min(kernelY, ih - iy);
float div = (float)(ey-sy)* (float)(ex-sx);
half2 sumValue = HALF2_MIN;
for (int fy=sy; fy<ey; ++fy) {
for (int fx=sx; fx<ex; ++fx) {
int currentX = ix + fx;
int currentY = iy + fy;
const half2* input = (const half2*)(uInput
+ zR * 2
+ currentX * 16
+ currentY * iw * 16
+ zC * iw * ih * 16
);
half2 inputV = *input;
MNN_CUDA_HALF2_MAX(sumValue, inputV);
}
}
half2* dst = (half2*)(uOutput
+ zC * ow * oh * 16
+ y * ow * 16
+ x * 16
+ zR * 2
);
*dst = sumValue;
}
}
__global__ void avgpool_halfC16(const half* uInput, half* uOutput,
int bc,
int ih, int iw,
int oh, int ow,
int padX, int padY,
int kernelX, int kernelY,
int strideX, int strideY
) {
int total = bc * oh * ow * 8;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int x = i % ow;
int tmp = i / ow;
int y = tmp % oh;
int z = tmp / oh;
int zC = z / 8;
int zR = z % 8;
int ix = x * strideX - padX;
int iy = y * strideY - padY;
int sx = max(0, -ix);
int sy = max(0, -iy);
int ex = min(kernelX, iw - ix);
int ey = min(kernelY, ih - iy);
float div = (float)(ey-sy)* (float)(ex-sx);
half2 sumValue = half2(0.0f, 0.0f);
half2 mulValue = half2(1.0f / div, 1.0f/div);
for (int fy=sy; fy<ey; ++fy) {
for (int fx=sx; fx<ex; ++fx) {
int currentX = ix + fx;
int currentY = iy + fy;
const half2* input = (const half2*)(uInput
+ zR * 2
+ currentX * 16
+ currentY * iw * 16
+ zC * iw * ih * 16
);
sumValue = __hadd2(sumValue, (*input) * mulValue);
}
}
half2* dst = (half2*)(uOutput
+ zC * ow * oh * 16
+ y * ow * 16
+ x * 16
+ zR * 2
);
*dst = sumValue;
}
}
__global__ void maxpool_floatC16(const float* uInput, float* uOutput,
int bc,
int ih, int iw,
int oh, int ow,
int padX, int padY,
int kernelX, int kernelY,
int strideX, int strideY
) {
int total = bc * oh * ow * PACK_NUMBER;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int x = i % ow;
int tmp = i / ow;
int y = tmp % oh;
int z = tmp / oh;
int zC = z / PACK_NUMBER;
int zR = z % PACK_NUMBER;
int ix = x * strideX - padX;
int iy = y * strideY - padY;
int sx = max(0, -ix);
int sy = max(0, -iy);
int ex = min(kernelX, iw - ix);
int ey = min(kernelY, ih - iy);
float maxValue = -FLT_MAX;
for (int fy=sy; fy<ey; ++fy) {
for (int fx=sx; fx<ex; ++fx) {
int currentX = ix + fx;
int currentY = iy + fy;
const float* input = (const float*)(uInput
+ zR
+ currentX * PACK_NUMBER
+ currentY * iw * PACK_NUMBER
+ zC * iw * ih * PACK_NUMBER
);
maxValue = max(maxValue, *input);
}
}
float* dst = (float*)(uOutput
+ zC * ow * oh * PACK_NUMBER
+ y * ow * PACK_NUMBER
+ x * PACK_NUMBER
+ zR
);
*dst = maxValue;
}
}
__global__ void avgpool_floatC16(const float* uInput, float* uOutput,
int bc,
int ih, int iw,
int oh, int ow,
int padX, int padY,
int kernelX, int kernelY,
int strideX, int strideY
) {
int total = bc * oh * ow * PACK_NUMBER;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int x = i % ow;
int tmp = i / ow;
int y = tmp % oh;
int z = tmp / oh;
int zC = z / PACK_NUMBER;
int zR = z % PACK_NUMBER;
int ix = x * strideX - padX;
int iy = y * strideY - padY;
int sx = max(0, -ix);
int sy = max(0, -iy);
int ex = min(kernelX, iw - ix);
int ey = min(kernelY, ih - iy);
float div = (float)(ey-sy)* (float)(ex-sx);
float sumValue = 0.0f;
float mulValue = 1.0f/div;
for (int fy=sy; fy<ey; ++fy) {
for (int fx=sx; fx<ex; ++fx) {
int currentX = ix + fx;
int currentY = iy + fy;
const float* input = (const float*)(uInput
+ zR
+ currentX * PACK_NUMBER
+ currentY * iw * PACK_NUMBER
+ zC * iw * ih * PACK_NUMBER
);
sumValue = sumValue + (*input) * mulValue;
}
}
float* dst = (float*)(uOutput
+ zC * ow * oh * 16
+ y * ow * 16
+ x * 16
+ zR
);
*dst = sumValue;
}
}
ErrorCode PoolExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto layer = mParameter;
int strideWidth = layer->strideX();
int strideHeight = layer->strideY();
int padWidth = layer->padX();
int padHeight = layer->padY();
// edit const if global
auto input = inputs[0];
auto output = outputs[0];
int kernelWidth = std::min(layer->kernelX(), input->width());
int kernelHeight = std::min(layer->kernelY(), input->height());
if (layer->isGlobal()) {
kernelWidth = input->width();
kernelHeight = input->height();
strideWidth = input->width();
strideHeight = input->height();
padWidth = 0;
padHeight = 0;
}
if (layer->padType() == PoolPadType_SAME) {
int padNeededWidth = (output->width() - 1) * strideWidth + kernelWidth - input->width();
int padNeededHeight = (output->height() - 1) * strideHeight + kernelHeight - input->height();
padWidth = padNeededWidth > 0 ? padNeededWidth / 2 : 0;
padHeight = padNeededHeight > 0 ? padNeededHeight / 2 : 0;
} else if (layer->padType() == PoolPadType_VALID) {
padWidth = padHeight = 0;
}
mPoolType = layer->type();
auto padType = layer->padType();
if (layer->pads() != nullptr && padType == PoolPadType_CAFFE) {
padType = PoolPadType_VALID;
}
mPadType = padType;
mPaddings = {padWidth, padHeight};
mStrides = {strideWidth, strideHeight};
mKernels = {kernelWidth, kernelHeight};
return NO_ERROR;
}
ErrorCode PoolExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto iw = inputs[0]->width();
auto ih = inputs[0]->height();
auto bc = inputs[0]->batch() * UP_DIV(inputs[0]->channel(), PACK_NUMBER);
auto ow = outputs[0]->width();
auto oh = outputs[0]->height();
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto& prop = runtime->prop();
int threads_num = prop.maxThreadsPerBlock;
int block_num = prop.multiProcessorCount;
if (static_cast<CUDABackend*>(backend())->useFp16()) {
auto inputPtr = (const half*)inputs[0]->deviceId();
auto outputPtr = (half*)outputs[0]->deviceId();
switch (mPoolType) {
case PoolType_AVEPOOL:
avgpool_halfC16<<<block_num, threads_num>>>(inputPtr, outputPtr,
bc,
ih, iw,
oh, ow,
mPaddings[0], mPaddings[1],
mKernels[0], mKernels[1],
mStrides[0], mStrides[1]
);
return NO_ERROR;
case PoolType_MAXPOOL:
maxpool_halfC16<<<block_num, threads_num>>>(inputPtr, outputPtr,
bc,
ih, iw,
oh, ow,
mPaddings[0], mPaddings[1],
mKernels[0], mKernels[1],
mStrides[0], mStrides[1]
);
return NO_ERROR;
}
return NO_ERROR;
}
auto inputPtr = (const float*)inputs[0]->deviceId();
auto outputPtr = (float*)outputs[0]->deviceId();
switch (mPoolType) {
case PoolType_AVEPOOL:
avgpool_floatC16<<<block_num, threads_num>>>(inputPtr, outputPtr,
bc,
ih, iw,
oh, ow,
mPaddings[0], mPaddings[1],
mKernels[0], mKernels[1],
mStrides[0], mStrides[1]
);
return NO_ERROR;
case PoolType_MAXPOOL:
maxpool_floatC16<<<block_num, threads_num>>>(inputPtr, outputPtr,
bc,
ih, iw,
oh, ow,
mPaddings[0], mPaddings[1],
mKernels[0], mKernels[1],
mStrides[0], mStrides[1]
);
return NO_ERROR;
}
return NOT_SUPPORT;
}
class PoolCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
return new PoolExecution(op->main_as_Pool(), backend);
}
};
static CUDACreatorRegister<PoolCreator> __init(OpType_Pooling);
};
}; |
fa9b41f26bfd847f028d0736faf18cdb16d4d511.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 06.02.2019
// @author [email protected]
//
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/StringUtils.h>
#include <helpers/logger.h>
#include <memory/Workspace.h>
namespace sd {
//////////////////////////////////////////////////////////////////////////
PointersManager::PointersManager(const sd::LaunchContext* context, const std::string& funcName) {
_context = const_cast<sd::LaunchContext*>(context);
_funcName = funcName;
}
//////////////////////////////////////////////////////////////////////////
void* PointersManager::allocateDevMem(const size_t sizeInBytes) {
void* dst = nullptr;
if (_context->getWorkspace() == nullptr) {
hipError_t cudaResult = hipMalloc(reinterpret_cast<void**>(&dst), sizeInBytes);
if (cudaResult != 0)
throw cuda_exception::build(_funcName + ": cannot allocate global memory on device!", cudaResult);
} else {
dst = _context->getWorkspace()->allocateBytes(sd::memory::MemoryType::DEVICE, sizeInBytes);
}
return dst;
}
//////////////////////////////////////////////////////////////////////////
void* PointersManager::replicatePointer(const void* src, const size_t numberOfBytes) {
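// copy a host buffer into freshly allocated device memory (asynchronously when a
// stream is available) and remember the pointer so the destructor can free it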
void* dst = allocateDevMem(numberOfBytes);
if (src) {
if (_context != nullptr)
hipMemcpyAsync(dst, src, numberOfBytes, hipMemcpyHostToDevice, *_context->getCudaStream());
else
hipMemcpy(dst, src, numberOfBytes, hipMemcpyHostToDevice);
}
_pOnGlobMem.emplace_back(dst);
return dst;
}
//////////////////////////////////////////////////////////////////////////
void PointersManager::synchronize() const {
if (_context != nullptr) {
hipError_t cudaResult = hipStreamSynchronize(*_context->getCudaStream());
if (cudaResult != 0) throw cuda_exception::build(_funcName + ": cuda stream synchronization failed !", cudaResult);
} else {
sd_printf("<%s> syncStream isn't possible: no stream set!", _funcName.c_str());
}
}
//////////////////////////////////////////////////////////////////////////
PointersManager::~PointersManager() {
for (auto& p : _pOnGlobMem) hipFree(p);
}
////////////////////////////////////////////////////////////////////////
template <typename T>
static SD_KERNEL void printDevContentOnDev_(const void* pDev, const sd::LongType len, const int tid) {
PointersManager::printDevContentOnDev<T>(pDev, len, tid);
}
////////////////////////////////////////////////////////////////////////
template <typename T>
void PointersManager::printDevContentOnDevFromHost(const void* pDev, const sd::LongType len, const int tid) {
hipLaunchKernelGGL(( printDevContentOnDev_<T>), dim3(512), dim3(512), 1024, *sd::LaunchContext ::defaultContext()->getCudaStream(), pDev, len, tid);
auto res = hipStreamSynchronize(*sd::LaunchContext ::defaultContext()->getCudaStream());
if (res != 0)
THROW_EXCEPTION("PointersManager::printDevContentOnDevFromHost: hipStreamSynchronize failed!");
}
template void PointersManager::printDevContentOnDevFromHost<sd::LongType>(const void* pDev, const sd::LongType len,
const int tid);
template void PointersManager::printDevContentOnDevFromHost<int>(const void* pDev, const sd::LongType len,
const int tid);
template void PointersManager::printDevContentOnDevFromHost<float>(const void* pDev, const sd::LongType len,
const int tid);
template void PointersManager::printDevContentOnDevFromHost<double>(const void* pDev, const sd::LongType len,
const int tid);
// BUILD_SINGLE_TEMPLATE(template void PointersManager::printDevContentOnDevFromHost, (void* pDev, sd::LongType len, int
// tid), SD_COMMON_TYPES);
////////////////////////////////////////////////////////////////////////
template <typename T>
void PointersManager::printDevContentOnHost(const void* pDev, const sd::LongType len) const {
printf("host print out\n");
void* pHost = operator new(sizeof(T) * len);
hipMemcpyAsync(pHost, pDev, sizeof(T) * len, hipMemcpyDeviceToHost, *_context->getCudaStream());
hipError_t cudaResult = hipStreamSynchronize(*_context->getCudaStream());
if (cudaResult != 0) THROW_EXCEPTION("PointersManager::printCudaHost: hipStreamSynchronize failed!");
for (sd::LongType i = 0; i < len; ++i) printf("%f, ", (double)reinterpret_cast<T*>(pHost)[i]);
printf("\n");
operator delete(pHost);
}
template void PointersManager::printDevContentOnHost<sd::LongType>(const void* pDev, const sd::LongType len) const;
template void PointersManager::printDevContentOnHost<int>(const void* pDev, const sd::LongType len) const;
template void PointersManager::printDevContentOnHost<float>(const void* pDev, const sd::LongType len) const;
template void PointersManager::printDevContentOnHost<double>(const void* pDev, const sd::LongType len) const;
} // namespace sd
| fa9b41f26bfd847f028d0736faf18cdb16d4d511.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 06.02.2019
// @author [email protected]
//
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/StringUtils.h>
#include <helpers/logger.h>
#include <memory/Workspace.h>
namespace sd {
//////////////////////////////////////////////////////////////////////////
PointersManager::PointersManager(const sd::LaunchContext* context, const std::string& funcName) {
_context = const_cast<sd::LaunchContext*>(context);
_funcName = funcName;
}
//////////////////////////////////////////////////////////////////////////
void* PointersManager::allocateDevMem(const size_t sizeInBytes) {
void* dst = nullptr;
if (_context->getWorkspace() == nullptr) {
cudaError_t cudaResult = cudaMalloc(reinterpret_cast<void**>(&dst), sizeInBytes);
if (cudaResult != 0)
throw cuda_exception::build(_funcName + ": cannot allocate global memory on device!", cudaResult);
} else {
dst = _context->getWorkspace()->allocateBytes(sd::memory::MemoryType::DEVICE, sizeInBytes);
}
return dst;
}
//////////////////////////////////////////////////////////////////////////
void* PointersManager::replicatePointer(const void* src, const size_t numberOfBytes) {
void* dst = allocateDevMem(numberOfBytes);
if (src) {
if (_context != nullptr)
cudaMemcpyAsync(dst, src, numberOfBytes, cudaMemcpyHostToDevice, *_context->getCudaStream());
else
cudaMemcpy(dst, src, numberOfBytes, cudaMemcpyHostToDevice);
}
_pOnGlobMem.emplace_back(dst);
return dst;
}
//////////////////////////////////////////////////////////////////////////
void PointersManager::synchronize() const {
if (_context != nullptr) {
cudaError_t cudaResult = cudaStreamSynchronize(*_context->getCudaStream());
if (cudaResult != 0) throw cuda_exception::build(_funcName + ": cuda stream synchronization failed !", cudaResult);
} else {
sd_printf("<%s> syncStream isn't possible: no stream set!", _funcName.c_str());
}
}
//////////////////////////////////////////////////////////////////////////
PointersManager::~PointersManager() {
for (auto& p : _pOnGlobMem) cudaFree(p);
}
////////////////////////////////////////////////////////////////////////
template <typename T>
static SD_KERNEL void printDevContentOnDev_(const void* pDev, const sd::LongType len, const int tid) {
PointersManager::printDevContentOnDev<T>(pDev, len, tid);
}
////////////////////////////////////////////////////////////////////////
template <typename T>
void PointersManager::printDevContentOnDevFromHost(const void* pDev, const sd::LongType len, const int tid) {
printDevContentOnDev_<T><<<512, 512, 1024, *sd::LaunchContext ::defaultContext()->getCudaStream()>>>(pDev, len, tid);
auto res = cudaStreamSynchronize(*sd::LaunchContext ::defaultContext()->getCudaStream());
if (res != 0)
THROW_EXCEPTION("PointersManager::printDevContentOnDevFromHost: cudaStreamSynchronize failed!");
}
template void PointersManager::printDevContentOnDevFromHost<sd::LongType>(const void* pDev, const sd::LongType len,
const int tid);
template void PointersManager::printDevContentOnDevFromHost<int>(const void* pDev, const sd::LongType len,
const int tid);
template void PointersManager::printDevContentOnDevFromHost<float>(const void* pDev, const sd::LongType len,
const int tid);
template void PointersManager::printDevContentOnDevFromHost<double>(const void* pDev, const sd::LongType len,
const int tid);
// BUILD_SINGLE_TEMPLATE(template void PointersManager::printDevContentOnDevFromHost, (void* pDev, sd::LongType len, int
// tid), SD_COMMON_TYPES);
////////////////////////////////////////////////////////////////////////
template <typename T>
void PointersManager::printDevContentOnHost(const void* pDev, const sd::LongType len) const {
printf("host print out\n");
void* pHost = operator new(sizeof(T) * len);
cudaMemcpyAsync(pHost, pDev, sizeof(T) * len, cudaMemcpyDeviceToHost, *_context->getCudaStream());
cudaError_t cudaResult = cudaStreamSynchronize(*_context->getCudaStream());
if (cudaResult != 0) THROW_EXCEPTION("PointersManager::printCudaHost: cudaStreamSynchronize failed!");
for (sd::LongType i = 0; i < len; ++i) printf("%f, ", (double)reinterpret_cast<T*>(pHost)[i]);
printf("\n");
operator delete(pHost);
}
template void PointersManager::printDevContentOnHost<sd::LongType>(const void* pDev, const sd::LongType len) const;
template void PointersManager::printDevContentOnHost<int>(const void* pDev, const sd::LongType len) const;
template void PointersManager::printDevContentOnHost<float>(const void* pDev, const sd::LongType len) const;
template void PointersManager::printDevContentOnHost<double>(const void* pDev, const sd::LongType len) const;
} // namespace sd
|
f29010e7e41597ec565b6a4b1a843a720c88b434.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <ctime>
#include <vector>
#include <hiprand/hiprand.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#define MIN(a, b) (((a)<(b)?(a):(b)))
__device__
float source(float rand_b) { return rand_b; }
__device__
float f_gene(float mu, float sigma, float rand_a, float rand_b)
{
return -mu * logf( rand_a ) / sigma + source(rand_b);
}
__global__
void trajs(float mu, float sigma, float* parts, unsigned nb_parts, float* rands_a, float* rands_b )
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
if (x>=nb_parts) return;
parts[x] = f_gene(mu, sigma, rands_a[x], rands_b[x]);
}
__global__
void make_distrib(float* parts,
unsigned nb_parts,
unsigned* distrib,
unsigned nb_segs,
unsigned* below,
unsigned* above,
float min,
float max,
unsigned nb_threads)
{
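// each thread walks one contiguous slice of the (sorted) samples and increments the
// histogram bin each value falls into; values outside [min, max) go to *below / *above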
unsigned x = threadIdx.x+blockIdx.x*blockDim.x;
if (x>=nb_threads) return;
unsigned range_size = floorf((float) nb_parts/nb_threads),
i = x*range_size;
int seg = floorf( (float) (parts[i]-min)/(max-min)*nb_segs );
for (i++; i<(x+1)*range_size; i++){
if ( floorf( (float) (parts[i]-min)/(max-min)*nb_segs ) > seg )
seg = (int) floorf( (float) (parts[i]-min)/(max-min)*nb_segs );
if ( seg<0 ) (*below)++;
else if ( seg>=nb_segs ) (*above)++;
else distrib[ seg ]++;
}
}
int main(int argc, char **argv)
{
if (argc!=5) return -1;
float mu = atof(argv[1]),
sigma = atof(argv[2]);
unsigned nb_parts = atoi(argv[3]),
nb_segs = atoi(argv[4]);
float* parts,
*rands_a,
*rands_b;
hipMalloc(&parts, sizeof(float)*nb_parts);
hipMalloc(&rands_a, sizeof(float)*nb_parts);
hipMalloc(&rands_b, sizeof(float)*nb_parts);
dim3 blockSize(512),
gridSize(ceil((float) nb_parts/512));
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen, time(NULL));
hiprandGenerateUniform(gen, rands_a, nb_parts);
hiprandGenerateUniform(gen, rands_b, nb_parts);
hipLaunchKernelGGL(( trajs), dim3(gridSize), dim3(blockSize), 0, 0, mu, sigma, parts, nb_parts, rands_a, rands_b);
thrust::sort(thrust::device, parts, parts+nb_parts);
unsigned* distrib,
*above,
*below;
hipMalloc(&distrib, sizeof(unsigned)*nb_segs);
hipMalloc(&below, sizeof(unsigned));
hipMalloc(&above, sizeof(unsigned));
hipMemset(distrib, 0, sizeof(unsigned)*nb_segs); // bins and counters must start at zero
hipMemset(below, 0, sizeof(unsigned));
hipMemset(above, 0, sizeof(unsigned));
hipLaunchKernelGGL(( make_distrib), dim3(gridSize), dim3(blockSize), 0, 0, parts,
nb_parts,
distrib,
nb_segs,
below,
above,
0, 1,
MIN(nb_segs/2, nb_parts/2));
std::vector<unsigned> h_distrib (nb_segs);
hipMemcpy(h_distrib.data(), distrib, sizeof(unsigned)*nb_segs, hipMemcpyDeviceToHost);
// for (int i=0; i<nb_segs; i++)
// std::cout << (float) i/nb_segs << " " << h_distrib.at(i) << std::endl;
return 0;
} | f29010e7e41597ec565b6a4b1a843a720c88b434.cu | #include <iostream>
#include <stdlib.h>
#include <ctime>
#include <vector>
#include <curand.h>
#include <cuda.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#define MIN(a, b) (((a)<(b)?(a):(b)))
__device__
float source(float rand_b) { return rand_b; }
__device__
float f_gene(float mu, float sigma, float rand_a, float rand_b)
{
return -mu * logf( rand_a ) / sigma + source(rand_b);
}
__global__
void trajs(float mu, float sigma, float* parts, unsigned nb_parts, float* rands_a, float* rands_b )
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
if (x>=nb_parts) return;
parts[x] = f_gene(mu, sigma, rands_a[x], rands_b[x]);
}
__global__
void make_distrib(float* parts,
unsigned nb_parts,
unsigned* distrib,
unsigned nb_segs,
unsigned* below,
unsigned* above,
float min,
float max,
unsigned nb_threads)
{
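// each thread walks one contiguous slice of the (sorted) samples and increments the
// histogram bin each value falls into; values outside [min, max) go to *below / *above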
unsigned x = threadIdx.x+blockIdx.x*blockDim.x;
if (x>=nb_threads) return;
unsigned range_size = floorf((float) nb_parts/nb_threads),
i = x*range_size;
int seg = floorf( (float) (parts[i]-min)/(max-min)*nb_segs );
for (i++; i<(x+1)*range_size; i++){
if ( floorf( (float) (parts[i]-min)/(max-min)*nb_segs ) > seg )
seg = (int) floorf( (float) (parts[i]-min)/(max-min)*nb_segs );
if ( seg<0 ) (*below)++;
else if ( seg>=nb_segs ) (*above)++;
else distrib[ seg ]++;
}
}
int main(int argc, char **argv)
{
if (argc!=5) return -1;
float mu = atof(argv[1]),
sigma = atof(argv[2]);
unsigned nb_parts = atoi(argv[3]),
nb_segs = atoi(argv[4]);
float* parts,
*rands_a,
*rands_b;
cudaMalloc(&parts, sizeof(float)*nb_parts);
cudaMalloc(&rands_a, sizeof(float)*nb_parts);
cudaMalloc(&rands_b, sizeof(float)*nb_parts);
dim3 blockSize(512),
gridSize(ceil((float) nb_parts/512));
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, time(NULL));
curandGenerateUniform(gen, rands_a, nb_parts);
curandGenerateUniform(gen, rands_b, nb_parts);
trajs<<<gridSize, blockSize>>>(mu, sigma, parts, nb_parts, rands_a, rands_b);
thrust::sort(thrust::device, parts, parts+nb_parts);
unsigned* distrib,
*above,
*below;
cudaMalloc(&distrib, sizeof(unsigned)*nb_segs);
cudaMalloc(&below, sizeof(unsigned));
cudaMalloc(&above, sizeof(unsigned));
cudaMemset(distrib, 0, sizeof(unsigned)*nb_segs); // bins and counters must start at zero
cudaMemset(below, 0, sizeof(unsigned));
cudaMemset(above, 0, sizeof(unsigned));
make_distrib<<<gridSize, blockSize>>>(parts,
nb_parts,
distrib,
nb_segs,
below,
above,
0, 1,
MIN(nb_segs/2, nb_parts/2));
std::vector<unsigned> h_distrib (nb_segs);
cudaMemcpy(h_distrib.data(), distrib, sizeof(unsigned)*nb_segs, cudaMemcpyDeviceToHost);
// for (int i=0; i<nb_segs; i++)
// std::cout << (float) i/nb_segs << " " << h_distrib.at(i) << std::endl;
return 0;
} |
8c52969c0b6709b1de865c155aa6cfb2f0863f33.hip | // !!! This is a file automatically generated by hipify!!!
extern "C" {
#include "../shape/head.h"
}
#define GOLD 1.618034
#define GLIMIT 100.0
#define TINY 1.0e-20
//#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define SIGN(a,b) ((b) > 0.0 ? fabs(a) : -fabs(a))
#define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d);
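// Brackets a minimum of func along one dimension: starting from ax and bx it steps
// downhill with golden-ratio magnification and parabolic extrapolation (Numerical
// Recipes' mnbrak) until fa > fb and fc > fb, evaluating the objective on the GPU.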
__host__ void mnbrak_gpu(
double *ax,
double *bx,
double *cx,
double *fa,
double *fb,
double *fc,
double (*func)
(double,
struct vertices_t**,
unsigned char*,
unsigned char*,
int*,
int*,
int*,
int,
int,
hipStream_t*),
struct vertices_t **verts,
unsigned char *htype,
unsigned char *dtype,
int *nframes,
int *nviews,
int *lc_n,
int nsets,
int nf,
hipStream_t *bf_stream )
{
double ulim,u,r,q,fu,dum;
*fa = (*func)(*ax, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
// printf("mnbrak ax, %g\n", *ax);
// printf("mnbrak fa, %g\n", *fa);
*fb = (*func)(*bx, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
// printf("mnbrak bx, %g\n", *bx);
// printf("mnbrak fb, %g\n", *fb);
if (*fb > *fa) {
SHFT(dum,*ax,*bx,dum)
SHFT(dum,*fb,*fa,dum)
}
*cx = (*bx)+GOLD*(*bx-*ax);
*fc = (*func)(*cx, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
while (*fb > *fc) {
r = (*bx-*ax) * (*fb-*fc);
q = (*bx-*cx) * (*fb-*fa);
u = (*bx) - ((*bx-*cx)*q - (*bx-*ax)*r)/
(2.0 * SIGN( MAX( fabs(q-r),TINY), q-r));
ulim = (*bx) + GLIMIT * (*cx-*bx);
if ((*bx-u)*(u-*cx) > 0.0) {
fu=(*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
if (fu < *fc) {
*ax=(*bx);
*bx=u;
*fa=(*fb);
*fb=fu;
return;
} else if (fu > *fb) {
*cx = u;
*fc = fu;
return;
}
u = (*cx) + GOLD * (*cx-*bx);
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
} else if ((*cx-u) * (u-ulim) > 0.0) {
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
if (fu < *fc) {
SHFT(*bx,*cx,u,*cx+GOLD*(*cx-*bx))
SHFT(*fb,*fc,fu,(*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream))
}
} else if ((u-ulim)*(ulim-*cx) >= 0.0) {
u = ulim;
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
} else {
u = (*cx)+GOLD*(*cx-*bx);
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
}
SHFT(*ax,*bx,*cx,u)
SHFT(*fa,*fb,*fc,fu)
}
}
__host__ void mnbrak_gpu_dbg(
double *ax,
double *bx,
double *cx,
double *fa,
double *fb,
double *fc,
double (*func)
(double,
struct vertices_t**,
unsigned char*,
unsigned char*,
int*,
int*,
int*,
int,
int,
hipStream_t*),
struct vertices_t **verts,
unsigned char *htype,
unsigned char *dtype,
int *nframes,
int *nviews,
int *lc_n,
int nsets,
int nf,
hipStream_t *bf_stream )
{
double ulim,u,r,q,fu,dum;
int dbg=0;
printf("mnbrak start\n");
*fa = (*func)(*ax, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
*fb = (*func)(*bx, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
if (*fb > *fa) {
printf("if (*fb > *fa), 1\n");
SHFT(dum,*ax,*bx,dum)
SHFT(dum,*fb,*fa,dum)
}
else printf("if (*fb > *fa), 0\n");
*cx = (*bx)+GOLD*(*bx-*ax);
*fc = (*func)(*cx, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
printf("prior to while loop\n");
printf("ax, %3.8g\n", *ax);
printf("bx, %3.8g\n", *bx);
printf("cx, %3.8g\n", *cx);
while (*fb > *fc) {
dbg++;
printf("start of loop #, %i\n", dbg);
r = (*bx-*ax) * (*fb-*fc);
q = (*bx-*cx) * (*fb-*fa);
u = (*bx) - ((*bx-*cx)*q - (*bx-*ax)*r)/
(2.0 * SIGN( MAX( fabs(q-r),TINY), q-r));
ulim = (*bx) + GLIMIT * (*cx-*bx);
printf("r, %3.8g\n", r);
printf("q, %3.8g\n", q);
printf("u, %3.8g\n", u);
printf("ulim, %3.8g\n", ulim);
if ((*bx-u)*(u-*cx) > 0.0) {
printf("if ((*bx-u)*(u-*cx) > 0.0), 1\n");
fu=(*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
printf("fu, %3.8g\n", fu);
if (fu < *fc) {
printf("if (fu < *fc), 1\n");
*ax=(*bx);
*bx=u;
*fa=(*fb);
*fb=fu;
printf("*ax, %3.8g\n", *ax);
printf("*bx, %3.8g\n", *bx);
printf("*fa, %3.8g\n", *fa);
printf("*fb, %3.8g\n", *fb);
return;
} else if (fu > *fb) {
printf("if ((*bx-u)*(u-*cx) > 0.0), 0\n");
printf("if (fu > *fb), 1\n");
*cx = u;
*fc = fu;
printf("*cx, %3.8g\n", *cx);
printf("*fc, %3.8g\n", *fc);
return;
}
u = (*cx) + GOLD * (*cx-*bx);
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
printf("u, %3.8g\n", u);
printf("fu, %3.8g\n", fu);
} else if ((*cx-u) * (u-ulim) > 0.0) {
printf("if ((*bx-u)*(u-*cx) > 0.0), 0\n");
printf("else if ((*cx-u)*(u-ulim) > 0.0), 1\n");
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
printf("fu, %3.8g\n", fu);
if (fu < *fc) {
printf("if (fu < *fc), 1\n");
SHFT(*bx,*cx,u,*cx+GOLD*(*cx-*bx))
SHFT(*fb,*fc,fu,(*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream))
}
else printf("if (fu < *fc), 0\n");
} else if ((u-ulim)*(ulim-*cx) >= 0.0) {
printf("if ((*bx-u)*(u-*cx) > 0.0), 0\n");
printf("if ((u-ulim)*(ulim-*cx) > 0.0), 1\n");
u = ulim;
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
printf("u, %3.8g\n", u);
printf("fu, %3.8g\n", fu);
} else {
printf("if ((*bx-u)*(u-*cx) > 0.0), 0\n");
u = (*cx)+GOLD*(*cx-*bx);
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
printf("u, %3.8g\n", u);
printf("fu, %3.8g\n", fu);
}
SHFT(*ax,*bx,*cx,u)
SHFT(*fa,*fb,*fc,fu)
}
printf("loops in mnbrak_gpu, %i\n", dbg);
}
__host__ void mnbrak_MFS_gpu(
double *ax,
double *bx,
double *cx,
double *fa,
double *fb,
double *fc,
double (*func)
(double,
struct vertices_t**,
int*,
int,
int,
hipStream_t*),
struct vertices_t **verts,
int *nviews,
int nsets,
int nf,
hipStream_t *bf_stream )
{
double ulim,u,r,q,fu,dum;
*fa = (*func)(*ax, verts, nviews, nsets, nf, bf_stream);
*fb = (*func)(*bx, verts, nviews, nsets, nf, bf_stream);
if (*fb > *fa) {
SHFT(dum,*ax,*bx,dum)
SHFT(dum,*fb,*fa,dum)
}
*cx = (*bx)+GOLD*(*bx-*ax);
*fc = (*func)(*cx, verts, nviews, nsets, nf, bf_stream);
while (*fb > *fc) {
r = (*bx-*ax) * (*fb-*fc);
q = (*bx-*cx) * (*fb-*fa);
u = (*bx) - ((*bx-*cx)*q - (*bx-*ax)*r)/
(2.0 * SIGN( MAX( fabs(q-r),TINY), q-r));
ulim = (*bx) + GLIMIT * (*cx-*bx);
if ((*bx-u)*(u-*cx) > 0.0) {
fu=(*func)(u, verts, nviews, nsets, nf, bf_stream);
if (fu < *fc) {
*ax=(*bx);
*bx=u;
*fa=(*fb);
*fb=fu;
return;
} else if (fu > *fb) {
*cx = u;
*fc = fu;
return;
}
u = (*cx) + GOLD * (*cx-*bx);
fu = (*func)(u, verts, nviews, nsets, nf, bf_stream);
} else if ((*cx-u) * (u-ulim) > 0.0) {
fu = (*func)(u, verts, nviews, nsets, nf, bf_stream);
if (fu < *fc) {
SHFT(*bx,*cx,u,*cx+GOLD*(*cx-*bx))
SHFT(*fb,*fc,fu,(*func)(u, verts, nviews, nsets, nf, bf_stream))
}
} else if ((u-ulim)*(ulim-*cx) >= 0.0) {
u = ulim;
fu = (*func)(u, verts, nviews, nsets, nf, bf_stream);
} else {
u = (*cx)+GOLD*(*cx-*bx);
fu = (*func)(u, verts, nviews, nsets, nf, bf_stream);
}
SHFT(*ax,*bx,*cx,u)
SHFT(*fa,*fb,*fc,fu)
}
}
#undef GOLD
#undef GLIMIT
#undef TINY
#undef MAX
#undef SIGN
#undef SHFT
| 8c52969c0b6709b1de865c155aa6cfb2f0863f33.cu | extern "C" {
#include "../shape/head.h"
}
#define GOLD 1.618034
#define GLIMIT 100.0
#define TINY 1.0e-20
//#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define SIGN(a,b) ((b) > 0.0 ? fabs(a) : -fabs(a))
#define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d);
__host__ void mnbrak_gpu(
double *ax,
double *bx,
double *cx,
double *fa,
double *fb,
double *fc,
double (*func)
(double,
struct vertices_t**,
unsigned char*,
unsigned char*,
int*,
int*,
int*,
int,
int,
cudaStream_t*),
struct vertices_t **verts,
unsigned char *htype,
unsigned char *dtype,
int *nframes,
int *nviews,
int *lc_n,
int nsets,
int nf,
cudaStream_t *bf_stream )
{
double ulim,u,r,q,fu,dum;
*fa = (*func)(*ax, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
// printf("mnbrak ax, %g\n", *ax);
// printf("mnbrak fa, %g\n", *fa);
*fb = (*func)(*bx, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
// printf("mnbrak bx, %g\n", *bx);
// printf("mnbrak fb, %g\n", *fb);
if (*fb > *fa) {
SHFT(dum,*ax,*bx,dum)
SHFT(dum,*fb,*fa,dum)
}
*cx = (*bx)+GOLD*(*bx-*ax);
*fc = (*func)(*cx, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
while (*fb > *fc) {
r = (*bx-*ax) * (*fb-*fc);
q = (*bx-*cx) * (*fb-*fa);
u = (*bx) - ((*bx-*cx)*q - (*bx-*ax)*r)/
(2.0 * SIGN( MAX( fabs(q-r),TINY), q-r));
ulim = (*bx) + GLIMIT * (*cx-*bx);
if ((*bx-u)*(u-*cx) > 0.0) {
fu=(*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
if (fu < *fc) {
*ax=(*bx);
*bx=u;
*fa=(*fb);
*fb=fu;
return;
} else if (fu > *fb) {
*cx = u;
*fc = fu;
return;
}
u = (*cx) + GOLD * (*cx-*bx);
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
} else if ((*cx-u) * (u-ulim) > 0.0) {
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
if (fu < *fc) {
SHFT(*bx,*cx,u,*cx+GOLD*(*cx-*bx))
SHFT(*fb,*fc,fu,(*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream))
}
} else if ((u-ulim)*(ulim-*cx) >= 0.0) {
u = ulim;
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
} else {
u = (*cx)+GOLD*(*cx-*bx);
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
}
SHFT(*ax,*bx,*cx,u)
SHFT(*fa,*fb,*fc,fu)
}
}
__host__ void mnbrak_gpu_dbg(
double *ax,
double *bx,
double *cx,
double *fa,
double *fb,
double *fc,
double (*func)
(double,
struct vertices_t**,
unsigned char*,
unsigned char*,
int*,
int*,
int*,
int,
int,
cudaStream_t*),
struct vertices_t **verts,
unsigned char *htype,
unsigned char *dtype,
int *nframes,
int *nviews,
int *lc_n,
int nsets,
int nf,
cudaStream_t *bf_stream )
{
double ulim,u,r,q,fu,dum;
int dbg=0;
printf("mnbrak start\n");
*fa = (*func)(*ax, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
*fb = (*func)(*bx, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
if (*fb > *fa) {
printf("if (*fb > *fa), 1\n");
SHFT(dum,*ax,*bx,dum)
SHFT(dum,*fb,*fa,dum)
}
else printf("if (*fb > *fa), 0\n");
*cx = (*bx)+GOLD*(*bx-*ax);
*fc = (*func)(*cx, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
printf("prior to while loop\n");
printf("ax, %3.8g\n", *ax);
printf("bx, %3.8g\n", *bx);
printf("cx, %3.8g\n", *cx);
while (*fb > *fc) {
dbg++;
printf("start of loop #, %i\n", dbg);
r = (*bx-*ax) * (*fb-*fc);
q = (*bx-*cx) * (*fb-*fa);
u = (*bx) - ((*bx-*cx)*q - (*bx-*ax)*r)/
(2.0 * SIGN( MAX( fabs(q-r),TINY), q-r));
ulim = (*bx) + GLIMIT * (*cx-*bx);
printf("r, %3.8g\n", r);
printf("q, %3.8g\n", q);
printf("u, %3.8g\n", u);
printf("ulim, %3.8g\n", ulim);
if ((*bx-u)*(u-*cx) > 0.0) {
printf("if ((*bx-u)*(u-*cx) > 0.0), 1\n");
fu=(*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
printf("fu, %3.8g\n", fu);
if (fu < *fc) {
printf("if (fu < *fc), 1\n");
*ax=(*bx);
*bx=u;
*fa=(*fb);
*fb=fu;
printf("*ax, %3.8g\n", *ax);
printf("*bx, %3.8g\n", *bx);
printf("*fa, %3.8g\n", *fa);
printf("*fb, %3.8g\n", *fb);
return;
} else if (fu > *fb) {
printf("if ((*bx-u)*(u-*cx) > 0.0), 0\n");
printf("if (fu > *fb), 1\n");
*cx = u;
*fc = fu;
printf("*cx, %3.8g\n", *cx);
printf("*fc, %3.8g\n", *fc);
return;
}
u = (*cx) + GOLD * (*cx-*bx);
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
printf("u, %3.8g\n", u);
printf("fu, %3.8g\n", fu);
} else if ((*cx-u) * (u-ulim) > 0.0) {
printf("if ((*bx-u)*(u-*cx) > 0.0), 0\n");
printf("else if ((*cx-u)*(u-ulim) > 0.0), 1\n");
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
printf("fu, %3.8g\n", fu);
if (fu < *fc) {
printf("if (fu < *fc), 1\n");
SHFT(*bx,*cx,u,*cx+GOLD*(*cx-*bx))
SHFT(*fb,*fc,fu,(*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream))
}
else printf("if (fu < *fc), 0\n");
} else if ((u-ulim)*(ulim-*cx) >= 0.0) {
printf("if ((*bx-u)*(u-*cx) > 0.0), 0\n");
printf("if ((u-ulim)*(ulim-*cx) > 0.0), 1\n");
u = ulim;
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
printf("u, %3.8g\n", u);
printf("fu, %3.8g\n", fu);
} else {
printf("if ((*bx-u)*(u-*cx) > 0.0), 0\n");
u = (*cx)+GOLD*(*cx-*bx);
fu = (*func)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream);
printf("u, %3.8g\n", u);
printf("fu, %3.8g\n", fu);
}
SHFT(*ax,*bx,*cx,u)
SHFT(*fa,*fb,*fc,fu)
}
printf("loops in mnbrak_gpu, %i\n", dbg);
}
__host__ void mnbrak_MFS_gpu(
double *ax,
double *bx,
double *cx,
double *fa,
double *fb,
double *fc,
double (*func)
(double,
struct vertices_t**,
int*,
int,
int,
cudaStream_t*),
struct vertices_t **verts,
int *nviews,
int nsets,
int nf,
cudaStream_t *bf_stream )
{
double ulim,u,r,q,fu,dum;
*fa = (*func)(*ax, verts, nviews, nsets, nf, bf_stream);
*fb = (*func)(*bx, verts, nviews, nsets, nf, bf_stream);
if (*fb > *fa) {
SHFT(dum,*ax,*bx,dum)
SHFT(dum,*fb,*fa,dum)
}
*cx = (*bx)+GOLD*(*bx-*ax);
*fc = (*func)(*cx, verts, nviews, nsets, nf, bf_stream);
while (*fb > *fc) {
r = (*bx-*ax) * (*fb-*fc);
q = (*bx-*cx) * (*fb-*fa);
u = (*bx) - ((*bx-*cx)*q - (*bx-*ax)*r)/
(2.0 * SIGN( MAX( fabs(q-r),TINY), q-r));
ulim = (*bx) + GLIMIT * (*cx-*bx);
if ((*bx-u)*(u-*cx) > 0.0) {
fu=(*func)(u, verts, nviews, nsets, nf, bf_stream);
if (fu < *fc) {
*ax=(*bx);
*bx=u;
*fa=(*fb);
*fb=fu;
return;
} else if (fu > *fb) {
*cx = u;
*fc = fu;
return;
}
u = (*cx) + GOLD * (*cx-*bx);
fu = (*func)(u, verts, nviews, nsets, nf, bf_stream);
} else if ((*cx-u) * (u-ulim) > 0.0) {
fu = (*func)(u, verts, nviews, nsets, nf, bf_stream);
if (fu < *fc) {
SHFT(*bx,*cx,u,*cx+GOLD*(*cx-*bx))
SHFT(*fb,*fc,fu,(*func)(u, verts, nviews, nsets, nf, bf_stream))
}
} else if ((u-ulim)*(ulim-*cx) >= 0.0) {
u = ulim;
fu = (*func)(u, verts, nviews, nsets, nf, bf_stream);
} else {
u = (*cx)+GOLD*(*cx-*bx);
fu = (*func)(u, verts, nviews, nsets, nf, bf_stream);
}
SHFT(*ax,*bx,*cx,u)
SHFT(*fa,*fb,*fc,fu)
}
}
#undef GOLD
#undef GLIMIT
#undef TINY
#undef MAX
#undef SIGN
#undef SHFT
|
02b81fb4715af02cff4be19902da6b783722d58e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<hip/hip_runtime.h>
#include<hiprand/hiprand.h>
#include<iostream>
#include<stdlib.h>
#include<time.h>
#include<cstdio>
#include <assert.h>
#define M 500
#define N 500
#define K 400
#define C 100
using namespace std;
__global__ void multi_kernel(int *mn,int *m, int *n){
int xbidx = blockIdx.x;
int ybidx = blockIdx.y;
int tidx = threadIdx.x;
__shared__ int sh_var[N];
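// each thread stores one product term mn[ybidx][tidx] * m[tidx][xbidx] of this block's output element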
sh_var[tidx] = mn[N * ybidx + tidx] * m[K * tidx + xbidx];
__syncthreads();
// let a single thread per block reduce the shared products (every thread
// read-modify-writing the same global element would be a data race)
if (tidx == 0) {
int sum = 0;
for (int i = 0; i < N; i++) sum += sh_var[i];
n[K * ybidx + xbidx] = sum;
}
}
int multiplication(){
int *a,*b,*c;
int an[M][N];
int bn[N][K];
int cn[M][K];
//Generating random Matrix B
for (int i = 0; i < N; i++){
for (int j = 0; j < K; j++){
bn[i][j] = (int)rand() % 100 * sizeof(int);
}
}
cout << "Matrix B generated" << endl;
hipMallocManaged((void **)&b, N * K * sizeof(int));
hipMemcpy(b, bn, N * K * sizeof(int), hipMemcpyHostToDevice);
dim3 gridDim(K,M);
for (int i = 0; i < C; i++){
for (int k = 0; k < M; k++){
for (int l = 0; l < N; l++){
an[k][l] = (int)rand() % 100 * sizeof(int);
//printf("%d\n", &an[k][l]);
}
}
hipMallocManaged((void **)&a, M * N * sizeof(int));
hipMallocManaged((void **)&c, M * K * sizeof(int));
hipMemcpy(a, an, M * N * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( multi_kernel) , dim3(gridDim), dim3(N) , 0, 0, a, b, c);
hipMemcpy(cn, c, M * K * sizeof(int), hipMemcpyDeviceToHost);
hipFree(a);
hipFree(c);
}
hipFree(b);
cout << "Completed Successfully" << endl;
cout << "[" << M << "] " << "x" << " [" << N << "] " << "*"<< " [" << N << "] "<< "x" << " [" << K << "]"<< endl;
return 0;
}
int main(){
time_t start, end, t;
start = time(NULL);
srand((unsigned) time(&t));
multiplication();
end = time(NULL);
// printf("%ld", &end);
cout << "Total execution time: " << (end-start) << " seconds" << endl;
return 0;
} | 02b81fb4715af02cff4be19902da6b783722d58e.cu |
#include<stdio.h>
#include<cuda.h>
#include<curand.h>
#include<iostream>
#include<stdlib.h>
#include<time.h>
#include<cstdio>
#include <assert.h>
#define M 500
#define N 500
#define K 400
#define C 100
using namespace std;
__global__ void multi_kernel(int *mn,int *m, int *n){
int xbidx = blockIdx.x;
int ybidx = blockIdx.y;
int tidx = threadIdx.x;
__shared__ int sh_var[N];
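// each thread stores one product term mn[ybidx][tidx] * m[tidx][xbidx] of this block's output element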
sh_var[tidx] = mn[N * ybidx + tidx] * m[K * tidx + xbidx];
__syncthreads();
// let a single thread per block reduce the shared products (every thread
// read-modify-writing the same global element would be a data race)
if (tidx == 0) {
int sum = 0;
for (int i = 0; i < N; i++) sum += sh_var[i];
n[K * ybidx + xbidx] = sum;
}
}
int multiplication(){
int *a,*b,*c;
int an[M][N];
int bn[N][K];
int cn[M][K];
//Generating random Matrix B
for (int i = 0; i < N; i++){
for (int j = 0; j < K; j++){
bn[i][j] = (int)rand() % 100 * sizeof(int);
}
}
cout << "Matrix B generated" << endl;
cudaMallocManaged((void **)&b, N * K * sizeof(int));
cudaMemcpy(b, bn, N * K * sizeof(int), cudaMemcpyHostToDevice);
dim3 gridDim(K,M);
for (int i = 0; i < C; i++){
for (int k = 0; k < M; k++){
for (int l = 0; l < N; l++){
an[k][l] = (int)rand() % 100 * sizeof(int);
//printf("%d\n", &an[k][l]);
}
}
cudaMallocManaged((void **)&a, M * N * sizeof(int));
cudaMallocManaged((void **)&c, M * K * sizeof(int));
cudaMemcpy(a, an, M * N * sizeof(int), cudaMemcpyHostToDevice);
multi_kernel <<< gridDim, N >>> (a, b, c);
cudaMemcpy(cn, c, M * K * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(a);
cudaFree(c);
}
cudaFree(b);
cout << "Completed Successfully" << endl;
cout << "[" << M << "] " << "x" << " [" << N << "] " << "*"<< " [" << N << "] "<< "x" << " [" << K << "]"<< endl;
return 0;
}
int main(){
time_t start, end, t;
start = time(NULL);
srand((unsigned) time(&t));
multiplication();
end = time(NULL);
// printf("%ld", &end);
cout << "Total execution time: " << (end-start) << " seconds" << endl;
return 0;
} |
755b8527ea691103ff0a9f717cdad702876dcfe8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "nodiag_normalize.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *A = NULL;
hipMalloc(&A, XSIZE*YSIZE*sizeof(double));
double *I = NULL;
hipMalloc(&I, XSIZE*YSIZE*sizeof(double));
int n = XSIZE*YSIZE;
int i = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( nodiag_normalize), dim3(gridBlock),dim3(threadBlock), 0, 0, A,I,n,i);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( nodiag_normalize), dim3(gridBlock),dim3(threadBlock), 0, 0, A,I,n,i);
}
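// timed loop below: the kernel launches are asynchronous and nothing synchronizes
// before 'end' is taken, so the figure mostly reflects launch/enqueue overhead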
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( nodiag_normalize), dim3(gridBlock),dim3(threadBlock), 0, 0, A,I,n,i);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 755b8527ea691103ff0a9f717cdad702876dcfe8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "nodiag_normalize.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE*sizeof(double));
double *I = NULL;
cudaMalloc(&I, XSIZE*YSIZE*sizeof(double));
int n = XSIZE*YSIZE;
int i = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
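// Round the matrix extents up to multiples of the block dimensions so the grid covers every element.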
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
nodiag_normalize<<<gridBlock,threadBlock>>>(A,I,n,i);
cudaDeviceSynchronize();
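// Warm-up: 10 untimed launches before the measured loop.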
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
nodiag_normalize<<<gridBlock,threadBlock>>>(A,I,n,i);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
nodiag_normalize<<<gridBlock,threadBlock>>>(A,I,n,i);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
cudaFree(A);
cudaFree(I);
}
}} |
03ee057180efe31fb7ab9da534d8ef5e681f5259.hip | // !!! This is a file automatically generated by hipify!!!
//
// SkyNet Project
// Copyright (C) 2018 by Contributors <https://github.com/Tyill/skynet>
//
// This code is licensed under the MIT License.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files(the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions :
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
#ifdef SN_CUDNN
#include <hip/hip_runtime.h>
#include <cudnn.h>
#include "../stdafx.h"
#include "snOperator/src/Operator/deconvolution.h"
using namespace std;
using namespace SN_Base;
#ifndef cuCHECK
#define cuCHECK(func) if (func != 0){ ERROR_MESS("CUDA error: " + hipGetErrorString(hipGetLastError())); return;}
#endif
struct gpuParams{
cudnnHandle_t cudnn = 0;
cudnnConvolutionDescriptor_t conv_desc = 0;
cudnnTensorDescriptor_t in_desc = 0;
cudnnTensorDescriptor_t out_desc = 0;
cudnnTensorDescriptor_t grin_desc = 0;
cudnnTensorDescriptor_t grout_desc = 0;
cudnnFilterDescriptor_t w_desc = 0;
cudnnFilterDescriptor_t dw_desc = 0;
cudnnTensorDescriptor_t bias_desc = 0;
cudnnConvolutionFwdAlgo_t algoFwd;
cudnnConvolutionBwdDataAlgo_t algoBwdData;
cudnnConvolutionBwdFilterAlgo_t algoBwdW;
size_t wsFwdSz = 0;
size_t wsBwdDataSz = 0;
size_t wsBwdWSz = 0;
size_t inszMem = 0;
snFloat* d_in = 0;
snFloat* d_w = 0;
snFloat* d_dw = 0;
snFloat* d_bias = 0;
snFloat* d_out = 0;
snFloat* d_grout = 0;
void* d_wsFwd = 0;
void* d_wsBwdData = 0;
void* d_wsBwdW = 0;
};
void Deconvolution::iniParamCUDA(const snSize& insz, const snSize& outsz,
const deconvParams& prms, void** pGpuPrm){
hipSetDevice(gpuDeviceId_);
bool isFirst = false;
gpuParams* gpuPrm = (gpuParams*)*pGpuPrm;
if (!gpuPrm){
hipDeviceProp_t cu_deviceProps;
hipGetDeviceProperties(&cu_deviceProps, 0);
if (cu_deviceProps.major < 3){
ERROR_MESS("%s requires SM >= 3.0");
return;
}
gpuPrm = new gpuParams();
memset(gpuPrm, 0, sizeof(gpuParams));
*pGpuPrm = gpuPrm;
cudnnHandle_t cudnn = nullptr;
cuCHECK(cudnnCreate(&cudnn));
gpuPrm->cudnn = cudnn;
isFirst = true;
}
// input
cudnnTensorDescriptor_t in_desc = nullptr;
cuCHECK(cudnnCreateTensorDescriptor(&in_desc));
cuCHECK(cudnnSetTensor4dDescriptor(in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w)));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->in_desc));
gpuPrm->in_desc = in_desc;
// grout
cudnnTensorDescriptor_t grout_desc;
cuCHECK(cudnnCreateTensorDescriptor(&grout_desc));
cuCHECK(cudnnSetTensor4dDescriptor(grout_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w)));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->grout_desc));
gpuPrm->grout_desc = grout_desc;
// w
cudnnFilterDescriptor_t w_desc = nullptr;
cuCHECK(cudnnCreateFilterDescriptor(&w_desc));
cuCHECK(cudnnSetFilter4dDescriptor(w_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
int(insz.d), int(outsz.d), int(prms.fHeight), int(prms.fWidth)));
if (!isFirst)
cuCHECK(cudnnDestroyFilterDescriptor((cudnnFilterDescriptor_t)gpuPrm->w_desc));
gpuPrm->w_desc = w_desc;
// dw
cudnnFilterDescriptor_t dw_desc = nullptr;
cuCHECK(cudnnCreateFilterDescriptor(&dw_desc));
cuCHECK(cudnnSetFilter4dDescriptor(dw_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
int(insz.d), int(outsz.d), int(prms.fHeight), int(prms.fWidth)));
if (!isFirst)
cuCHECK(cudnnDestroyFilterDescriptor((cudnnFilterDescriptor_t)gpuPrm->dw_desc));
gpuPrm->dw_desc = dw_desc;
// conv
cudnnConvolutionDescriptor_t conv_desc = nullptr;
cuCHECK(cudnnCreateConvolutionDescriptor(&conv_desc));
cuCHECK(cudnnSetConvolution2dDescriptor(conv_desc, 0, 0, int(prms.stride), int(prms.stride), 1, 1,
CUDNN_CONVOLUTION, CUDNN_DATA_FLOAT));
if (!isFirst)
cuCHECK(cudnnDestroyConvolutionDescriptor((cudnnConvolutionDescriptor_t)gpuPrm->conv_desc));
gpuPrm->conv_desc = conv_desc;
// output
cudnnTensorDescriptor_t out_desc;
cuCHECK(cudnnCreateTensorDescriptor(&out_desc));
cuCHECK(cudnnSetTensor4dDescriptor(out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
int(outsz.n), int(outsz.d), int(outsz.h), int(outsz.w)));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->out_desc));
gpuPrm->out_desc = out_desc;
cudnnTensorDescriptor_t grin_desc;
cuCHECK(cudnnCreateTensorDescriptor(&grin_desc));
cuCHECK(cudnnSetTensor4dDescriptor(grin_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
int(outsz.n), int(outsz.d), int(outsz.h), int(outsz.w)));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->grin_desc));
gpuPrm->grin_desc = grin_desc;
// bias
cudnnTensorDescriptor_t bias_desc;
cuCHECK(cudnnCreateTensorDescriptor(&bias_desc));
cuCHECK(cudnnSetTensor4dDescriptor(bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, int(insz.d), 1, 1));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->bias_desc));
gpuPrm->bias_desc = bias_desc;
// algorithm
cudnnConvolutionFwdAlgo_t algoFwd;
cuCHECK(cudnnGetConvolutionForwardAlgorithm(gpuPrm->cudnn, grin_desc, w_desc, conv_desc, grout_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algoFwd));
gpuPrm->algoFwd = algoFwd;
cudnnConvolutionBwdDataAlgo_t algoBwdData;
cuCHECK(cudnnGetConvolutionBackwardDataAlgorithm(gpuPrm->cudnn, w_desc, in_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algoBwdData));
gpuPrm->algoBwdData = algoBwdData;
cudnnConvolutionBwdFilterAlgo_t algoBwdW;
cuCHECK(cudnnGetConvolutionBackwardFilterAlgorithm(gpuPrm->cudnn, grin_desc, in_desc, conv_desc, dw_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algoBwdW));
gpuPrm->algoBwdW = algoBwdW;
// workspace
size_t wsFwdSz = 0;
cuCHECK(cudnnGetConvolutionForwardWorkspaceSize(gpuPrm->cudnn, grin_desc, w_desc, conv_desc, grout_desc, algoFwd, &wsFwdSz));
gpuPrm->wsFwdSz = wsFwdSz;
size_t wsBwdDataSz = 0;
cuCHECK(cudnnGetConvolutionBackwardDataWorkspaceSize(gpuPrm->cudnn, w_desc, in_desc, conv_desc, out_desc, algoBwdData, &wsBwdDataSz));
gpuPrm->wsBwdDataSz = wsBwdDataSz;
size_t wsBwdWSz = 0;
cuCHECK(cudnnGetConvolutionBackwardFilterWorkspaceSize(gpuPrm->cudnn, grin_desc, in_desc, conv_desc, dw_desc, algoBwdW, &wsBwdWSz));
gpuPrm->wsBwdWSz = wsBwdWSz;
if (isFirst && !gpuClearMem_){
cuCHECK(hipMalloc(&gpuPrm->d_in, insz.size() * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_w, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_dw, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_out, outsz.size() * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_grout, insz.size() * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_wsFwd, wsFwdSz));
cuCHECK(hipMalloc(&gpuPrm->d_wsBwdData, wsBwdDataSz));
cuCHECK(hipMalloc(&gpuPrm->d_wsBwdW, wsBwdWSz));
cuCHECK(hipMalloc(&gpuPrm->d_bias, insz.d * sizeof(snFloat)));
}
else if (!gpuClearMem_ && (gpuPrm->inszMem < insz.size())){
cuCHECK(hipFree(gpuPrm->d_in)); gpuPrm->d_in = 0;
cuCHECK(hipFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(hipFree(gpuPrm->d_dw)); gpuPrm->d_dw = 0;
cuCHECK(hipFree(gpuPrm->d_out)); gpuPrm->d_out = 0;
cuCHECK(hipFree(gpuPrm->d_grout)); gpuPrm->d_grout = 0;
cuCHECK(hipFree(gpuPrm->d_wsFwd)); gpuPrm->d_wsFwd = 0;
cuCHECK(hipFree(gpuPrm->d_wsBwdData)); gpuPrm->d_wsBwdData = 0;
cuCHECK(hipFree(gpuPrm->d_wsBwdW)); gpuPrm->d_wsBwdW = 0;
cuCHECK(hipFree(gpuPrm->d_bias)); gpuPrm->d_bias = 0;
cuCHECK(hipMalloc(&gpuPrm->d_in, insz.size() * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_w, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_dw, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_out, outsz.size() * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_grout, insz.size() * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_wsFwd, wsFwdSz));
cuCHECK(hipMalloc(&gpuPrm->d_wsBwdData, wsBwdDataSz));
cuCHECK(hipMalloc(&gpuPrm->d_wsBwdW, wsBwdWSz));
cuCHECK(hipMalloc(&gpuPrm->d_bias, insz.d * sizeof(snFloat)));
gpuPrm->inszMem = insz.size();
}
}
void Deconvolution::freeParamCUDA(void* gpuPrms){
hipSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
if (!gpuPrm) return;
cuCHECK(cudnnDestroy(gpuPrm->cudnn));
cuCHECK(cudnnDestroyConvolutionDescriptor(gpuPrm->conv_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->in_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->out_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grin_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grout_desc));
cuCHECK(cudnnDestroyFilterDescriptor(gpuPrm->w_desc));
cuCHECK(cudnnDestroyFilterDescriptor(gpuPrm->dw_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->bias_desc));
cuCHECK(hipFree(gpuPrm->d_in));
cuCHECK(hipFree(gpuPrm->d_w));
cuCHECK(hipFree(gpuPrm->d_dw));
cuCHECK(hipFree(gpuPrm->d_bias));
cuCHECK(hipFree(gpuPrm->d_out));
cuCHECK(hipFree(gpuPrm->d_grout));
cuCHECK(hipFree(gpuPrm->d_wsFwd));
cuCHECK(hipFree(gpuPrm->d_wsBwdData));
cuCHECK(hipFree(gpuPrm->d_wsBwdW));
}
void Deconvolution::forwardCUDA(const deconvParams& prms,
snFloat* weight, const snSize& insz, snFloat* input, const snSize& outsz, snFloat* output, void* gpuPrms){
hipSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
size_t isz = insz.size(), osz = outsz.size();
if (gpuClearMem_){
cuCHECK(hipMalloc(&gpuPrm->d_in, isz * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_w, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_out, osz * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_wsBwdData, gpuPrm->wsBwdDataSz));
}
// input
cuCHECK(hipMemcpy(gpuPrm->d_in, input, isz * sizeof(snFloat), hipMemcpyHostToDevice));
// weight
size_t wsz = outsz.d * insz.d * prms.fHeight * prms.fWidth;
cuCHECK(hipMemcpy(gpuPrm->d_w, weight, wsz * sizeof(snFloat), hipMemcpyHostToDevice));
// run
snFloat alpha = 1.f, beta = 0.f;
cuCHECK(cudnnConvolutionBackwardData(gpuPrm->cudnn,
&alpha,
gpuPrm->w_desc,
gpuPrm->d_w,
gpuPrm->in_desc,
gpuPrm->d_in,
gpuPrm->conv_desc,
gpuPrm->algoBwdData,
gpuPrm->d_wsBwdData,
gpuPrm->wsBwdDataSz,
&beta,
gpuPrm->out_desc,
gpuPrm->d_out));
// result
cuCHECK(hipMemcpy(output, gpuPrm->d_out, osz * sizeof(snFloat), hipMemcpyDeviceToHost));
if (gpuClearMem_){
cuCHECK(hipFree(gpuPrm->d_in)); gpuPrm->d_in = 0;
cuCHECK(hipFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(hipFree(gpuPrm->d_wsBwdData)); gpuPrm->d_wsBwdData = 0;
cuCHECK(hipFree(gpuPrm->d_out)); gpuPrm->d_out = 0;
}
}
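// Adds the per-channel bias to the input-gradient tensor: one block per batch sample, threads stride across the depth channels.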
__global__ void cuBwdBias(snSize insz, snFloat* bias, snFloat* grout){
size_t isz = insz.w * insz.h;
snFloat* pGrOut = grout + isz * insz.d * blockIdx.x + isz * threadIdx.x; // offset to this thread's first channel so each channel slice is updated by exactly one thread
unsigned int d = threadIdx.x;
while (d < insz.d){
snFloat b = bias[d];
for (size_t j = 0; j < isz; ++j)
pGrOut[j] += b;
pGrOut += isz * blockDim.x;
d += blockDim.x;
}
}
void Deconvolution::backwardCUDA_GW(const deconvParams& prms,
snFloat* weight, const snSize& insz, snFloat* input, const snSize& outsz, snFloat* gradIn, snFloat* gradOut, snFloat* dWeightOut, void* gpuPrms){
hipSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
size_t isz = insz.size(), osz = outsz.size();
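// Reuse the forward-output buffer for the incoming gradient (both hold outsz.size() floats).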
void* d_grin = gpuPrm->d_out;
if (gpuClearMem_){
cuCHECK(hipMalloc(&gpuPrm->d_in, isz * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_w, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_dw, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_bias, insz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&d_grin, osz * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_grout, isz * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_wsFwd, gpuPrm->wsFwdSz));
cuCHECK(hipMalloc(&gpuPrm->d_wsBwdW, gpuPrm->wsBwdWSz));
}
// input
cuCHECK(hipMemcpy(gpuPrm->d_in, input, isz * sizeof(snFloat), hipMemcpyHostToDevice));
// grin
cuCHECK(hipMemcpy(d_grin, gradIn, osz * sizeof(snFloat), hipMemcpyHostToDevice));
// weight
size_t wsz = outsz.d * insz.d * prms.fHeight * prms.fWidth;
cuCHECK(hipMemcpy(gpuPrm->d_w, weight, wsz * sizeof(snFloat), hipMemcpyHostToDevice));
// run
snFloat alpha = 1.f, beta = 0.f;
cuCHECK(cudnnConvolutionForward(gpuPrm->cudnn,
&alpha,
gpuPrm->grin_desc,
d_grin,
gpuPrm->w_desc,
gpuPrm->d_w,
gpuPrm->conv_desc,
gpuPrm->algoFwd,
gpuPrm->d_wsFwd,
gpuPrm->wsFwdSz,
&beta,
gpuPrm->grout_desc,
gpuPrm->d_grout));
cuCHECK(cudnnConvolutionBackwardFilter(gpuPrm->cudnn,
&alpha,
gpuPrm->grin_desc,
d_grin,
gpuPrm->in_desc,
gpuPrm->d_in,
gpuPrm->conv_desc,
gpuPrm->algoBwdW,
gpuPrm->d_wsBwdW,
gpuPrm->wsBwdWSz,
&beta,
gpuPrm->dw_desc,
gpuPrm->d_dw));
cuCHECK(cudnnConvolutionBackwardBias(gpuPrm->cudnn,
&alpha,
gpuPrm->in_desc,
gpuPrm->d_in,
&beta,
gpuPrm->bias_desc,
gpuPrm->d_bias));
// +bias
hipLaunchKernelGGL(( cuBwdBias) , dim3(int(insz.n)), dim3(128) , 0, 0, insz, gpuPrm->d_bias, gpuPrm->d_grout);
// result
cuCHECK(hipMemcpy(gradOut, gpuPrm->d_grout, isz * sizeof(snFloat), hipMemcpyDeviceToHost));
cuCHECK(hipMemcpy(dWeightOut, gpuPrm->d_dw, wsz * sizeof(snFloat), hipMemcpyDeviceToHost));
cuCHECK(hipMemcpy(dWeightOut + wsz, gpuPrm->d_bias, insz.d * sizeof(snFloat), hipMemcpyDeviceToHost));
if (gpuClearMem_){
cuCHECK(hipFree(gpuPrm->d_in)); gpuPrm->d_in = 0;
cuCHECK(hipFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(hipFree(d_grin)); gpuPrm->d_out = 0;
cuCHECK(hipFree(gpuPrm->d_grout)); gpuPrm->d_grout = 0;
cuCHECK(hipFree(gpuPrm->d_dw)); gpuPrm->d_dw = 0;
cuCHECK(hipFree(gpuPrm->d_bias)); gpuPrm->d_bias = 0;
cuCHECK(hipFree(gpuPrm->d_wsFwd)); gpuPrm->d_wsFwd = 0;
cuCHECK(hipFree(gpuPrm->d_wsBwdW)); gpuPrm->d_wsBwdW = 0;
}
}
void Deconvolution::backwardCUDA_G(const deconvParams& prms,
snFloat* weight, const snSize& insz, const snSize& outsz, snFloat* gradIn, snFloat* gradOut, void* gpuPrms){
hipSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
size_t isz = insz.size(), osz = outsz.size();
void* d_grin = gpuPrm->d_out;
if (gpuClearMem_){
cuCHECK(hipMalloc(&gpuPrm->d_w, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&d_grin, osz * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_grout, isz * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_wsFwd, gpuPrm->wsFwdSz));
cuCHECK(hipMalloc(&gpuPrm->d_bias, insz.d * sizeof(snFloat)));
}
// grin
cuCHECK(hipMemcpy(d_grin, gradIn, osz * sizeof(snFloat), hipMemcpyHostToDevice));
// weight
size_t wsz = outsz.d * insz.d * prms.fHeight * prms.fWidth;
cuCHECK(hipMemcpy(gpuPrm->d_w, weight, wsz * sizeof(snFloat), hipMemcpyHostToDevice));
cuCHECK(hipMemcpy(gpuPrm->d_bias, weight + wsz, insz.d * sizeof(snFloat), hipMemcpyHostToDevice));
// run
snFloat alpha = 1.f, beta = 0.f;
cuCHECK(cudnnConvolutionForward(gpuPrm->cudnn,
&alpha,
gpuPrm->grin_desc,
d_grin,
gpuPrm->w_desc,
gpuPrm->d_w,
gpuPrm->conv_desc,
gpuPrm->algoFwd,
gpuPrm->d_wsFwd,
gpuPrm->wsFwdSz,
&beta,
gpuPrm->grout_desc,
gpuPrm->d_grout));
// +bias
hipLaunchKernelGGL(( cuBwdBias) , dim3(int(insz.n)), dim3(128) , 0, 0, insz, gpuPrm->d_bias, gpuPrm->d_grout);
// result
cuCHECK(hipMemcpy(gradOut, gpuPrm->d_grout, isz * sizeof(snFloat), hipMemcpyDeviceToHost));
if (gpuClearMem_){
cuCHECK(hipFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(hipFree(d_grin)); gpuPrm->d_out = 0;
cuCHECK(hipFree(gpuPrm->d_bias)); gpuPrm->d_bias = 0;
cuCHECK(hipFree(gpuPrm->d_grout)); gpuPrm->d_grout = 0;
cuCHECK(hipFree(gpuPrm->d_wsFwd)); gpuPrm->d_wsFwd = 0;
}
}
#elif SN_CUDA
#include <hip/hip_runtime.h>
#include "../stdafx.h"
#include "snOperator/src/Operator/deconvolution.h"
using namespace std;
using namespace SN_Base;
#ifndef cuCHECK
#define cuCHECK(func) if (func != 0){ ERROR_MESS("CUDA error: " + hipGetErrorString(hipGetLastError())); return;}
#endif
struct gpuParams{
snFloat* d_in = 0;
snFloat* d_w = 0;
snFloat* d_dw = 0;
snFloat* d_out = 0;
snFloat* d_grout = 0;
size_t inszMem = 0;
};
void Deconvolution::iniParamCUDA(const snSize& insz, const snSize& outsz, const deconvParams& prms, void** pGpuPrm){
hipSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)*pGpuPrm;
if (!gpuPrm){
hipDeviceProp_t cu_deviceProps;
hipGetDeviceProperties(&cu_deviceProps, 0);
if (cu_deviceProps.major < 3){
ERROR_MESS("%s requires SM >= 3.0");
return;
}
gpuPrm = new gpuParams();
memset(gpuPrm, 0, sizeof(gpuParams));
*pGpuPrm = gpuPrm;
if (!gpuClearMem_){
cuCHECK(hipMalloc(&gpuPrm->d_in, insz.size() * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_w, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_out, outsz.size() * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_grout, insz.size() * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_dw, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * outsz.n * sizeof(snFloat)));
}
}
else if (!gpuClearMem_ && (gpuPrm->inszMem < insz.size())){
cuCHECK(hipFree(gpuPrm->d_in)); gpuPrm->d_in = 0;
cuCHECK(hipFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(hipFree(gpuPrm->d_out)); gpuPrm->d_out = 0;
cuCHECK(hipFree(gpuPrm->d_grout)); gpuPrm->d_grout = 0;
cuCHECK(hipFree(gpuPrm->d_dw)); gpuPrm->d_dw = 0;
cuCHECK(hipMalloc(&gpuPrm->d_in, insz.size() * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_w, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_out, outsz.size() * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_grout, insz.size() * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_dw, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * outsz.n * sizeof(snFloat)));
gpuPrm->inszMem = insz.size();
}
}
void Deconvolution::freeParamCUDA(void* gpuPrms){
hipSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
if (!gpuPrm) return;
cuCHECK(hipFree(gpuPrm->d_in));
cuCHECK(hipFree(gpuPrm->d_w));
cuCHECK(hipFree(gpuPrm->d_dw));
cuCHECK(hipFree(gpuPrm->d_out));
cuCHECK(hipFree(gpuPrm->d_grout));
}
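// Transposed-convolution forward pass: every input pixel scatters its value, weighted by the fWidth x fHeight filter, into a stride-spaced window of the output; one block per (output map, batch sample).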
__global__ void cuDeconvFwd(size_t fWidth, size_t fHeight, size_t stride,
snFloat* weight, snSize insz, snFloat* input, snSize outsz, snFloat* output){
size_t wStepByD = fWidth * fHeight,
wStepByK = wStepByD * outsz.d + 1,
outStepByD = outsz.w * outsz.h,
outStepByN = outStepByD * outsz.d,
inStepByD = insz.w * insz.h,
inStepByN = inStepByD * insz.d;
// gridDim.x - number of output layers
// gridDim.y - batch sz
weight += blockIdx.x * wStepByD;
input += blockIdx.y * inStepByN;
output += blockIdx.x * outStepByD + blockIdx.y * outStepByN;
unsigned int oz = 0;
while (oz < insz.d){
unsigned int oy = threadIdx.y;
while (oy < insz.h){
unsigned int ox = threadIdx.x;
while (ox < insz.w){
size_t posW = ox * stride, posH = oy * stride;
if (oz == 0){
for (size_t c = 0; c < wStepByD; ++c){
size_t cx = c % fWidth, cy = c / fWidth;
output[(cx + posW) + (cy + posH) * outsz.w] = 0;
}
}
// kernel
snFloat in = input[ox + oy * insz.w];
#pragma unroll
for (size_t c = 0; c < wStepByD; ++c){
size_t cx = c % fWidth, cy = c / fWidth,
si = (cx + posW) + (cy + posH) * outsz.w,
sw = cx + cy * fWidth;
output[si] += in * weight[sw];
}
ox += blockDim.x;
}
oy += blockDim.y;
}
weight += wStepByK;
input += inStepByD;
++oz;
}
}
void Deconvolution::forwardCUDA(const deconvParams& prms,
snFloat* weight, const snSize& insz, snFloat* input, const snSize& outsz, snFloat* output, void* gpuPrms){
hipSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
size_t isz = insz.size(), osz = outsz.size();
if (gpuClearMem_){
cuCHECK(hipMalloc(&gpuPrm->d_in, isz * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_w, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_out, osz * sizeof(snFloat)));
}
// input
cuCHECK(hipMemcpy(gpuPrm->d_in, input, isz * sizeof(snFloat), hipMemcpyHostToDevice));
// weight
cuCHECK(hipMemcpy(gpuPrm->d_w, weight, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat), hipMemcpyHostToDevice));
// run
dim3 dimBlock(16, 16);
dim3 dimGrid(int(outsz.d), int(outsz.n));
hipLaunchKernelGGL(( cuDeconvFwd) , dim3(dimGrid), dim3(dimBlock) , 0, 0, prms.fWidth,
prms.fHeight,
prms.stride,
gpuPrm->d_w,
insz,
gpuPrm->d_in,
outsz,
gpuPrm->d_out);
// result
cuCHECK(hipMemcpy(output, gpuPrm->d_out, osz * sizeof(snFloat), hipMemcpyDeviceToHost));
if (gpuClearMem_){
cuCHECK(hipFree(gpuPrm->d_in)); gpuPrm->d_in = 0;
cuCHECK(hipFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(hipFree(gpuPrm->d_out)); gpuPrm->d_out = 0;
}
}
__global__ void cuDeconvBwd_GW(size_t fWidth, size_t fHeight, size_t stride,
snFloat* weight, snSize insz, snFloat* input, snSize outsz, snFloat* gradIn, snFloat* gradOut, snFloat* dWeightOut){
size_t wStepByD = fWidth * fHeight,
wStepByK = wStepByD * outsz.d + 1,
wStepByN = wStepByK * insz.d,
outStepByD = outsz.w * outsz.h,
outStepByN = outStepByD * outsz.d,
inStepByD = insz.w * insz.h,
inStepByN = inStepByD * insz.d;
// gridDim.x - number of input layers
// gridDim.y - batch sz
input += blockIdx.x * inStepByD + blockIdx.y * inStepByN;
weight += blockIdx.x * wStepByK;
dWeightOut += blockIdx.x * wStepByK + blockIdx.y * wStepByN;
gradIn += blockIdx.y * outStepByN;
gradOut += blockIdx.x * inStepByD + blockIdx.y * inStepByN;
unsigned int oz = 0;
while (oz < outsz.d){
memset(dWeightOut, 0, wStepByD * sizeof(snFloat));
if (blockIdx.x == 0)
dWeightOut[wStepByD * outsz.d] = 0;
unsigned int oy = threadIdx.y;
while (oy < insz.h){
unsigned int ox = threadIdx.x;
while (ox < insz.w){
if (oz == 0)
gradOut[ox + oy * insz.w] = weight[wStepByD * outsz.d]; // bias
size_t posW = ox * stride, posH = oy * stride;
// kernel
snFloat csum = 0, cin = input[ox + oy * insz.w];
#pragma unroll
for (size_t c = 0; c < wStepByD; ++c){
size_t cx = c % fWidth, cy = c / fWidth,
si = (cx + posW) + (cy + posH) * outsz.w,
sw = cx + cy * fWidth;
csum += gradIn[si] * weight[sw];
dWeightOut[sw] += gradIn[si] * cin;
}
gradOut[ox + oy * insz.w] += csum;
if (blockIdx.x == 0)
dWeightOut[wStepByD * outsz.d] += cin; // bias
ox += blockDim.x;
}
oy += blockDim.y;
}
weight += wStepByD;
dWeightOut += wStepByD;
gradIn += outStepByD;
++oz;
}
}
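// Averages the per-sample weight/bias gradients (one copy per batch element, stride wStepByN) into the first copy.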
__global__ void cuDeconvWeightMean(size_t kernel, size_t fWidth, size_t fHeight, snSize insz, snFloat* weight){
size_t wStepByD = fWidth * fHeight,
wStepByK = wStepByD * kernel + 1,
wStepByN = wStepByK * insz.d;
unsigned int ox = threadIdx.x;
while (ox < wStepByN){
snFloat csum = weight[ox];
for (size_t i = 1; i < insz.n; ++i)
csum += weight[ox + wStepByN * i];
weight[ox] = csum / insz.n;
ox += blockDim.x;
}
}
void Deconvolution::backwardCUDA_GW(const deconvParams& prms,
snFloat* weight, const snSize& insz, snFloat* input, const snSize& outsz, snFloat* gradIn, snFloat* gradOut, snFloat* dWeightOut, void* gpuPrms){
hipSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
size_t isz = insz.size(), osz = outsz.size();
snFloat* d_grin = gpuPrm->d_out;
if (gpuClearMem_){
cuCHECK(hipMalloc(&gpuPrm->d_in, isz * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_w, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&d_grin, osz * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_grout, isz * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_dw, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * outsz.n * sizeof(snFloat)));
}
// input
cuCHECK(hipMemcpy(gpuPrm->d_in, input, isz * sizeof(snFloat), hipMemcpyHostToDevice));
cuCHECK(hipMemcpy(d_grin, gradIn, osz * sizeof(snFloat), hipMemcpyHostToDevice));
// weight
cuCHECK(hipMemcpy(gpuPrm->d_w, weight, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat), hipMemcpyHostToDevice));
// run
dim3 dimBlock(16, 16);
dim3 dimGrid(int(insz.d), int(outsz.n));
hipLaunchKernelGGL(( cuDeconvBwd_GW) , dim3(dimGrid), dim3(dimBlock) , 0, 0, prms.fWidth,
prms.fHeight,
prms.stride,
gpuPrm->d_w,
insz,
gpuPrm->d_in,
outsz, d_grin,
gpuPrm->d_grout,
gpuPrm->d_dw);
hipLaunchKernelGGL(( cuDeconvWeightMean) , dim3(1), dim3(32) , 0, 0, prms.kernel, prms.fWidth, prms.fHeight, insz, gpuPrm->d_dw);
// result
cuCHECK(hipMemcpy(gradOut, gpuPrm->d_grout, isz * sizeof(snFloat), hipMemcpyDeviceToHost));
cuCHECK(hipMemcpy(dWeightOut, gpuPrm->d_dw, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat), hipMemcpyDeviceToHost));
if (gpuClearMem_){
cuCHECK(hipFree(gpuPrm->d_in)); gpuPrm->d_in = 0;
cuCHECK(hipFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(hipFree(d_grin)); gpuPrm->d_out = 0;
cuCHECK(hipFree(gpuPrm->d_grout)); gpuPrm->d_grout = 0;
cuCHECK(hipFree(gpuPrm->d_dw)); gpuPrm->d_dw = 0;
}
}
__global__ void cuDeconvBwd_G(size_t fWidth, size_t fHeight, size_t stride,
snFloat* weight, snSize insz, snSize outsz, snFloat* gradIn, snFloat* gradOut){
size_t wStepByD = fWidth * fHeight,
wStepByK = wStepByD * outsz.d + 1,
outStepByD = outsz.w * outsz.h,
outStepByN = outStepByD * outsz.d,
inStepByD = insz.w * insz.h,
inStepByN = inStepByD * insz.d;
// gridDim.x - number of input layers
// gridDim.y - batch size
weight += blockIdx.x * wStepByK;
gradIn += blockIdx.y * outStepByN;
gradOut += blockIdx.x * inStepByD + blockIdx.y * inStepByN;
unsigned int oz = 0;
while (oz < outsz.d){
unsigned int oy = threadIdx.y;
while (oy < insz.h){
unsigned int ox = threadIdx.x;
while (ox < insz.w){
if (oz == 0)
gradOut[ox + oy * insz.w] = weight[wStepByD * outsz.d]; // bias
size_t posW = ox * stride, posH = oy * stride;
// kernel
snFloat csum = 0;
#pragma unroll
for (size_t c = 0; c < wStepByD; ++c){
size_t cx = c % fWidth, cy = c / fWidth,
si = (cx + posW) + (cy + posH) * outsz.w,
sw = cx + cy * fWidth;
csum += gradIn[si] * weight[sw];
}
gradOut[ox + oy * insz.w] += csum;
ox += blockDim.x;
}
oy += blockDim.y;
}
weight += wStepByD;
gradIn += outStepByD;
++oz;
}
}
void Deconvolution::backwardCUDA_G(const deconvParams& prms,
snFloat* weight, const snSize& insz, const snSize& outsz, snFloat* gradIn, snFloat* gradOut, void* gpuPrms){
hipSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
size_t isz = insz.size(), osz = outsz.size();
snFloat* d_grin = gpuPrm->d_out;
if (gpuClearMem_){
cuCHECK(hipMalloc(&d_grin, osz * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_w, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat)));
cuCHECK(hipMalloc(&gpuPrm->d_grout, isz * sizeof(snFloat)));
}
// input
cuCHECK(hipMemcpy(d_grin, gradIn, osz * sizeof(snFloat), hipMemcpyHostToDevice));
// weight
cuCHECK(hipMemcpy(gpuPrm->d_w, weight, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat), hipMemcpyHostToDevice));
// run
dim3 dimBlock(16, 16);
dim3 dimGrid(int(insz.d), int(outsz.n));
hipLaunchKernelGGL(( cuDeconvBwd_G) , dim3(dimGrid), dim3(dimBlock) , 0, 0, prms.fWidth,
prms.fHeight,
prms.stride,
gpuPrm->d_w,
insz,
outsz,
d_grin,
gpuPrm->d_grout);
// result
cuCHECK(hipMemcpy(gradOut, gpuPrm->d_grout, isz * sizeof(snFloat), hipMemcpyDeviceToHost));
if (gpuClearMem_){
cuCHECK(hipFree(d_grin)); gpuPrm->d_out = 0;
cuCHECK(hipFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(hipFree(gpuPrm->d_grout)); gpuPrm->d_grout = 0;
}
}
#endif
| 03ee057180efe31fb7ab9da534d8ef5e681f5259.cu | //
// SkyNet Project
// Copyright (C) 2018 by Contributors <https://github.com/Tyill/skynet>
//
// This code is licensed under the MIT License.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files(the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions :
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
#ifdef SN_CUDNN
#include <cuda_runtime.h>
#include <cudnn.h>
#include "../stdafx.h"
#include "snOperator/src/Operator/deconvolution.h"
using namespace std;
using namespace SN_Base;
#ifndef cuCHECK
#define cuCHECK(func) if (func != 0){ ERROR_MESS("CUDA error: " + cudaGetErrorString(cudaGetLastError())); return;}
#endif
struct gpuParams{
cudnnHandle_t cudnn = 0;
cudnnConvolutionDescriptor_t conv_desc = 0;
cudnnTensorDescriptor_t in_desc = 0;
cudnnTensorDescriptor_t out_desc = 0;
cudnnTensorDescriptor_t grin_desc = 0;
cudnnTensorDescriptor_t grout_desc = 0;
cudnnFilterDescriptor_t w_desc = 0;
cudnnFilterDescriptor_t dw_desc = 0;
cudnnTensorDescriptor_t bias_desc = 0;
cudnnConvolutionFwdAlgo_t algoFwd;
cudnnConvolutionBwdDataAlgo_t algoBwdData;
cudnnConvolutionBwdFilterAlgo_t algoBwdW;
size_t wsFwdSz = 0;
size_t wsBwdDataSz = 0;
size_t wsBwdWSz = 0;
size_t inszMem = 0;
snFloat* d_in = 0;
snFloat* d_w = 0;
snFloat* d_dw = 0;
snFloat* d_bias = 0;
snFloat* d_out = 0;
snFloat* d_grout = 0;
void* d_wsFwd = 0;
void* d_wsBwdData = 0;
void* d_wsBwdW = 0;
};
void Deconvolution::iniParamCUDA(const snSize& insz, const snSize& outsz,
const deconvParams& prms, void** pGpuPrm){
cudaSetDevice(gpuDeviceId_);
bool isFirst = false;
gpuParams* gpuPrm = (gpuParams*)*pGpuPrm;
if (!gpuPrm){
cudaDeviceProp cu_deviceProps;
cudaGetDeviceProperties(&cu_deviceProps, 0);
if (cu_deviceProps.major < 3){
ERROR_MESS("%s requires SM >= 3.0");
return;
}
gpuPrm = new gpuParams();
memset(gpuPrm, 0, sizeof(gpuParams));
*pGpuPrm = gpuPrm;
cudnnHandle_t cudnn = nullptr;
cuCHECK(cudnnCreate(&cudnn));
gpuPrm->cudnn = cudnn;
isFirst = true;
}
// input
cudnnTensorDescriptor_t in_desc = nullptr;
cuCHECK(cudnnCreateTensorDescriptor(&in_desc));
cuCHECK(cudnnSetTensor4dDescriptor(in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w)));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->in_desc));
gpuPrm->in_desc = in_desc;
// grout
cudnnTensorDescriptor_t grout_desc;
cuCHECK(cudnnCreateTensorDescriptor(&grout_desc));
cuCHECK(cudnnSetTensor4dDescriptor(grout_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, int(insz.n), int(insz.d), int(insz.h), int(insz.w)));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->grout_desc));
gpuPrm->grout_desc = grout_desc;
// w
cudnnFilterDescriptor_t w_desc = nullptr;
cuCHECK(cudnnCreateFilterDescriptor(&w_desc));
cuCHECK(cudnnSetFilter4dDescriptor(w_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
int(insz.d), int(outsz.d), int(prms.fHeight), int(prms.fWidth)));
if (!isFirst)
cuCHECK(cudnnDestroyFilterDescriptor((cudnnFilterDescriptor_t)gpuPrm->w_desc));
gpuPrm->w_desc = w_desc;
// dw
cudnnFilterDescriptor_t dw_desc = nullptr;
cuCHECK(cudnnCreateFilterDescriptor(&dw_desc));
cuCHECK(cudnnSetFilter4dDescriptor(dw_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
int(insz.d), int(outsz.d), int(prms.fHeight), int(prms.fWidth)));
if (!isFirst)
cuCHECK(cudnnDestroyFilterDescriptor((cudnnFilterDescriptor_t)gpuPrm->dw_desc));
gpuPrm->dw_desc = dw_desc;
// conv
cudnnConvolutionDescriptor_t conv_desc = nullptr;
cuCHECK(cudnnCreateConvolutionDescriptor(&conv_desc));
cuCHECK(cudnnSetConvolution2dDescriptor(conv_desc, 0, 0, int(prms.stride), int(prms.stride), 1, 1,
CUDNN_CONVOLUTION, CUDNN_DATA_FLOAT));
if (!isFirst)
cuCHECK(cudnnDestroyConvolutionDescriptor((cudnnConvolutionDescriptor_t)gpuPrm->conv_desc));
gpuPrm->conv_desc = conv_desc;
// output
cudnnTensorDescriptor_t out_desc;
cuCHECK(cudnnCreateTensorDescriptor(&out_desc));
cuCHECK(cudnnSetTensor4dDescriptor(out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
int(outsz.n), int(outsz.d), int(outsz.h), int(outsz.w)));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->out_desc));
gpuPrm->out_desc = out_desc;
cudnnTensorDescriptor_t grin_desc;
cuCHECK(cudnnCreateTensorDescriptor(&grin_desc));
cuCHECK(cudnnSetTensor4dDescriptor(grin_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
int(outsz.n), int(outsz.d), int(outsz.h), int(outsz.w)));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->grin_desc));
gpuPrm->grin_desc = grin_desc;
// bias
cudnnTensorDescriptor_t bias_desc;
cuCHECK(cudnnCreateTensorDescriptor(&bias_desc));
cuCHECK(cudnnSetTensor4dDescriptor(bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
1, int(insz.d), 1, 1));
if (!isFirst)
cuCHECK(cudnnDestroyTensorDescriptor((cudnnTensorDescriptor_t)gpuPrm->bias_desc));
gpuPrm->bias_desc = bias_desc;
// algorithm
cudnnConvolutionFwdAlgo_t algoFwd;
cuCHECK(cudnnGetConvolutionForwardAlgorithm(gpuPrm->cudnn, grin_desc, w_desc, conv_desc, grout_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algoFwd));
gpuPrm->algoFwd = algoFwd;
cudnnConvolutionBwdDataAlgo_t algoBwdData;
cuCHECK(cudnnGetConvolutionBackwardDataAlgorithm(gpuPrm->cudnn, w_desc, in_desc, conv_desc, out_desc,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algoBwdData));
gpuPrm->algoBwdData = algoBwdData;
cudnnConvolutionBwdFilterAlgo_t algoBwdW;
cuCHECK(cudnnGetConvolutionBackwardFilterAlgorithm(gpuPrm->cudnn, grin_desc, in_desc, conv_desc, dw_desc,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algoBwdW));
gpuPrm->algoBwdW = algoBwdW;
// workspace
size_t wsFwdSz = 0;
cuCHECK(cudnnGetConvolutionForwardWorkspaceSize(gpuPrm->cudnn, grin_desc, w_desc, conv_desc, grout_desc, algoFwd, &wsFwdSz));
gpuPrm->wsFwdSz = wsFwdSz;
size_t wsBwdDataSz = 0;
cuCHECK(cudnnGetConvolutionBackwardDataWorkspaceSize(gpuPrm->cudnn, w_desc, in_desc, conv_desc, out_desc, algoBwdData, &wsBwdDataSz));
gpuPrm->wsBwdDataSz = wsBwdDataSz;
size_t wsBwdWSz = 0;
cuCHECK(cudnnGetConvolutionBackwardFilterWorkspaceSize(gpuPrm->cudnn, grin_desc, in_desc, conv_desc, dw_desc, algoBwdW, &wsBwdWSz));
gpuPrm->wsBwdWSz = wsBwdWSz;
if (isFirst && !gpuClearMem_){
cuCHECK(cudaMalloc(&gpuPrm->d_in, insz.size() * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_w, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_dw, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_out, outsz.size() * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_grout, insz.size() * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_wsFwd, wsFwdSz));
cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdData, wsBwdDataSz));
cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdW, wsBwdWSz));
cuCHECK(cudaMalloc(&gpuPrm->d_bias, insz.d * sizeof(snFloat)));
}
else if (!gpuClearMem_ && (gpuPrm->inszMem < insz.size())){
cuCHECK(cudaFree(gpuPrm->d_in)); gpuPrm->d_in = 0;
cuCHECK(cudaFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(cudaFree(gpuPrm->d_dw)); gpuPrm->d_dw = 0;
cuCHECK(cudaFree(gpuPrm->d_out)); gpuPrm->d_out = 0;
cuCHECK(cudaFree(gpuPrm->d_grout)); gpuPrm->d_grout = 0;
cuCHECK(cudaFree(gpuPrm->d_wsFwd)); gpuPrm->d_wsFwd = 0;
cuCHECK(cudaFree(gpuPrm->d_wsBwdData)); gpuPrm->d_wsBwdData = 0;
cuCHECK(cudaFree(gpuPrm->d_wsBwdW)); gpuPrm->d_wsBwdW = 0;
cuCHECK(cudaFree(gpuPrm->d_bias)); gpuPrm->d_bias = 0;
cuCHECK(cudaMalloc(&gpuPrm->d_in, insz.size() * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_w, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_dw, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_out, outsz.size() * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_grout, insz.size() * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_wsFwd, wsFwdSz));
cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdData, wsBwdDataSz));
cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdW, wsBwdWSz));
cuCHECK(cudaMalloc(&gpuPrm->d_bias, insz.d * sizeof(snFloat)));
gpuPrm->inszMem = insz.size();
}
}
void Deconvolution::freeParamCUDA(void* gpuPrms){
cudaSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
if (!gpuPrm) return;
cuCHECK(cudnnDestroy(gpuPrm->cudnn));
cuCHECK(cudnnDestroyConvolutionDescriptor(gpuPrm->conv_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->in_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->out_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grin_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->grout_desc));
cuCHECK(cudnnDestroyFilterDescriptor(gpuPrm->w_desc));
cuCHECK(cudnnDestroyFilterDescriptor(gpuPrm->dw_desc));
cuCHECK(cudnnDestroyTensorDescriptor(gpuPrm->bias_desc));
cuCHECK(cudaFree(gpuPrm->d_in));
cuCHECK(cudaFree(gpuPrm->d_w));
cuCHECK(cudaFree(gpuPrm->d_dw));
cuCHECK(cudaFree(gpuPrm->d_bias));
cuCHECK(cudaFree(gpuPrm->d_out));
cuCHECK(cudaFree(gpuPrm->d_grout));
cuCHECK(cudaFree(gpuPrm->d_wsFwd));
cuCHECK(cudaFree(gpuPrm->d_wsBwdData));
cuCHECK(cudaFree(gpuPrm->d_wsBwdW));
}
void Deconvolution::forwardCUDA(const deconvParams& prms,
snFloat* weight, const snSize& insz, snFloat* input, const snSize& outsz, snFloat* output, void* gpuPrms){
cudaSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
size_t isz = insz.size(), osz = outsz.size();
if (gpuClearMem_){
cuCHECK(cudaMalloc(&gpuPrm->d_in, isz * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_w, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_out, osz * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdData, gpuPrm->wsBwdDataSz));
}
// input
cuCHECK(cudaMemcpy(gpuPrm->d_in, input, isz * sizeof(snFloat), cudaMemcpyHostToDevice));
// weight
size_t wsz = outsz.d * insz.d * prms.fHeight * prms.fWidth;
cuCHECK(cudaMemcpy(gpuPrm->d_w, weight, wsz * sizeof(snFloat), cudaMemcpyHostToDevice));
// run
snFloat alpha = 1.f, beta = 0.f;
cuCHECK(cudnnConvolutionBackwardData(gpuPrm->cudnn,
&alpha,
gpuPrm->w_desc,
gpuPrm->d_w,
gpuPrm->in_desc,
gpuPrm->d_in,
gpuPrm->conv_desc,
gpuPrm->algoBwdData,
gpuPrm->d_wsBwdData,
gpuPrm->wsBwdDataSz,
&beta,
gpuPrm->out_desc,
gpuPrm->d_out));
// result
cuCHECK(cudaMemcpy(output, gpuPrm->d_out, osz * sizeof(snFloat), cudaMemcpyDeviceToHost));
if (gpuClearMem_){
cuCHECK(cudaFree(gpuPrm->d_in)); gpuPrm->d_in = 0;
cuCHECK(cudaFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(cudaFree(gpuPrm->d_wsBwdData)); gpuPrm->d_wsBwdData = 0;
cuCHECK(cudaFree(gpuPrm->d_out)); gpuPrm->d_out = 0;
}
}
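// Adds the per-channel bias to the input-gradient tensor: one block per batch sample, threads stride across the depth channels.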
__global__ void cuBwdBias(snSize insz, snFloat* bias, snFloat* grout){
size_t isz = insz.w * insz.h;
snFloat* pGrOut = grout + isz * insz.d * blockIdx.x + isz * threadIdx.x; // offset to this thread's first channel so each channel slice is updated by exactly one thread
unsigned int d = threadIdx.x;
while (d < insz.d){
snFloat b = bias[d];
for (size_t j = 0; j < isz; ++j)
pGrOut[j] += b;
pGrOut += isz * blockDim.x;
d += blockDim.x;
}
}
void Deconvolution::backwardCUDA_GW(const deconvParams& prms,
snFloat* weight, const snSize& insz, snFloat* input, const snSize& outsz, snFloat* gradIn, snFloat* gradOut, snFloat* dWeightOut, void* gpuPrms){
cudaSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
size_t isz = insz.size(), osz = outsz.size();
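// Reuse the forward-output buffer for the incoming gradient (both hold outsz.size() floats).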
void* d_grin = gpuPrm->d_out;
if (gpuClearMem_){
cuCHECK(cudaMalloc(&gpuPrm->d_in, isz * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_w, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_dw, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_bias, insz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&d_grin, osz * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_grout, isz * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_wsFwd, gpuPrm->wsFwdSz));
cuCHECK(cudaMalloc(&gpuPrm->d_wsBwdW, gpuPrm->wsBwdWSz));
}
// input
cuCHECK(cudaMemcpy(gpuPrm->d_in, input, isz * sizeof(snFloat), cudaMemcpyHostToDevice));
// grin
cuCHECK(cudaMemcpy(d_grin, gradIn, osz * sizeof(snFloat), cudaMemcpyHostToDevice));
// weight
size_t wsz = outsz.d * insz.d * prms.fHeight * prms.fWidth;
cuCHECK(cudaMemcpy(gpuPrm->d_w, weight, wsz * sizeof(snFloat), cudaMemcpyHostToDevice));
// run
snFloat alpha = 1.f, beta = 0.f;
cuCHECK(cudnnConvolutionForward(gpuPrm->cudnn,
&alpha,
gpuPrm->grin_desc,
d_grin,
gpuPrm->w_desc,
gpuPrm->d_w,
gpuPrm->conv_desc,
gpuPrm->algoFwd,
gpuPrm->d_wsFwd,
gpuPrm->wsFwdSz,
&beta,
gpuPrm->grout_desc,
gpuPrm->d_grout));
cuCHECK(cudnnConvolutionBackwardFilter(gpuPrm->cudnn,
&alpha,
gpuPrm->grin_desc,
d_grin,
gpuPrm->in_desc,
gpuPrm->d_in,
gpuPrm->conv_desc,
gpuPrm->algoBwdW,
gpuPrm->d_wsBwdW,
gpuPrm->wsBwdWSz,
&beta,
gpuPrm->dw_desc,
gpuPrm->d_dw));
cuCHECK(cudnnConvolutionBackwardBias(gpuPrm->cudnn,
&alpha,
gpuPrm->in_desc,
gpuPrm->d_in,
&beta,
gpuPrm->bias_desc,
gpuPrm->d_bias));
// +bias
cuBwdBias <<< int(insz.n), 128 >>> (insz, gpuPrm->d_bias, gpuPrm->d_grout);
// result
cuCHECK(cudaMemcpy(gradOut, gpuPrm->d_grout, isz * sizeof(snFloat), cudaMemcpyDeviceToHost));
cuCHECK(cudaMemcpy(dWeightOut, gpuPrm->d_dw, wsz * sizeof(snFloat), cudaMemcpyDeviceToHost));
cuCHECK(cudaMemcpy(dWeightOut + wsz, gpuPrm->d_bias, insz.d * sizeof(snFloat), cudaMemcpyDeviceToHost));
if (gpuClearMem_){
cuCHECK(cudaFree(gpuPrm->d_in)); gpuPrm->d_in = 0;
cuCHECK(cudaFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(cudaFree(d_grin)); gpuPrm->d_out = 0;
cuCHECK(cudaFree(gpuPrm->d_grout)); gpuPrm->d_grout = 0;
cuCHECK(cudaFree(gpuPrm->d_dw)); gpuPrm->d_dw = 0;
cuCHECK(cudaFree(gpuPrm->d_bias)); gpuPrm->d_bias = 0;
cuCHECK(cudaFree(gpuPrm->d_wsFwd)); gpuPrm->d_wsFwd = 0;
cuCHECK(cudaFree(gpuPrm->d_wsBwdW)); gpuPrm->d_wsBwdW = 0;
}
}
void Deconvolution::backwardCUDA_G(const deconvParams& prms,
snFloat* weight, const snSize& insz, const snSize& outsz, snFloat* gradIn, snFloat* gradOut, void* gpuPrms){
cudaSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
size_t isz = insz.size(), osz = outsz.size();
void* d_grin = gpuPrm->d_out;
if (gpuClearMem_){
cuCHECK(cudaMalloc(&gpuPrm->d_w, prms.fWidth * prms.fHeight * insz.d * outsz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&d_grin, osz * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_grout, isz * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_wsFwd, gpuPrm->wsFwdSz));
cuCHECK(cudaMalloc(&gpuPrm->d_bias, insz.d * sizeof(snFloat)));
}
// grin
cuCHECK(cudaMemcpy(d_grin, gradIn, osz * sizeof(snFloat), cudaMemcpyHostToDevice));
// weight
size_t wsz = outsz.d * insz.d * prms.fHeight * prms.fWidth;
cuCHECK(cudaMemcpy(gpuPrm->d_w, weight, wsz * sizeof(snFloat), cudaMemcpyHostToDevice));
cuCHECK(cudaMemcpy(gpuPrm->d_bias, weight + wsz, insz.d * sizeof(snFloat), cudaMemcpyHostToDevice));
// run
snFloat alpha = 1.f, beta = 0.f;
cuCHECK(cudnnConvolutionForward(gpuPrm->cudnn,
&alpha,
gpuPrm->grin_desc,
d_grin,
gpuPrm->w_desc,
gpuPrm->d_w,
gpuPrm->conv_desc,
gpuPrm->algoFwd,
gpuPrm->d_wsFwd,
gpuPrm->wsFwdSz,
&beta,
gpuPrm->grout_desc,
gpuPrm->d_grout));
// +bias
cuBwdBias <<< int(insz.n), 128 >>> (insz, gpuPrm->d_bias, gpuPrm->d_grout);
// result
cuCHECK(cudaMemcpy(gradOut, gpuPrm->d_grout, isz * sizeof(snFloat), cudaMemcpyDeviceToHost));
if (gpuClearMem_){
cuCHECK(cudaFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(cudaFree(d_grin)); gpuPrm->d_out = 0;
cuCHECK(cudaFree(gpuPrm->d_bias)); gpuPrm->d_bias = 0;
cuCHECK(cudaFree(gpuPrm->d_grout)); gpuPrm->d_grout = 0;
cuCHECK(cudaFree(gpuPrm->d_wsFwd)); gpuPrm->d_wsFwd = 0;
}
}
#elif SN_CUDA
#include <cuda_runtime.h>
#include "../stdafx.h"
#include "snOperator/src/Operator/deconvolution.h"
using namespace std;
using namespace SN_Base;
#ifndef cuCHECK
#define cuCHECK(func) if (func != 0){ ERROR_MESS("CUDA error: " + cudaGetErrorString(cudaGetLastError())); return;}
#endif
struct gpuParams{
snFloat* d_in = 0;
snFloat* d_w = 0;
snFloat* d_dw = 0;
snFloat* d_out = 0;
snFloat* d_grout = 0;
size_t inszMem = 0;
};
void Deconvolution::iniParamCUDA(const snSize& insz, const snSize& outsz, const deconvParams& prms, void** pGpuPrm){
cudaSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)*pGpuPrm;
if (!gpuPrm){
cudaDeviceProp cu_deviceProps;
cudaGetDeviceProperties(&cu_deviceProps, 0);
if (cu_deviceProps.major < 3){
ERROR_MESS("%s requires SM >= 3.0");
return;
}
gpuPrm = new gpuParams();
memset(gpuPrm, 0, sizeof(gpuParams));
*pGpuPrm = gpuPrm;
if (!gpuClearMem_){
cuCHECK(cudaMalloc(&gpuPrm->d_in, insz.size() * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_w, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_out, outsz.size() * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_grout, insz.size() * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_dw, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * outsz.n * sizeof(snFloat)));
}
}
else if (!gpuClearMem_ && (gpuPrm->inszMem < insz.size())){
cuCHECK(cudaFree(gpuPrm->d_in)); gpuPrm->d_in = 0;
cuCHECK(cudaFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(cudaFree(gpuPrm->d_out)); gpuPrm->d_out = 0;
cuCHECK(cudaFree(gpuPrm->d_grout)); gpuPrm->d_grout = 0;
cuCHECK(cudaFree(gpuPrm->d_dw)); gpuPrm->d_dw = 0;
cuCHECK(cudaMalloc(&gpuPrm->d_in, insz.size() * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_w, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_out, outsz.size() * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_grout, insz.size() * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_dw, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * outsz.n * sizeof(snFloat)));
gpuPrm->inszMem = insz.size();
}
}
void Deconvolution::freeParamCUDA(void* gpuPrms){
cudaSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
if (!gpuPrm) return;
cuCHECK(cudaFree(gpuPrm->d_in));
cuCHECK(cudaFree(gpuPrm->d_w));
cuCHECK(cudaFree(gpuPrm->d_dw));
cuCHECK(cudaFree(gpuPrm->d_out));
cuCHECK(cudaFree(gpuPrm->d_grout));
}
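// Transposed-convolution forward pass: every input pixel scatters its value, weighted by the fWidth x fHeight filter, into a stride-spaced window of the output; one block per (output map, batch sample).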
__global__ void cuDeconvFwd(size_t fWidth, size_t fHeight, size_t stride,
snFloat* weight, snSize insz, snFloat* input, snSize outsz, snFloat* output){
size_t wStepByD = fWidth * fHeight,
wStepByK = wStepByD * outsz.d + 1,
outStepByD = outsz.w * outsz.h,
outStepByN = outStepByD * outsz.d,
inStepByD = insz.w * insz.h,
inStepByN = inStepByD * insz.d;
// gridDim.x - number of output layers
// gridDim.y - batch sz
weight += blockIdx.x * wStepByD;
input += blockIdx.y * inStepByN;
output += blockIdx.x * outStepByD + blockIdx.y * outStepByN;
unsigned int oz = 0;
while (oz < insz.d){
unsigned int oy = threadIdx.y;
while (oy < insz.h){
unsigned int ox = threadIdx.x;
while (ox < insz.w){
size_t posW = ox * stride, posH = oy * stride;
if (oz == 0){
for (size_t c = 0; c < wStepByD; ++c){
size_t cx = c % fWidth, cy = c / fWidth;
output[(cx + posW) + (cy + posH) * outsz.w] = 0;
}
}
// kernel
snFloat in = input[ox + oy * insz.w];
#pragma unroll
for (size_t c = 0; c < wStepByD; ++c){
size_t cx = c % fWidth, cy = c / fWidth,
si = (cx + posW) + (cy + posH) * outsz.w,
sw = cx + cy * fWidth;
output[si] += in * weight[sw];
}
ox += blockDim.x;
}
oy += blockDim.y;
}
weight += wStepByK;
input += inStepByD;
++oz;
}
}
void Deconvolution::forwardCUDA(const deconvParams& prms,
snFloat* weight, const snSize& insz, snFloat* input, const snSize& outsz, snFloat* output, void* gpuPrms){
cudaSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
size_t isz = insz.size(), osz = outsz.size();
if (gpuClearMem_){
cuCHECK(cudaMalloc(&gpuPrm->d_in, isz * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_w, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_out, osz * sizeof(snFloat)));
}
// input
cuCHECK(cudaMemcpy(gpuPrm->d_in, input, isz * sizeof(snFloat), cudaMemcpyHostToDevice));
// weight
cuCHECK(cudaMemcpy(gpuPrm->d_w, weight, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat), cudaMemcpyHostToDevice));
// run
dim3 dimBlock(16, 16);
dim3 dimGrid(int(outsz.d), int(outsz.n));
cuDeconvFwd <<< dimGrid, dimBlock >>>(prms.fWidth,
prms.fHeight,
prms.stride,
gpuPrm->d_w,
insz,
gpuPrm->d_in,
outsz,
gpuPrm->d_out);
// result
cuCHECK(cudaMemcpy(output, gpuPrm->d_out, osz * sizeof(snFloat), cudaMemcpyDeviceToHost));
if (gpuClearMem_){
cuCHECK(cudaFree(gpuPrm->d_in)); gpuPrm->d_in = 0;
cuCHECK(cudaFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(cudaFree(gpuPrm->d_out)); gpuPrm->d_out = 0;
}
}
__global__ void cuDeconvBwd_GW(size_t fWidth, size_t fHeight, size_t stride,
snFloat* weight, snSize insz, snFloat* input, snSize outsz, snFloat* gradIn, snFloat* gradOut, snFloat* dWeightOut){
size_t wStepByD = fWidth * fHeight,
wStepByK = wStepByD * outsz.d + 1,
wStepByN = wStepByK * insz.d,
outStepByD = outsz.w * outsz.h,
outStepByN = outStepByD * outsz.d,
inStepByD = insz.w * insz.h,
inStepByN = inStepByD * insz.d;
// gridDim.x - number of input layers
// gridDim.y - batch sz
input += blockIdx.x * inStepByD + blockIdx.y * inStepByN;
weight += blockIdx.x * wStepByK;
dWeightOut += blockIdx.x * wStepByK + blockIdx.y * wStepByN;
gradIn += blockIdx.y * outStepByN;
gradOut += blockIdx.x * inStepByD + blockIdx.y * inStepByN;
unsigned int oz = 0;
while (oz < outsz.d){
memset(dWeightOut, 0, wStepByD * sizeof(snFloat));
if (blockIdx.x == 0)
dWeightOut[wStepByD * outsz.d] = 0;
unsigned int oy = threadIdx.y;
while (oy < insz.h){
unsigned int ox = threadIdx.x;
while (ox < insz.w){
if (oz == 0)
gradOut[ox + oy * insz.w] = weight[wStepByD * outsz.d]; // bias
size_t posW = ox * stride, posH = oy * stride;
// kernel
snFloat csum = 0, cin = input[ox + oy * insz.w];
#pragma unroll
for (size_t c = 0; c < wStepByD; ++c){
size_t cx = c % fWidth, cy = c / fWidth,
si = (cx + posW) + (cy + posH) * outsz.w,
sw = cx + cy * fWidth;
csum += gradIn[si] * weight[sw];
dWeightOut[sw] += gradIn[si] * cin;
}
gradOut[ox + oy * insz.w] += csum;
if (blockIdx.x == 0)
dWeightOut[wStepByD * outsz.d] += cin; // bias
ox += blockDim.x;
}
oy += blockDim.y;
}
weight += wStepByD;
dWeightOut += wStepByD;
gradIn += outStepByD;
++oz;
}
}
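// Averages the per-sample weight/bias gradients (one copy per batch element, stride wStepByN) into the first copy.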
__global__ void cuDeconvWeightMean(size_t kernel, size_t fWidth, size_t fHeight, snSize insz, snFloat* weight){
size_t wStepByD = fWidth * fHeight,
wStepByK = wStepByD * kernel + 1,
wStepByN = wStepByK * insz.d;
unsigned int ox = threadIdx.x;
while (ox < wStepByN){
snFloat csum = weight[ox];
for (size_t i = 1; i < insz.n; ++i)
csum += weight[ox + wStepByN * i];
weight[ox] = csum / insz.n;
ox += blockDim.x;
}
}
void Deconvolution::backwardCUDA_GW(const deconvParams& prms,
snFloat* weight, const snSize& insz, snFloat* input, const snSize& outsz, snFloat* gradIn, snFloat* gradOut, snFloat* dWeightOut, void* gpuPrms){
cudaSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
size_t isz = insz.size(), osz = outsz.size();
snFloat* d_grin = gpuPrm->d_out;
if (gpuClearMem_){
cuCHECK(cudaMalloc(&gpuPrm->d_in, isz * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_w, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&d_grin, osz * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_grout, isz * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_dw, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * outsz.n * sizeof(snFloat)));
}
// input
cuCHECK(cudaMemcpy(gpuPrm->d_in, input, isz * sizeof(snFloat), cudaMemcpyHostToDevice));
cuCHECK(cudaMemcpy(d_grin, gradIn, osz * sizeof(snFloat), cudaMemcpyHostToDevice));
// weight
cuCHECK(cudaMemcpy(gpuPrm->d_w, weight, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat), cudaMemcpyHostToDevice));
// run
dim3 dimBlock(16, 16);
dim3 dimGrid(int(insz.d), int(outsz.n));
cuDeconvBwd_GW <<< dimGrid, dimBlock >>> (prms.fWidth,
prms.fHeight,
prms.stride,
gpuPrm->d_w,
insz,
gpuPrm->d_in,
outsz, d_grin,
gpuPrm->d_grout,
gpuPrm->d_dw);
cuDeconvWeightMean <<< 1, 32 >>> (prms.kernel, prms.fWidth, prms.fHeight, insz, gpuPrm->d_dw);
// result
cuCHECK(cudaMemcpy(gradOut, gpuPrm->d_grout, isz * sizeof(snFloat), cudaMemcpyDeviceToHost));
cuCHECK(cudaMemcpy(dWeightOut, gpuPrm->d_dw, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat), cudaMemcpyDeviceToHost));
if (gpuClearMem_){
cuCHECK(cudaFree(gpuPrm->d_in)); gpuPrm->d_in = 0;
cuCHECK(cudaFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(cudaFree(d_grin)); gpuPrm->d_out = 0;
cuCHECK(cudaFree(gpuPrm->d_grout)); gpuPrm->d_grout = 0;
cuCHECK(cudaFree(gpuPrm->d_dw)); gpuPrm->d_dw = 0;
}
}
__global__ void cuDeconvBwd_G(size_t fWidth, size_t fHeight, size_t stride,
snFloat* weight, snSize insz, snSize outsz, snFloat* gradIn, snFloat* gradOut){
size_t wStepByD = fWidth * fHeight,
wStepByK = wStepByD * outsz.d + 1,
outStepByD = outsz.w * outsz.h,
outStepByN = outStepByD * outsz.d,
inStepByD = insz.w * insz.h,
inStepByN = inStepByD * insz.d;
// gridDim.x - number of input layers
// gridDim.y - batch size
weight += blockIdx.x * wStepByK;
gradIn += blockIdx.y * outStepByN;
gradOut += blockIdx.x * inStepByD + blockIdx.y * inStepByN;
unsigned int oz = 0;
while (oz < outsz.d){
unsigned int oy = threadIdx.y;
while (oy < insz.h){
unsigned int ox = threadIdx.x;
while (ox < insz.w){
if (oz == 0)
gradOut[ox + oy * insz.w] = weight[wStepByD * outsz.d]; // bias
size_t posW = ox * stride, posH = oy * stride;
// kernel
snFloat csum = 0;
#pragma unroll
for (size_t c = 0; c < wStepByD; ++c){
size_t cx = c % fWidth, cy = c / fWidth,
si = (cx + posW) + (cy + posH) * outsz.w,
sw = cx + cy * fWidth;
csum += gradIn[si] * weight[sw];
}
gradOut[ox + oy * insz.w] += csum;
ox += blockDim.x;
}
oy += blockDim.y;
}
weight += wStepByD;
gradIn += outStepByD;
++oz;
}
}
void Deconvolution::backwardCUDA_G(const deconvParams& prms,
snFloat* weight, const snSize& insz, const snSize& outsz, snFloat* gradIn, snFloat* gradOut, void* gpuPrms){
cudaSetDevice(gpuDeviceId_);
gpuParams* gpuPrm = (gpuParams*)gpuPrms;
size_t isz = insz.size(), osz = outsz.size();
snFloat* d_grin = gpuPrm->d_out;
if (gpuClearMem_){
cuCHECK(cudaMalloc(&d_grin, osz * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_w, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat)));
cuCHECK(cudaMalloc(&gpuPrm->d_grout, isz * sizeof(snFloat)));
}
// input
cuCHECK(cudaMemcpy(d_grin, gradIn, osz * sizeof(snFloat), cudaMemcpyHostToDevice));
// weight
cuCHECK(cudaMemcpy(gpuPrm->d_w, weight, (prms.fWidth * prms.fHeight * outsz.d + 1) * insz.d * sizeof(snFloat), cudaMemcpyHostToDevice));
// run
dim3 dimBlock(16, 16);
dim3 dimGrid(int(insz.d), int(outsz.n));
cuDeconvBwd_G <<< dimGrid, dimBlock >>> (prms.fWidth,
prms.fHeight,
prms.stride,
gpuPrm->d_w,
insz,
outsz,
d_grin,
gpuPrm->d_grout);
// result
cuCHECK(cudaMemcpy(gradOut, gpuPrm->d_grout, isz * sizeof(snFloat), cudaMemcpyDeviceToHost));
if (gpuClearMem_){
cuCHECK(cudaFree(d_grin)); gpuPrm->d_out = 0;
cuCHECK(cudaFree(gpuPrm->d_w)); gpuPrm->d_w = 0;
cuCHECK(cudaFree(gpuPrm->d_grout)); gpuPrm->d_grout = 0;
}
}
#endif
|
569f15ae8628b01194b8a2d6bdec00dc28981899.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
__host__ __device__ int outInvariant(int inValue) {
return inValue * inValue;
}
__host__ __device__ int outDependent(int value, int inIdx, int outIdx) {
if (inIdx == outIdx) {
return 2 * value;
} else if (inIdx > outIdx) {
return value / (inIdx - outIdx);
} else {
return value / (outIdx - inIdx);
}
}
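// Scatter: one thread per input element; every thread contributes to every output element, so concurrent updates need atomics.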
__global__ void s2g_gpu_scatter_kernel(int *in, int *out, int len) {
//@@ INSERT CODE HERE
int inIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (inIdx >= len) return;
int intermediate = outInvariant(in[inIdx]);
for (int outIdx = 0; outIdx < len; ++outIdx) {
// Multiple threads update the same out[outIdx] concurrently, so the
// read-modify-write must be atomic to avoid lost updates.
atomicAdd(&out[outIdx], outDependent(intermediate, inIdx, outIdx));
}
}
__global__ void s2g_gpu_gather_kernel(int *in, int *out, int len) {
//@@ INSERT CODE HERE
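// Gather: each thread owns exactly one output element, so accumulating into
// out[outIdx] needs no atomics (unlike the scatter kernel above).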
int outIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (outIdx >= len) return;
for (int inIdx = 0; inIdx < len; ++inIdx) {
int intermediate = outInvariant(in[inIdx]);
out[outIdx] += outDependent(intermediate, inIdx, outIdx);
}
}
static void s2g_cpu_scatter(int *in, int *out, int len) {
for (int inIdx = 0; inIdx < len; ++inIdx) {
int intermediate = outInvariant(in[inIdx]);
for (int outIdx = 0; outIdx < len; ++outIdx) {
out[outIdx] += outDependent(intermediate, inIdx, outIdx);
}
}
}
static void s2g_cpu_gather(int *in, int *out, int len) {
//@@ INSERT CODE HERE
for (int outIdx = 0; outIdx < len; ++outIdx) {
for (int inIdx = 0; inIdx < len; ++inIdx) {
int intermediate = outInvariant(in[inIdx]);
out[outIdx] += outDependent(intermediate, inIdx, outIdx);
}
}
}
static void s2g_gpu_scatter(int *in, int *out, int len) {
//@@ INSERT CODE HERE
int block = 1024;
int grid = (len%block) == 0 ? (len/block) : 1 + (len/block);
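// One thread per input element, rounded up to whole 1024-thread blocks.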
hipLaunchKernelGGL(( s2g_gpu_scatter_kernel), dim3(grid), dim3(block), 0, 0, in, out, len);
}
static void s2g_gpu_gather(int *in, int *out, int len) {
//@@ INSERT CODE HERE
int block = 1024;
int grid = (len%block) == 0 ? (len/block) : 1 + (len/block);
hipLaunchKernelGGL(( s2g_gpu_gather_kernel), dim3(grid), dim3(block), 0, 0, in, out, len);
}
int main(int argc, char **argv) {
wbArg_t args;
int inputLength;
int *hostInput;
int *hostOutput;
int *deviceInput;
int *deviceOutput;
size_t byteCount; //byte size
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (int *)wbImport(wbArg_getInputFile(args, 0), &inputLength,
"Integer");
hostOutput = (int *)malloc(inputLength * sizeof(int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
byteCount = inputLength * sizeof(int);
wbTime_start(GPU, "Allocating GPU memory.");
wbCheck(hipMalloc((void **)&deviceInput, byteCount));
wbCheck(hipMalloc((void **)&deviceOutput, byteCount));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
wbCheck(hipMemcpy(deviceInput, hostInput, byteCount,
hipMemcpyHostToDevice));
wbCheck(hipMemset(deviceOutput, 0, byteCount));
wbTime_stop(GPU, "Copying input memory to the GPU.");
//////////////////////////////////////////
// CPU Scatter Computation
//////////////////////////////////////////
wbTime_start(Compute, "Performing CPU Scatter computation");
s2g_cpu_scatter(hostInput, hostOutput, inputLength);
wbTime_stop(Compute, "Performing CPU Scatter computation");
wbSolution(args, hostOutput, inputLength);
memset(hostOutput, 0, byteCount);
//////////////////////////////////////////
// GPU Scatter Computation
//////////////////////////////////////////
wbTime_start(Compute, "Performing GPU Scatter computation");
s2g_gpu_scatter(deviceInput, deviceOutput, inputLength);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing GPU Scatter computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(hipMemcpy(hostOutput, deviceOutput, byteCount,
hipMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbSolution(args, hostOutput, inputLength);
wbCheck(hipMemset(deviceOutput, 0, byteCount));
//////////////////////////////////////////
// CPU Gather Computation
//////////////////////////////////////////
wbTime_start(Compute, "Performing CPU Gather computation");
s2g_cpu_gather(hostInput, hostOutput, inputLength);
wbTime_stop(Compute, "Performing CPU Gather computation");
wbSolution(args, hostOutput, inputLength);
memset(hostOutput, 0, byteCount);
//////////////////////////////////////////
// GPU Gather Computation
//////////////////////////////////////////
wbTime_start(Compute, "Performing GPU Gather computation");
s2g_gpu_gather(deviceInput, deviceOutput, inputLength);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing GPU Gather computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(hipMemcpy(hostOutput, deviceOutput, byteCount,
hipMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbSolution(args, hostOutput, inputLength);
wbCheck(hipMemset(deviceOutput, 0, byteCount));
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceInput);
hipFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
free(hostInput);
free(hostOutput);
return 0;
} | 569f15ae8628b01194b8a2d6bdec00dc28981899.cu | #include <wb.h>
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
__host__ __device__ int outInvariant(int inValue) {
return inValue * inValue;
}
__host__ __device__ int outDependent(int value, int inIdx, int outIdx) {
if (inIdx == outIdx) {
return 2 * value;
} else if (inIdx > outIdx) {
return value / (inIdx - outIdx);
} else {
return value / (outIdx - inIdx);
}
}
__global__ void s2g_gpu_scatter_kernel(int *in, int *out, int len) {
//@@ INSERT CODE HERE
int inIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (inIdx >= len) return;
int intermediate = outInvariant(in[inIdx]);
for (int outIdx = 0; outIdx < len; ++outIdx) {
// Multiple threads update the same out[outIdx] concurrently, so the
// read-modify-write must be atomic to avoid lost updates.
atomicAdd(&out[outIdx], outDependent(intermediate, inIdx, outIdx));
}
}
__global__ void s2g_gpu_gather_kernel(int *in, int *out, int len) {
//@@ INSERT CODE HERE
int outIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (outIdx >= len) return;
for (int inIdx = 0; inIdx < len; ++inIdx) {
int intermediate = outInvariant(in[inIdx]);
out[outIdx] += outDependent(intermediate, inIdx, outIdx);
}
}
static void s2g_cpu_scatter(int *in, int *out, int len) {
for (int inIdx = 0; inIdx < len; ++inIdx) {
int intermediate = outInvariant(in[inIdx]);
for (int outIdx = 0; outIdx < len; ++outIdx) {
out[outIdx] += outDependent(intermediate, inIdx, outIdx);
}
}
}
static void s2g_cpu_gather(int *in, int *out, int len) {
//@@ INSERT CODE HERE
for (int outIdx = 0; outIdx < len; ++outIdx) {
for (int inIdx = 0; inIdx < len; ++inIdx) {
int intermediate = outInvariant(in[inIdx]);
out[outIdx] += outDependent(intermediate, inIdx, outIdx);
}
}
}
static void s2g_gpu_scatter(int *in, int *out, int len) {
//@@ INSERT CODE HERE
int block = 1024;
int grid = (len%block) == 0 ? (len/block) : 1 + (len/block);
s2g_gpu_scatter_kernel<<<grid, block>>>(in, out, len);
}
static void s2g_gpu_gather(int *in, int *out, int len) {
//@@ INSERT CODE HERE
int block = 1024;
int grid = (len%block) == 0 ? (len/block) : 1 + (len/block);
s2g_gpu_gather_kernel<<<grid, block>>>(in, out, len);
}
int main(int argc, char **argv) {
wbArg_t args;
int inputLength;
int *hostInput;
int *hostOutput;
int *deviceInput;
int *deviceOutput;
size_t byteCount; //byte size
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (int *)wbImport(wbArg_getInputFile(args, 0), &inputLength,
"Integer");
hostOutput = (int *)malloc(inputLength * sizeof(int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
byteCount = inputLength * sizeof(int);
wbTime_start(GPU, "Allocating GPU memory.");
wbCheck(cudaMalloc((void **)&deviceInput, byteCount));
wbCheck(cudaMalloc((void **)&deviceOutput, byteCount));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
wbCheck(cudaMemcpy(deviceInput, hostInput, byteCount,
cudaMemcpyHostToDevice));
wbCheck(cudaMemset(deviceOutput, 0, byteCount));
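// The output buffer only ever receives += / atomicAdd updates from the
// kernels, so it is zeroed here and again between the GPU passes below.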
wbTime_stop(GPU, "Copying input memory to the GPU.");
//////////////////////////////////////////
// CPU Scatter Computation
//////////////////////////////////////////
wbTime_start(Compute, "Performing CPU Scatter computation");
s2g_cpu_scatter(hostInput, hostOutput, inputLength);
wbTime_stop(Compute, "Performing CPU Scatter computation");
wbSolution(args, hostOutput, inputLength);
memset(hostOutput, 0, byteCount);
//////////////////////////////////////////
// GPU Scatter Computation
//////////////////////////////////////////
wbTime_start(Compute, "Performing GPU Scatter computation");
s2g_gpu_scatter(deviceInput, deviceOutput, inputLength);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing GPU Scatter computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(cudaMemcpy(hostOutput, deviceOutput, byteCount,
cudaMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbSolution(args, hostOutput, inputLength);
wbCheck(cudaMemset(deviceOutput, 0, byteCount));
//////////////////////////////////////////
// CPU Gather Computation
//////////////////////////////////////////
wbTime_start(Compute, "Performing CPU Gather computation");
s2g_cpu_gather(hostInput, hostOutput, inputLength);
wbTime_stop(Compute, "Performing CPU Gather computation");
wbSolution(args, hostOutput, inputLength);
memset(hostOutput, 0, byteCount);
//////////////////////////////////////////
// GPU Gather Computation
//////////////////////////////////////////
wbTime_start(Compute, "Performing GPU Gather computation");
s2g_gpu_gather(deviceInput, deviceOutput, inputLength);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing GPU Gather computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(cudaMemcpy(hostOutput, deviceOutput, byteCount,
cudaMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbSolution(args, hostOutput, inputLength);
wbCheck(cudaMemset(deviceOutput, 0, byteCount));
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceInput);
cudaFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
free(hostInput);
free(hostOutput);
return 0;
} |
203271b22c9b6ef9b87d0532d03546c77771727d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "Graph.h"
#define MAX_THREAD_COUNT 1024
#define CEIL(a, b) ((a - 1) / b + 1)
#define catchCudaError(error) { gpuAssert((error), __FILE__, __LINE__); }
using namespace std;
float device_time_taken;
void printTime(float ms) {
printf("%d,", (int)ms);
}
inline void gpuAssert(hipError_t error, const char *file, int line, bool abort = false) {
if (error != hipSuccess) {
printf("\n====== Cuda Error Code %i ======\n%s\n", error, hipGetErrorString(error));
printf("\nIn file :%s\nOn line: %d", file, line);
if(abort)
exit(-1);
}
}
__global__ void betweennessCentralityKernel(
Graph *graph,
double *bwCentrality,
int nodeFrom,
int nodeTo,
int nodeCount,
int *sigma,
int *distance,
double *dependency) {
int idx = threadIdx.x;
if (idx >= nodeCount)
return;
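// A single block sweeps over every source vertex s in [nodeFrom, nodeTo]:
// a level-synchronous BFS counts shortest paths (sigma), then dependencies
// are accumulated back down the BFS levels (Brandes-style betweenness).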
__shared__ int s;
__shared__ int current_depth;
__shared__ bool done;
if(idx == 0) {
s = nodeFrom - 1;
}
__syncthreads();
while (s <= nodeTo) {
if (idx == 0) {
++s;
done = false;
current_depth = -1;
}
__syncthreads();
for (int v = idx; v < nodeCount; v += blockDim.x) {
if (v == s) {
distance[v] = 0;
sigma[v] = 1;
} else {
distance[v] = INT_MAX;
sigma[v] = 0;
}
dependency[v] = 0.0;
}
__syncthreads();
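// Forward phase: expand one BFS frontier per iteration from source s,
// accumulating the number of shortest paths in sigma[].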
while (!done) {
if (idx == 0) {
current_depth++;
}
done = true;
__syncthreads();
for (int v = idx; v < nodeCount; v += blockDim.x) {
if (distance[v] == current_depth) {
for (int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) {
int w = graph->adjacencyList[r];
if (distance[w] == INT_MAX) {
distance[w] = distance[v] + 1;
done = false;
}
if (distance[w] == (distance[v] + 1)) {
atomicAdd(&sigma[w], sigma[v]);
}
}
}
}
__syncthreads();
}
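// Backward phase: revisit the BFS levels in reverse and propagate dependency
// values; each vertex other than s adds dependency/2, compensating for every
// s-t pair being visited from both endpoints of the undirected graph.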
while(current_depth) {
if (idx == 0) {
current_depth--;
}
__syncthreads();
for (int v = idx; v < nodeCount; v += blockDim.x) {
if (distance[v] == current_depth) {
for (int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) {
int w = graph->adjacencyList[r];
if (distance[w] == (distance[v] + 1)) {
if (sigma[w] != 0)
dependency[v] += (sigma[v] * 1.0 / sigma[w]) * (1 + dependency[w]);
}
}
if (v != s) {
bwCentrality[v] += dependency[v] / 2;
}
}
}
__syncthreads();
}
}
}
double *betweennessCentrality(Graph *graph, int nodeCount, int nodeFrom, int nodeTo) {
double *bwCentrality = new double[nodeCount]();
double *device_bwCentrality, *dependency;
int *sigma, *distance;
catchCudaError(hipMalloc((void **)&device_bwCentrality, sizeof(double) * nodeCount));
catchCudaError(hipMalloc((void **)&sigma, sizeof(int) * nodeCount));
catchCudaError(hipMalloc((void **)&distance, sizeof(int) * nodeCount));
catchCudaError(hipMalloc((void **)&dependency, sizeof(double) * nodeCount));
catchCudaError(hipMemcpy(device_bwCentrality, bwCentrality, sizeof(double) * nodeCount, hipMemcpyHostToDevice));
// Timer
hipEvent_t device_start, device_end;
catchCudaError(hipEventCreate(&device_start));
catchCudaError(hipEventCreate(&device_end));
catchCudaError(hipEventRecord(device_start));
hipLaunchKernelGGL(( betweennessCentralityKernel), dim3(1), dim3(MAX_THREAD_COUNT), 0, 0,
graph,
device_bwCentrality,
nodeFrom,
nodeTo,
nodeCount,
sigma,
distance,
dependency
);
hipDeviceSynchronize();
cout << endl;
// Timer
catchCudaError(hipEventRecord(device_end));
catchCudaError(hipEventSynchronize(device_end));
hipEventElapsedTime(&device_time_taken, device_start, device_end);
// Copy back and free memory
catchCudaError(hipMemcpy(bwCentrality, device_bwCentrality, sizeof(double) * nodeCount, hipMemcpyDeviceToHost));
catchCudaError(hipFree(device_bwCentrality));
catchCudaError(hipFree(sigma));
catchCudaError(hipFree(dependency));
catchCudaError(hipFree(distance));
return bwCentrality;
}
int main(int argc, char *argv[]) {
if (argc < 2) {
cout << "Usage: " << argv[0] << " <input_file> <output_file>\n";
return 0;
}
freopen(argv[1], "r", stdin);
Graph *host_graph = new Graph();
Graph *device_graph;
catchCudaError(hipMalloc((void **)&device_graph, sizeof(Graph)));
host_graph->readGraph();
int nodeCount = host_graph->getNodeCount();
int edgeCount = host_graph->getEdgeCount();
catchCudaError(hipMemcpy(device_graph, host_graph, sizeof(Graph), hipMemcpyHostToDevice));
// Set threshold
const long threshold_percent = strtol(argv[3], NULL, 10);
const int threshold = (int) ((float)nodeCount * (float)threshold_percent / (float) 100);
const int nodeFrom = threshold;
const int nodeTo = nodeCount - 1;
int *adjacencyList;
catchCudaError(hipMalloc((void **)&adjacencyList, sizeof(int) * (2 * edgeCount + 1)));
catchCudaError(hipMemcpy(adjacencyList, host_graph->adjacencyList, sizeof(int) * (2 * edgeCount + 1), hipMemcpyHostToDevice));
catchCudaError(hipMemcpy(&(device_graph->adjacencyList), &adjacencyList, sizeof(int *), hipMemcpyHostToDevice));
int *adjacencyListPointers;
catchCudaError(hipMalloc((void **)&adjacencyListPointers, sizeof(int) * (nodeCount + 1)));
catchCudaError(hipMemcpy(adjacencyListPointers, host_graph->adjacencyListPointers, sizeof(int) * (nodeCount + 1), hipMemcpyHostToDevice));
catchCudaError(hipMemcpy(&(device_graph->adjacencyListPointers), &adjacencyListPointers, sizeof(int *), hipMemcpyHostToDevice));
double *bwCentrality = betweennessCentrality(device_graph, nodeCount, nodeFrom, nodeTo);
double maxBetweenness = -1;
for (int i = 0; i < nodeCount; i++) {
maxBetweenness = max(maxBetweenness, bwCentrality[i]);
}
printf("%s, %03d, ", argv[1], atoi(argv[3]));
// printf("%0.2lf, ", maxBetweenness);
printf("%0.2lf\n", device_time_taken);
if (argc == 3) {
freopen(argv[2], "w", stdout);
for (int i = 0; i < nodeCount; i++)
cout << bwCentrality[i] << " ";
cout << endl;
}
// Free all memory
delete[] bwCentrality;
catchCudaError(hipFree(adjacencyList));
catchCudaError(hipFree(adjacencyListPointers));
catchCudaError(hipFree(device_graph));
}
| 203271b22c9b6ef9b87d0532d03546c77771727d.cu | #include <iostream>
#include <cuda.h>
#include "Graph.h"
#define MAX_THREAD_COUNT 1024
#define CEIL(a, b) ((a - 1) / b + 1)
#define catchCudaError(error) { gpuAssert((error), __FILE__, __LINE__); }
using namespace std;
float device_time_taken;
void printTime(float ms) {
printf("%d,", (int)ms);
}
inline void gpuAssert(cudaError_t error, const char *file, int line, bool abort = false) {
if (error != cudaSuccess) {
printf("\n====== Cuda Error Code %i ======\n%s\n", error, cudaGetErrorString(error));
printf("\nIn file :%s\nOn line: %d", file, line);
if(abort)
exit(-1);
}
}
__global__ void betweennessCentralityKernel(
Graph *graph,
double *bwCentrality,
int nodeFrom,
int nodeTo,
int nodeCount,
int *sigma,
int *distance,
double *dependency) {
int idx = threadIdx.x;
if (idx >= nodeCount)
return;
__shared__ int s;
__shared__ int current_depth;
__shared__ bool done;
if(idx == 0) {
s = nodeFrom - 1;
}
__syncthreads();
while (s <= nodeTo) {
if (idx == 0) {
++s;
done = false;
current_depth = -1;
}
__syncthreads();
for (int v = idx; v < nodeCount; v += blockDim.x) {
if (v == s) {
distance[v] = 0;
sigma[v] = 1;
} else {
distance[v] = INT_MAX;
sigma[v] = 0;
}
dependency[v] = 0.0;
}
__syncthreads();
while (!done) {
if (idx == 0) {
current_depth++;
}
done = true;
__syncthreads();
for (int v = idx; v < nodeCount; v += blockDim.x) {
if (distance[v] == current_depth) {
for (int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) {
int w = graph->adjacencyList[r];
if (distance[w] == INT_MAX) {
distance[w] = distance[v] + 1;
done = false;
}
if (distance[w] == (distance[v] + 1)) {
atomicAdd(&sigma[w], sigma[v]);
}
}
}
}
__syncthreads();
}
while(current_depth) {
if (idx == 0) {
current_depth--;
}
__syncthreads();
for (int v = idx; v < nodeCount; v += blockDim.x) {
if (distance[v] == current_depth) {
for (int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) {
int w = graph->adjacencyList[r];
if (distance[w] == (distance[v] + 1)) {
if (sigma[w] != 0)
dependency[v] += (sigma[v] * 1.0 / sigma[w]) * (1 + dependency[w]);
}
}
if (v != s) {
bwCentrality[v] += dependency[v] / 2;
}
}
}
__syncthreads();
}
}
}
double *betweennessCentrality(Graph *graph, int nodeCount, int nodeFrom, int nodeTo) {
double *bwCentrality = new double[nodeCount]();
double *device_bwCentrality, *dependency;
int *sigma, *distance;
catchCudaError(cudaMalloc((void **)&device_bwCentrality, sizeof(double) * nodeCount));
catchCudaError(cudaMalloc((void **)&sigma, sizeof(int) * nodeCount));
catchCudaError(cudaMalloc((void **)&distance, sizeof(int) * nodeCount));
catchCudaError(cudaMalloc((void **)&dependency, sizeof(double) * nodeCount));
catchCudaError(cudaMemcpy(device_bwCentrality, bwCentrality, sizeof(double) * nodeCount, cudaMemcpyHostToDevice));
// Timer
cudaEvent_t device_start, device_end;
catchCudaError(cudaEventCreate(&device_start));
catchCudaError(cudaEventCreate(&device_end));
catchCudaError(cudaEventRecord(device_start));
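// The kernel coordinates all of its threads through shared variables and
// __syncthreads(), so it is launched as a single block of MAX_THREAD_COUNT threads.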
betweennessCentralityKernel<<<1, MAX_THREAD_COUNT>>>(
graph,
device_bwCentrality,
nodeFrom,
nodeTo,
nodeCount,
sigma,
distance,
dependency
);
cudaDeviceSynchronize();
cout << endl;
// Timer
catchCudaError(cudaEventRecord(device_end));
catchCudaError(cudaEventSynchronize(device_end));
cudaEventElapsedTime(&device_time_taken, device_start, device_end);
// Copy back and free memory
catchCudaError(cudaMemcpy(bwCentrality, device_bwCentrality, sizeof(double) * nodeCount, cudaMemcpyDeviceToHost));
catchCudaError(cudaFree(device_bwCentrality));
catchCudaError(cudaFree(sigma));
catchCudaError(cudaFree(dependency));
catchCudaError(cudaFree(distance));
return bwCentrality;
}
int main(int argc, char *argv[]) {
if (argc < 2) {
cout << "Usage: " << argv[0] << " <input_file> <output_file>\n";
return 0;
}
freopen(argv[1], "r", stdin);
Graph *host_graph = new Graph();
Graph *device_graph;
catchCudaError(cudaMalloc((void **)&device_graph, sizeof(Graph)));
host_graph->readGraph();
int nodeCount = host_graph->getNodeCount();
int edgeCount = host_graph->getEdgeCount();
catchCudaError(cudaMemcpy(device_graph, host_graph, sizeof(Graph), cudaMemcpyHostToDevice));
// Set threshold
const long threshold_percent = strtol(argv[3], NULL, 10);
const int threshold = (int) ((float)nodeCount * (float)threshold_percent / (float) 100);
const int nodeFrom = threshold;
const int nodeTo = nodeCount - 1;
int *adjacencyList;
catchCudaError(cudaMalloc((void **)&adjacencyList, sizeof(int) * (2 * edgeCount + 1)));
catchCudaError(cudaMemcpy(adjacencyList, host_graph->adjacencyList, sizeof(int) * (2 * edgeCount + 1), cudaMemcpyHostToDevice));
catchCudaError(cudaMemcpy(&(device_graph->adjacencyList), &adjacencyList, sizeof(int *), cudaMemcpyHostToDevice));
int *adjacencyListPointers;
catchCudaError(cudaMalloc((void **)&adjacencyListPointers, sizeof(int) * (nodeCount + 1)));
catchCudaError(cudaMemcpy(adjacencyListPointers, host_graph->adjacencyListPointers, sizeof(int) * (nodeCount + 1), cudaMemcpyHostToDevice));
catchCudaError(cudaMemcpy(&(device_graph->adjacencyListPointers), &adjacencyListPointers, sizeof(int *), cudaMemcpyHostToDevice));
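// adjacencyListPointers / adjacencyList form a CSR layout (per-vertex offsets
// into the flattened edge array); the device copies are patched into the
// corresponding fields of the device-side Graph object.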
double *bwCentrality = betweennessCentrality(device_graph, nodeCount, nodeFrom, nodeTo);
double maxBetweenness = -1;
for (int i = 0; i < nodeCount; i++) {
maxBetweenness = max(maxBetweenness, bwCentrality[i]);
}
printf("%s, %03d, ", argv[1], atoi(argv[3]));
// printf("%0.2lf, ", maxBetweenness);
printf("%0.2lf\n", device_time_taken);
if (argc == 3) {
freopen(argv[2], "w", stdout);
for (int i = 0; i < nodeCount; i++)
cout << bwCentrality[i] << " ";
cout << endl;
}
// Free all memory
delete[] bwCentrality;
catchCudaError(cudaFree(adjacencyList));
catchCudaError(cudaFree(adjacencyListPointers));
catchCudaError(cudaFree(device_graph));
}
|
bff7e1cca02c1733e5a4c28dc1462d7790323544.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<math.h>
#define N 512
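// Hillis-Steele inclusive scan of one 512-element block in shared memory:
// at step s every element adds the value s positions to its left.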
__global__ void inclusive_scan(int *d_in)
{
__shared__ int temp_in[N];
int i = threadIdx.x;
temp_in[i] = d_in[i];
__syncthreads();
for(unsigned int s = 1; s <= N-1; s <<= 1)
{
    // Hillis-Steele step: every thread reads before the barrier so that
    // __syncthreads() is never reached from inside a divergent branch
    // (calling it only for i >= s, as before, is undefined behaviour).
    int val = temp_in[i];
    if(i >= s)
        val += temp_in[i-s];
    __syncthreads();
    temp_in[i] = val;
    __syncthreads();
}
d_in[i] = temp_in[i];
}
int main()
{
int h_in[N];
int h_out[N];
//timer
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
for(int i=0; i < N; i++)
h_in[i] = 1;
int *d_in;
//int *d_out;
hipMalloc((void**) &d_in, N*sizeof(int));
//hipMalloc((void**) &d_out, N*sizeof(int));
hipMemcpy(d_in, &h_in, N*sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(start);
//Implementing kernel call
hipLaunchKernelGGL(( inclusive_scan), dim3(1), dim3(N), 0, 0, d_in);
hipEventRecord(stop);
hipMemcpy(&h_out, d_in, N*sizeof(int), hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float ms = 0;
hipEventElapsedTime(&ms, start, stop);
hipFree(d_in);
//hipFree(d_out);
for(int i=0; i<N; i++)
printf("out[%d] = %d\n", i, h_out[i]);
printf("Time used: %f milliseconds\n", ms);
return -1;
}
| bff7e1cca02c1733e5a4c28dc1462d7790323544.cu | #include<stdio.h>
#include<math.h>
#define N 512
__global__ void inclusive_scan(int *d_in)
{
__shared__ int temp_in[N];
int i = threadIdx.x;
temp_in[i] = d_in[i];
__syncthreads();
for(unsigned int s = 1; s <= N-1; s <<= 1)
{
    // Hillis-Steele step: every thread reads before the barrier so that
    // __syncthreads() is never reached from inside a divergent branch
    // (calling it only for i >= s, as before, is undefined behaviour).
    int val = temp_in[i];
    if(i >= s)
        val += temp_in[i-s];
    __syncthreads();
    temp_in[i] = val;
    __syncthreads();
}
d_in[i] = temp_in[i];
}
int main()
{
int h_in[N];
int h_out[N];
//timer
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
for(int i=0; i < N; i++)
h_in[i] = 1;
int *d_in;
//int *d_out;
cudaMalloc((void**) &d_in, N*sizeof(int));
//cudaMalloc((void**) &d_out, N*sizeof(int));
cudaMemcpy(d_in, &h_in, N*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start);
//Implementing kernel call
inclusive_scan<<<1, N>>>(d_in);
cudaEventRecord(stop);
cudaMemcpy(&h_out, d_in, N*sizeof(int), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float ms = 0;
cudaEventElapsedTime(&ms, start, stop);
cudaFree(d_in);
//cudaFree(d_out);
for(int i=0; i<N; i++)
printf("out[%d] = %d\n", i, h_out[i]);
printf("Time used: %f milliseconds\n", ms);
return -1;
}
|
e569a13e9aab857bb5d55248e8afcba79d4dbe87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"
namespace cv { namespace cuda { namespace device
{
namespace hoofr
{
////////////////////////////////////////////////////////////////////////////////////////////////////////
//buid Pattern
////////////////////////////////////////////////////////////////////////////////////////////////////////
// cull
int cull_gpu(int* loc, float* response, int size, int n_points)
{
thrust::device_ptr<int> loc_ptr(loc);
thrust::device_ptr<float> response_ptr(response);
thrust::sort_by_key(response_ptr, response_ptr + size, loc_ptr, thrust::greater<float>());
return n_points;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// HessianResponses
__device__ float hessian_dxx[49] =
{
0.0008, 0.0015, -0.0007, -0.0034, -0.0007, 0.0015, 0.0008,
0.0044, 0.0085, -0.0041, -0.0191, -0.0041, 0.0085, 0.0044,
0.0125, 0.0240, -0.0117, -0.0542, -0.0117, 0.0240, 0.0125,
0.0177, 0.0340, -0.0166, -0.0768, -0.0166, 0.0340, 0.0177,
0.0125, 0.0240, -0.0117, -0.0542, -0.0117, 0.0240, 0.0125,
0.0044, 0.0085, -0.0041, -0.0191, -0.0041, 0.0085, 0.0044,
0.0008, 0.0015, -0.0007, -0.0034, -0.0007, 0.0015, 0.0008
};
__device__ float hessian_dyy[49] =
{
0.0008, 0.0044, 0.0125, 0.0177, 0.0125, 0.0044, 0.0008,
0.0015, 0.0085, 0.0240, 0.0340, 0.0240, 0.0085, 0.0015,
-0.0007, -0.0041, -0.0117, -0.0166, -0.0117, -0.0041, -0.0007,
-0.0034, -0.0191, -0.0542, -0.0768, -0.0542, -0.0191, -0.0034,
-0.0007, -0.0041, -0.0117, -0.0166, -0.0117, -0.0041, -0.0007,
0.0015, 0.0085, 0.0240, 0.0340, 0.0240, 0.0085, 0.0015,
0.0008, 0.0044, 0.0125, 0.0177, 0.0125, 0.0044, 0.0008
};
__device__ float hessian_dxy[49] =
{
0.0009, 0.0035, 0.0050, 0, -0.0050, -0.0035, -0.0009,
0.0035, 0.0133, 0.0188, 0, -0.0188, -0.0133, -0.0035,
0.0050, 0.0188, 0.0266, 0, -0.0266, -0.0188, -0.0050,
0, 0, 0, 0, 0, 0, 0,
-0.0050, -0.0188, -0.0266, 0, 0.0266, 0.0188, 0.0050,
-0.0035, -0.0133, -0.0188, 0, 0.0188, 0.0133, 0.0035,
-0.0009, -0.0035, -0.0050, 0, 0.0050, 0.0035, 0.0009
};
__global__ void HessianResponses(const PtrStepb img, const short2* loc_, float* response, const int npoints, const int blockSize, const float hessian_k)
{
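// Each keypoint is scored by one 32-thread row: the threads partition the
// blockSize x blockSize patch, convolve it with the 7x7 second-derivative
// masks above, reduce the partial sums, and keep the Hessian-determinant
// response Dxx*Dyy - k*Dxy*Dxy.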
__shared__ float smem0[8 * 32];
__shared__ float smem1[8 * 32];
__shared__ float smem2[8 * 32];
const int ptidx = blockIdx.x * blockDim.y + threadIdx.y;
if (ptidx < npoints)
{
const short2 loc = loc_[ptidx];
const int r = blockSize / 2;
const int x0 = loc.x - r;
const int y0 = loc.y - r;
float Dxx = 0, Dyy = 0, Dxy = 0;
for (int ind = threadIdx.x; ind < blockSize * blockSize; ind += blockDim.x)
{
const int i = ind / blockSize;
const int j = ind % blockSize;
Dxx += ((float)img(y0 + i, x0 + j)) * hessian_dxx[ind];
Dyy += ((float)img(y0 + i, x0 + j)) * hessian_dyy[ind];
Dxy += ((float)img(y0 + i, x0 + j)) * hessian_dxy[ind];
}
float* srow0 = smem0 + threadIdx.y * blockDim.x;
float* srow1 = smem1 + threadIdx.y * blockDim.x;
float* srow2 = smem2 + threadIdx.y * blockDim.x;
plus<float> op;
reduce<32>(smem_tuple(srow0, srow1, srow2), thrust::tie(Dxx, Dyy, Dxy), threadIdx.x, thrust::make_tuple(op, op, op));
if (threadIdx.x == 0)
{
response[ptidx] = (Dxx * Dyy) - hessian_k*(Dxy * Dxy);
}
}
}
void HessianResponses_gpu(PtrStepSzb img, const short2* loc, float* response, const int npoints, int blockSize, float hessian_k, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid;
grid.x = divUp(npoints, block.y);
hipLaunchKernelGGL(( HessianResponses), dim3(grid), dim3(block), 0, stream, img, loc, response, npoints, blockSize, hessian_k);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// IC_Angle
__constant__ int c_u_max[32];
void loadUMax(const int* u_max, int count)
{
cudaSafeCall( hipMemcpyToSymbol(c_u_max, u_max, count * sizeof(int)) );
}
__global__ void IC_Angle(const PtrStepb image, const short2* loc_, float* angle, const int npoints, const int half_k)
{
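// Intensity-centroid orientation: accumulate the patch moments m_10 and m_01
// over the circular region described by c_u_max and convert
// atan2(m_01, m_10) to degrees.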
__shared__ int smem0[8 * 32];
__shared__ int smem1[8 * 32];
int* srow0 = smem0 + threadIdx.y * blockDim.x;
int* srow1 = smem1 + threadIdx.y * blockDim.x;
plus<int> op;
const int ptidx = blockIdx.x * blockDim.y + threadIdx.y;
if (ptidx < npoints)
{
int m_01 = 0, m_10 = 0;
const short2 loc = loc_[ptidx];
// Treat the center line differently, v=0
for (int u = threadIdx.x - half_k; u <= half_k; u += blockDim.x)
m_10 += u * image(loc.y, loc.x + u);
reduce<32>(srow0, m_10, threadIdx.x, op);
for (int v = 1; v <= half_k; ++v)
{
// Proceed over the two lines
int v_sum = 0;
int m_sum = 0;
const int d = c_u_max[v];
for (int u = threadIdx.x - d; u <= d; u += blockDim.x)
{
int val_plus = image(loc.y + v, loc.x + u);
int val_minus = image(loc.y - v, loc.x + u);
v_sum += (val_plus - val_minus);
m_sum += u * (val_plus + val_minus);
}
reduce<32>(smem_tuple(srow0, srow1), thrust::tie(v_sum, m_sum), threadIdx.x, thrust::make_tuple(op, op));
m_10 += m_sum;
m_01 += v * v_sum;
}
if (threadIdx.x == 0)
{
float kp_dir = ::atan2f((float)m_01, (float)m_10);
kp_dir += (kp_dir < 0) * (2.0f * CV_PI_F);
kp_dir *= 180.0f / CV_PI_F;
angle[ptidx] = kp_dir;
}
}
}
void IC_Angle_gpu(PtrStepSzb image, const short2* loc, float* angle, int npoints, int half_k, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid;
grid.x = divUp(npoints, block.y);
hipLaunchKernelGGL(( IC_Angle), dim3(grid), dim3(block), 0, stream, image, loc, angle, npoints, half_k);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ uchar meanIntensity(const PtrStepi integral, const float kp_x, const float kp_y, const int scale, const int rot, const int point,
const float* patternLookup_x, const float* patternLookup_y, const float* patternLookup_sigma, const int NB_ORIENTATION, const int NB_POINTS)
{
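// Mean intensity of a square box around the pattern point, evaluated in O(1)
// from the four corners of the integral image (summed-area table).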
int id = scale * NB_ORIENTATION * NB_POINTS + rot * NB_POINTS + point;
float xf = patternLookup_x[id] + kp_x;
float yf = patternLookup_y[id] + kp_y;
float radius = patternLookup_sigma[id];
if(radius < 0.5) {radius = 0.5;}
int x_left = int(xf-radius+0.5);
int y_top = int(yf-radius+0.5);
int x_right = int(xf+radius+1.5);
int y_bottom = int(yf+radius+1.5);
int ret_val;
ret_val = integral(y_bottom,x_right) - integral(y_bottom,x_left);//bottom right corner
ret_val += integral(y_top,x_left);
ret_val -= integral(y_top,x_right);
ret_val = ret_val/( (x_right-x_left) * (y_bottom-y_top) );
return ((uchar)ret_val);
}
__global__ void computeHOOFRDescriptor(PtrStepSzi imgintegral, float* patternLookup_x, float* patternLookup_y, float* patternLookup_sigma,
uchar* descriptionPairs_i, uchar* descriptionPairs_j, int* orientationPairs_i, int* orientationPairs_j,
int* orientationPairs_wx, int* orientationPairs_wy, int npoints,const int NB_POINTS, int NB_ORIENTATION, int NB_PAIRS,
float* keypoint_x, float* keypoint_y, float* keypoint_angle, float* keypoint_octave, PtrStepb desc)
{
const int ptidx = blockIdx.x * blockDim.y + threadIdx.y;
if (ptidx < npoints)
{
uchar pointsValue[49];
int direction0 = 0;
int direction1 = 0;
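// First pass: sample the first NB_POINTS-9 pattern points at rotation 0 and
// estimate the keypoint orientation from the 40 orientation pairs.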
for( int i = 0; i < NB_POINTS-9; i++)
{
pointsValue[i] = meanIntensity(imgintegral, keypoint_x[ptidx], keypoint_y[ptidx], keypoint_octave[ptidx], 0, i, patternLookup_x, patternLookup_y, patternLookup_sigma, NB_ORIENTATION, NB_POINTS);
}
for( int m = 0; m < 40; m++)
{
//iterate through the orientation pairs
const int delta = (pointsValue[ orientationPairs_i[m] ] - pointsValue[ orientationPairs_j[m] ]);
direction0 += delta*(orientationPairs_wx[m])/2048;
direction1 += delta*(orientationPairs_wy[m])/2048;
}
keypoint_angle[ptidx] = static_cast<float>(atan2((float)direction1,(float)direction0)*(180.0/CV_PI));
int thetaIdx = int(NB_ORIENTATION*keypoint_angle[ptidx]*(1/360.0)+0.5);
if( thetaIdx < 0 )
thetaIdx += NB_ORIENTATION;
if( thetaIdx >= NB_ORIENTATION )
thetaIdx -= NB_ORIENTATION;
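// Second pass: re-sample every pattern point at the quantised orientation and
// pack the NB_PAIRS pairwise intensity comparisons into 32-bit descriptor words.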
for( int i = 0; i < NB_POINTS; i++)
{
pointsValue[i] = meanIntensity(imgintegral, keypoint_x[ptidx], keypoint_y[ptidx], keypoint_octave[ptidx], thetaIdx, i, patternLookup_x, patternLookup_y, patternLookup_sigma, NB_ORIENTATION, NB_POINTS);
}
////////////////////
const int n_word = NB_PAIRS/32;
int cnt;
unsigned int reg;
for (int i = 0; i < n_word; i++)
{
reg = 0;
for (int j = 0; j < 32; j++)
{
cnt = j + i * 32;
if(pointsValue[descriptionPairs_i[cnt]] >= pointsValue[descriptionPairs_j[cnt]]) { reg |= (1<<j); }
}
unsigned int* r_t = (unsigned int*) (&(desc.ptr(ptidx)[4*i]));
*r_t = reg;
}
}
}
void computeHOOFRDescriptor_gpu(PtrStepSzi imgintegral, float* patternLookup_x, float* patternLookup_y, float* patternLookup_sigma,
uchar* descriptionPairs_i, uchar* descriptionPairs_j, int* orientationPairs_i, int* orientationPairs_j,
int* orientationPairs_wx, int* orientationPairs_wy, int npoints, int NB_POINTS, int NB_ORIENTATION, int NB_PAIRS,
float* keypoint_x, float* keypoint_y, float* keypoint_angle, float* keypoint_octave, PtrStepb desc, hipStream_t stream)
{
dim3 block(1, 8);
dim3 grid;
grid.x = divUp(npoints, block.y);
hipLaunchKernelGGL(( computeHOOFRDescriptor), dim3(grid), dim3(block), 0, stream, imgintegral, patternLookup_x, patternLookup_y, patternLookup_sigma,
descriptionPairs_i, descriptionPairs_j, orientationPairs_i, orientationPairs_j,
orientationPairs_wx, orientationPairs_wy, npoints, NB_POINTS, NB_ORIENTATION, NB_PAIRS,
keypoint_x, keypoint_y, keypoint_angle, keypoint_octave, desc);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////
// computeHoofrDescriptor
template <int WTA_K> struct HoofrDescriptor;
#define GET_VALUE(idx) \
img(loc.y + __float2int_rn(pattern_x[idx] * sina + pattern_y[idx] * cosa), \
loc.x + __float2int_rn(pattern_x[idx] * cosa - pattern_y[idx] * sina))
template <> struct HoofrDescriptor<2>
{
__device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i)
{
pattern_x += 16 * i;
pattern_y += 16 * i;
int t0, t1, val;
t0 = GET_VALUE(0); t1 = GET_VALUE(1);
val = t0 < t1;
t0 = GET_VALUE(2); t1 = GET_VALUE(3);
val |= (t0 < t1) << 1;
t0 = GET_VALUE(4); t1 = GET_VALUE(5);
val |= (t0 < t1) << 2;
t0 = GET_VALUE(6); t1 = GET_VALUE(7);
val |= (t0 < t1) << 3;
t0 = GET_VALUE(8); t1 = GET_VALUE(9);
val |= (t0 < t1) << 4;
t0 = GET_VALUE(10); t1 = GET_VALUE(11);
val |= (t0 < t1) << 5;
t0 = GET_VALUE(12); t1 = GET_VALUE(13);
val |= (t0 < t1) << 6;
t0 = GET_VALUE(14); t1 = GET_VALUE(15);
val |= (t0 < t1) << 7;
return val;
}
};
template <> struct HoofrDescriptor<3>
{
__device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i)
{
pattern_x += 12 * i;
pattern_y += 12 * i;
int t0, t1, t2, val;
t0 = GET_VALUE(0); t1 = GET_VALUE(1); t2 = GET_VALUE(2);
val = t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0);
t0 = GET_VALUE(3); t1 = GET_VALUE(4); t2 = GET_VALUE(5);
val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 2;
t0 = GET_VALUE(6); t1 = GET_VALUE(7); t2 = GET_VALUE(8);
val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 4;
t0 = GET_VALUE(9); t1 = GET_VALUE(10); t2 = GET_VALUE(11);
val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 6;
return val;
}
};
template <> struct HoofrDescriptor<4>
{
__device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i)
{
pattern_x += 16 * i;
pattern_y += 16 * i;
int t0, t1, t2, t3, k, val;
int a, b;
t0 = GET_VALUE(0); t1 = GET_VALUE(1);
t2 = GET_VALUE(2); t3 = GET_VALUE(3);
a = 0, b = 2;
if( t1 > t0 ) t0 = t1, a = 1;
if( t3 > t2 ) t2 = t3, b = 3;
k = t0 > t2 ? a : b;
val = k;
t0 = GET_VALUE(4); t1 = GET_VALUE(5);
t2 = GET_VALUE(6); t3 = GET_VALUE(7);
a = 0, b = 2;
if( t1 > t0 ) t0 = t1, a = 1;
if( t3 > t2 ) t2 = t3, b = 3;
k = t0 > t2 ? a : b;
val |= k << 2;
t0 = GET_VALUE(8); t1 = GET_VALUE(9);
t2 = GET_VALUE(10); t3 = GET_VALUE(11);
a = 0, b = 2;
if( t1 > t0 ) t0 = t1, a = 1;
if( t3 > t2 ) t2 = t3, b = 3;
k = t0 > t2 ? a : b;
val |= k << 4;
t0 = GET_VALUE(12); t1 = GET_VALUE(13);
t2 = GET_VALUE(14); t3 = GET_VALUE(15);
a = 0, b = 2;
if( t1 > t0 ) t0 = t1, a = 1;
if( t3 > t2 ) t2 = t3, b = 3;
k = t0 > t2 ? a : b;
val |= k << 6;
return val;
}
};
#undef GET_VALUE
template <int WTA_K>
__global__ void computeHoofrDescriptor(const PtrStepb img, const short2* loc, const float* angle_, const int npoints,
const int* pattern_x, const int* pattern_y, PtrStepb desc, int dsize)
{
const int descidx = blockIdx.x * blockDim.x + threadIdx.x;
const int ptidx = blockIdx.y * blockDim.y + threadIdx.y;
if (ptidx < npoints && descidx < dsize)
{
float angle = angle_[ptidx];
angle *= (float)(CV_PI_F / 180.f);
float sina, cosa;
::sincosf(angle, &sina, &cosa);
desc.ptr(ptidx)[descidx] = HoofrDescriptor<WTA_K>::calc(img, loc[ptidx], pattern_x, pattern_y, sina, cosa, descidx);
}
}
void computeHoofrDescriptor_gpu(PtrStepb img, const short2* loc, const float* angle, const int npoints,
const int* pattern_x, const int* pattern_y, PtrStepb desc, int dsize, int WTA_K, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid;
grid.x = divUp(dsize, block.x);
grid.y = divUp(npoints, block.y);
switch (WTA_K)
{
case 2:
hipLaunchKernelGGL(( computeHoofrDescriptor<2>), dim3(grid), dim3(block), 0, stream, img, loc, angle, npoints, pattern_x, pattern_y, desc, dsize);
break;
case 3:
hipLaunchKernelGGL(( computeHoofrDescriptor<3>), dim3(grid), dim3(block), 0, stream, img, loc, angle, npoints, pattern_x, pattern_y, desc, dsize);
break;
case 4:
hipLaunchKernelGGL(( computeHoofrDescriptor<4>), dim3(grid), dim3(block), 0, stream, img, loc, angle, npoints, pattern_x, pattern_y, desc, dsize);
break;
}
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// mergeLocation
__global__ void mergeLocation(const short2* loc_, float* x, float* y, const int npoints, float scale)
{
const int ptidx = blockIdx.x * blockDim.x + threadIdx.x;
if (ptidx < npoints)
{
short2 loc = loc_[ptidx];
x[ptidx] = loc.x * scale;
y[ptidx] = loc.y * scale;
}
}
void mergeLocation_gpu(const short2* loc, float* x, float* y, int npoints, float scale, hipStream_t stream)
{
dim3 block(256);
dim3 grid;
grid.x = divUp(npoints, block.x);
hipLaunchKernelGGL(( mergeLocation), dim3(grid), dim3(block), 0, stream, loc, x, y, npoints, scale);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
}
}}}
#endif /* CUDA_DISABLER */
| e569a13e9aab857bb5d55248e8afcba79d4dbe87.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"
namespace cv { namespace cuda { namespace device
{
namespace hoofr
{
////////////////////////////////////////////////////////////////////////////////////////////////////////
//buid Pattern
////////////////////////////////////////////////////////////////////////////////////////////////////////
// cull
int cull_gpu(int* loc, float* response, int size, int n_points)
{
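// Sort keypoint locations by response in descending order so the caller can
// keep only the strongest n_points.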
thrust::device_ptr<int> loc_ptr(loc);
thrust::device_ptr<float> response_ptr(response);
thrust::sort_by_key(response_ptr, response_ptr + size, loc_ptr, thrust::greater<float>());
return n_points;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// HessianResponses
__device__ float hessian_dxx[49] =
{
0.0008, 0.0015, -0.0007, -0.0034, -0.0007, 0.0015, 0.0008,
0.0044, 0.0085, -0.0041, -0.0191, -0.0041, 0.0085, 0.0044,
0.0125, 0.0240, -0.0117, -0.0542, -0.0117, 0.0240, 0.0125,
0.0177, 0.0340, -0.0166, -0.0768, -0.0166, 0.0340, 0.0177,
0.0125, 0.0240, -0.0117, -0.0542, -0.0117, 0.0240, 0.0125,
0.0044, 0.0085, -0.0041, -0.0191, -0.0041, 0.0085, 0.0044,
0.0008, 0.0015, -0.0007, -0.0034, -0.0007, 0.0015, 0.0008
};
__device__ float hessian_dyy[49] =
{
0.0008, 0.0044, 0.0125, 0.0177, 0.0125, 0.0044, 0.0008,
0.0015, 0.0085, 0.0240, 0.0340, 0.0240, 0.0085, 0.0015,
-0.0007, -0.0041, -0.0117, -0.0166, -0.0117, -0.0041, -0.0007,
-0.0034, -0.0191, -0.0542, -0.0768, -0.0542, -0.0191, -0.0034,
-0.0007, -0.0041, -0.0117, -0.0166, -0.0117, -0.0041, -0.0007,
0.0015, 0.0085, 0.0240, 0.0340, 0.0240, 0.0085, 0.0015,
0.0008, 0.0044, 0.0125, 0.0177, 0.0125, 0.0044, 0.0008
};
__device__ float hessian_dxy[49] =
{
0.0009, 0.0035, 0.0050, 0, -0.0050, -0.0035, -0.0009,
0.0035, 0.0133, 0.0188, 0, -0.0188, -0.0133, -0.0035,
0.0050, 0.0188, 0.0266, 0, -0.0266, -0.0188, -0.0050,
0, 0, 0, 0, 0, 0, 0,
-0.0050, -0.0188, -0.0266, 0, 0.0266, 0.0188, 0.0050,
-0.0035, -0.0133, -0.0188, 0, 0.0188, 0.0133, 0.0035,
-0.0009, -0.0035, -0.0050, 0, 0.0050, 0.0035, 0.0009
};
__global__ void HessianResponses(const PtrStepb img, const short2* loc_, float* response, const int npoints, const int blockSize, const float hessian_k)
{
__shared__ float smem0[8 * 32];
__shared__ float smem1[8 * 32];
__shared__ float smem2[8 * 32];
const int ptidx = blockIdx.x * blockDim.y + threadIdx.y;
if (ptidx < npoints)
{
const short2 loc = loc_[ptidx];
const int r = blockSize / 2;
const int x0 = loc.x - r;
const int y0 = loc.y - r;
float Dxx = 0, Dyy = 0, Dxy = 0;
for (int ind = threadIdx.x; ind < blockSize * blockSize; ind += blockDim.x)
{
const int i = ind / blockSize;
const int j = ind % blockSize;
Dxx += ((float)img(y0 + i, x0 + j)) * hessian_dxx[ind];
Dyy += ((float)img(y0 + i, x0 + j)) * hessian_dyy[ind];
Dxy += ((float)img(y0 + i, x0 + j)) * hessian_dxy[ind];
}
float* srow0 = smem0 + threadIdx.y * blockDim.x;
float* srow1 = smem1 + threadIdx.y * blockDim.x;
float* srow2 = smem2 + threadIdx.y * blockDim.x;
plus<float> op;
reduce<32>(smem_tuple(srow0, srow1, srow2), thrust::tie(Dxx, Dyy, Dxy), threadIdx.x, thrust::make_tuple(op, op, op));
if (threadIdx.x == 0)
{
response[ptidx] = (Dxx * Dyy) - hessian_k*(Dxy * Dxy);
}
}
}
void HessianResponses_gpu(PtrStepSzb img, const short2* loc, float* response, const int npoints, int blockSize, float hessian_k, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid;
grid.x = divUp(npoints, block.y);
HessianResponses<<<grid, block, 0, stream>>>(img, loc, response, npoints, blockSize, hessian_k);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// IC_Angle
__constant__ int c_u_max[32];
void loadUMax(const int* u_max, int count)
{
cudaSafeCall( cudaMemcpyToSymbol(c_u_max, u_max, count * sizeof(int)) );
}
__global__ void IC_Angle(const PtrStepb image, const short2* loc_, float* angle, const int npoints, const int half_k)
{
__shared__ int smem0[8 * 32];
__shared__ int smem1[8 * 32];
int* srow0 = smem0 + threadIdx.y * blockDim.x;
int* srow1 = smem1 + threadIdx.y * blockDim.x;
plus<int> op;
const int ptidx = blockIdx.x * blockDim.y + threadIdx.y;
if (ptidx < npoints)
{
int m_01 = 0, m_10 = 0;
const short2 loc = loc_[ptidx];
// Treat the center line differently, v=0
for (int u = threadIdx.x - half_k; u <= half_k; u += blockDim.x)
m_10 += u * image(loc.y, loc.x + u);
reduce<32>(srow0, m_10, threadIdx.x, op);
for (int v = 1; v <= half_k; ++v)
{
// Proceed over the two lines
int v_sum = 0;
int m_sum = 0;
const int d = c_u_max[v];
for (int u = threadIdx.x - d; u <= d; u += blockDim.x)
{
int val_plus = image(loc.y + v, loc.x + u);
int val_minus = image(loc.y - v, loc.x + u);
v_sum += (val_plus - val_minus);
m_sum += u * (val_plus + val_minus);
}
reduce<32>(smem_tuple(srow0, srow1), thrust::tie(v_sum, m_sum), threadIdx.x, thrust::make_tuple(op, op));
m_10 += m_sum;
m_01 += v * v_sum;
}
if (threadIdx.x == 0)
{
float kp_dir = ::atan2f((float)m_01, (float)m_10);
kp_dir += (kp_dir < 0) * (2.0f * CV_PI_F);
kp_dir *= 180.0f / CV_PI_F;
angle[ptidx] = kp_dir;
}
}
}
void IC_Angle_gpu(PtrStepSzb image, const short2* loc, float* angle, int npoints, int half_k, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid;
grid.x = divUp(npoints, block.y);
IC_Angle<<<grid, block, 0, stream>>>(image, loc, angle, npoints, half_k);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ uchar meanIntensity(const PtrStepi integral, const float kp_x, const float kp_y, const int scale, const int rot, const int point,
const float* patternLookup_x, const float* patternLookup_y, const float* patternLookup_sigma, const int NB_ORIENTATION, const int NB_POINTS)
{
int id = scale * NB_ORIENTATION * NB_POINTS + rot * NB_POINTS + point;
float xf = patternLookup_x[id] + kp_x;
float yf = patternLookup_y[id] + kp_y;
float radius = patternLookup_sigma[id];
if(radius < 0.5) {radius = 0.5;}
int x_left = int(xf-radius+0.5);
int y_top = int(yf-radius+0.5);
int x_right = int(xf+radius+1.5);
int y_bottom = int(yf+radius+1.5);
int ret_val;
ret_val = integral(y_bottom,x_right) - integral(y_bottom,x_left);//bottom right corner
ret_val += integral(y_top,x_left);
ret_val -= integral(y_top,x_right);
ret_val = ret_val/( (x_right-x_left) * (y_bottom-y_top) );
return ((uchar)ret_val);
}
__global__ void computeHOOFRDescriptor(PtrStepSzi imgintegral, float* patternLookup_x, float* patternLookup_y, float* patternLookup_sigma,
uchar* descriptionPairs_i, uchar* descriptionPairs_j, int* orientationPairs_i, int* orientationPairs_j,
int* orientationPairs_wx, int* orientationPairs_wy, int npoints,const int NB_POINTS, int NB_ORIENTATION, int NB_PAIRS,
float* keypoint_x, float* keypoint_y, float* keypoint_angle, float* keypoint_octave, PtrStepb desc)
{
const int ptidx = blockIdx.x * blockDim.y + threadIdx.y;
if (ptidx < npoints)
{
uchar pointsValue[49];
int direction0 = 0;
int direction1 = 0;
for( int i = 0; i < NB_POINTS-9; i++)
{
pointsValue[i] = meanIntensity(imgintegral, keypoint_x[ptidx], keypoint_y[ptidx], keypoint_octave[ptidx], 0, i, patternLookup_x, patternLookup_y, patternLookup_sigma, NB_ORIENTATION, NB_POINTS);
}
for( int m = 0; m < 40; m++)
{
//iterate through the orientation pairs
const int delta = (pointsValue[ orientationPairs_i[m] ] - pointsValue[ orientationPairs_j[m] ]);
direction0 += delta*(orientationPairs_wx[m])/2048;
direction1 += delta*(orientationPairs_wy[m])/2048;
}
keypoint_angle[ptidx] = static_cast<float>(atan2((float)direction1,(float)direction0)*(180.0/CV_PI));
int thetaIdx = int(NB_ORIENTATION*keypoint_angle[ptidx]*(1/360.0)+0.5);
if( thetaIdx < 0 )
thetaIdx += NB_ORIENTATION;
if( thetaIdx >= NB_ORIENTATION )
thetaIdx -= NB_ORIENTATION;
for( int i = 0; i < NB_POINTS; i++)
{
pointsValue[i] = meanIntensity(imgintegral, keypoint_x[ptidx], keypoint_y[ptidx], keypoint_octave[ptidx], thetaIdx, i, patternLookup_x, patternLookup_y, patternLookup_sigma, NB_ORIENTATION, NB_POINTS);
}
////////////////////
const int n_word = NB_PAIRS/32;
int cnt;
unsigned int reg;
for (int i = 0; i < n_word; i++)
{
reg = 0;
for (int j = 0; j < 32; j++)
{
cnt = j + i * 32;
if(pointsValue[descriptionPairs_i[cnt]] >= pointsValue[descriptionPairs_j[cnt]]) { reg |= (1<<j); }
}
unsigned int* r_t = (unsigned int*) (&(desc.ptr(ptidx)[4*i]));
*r_t = reg;
}
}
}
void computeHOOFRDescriptor_gpu(PtrStepSzi imgintegral, float* patternLookup_x, float* patternLookup_y, float* patternLookup_sigma,
uchar* descriptionPairs_i, uchar* descriptionPairs_j, int* orientationPairs_i, int* orientationPairs_j,
int* orientationPairs_wx, int* orientationPairs_wy, int npoints, int NB_POINTS, int NB_ORIENTATION, int NB_PAIRS,
float* keypoint_x, float* keypoint_y, float* keypoint_angle, float* keypoint_octave, PtrStepb desc, cudaStream_t stream)
{
dim3 block(1, 8);
dim3 grid;
grid.x = divUp(npoints, block.y);
computeHOOFRDescriptor<<<grid, block, 0, stream>>>(imgintegral, patternLookup_x, patternLookup_y, patternLookup_sigma,
descriptionPairs_i, descriptionPairs_j, orientationPairs_i, orientationPairs_j,
orientationPairs_wx, orientationPairs_wy, npoints, NB_POINTS, NB_ORIENTATION, NB_PAIRS,
keypoint_x, keypoint_y, keypoint_angle, keypoint_octave, desc);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////
// computeHoofrDescriptor
template <int WTA_K> struct HoofrDescriptor;
#define GET_VALUE(idx) \
img(loc.y + __float2int_rn(pattern_x[idx] * sina + pattern_y[idx] * cosa), \
loc.x + __float2int_rn(pattern_x[idx] * cosa - pattern_y[idx] * sina))
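// GET_VALUE samples the image at pattern point idx rotated by the keypoint
// orientation (sina/cosa) around the keypoint location loc.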
template <> struct HoofrDescriptor<2>
{
__device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i)
{
pattern_x += 16 * i;
pattern_y += 16 * i;
int t0, t1, val;
t0 = GET_VALUE(0); t1 = GET_VALUE(1);
val = t0 < t1;
t0 = GET_VALUE(2); t1 = GET_VALUE(3);
val |= (t0 < t1) << 1;
t0 = GET_VALUE(4); t1 = GET_VALUE(5);
val |= (t0 < t1) << 2;
t0 = GET_VALUE(6); t1 = GET_VALUE(7);
val |= (t0 < t1) << 3;
t0 = GET_VALUE(8); t1 = GET_VALUE(9);
val |= (t0 < t1) << 4;
t0 = GET_VALUE(10); t1 = GET_VALUE(11);
val |= (t0 < t1) << 5;
t0 = GET_VALUE(12); t1 = GET_VALUE(13);
val |= (t0 < t1) << 6;
t0 = GET_VALUE(14); t1 = GET_VALUE(15);
val |= (t0 < t1) << 7;
return val;
}
};
template <> struct HoofrDescriptor<3>
{
__device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i)
{
pattern_x += 12 * i;
pattern_y += 12 * i;
int t0, t1, t2, val;
t0 = GET_VALUE(0); t1 = GET_VALUE(1); t2 = GET_VALUE(2);
val = t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0);
t0 = GET_VALUE(3); t1 = GET_VALUE(4); t2 = GET_VALUE(5);
val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 2;
t0 = GET_VALUE(6); t1 = GET_VALUE(7); t2 = GET_VALUE(8);
val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 4;
t0 = GET_VALUE(9); t1 = GET_VALUE(10); t2 = GET_VALUE(11);
val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 6;
return val;
}
};
template <> struct HoofrDescriptor<4>
{
__device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i)
{
pattern_x += 16 * i;
pattern_y += 16 * i;
int t0, t1, t2, t3, k, val;
int a, b;
t0 = GET_VALUE(0); t1 = GET_VALUE(1);
t2 = GET_VALUE(2); t3 = GET_VALUE(3);
a = 0, b = 2;
if( t1 > t0 ) t0 = t1, a = 1;
if( t3 > t2 ) t2 = t3, b = 3;
k = t0 > t2 ? a : b;
val = k;
t0 = GET_VALUE(4); t1 = GET_VALUE(5);
t2 = GET_VALUE(6); t3 = GET_VALUE(7);
a = 0, b = 2;
if( t1 > t0 ) t0 = t1, a = 1;
if( t3 > t2 ) t2 = t3, b = 3;
k = t0 > t2 ? a : b;
val |= k << 2;
t0 = GET_VALUE(8); t1 = GET_VALUE(9);
t2 = GET_VALUE(10); t3 = GET_VALUE(11);
a = 0, b = 2;
if( t1 > t0 ) t0 = t1, a = 1;
if( t3 > t2 ) t2 = t3, b = 3;
k = t0 > t2 ? a : b;
val |= k << 4;
t0 = GET_VALUE(12); t1 = GET_VALUE(13);
t2 = GET_VALUE(14); t3 = GET_VALUE(15);
a = 0, b = 2;
if( t1 > t0 ) t0 = t1, a = 1;
if( t3 > t2 ) t2 = t3, b = 3;
k = t0 > t2 ? a : b;
val |= k << 6;
return val;
}
};
#undef GET_VALUE
template <int WTA_K>
__global__ void computeHoofrDescriptor(const PtrStepb img, const short2* loc, const float* angle_, const int npoints,
const int* pattern_x, const int* pattern_y, PtrStepb desc, int dsize)
{
const int descidx = blockIdx.x * blockDim.x + threadIdx.x;
const int ptidx = blockIdx.y * blockDim.y + threadIdx.y;
if (ptidx < npoints && descidx < dsize)
{
float angle = angle_[ptidx];
angle *= (float)(CV_PI_F / 180.f);
float sina, cosa;
::sincosf(angle, &sina, &cosa);
desc.ptr(ptidx)[descidx] = HoofrDescriptor<WTA_K>::calc(img, loc[ptidx], pattern_x, pattern_y, sina, cosa, descidx);
}
}
void computeHoofrDescriptor_gpu(PtrStepb img, const short2* loc, const float* angle, const int npoints,
const int* pattern_x, const int* pattern_y, PtrStepb desc, int dsize, int WTA_K, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid;
grid.x = divUp(dsize, block.x);
grid.y = divUp(npoints, block.y);
switch (WTA_K)
{
case 2:
computeHoofrDescriptor<2><<<grid, block, 0, stream>>>(img, loc, angle, npoints, pattern_x, pattern_y, desc, dsize);
break;
case 3:
computeHoofrDescriptor<3><<<grid, block, 0, stream>>>(img, loc, angle, npoints, pattern_x, pattern_y, desc, dsize);
break;
case 4:
computeHoofrDescriptor<4><<<grid, block, 0, stream>>>(img, loc, angle, npoints, pattern_x, pattern_y, desc, dsize);
break;
}
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// mergeLocation
__global__ void mergeLocation(const short2* loc_, float* x, float* y, const int npoints, float scale)
{
const int ptidx = blockIdx.x * blockDim.x + threadIdx.x;
if (ptidx < npoints)
{
short2 loc = loc_[ptidx];
x[ptidx] = loc.x * scale;
y[ptidx] = loc.y * scale;
}
}
void mergeLocation_gpu(const short2* loc, float* x, float* y, int npoints, float scale, cudaStream_t stream)
{
dim3 block(256);
dim3 grid;
grid.x = divUp(npoints, block.x);
mergeLocation<<<grid, block, 0, stream>>>(loc, x, y, npoints, scale);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
}}}
#endif /* CUDA_DISABLER */
|
f4e84ea5db2f40eb7ace02772ccfd38cdc176eea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Author: Ulises Olivares
// [email protected]
// Oct 22, 2020
#include<iostream>
#include<stdio.h>
#include<time.h>
#include<cstdlib>
#include<math.h>
#include <unistd.h>
#define n 99999999 // input/output 1D array size
#define m 9999 //assume mask size as odd
#define TILE_SIZE 1024
#define MAX_MASK_WIDTH 256
using namespace std;
//Global variables
long long int sizeN = n * sizeof(float);
long long int sizeM = m * sizeof(float);
float h_N[n] , h_M[m], h_P[n];
int threads = 1024;
int blocks = ceil(float(n)/float(threads));
__constant__ float c_M[m];
// GPU timers using CUDA events
float globalMemTimer = 0.0f, constantMemTimer = 0.0f, sharedMemTimer = 0.0f;
// Method definition
void generateRandom(float *h_a, int size);
void parallelConvolution1D();
void parallelConvolutionConstant1D();
void parallelConvolutionTiled1D();
template <typename vec>
void printVector(vec *V, int size);
// Kernel definition
__global__ void CUDAConvolution1D(float *N, float *M, float *P, int Mask_Width, int Width);
__global__ void CUDAConvolutionConstant1D(float *N, float *P, int Mask_Width, int Width);
__global__ void CUDAconvolution_1D_tiled(float *N, float *P, int Mask_Width, int Width);
int main(){
//init N and M with random numbers
generateRandom(h_N, n);
generateRandom(h_M, m);
// Parallel convolution 1D kernel
parallelConvolution1D();
// Parallel convolution 1D constant memory
parallelConvolutionConstant1D();
// Parallel convolution 1D shared - constant memory
parallelConvolutionTiled1D();
return 0;
}
__global__ void CUDAConvolution1D(float *N, float *M, float *P, int Mask_Width, int Width){
int i = blockIdx.x*blockDim.x + threadIdx.x;
float Pvalue = 0;
int N_start_point = i - (Mask_Width/2);
for (int j = 0; j < Mask_Width; j++) {
if (N_start_point + j >= 0 && N_start_point + j < Width) {
Pvalue += N[N_start_point + j]*M[j];
}
}
P[i] = Pvalue;
}
__global__ void CUDAConvolutionConstant1D(float *N, float *P, int Mask_Width, int Width){
int i = blockIdx.x*blockDim.x + threadIdx.x;
//printf("M[i]: %d ", c_M[i] );
//printf("thread: %d", i );
float Pvalue = 0;
int N_start_point = i - (Mask_Width/2);
for (int j = 0; j < Mask_Width; j++) {
if (N_start_point + j >= 0 && N_start_point + j < Width) {
Pvalue += N[N_start_point + j]*c_M[j];
}
}
P[i] = Pvalue;
}
__global__ void CUDAconvolution_1D_tiled(float *N, float *P, int Mask_Width, int Width) {
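// Tiled 1D convolution: each block stages its TILE_SIZE input elements plus left and right
// halo regions in shared memory (N_ds) and reads the mask from constant memory c_M.
// Assumes blockDim.x == TILE_SIZE and Mask_Width <= MAX_MASK_WIDTH so the halos fit in N_ds.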
int i = blockIdx.x*blockDim.x + threadIdx.x;
//printf("tid: %d ", i);
__shared__ float N_ds[TILE_SIZE + MAX_MASK_WIDTH - 1];
int n1 = Mask_Width/2;
int halo_index_left = (blockIdx.x - 1)*blockDim.x + threadIdx.x;
if (threadIdx.x >= blockDim.x - n1) {
N_ds[threadIdx.x - (blockDim.x - n1)] =
(halo_index_left < 0) ? 0 : N[halo_index_left];
}
N_ds[n1 + threadIdx.x] = N[blockIdx.x*blockDim.x + threadIdx.x];
int halo_index_right = (blockIdx.x + 1)*blockDim.x + threadIdx.x;
if (threadIdx.x < n1) {
N_ds[n1 + blockDim.x + threadIdx.x] = (halo_index_right >= Width) ? 0 : N[halo_index_right];
}
__syncthreads();
float Pvalue = 0;
for(int j = 0; j < Mask_Width; j++) {
Pvalue += N_ds[threadIdx.x + j]*c_M[j];
}
/*if(Pvalue!=0)
printf("value: %f", Pvalue);*/
P[i] = Pvalue;
//printf("tid %d Pvalue: %lf ", i, Pvalue );
}
template <typename vec>
void printVector(vec *V, int size){
for(int i = 0; i < size; i++){
cout<< V[i] << " ";
}
cout << endl;
}
void generateRandom(float *h_a, int size){
// Initialize seed
srand(time(NULL));
for(int i=0; i<size; i++){
h_a[i] = float(rand() % 10 +1);
}
}
void parallelConvolutionTiled1D() {
float *d_N, *d_P;
hipMalloc((void **)&d_N, sizeN);
hipMalloc((void **)&d_P, sizeN);
// copy data from host to device
hipMemcpy(d_N, h_N, sizeN, hipMemcpyHostToDevice);
// Transfer data to constant memory
hipMemcpyToSymbol(c_M, h_M, sizeM);
// define timers
hipEvent_t start, stop;
// events to take time
hipEventCreate(&start);
hipEventCreate(&stop);
// start timer
hipEventRecord(start,0);
//Launch kernel
hipLaunchKernelGGL(( CUDAconvolution_1D_tiled), dim3(blocks), dim3(threads), 0, 0, d_N, d_P, m, n);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&sharedMemTimer, start, stop);
hipDeviceSynchronize();
cout<< "Elapsed parallel 1D convolution (Shared-Constant Mem) : " << sharedMemTimer << " ms, " << sharedMemTimer / 1000 << " secs" <<endl;
hipMemcpy(h_P, d_P, sizeN, hipMemcpyDeviceToHost);
//printVector(h_P, n);
hipFree(d_N); hipFree(d_P); // c_M lives in __constant__ memory and is not freed with hipFree
}
void parallelConvolutionConstant1D(){
float *d_N, *d_P;
hipMalloc((void **)&d_N, sizeN);
hipMalloc((void **)&d_P, sizeN);
// copy data from host to device
hipMemcpy(d_N, h_N, sizeN, hipMemcpyHostToDevice);
// Transfer data to constant memory
hipMemcpyToSymbol(c_M, h_M, sizeM);
// define timers
hipEvent_t start, stop;
// events to take time
hipEventCreate(&start);
hipEventCreate(&stop);
// start timer
hipEventRecord(start,0);
//Launch kernel
hipLaunchKernelGGL(( CUDAConvolutionConstant1D), dim3(blocks), dim3(threads), 0, 0, d_N, d_P, m, n);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&constantMemTimer, start, stop);
hipDeviceSynchronize();
cout<< "Elapsed parallel 1D convolution (Constant Mem) : " << constantMemTimer << " ms, " << constantMemTimer / 1000 << " secs" <<endl;
hipMemcpy(h_P, d_P, sizeN, hipMemcpyDeviceToHost);
//cout<< "Resulting P vector (Constant)" << endl;
//printVector(h_P, n);
hipFree(d_N); hipFree(d_P); // c_M lives in __constant__ memory and is not freed with hipFree
}
void parallelConvolution1D(){
float *d_N, *d_M, *d_P;
// Allocate memory on the device
hipMalloc((void **)&d_N, sizeN);
hipMalloc((void **)&d_M, sizeM);
hipMalloc((void **)&d_P, sizeN);
// Transfer data from host to device
hipMemcpy(d_N, h_N, sizeN, hipMemcpyHostToDevice);
hipMemcpy(d_M, h_M, sizeM, hipMemcpyHostToDevice);
// define timers
hipEvent_t start, stop;
// events to take time
hipEventCreate(&start);
hipEventCreate(&stop);
// start timer
hipEventRecord(start,0);
//Launch kernel
hipLaunchKernelGGL(( CUDAConvolution1D), dim3(blocks), dim3(threads), 0, 0, d_N, d_M, d_P, m, n);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&globalMemTimer, start, stop);
//hipDeviceSynchronize();
cout<< "Elapsed parallel 1D convolution (Global Mem) : " << globalMemTimer << " ms, " << globalMemTimer / 1000 << " secs" <<endl;
hipMemcpy(h_P, d_P, sizeN, hipMemcpyDeviceToHost);
//cout<< "Resulting P vector (Global)" << endl;
//printVector(h_P, n);
//free(h_N); free(h_M); free(h_P);
hipFree(d_M); hipFree(d_N); hipFree(d_P);
}
| f4e84ea5db2f40eb7ace02772ccfd38cdc176eea.cu | // Author: Ulises Olivares
// [email protected]
// Oct 22, 2020
#include<iostream>
#include<stdio.h>
#include<time.h>
#include<cstdlib>
#include<math.h>
#include <unistd.h>
#define n 99999999 // input/output 1D array size
#define m 9999 //assume mask size as odd
#define TILE_SIZE 1024
#define MAX_MASK_WIDTH 256
using namespace std;
//Global variables
long long int sizeN = n * sizeof(float);
long long int sizeM = m * sizeof(float);
float h_N[n] , h_M[m], h_P[n];
int threads = 1024;
int blocks = ceil(float(n)/float(threads));
__constant__ float c_M[m];
// GPU timers using CUDA events
float globalMemTimer = 0.0f, constantMemTimer = 0.0f, sharedMemTimer = 0.0f;
// Method definition
void generateRandom(float *h_a, int size);
void parallelConvolution1D();
void parallelConvolutionConstant1D();
void parallelConvolutionTiled1D();
template <typename vec>
void printVector(vec *V, int size);
// Kernel definition
__global__ void CUDAConvolution1D(float *N, float *M, float *P, int Mask_Width, int Width);
__global__ void CUDAConvolutionConstant1D(float *N, float *P, int Mask_Width, int Width);
__global__ void CUDAconvolution_1D_tiled(float *N, float *P, int Mask_Width, int Width);
int main(){
//init N and M with random numbers
generateRandom(h_N, n);
generateRandom(h_M, m);
// Parallel convolution 1D kernel
parallelConvolution1D();
// Parallel convolution 1D constant memory
parallelConvolutionConstant1D();
// Parallel convolution 1D shared - constant memory
parallelConvolutionTiled1D();
return 0;
}
__global__ void CUDAConvolution1D(float *N, float *M, float *P, int Mask_Width, int Width){
int i = blockIdx.x*blockDim.x + threadIdx.x;
float Pvalue = 0;
int N_start_point = i - (Mask_Width/2);
for (int j = 0; j < Mask_Width; j++) {
if (N_start_point + j >= 0 && N_start_point + j < Width) {
Pvalue += N[N_start_point + j]*M[j];
}
}
P[i] = Pvalue;
}
__global__ void CUDAConvolutionConstant1D(float *N, float *P, int Mask_Width, int Width){
int i = blockIdx.x*blockDim.x + threadIdx.x;
//printf("M[i]: %d ", c_M[i] );
//printf("thread: %d", i );
float Pvalue = 0;
int N_start_point = i - (Mask_Width/2);
for (int j = 0; j < Mask_Width; j++) {
if (N_start_point + j >= 0 && N_start_point + j < Width) {
Pvalue += N[N_start_point + j]*c_M[j];
}
}
P[i] = Pvalue;
}
__global__ void CUDAconvolution_1D_tiled(float *N, float *P, int Mask_Width, int Width) {
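// Tiled 1D convolution: each block stages its TILE_SIZE input elements plus left and right
// halo regions in shared memory (N_ds) and reads the mask from constant memory c_M.
// Assumes blockDim.x == TILE_SIZE and Mask_Width <= MAX_MASK_WIDTH so the halos fit in N_ds.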
int i = blockIdx.x*blockDim.x + threadIdx.x;
//printf("tid: %d ", i);
__shared__ float N_ds[TILE_SIZE + MAX_MASK_WIDTH - 1];
int n1 = Mask_Width/2;
int halo_index_left = (blockIdx.x - 1)*blockDim.x + threadIdx.x;
if (threadIdx.x >= blockDim.x - n1) {
N_ds[threadIdx.x - (blockDim.x - n1)] =
(halo_index_left < 0) ? 0 : N[halo_index_left];
}
N_ds[n1 + threadIdx.x] = N[blockIdx.x*blockDim.x + threadIdx.x];
int halo_index_right = (blockIdx.x + 1)*blockDim.x + threadIdx.x;
if (threadIdx.x < n1) {
N_ds[n1 + blockDim.x + threadIdx.x] = (halo_index_right >= Width) ? 0 : N[halo_index_right];
}
__syncthreads();
float Pvalue = 0;
for(int j = 0; j < Mask_Width; j++) {
Pvalue += N_ds[threadIdx.x + j]*c_M[j];
}
/*if(Pvalue!=0)
printf("value: %f", Pvalue);*/
P[i] = Pvalue;
//printf("tid %d Pvalue: %lf ", i, Pvalue );
}
template <typename vec>
void printVector(vec *V, int size){
for(int i = 0; i < size; i++){
cout<< V[i] << " ";
}
cout << endl;
}
void generateRandom(float *h_a, int size){
// Initialize seed
srand(time(NULL));
for(int i=0; i<size; i++){
h_a[i] = float(rand() % 10 +1);
}
}
void parallelConvolutionTiled1D() {
float *d_N, *d_P;
cudaMalloc((void **)&d_N, sizeN);
cudaMalloc((void **)&d_P, sizeN);
// copy data from host to device
cudaMemcpy(d_N, h_N, sizeN, cudaMemcpyHostToDevice);
// Transfer data to constant memory
cudaMemcpyToSymbol(c_M, h_M, sizeM);
// define timers
cudaEvent_t start, stop;
// events to take time
cudaEventCreate(&start);
cudaEventCreate(&stop);
// start timer
cudaEventRecord(start,0);
//Launch kernel
CUDAconvolution_1D_tiled<<<blocks, threads>>> (d_N, d_P, m, n);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&sharedMemTimer, start, stop);
cudaDeviceSynchronize();
cout<< "Elapsed parallel 1D convolution (Shared-Constant Mem) : " << sharedMemTimer << " ms, " << sharedMemTimer / 1000 << " secs" <<endl;
cudaMemcpy(h_P, d_P, sizeN, cudaMemcpyDeviceToHost);
//printVector(h_P, n);
cudaFree(d_N); cudaFree(d_P); // c_M lives in __constant__ memory and is not freed with cudaFree
}
void parallelConvolutionConstant1D(){
float *d_N, *d_P;
cudaMalloc((void **)&d_N, sizeN);
cudaMalloc((void **)&d_P, sizeN);
// copy data from host to device
cudaMemcpy(d_N, h_N, sizeN, cudaMemcpyHostToDevice);
// Transfer data to constant memory
cudaMemcpyToSymbol(c_M, h_M, sizeM);
// define timers
cudaEvent_t start, stop;
// events to take time
cudaEventCreate(&start);
cudaEventCreate(&stop);
// start timer
cudaEventRecord(start,0);
//Launch kernel
CUDAConvolutionConstant1D<<<blocks, threads>>>(d_N, d_P, m, n);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&constantMemTimer, start, stop);
cudaDeviceSynchronize();
cout<< "Elapsed parallel 1D convolution (Constant Mem) : " << constantMemTimer << " ms, " << constantMemTimer / 1000 << " secs" <<endl;
cudaMemcpy(h_P, d_P, sizeN, cudaMemcpyDeviceToHost);
//cout<< "Resulting P vector (Constant)" << endl;
//printVector(h_P, n);
cudaFree(d_N); cudaFree(d_P); // c_M lives in __constant__ memory and is not freed with cudaFree
}
void parallelConvolution1D(){
float *d_N, *d_M, *d_P;
// Allocate memory on the device
cudaMalloc((void **)&d_N, sizeN);
cudaMalloc((void **)&d_M, sizeM);
cudaMalloc((void **)&d_P, sizeN);
// Transfer data from host to device
cudaMemcpy(d_N, h_N, sizeN, cudaMemcpyHostToDevice);
cudaMemcpy(d_M, h_M, sizeM, cudaMemcpyHostToDevice);
// define timers
cudaEvent_t start, stop;
// events to take time
cudaEventCreate(&start);
cudaEventCreate(&stop);
// start timer
cudaEventRecord(start,0);
//Launch kernel
CUDAConvolution1D<<<blocks, threads>>>(d_N, d_M, d_P, m, n);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&globalMemTimer, start, stop);
//cudaDeviceSynchronize();
cout<< "Elapsed parallel 1D convolution (Global Mem) : " << globalMemTimer << " ms, " << globalMemTimer / 1000 << " secs" <<endl;
cudaMemcpy(h_P, d_P, sizeN, cudaMemcpyDeviceToHost);
//cout<< "Resulting P vector (Global)" << endl;
//printVector(h_P, n);
//free(h_N); free(h_M); free(h_P);
cudaFree(d_M); cudaFree(d_N); cudaFree(d_P);
}
|
2497e73c643f6dbcefb7a8c9a4241ca6c79cd797.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/reordering.cu.h>
#include <spconv/reordering.h>
#include <tensorview/cuda_utils.h>
#include <tensorview/kernel_utils.h>
#include <tensorview/mp_helper.h>
#include <tensorview/tensor.h>
#include <tensorview/tensorview.h>
#include <tensorview/torch_utils.h>
#include <type_traits>
#include <utility/timer.h>
namespace spconv {
using float_types_t = tv::mp_list<float, double, at::Half>;
using int_types_t = tv::mp_list<int32_t, int64_t>;
template <typename T>
struct half_vec{
using type = typename std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>;
};
template <typename T>
struct half_vec_sadd{
using type = typename std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>;
};
using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>;
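// Candidate tile widths (NumTLP) tried largest-first; the first value that evenly divides
// numPlanes selects the vectorized block kernels, otherwise the generic kernel is the fallback.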
void sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features,
torch::Tensor indices, int size) {
if (size <= 0)
return;
int numPlanes = features.size(1);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto dtype = features.scalar_type();
auto inds_dtype = indices.scalar_type();
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = TV_DECLTYPE(TValue);
using vecload_type_t = typename half_vec_sadd<T>::type;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = TV_DECLTYPE(IndexValue);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
tv::mp_for_each<kernel_block_t>([=, &buffer, &features, &indices,
¬Found](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
// constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
hipLaunchKernelGGL(( gatherVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(size / NumTLP, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
stream, buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor);
#ifdef TV_LOG_KERNEL_INFO
hipFuncAttributes attr;
checkCudaErrors(hipFuncGetAttributes(
&attr, gatherVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>));
tv::ssprint("gatherVecBlockKernel<", tv::type_s<T>,
tv::type_s<Index>, int(NumTLP), NumILP, ">",
attr.numRegs);
#endif
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
hipLaunchKernelGGL(( gatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>)
, dim3(dim3(1, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
stream, buffer.data_ptr<T>() + nHotBlock * numPlanes,
features.data_ptr<T>(),
indices.data_ptr<Index>() + nHotBlock,
size - nHotBlock, numPlanes / vecloadFactor);
#ifdef TV_LOG_KERNEL_INFO
hipFuncAttributes attr;
checkCudaErrors(hipFuncGetAttributes(
&attr, gatherVecKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>));
tv::ssprint("gatherVecKernel<", tv::type_s<T>, tv::type_s<Index>,
int(NumTLP), NumILP, ">", attr.numRegs);
#endif
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
hipLaunchKernelGGL(( gatherGenericKernel<T, Index, NumTLP, NumILP>)
, dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream,
buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), size, numPlanes);
#ifdef TV_LOG_KERNEL_INFO
hipFuncAttributes attr;
checkCudaErrors(hipFuncGetAttributes(
&attr, gatherGenericKernel<T, Index, NumTLP, NumILP>));
tv::ssprint("gatherGenericKernel<", tv::type_s<T>, tv::type_s<Index>,
int(NumTLP), NumILP, ">", attr.numRegs);
#endif
TV_CHECK_CUDA_ERR();
}
});
});
}
void sparse_scatter_add_cuda(torch::Tensor buffer, torch::Tensor outFeatures,
torch::Tensor indices, int size) {
if (size <= 0)
return;
int numPlanes = outFeatures.size(1);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto dtype = outFeatures.scalar_type();
auto inds_dtype = indices.scalar_type();
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = TV_DECLTYPE(TValue);
using vecload_type_t = typename half_vec_sadd<T>::type;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = TV_DECLTYPE(IndexValue);
bool notFound = true;
constexpr int vecloadFactor =
sizeof(vecload_type_t) / sizeof(T); // important for half.
tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices,
¬Found](auto NumTLP) {
// constexpr int NumILP = NumTLP / (64 / (NumTLP /
// vecloadFactor));
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
hipLaunchKernelGGL(( scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(size / NumTLP, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor);
#ifdef TV_LOG_KERNEL_INFO
hipFuncAttributes attr;
checkCudaErrors(hipFuncGetAttributes(
&attr, scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>));
tv::ssprint("scatterAddVecBlockKernel<", tv::type_s<T>,
tv::type_s<Index>, int(NumTLP), NumILP, ">",
attr.numRegs);
#endif
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
hipLaunchKernelGGL(( scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>)
, dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)),
0, stream, outFeatures.data_ptr<T>(),
buffer.data_ptr<T>() + nHotBlock * numPlanes,
indices.data_ptr<Index>() + nHotBlock,
size - nHotBlock, numPlanes);
#ifdef TV_LOG_KERNEL_INFO
hipFuncAttributes attr;
checkCudaErrors(hipFuncGetAttributes(
&attr,
scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>));
tv::ssprint("scatterAddGenericKernel<", tv::type_s<T>,
tv::type_s<Index>, int(NumTLP), NumILP, ">",
attr.numRegs);
#endif
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
hipLaunchKernelGGL(( scatterAddGenericKernel<T, Index, NumTLP, NumILP>)
, dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream,
outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), size, numPlanes);
#ifdef TV_LOG_KERNEL_INFO
hipFuncAttributes attr;
checkCudaErrors(hipFuncGetAttributes(
&attr, scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>));
tv::ssprint("notfound scatterAddGenericKernel<", tv::type_s<T>,
tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs);
#endif
TV_CHECK_CUDA_ERR();
}
});
});
}
void batch_sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features,
torch::Tensor indices, int size) {
// indices: [volume, inds_stride]
// buffer: [volume, num_points, num_features]
// size == volume * num_points
if (size <= 0)
return;
int numPlanes = features.size(1);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto dtype = features.scalar_type();
auto inds_dtype = indices.scalar_type();
int inds_stride = indices.size(1);
int feature_stride = buffer.size(1);
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = TV_DECLTYPE(TValue);
using vecload_type_t = typename half_vec<T>::type;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = TV_DECLTYPE(IndexValue);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
tv::mp_for_each<kernel_block_t>(
[=, &buffer, &features, &indices, ¬Found](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
// constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
hipLaunchKernelGGL(( batchGatherVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(size / NumTLP, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
stream, buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR_V2("batchGatherVecBlockKernel");
}
if (size - nHotBlock > 0) {
hipLaunchKernelGGL(( batchGatherVecKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(1, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
stream, buffer.data_ptr<T>() + nHotBlock * numPlanes,
features.data_ptr<T>(),
indices.data_ptr<Index>(), size - nHotBlock,
nHotBlock, numPlanes / vecloadFactor,
inds_stride, feature_stride);
TV_CHECK_CUDA_ERR_V2("batchGatherVecKernel");
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
hipLaunchKernelGGL(( batchGatherGenericKernel<T, Index, NumTLP, NumILP>)
, dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream,
buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), size, numPlanes, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
});
});
}
void batch_sparse_scatter_add_cuda(torch::Tensor buffer,
torch::Tensor outFeatures,
torch::Tensor indices, int size) {
// indices: [volume, inds_stride]
// buffer: [volume, num_points, num_features]
// size == volume * num_points
if (size <= 0)
return;
int numPlanes = outFeatures.size(1);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto dtype = outFeatures.scalar_type();
auto inds_dtype = indices.scalar_type();
int inds_stride = indices.size(1);
int feature_stride = buffer.size(1);
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = TV_DECLTYPE(TValue);
using vecload_type_t = typename half_vec_sadd<T>::type;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = TV_DECLTYPE(IndexValue);
bool notFound = true;
constexpr int vecloadFactor = 1; // important for half.
tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices,
¬Found](auto NumTLP) {
// constexpr int NumILP = NumTLP / (64 / (NumTLP /
// vecloadFactor));
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
hipLaunchKernelGGL(( batchScatterAddBlockKernel<T, Index, int(NumTLP), NumILP>)
, dim3(dim3(size / NumTLP, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
hipLaunchKernelGGL(( batchScatterAddGenericKernel<T, Index, int(NumTLP), NumILP>)
, dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)),
0, stream, outFeatures.data_ptr<T>(),
buffer.data_ptr<T>() + nHotBlock * numPlanes,
indices.data_ptr<Index>(), size - nHotBlock,
nHotBlock, numPlanes, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
hipLaunchKernelGGL(( batchScatterAddGenericKernel<T, Index, NumTLP, NumILP>)
, dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream,
outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), size, 0, numPlanes, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
});
});
}
} // namespace spconv
| 2497e73c643f6dbcefb7a8c9a4241ca6c79cd797.cu | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/reordering.cu.h>
#include <spconv/reordering.h>
#include <tensorview/cuda_utils.h>
#include <tensorview/kernel_utils.h>
#include <tensorview/mp_helper.h>
#include <tensorview/tensor.h>
#include <tensorview/tensorview.h>
#include <tensorview/torch_utils.h>
#include <type_traits>
#include <utility/timer.h>
namespace spconv {
using float_types_t = tv::mp_list<float, double, at::Half>;
using int_types_t = tv::mp_list<int32_t, int64_t>;
template <typename T>
struct half_vec{
using type = typename std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>;
};
template <typename T>
struct half_vec_sadd{
using type = typename std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>;
};
using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>;
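// Candidate tile widths (NumTLP) tried largest-first; the first value that evenly divides
// numPlanes selects the vectorized block kernels, otherwise the generic kernel is the fallback.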
void sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features,
torch::Tensor indices, int size) {
if (size <= 0)
return;
int numPlanes = features.size(1);
auto stream = at::cuda::getCurrentCUDAStream();
auto dtype = features.scalar_type();
auto inds_dtype = indices.scalar_type();
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = TV_DECLTYPE(TValue);
using vecload_type_t = typename half_vec_sadd<T>::type;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = TV_DECLTYPE(IndexValue);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
tv::mp_for_each<kernel_block_t>([=, &buffer, &features, &indices,
¬Found](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
// constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
gatherVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(size / NumTLP, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
stream>>>(buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor);
#ifdef TV_LOG_KERNEL_INFO
cudaFuncAttributes attr;
checkCudaErrors(cudaFuncGetAttributes(
&attr, gatherVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>));
tv::ssprint("gatherVecBlockKernel<", tv::type_s<T>,
tv::type_s<Index>, int(NumTLP), NumILP, ">",
attr.numRegs);
#endif
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
gatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>
<<<dim3(1, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
stream>>>(buffer.data_ptr<T>() + nHotBlock * numPlanes,
features.data_ptr<T>(),
indices.data_ptr<Index>() + nHotBlock,
size - nHotBlock, numPlanes / vecloadFactor);
#ifdef TV_LOG_KERNEL_INFO
cudaFuncAttributes attr;
checkCudaErrors(cudaFuncGetAttributes(
&attr, gatherVecKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>));
tv::ssprint("gatherVecKernel<", tv::type_s<T>, tv::type_s<Index>,
int(NumTLP), NumILP, ">", attr.numRegs);
#endif
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
gatherGenericKernel<T, Index, NumTLP, NumILP>
<<<dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), size, numPlanes);
#ifdef TV_LOG_KERNEL_INFO
cudaFuncAttributes attr;
checkCudaErrors(cudaFuncGetAttributes(
&attr, gatherGenericKernel<T, Index, NumTLP, NumILP>));
tv::ssprint("gatherGenericKernel<", tv::type_s<T>, tv::type_s<Index>,
int(NumTLP), NumILP, ">", attr.numRegs);
#endif
TV_CHECK_CUDA_ERR();
}
});
});
}
void sparse_scatter_add_cuda(torch::Tensor buffer, torch::Tensor outFeatures,
torch::Tensor indices, int size) {
if (size <= 0)
return;
int numPlanes = outFeatures.size(1);
auto stream = at::cuda::getCurrentCUDAStream();
auto dtype = outFeatures.scalar_type();
auto inds_dtype = indices.scalar_type();
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = TV_DECLTYPE(TValue);
using vecload_type_t = typename half_vec_sadd<T>::type;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = TV_DECLTYPE(IndexValue);
bool notFound = true;
constexpr int vecloadFactor =
sizeof(vecload_type_t) / sizeof(T); // important for half.
tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices,
¬Found](auto NumTLP) {
// constexpr int NumILP = NumTLP / (64 / (NumTLP /
// vecloadFactor));
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(size / NumTLP, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
stream>>>(outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor);
#ifdef TV_LOG_KERNEL_INFO
cudaFuncAttributes attr;
checkCudaErrors(cudaFuncGetAttributes(
&attr, scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>));
tv::ssprint("scatterAddVecBlockKernel<", tv::type_s<T>,
tv::type_s<Index>, int(NumTLP), NumILP, ">",
attr.numRegs);
#endif
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>
<<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP),
0, stream>>>(outFeatures.data_ptr<T>(),
buffer.data_ptr<T>() + nHotBlock * numPlanes,
indices.data_ptr<Index>() + nHotBlock,
size - nHotBlock, numPlanes);
#ifdef TV_LOG_KERNEL_INFO
cudaFuncAttributes attr;
checkCudaErrors(cudaFuncGetAttributes(
&attr,
scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>));
tv::ssprint("scatterAddGenericKernel<", tv::type_s<T>,
tv::type_s<Index>, int(NumTLP), NumILP, ">",
attr.numRegs);
#endif
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
scatterAddGenericKernel<T, Index, NumTLP, NumILP>
<<<dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), size, numPlanes);
#ifdef TV_LOG_KERNEL_INFO
cudaFuncAttributes attr;
checkCudaErrors(cudaFuncGetAttributes(
&attr, scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>));
tv::ssprint("notfound scatterAddGenericKernel<", tv::type_s<T>,
tv::type_s<Index>, int(NumTLP), NumILP, ">", attr.numRegs);
#endif
TV_CHECK_CUDA_ERR();
}
});
});
}
void batch_sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features,
torch::Tensor indices, int size) {
// indices: [volume, inds_stride]
// buffer: [volume, num_points, num_features]
// size == volume * num_points
if (size <= 0)
return;
int numPlanes = features.size(1);
auto stream = at::cuda::getCurrentCUDAStream();
auto dtype = features.scalar_type();
auto inds_dtype = indices.scalar_type();
int inds_stride = indices.size(1);
int feature_stride = buffer.size(1);
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = TV_DECLTYPE(TValue);
using vecload_type_t = typename half_vec<T>::type;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = TV_DECLTYPE(IndexValue);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
tv::mp_for_each<kernel_block_t>(
[=, &buffer, &features, &indices, ¬Found](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
// constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
batchGatherVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(size / NumTLP, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
stream>>>(buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR_V2("batchGatherVecBlockKernel");
}
if (size - nHotBlock > 0) {
batchGatherVecKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(1, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
stream>>>(buffer.data_ptr<T>() + nHotBlock * numPlanes,
features.data_ptr<T>(),
indices.data_ptr<Index>(), size - nHotBlock,
nHotBlock, numPlanes / vecloadFactor,
inds_stride, feature_stride);
TV_CHECK_CUDA_ERR_V2("batchGatherVecKernel");
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
batchGatherGenericKernel<T, Index, NumTLP, NumILP>
<<<dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), size, numPlanes, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
});
});
}
void batch_sparse_scatter_add_cuda(torch::Tensor buffer,
torch::Tensor outFeatures,
torch::Tensor indices, int size) {
// indices: [volume, inds_stride]
// buffer: [volume, num_points, num_features]
// size == volume * num_points
if (size <= 0)
return;
int numPlanes = outFeatures.size(1);
auto stream = at::cuda::getCurrentCUDAStream();
auto dtype = outFeatures.scalar_type();
auto inds_dtype = indices.scalar_type();
int inds_stride = indices.size(1);
int feature_stride = buffer.size(1);
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = TV_DECLTYPE(TValue);
using vecload_type_t = typename half_vec_sadd<T>::type;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = TV_DECLTYPE(IndexValue);
bool notFound = true;
constexpr int vecloadFactor = 1; // important for half.
tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices,
¬Found](auto NumTLP) {
// constexpr int NumILP = NumTLP / (64 / (NumTLP /
// vecloadFactor));
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
batchScatterAddBlockKernel<T, Index, int(NumTLP), NumILP>
<<<dim3(size / NumTLP, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
stream>>>(outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
batchScatterAddGenericKernel<T, Index, int(NumTLP), NumILP>
<<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP),
0, stream>>>(outFeatures.data_ptr<T>(),
buffer.data_ptr<T>() + nHotBlock * numPlanes,
indices.data_ptr<Index>(), size - nHotBlock,
nHotBlock, numPlanes, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
batchScatterAddGenericKernel<T, Index, NumTLP, NumILP>
<<<dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), size, 0, numPlanes, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
});
});
}
} // namespace spconv
|
7f7c2e78686f5b23da3617e01fc21b4e44bee80d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "kernel.cu.h"
Timer timer1;
int main(){
//test();
//DLCode();
//testRand();
DL_encapsulated();
hipDeviceReset();
}
__global__ void uniformTest_kernel(double* d_samples, hiprandState_t* d_localstates)
{
int length = 16;
int sid = threadIdx.x * length;
hiprandState_t localState = d_localstates[threadIdx.x];
for (int i = 0; i < length; i++)
{
d_samples[sid + i] = hiprand_uniform(&localState);
}
}
__global__ void gammaTest_kernel(hiprandState_t* d_localstates, double2* params, double* d_samples, int length)
{
int sid = threadIdx.x * length;
hiprandState_t localState = d_localstates[threadIdx.x];
for (int i = 0; i < length; i++)
{
gamrnd_d(d_samples + sid + i, params, &localState);
}
}
__global__ void betaTest_kernel(hiprandState_t* d_localstates, double2* params, double* d_samples, int length)
{
int sid = threadIdx.x * length;
hiprandState_t localState = d_localstates[threadIdx.x];
for (int i = 0; i < length; i++)
{
betarnd_d(d_samples + sid + i, params, &localState);
}
}
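// Host-side test driver: draws `length` samples per RNG state with the gamma and beta
// kernels above and dumps them to gamout.csv / betaout.csv for offline distribution checks.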
void testRand()
{
hiprandState_t* d_states;
int seqs = 200;
int length = 32;
hipMalloc(&d_states, sizeof(hiprandState_t) * seqs);
hipLaunchKernelGGL(( setup_kernel), dim3(1), dim3(seqs), 0, 0, d_states, time(NULL));
double* samples = new double[seqs * length];
double* d_samples;
size_t bytes = sizeof(double)*seqs * length;
hipMalloc(&d_samples, bytes);
hipMemset(d_samples, 0, bytes);
/*uniformTest_kernel<<<1, seqs>>>(d_samples, d_states);
hipMemcpy(samples, d_samples, bytes, hipMemcpyDeviceToHost);
for (int s = 0; s < seqs; s++)
{
for (int i = 0; i < length; i++)
{
printf("%4.2f ", samples[s * length + i]);
}
cout << endl;
}*/
{
cout << "Gamma Distro" << endl;
double2 params{ 0.8, 6000 };
double2* params_d;
hipMalloc(¶ms_d, sizeof(double2));
hipMemcpy(params_d, ¶ms, sizeof(double2), hipMemcpyHostToDevice);
gammaTest_kernel << <1, seqs >> >(d_states, params_d, d_samples, length);
hipMemcpy(samples, d_samples, bytes, hipMemcpyDeviceToHost);
ofstream gcsv("gamout.csv");
for (int s = 0; s < seqs; s++)
{
for (int i = 0; i < length; i++)
{
gcsv << samples[s * length + i] << endl;
}
}
gcsv.close();
}
{
cout << "Beta Distro" << endl;
double2 params{ 0.8, 3000 };
double2* params_d;
hipMalloc(¶ms_d, sizeof(double2));
hipMemcpy(params_d, ¶ms, sizeof(double2), hipMemcpyHostToDevice);
betaTest_kernel << <1, seqs >> >(d_states, params_d, d_samples, length);
hipMemcpy(samples, d_samples, bytes, hipMemcpyDeviceToHost);
ofstream gcsv("betaout.csv");
for (int s = 0; s < seqs; s++)
{
for (int i = 0; i < length; i++)
{
gcsv << samples[s * length + i] << endl;
}
}
gcsv.close();
}
// Free mems
delete[] samples;
hipFree(d_samples);
hipFree(d_states);
}
int testPerformance()
{
int S = 20;
gpuMat<float> Y(S, S);
gpuMat<bool> B(S, S);
gpuMat<double> C(S, S);
cout << Y.cols << "by" << Y.rows << endl;
for (int i = 0; i < S; i++)
{
for (int j = 0; j < S; j++)
{
Y(i, j) = i*Y.cols + j;
B(i, j) = (i >= j);
}
}
Y.copy2Device();
B.copy2Device();
Y.print();
B.print();
// CUBLAS TEST
/*float al = 1;
float bet = 0;
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, S, S, S, &al, Y.d_elems, S, B.d_elems, S, &bet, C.d_elems, S);*/
MatMul<float, bool, double>(Y.d_elems, B.d_elems, C.d_elems, S, S, S);
C.copy2Host();
C.print();
// Test functions for rectangular matrices
int m = 682768, n = 256, k = 128;
gpuMat<float> mat1(m, k);
gpuMat<float> vec1(k, n);
gpuMat<float> result(m, n);
for (int i = 0; i < m; i++)
{
for (int j = 0; j < k; j++)
{
//mat1(i, j) = ((i + 1)%(j + 1));
mat1(i, j) = (float)rand() / RAND_MAX - 0.5;
}
}
for (int i = 0; i < k; i++)
{
for (int j = 0; j < n; j++)
{
vec1(i, j) = (float)rand() / RAND_MAX - 0.5;
}
}
mat1.print();
vec1.print();
mat1.copy2Device();
vec1.copy2Device();
cout << "Using my API." << endl;
{
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventRecord(start, 0);
//Do kernel activity here
MatMul<float, float, float>(mat1.d_elems, vec1.d_elems, result.d_elems, m, n, k);
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Elapsed time : %f ms\n", elapsedTime);
}
result.copy2Host();
result.print();
cout << "Using CUBLAS" << endl;
float al = 1;
float bet = 0;
hipblasHandle_t handle;
hipblasCreate(&handle);
{
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventRecord(start, 0);
//Do kernel activity here
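// hipBLAS/cuBLAS assumes column-major storage, hence leading dimensions m (A), k (B) and m (C).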
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &al, mat1.d_elems, m, vec1.d_elems, k, &bet, result.d_elems, m);
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Elapsed time : %f ms\n", elapsedTime);
}
result.copy2Host();
result.print();
/*cout << "Calculating in host CPU | Single thread" << endl;
{
timer1.start();
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
{
double cvalue = 0;
for (int l = 0; l < k; l++)
{
cvalue += mat1(i, l)*vec1(l, j);
}
result(i, j) = cvalue;
}
}
timer1.stop();
}
result.print();*/
return 0;
}
int calcN(int imsize, int patchsize, int imcount)
{
return (imsize - patchsize + 1)*(imsize - patchsize + 1)*imcount;
}
void DLCode()
{
int propImSize = 256;
int propPatchSize = 8;
int propImCount = 5;
int N = calcN(propImSize, propPatchSize, propImCount);
int M = propPatchSize*propPatchSize;
int K = 100;
cout << "M: " << M << ", N: " << N << ", K: " << K << endl;
ImLoader imloader(propImSize, propPatchSize);
gpuMat<double> Y(M, N);
imloader.GetDataMatrix(Y, propImCount);
DLConfig config1;
DLConfig *config1_d;
hipMalloc(&config1_d, sizeof(DLConfig));
hipMemcpy(config1_d, &config1, sizeof(DLConfig), hipMemcpyHostToDevice);
gpuMat<double> D(M, K);
gpuMat<double> S(K, N);
gpuMat<bool> B(K, N);
gpuMat<double> PI(K, 1);
gpuMat<double> post_PI(K, N);
ModelParams modelParams1;
}
void DL_encapsulated(){
DLLayer layer1;
} | 7f7c2e78686f5b23da3617e01fc21b4e44bee80d.cu | #pragma once
#include "kernel.cu.h"
Timer timer1;
int main(){
//test();
//DLCode();
//testRand();
DL_encapsulated();
cudaDeviceReset();
}
__global__ void uniformTest_kernel(double* d_samples, curandState_t* d_localstates)
{
int length = 16;
int sid = threadIdx.x * length;
curandState_t localState = d_localstates[threadIdx.x];
for (int i = 0; i < length; i++)
{
d_samples[sid + i] = curand_uniform(&localState);
}
}
__global__ void gammaTest_kernel(curandState_t* d_localstates, double2* params, double* d_samples, int length)
{
int sid = threadIdx.x * length;
curandState_t localState = d_localstates[threadIdx.x];
for (int i = 0; i < length; i++)
{
gamrnd_d(d_samples + sid + i, params, &localState);
}
}
__global__ void betaTest_kernel(curandState_t* d_localstates, double2* params, double* d_samples, int length)
{
int sid = threadIdx.x * length;
curandState_t localState = d_localstates[threadIdx.x];
for (int i = 0; i < length; i++)
{
betarnd_d(d_samples + sid + i, params, &localState);
}
}
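// Host-side test driver: draws `length` samples per RNG state with the gamma and beta
// kernels above and dumps them to gamout.csv / betaout.csv for offline distribution checks.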
void testRand()
{
curandState_t* d_states;
int seqs = 200;
int length = 32;
cudaMalloc(&d_states, sizeof(curandState_t) * seqs);
setup_kernel<<<1, seqs>>>(d_states, time(NULL));
double* samples = new double[seqs * length];
double* d_samples;
size_t bytes = sizeof(double)*seqs * length;
cudaMalloc(&d_samples, bytes);
cudaMemset(d_samples, 0, bytes);
/*uniformTest_kernel<<<1, seqs>>>(d_samples, d_states);
cudaMemcpy(samples, d_samples, bytes, cudaMemcpyDeviceToHost);
for (int s = 0; s < seqs; s++)
{
for (int i = 0; i < length; i++)
{
printf("%4.2f ", samples[s * length + i]);
}
cout << endl;
}*/
{
cout << "Gamma Distro" << endl;
double2 params{ 0.8, 6000 };
double2* params_d;
cudaMalloc(¶ms_d, sizeof(double2));
cudaMemcpy(params_d, ¶ms, sizeof(double2), cudaMemcpyHostToDevice);
gammaTest_kernel << <1, seqs >> >(d_states, params_d, d_samples, length);
cudaMemcpy(samples, d_samples, bytes, cudaMemcpyDeviceToHost);
ofstream gcsv("gamout.csv");
for (int s = 0; s < seqs; s++)
{
for (int i = 0; i < length; i++)
{
gcsv << samples[s * length + i] << endl;
}
}
gcsv.close();
}
{
cout << "Beta Distro" << endl;
double2 params{ 0.8, 3000 };
double2* params_d;
cudaMalloc(¶ms_d, sizeof(double2));
cudaMemcpy(params_d, ¶ms, sizeof(double2), cudaMemcpyHostToDevice);
betaTest_kernel << <1, seqs >> >(d_states, params_d, d_samples, length);
cudaMemcpy(samples, d_samples, bytes, cudaMemcpyDeviceToHost);
ofstream gcsv("betaout.csv");
for (int s = 0; s < seqs; s++)
{
for (int i = 0; i < length; i++)
{
gcsv << samples[s * length + i] << endl;
}
}
gcsv.close();
}
// Free mems
delete[] samples;
cudaFree(d_samples);
cudaFree(d_states);
}
int testPerformance()
{
int S = 20;
gpuMat<float> Y(S, S);
gpuMat<bool> B(S, S);
gpuMat<double> C(S, S);
cout << Y.cols << "by" << Y.rows << endl;
for (int i = 0; i < S; i++)
{
for (int j = 0; j < S; j++)
{
Y(i, j) = i*Y.cols + j;
B(i, j) = (i >= j);
}
}
Y.copy2Device();
B.copy2Device();
Y.print();
B.print();
// CUBLAS TEST
/*float al = 1;
float bet = 0;
cublasHandle_t handle;
cublasCreate(&handle);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, S, S, S, &al, Y.d_elems, S, B.d_elems, S, &bet, C.d_elems, S);*/
MatMul<float, bool, double>(Y.d_elems, B.d_elems, C.d_elems, S, S, S);
C.copy2Host();
C.print();
// Test functions for rectangular matrices
int m = 682768, n = 256, k = 128;
gpuMat<float> mat1(m, k);
gpuMat<float> vec1(k, n);
gpuMat<float> result(m, n);
for (int i = 0; i < m; i++)
{
for (int j = 0; j < k; j++)
{
//mat1(i, j) = ((i + 1)%(j + 1));
mat1(i, j) = (float)rand() / RAND_MAX - 0.5;
}
}
for (int i = 0; i < k; i++)
{
for (int j = 0; j < n; j++)
{
vec1(i, j) = (float)rand() / RAND_MAX - 0.5;
}
}
mat1.print();
vec1.print();
mat1.copy2Device();
vec1.copy2Device();
cout << "Using my API." << endl;
{
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
//Do kernel activity here
MatMul<float, float, float>(mat1.d_elems, vec1.d_elems, result.d_elems, m, n, k);
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Elapsed time : %f ms\n", elapsedTime);
}
result.copy2Host();
result.print();
cout << "Using CUBLAS" << endl;
float al = 1;
float bet = 0;
cublasHandle_t handle;
cublasCreate(&handle);
{
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
//Do kernel activity here
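// cuBLAS assumes column-major storage, hence leading dimensions m (A), k (B) and m (C).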
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &al, mat1.d_elems, m, vec1.d_elems, k, &bet, result.d_elems, m);
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Elapsed time : %f ms\n", elapsedTime);
}
result.copy2Host();
result.print();
/*cout << "Calculating in host CPU | Single thread" << endl;
{
timer1.start();
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
{
double cvalue = 0;
for (int l = 0; l < k; l++)
{
cvalue += mat1(i, l)*vec1(l, j);
}
result(i, j) = cvalue;
}
}
timer1.stop();
}
result.print();*/
return 0;
}
int calcN(int imsize, int patchsize, int imcount)
{
return (imsize - patchsize + 1)*(imsize - patchsize + 1)*imcount;
}
void DLCode()
{
int propImSize = 256;
int propPatchSize = 8;
int propImCount = 5;
int N = calcN(propImSize, propPatchSize, propImCount);
int M = propPatchSize*propPatchSize;
int K = 100;
cout << "M: " << M << ", N: " << N << ", K: " << K << endl;
ImLoader imloader(propImSize, propPatchSize);
gpuMat<double> Y(M, N);
imloader.GetDataMatrix(Y, propImCount);
DLConfig config1;
DLConfig *config1_d;
cudaMalloc(&config1_d, sizeof(DLConfig));
cudaMemcpy(config1_d, &config1, sizeof(DLConfig), cudaMemcpyHostToDevice);
gpuMat<double> D(M, K);
gpuMat<double> S(K, N);
gpuMat<bool> B(K, N);
gpuMat<double> PI(K, 1);
gpuMat<double> post_PI(K, N);
ModelParams modelParams1;
}
void DL_encapsulated(){
DLLayer layer1;
} |
03eea4c453ae7cf9444dfaf8d39eeb2c60befc3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void squareMatrixMult(float *d_a, float *d_b, float *d_result, int n)
{
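// Classic tiled matrix multiply: each iteration of the sub loop stages one BLOCK_SIZE x
// BLOCK_SIZE tile of d_a and d_b in shared memory, zero-padding when n is not a multiple
// of BLOCK_SIZE, then accumulates the partial dot product for this thread's output element.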
__shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
float tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub) {
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if(idx >= n*n) {
// n may not be divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else {
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if(idx >= n*n) {
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else {
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads();
for (int k = threadIdx.x/n; k < BLOCK_SIZE; ++k) {
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads();
}
if(row < n && col < n) {
d_result[row * n + col] = tmp;
}
} | 03eea4c453ae7cf9444dfaf8d39eeb2c60befc3f.cu | #include "includes.h"
__global__ void squareMatrixMult(float *d_a, float *d_b, float *d_result, int n)
{
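// Classic tiled matrix multiply: each iteration of the sub loop stages one BLOCK_SIZE x
// BLOCK_SIZE tile of d_a and d_b in shared memory, zero-padding when n is not a multiple
// of BLOCK_SIZE, then accumulates the partial dot product for this thread's output element.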
__shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
float tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub) {
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if(idx >= n*n) {
// n may not be divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else {
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if(idx >= n*n) {
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else {
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads();
for (int k = threadIdx.x/n; k < BLOCK_SIZE; ++k) {
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads();
}
if(row < n && col < n) {
d_result[row * n + col] = tmp;
}
} |
46613d5551ac2addecd20bfeba655b343e460bc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuseful.h"
#include "R.h"
#include "kendall.h"
#include "hip/hiprtc.h"
#include "hip/hip_runtime.h"
#define NUMTHREADS 16
#define NVRTC_SAFE_CALL(x) \
do { \
hiprtcResult result = x; \
if (result != HIPRTC_SUCCESS) { \
error("\nerror: %d failed with error %s\n", x, \
hiprtcGetErrorString(result)); \
} \
} while(0)
#define CUDA_SAFE_CALL(x) \
do { \
hipError_t result = x; \
if (result != hipSuccess) { \
const char *msg; \
hipGetErrorName(result, &msg); \
error("\nerror: %d failed with error %s\n", x, msg); \
} \
} while(0)
void masterKendall(const float * x, size_t nx,
const float * y, size_t ny,
size_t sampleSize, double * results,
const char * kernel_src)
{
size_t
outputLength = nx * ny, outputBytes = outputLength*sizeof(double),
xBytes = nx*sampleSize*sizeof(float),
yBytes = ny*sampleSize*sizeof(float);
float
* gpux, * gpuy;
double
* gpuResults;
dim3
initGrid(nx, ny), initBlock(NUMTHREADS, NUMTHREADS);
hipMalloc((void **)&gpux, xBytes);
hipMalloc((void **)&gpuy, yBytes);
checkCudaError("input vector space allocation");
hipMemcpy(gpux, x, xBytes, hipMemcpyHostToDevice);
hipMemcpy(gpuy, y, yBytes, hipMemcpyHostToDevice);
checkCudaError("copying input vectors to gpu");
hipMalloc((void **)&gpuResults, outputBytes);
checkCudaError("allocation of space for result matrix");
hiprtcProgram prog;
NVRTC_SAFE_CALL(
hiprtcCreateProgram(&prog, // prog
kernel_src, // buffer
"kendall", // name
0, // numHeaders
NULL, // headers
NULL)); // includeNames
hiprtcResult compileResult = hiprtcCompileProgram(prog, 0, NULL);
if (compileResult != HIPRTC_SUCCESS) error("cuda kernel compile failed");
// Obtain PTX from the program.
size_t ptxSize;
NVRTC_SAFE_CALL(hiprtcGetCodeSize(prog, &ptxSize));
char *ptx = new char[ptxSize];
NVRTC_SAFE_CALL(hiprtcGetCode(prog, ptx));
// Destroy the program.
NVRTC_SAFE_CALL(hiprtcDestroyProgram(&prog));
// Load the generated PTX and get a handle to the SAXPY kernel.
// hipDevice_t cuDevice;
// hipCtx_t context;
CUDA_SAFE_CALL(hipInit(0));
// CUDA_SAFE_CALL(hipDeviceGet(&cuDevice, 0));
// CUDA_SAFE_CALL(hipCtxCreate(&context, 0, cuDevice));
hipModule_t module;
CUDA_SAFE_CALL(hipModuleLoadDataEx(&module, ptx, 0, 0, 0));
hipFunction_t kernel;
CUDA_SAFE_CALL(hipModuleGetFunction(&kernel, module, "gpuKendall"));
// execute kendall kernel
void *args[] =
{ &gpux
, &nx
, &gpuy
, &ny
, &sampleSize
, &gpuResults
};
CUDA_SAFE_CALL(
hipModuleLaunchKernel(kernel,
nx, ny, 1, // grid dim
NUMTHREADS, NUMTHREADS, 1, // block dim
0, NULL, // shared mem and stream
args, 0)); // arguments
CUDA_SAFE_CALL(hipCtxSynchronize());
hipFree(gpux);
hipFree(gpuy);
hipMemcpy(results, gpuResults, outputBytes, hipMemcpyDeviceToHost);
hipFree(gpuResults);
checkCudaError("copying results from gpu and cleaning up");
CUDA_SAFE_CALL(hipModuleUnload(module));
// CUDA_SAFE_CALL(hipCtxDestroy(context));
}
| 46613d5551ac2addecd20bfeba655b343e460bc1.cu | #include "cuseful.h"
#include "R.h"
#include "kendall.h"
#include "nvrtc.h"
#include "cuda.h"
#define NUMTHREADS 16
#define NVRTC_SAFE_CALL(x) \
do { \
nvrtcResult result = x; \
if (result != NVRTC_SUCCESS) { \
error("\nerror: %d failed with error %s\n", x, \
nvrtcGetErrorString(result)); \
} \
} while(0)
#define CUDA_SAFE_CALL(x) \
do { \
CUresult result = x; \
if (result != CUDA_SUCCESS) { \
const char *msg; \
cuGetErrorName(result, &msg); \
error("\nerror: %d failed with error %s\n", x, msg); \
} \
} while(0)
void masterKendall(const float * x, size_t nx,
const float * y, size_t ny,
size_t sampleSize, double * results,
const char * kernel_src)
{
size_t
outputLength = nx * ny, outputBytes = outputLength*sizeof(double),
xBytes = nx*sampleSize*sizeof(float),
yBytes = ny*sampleSize*sizeof(float);
float
* gpux, * gpuy;
double
* gpuResults;
dim3
initGrid(nx, ny), initBlock(NUMTHREADS, NUMTHREADS);
cudaMalloc((void **)&gpux, xBytes);
cudaMalloc((void **)&gpuy, yBytes);
checkCudaError("input vector space allocation");
cudaMemcpy(gpux, x, xBytes, cudaMemcpyHostToDevice);
cudaMemcpy(gpuy, y, yBytes, cudaMemcpyHostToDevice);
checkCudaError("copying input vectors to gpu");
cudaMalloc((void **)&gpuResults, outputBytes);
checkCudaError("allocation of space for result matrix");
nvrtcProgram prog;
NVRTC_SAFE_CALL(
nvrtcCreateProgram(&prog, // prog
kernel_src, // buffer
"kendall", // name
0, // numHeaders
NULL, // headers
NULL)); // includeNames
nvrtcResult compileResult = nvrtcCompileProgram(prog, 0, NULL);
if (compileResult != NVRTC_SUCCESS) error("cuda kernel compile failed");
// Obtain PTX from the program.
size_t ptxSize;
NVRTC_SAFE_CALL(nvrtcGetPTXSize(prog, &ptxSize));
char *ptx = new char[ptxSize];
NVRTC_SAFE_CALL(nvrtcGetPTX(prog, ptx));
// Destroy the program.
NVRTC_SAFE_CALL(nvrtcDestroyProgram(&prog));
    // Load the generated PTX and get a handle to the gpuKendall kernel.
// CUdevice cuDevice;
// CUcontext context;
CUDA_SAFE_CALL(cuInit(0));
// CUDA_SAFE_CALL(cuDeviceGet(&cuDevice, 0));
// CUDA_SAFE_CALL(cuCtxCreate(&context, 0, cuDevice));
CUmodule module;
CUDA_SAFE_CALL(cuModuleLoadDataEx(&module, ptx, 0, 0, 0));
CUfunction kernel;
CUDA_SAFE_CALL(cuModuleGetFunction(&kernel, module, "gpuKendall"));
// execute kendall kernel
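    // cuLaunchKernel receives the kernel arguments as an array of pointers;
    // the order below must match the parameter list of gpuKendall in kernel_src.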
void *args[] =
{ &gpux
, &nx
, &gpuy
, &ny
, &sampleSize
, &gpuResults
};
CUDA_SAFE_CALL(
cuLaunchKernel(kernel,
nx, ny, 1, // grid dim
NUMTHREADS, NUMTHREADS, 1, // block dim
0, NULL, // shared mem and stream
args, 0)); // arguments
CUDA_SAFE_CALL(cuCtxSynchronize());
cudaFree(gpux);
cudaFree(gpuy);
cudaMemcpy(results, gpuResults, outputBytes, cudaMemcpyDeviceToHost);
cudaFree(gpuResults);
checkCudaError("copying results from gpu and cleaning up");
CUDA_SAFE_CALL(cuModuleUnload(module));
// CUDA_SAFE_CALL(cuCtxDestroy(context));
}
|
8201adabffbc7474b2c4618ea7bff16b2d5af243.hip | // !!! This is a file automatically generated by hipify!!!
#include <cuwrap/kernels/mul.hpp>
#include <initializer_list>
#include <tuple>
namespace cuwrap {
template <typename T>
__global__ void kmul(T* lhs, T* rhs, T* out, std::size_t maxn)
{
    int index = (blockDim.x * blockIdx.x) + threadIdx.x; // Global thread index within the grid.
    int stride = gridDim.x * blockDim.x; // Total number of threads in the grid (grid-stride loop).
for (std::size_t i = index; i < maxn; i += stride)
out[i] = lhs[i] * rhs[i];
}
// (std::size_t n, const T* lhs, const T* rhs, T* out, const kparam_t& param = kparam_t{})
template <typename T>
void mul_impl_t<T>::operator()(std::size_t n, T* lhs, T* rhs, T* out, kparam_t param) // Note: a lot of time is still wasted around each kernel call (allocations and host<->device copies).
{
if (param.is_default_initialized())
param.adapt_amount(n);
T *cl, *cr;
CUWRAP_IF_CUDA_ERR(hipMalloc(&cl, n * sizeof(T)));
if (lhs == rhs)
cr = cl;
else
CUWRAP_IF_CUDA_ERR(hipMalloc(&cr, n * sizeof(T)));
CUWRAP_IF_CUDA_ERR(hipMemcpy(cl, lhs, n * sizeof(T), hipMemcpyHostToDevice));
if (lhs != rhs)
CUWRAP_IF_CUDA_ERR(hipMemcpy(cr, rhs, n * sizeof(T), hipMemcpyHostToDevice));
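    // The kernel writes its result into cr in place (and into cl when lhs == rhs),
    // so no separate device buffer is allocated for out.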
hipLaunchKernelGGL(( kmul), dim3(param.blocks), dim3(param.threads_per_block), param.shared_size, (ihipStream_t*)param.cuda_stream, cl, cr, cr, n);
CUWRAP_IF_CUDA_ERR(hipMemcpy(out, cr, n * sizeof(T), hipMemcpyDeviceToHost));
hipFree(cl);
if (lhs != rhs)
hipFree(cr);
// int mygpu = hipGetDevice(&mygpu); // TODO: Specify the custom setting for GPU choice.
// CUWRAP_IF_CUDA_ERR(hipMallocManaged(&lhs, sizeof(T) * n));
// if (lhs != rhs)
// CUWRAP_IF_CUDA_ERR(hipMallocManaged(&rhs, sizeof(T) * n));
// if (lhs != out && rhs != out)
// CUWRAP_IF_CUDA_ERR(hipMallocManaged(&out, sizeof(T) * n));
// CUWRAP_IF_CUDA_ERR(hipMemPrefetchAsync(lhs, sizeof(T) * n, mygpu)); // => GPU
// CUWRAP_IF_CUDA_ERR(hipMemPrefetchAsync(rhs, sizeof(T) * n, mygpu)); // => GPU
// CUWRAP_IF_CUDA_ERR(hipMemPrefetchAsync(out, sizeof(T) * n, mygpu)); // => GPU
// kadd<<<param.blocks, param.threads_per_block, param.shared_size, (ihipStream_t*)param.cuda_stream>>>(lhs, rhs, out, n);
// CUWRAP_IF_CUDA_ERR(hipDeviceSynchronize());
// CUWRAP_IF_CUDA_ERR(hipMemPrefetchAsync(out, sizeof(T) * n, hipCpuDeviceId)); // => CPU
// hipFree(lhs);
// if (lhs != rhs)
// hipFree(rhs);
// if (lhs != out && rhs != out)
// hipFree(out);
}
template <typename... Ts>
static void force_initialization__()
{
// (add_impl<Ts>(std::size_t{}, nullptr, nullptr, nullptr), ...); // CUDA: We do not support CXX17 currently.
std::initializer_list<std::nullptr_t>{ ((mul_impl_t<Ts>{})(std::size_t{}, nullptr, nullptr, nullptr, kparam_t{}), nullptr)... };
}
void force_initialization_()
{
force_initialization__<CUWRAP_ARITHMETIC_TS>();
}
} // namespace cuwrap
// MAT MUL ==> TO be reimplemented =================================
// #pragma once
// #include "../utils/util.hpp"
// #include "init_hip.cuh"
// #include <hip/hip_runtime.h>
// #include <device_launch_parameters.h>
// #include <type_traits>
// template <
// typename T,
// typename = std::enable_if_t<std::is_arithmetic_v<T>>> __global__
// void tiled_matmul(const T* lhs, const T* rhs, T* out, const int n, const int tile_sz)
// { // This is what ONE thread is doing !!!
// // For one thread => get one value in the output matrix;
// // For one block => get one tile in the output matrix;
// constexpr std::size_t mat_sz = 1024; // As my GPU supports 1024 threads per block :)
// __shared__ T sA[mat_sz], sB[mat_sz]; // Shared in the block.
// // Position for the thread: C[row][col];
// int row = blockIdx.x * tile_sz + threadIdx.x;
// int col = blockIdx.y * tile_sz + threadIdx.y;
// // Tx, Ty \in [0, 31].
// T temp = 0;
// for (int i = 0; i < n / tile_sz; ++i) // across all tiles. ONE block loads 2 tiles per iteration.
// { // In one `block x block` section.
// // To calculate C[row][col] we need to initialize: tiles => A, B // Note that y ~ row, x ~ col.
// sA[tile_sz * threadIdx.y + threadIdx.x] = lhs[row * n + i * tile_sz + threadIdx.x];
// sB[tile_sz * threadIdx.y + threadIdx.x] = rhs[col + i * tile_sz * n + threadIdx.y * n]; // A better way may be to transpose it first.
// __syncthreads(); // BLOCK_lhs & BLOCK_rhs >>> shared memory prepared.
// for (int j = 0; j < tile_sz; ++j) // Micro kernel. Consider sA & sB only.
// temp += sA[tile_sz * threadIdx.y + j] * sB[j * tile_sz + threadIdx.x];
// __syncthreads();
// }
// out[row * n + col] = temp;
// }
// template <
// typename T,
// typename = std::enable_if_t<std::is_arithmetic_v<T>>> __global__
// void naive_matmul(const T* lhs, const T* rhs, T* out, const int n)
// {
// int row = blockDim.y * blockIdx.y + threadIdx.y;
// int col = blockDim.x * blockIdx.x + threadIdx.x;
// T tem = 0;
// if (row < n && col < n)
// {
// for (int k = 0; k < n; ++k)
// tem += lhs[row * n + k] * rhs[k * n + col];
// out[row * n + col] = tem;
// }
// }
// void tiled_matmul_test()
// {
// using type = float;
// int mygpu = hipGetDevice(&mygpu);
// type* lhs, * rhs, * dst;
// // It's 2-D. But we just consider 1-D first.
// constexpr int matsz = (1 << 12);
// hipMallocManaged(&lhs, sizeof(type) * matsz * matsz);
// hipMallocManaged(&rhs, sizeof(type) * matsz * matsz);
// hipMallocManaged(&dst, sizeof(type) * matsz * matsz);
// hipMemPrefetchAsync(lhs, sizeof(type) * matsz * matsz, mygpu);
// hipMemPrefetchAsync(rhs, sizeof(type) * matsz * matsz, mygpu);
// hipMemPrefetchAsync(dst, sizeof(type) * matsz * matsz, mygpu);
// constexpr int threads_per_blk = 16; // 32 x 32 => 1024
// constexpr int blks_per_grid = (matsz + threads_per_blk - 1) / threads_per_blk;
// dim3 blks(blks_per_grid, blks_per_grid), thres(threads_per_blk, threads_per_blk);
// constexpr int init_blks = (matsz * matsz + 1023) / 1024;
// init << <init_blks, 1024 >> > (lhs, 1.f, matsz * matsz);
// init << <init_blks, 1024 >> > (rhs, 1.f, matsz * matsz);
// hipDeviceSynchronize();
// std::cout << "NAIVE_MATMAL: \n";
// ganler::timer t;
// naive_matmul << <blks, thres >> > (lhs, rhs, dst, matsz);
// hipDeviceSynchronize();
// t.print_milli();
// std::cout << "TILED_MATMAL: \n";
// t.reset();
// tiled_matmul << <blks, thres >> > (lhs, rhs, dst, matsz, threads_per_blk); // Slower ? Why ?
// hipDeviceSynchronize();
// t.print_milli();
// hipMemPrefetchAsync(dst, sizeof(type) * matsz * matsz, hipCpuDeviceId);
// std::cout << dst[matsz * matsz - 1] << '\n';
// hipFree(lhs);
// hipFree(rhs);
// hipFree(dst);
// } | 8201adabffbc7474b2c4618ea7bff16b2d5af243.cu | #include <cuwrap/kernels/mul.hpp>
#include <initializer_list>
#include <tuple>
namespace cuwrap {
template <typename T>
__global__ void kmul(T* lhs, T* rhs, T* out, std::size_t maxn)
{
    int index = (blockDim.x * blockIdx.x) + threadIdx.x; // Global thread index within the grid.
    int stride = gridDim.x * blockDim.x; // Total number of threads in the grid (grid-stride loop).
for (std::size_t i = index; i < maxn; i += stride)
out[i] = lhs[i] * rhs[i];
}
// (std::size_t n, const T* lhs, const T* rhs, T* out, const kparam_t& param = kparam_t{})
template <typename T>
void mul_impl_t<T>::operator()(std::size_t n, T* lhs, T* rhs, T* out, kparam_t param) // Note: a lot of time is still wasted around each kernel call (allocations and host<->device copies).
{
if (param.is_default_initialized())
param.adapt_amount(n);
T *cl, *cr;
CUWRAP_IF_CUDA_ERR(cudaMalloc(&cl, n * sizeof(T)));
if (lhs == rhs)
cr = cl;
else
CUWRAP_IF_CUDA_ERR(cudaMalloc(&cr, n * sizeof(T)));
CUWRAP_IF_CUDA_ERR(cudaMemcpy(cl, lhs, n * sizeof(T), cudaMemcpyHostToDevice));
if (lhs != rhs)
CUWRAP_IF_CUDA_ERR(cudaMemcpy(cr, rhs, n * sizeof(T), cudaMemcpyHostToDevice));
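    // The kernel writes its result into cr in place (and into cl when lhs == rhs),
    // so no separate device buffer is allocated for out.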
kmul<<<param.blocks, param.threads_per_block, param.shared_size, (CUstream_st*)param.cuda_stream>>>(cl, cr, cr, n);
CUWRAP_IF_CUDA_ERR(cudaMemcpy(out, cr, n * sizeof(T), cudaMemcpyDeviceToHost));
cudaFree(cl);
if (lhs != rhs)
cudaFree(cr);
// int mygpu = cudaGetDevice(&mygpu); // TODO: Specify the custom setting for GPU choice.
// CUWRAP_IF_CUDA_ERR(cudaMallocManaged(&lhs, sizeof(T) * n));
// if (lhs != rhs)
// CUWRAP_IF_CUDA_ERR(cudaMallocManaged(&rhs, sizeof(T) * n));
// if (lhs != out && rhs != out)
// CUWRAP_IF_CUDA_ERR(cudaMallocManaged(&out, sizeof(T) * n));
// CUWRAP_IF_CUDA_ERR(cudaMemPrefetchAsync(lhs, sizeof(T) * n, mygpu)); // => GPU
// CUWRAP_IF_CUDA_ERR(cudaMemPrefetchAsync(rhs, sizeof(T) * n, mygpu)); // => GPU
// CUWRAP_IF_CUDA_ERR(cudaMemPrefetchAsync(out, sizeof(T) * n, mygpu)); // => GPU
// kadd<<<param.blocks, param.threads_per_block, param.shared_size, (CUstream_st*)param.cuda_stream>>>(lhs, rhs, out, n);
// CUWRAP_IF_CUDA_ERR(cudaDeviceSynchronize());
// CUWRAP_IF_CUDA_ERR(cudaMemPrefetchAsync(out, sizeof(T) * n, cudaCpuDeviceId)); // => CPU
// cudaFree(lhs);
// if (lhs != rhs)
// cudaFree(rhs);
// if (lhs != out && rhs != out)
// cudaFree(out);
}
template <typename... Ts>
static void force_initialization__()
{
// (add_impl<Ts>(std::size_t{}, nullptr, nullptr, nullptr), ...); // CUDA: We do not support CXX17 currently.
std::initializer_list<std::nullptr_t>{ ((mul_impl_t<Ts>{})(std::size_t{}, nullptr, nullptr, nullptr, kparam_t{}), nullptr)... };
}
void force_initialization_()
{
force_initialization__<CUWRAP_ARITHMETIC_TS>();
}
} // namespace cuwrap
// MAT MUL ==> TO be reimplemented =================================
// #pragma once
// #include "../utils/util.hpp"
// #include "init.cuh"
// #include <cuda_runtime.h>
// #include <device_launch_parameters.h>
// #include <type_traits>
// template <
// typename T,
// typename = std::enable_if_t<std::is_arithmetic_v<T>>> __global__
// void tiled_matmul(const T* lhs, const T* rhs, T* out, const int n, const int tile_sz)
// { // This is what ONE thread is doing !!!
// // For one thread => get one value in the output matrix;
// // For one block => get one tile in the output matrix;
// constexpr std::size_t mat_sz = 1024; // As my GPU supports 1024 threads per block :)
// __shared__ T sA[mat_sz], sB[mat_sz]; // Shared in the block.
// // Position for the thread: C[row][col];
// int row = blockIdx.x * tile_sz + threadIdx.x;
// int col = blockIdx.y * tile_sz + threadIdx.y;
// // Tx, Ty \in [0, 31].
// T temp = 0;
// for (int i = 0; i < n / tile_sz; ++i) // across all tiles. ONE block loads 2 tiles per iteration.
// { // In one `block x block` section.
// // To calculate C[row][col] we need to initialize: tiles => A, B // Note that y ~ row, x ~ col.
// sA[tile_sz * threadIdx.y + threadIdx.x] = lhs[row * n + i * tile_sz + threadIdx.x];
// sB[tile_sz * threadIdx.y + threadIdx.x] = rhs[col + i * tile_sz * n + threadIdx.y * n]; // A better way may be to transpose it first.
// __syncthreads(); // BLOCK_lhs & BLOCK_rhs >>> shared memory prepared.
// for (int j = 0; j < tile_sz; ++j) // Micro kernel. Consider sA & sB only.
// temp += sA[tile_sz * threadIdx.y + j] * sB[j * tile_sz + threadIdx.x];
// __syncthreads();
// }
// out[row * n + col] = temp;
// }
// template <
// typename T,
// typename = std::enable_if_t<std::is_arithmetic_v<T>>> __global__
// void naive_matmul(const T* lhs, const T* rhs, T* out, const int n)
// {
// int row = blockDim.y * blockIdx.y + threadIdx.y;
// int col = blockDim.x * blockIdx.x + threadIdx.x;
// T tem = 0;
// if (row < n && col < n)
// {
// for (int k = 0; k < n; ++k)
// tem += lhs[row * n + k] * rhs[k * n + col];
// out[row * n + col] = tem;
// }
// }
// void tiled_matmul_test()
// {
// using type = float;
// int mygpu = cudaGetDevice(&mygpu);
// type* lhs, * rhs, * dst;
// // It's 2-D. But we just consider 1-D first.
// constexpr int matsz = (1 << 12);
// cudaMallocManaged(&lhs, sizeof(type) * matsz * matsz);
// cudaMallocManaged(&rhs, sizeof(type) * matsz * matsz);
// cudaMallocManaged(&dst, sizeof(type) * matsz * matsz);
// cudaMemPrefetchAsync(lhs, sizeof(type) * matsz * matsz, mygpu);
// cudaMemPrefetchAsync(rhs, sizeof(type) * matsz * matsz, mygpu);
// cudaMemPrefetchAsync(dst, sizeof(type) * matsz * matsz, mygpu);
// constexpr int threads_per_blk = 16; // 32 x 32 => 1024
// constexpr int blks_per_grid = (matsz + threads_per_blk - 1) / threads_per_blk;
// dim3 blks(blks_per_grid, blks_per_grid), thres(threads_per_blk, threads_per_blk);
// constexpr int init_blks = (matsz * matsz + 1023) / 1024;
// init << <init_blks, 1024 >> > (lhs, 1.f, matsz * matsz);
// init << <init_blks, 1024 >> > (rhs, 1.f, matsz * matsz);
// cudaDeviceSynchronize();
// std::cout << "NAIVE_MATMAL: \n";
// ganler::timer t;
// naive_matmul << <blks, thres >> > (lhs, rhs, dst, matsz);
// cudaDeviceSynchronize();
// t.print_milli();
// std::cout << "TILED_MATMAL: \n";
// t.reset();
// tiled_matmul << <blks, thres >> > (lhs, rhs, dst, matsz, threads_per_blk); // Slower ? Why ?
// cudaDeviceSynchronize();
// t.print_milli();
// cudaMemPrefetchAsync(dst, sizeof(type) * matsz * matsz, cudaCpuDeviceId);
// std::cout << dst[matsz * matsz - 1] << '\n';
// cudaFree(lhs);
// cudaFree(rhs);
// cudaFree(dst);
// } |
a71a9be804a2ffb8c39bf9bd1ce34297422a1613.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pointwise_hist2.cuh"
#include "split_properties_helpers.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
namespace NKernel
{
template<int OUTER_HIST_BITS_COUNT,
int INNER_HIST_BITS_COUNT,
int BLOCK_SIZE>
struct TPointHist {
float* __restrict__ Buffer;
float mostRecentStat1[4];
float mostRecentStat2[4];
uchar mostRecentBin[4];
__forceinline__ __device__ int SliceOffset() {
const int maxBlocks = BLOCK_SIZE * 32 / (1024 << OUTER_HIST_BITS_COUNT);
static_assert(OUTER_HIST_BITS_COUNT <= 2, "Error: assume 12 warps, so limited by 128-bin histogram per warp");
const int warpId = (threadIdx.x / 32) % maxBlocks;
const int warpOffset = (1024 << OUTER_HIST_BITS_COUNT) * warpId;
const int blocks = 4 >> INNER_HIST_BITS_COUNT;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (INNER_HIST_BITS_COUNT + 3)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
#pragma unroll
for (int f = 0; f < 4; ++f) {
mostRecentBin[f] = 0;
mostRecentStat1[f] = 0;
mostRecentStat2[f] = 0;
}
}
__forceinline__ __device__ void Add(float val, float* dst) {
if (OUTER_HIST_BITS_COUNT > 0 || INNER_HIST_BITS_COUNT > 0) {
atomicAdd(dst, val);
} else {
dst[0] += val;
}
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t, const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
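            // ci packs four 8-bit feature bins (one byte per feature in this group of 4);
            // bfe(ci, 24 - 8 * f, 8) below extracts the bin for feature f. Each thread adds
            // both statistics (t and w) into two adjacent slots; the thread's parity (flag)
            // decides which slot receives which statistic.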
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const uchar bin = bfe(ci, 24 - (f << 3), 8);
if (bin != mostRecentBin[i]) {
const bool pass = (mostRecentBin[i] >> (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT)) == 0;
if (pass) {
int offset = 2 * f;
const uchar mask = (1 << INNER_HIST_BITS_COUNT) - 1;
offset += 8 * (mostRecentBin[i] & mask);
offset += 32 * ((mostRecentBin[i] >> INNER_HIST_BITS_COUNT));
offset += flag;
Add(mostRecentStat1[i], Buffer + offset);
offset = flag ? offset - 1 : offset + 1;
Add(mostRecentStat2[i], Buffer + offset);
}
mostRecentBin[i] = bin;
mostRecentStat1[i] = 0;
mostRecentStat2[i] = 0;
}
{
mostRecentStat1[i] += stat1;
mostRecentStat2[i] += stat2;
}
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
        // for the first 32 bins; then 256 floats for the second 32 bins, etc.
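        // The final layout written by Reduce() is Buffer[2 * (maxFoldCount * f + fold) + w],
        // i.e. per-feature blocks of maxFoldCount folds with the two statistics interleaved.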
__forceinline__ __device__ void Reduce() {
{
const bool flag = threadIdx.x & 1;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const bool pass = (mostRecentBin[i] >> (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT)) == 0;
if (pass) {
int offset = 2 * f;
const uchar mask = (1 << INNER_HIST_BITS_COUNT) - 1;
offset += 8 * (mostRecentBin[i] & mask);
offset += 32 * ((mostRecentBin[i] >> INNER_HIST_BITS_COUNT));
Add(mostRecentStat1[i], Buffer + offset + flag);
Add(mostRecentStat2[i], Buffer + offset + !flag);
}
}
}
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024 << OUTER_HIST_BITS_COUNT;
const int maxBlocks = BLOCK_SIZE * 32 / (1024 << OUTER_HIST_BITS_COUNT);
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll maxBlocks
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
float sum[4];
const int maxFoldCount = (1 << (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT));
for (int fold = (threadIdx.x >> 1); fold < maxFoldCount; fold += 128) {
#pragma unroll
for (int f = 0; f < 4; ++f) {
sum[f] = 0;
}
const int innerHistCount = 4 >> INNER_HIST_BITS_COUNT;
const int lowBitMask = (1 << INNER_HIST_BITS_COUNT) - 1;
const float* __restrict__ src = Buffer
+ (1024 << OUTER_HIST_BITS_COUNT) //warpHistSize
+ 8 * (fold & lowBitMask)
+ 32 * (fold >> INNER_HIST_BITS_COUNT)
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
#pragma unroll
for (int f = 0; f < 4; ++f) {
sum[f] += src[2 * f + (inWarpHist << (3 + INNER_HIST_BITS_COUNT))];
}
}
#pragma unroll
for (int f = 0; f < 4; ++f) {
Buffer[2 * (maxFoldCount * f + fold) + w] = sum[f];
}
}
}
__syncthreads();
}
};
template<int BLOCK_SIZE>
struct TPointHist<0, 0, BLOCK_SIZE> {
float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 4;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 3));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void Add(float val, float* dst) {
dst[0] += val;
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const uchar bin = bfe(ci, 24 - (f << 3), 8);
const bool pass = bin != 32;
int offset = 2 * f;
offset += 32 * (bin & 31);
Buffer[offset + flag] += pass * stat1;
Buffer[offset + !flag] += pass * stat2;
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
        // for the first 32 bins; then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum = 0.0f;
const int fold = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 32;
if (fold < maxFoldCount) {
const int innerHistCount = 4;
const float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 32 * fold
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum += src[2 * f + (inWarpHist << 3)];
}
Buffer[2 * (maxFoldCount * f + fold) + w] = sum;
}
}
__syncthreads();
}
};
template<int BLOCK_SIZE>
struct TPointHist<0, 1, BLOCK_SIZE> {
volatile float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 2;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 4));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const uchar bin = bfe(ci, 24 - (f << 3), 8);
const bool pass = bin != 64;
int offset = 2 * f;
offset += 16 * (bin & 62) + 8 * (bin & 1);
const bool writeFirstFlag = threadIdx.x & 8;
const float val1 = pass * stat1;
offset += flag;
if (writeFirstFlag) {
Buffer[offset] += val1;
}
if (!writeFirstFlag) {
Buffer[offset] += val1;
}
const float val2 = pass * stat2;
// offset -= flag;
// offset += !flag;
offset = flag ? offset - 1 : offset + 1;
if (writeFirstFlag) {
Buffer[offset] += val2;
}
if (!writeFirstFlag) {
Buffer[offset] += val2;
}
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
        // for the first 32 bins; then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum0 = 0.0f;
float sum1 = 0.0f;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 64;
{
const int innerHistCount = 2;
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 8 * (fold0 & 1)
+ 32 * (fold0 >> 1)
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum0 += src[2 * f + (inWarpHist << 4)];
sum1 += src[2 * f + (inWarpHist << 4) + 512];
}
Buffer[2 * (maxFoldCount * f + fold0) + w] = sum0;
Buffer[2 * (maxFoldCount * f + fold0 + 32) + w] = sum1;
}
}
__syncthreads();
}
};
template<int BLOCK_SIZE>
struct TPointHist<0, 2, BLOCK_SIZE> {
volatile float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
return warpOffset;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const int bin = bfe(ci, 24 - (f << 3), 8);
const bool pass = bin != 128;
int offset = 2 * f;
offset += 8 * (bin & 127);
//
const int writeTime = (threadIdx.x >> 3) & 3;
const float val1 = pass * stat1;
offset += flag;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
Buffer[offset] += val1;
}
}
const float val2 = pass * stat2;
offset = flag ? offset - 1 : offset + 1;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
Buffer[offset] += val2;
}
}
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
        // for the first 32 bins; then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 128;
{
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 2 * f
+ w;
#pragma unroll
for (int k = 0; k < 4; ++k) {
int fold = fold0 + 32 * k;
Buffer[2 * (maxFoldCount * f + fold) + w] = src[8 * fold];
}
}
}
__syncthreads();
}
};
template<int STRIPE_SIZE, int OUTER_UNROLL, int N, int HIST_BLOCK_COUNT, int BLOCKS_PER_FEATURE, typename THist>
__forceinline__ __device__ void ComputeHistogram(const ui32* __restrict__ indices, int offset, int dsSize,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ cindex, float* __restrict__ result) {
weight += offset;
target += offset;
indices += offset;
THist hist(result);
indices += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE;
target += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE;
weight += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE;
dsSize = max(dsSize - (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE, 0);
const int stripe = STRIPE_SIZE * BLOCKS_PER_FEATURE;
if (dsSize)
{
int i = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32;
int iteration_count = (dsSize - i + (stripe - 1)) / stripe;
int blocked_iteration_count = ((dsSize - (i | 31) + (stripe - 1)) / stripe) / N;
weight += i;
target += i;
indices += i;
#pragma unroll OUTER_UNROLL
for (int j = 0; j < blocked_iteration_count; ++j) {
ui32 local_index[N];
#pragma unroll
for (int k = 0; k < N; k++) {
local_index[k] = __ldg(indices + stripe * k);
}
ui32 local_ci[N];
float local_w[N];
float local_wt[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
local_ci[k] = __ldg(cindex + local_index[k]);
local_w[k] = __ldg(weight + stripe * k);
local_wt[k] = __ldg(target + stripe * k);
}
#pragma unroll
for (int k = 0; k < N; ++k) {
hist.AddPoint(local_ci[k], local_wt[k], local_w[k]);
}
i += stripe * N;
indices += stripe * N;
target += stripe * N;
weight += stripe * N;
}
for (int k = blocked_iteration_count * N; k < iteration_count; ++k) {
const int index = __ldg(indices);
ui32 ci = __ldg(cindex + index);
float w = __ldg(weight);
float wt = __ldg(target);
hist.AddPoint(ci, wt, w);
i += stripe;
indices += stripe;
target += stripe;
weight += stripe;
}
__syncthreads();
hist.Reduce();
}
}
template<int STRIPE_SIZE, int OUTER_UNROLL, int HIST_BLOCK_COUNT, int BLOCKS_PER_FEATURE, typename THist>
__forceinline__ __device__ void ComputeHistogram2(
const ui32* __restrict__ indices,
int offset, int dsSize,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ cindex, float* __restrict__ result) {
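        // Work is organised in three phases: (1) the first block of each feature group consumes
        // the unaligned head so the main loop starts on a 128-element boundary; (2) the same
        // block consumes the unaligned tail (size & 31); (3) every block then runs the main loop,
        // reading two (index, target, weight) entries per thread via uint2/float2 loads.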
weight += offset;
target += offset;
indices += offset;
THist hist(result);
if (dsSize) {
            // First: make the memory accesses aligned: the first block of each feature group loads the leading 128 - offset % 128 elements.
{
int lastId = min(dsSize, 128 - (offset & 127));
int colId = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32;
if ((blockIdx.x % BLOCKS_PER_FEATURE) == 0)
{
for (; (colId < lastId); colId += blockDim.x / HIST_BLOCK_COUNT)
{
const int index = __ldg(indices + colId);
const ui32 ci = __ldg(cindex + index);
const float w = __ldg(weight + colId);
const float wt = __ldg(target + colId);
hist.AddPoint(ci, wt, w);
}
}
dsSize = max(dsSize - lastId, 0);
indices += lastId;
target += lastId;
weight += lastId;
}
            // Now align the end (handle the unaligned tail).
const int unalignedTail = (dsSize & 31);
if (unalignedTail != 0) {
if ((blockIdx.x % BLOCKS_PER_FEATURE) == 0)
{
int colId = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32;
const int tailOffset = dsSize - unalignedTail;
for (; colId < unalignedTail; colId += blockDim.x / HIST_BLOCK_COUNT)
{
const int index = __ldg(indices + tailOffset + colId);
const ui32 ci = __ldg(cindex + index);
const float w = __ldg(weight + tailOffset + colId);
const float wt = __ldg(target + tailOffset + colId);
hist.AddPoint(ci, wt, w);
}
}
}
dsSize -= unalignedTail;
if (dsSize <= 0) {
if ((blockIdx.x % BLOCKS_PER_FEATURE) == 0) {
__syncthreads();
hist.Reduce();
}
return;
}
indices += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2;
target += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2;
weight += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2;
const int stripe = STRIPE_SIZE * BLOCKS_PER_FEATURE * 2;
dsSize = max(dsSize - (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2, 0);
if (dsSize) {
int iterCount;
{
const int i = 2 * ((threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32);
weight += i;
target += i;
indices += i;
iterCount = (dsSize - i + (stripe - 1)) / stripe;
}
#pragma unroll OUTER_UNROLL
for (int j = 0; j < iterCount; ++j) {
const uint2 localIndices = __ldg((uint2*) indices);
const ui32 firstBin = __ldg(cindex + localIndices.x);
const ui32 secondBin = __ldg(cindex + localIndices.y);
const float2 localTarget = __ldg((float2* )(target));
const float2 localWeight = __ldg((float2* )(weight));
hist.AddPoint(firstBin, localTarget.x, localWeight.x);
hist.AddPoint(secondBin, localTarget.y, localWeight.y);
indices += stripe;
target += stripe;
weight += stripe;
}
__syncthreads();
hist.Reduce();
}
}
}
template<int BLOCK_SIZE, int OUTER_HIST_BITS_COUNT, int INNER_HIST_BITS_COUNT, int BLOCKS_PER_FEATURE, bool USE_64_BIT_LOAD>
__forceinline__ __device__ void ComputeSplitPropertiesPass(const TCFeature* __restrict__ feature, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, int fCount,
float* binSumsForPart,
float* smem)
{
using THist = TPointHist<OUTER_HIST_BITS_COUNT, INNER_HIST_BITS_COUNT, BLOCK_SIZE>;
const int stripeSize = BLOCK_SIZE;
const int histBlockCount = 1;
if (USE_64_BIT_LOAD) {
#if __CUDA_ARCH__ < 300
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 4 : 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 8 : 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) <= 2 ? 4 : 2;
#endif
const int size = partition->Size;
const int offset = partition->Offset;
ComputeHistogram2 < stripeSize, OUTER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist > (indices, offset, size,
target,
weight,
cindex,
smem);
} else {
#if __CUDA_ARCH__ < 300
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 4 : 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 8 : 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 2;
#endif
ComputeHistogram<stripeSize, OUTER_UNROLL, INNER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist>(indices,
partition->Offset,
partition->Size,
target,
weight,
cindex,
smem);
}
__syncthreads();
const int maxFoldCount = (1 << (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT));
const int fid = (threadIdx.x / 64);
const int w = threadIdx.x & 1;
const int featureFolds = fid < fCount ? feature[fid].Folds : 0;
const int featureOffset = fid * maxFoldCount * 2 + w;
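        // Each of the 4 features is reduced by 64 threads (fid = threadIdx.x / 64) and w selects
        // the statistic; partial sums go to binSumsForPart, atomically when BLOCKS_PER_FEATURE > 1.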
for (int fold = (threadIdx.x / 2) & 31; fold < featureFolds; fold += 32) {
if (fid < fCount) {
const float val = smem[featureOffset + 2 * fold];
if (abs(val) > 1e-20f) {
if (BLOCKS_PER_FEATURE > 1) {
atomicAdd(binSumsForPart + (feature[fid].FirstFoldIndex + fold) * 2 + w, val);
} else {
WriteThrough(binSumsForPart + (feature[fid].FirstFoldIndex + fold) * 2 + w, val);
}
}
}
}
}
#define DECLARE_PASS(O, I, M, USE_64_BIT_LOAD) \
ComputeSplitPropertiesPass<BLOCK_SIZE, O, I, M, USE_64_BIT_LOAD>(feature, cindex, target, weight, indices, partition, fCount, binSums, &counters[0]);
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ == 600
__launch_bounds__(BLOCK_SIZE, 1)
#elif __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesNBImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount)
{
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
feature += (blockIdx.x / M) * 4;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 4, 4);
__shared__ float counters[32 * BLOCK_SIZE];
const int maxBinCount = GetMaxBinCount(feature, fCount, (int*) &counters[0]);
__syncthreads();
        // CatBoost always uses direct loads on the first pass of histogram calculation; for this step 64-bit loads are almost 2x faster.
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = FULL_PASS;// float2 for target/indices/weights
#else
const bool use64BitLoad = false;
#endif
if (partition->Size) {
if (maxBinCount <= 32) {
DECLARE_PASS(0, 0, M, use64BitLoad);
} else if (maxBinCount <= 64) {
DECLARE_PASS(0, 1, M, use64BitLoad);
} else if (maxBinCount <= 128) {
DECLARE_PASS(0, 2, M, use64BitLoad);
} else {
DECLARE_PASS(2, 1, M, use64BitLoad);
}
}
}
template<int BLOCK_SIZE>
struct TPointHistHalfByte {
volatile float* Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 512 * (threadIdx.x / 32);
const int innerHistStart = threadIdx.x & 16;
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistHalfByte(float* buff) {
const int HIST_SIZE = 16 * BLOCK_SIZE;
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
__syncthreads();
Buffer = buff + SliceOffset();
}
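        // ci packs eight 4-bit bins; AddPoint extracts them with bfe(ci, 28 - (f << 1), 4),
        // so this histogram supports at most 16 folds per feature.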
__forceinline__ __device__ void AddPoint(ui32 ci, const float t, const float w)
{
const bool flag = threadIdx.x & 1;
#pragma unroll
for (int i = 0; i < 8; i++) {
const short f = (threadIdx.x + (i << 1)) & 14;
short bin = bfe(ci, 28 - (f << 1), 4);
bin <<= 5;
bin += f;
const int offset0 = bin + flag;
const int offset1 = bin + !flag;
Buffer[offset0] += (flag ? t : w);
Buffer[offset1] += (flag ? w : t);
}
}
__device__ void Reduce() {
Buffer -= SliceOffset();
const int warpCount = BLOCK_SIZE >> 5;
{
const int fold = (threadIdx.x >> 5) & 15;
const int sumOffset = threadIdx.x & 31;
float sum = 0.0;
if (threadIdx.x < 512)
{
float* __restrict__ buffer = const_cast<float*>(Buffer);
#pragma unroll
for (int warpId = 0; warpId < warpCount; ++warpId)
{
const int warpOffset = 512 * warpId;
sum += buffer[warpOffset + sumOffset + 32 * fold];
}
}
__syncthreads();
if (threadIdx.x < 512) {
Buffer[threadIdx.x] = sum;
}
}
__syncthreads();
const int fold = (threadIdx.x >> 4) & 15;
float sum = 0.0f;
if (threadIdx.x < 256)
{
const int histEntryId = (threadIdx.x & 15);
sum = Buffer[32 * fold + histEntryId] + Buffer[32 * fold + histEntryId + 16];
}
__syncthreads();
if (threadIdx.x < 256) {
Buffer[threadIdx.x] = sum;
}
__syncthreads();
}
};
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesBImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, float* __restrict__ binSums, int totalFeatureCount)
{
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
feature += (blockIdx.x / M) * 32;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 32, 32);
__shared__ float counters[16 * BLOCK_SIZE];
if (partition->Size)
{
using THist = TPointHistHalfByte<BLOCK_SIZE>;
#if __CUDA_ARCH__ > 350
const bool use64bitLoad = FULL_PASS;
#else
const bool use64bitLoad = false;
#endif
if (use64bitLoad) {
//full pass
#if __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 1;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram2 < BLOCK_SIZE, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, &counters[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int INNER_UNROLL = 2;
const int OUTER_UNROLL = 1;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 1;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram < BLOCK_SIZE, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, &counters[0]);
}
ui32 w = threadIdx.x & 1;
ui32 fid = (threadIdx.x >> 1);
if (fid < fCount)
{
const int groupId = fid / 4;
uchar fMask = 1 << (3 - (fid & 3));
float sum = 0.f;
            #pragma unroll
for (int i = 0; i < 16; i++) {
if (!(i & fMask)) {
sum += counters[i * 16 + 2 * groupId + w];
}
}
if (abs(sum) > 1e-20f) {
if (M > 1)
{
atomicAdd(binSums + (feature[fid].FirstFoldIndex) * 2 + w, sum);
} else
{
binSums[(feature[fid].FirstFoldIndex) * 2 + w] = sum;
}
}
}
}
}
template<int BLOCK_SIZE,
int BLOCKS_PER_FEATURE_COUNT>
inline void RunComputeHist2NonBinaryKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesNBImpl < BLOCK_SIZE, true, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
} else
{
ComputeSplitPropertiesNBImpl < BLOCK_SIZE, false, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
}
}
template<int BLOCK_SIZE, int BLOCKS_PER_FEATURE_COUNT>
void RunComputeHist2BinaryKernel(const TCFeature* bFeatures, int bCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, bool fullPass,
int totalFeatureCount,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesBImpl < BLOCK_SIZE, true,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
bFeatures, bCount, cindex, target, weight, indices, partition, binSums, totalFeatureCount
);
} else
{
ComputeSplitPropertiesBImpl < BLOCK_SIZE, false,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
bFeatures, bCount, cindex, target, weight, indices, partition, binSums, totalFeatureCount
);
}
};
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesHalfByteImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount)
{
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
feature += (blockIdx.x / M) * 8;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 8, 8);
//
__shared__ float smem[16 * BLOCK_SIZE];
using THist = TPointHistHalfByte<BLOCK_SIZE>;
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = FULL_PASS;
#else
const bool use64BitLoad = false;
#endif
if (use64BitLoad)
{
#if __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram2 < BLOCK_SIZE, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, smem);
} else {
#if __CUDA_ARCH__ <= 300
const int INNER_UNROLL = 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram < BLOCK_SIZE, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > (
indices, partition->Offset, partition->Size,
target, weight,
cindex, smem);
}
__syncthreads();
const int fid = (threadIdx.x / 32);
const int fold = (threadIdx.x / 2) & 15;
const int w = threadIdx.x & 1;
if (fid < fCount && fold < feature[fid].Folds) {
const float result = smem[fold * 16 + 2 * fid + w];
if (abs(result) > 1e-20) {
if (M > 1) {
atomicAdd(binSums + (feature[fid].FirstFoldIndex + fold) * 2 + w, result);
} else {
binSums[(feature[fid].FirstFoldIndex + fold) * 2 + w] = result;
}
}
}
}
template<int BLOCK_SIZE,
int BLOCKS_PER_FEATURE_COUNT>
inline void RunComputeHist2HalfByteKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesHalfByteImpl < BLOCK_SIZE, true,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
} else
{
ComputeSplitPropertiesHalfByteImpl < BLOCK_SIZE, false,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount);
}
}
__global__ void UpdateBinsImpl(ui32* dstBins, const ui32* bins, const ui32* docIndices, ui32 size,
ui32 loadBit, ui32 foldBits) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
const ui32 idx = LdgWithFallback(docIndices, i);
const ui32 bit = (LdgWithFallback(bins, idx) >> loadBit) & 1;
dstBins[i] = dstBins[i] | (bit << (loadBit + foldBits));
}
}
void UpdateFoldBins(ui32* dstBins, const ui32* bins, const ui32* docIndices, ui32 size,
ui32 loadBit, ui32 foldBits, TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 numBlocks = CeilDivide(size, blockSize);
UpdateBinsImpl << < numBlocks, blockSize, 0, stream >> > (dstBins, bins, docIndices, size, loadBit, foldBits);
}
template <int HIST_COUNT>
__global__ void UpdatePointwiseHistogramsImpl(float* histogram,
const int firstBinFeature, int featuresCount,
const TDataPartition* parts,
const ui64 histLineSize) {
TPointwisePartOffsetsHelper helper(gridDim.z);
const int leftPartId = helper.GetDataPartitionOffset(blockIdx.y, blockIdx.z);
const int rightPartId = helper.GetDataPartitionOffset(blockIdx.y | gridDim.y, blockIdx.z);
const int binFeature = firstBinFeature + blockIdx.x * blockDim.x + threadIdx.x;
if (binFeature < (firstBinFeature + featuresCount)) {
const TDataPartition leftPart = parts[leftPartId];
const TDataPartition rightPart = parts[rightPartId];
const bool isLeftCalculated = leftPart.Size < rightPart.Size;
const size_t leftOffset = HIST_COUNT * (helper.GetHistogramOffset(blockIdx.y, blockIdx.z) * histLineSize + binFeature);
const size_t rightOffset = HIST_COUNT * (helper.GetHistogramOffset(blockIdx.y | gridDim.y, blockIdx.z) * histLineSize + binFeature);
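            // calcVal is read from rightOffset and the complement is the value stored at leftOffset
            // minus calcVal; the directly computed histogram is then assigned to the smaller of the
            // two parts and the complement to the larger one.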
float calcVal[HIST_COUNT];
float complementVal[HIST_COUNT];
#pragma unroll
for (int histId = 0; histId < HIST_COUNT; ++histId) {
calcVal[histId] = histogram[rightOffset + histId];
complementVal[histId] = histogram[leftOffset + histId] - calcVal[histId];
}
#pragma unroll
for (int histId = 0; histId < HIST_COUNT; ++histId) {
histogram[leftOffset + histId] = isLeftCalculated ? calcVal[histId] : complementVal[histId] ;
histogram[rightOffset + histId] = isLeftCalculated ? complementVal[histId] : calcVal[histId];
}
}
}
bool UpdatePointwiseHistograms(float* histograms,
int firstBinFeature, int binFeatureCount,
int partCount,
int foldCount,
int histCount,
int histLineSize,
const TDataPartition* parts,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = (binFeatureCount + blockSize - 1) / blockSize;
numBlocks.y = partCount / 2;
numBlocks.z = foldCount;
if (histCount == 1) {
hipLaunchKernelGGL(( UpdatePointwiseHistogramsImpl<1>), dim3(numBlocks), dim3(blockSize), 0, stream, histograms, firstBinFeature, binFeatureCount, parts, histLineSize);
}
else if (histCount == 2) {
hipLaunchKernelGGL(( UpdatePointwiseHistogramsImpl<2>), dim3(numBlocks), dim3(blockSize), 0, stream, histograms, firstBinFeature, binFeatureCount, parts, histLineSize);
} else {
return false;
}
return true;
}
void ComputeHist2Binary(const TCFeature* bFeatures, ui32 bCount,
const ui32* cindex,
const float* target, const float* weight,
const ui32* indices, ui32 size,
const TDataPartition* partition,
ui32 partsCount, ui32 foldCount,
bool fullPass,
ui32 totalFeatureCount,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = (bCount + 31) / 32;
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = histCount;
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (bCount) {
#define COMPUTE(k) \
RunComputeHist2BinaryKernel<blockSize, k>(bFeatures, bCount, cindex, target, weight, indices, \
partition, binSums, fullPass, totalFeatureCount, stream, numBlocks); \
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8);
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
void ComputeHist2HalfByte(const TCFeature* halfByteFeatures, ui32 halfByteFeaturesCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
ui32 size,
const TDataPartition* partition, ui32 partsCount, ui32 foldCount,
bool fullPass,
const ui32 histLineSize,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = static_cast<ui32>((halfByteFeaturesCount + 7) / 8);
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = static_cast<ui32>(histCount);
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (halfByteFeaturesCount) {
#define COMPUTE(k)\
RunComputeHist2HalfByteKernel<blockSize, k>(halfByteFeatures, halfByteFeaturesCount, cindex,\
target,\
weight, indices, partition, binSums, histLineSize,\
fullPass,\
stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
void ComputeHist2NonBinary(const TCFeature* nbFeatures, ui32 nbCount,
const ui32* cindex,
const float* target, const float* weight,
const ui32* indices, ui32 size,
const TDataPartition* partition, ui32 partCount, ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
if (nbCount) {
dim3 numBlocks;
numBlocks.x = (nbCount + 3) / 4;
const int histPartCount = (fullPass ? partCount : partCount / 2);
numBlocks.y = histPartCount;
numBlocks.z = foldCount;
const int blockSize = 384;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
#define COMPUTE(k)\
RunComputeHist2NonBinaryKernel<blockSize, k>(nbFeatures, nbCount, cindex, target, weight, indices, \
partition, binSums, histLineSize, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
void ScanPointwiseHistograms(const TCFeature* features,
int featureCount, int partCount, int foldCount,
int histLineSize, bool fullPass,
int histCount,
float* binSums,
TCudaStream stream) {
const int scanBlockSize = 256;
const int histPartCount = (fullPass ? partCount : partCount / 2);
dim3 scanBlocks;
scanBlocks.x = (featureCount * 32 + scanBlockSize - 1) / scanBlockSize;
scanBlocks.y = histPartCount;
scanBlocks.z = foldCount;
const int scanOffset = fullPass ? 0 : ((partCount / 2) * histLineSize * histCount) * foldCount;
if (histCount == 1) {
ScanHistogramsImpl<scanBlockSize, 1> << < scanBlocks, scanBlockSize, 0, stream >> > (features, featureCount, histLineSize, binSums + scanOffset);
} else if (histCount == 2) {
ScanHistogramsImpl<scanBlockSize, 2> << < scanBlocks, scanBlockSize, 0, stream >> >
(features, featureCount, histLineSize, binSums + scanOffset);
} else {
exit(0);
}
}
}
| a71a9be804a2ffb8c39bf9bd1ce34297422a1613.cu | #include "pointwise_hist2.cuh"
#include "split_properties_helpers.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
namespace NKernel
{
template<int OUTER_HIST_BITS_COUNT,
int INNER_HIST_BITS_COUNT,
int BLOCK_SIZE>
struct TPointHist {
float* __restrict__ Buffer;
float mostRecentStat1[4];
float mostRecentStat2[4];
uchar mostRecentBin[4];
__forceinline__ __device__ int SliceOffset() {
const int maxBlocks = BLOCK_SIZE * 32 / (1024 << OUTER_HIST_BITS_COUNT);
static_assert(OUTER_HIST_BITS_COUNT <= 2, "Error: assume 12 warps, so limited by 128-bin histogram per warp");
const int warpId = (threadIdx.x / 32) % maxBlocks;
const int warpOffset = (1024 << OUTER_HIST_BITS_COUNT) * warpId;
const int blocks = 4 >> INNER_HIST_BITS_COUNT;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (INNER_HIST_BITS_COUNT + 3)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
#pragma unroll
for (int f = 0; f < 4; ++f) {
mostRecentBin[f] = 0;
mostRecentStat1[f] = 0;
mostRecentStat2[f] = 0;
}
}
__forceinline__ __device__ void Add(float val, float* dst) {
if (OUTER_HIST_BITS_COUNT > 0 || INNER_HIST_BITS_COUNT > 0) {
atomicAdd(dst, val);
} else {
dst[0] += val;
}
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t, const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const uchar bin = bfe(ci, 24 - (f << 3), 8);
if (bin != mostRecentBin[i]) {
const bool pass = (mostRecentBin[i] >> (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT)) == 0;
if (pass) {
int offset = 2 * f;
const uchar mask = (1 << INNER_HIST_BITS_COUNT) - 1;
offset += 8 * (mostRecentBin[i] & mask);
offset += 32 * ((mostRecentBin[i] >> INNER_HIST_BITS_COUNT));
offset += flag;
Add(mostRecentStat1[i], Buffer + offset);
offset = flag ? offset - 1 : offset + 1;
Add(mostRecentStat2[i], Buffer + offset);
}
mostRecentBin[i] = bin;
mostRecentStat1[i] = 0;
mostRecentStat2[i] = 0;
}
{
mostRecentStat1[i] += stat1;
mostRecentStat2[i] += stat2;
}
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
        // for the first 32 bins; then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
{
const bool flag = threadIdx.x & 1;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const bool pass = (mostRecentBin[i] >> (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT)) == 0;
if (pass) {
int offset = 2 * f;
const uchar mask = (1 << INNER_HIST_BITS_COUNT) - 1;
offset += 8 * (mostRecentBin[i] & mask);
offset += 32 * ((mostRecentBin[i] >> INNER_HIST_BITS_COUNT));
Add(mostRecentStat1[i], Buffer + offset + flag);
Add(mostRecentStat2[i], Buffer + offset + !flag);
}
}
}
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024 << OUTER_HIST_BITS_COUNT;
const int maxBlocks = BLOCK_SIZE * 32 / (1024 << OUTER_HIST_BITS_COUNT);
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll maxBlocks
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
float sum[4];
const int maxFoldCount = (1 << (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT));
for (int fold = (threadIdx.x >> 1); fold < maxFoldCount; fold += 128) {
#pragma unroll
for (int f = 0; f < 4; ++f) {
sum[f] = 0;
}
const int innerHistCount = 4 >> INNER_HIST_BITS_COUNT;
const int lowBitMask = (1 << INNER_HIST_BITS_COUNT) - 1;
const float* __restrict__ src = Buffer
+ (1024 << OUTER_HIST_BITS_COUNT) //warpHistSize
+ 8 * (fold & lowBitMask)
+ 32 * (fold >> INNER_HIST_BITS_COUNT)
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
#pragma unroll
for (int f = 0; f < 4; ++f) {
sum[f] += src[2 * f + (inWarpHist << (3 + INNER_HIST_BITS_COUNT))];
}
}
#pragma unroll
for (int f = 0; f < 4; ++f) {
Buffer[2 * (maxFoldCount * f + fold) + w] = sum[f];
}
}
}
__syncthreads();
}
};
template<int BLOCK_SIZE>
struct TPointHist<0, 0, BLOCK_SIZE> {
float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 4;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 3));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void Add(float val, float* dst) {
dst[0] += val;
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const uchar bin = bfe(ci, 24 - (f << 3), 8);
const bool pass = bin != 32;
int offset = 2 * f;
offset += 32 * (bin & 31);
Buffer[offset + flag] += pass * stat1;
Buffer[offset + !flag] += pass * stat2;
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
        // for the first 32 bins; then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum = 0.0f;
const int fold = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 32;
if (fold < maxFoldCount) {
const int innerHistCount = 4;
const float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 32 * fold
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum += src[2 * f + (inWarpHist << 3)];
}
Buffer[2 * (maxFoldCount * f + fold) + w] = sum;
}
}
__syncthreads();
}
};
template<int BLOCK_SIZE>
struct TPointHist<0, 1, BLOCK_SIZE> {
volatile float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 2;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 4));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const uchar bin = bfe(ci, 24 - (f << 3), 8);
const bool pass = bin != 64;
int offset = 2 * f;
offset += 16 * (bin & 62) + 8 * (bin & 1);
const bool writeFirstFlag = threadIdx.x & 8;
const float val1 = pass * stat1;
offset += flag;
if (writeFirstFlag) {
Buffer[offset] += val1;
}
if (!writeFirstFlag) {
Buffer[offset] += val1;
}
const float val2 = pass * stat2;
// offset -= flag;
// offset += !flag;
offset = flag ? offset - 1 : offset + 1;
if (writeFirstFlag) {
Buffer[offset] += val2;
}
if (!writeFirstFlag) {
Buffer[offset] += val2;
}
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for the first 32 bins; then 256 floats for the second 32 bins, etc
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum0 = 0.0f;
float sum1 = 0.0f;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 64;
{
const int innerHistCount = 2;
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 8 * (fold0 & 1)
+ 32 * (fold0 >> 1)
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum0 += src[2 * f + (inWarpHist << 4)];
sum1 += src[2 * f + (inWarpHist << 4) + 512];
}
Buffer[2 * (maxFoldCount * f + fold0) + w] = sum0;
Buffer[2 * (maxFoldCount * f + fold0 + 32) + w] = sum1;
}
}
__syncthreads();
}
};
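// Notes on the 64-bin specialization above: each warp keeps 2 replicas of the histogram
// (selected by threadIdx.x & 16), and a bin b is stored at base 32 * (b / 2) + 8 * (b % 2),
// i.e. even/odd bins of a pair sit 8 floats apart inside the same 32-float stripe.
// The duplicated "if (writeFirstFlag) / if (!writeFirstFlag)" updates are not redundant:
// lanes k and k + 8 share a replica and may target the same address, so the two half-groups
// are serialized to avoid losing one of the two non-atomic += updates.
// Reduce() sums the warps, folds the 2 replicas, and writes folds 0..31 and 32..63 to
// Buffer[2 * (64 * f + fold) + stat].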
template<int BLOCK_SIZE>
struct TPointHist<0, 2, BLOCK_SIZE> {
volatile float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
return warpOffset;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const int bin = bfe(ci, 24 - (f << 3), 8);
const bool pass = bin != 128;
int offset = 2 * f;
offset += 8 * (bin & 127);
//
const int writeTime = (threadIdx.x >> 3) & 3;
const float val1 = pass * stat1;
offset += flag;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
Buffer[offset] += val1;
}
}
const float val2 = pass * stat2;
offset = flag ? offset - 1 : offset + 1;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
Buffer[offset] += val2;
}
}
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for the first 32 bins; then 256 floats for the second 32 bins, etc
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 128;
{
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 2 * f
+ w;
#pragma unroll
for (int k = 0; k < 4; ++k) {
int fold = fold0 + 32 * k;
Buffer[2 * (maxFoldCount * f + fold) + w] = src[8 * fold];
}
}
}
__syncthreads();
}
};
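// Notes on the 128-bin specialization above: there is a single histogram copy per warp
// (SliceOffset returns only the warp offset), laid out as 128 bins x 8 floats
// (4 features x 2 statistics). Because all 32 lanes share one copy, the unrolled k-loop with
// writeTime = (threadIdx.x >> 3) & 3 lets only one quarter-warp update shared memory per
// step, which serializes potentially colliding non-atomic += updates.
// Reduce() only has to sum the warps and repack from the stride-8 per-bin layout into
// Buffer[2 * (128 * f + fold) + stat]; there are no in-warp replicas to merge.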
template<int STRIPE_SIZE, int OUTER_UNROLL, int N, int HIST_BLOCK_COUNT, int BLOCKS_PER_FEATURE, typename THist>
__forceinline__ __device__ void ComputeHistogram(const ui32* __restrict__ indices, int offset, int dsSize,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ cindex, float* __restrict__ result) {
weight += offset;
target += offset;
indices += offset;
THist hist(result);
indices += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE;
target += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE;
weight += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE;
dsSize = max(dsSize - (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE, 0);
const int stripe = STRIPE_SIZE * BLOCKS_PER_FEATURE;
if (dsSize)
{
int i = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32;
int iteration_count = (dsSize - i + (stripe - 1)) / stripe;
int blocked_iteration_count = ((dsSize - (i | 31) + (stripe - 1)) / stripe) / N;
weight += i;
target += i;
indices += i;
#pragma unroll OUTER_UNROLL
for (int j = 0; j < blocked_iteration_count; ++j) {
ui32 local_index[N];
#pragma unroll
for (int k = 0; k < N; k++) {
local_index[k] = __ldg(indices + stripe * k);
}
ui32 local_ci[N];
float local_w[N];
float local_wt[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
local_ci[k] = __ldg(cindex + local_index[k]);
local_w[k] = __ldg(weight + stripe * k);
local_wt[k] = __ldg(target + stripe * k);
}
#pragma unroll
for (int k = 0; k < N; ++k) {
hist.AddPoint(local_ci[k], local_wt[k], local_w[k]);
}
i += stripe * N;
indices += stripe * N;
target += stripe * N;
weight += stripe * N;
}
for (int k = blocked_iteration_count * N; k < iteration_count; ++k) {
const int index = __ldg(indices);
ui32 ci = __ldg(cindex + index);
float w = __ldg(weight);
float wt = __ldg(target);
hist.AddPoint(ci, wt, w);
i += stripe;
indices += stripe;
target += stripe;
weight += stripe;
}
__syncthreads();
hist.Reduce();
}
}
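// ComputeHistogram structure, for reference: each of the BLOCKS_PER_FEATURE blocks that share
// a feature group starts at its own STRIPE_SIZE slice and walks the partition with stride
// STRIPE_SIZE * BLOCKS_PER_FEATURE. The main loop gathers N points per thread into registers
// through __ldg (read-only cache) before touching shared memory, trading registers for
// instruction-level parallelism; the second loop drains the tail one point at a time.
// Only after all points are consumed does the block call hist.Reduce().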
template<int STRIPE_SIZE, int OUTER_UNROLL, int HIST_BLOCK_COUNT, int BLOCKS_PER_FEATURE, typename THist>
__forceinline__ __device__ void ComputeHistogram2(
const ui32* __restrict__ indices,
int offset, int dsSize,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ cindex, float* __restrict__ result) {
weight += offset;
target += offset;
indices += offset;
THist hist(result);
if (dsSize) {
//first: make the memory accesses aligned; the leading 128 - (offset % 128) elements are loaded one by one.
{
int lastId = min(dsSize, 128 - (offset & 127));
int colId = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32;
if ((blockIdx.x % BLOCKS_PER_FEATURE) == 0)
{
for (; (colId < lastId); colId += blockDim.x / HIST_BLOCK_COUNT)
{
const int index = __ldg(indices + colId);
const ui32 ci = __ldg(cindex + index);
const float w = __ldg(weight + colId);
const float wt = __ldg(target + colId);
hist.AddPoint(ci, wt, w);
}
}
dsSize = max(dsSize - lastId, 0);
indices += lastId;
target += lastId;
weight += lastId;
}
//now align the end: handle the unaligned tail separately
const int unalignedTail = (dsSize & 31);
if (unalignedTail != 0) {
if ((blockIdx.x % BLOCKS_PER_FEATURE) == 0)
{
int colId = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32;
const int tailOffset = dsSize - unalignedTail;
for (; colId < unalignedTail; colId += blockDim.x / HIST_BLOCK_COUNT)
{
const int index = __ldg(indices + tailOffset + colId);
const ui32 ci = __ldg(cindex + index);
const float w = __ldg(weight + tailOffset + colId);
const float wt = __ldg(target + tailOffset + colId);
hist.AddPoint(ci, wt, w);
}
}
}
dsSize -= unalignedTail;
if (dsSize <= 0) {
if ((blockIdx.x % BLOCKS_PER_FEATURE) == 0) {
__syncthreads();
hist.Reduce();
}
return;
}
indices += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2;
target += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2;
weight += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2;
const int stripe = STRIPE_SIZE * BLOCKS_PER_FEATURE * 2;
dsSize = max(dsSize - (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2, 0);
if (dsSize) {
int iterCount;
{
const int i = 2 * ((threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32);
weight += i;
target += i;
indices += i;
iterCount = (dsSize - i + (stripe - 1)) / stripe;
}
#pragma unroll OUTER_UNROLL
for (int j = 0; j < iterCount; ++j) {
const uint2 localIndices = __ldg((uint2*) indices);
const ui32 firstBin = __ldg(cindex + localIndices.x);
const ui32 secondBin = __ldg(cindex + localIndices.y);
const float2 localTarget = __ldg((float2* )(target));
const float2 localWeight = __ldg((float2* )(weight));
hist.AddPoint(firstBin, localTarget.x, localWeight.x);
hist.AddPoint(secondBin, localTarget.y, localWeight.y);
indices += stripe;
target += stripe;
weight += stripe;
}
__syncthreads();
hist.Reduce();
}
}
}
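// ComputeHistogram2 is the 64-bit-load variant: block 0 of each feature group first consumes
// elements one by one until the data reaches a 128-element boundary, and it also handles the
// unaligned tail of (dsSize & 31) elements. The aligned middle part is then processed two
// points per iteration with vectorized uint2 / float2 __ldg loads, which is why the stride is
// STRIPE_SIZE * BLOCKS_PER_FEATURE * 2.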
template<int BLOCK_SIZE, int OUTER_HIST_BITS_COUNT, int INNER_HIST_BITS_COUNT, int BLOCKS_PER_FEATURE, bool USE_64_BIT_LOAD>
__forceinline__ __device__ void ComputeSplitPropertiesPass(const TCFeature* __restrict__ feature, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, int fCount,
float* binSumsForPart,
float* smem)
{
using THist = TPointHist<OUTER_HIST_BITS_COUNT, INNER_HIST_BITS_COUNT, BLOCK_SIZE>;
const int stripeSize = BLOCK_SIZE;
const int histBlockCount = 1;
if (USE_64_BIT_LOAD) {
#if __CUDA_ARCH__ < 300
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 4 : 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 8 : 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) <= 2 ? 4 : 2;
#endif
const int size = partition->Size;
const int offset = partition->Offset;
ComputeHistogram2 < stripeSize, OUTER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist > (indices, offset, size,
target,
weight,
cindex,
smem);
} else {
#if __CUDA_ARCH__ < 300
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 4 : 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 8 : 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 2;
#endif
ComputeHistogram<stripeSize, OUTER_UNROLL, INNER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist>(indices,
partition->Offset,
partition->Size,
target,
weight,
cindex,
smem);
}
__syncthreads();
const int maxFoldCount = (1 << (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT));
const int fid = (threadIdx.x / 64);
const int w = threadIdx.x & 1;
const int featureFolds = fid < fCount ? feature[fid].Folds : 0;
const int featureOffset = fid * maxFoldCount * 2 + w;
for (int fold = (threadIdx.x / 2) & 31; fold < featureFolds; fold += 32) {
if (fid < fCount) {
const float val = smem[featureOffset + 2 * fold];
if (abs(val) > 1e-20f) {
if (BLOCKS_PER_FEATURE > 1) {
atomicAdd(binSumsForPart + (feature[fid].FirstFoldIndex + fold) * 2 + w, val);
} else {
WriteThrough(binSumsForPart + (feature[fid].FirstFoldIndex + fold) * 2 + w, val);
}
}
}
}
}
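// Flush step above, for reference: after the shared-memory histogram is reduced, threads are
// partitioned by feature (fid = threadIdx.x / 64) and statistic (w = threadIdx.x & 1) and
// stride over that feature's folds, writing binSumsForPart[(FirstFoldIndex + fold) * 2 + w].
// With BLOCKS_PER_FEATURE > 1 several blocks contribute partial sums for the same feature, so
// atomicAdd is required; with a single block a plain write-through store is enough.
// Near-zero sums are skipped to avoid useless global-memory traffic.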
#define DECLARE_PASS(O, I, M, USE_64_BIT_LOAD) \
ComputeSplitPropertiesPass<BLOCK_SIZE, O, I, M, USE_64_BIT_LOAD>(feature, cindex, target, weight, indices, partition, fCount, binSums, &counters[0]);
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ == 600
__launch_bounds__(BLOCK_SIZE, 1)
#elif __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesNBImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount)
{
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
feature += (blockIdx.x / M) * 4;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 4, 4);
__shared__ float counters[32 * BLOCK_SIZE];
const int maxBinCount = GetMaxBinCount(feature, fCount, (int*) &counters[0]);
__syncthreads();
//CatBoost always uses direct loads on the first pass of histogram calculation; for this step 64-bit loads are almost 2x faster
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = FULL_PASS;// float2 for target/indices/weights
#else
const bool use64BitLoad = false;
#endif
if (partition->Size) {
if (maxBinCount <= 32) {
DECLARE_PASS(0, 0, M, use64BitLoad);
} else if (maxBinCount <= 64) {
DECLARE_PASS(0, 1, M, use64BitLoad);
} else if (maxBinCount <= 128) {
DECLARE_PASS(0, 2, M, use64BitLoad);
} else {
DECLARE_PASS(2, 1, M, use64BitLoad);
}
}
}
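// Dispatch note: maxBinCount selects the cheapest histogram layout that fits the widest
// feature of this 4-feature group: TPointHist<0, 0> for up to 32 bins, <0, 1> for up to 64,
// <0, 2> for up to 128, and the <2, 1> fallback (256 folds) otherwise. 64-bit (float2 / uint2)
// loads are only enabled on the full pass and on architectures newer than sm_35, matching the
// comment above.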
template<int BLOCK_SIZE>
struct TPointHistHalfByte {
volatile float* Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 512 * (threadIdx.x / 32);
const int innerHistStart = threadIdx.x & 16;
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistHalfByte(float* buff) {
const int HIST_SIZE = 16 * BLOCK_SIZE;
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
__syncthreads();
Buffer = buff + SliceOffset();
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t, const float w)
{
const bool flag = threadIdx.x & 1;
#pragma unroll
for (int i = 0; i < 8; i++) {
const short f = (threadIdx.x + (i << 1)) & 14;
short bin = bfe(ci, 28 - (f << 1), 4);
bin <<= 5;
bin += f;
const int offset0 = bin + flag;
const int offset1 = bin + !flag;
Buffer[offset0] += (flag ? t : w);
Buffer[offset1] += (flag ? w : t);
}
}
__device__ void Reduce() {
Buffer -= SliceOffset();
const int warpCount = BLOCK_SIZE >> 5;
{
const int fold = (threadIdx.x >> 5) & 15;
const int sumOffset = threadIdx.x & 31;
float sum = 0.0;
if (threadIdx.x < 512)
{
float* __restrict__ buffer = const_cast<float*>(Buffer);
#pragma unroll
for (int warpId = 0; warpId < warpCount; ++warpId)
{
const int warpOffset = 512 * warpId;
sum += buffer[warpOffset + sumOffset + 32 * fold];
}
}
__syncthreads();
if (threadIdx.x < 512) {
Buffer[threadIdx.x] = sum;
}
}
__syncthreads();
const int fold = (threadIdx.x >> 4) & 15;
float sum = 0.0f;
if (threadIdx.x < 256)
{
const int histEntryId = (threadIdx.x & 15);
sum = Buffer[32 * fold + histEntryId] + Buffer[32 * fold + histEntryId + 16];
}
__syncthreads();
if (threadIdx.x < 256) {
Buffer[threadIdx.x] = sum;
}
__syncthreads();
}
};
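// TPointHistHalfByte packs 8 features x 4 bits into one ui32, so bfe(ci, 28 - (f << 1), 4)
// with f = 2 * featureId extracts the 4-bit bin of feature featureId (feature 0 lives in the
// highest nibble). Per warp the histogram is 512 floats: 16 bins x 32, where each 32-float
// bin stripe holds two replicas (threadIdx.x & 16) of 8 features x 2 statistics.
// Reduce() first sums the warps into the leading 512 floats, then merges the two replicas,
// leaving 256 floats laid out as [bin][feature][stat] in Buffer[0..256).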
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesBImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, float* __restrict__ binSums, int totalFeatureCount)
{
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
feature += (blockIdx.x / M) * 32;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 32, 32);
__shared__ float counters[16 * BLOCK_SIZE];
if (partition->Size)
{
using THist = TPointHistHalfByte<BLOCK_SIZE>;
#if __CUDA_ARCH__ > 350
const bool use64bitLoad = FULL_PASS;
#else
const bool use64bitLoad = false;
#endif
if (use64bitLoad) {
//full pass
#if __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 1;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram2 < BLOCK_SIZE, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, &counters[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int INNER_UNROLL = 2;
const int OUTER_UNROLL = 1;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 1;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram < BLOCK_SIZE, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, &counters[0]);
}
ui32 w = threadIdx.x & 1;
ui32 fid = (threadIdx.x >> 1);
if (fid < fCount)
{
const int groupId = fid / 4;
uchar fMask = 1 << (3 - (fid & 3));
float sum = 0.f;
#pragma unroll
for (int i = 0; i < 16; i++) {
if (!(i & fMask)) {
sum += counters[i * 16 + 2 * groupId + w];
}
}
if (abs(sum) > 1e-20f) {
if (M > 1)
{
atomicAdd(binSums + (feature[fid].FirstFoldIndex) * 2 + w, sum);
} else
{
binSums[(feature[fid].FirstFoldIndex) * 2 + w] = sum;
}
}
}
}
}
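// Aggregation note for the binary-feature kernel above: each 4-bit nibble of ci encodes the
// values of 4 binary features at once, so after the half-byte histogram is built,
// groupId = fid / 4 picks the nibble that contains feature fid and fMask picks its bit inside
// the nibble. Summing counters[i * 16 + 2 * groupId + w] over the 16 nibble values i in which
// that bit is not set yields the per-feature statistic that is finally written (or atomically
// added, when M > 1) to binSums.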
template<int BLOCK_SIZE,
int BLOCKS_PER_FEATURE_COUNT>
inline void RunComputeHist2NonBinaryKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesNBImpl < BLOCK_SIZE, true, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
} else
{
ComputeSplitPropertiesNBImpl < BLOCK_SIZE, false, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
}
}
template<int BLOCK_SIZE, int BLOCKS_PER_FEATURE_COUNT>
void RunComputeHist2BinaryKernel(const TCFeature* bFeatures, int bCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, bool fullPass,
int totalFeatureCount,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesBImpl < BLOCK_SIZE, true,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
bFeatures, bCount, cindex, target, weight, indices, partition, binSums, totalFeatureCount
);
} else
{
ComputeSplitPropertiesBImpl < BLOCK_SIZE, false,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
bFeatures, bCount, cindex, target, weight, indices, partition, binSums, totalFeatureCount
);
}
};
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesHalfByteImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount)
{
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
feature += (blockIdx.x / M) * 8;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 8, 8);
//
__shared__ float smem[16 * BLOCK_SIZE];
using THist = TPointHistHalfByte<BLOCK_SIZE>;
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = FULL_PASS;
#else
const bool use64BitLoad = false;
#endif
if (use64BitLoad)
{
#if __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram2 < BLOCK_SIZE, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, smem);
} else {
#if __CUDA_ARCH__ <= 300
const int INNER_UNROLL = 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram < BLOCK_SIZE, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > (
indices, partition->Offset, partition->Size,
target, weight,
cindex, smem);
}
__syncthreads();
const int fid = (threadIdx.x / 32);
const int fold = (threadIdx.x / 2) & 15;
const int w = threadIdx.x & 1;
if (fid < fCount && fold < feature[fid].Folds) {
const float result = smem[fold * 16 + 2 * fid + w];
if (abs(result) > 1e-20) {
if (M > 1) {
atomicAdd(binSums + (feature[fid].FirstFoldIndex + fold) * 2 + w, result);
} else {
binSums[(feature[fid].FirstFoldIndex + fold) * 2 + w] = result;
}
}
}
}
template<int BLOCK_SIZE,
int BLOCKS_PER_FEATURE_COUNT>
inline void RunComputeHist2HalfByteKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesHalfByteImpl < BLOCK_SIZE, true,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
} else
{
ComputeSplitPropertiesHalfByteImpl < BLOCK_SIZE, false,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount);
}
}
__global__ void UpdateBinsImpl(ui32* dstBins, const ui32* bins, const ui32* docIndices, ui32 size,
ui32 loadBit, ui32 foldBits) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
const ui32 idx = LdgWithFallback(docIndices, i);
const ui32 bit = (LdgWithFallback(bins, idx) >> loadBit) & 1;
dstBins[i] = dstBins[i] | (bit << (loadBit + foldBits));
}
}
void UpdateFoldBins(ui32* dstBins, const ui32* bins, const ui32* docIndices, ui32 size,
ui32 loadBit, ui32 foldBits, TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 numBlocks = CeilDivide(size, blockSize);
UpdateBinsImpl << < numBlocks, blockSize, 0, stream >> > (dstBins, bins, docIndices, size, loadBit, foldBits);
}
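// UpdateBinsImpl gathers, for document i, the bin word of docIndices[i], extracts bit loadBit
// and ORs it into dstBins[i] just above the existing fold bits. Illustrative example (values
// chosen arbitrarily): with loadBit = 2 and foldBits = 3 the extracted bit lands at position
// 2 + 3 = 5 of dstBins[i]. The launch uses one thread per document, hence
// CeilDivide(size, 256) blocks of 256 threads.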
template <int HIST_COUNT>
__global__ void UpdatePointwiseHistogramsImpl(float* histogram,
const int firstBinFeature, int featuresCount,
const TDataPartition* parts,
const ui64 histLineSize) {
TPointwisePartOffsetsHelper helper(gridDim.z);
const int leftPartId = helper.GetDataPartitionOffset(blockIdx.y, blockIdx.z);
const int rightPartId = helper.GetDataPartitionOffset(blockIdx.y | gridDim.y, blockIdx.z);
const int binFeature = firstBinFeature + blockIdx.x * blockDim.x + threadIdx.x;
if (binFeature < (firstBinFeature + featuresCount)) {
const TDataPartition leftPart = parts[leftPartId];
const TDataPartition rightPart = parts[rightPartId];
const bool isLeftCalculated = leftPart.Size < rightPart.Size;
const size_t leftOffset = HIST_COUNT * (helper.GetHistogramOffset(blockIdx.y, blockIdx.z) * histLineSize + binFeature);
const size_t rightOffset = HIST_COUNT * (helper.GetHistogramOffset(blockIdx.y | gridDim.y, blockIdx.z) * histLineSize + binFeature);
float calcVal[HIST_COUNT];
float complementVal[HIST_COUNT];
#pragma unroll
for (int histId = 0; histId < HIST_COUNT; ++histId) {
calcVal[histId] = histogram[rightOffset + histId];
complementVal[histId] = histogram[leftOffset + histId] - calcVal[histId];
}
#pragma unroll
for (int histId = 0; histId < HIST_COUNT; ++histId) {
histogram[leftOffset + histId] = isLeftCalculated ? calcVal[histId] : complementVal[histId] ;
histogram[rightOffset + histId] = isLeftCalculated ? complementVal[histId] : calcVal[histId];
}
}
}
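// The kernel above implements the usual "compute the smaller child, derive the larger one"
// trick: it appears to assume that the right part's slot currently holds the histogram that
// was actually computed (always built for the smaller of the two children) while the left
// slot still holds the parent histogram. It then writes the computed values into the smaller
// child's slot and parent minus computed into the sibling's slot, for all HIST_COUNT
// statistics.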
bool UpdatePointwiseHistograms(float* histograms,
int firstBinFeature, int binFeatureCount,
int partCount,
int foldCount,
int histCount,
int histLineSize,
const TDataPartition* parts,
TCudaStream stream) {
const int blockSize = 256;
dim3 numBlocks;
numBlocks.x = (binFeatureCount + blockSize - 1) / blockSize;
numBlocks.y = partCount / 2;
numBlocks.z = foldCount;
if (histCount == 1) {
UpdatePointwiseHistogramsImpl<1><<<numBlocks, blockSize, 0, stream>>>(histograms, firstBinFeature, binFeatureCount, parts, histLineSize);
}
else if (histCount == 2) {
UpdatePointwiseHistogramsImpl<2><<<numBlocks, blockSize, 0, stream>>>(histograms, firstBinFeature, binFeatureCount, parts, histLineSize);
} else {
return false;
}
return true;
}
void ComputeHist2Binary(const TCFeature* bFeatures, ui32 bCount,
const ui32* cindex,
const float* target, const float* weight,
const ui32* indices, ui32 size,
const TDataPartition* partition,
ui32 partsCount, ui32 foldCount,
bool fullPass,
ui32 totalFeatureCount,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = (bCount + 31) / 32;
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = histCount;
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (bCount) {
#define COMPUTE(k) \
RunComputeHist2BinaryKernel<blockSize, k>(bFeatures, bCount, cindex, target, weight, indices, \
partition, binSums, fullPass, totalFeatureCount, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8);
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
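// Launch-configuration note: EstimateBlockPerFeatureMultiplier chooses how many blocks
// cooperate on one feature group, based on the grid shape and dataset size, and is capped at
// 64. Because the multiplier is a template parameter of the kernel, the COMPUTE macro has to
// instantiate every supported power of two explicitly; an unexpected value terminates the
// process. The same pattern is repeated for the half-byte and non-binary variants below.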
void ComputeHist2HalfByte(const TCFeature* halfByteFeatures, ui32 halfByteFeaturesCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
ui32 size,
const TDataPartition* partition, ui32 partsCount, ui32 foldCount,
bool fullPass,
const ui32 histLineSize,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = static_cast<ui32>((halfByteFeaturesCount + 7) / 8);
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = static_cast<ui32>(histCount);
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (halfByteFeaturesCount) {
#define COMPUTE(k)\
RunComputeHist2HalfByteKernel<blockSize, k>(halfByteFeatures, halfByteFeaturesCount, cindex,\
target,\
weight, indices, partition, binSums, histLineSize,\
fullPass,\
stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
void ComputeHist2NonBinary(const TCFeature* nbFeatures, ui32 nbCount,
const ui32* cindex,
const float* target, const float* weight,
const ui32* indices, ui32 size,
const TDataPartition* partition, ui32 partCount, ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
if (nbCount) {
dim3 numBlocks;
numBlocks.x = (nbCount + 3) / 4;
const int histPartCount = (fullPass ? partCount : partCount / 2);
numBlocks.y = histPartCount;
numBlocks.z = foldCount;
const int blockSize = 384;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
#define COMPUTE(k)\
RunComputeHist2NonBinaryKernel<blockSize, k>(nbFeatures, nbCount, cindex, target, weight, indices, \
partition, binSums, histLineSize, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
void ScanPointwiseHistograms(const TCFeature* features,
int featureCount, int partCount, int foldCount,
int histLineSize, bool fullPass,
int histCount,
float* binSums,
TCudaStream stream) {
const int scanBlockSize = 256;
const int histPartCount = (fullPass ? partCount : partCount / 2);
dim3 scanBlocks;
scanBlocks.x = (featureCount * 32 + scanBlockSize - 1) / scanBlockSize;
scanBlocks.y = histPartCount;
scanBlocks.z = foldCount;
const int scanOffset = fullPass ? 0 : ((partCount / 2) * histLineSize * histCount) * foldCount;
if (histCount == 1) {
ScanHistogramsImpl<scanBlockSize, 1> << < scanBlocks, scanBlockSize, 0, stream >> > (features, featureCount, histLineSize, binSums + scanOffset);
} else if (histCount == 2) {
ScanHistogramsImpl<scanBlockSize, 2> << < scanBlocks, scanBlockSize, 0, stream >> >
(features, featureCount, histLineSize, binSums + scanOffset);
} else {
exit(0);
}
}
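// On a partial pass only the second half of the partitions is recomputed, so the scan is
// offset by (partCount / 2) * histLineSize * histCount * foldCount and runs over
// partCount / 2 partitions; on a full pass it starts at 0 and covers all of them.
// ScanHistogramsImpl (defined elsewhere) is instantiated for 1 or 2 statistics only.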
}
|
8cd1cbb5f5a667e99ebff245cdbe02ae0037eebd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/atan_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
AtanGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) / (T(1) + __ldg(X + i) * __ldg(X + i));
#else
dX[i] = dY[i] / (T(1) + X[i] * X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool AtanGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( AtanGradientCUDAKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, dY, X, dX);
return true;
}
REGISTER_CUDA_OPERATOR(
Atan,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AtanFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
AtanGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AtanGradientFunctor<CUDAContext>>);
} // namespace caffe2
| 8cd1cbb5f5a667e99ebff245cdbe02ae0037eebd.cu | #include "caffe2/operators/atan_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
AtanGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) / (T(1) + __ldg(X + i) * __ldg(X + i));
#else
dX[i] = dY[i] / (T(1) + X[i] * X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool AtanGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
AtanGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
return true;
}
REGISTER_CUDA_OPERATOR(
Atan,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AtanFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
AtanGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AtanGradientFunctor<CUDAContext>>);
} // namespace caffe2
|
47715cda5e593d423d5ecb504257f33907937733.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "scan_x.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *g_odata = NULL;
hipMalloc(&g_odata, XSIZE*YSIZE);
int *g_idata = NULL;
hipMalloc(&g_idata, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((scan_x), dim3(gridBlock), dim3(threadBlock), 0, 0, g_odata, g_idata, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((scan_x), dim3(gridBlock), dim3(threadBlock), 0, 0, g_odata, g_idata, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((scan_x), dim3(gridBlock), dim3(threadBlock), 0, 0, g_odata, g_idata, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 47715cda5e593d423d5ecb504257f33907937733.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "scan_x.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *g_odata = NULL;
cudaMalloc(&g_odata, XSIZE*YSIZE);
int *g_idata = NULL;
cudaMalloc(&g_idata, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
scan_x<<<gridBlock,threadBlock>>>(g_odata,g_idata,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
scan_x<<<gridBlock,threadBlock>>>(g_odata,g_idata,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
scan_x<<<gridBlock,threadBlock>>>(g_odata,g_idata,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
sparse_maxpool.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
// clang-format off
// TODO: make spconv_utils.h order agnostic
#include "../spconv_utils.h"
// clang-format on
#include <utils/spconv/spconv/maxpool.h>
#include <utils/spconv/spconv/mp_helper.h>
#include <utils/spconv/tensorview/helper_launch.h>
#include <utils/spconv/tensorview/tensorview.h>
#include <chrono>
#include <limits>
#include <type_traits>
#include <utils/spconv/tensorview/helper_kernel.cuh>
#include "pytorch_cuda_helper.hpp"
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdBlockKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
scalar_t in, out;
int ILPStrideY[NumILP];
Index idxo, idxi;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x; ix < numHot;
ix += blockDim.x * gridDim.x) {
{
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
in = inFeatures[idxi];
out = outFeatures[idxo];
if (in > out) {
outFeatures[idxo] = in;
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdGenericBlockKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut,
int numHot, int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in > out) {
outFeatures[RO[ilp] + iy] = in;
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP,
typename VecType>
__global__ void maxPoolFwdVecBlockKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideY[NumILP];
constexpr int vecloadFactor = sizeof(VecType) / sizeof(scalar_t);
scalar_t bufi[vecloadFactor];
scalar_t bufo[vecloadFactor];
Index idxi, idxo;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x * vecloadFactor; ix < numHot;
ix += blockDim.x * gridDim.x * vecloadFactor) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
reinterpret_cast<VecType *>(bufo)[0] =
reinterpret_cast<VecType *>(outFeatures)[idxo];
reinterpret_cast<VecType *>(bufi)[0] =
reinterpret_cast<const VecType *>(inFeatures)[idxi];
#pragma unroll
for (int i = 0; i < vecloadFactor; i++) {
if (bufi[i] > bufo[i]) {
bufo[i] = bufi[i];
}
}
reinterpret_cast<VecType *>(outFeatures)[idxo] =
reinterpret_cast<VecType *>(bufo)[0];
}
}
}
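// The "Vec" kernels above and below load vecloadFactor channels at once through int2 / int4
// reinterpretation (4 halves or 4 floats per transaction), so they are only dispatched when
// the channel count is a multiple of NumTLP; the "Generic" kernels cover the remaining rows
// and the non-divisible case.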
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdGenericKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
if (ix + ILPStrideX[ilp] < numHot) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
if (ix + ILPStrideX[ilp] < numHot) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in > out) {
outFeatures[RO[ilp] + iy] = in;
}
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdBlockKernel(const scalar_t *outFeatures,
const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
scalar_t in, out;
Index idxo, idxi;
int ILPStrideY[NumILP];
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
fout += blockIdx.y * NumTLP;
fin += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x; ix < numHot;
ix += blockDim.x * gridDim.x) {
{
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
in = inFeatures[idxi];
out = outFeatures[idxo];
if (in == out) {
fin[idxi] += fout[idxo];
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdGenericBlockKernel(
const scalar_t *outFeatures, const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin, const Index *indicesIn,
const Index *indicesOut, int numHot, int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in == out) {
fin[RI[ilp] + iy] += fout[RO[ilp] + iy];
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP,
typename VecType>
__global__ void maxPoolBwdVecBlockKernel(const scalar_t *outFeatures,
const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideY[NumILP];
constexpr int vecloadFactor = sizeof(VecType) / sizeof(scalar_t);
scalar_t bufi[vecloadFactor];
scalar_t bufo[vecloadFactor];
scalar_t bufdi[vecloadFactor];
scalar_t bufdo[vecloadFactor];
Index idxi, idxo;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x * vecloadFactor; ix < numHot;
ix += blockDim.x * gridDim.x * vecloadFactor) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
reinterpret_cast<VecType *>(bufo)[0] =
reinterpret_cast<const VecType *>(outFeatures)[idxo];
reinterpret_cast<VecType *>(bufi)[0] =
reinterpret_cast<const VecType *>(inFeatures)[idxi];
reinterpret_cast<VecType *>(bufdo)[0] =
reinterpret_cast<const VecType *>(fout)[idxo];
reinterpret_cast<VecType *>(bufdi)[0] =
reinterpret_cast<VecType *>(fin)[idxi];
#pragma unroll
for (int i = 0; i < vecloadFactor; i++) {
if (bufi[i] == bufo[i]) {
bufdi[i] += bufdo[i];
}
}
reinterpret_cast<VecType *>(fin)[idxi] =
reinterpret_cast<VecType *>(bufdi)[0];
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdGenericKernel(const scalar_t *outFeatures,
const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
if (ix + ILPStrideX[ilp] < numHot) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
if (ix + ILPStrideX[ilp] < numHot) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in == out) {
fin[RI[ilp] + iy] += fout[RO[ilp] + iy];
}
}
}
}
}
}
namespace functor {
template <typename scalar_t, typename Index>
struct SparseMaxPoolForwardFunctor<tv::TorchGPU, scalar_t, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<scalar_t, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::TorchGPU &d, tv::TensorView<scalar_t> outFeatures,
tv::TensorView<const scalar_t> inFeatures,
tv::TensorView<const Index> indices, int size) {
if (size <= 0) return;
int numPlanes = inFeatures.dim(1);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(scalar_t);
mp_for_each<kernel_block_t>([=, &outFeatures, &inFeatures, &indices,
¬Found](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (numHotBlock >= NumTLP) {
hipLaunchKernelGGL(( maxPoolFwdVecBlockKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(::min(size / NumTLP, 512), numPlanes / NumTLP)),
dim3(dim3(NumTLP / vecloadFactor, NumTLP / NumILP)), 0,
d.getStream(), outFeatures.data(), inFeatures.data(),
indices.subview(0).data(),
indices.subview(1).data(), numHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
hipLaunchKernelGGL(( maxPoolFwdGenericKernel<scalar_t, Index, int(NumTLP), NumILP>)
, dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)),
0, d.getStream(), outFeatures.data(), inFeatures.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock,
size - numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (numHotBlock >= NumTLP) {
hipLaunchKernelGGL(( maxPoolFwdGenericBlockKernel<scalar_t, Index, NumTLP, NumILP>)
, dim3(dim3(size / NumTLP, tv::launch::DivUp(numPlanes, NumTLP))),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, d.getStream(),
outFeatures.data(), inFeatures.data(),
indices.subview(0).data(), indices.subview(1).data(),
numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
hipLaunchKernelGGL(( maxPoolFwdGenericKernel<scalar_t, Index, NumTLP, NumILP>)
, dim3(dim3(1, tv::launch::DivUp(numPlanes, NumTLP))),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, d.getStream(),
outFeatures.data(), inFeatures.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock, size - numHotBlock,
numPlanes);
TV_CHECK_CUDA_ERR();
}
}
}
};
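// Dispatch summary for the functor above: mp_for_each tries the tile sizes 64, 32 and 16 and
// takes the first NumTLP that divides the channel count, running the vectorized block kernel
// on the largest multiple-of-NumTLP prefix of the indices and the generic kernel on the tail.
// If no tile size divides numPlanes, it falls back to the fully generic block/tail kernels
// with NumTLP = 64. The backward functor below mirrors the same structure.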
template <typename scalar_t, typename Index>
struct SparseMaxPoolBackwardFunctor<tv::TorchGPU, scalar_t, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<scalar_t, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::TorchGPU &d,
tv::TensorView<const scalar_t> outFeatures,
tv::TensorView<const scalar_t> inFeatures,
tv::TensorView<const scalar_t> fout,
tv::TensorView<scalar_t> fin,
tv::TensorView<const Index> indices, int size) {
if (size <= 0) return;
int numPlanes = inFeatures.dim(1);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(scalar_t);
mp_for_each<kernel_block_t>([=, &outFeatures, &inFeatures, &fout, &fin,
&indices, ¬Found](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (numHotBlock >= NumTLP) {
hipLaunchKernelGGL(( maxPoolBwdVecBlockKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(::min(size / NumTLP, 512), numPlanes / NumTLP)),
dim3(dim3(NumTLP / vecloadFactor, NumTLP / NumILP)), 0,
d.getStream(), outFeatures.data(), inFeatures.data(),
fout.data(), fin.data(),
indices.subview(0).data(),
indices.subview(1).data(), numHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
hipLaunchKernelGGL(( maxPoolBwdGenericKernel<scalar_t, Index, int(NumTLP), NumILP>)
, dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)),
0, d.getStream(), outFeatures.data(), inFeatures.data(),
fout.data(), fin.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock,
size - numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (numHotBlock >= NumTLP) {
hipLaunchKernelGGL(( maxPoolBwdGenericBlockKernel<scalar_t, Index, NumTLP, NumILP>)
, dim3(dim3(size / NumTLP, tv::launch::DivUp(numPlanes, NumTLP))),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, d.getStream(),
outFeatures.data(), inFeatures.data(), fout.data(), fin.data(),
indices.subview(0).data(), indices.subview(1).data(),
numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
hipLaunchKernelGGL(( maxPoolBwdGenericKernel<scalar_t, Index, NumTLP, NumILP>)
, dim3(dim3(1, tv::launch::DivUp(numPlanes, NumTLP))),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, d.getStream(),
outFeatures.data(), inFeatures.data(), fout.data(), fin.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock, size - numHotBlock,
numPlanes);
TV_CHECK_CUDA_ERR();
}
}
}
};
} // namespace functor
#define DECLARE_GPU_SPECS_T_INDEX(scalar_t, Index) \
template struct functor::SparseMaxPoolForwardFunctor<tv::TorchGPU, scalar_t, \
Index>; \
template struct functor::SparseMaxPoolBackwardFunctor<tv::TorchGPU, \
scalar_t, Index>;
#define DECLARE_GPU_SPECS(scalar_t) DECLARE_GPU_SPECS_T_INDEX(scalar_t, int);
DECLARE_GPU_SPECS(float);
DECLARE_GPU_SPECS(double);
DECLARE_GPU_SPECS(at::Half);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_T_INDEX
| sparse_maxpool.cu | // Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
// clang-format off
// TODO: make spconv_utils.h order agnostic
#include "../spconv_utils.h"
// clang-format on
#include <utils/spconv/spconv/maxpool.h>
#include <utils/spconv/spconv/mp_helper.h>
#include <utils/spconv/tensorview/helper_launch.h>
#include <utils/spconv/tensorview/tensorview.h>
#include <chrono>
#include <limits>
#include <type_traits>
#include <utils/spconv/tensorview/helper_kernel.cuh>
#include "pytorch_cuda_helper.hpp"
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdBlockKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
scalar_t in, out;
int ILPStrideY[NumILP];
Index idxo, idxi;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x; ix < numHot;
ix += blockDim.x * gridDim.x) {
{
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
in = inFeatures[idxi];
out = outFeatures[idxo];
if (in > out) {
outFeatures[idxo] = in;
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdGenericBlockKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut,
int numHot, int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in > out) {
outFeatures[RO[ilp] + iy] = in;
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP,
typename VecType>
__global__ void maxPoolFwdVecBlockKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideY[NumILP];
constexpr int vecloadFactor = sizeof(VecType) / sizeof(scalar_t);
scalar_t bufi[vecloadFactor];
scalar_t bufo[vecloadFactor];
Index idxi, idxo;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x * vecloadFactor; ix < numHot;
ix += blockDim.x * gridDim.x * vecloadFactor) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
reinterpret_cast<VecType *>(bufo)[0] =
reinterpret_cast<VecType *>(outFeatures)[idxo];
reinterpret_cast<VecType *>(bufi)[0] =
reinterpret_cast<const VecType *>(inFeatures)[idxi];
#pragma unroll
for (int i = 0; i < vecloadFactor; i++) {
if (bufi[i] > bufo[i]) {
bufo[i] = bufi[i];
}
}
reinterpret_cast<VecType *>(outFeatures)[idxo] =
reinterpret_cast<VecType *>(bufo)[0];
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolFwdGenericKernel(scalar_t *outFeatures,
const scalar_t *inFeatures,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
if (ix + ILPStrideX[ilp] < numHot) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
if (ix + ILPStrideX[ilp] < numHot) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in > out) {
outFeatures[RO[ilp] + iy] = in;
}
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdBlockKernel(const scalar_t *outFeatures,
const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
scalar_t in, out;
Index idxo, idxi;
int ILPStrideY[NumILP];
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
outFeatures += blockIdx.y * NumTLP;
inFeatures += blockIdx.y * NumTLP;
fout += blockIdx.y * NumTLP;
fin += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x; ix < numHot;
ix += blockDim.x * gridDim.x) {
{
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
in = inFeatures[idxi];
out = outFeatures[idxo];
if (in == out) {
fin[idxi] += fout[idxo];
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdGenericBlockKernel(
const scalar_t *outFeatures, const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin, const Index *indicesIn,
const Index *indicesOut, int numHot, int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in == out) {
fin[RI[ilp] + iy] += fout[RO[ilp] + iy];
}
}
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP,
typename VecType>
__global__ void maxPoolBwdVecBlockKernel(const scalar_t *outFeatures,
const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideY[NumILP];
constexpr int vecloadFactor = sizeof(VecType) / sizeof(scalar_t);
scalar_t bufi[vecloadFactor];
scalar_t bufo[vecloadFactor];
scalar_t bufdi[vecloadFactor];
scalar_t bufdo[vecloadFactor];
Index idxi, idxo;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideY[ilp] = threadIdx.y + ilp * blockDim.y;
  outFeatures += blockIdx.y * NumTLP;
  inFeatures += blockIdx.y * NumTLP;
  fout += blockIdx.y * NumTLP;
  fin += blockIdx.y * NumTLP;
for (int ix = blockIdx.x * blockDim.x * vecloadFactor; ix < numHot;
ix += blockDim.x * gridDim.x * vecloadFactor) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
idxi = indicesIn[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
idxo = indicesOut[ix + ILPStrideY[ilp]] * numPlanes + threadIdx.x;
reinterpret_cast<VecType *>(bufo)[0] =
reinterpret_cast<const VecType *>(outFeatures)[idxo];
reinterpret_cast<VecType *>(bufi)[0] =
reinterpret_cast<const VecType *>(inFeatures)[idxi];
reinterpret_cast<VecType *>(bufdo)[0] =
reinterpret_cast<const VecType *>(fout)[idxo];
reinterpret_cast<VecType *>(bufdi)[0] =
reinterpret_cast<VecType *>(fin)[idxi];
#pragma unroll
for (int i = 0; i < vecloadFactor; i++) {
if (bufi[i] == bufo[i]) {
bufdi[i] += bufdo[i];
}
}
reinterpret_cast<VecType *>(fin)[idxi] =
reinterpret_cast<VecType *>(bufdi)[0];
}
}
}
template <typename scalar_t, typename Index, int NumTLP, int NumILP>
__global__ void maxPoolBwdGenericKernel(const scalar_t *outFeatures,
const scalar_t *inFeatures,
const scalar_t *fout, scalar_t *fin,
const Index *indicesIn,
const Index *indicesOut, int numHot,
int numPlanes) {
int ILPStrideX[NumILP];
Index RI[NumILP];
Index RO[NumILP];
scalar_t in, out;
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++)
ILPStrideX[ilp] = ilp * gridDim.x * blockDim.x;
for (int ix : tv::KernelLoopX<int, NumILP>(numHot)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ilp++) {
if (ix + ILPStrideX[ilp] < numHot) {
RI[ilp] = indicesIn[ix + ILPStrideX[ilp]] * numPlanes;
RO[ilp] = indicesOut[ix + ILPStrideX[ilp]] * numPlanes;
}
}
for (int iy : tv::KernelLoopY<int>(numPlanes)) {
#pragma unroll
for (int ilp = 0; ilp < NumILP; ++ilp) {
if (ix + ILPStrideX[ilp] < numHot) {
in = inFeatures[RI[ilp] + iy];
out = outFeatures[RO[ilp] + iy];
if (in == out) {
fin[RI[ilp] + iy] += fout[RO[ilp] + iy];
}
}
}
}
}
}
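// Host-side dispatch: mp_for_each tries the tile sizes NumTLP in {64, 32, 16}; the
// first one that divides numPlanes runs the vectorized block kernel over the
// NumTLP-aligned prefix of the indices (numHotBlock) plus a generic kernel for the
// remainder. If no tile size divides numPlanes, the NumTLP = 64 generic block /
// generic kernel pair is used instead.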
namespace functor {
template <typename scalar_t, typename Index>
struct SparseMaxPoolForwardFunctor<tv::TorchGPU, scalar_t, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<scalar_t, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::TorchGPU &d, tv::TensorView<scalar_t> outFeatures,
tv::TensorView<const scalar_t> inFeatures,
tv::TensorView<const Index> indices, int size) {
if (size <= 0) return;
int numPlanes = inFeatures.dim(1);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(scalar_t);
mp_for_each<kernel_block_t>([=, &outFeatures, &inFeatures, &indices,
                                 &notFound](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (numHotBlock >= NumTLP) {
maxPoolFwdVecBlockKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(std::min(size / NumTLP, 512), numPlanes / NumTLP),
dim3(NumTLP / vecloadFactor, NumTLP / NumILP), 0,
d.getStream()>>>(outFeatures.data(), inFeatures.data(),
indices.subview(0).data(),
indices.subview(1).data(), numHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
maxPoolFwdGenericKernel<scalar_t, Index, int(NumTLP), NumILP>
<<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP),
0, d.getStream()>>>(outFeatures.data(), inFeatures.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock,
size - numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (numHotBlock >= NumTLP) {
maxPoolFwdGenericBlockKernel<scalar_t, Index, NumTLP, NumILP>
<<<dim3(size / NumTLP, tv::launch::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
outFeatures.data(), inFeatures.data(),
indices.subview(0).data(), indices.subview(1).data(),
numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
maxPoolFwdGenericKernel<scalar_t, Index, NumTLP, NumILP>
<<<dim3(1, tv::launch::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
outFeatures.data(), inFeatures.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock, size - numHotBlock,
numPlanes);
TV_CHECK_CUDA_ERR();
}
}
}
};
template <typename scalar_t, typename Index>
struct SparseMaxPoolBackwardFunctor<tv::TorchGPU, scalar_t, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<scalar_t, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::TorchGPU &d,
tv::TensorView<const scalar_t> outFeatures,
tv::TensorView<const scalar_t> inFeatures,
tv::TensorView<const scalar_t> fout,
tv::TensorView<scalar_t> fin,
tv::TensorView<const Index> indices, int size) {
if (size <= 0) return;
int numPlanes = inFeatures.dim(1);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(scalar_t);
mp_for_each<kernel_block_t>([=, &outFeatures, &inFeatures, &fout, &fin,
                                 &indices, &notFound](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (numHotBlock >= NumTLP) {
maxPoolBwdVecBlockKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(std::min(size / NumTLP, 512), numPlanes / NumTLP),
dim3(NumTLP / vecloadFactor, NumTLP / NumILP), 0,
d.getStream()>>>(outFeatures.data(), inFeatures.data(),
fout.data(), fin.data(),
indices.subview(0).data(),
indices.subview(1).data(), numHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
maxPoolBwdGenericKernel<scalar_t, Index, int(NumTLP), NumILP>
<<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP),
0, d.getStream()>>>(outFeatures.data(), inFeatures.data(),
fout.data(), fin.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock,
size - numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
int numHotBlock = (size / NumTLP) * NumTLP;
if (numHotBlock >= NumTLP) {
maxPoolBwdGenericBlockKernel<scalar_t, Index, NumTLP, NumILP>
<<<dim3(size / NumTLP, tv::launch::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
outFeatures.data(), inFeatures.data(), fout.data(), fin.data(),
indices.subview(0).data(), indices.subview(1).data(),
numHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
if (size > numHotBlock) {
maxPoolBwdGenericKernel<scalar_t, Index, NumTLP, NumILP>
<<<dim3(1, tv::launch::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
outFeatures.data(), inFeatures.data(), fout.data(), fin.data(),
indices.subview(0).data() + numHotBlock,
indices.subview(1).data() + numHotBlock, size - numHotBlock,
numPlanes);
TV_CHECK_CUDA_ERR();
}
}
}
};
} // namespace functor
#define DECLARE_GPU_SPECS_T_INDEX(scalar_t, Index) \
template struct functor::SparseMaxPoolForwardFunctor<tv::TorchGPU, scalar_t, \
Index>; \
template struct functor::SparseMaxPoolBackwardFunctor<tv::TorchGPU, \
scalar_t, Index>;
#define DECLARE_GPU_SPECS(scalar_t) DECLARE_GPU_SPECS_T_INDEX(scalar_t, int);
DECLARE_GPU_SPECS(float);
DECLARE_GPU_SPECS(double);
DECLARE_GPU_SPECS(at::Half);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_T_INDEX
|
cf6da7a2ae33969a76a984fc7621e88869bb78c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlacpy_batched.cu normal z -> s, Wed Sep 17 15:08:23 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/* =====================================================================
Batches slacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread copies one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
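/*
    For example, with m = 1000, batchCount = 8 and NB = 64, the host wrapper below
    launches grid = dim3( (1000 + 64 - 1)/64, 8 ) = dim3( 16, 8 ) with
    threads = dim3( 64 ): matrix i of the batch is covered by the 16 block rows
    that have blockIdx.y == i, and in the last block row only threads with i < m
    do any work.
*/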
__global__ void
slacpy_batched_kernel(
int m, int n,
const float * const *dAarray, int ldda,
float **dBarray, int lddb )
{
// dA and dB iterate across row i
const float *dA = dAarray[ blockIdx.y ];
float *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const float *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = *dA;
dA += ldda;
dB += lddb;
}
}
}
/* ===================================================================== */
/**
Note
--------
- UPLO Parameter is disabled
- Do we want to provide a generic function to the user with all the options?
Purpose
-------
SLACPY copies all or part of a set of two-dimensional matrices dAarray[i]
to another set of matrices dBarray[i], for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dAarray[i] to be copied to dBarray[i].
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dAarray[i]
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a REAL array, dimension (LDDA,N)
The m by n matrices dAarray[i].
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a REAL array, dimension (LDDB,N)
The m by n matrices dBarray[i].
On exit, matrix dBarray[i] = matrix dAarray[i] in the locations
specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
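/*
    Minimal usage sketch (illustrative only, not part of the MAGMA sources; the
    pointer-array setup is an assumption and error checking is omitted). dAarray
    and dBarray must be device arrays holding one device pointer per matrix;
    MagmaFull here stands for any uplo value other than MagmaUpper/MagmaLower,
    which selects the full copy:

        std::vector<float*> hA(batchCount), hB(batchCount); // device pointers, one per matrix
        // ... allocate each hA[i], hB[i] with leading dimension ldda = lddb = m ...
        float **dAarray, **dBarray;
        hipMalloc( (void**)&dAarray, batchCount*sizeof(float*) );
        hipMalloc( (void**)&dBarray, batchCount*sizeof(float*) );
        hipMemcpy( dAarray, hA.data(), batchCount*sizeof(float*), hipMemcpyHostToDevice );
        hipMemcpy( dBarray, hB.data(), batchCount*sizeof(float*), hipMemcpyHostToDevice );
        magmablas_slacpy_batched( MagmaFull, m, n, dAarray, ldda, dBarray, lddb, batchCount );
*/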
extern "C" void
magmablas_slacpy_batched_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
const float * const *dAarray, magma_int_t ldda,
float **dBarray, magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB, batchCount );
if ( uplo == MagmaUpper ) {
fprintf(stderr, "lacpy upper is not implemented\n");
}
else if ( uplo == MagmaLower ) {
fprintf(stderr, "lacpy lower is not implemented\n");
}
else {
hipLaunchKernelGGL(( slacpy_batched_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, dAarray, ldda, dBarray, lddb );
}
}
/**
@see magmablas_slacpy_batched_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
const float * const *dAarray, magma_int_t ldda,
float **dBarray, magma_int_t lddb,
magma_int_t batchCount )
{
magmablas_slacpy_batched_q(
uplo, m, n, dAarray, ldda, dBarray, lddb, batchCount, magma_stream );
}
| cf6da7a2ae33969a76a984fc7621e88869bb78c6.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlacpy_batched.cu normal z -> s, Wed Sep 17 15:08:23 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/* =====================================================================
Batches slacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread copies one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
slacpy_batched_kernel(
int m, int n,
const float * const *dAarray, int ldda,
float **dBarray, int lddb )
{
// dA and dB iterate across row i
const float *dA = dAarray[ blockIdx.y ];
float *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const float *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = *dA;
dA += ldda;
dB += lddb;
}
}
}
/* ===================================================================== */
/**
Note
--------
- UPLO Parameter is disabled
- Do we want to provide a generic function to the user with all the options?
Purpose
-------
SLACPY copies all or part of a set of two-dimensional matrices dAarray[i]
to another set of matrices dBarray[i], for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dAarray[i] to be copied to dBarray[i].
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dAarray[i]
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a REAL array, dimension (LDDA,N)
The m by n matrices dAarray[i].
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a REAL array, dimension (LDDB,N)
The m by n matrices dBarray[i].
On exit, matrix dBarray[i] = matrix dAarray[i] in the locations
specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to add; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slacpy_batched_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
const float * const *dAarray, magma_int_t ldda,
float **dBarray, magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB, batchCount );
if ( uplo == MagmaUpper ) {
fprintf(stderr, "lacpy upper is not implemented\n");
}
else if ( uplo == MagmaLower ) {
fprintf(stderr, "lacpy lower is not implemented\n");
}
else {
slacpy_batched_kernel<<< grid, threads, 0, queue >>>(
m, n, dAarray, ldda, dBarray, lddb );
}
}
/**
@see magmablas_slacpy_batched_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
const float * const *dAarray, magma_int_t ldda,
float **dBarray, magma_int_t lddb,
magma_int_t batchCount )
{
magmablas_slacpy_batched_q(
uplo, m, n, dAarray, ldda, dBarray, lddb, batchCount, magma_stream );
}
|
c6f921a39af091acdb763c8fbc57fa8c9de5caee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/adaptive_avg_pool3d_impl.cuh"
#include "include/hip/hip_fp16.h"
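// Adaptive pooling window: output element (oh, ow, od) averages the input region
// [floor(oh*H_in/H_out), ceil((oh+1)*H_in/H_out)) in each spatial dimension, so
// windows may overlap and differ in size. For example, input_depth = 5 with
// output_depth = 3 gives depth windows [0,2), [1,4) and [3,5).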
template <typename T>
__global__ void AdaptiveAvgPool3DKernel(const uint out_size, const uint input_channel, const uint input_height,
const uint input_width, const uint input_depth, const uint output_channel,
const uint output_height, const uint output_width, const uint output_depth,
T *input_data, T *output_data) {
for (uint pos = blockIdx.x * blockDim.x + threadIdx.x; pos < out_size; pos += gridDim.x * blockDim.x) {
const uint on = pos / (output_channel * output_height * output_width * output_depth);
const uint oc = pos / (output_height * output_width * output_depth) % output_channel;
const uint oh = pos / (output_width * output_depth) % output_height;
const uint ow = pos / output_depth % output_width;
const uint od = pos % output_depth;
const uint in = on;
const uint ic = oc;
uint ih0 = floorf(__uint2float_rn(oh * input_height) / __uint2float_rn(output_height));
uint ih1 = ceilf(__uint2float_rn((oh + 1) * input_height) / __uint2float_rn(output_height));
uint kh = ih1 - ih0;
uint iw0 = floorf(__uint2float_rn(ow * input_width) / __uint2float_rn(output_width));
uint iw1 = ceilf(__uint2float_rn((ow + 1) * input_width) / __uint2float_rn(output_width));
uint kw = iw1 - iw0;
uint id0 = floorf(__uint2float_rn(od * input_depth) / __uint2float_rn(output_depth));
uint id1 = ceilf(__uint2float_rn((od + 1) * input_depth) / __uint2float_rn(output_depth));
uint kd = id1 - id0;
T sum = 0;
uint in_index = 0;
for (uint ih = ih0; ih < ih1; ih++) {
for (uint iw = iw0; iw < iw1; iw++) {
for (uint id = id0; id < id1; id++) {
in_index = (((in * input_channel + ic) * input_height + ih) * input_width + iw) * input_depth + id;
sum += input_data[in_index];
}
}
}
uint out_index = (((on * output_channel + oc) * output_height + oh) * output_width + ow) * output_depth + od;
output_data[out_index] = sum / static_cast<T>(kh * kw * kd);
}
}
template <typename T>
hipError_t ApplyAdaptiveAvgPool3D(const uint out_size, const uint input_channel, const uint input_height,
const uint input_width, const uint input_depth, const uint output_channel,
const uint output_height, const uint output_width, const uint output_depth,
T *input_data, T *output_data, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( AdaptiveAvgPool3DKernel), dim3(GET_BLOCKS(out_size)), dim3(GET_THREADS), 0, cuda_stream,
out_size, input_channel, input_height, input_width, input_depth, output_channel, output_height, output_width,
output_depth, input_data, output_data);
CHECK_CUDA_LAUNCH_SUCCESS();
}
template CUDA_LIB_EXPORT hipError_t ApplyAdaptiveAvgPool3D<float>(const uint out_size, const uint input_channel,
const uint input_height, const uint input_width,
const uint input_depth, const uint output_channel,
const uint output_height, const uint output_width,
const uint output_depth, float *input_data,
float *output_data, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdaptiveAvgPool3D<half>(const uint out_size, const uint input_channel,
const uint input_height, const uint input_width,
const uint input_depth, const uint output_channel,
const uint output_height, const uint output_width,
const uint output_depth, half *input_data,
half *output_data, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdaptiveAvgPool3D<double>(const uint out_size, const uint input_channel,
const uint input_height, const uint input_width,
const uint input_depth, const uint output_channel,
const uint output_height, const uint output_width,
const uint output_depth, double *input_data,
double *output_data, hipStream_t cuda_stream);
| c6f921a39af091acdb763c8fbc57fa8c9de5caee.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/adaptive_avg_pool3d_impl.cuh"
#include "include/cuda_fp16.h"
template <typename T>
__global__ void AdaptiveAvgPool3DKernel(const uint out_size, const uint input_channel, const uint input_height,
const uint input_width, const uint input_depth, const uint output_channel,
const uint output_height, const uint output_width, const uint output_depth,
T *input_data, T *output_data) {
for (uint pos = blockIdx.x * blockDim.x + threadIdx.x; pos < out_size; pos += gridDim.x * blockDim.x) {
const uint on = pos / (output_channel * output_height * output_width * output_depth);
const uint oc = pos / (output_height * output_width * output_depth) % output_channel;
const uint oh = pos / (output_width * output_depth) % output_height;
const uint ow = pos / output_depth % output_width;
const uint od = pos % output_depth;
const uint in = on;
const uint ic = oc;
uint ih0 = floorf(__uint2float_rn(oh * input_height) / __uint2float_rn(output_height));
uint ih1 = ceilf(__uint2float_rn((oh + 1) * input_height) / __uint2float_rn(output_height));
uint kh = ih1 - ih0;
uint iw0 = floorf(__uint2float_rn(ow * input_width) / __uint2float_rn(output_width));
uint iw1 = ceilf(__uint2float_rn((ow + 1) * input_width) / __uint2float_rn(output_width));
uint kw = iw1 - iw0;
uint id0 = floorf(__uint2float_rn(od * input_depth) / __uint2float_rn(output_depth));
uint id1 = ceilf(__uint2float_rn((od + 1) * input_depth) / __uint2float_rn(output_depth));
uint kd = id1 - id0;
T sum = 0;
uint in_index = 0;
for (uint ih = ih0; ih < ih1; ih++) {
for (uint iw = iw0; iw < iw1; iw++) {
for (uint id = id0; id < id1; id++) {
in_index = (((in * input_channel + ic) * input_height + ih) * input_width + iw) * input_depth + id;
sum += input_data[in_index];
}
}
}
uint out_index = (((on * output_channel + oc) * output_height + oh) * output_width + ow) * output_depth + od;
output_data[out_index] = sum / static_cast<T>(kh * kw * kd);
}
}
template <typename T>
cudaError_t ApplyAdaptiveAvgPool3D(const uint out_size, const uint input_channel, const uint input_height,
const uint input_width, const uint input_depth, const uint output_channel,
const uint output_height, const uint output_width, const uint output_depth,
T *input_data, T *output_data, cudaStream_t cuda_stream) {
AdaptiveAvgPool3DKernel<<<GET_BLOCKS(out_size), GET_THREADS, 0, cuda_stream>>>(
out_size, input_channel, input_height, input_width, input_depth, output_channel, output_height, output_width,
output_depth, input_data, output_data);
CHECK_CUDA_LAUNCH_SUCCESS();
}
template CUDA_LIB_EXPORT cudaError_t ApplyAdaptiveAvgPool3D<float>(const uint out_size, const uint input_channel,
const uint input_height, const uint input_width,
const uint input_depth, const uint output_channel,
const uint output_height, const uint output_width,
const uint output_depth, float *input_data,
float *output_data, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdaptiveAvgPool3D<half>(const uint out_size, const uint input_channel,
const uint input_height, const uint input_width,
const uint input_depth, const uint output_channel,
const uint output_height, const uint output_width,
const uint output_depth, half *input_data,
half *output_data, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdaptiveAvgPool3D<double>(const uint out_size, const uint input_channel,
const uint input_height, const uint input_width,
const uint input_depth, const uint output_channel,
const uint output_height, const uint output_width,
const uint output_depth, double *input_data,
double *output_data, cudaStream_t cuda_stream);
|
9b482f602dd1a750d98d8e74739c5ff497d52b9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void set_chunk_data( int x, int y, double dx, double dy, double* cell_x, double* cell_y, double* cell_dx, double* cell_dy, double* vertex_x, double* vertex_y, double* volume, double* x_area, double* y_area)
{
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if(gid < x)
{
cell_x[gid] = 0.5*(vertex_x[gid]+vertex_x[gid+1]);
cell_dx[gid] = dx;
}
if(gid < y)
{
cell_y[gid] = 0.5*(vertex_y[gid]+vertex_y[gid+1]);
cell_dy[gid] = dy;
}
if(gid < x*y)
{
volume[gid] = dx*dy;
}
if(gid < (x+1)*y)
{
x_area[gid] = dy;
}
if(gid < x*(y+1))
{
y_area[gid] = dx;
}
} | 9b482f602dd1a750d98d8e74739c5ff497d52b9d.cu | #include "includes.h"
__global__ void set_chunk_data( int x, int y, double dx, double dy, double* cell_x, double* cell_y, double* cell_dx, double* cell_dy, double* vertex_x, double* vertex_y, double* volume, double* x_area, double* y_area)
{
const int gid = blockIdx.x*blockDim.x+threadIdx.x;
if(gid < x)
{
cell_x[gid] = 0.5*(vertex_x[gid]+vertex_x[gid+1]);
cell_dx[gid] = dx;
}
if(gid < y)
{
cell_y[gid] = 0.5*(vertex_y[gid]+vertex_y[gid+1]);
cell_dy[gid] = dy;
}
if(gid < x*y)
{
volume[gid] = dx*dy;
}
if(gid < (x+1)*y)
{
x_area[gid] = dy;
}
if(gid < x*(y+1))
{
y_area[gid] = dx;
}
} |
5a73404523f760305cb2bd44def911dddbe98530.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 1000
struct hipComplex {
float r;
float i;
__device__ hipComplex(float a, float b) : r(a), i(b){}
__device__ float magnitude2(void) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a){
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator + (const hipComplex& a){
return hipComplex(r+a.r, i+a.i);
}
};
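// Escape-time test: iterate z <- z*z + c with c = -0.8 + 0.156i, starting from the
// scaled pixel coordinate; the point is treated as inside the Julia set (return 1)
// if |z|^2 stays at or below 1000 for all 200 iterations.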
__device__ int julia(int x, int y){
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++){
a = a * a + c;
if(a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char *ptr){
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
int juliaValue = julia(x, y);
ptr[offset*4 + 0] = 255 * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
int main(void){
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
HANDLE_ERROR(hipMalloc((void**)&dev_bitmap, bitmap.image_size()));
dim3 grid(DIM, DIM);
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(1), 0, 0, dev_bitmap);
HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost));
bitmap.display_and_exit();
HANDLE_ERROR(hipFree(dev_bitmap));
return 0;
}
| 5a73404523f760305cb2bd44def911dddbe98530.cu | #include <stdio.h>
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 1000
struct cuComplex {
float r;
float i;
__device__ cuComplex(float a, float b) : r(a), i(b){}
__device__ float magnitude2(void) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a){
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator + (const cuComplex& a){
return cuComplex(r+a.r, i+a.i);
}
};
__device__ int julia(int x, int y){
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++){
a = a * a + c;
if(a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char *ptr){
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
int juliaValue = julia(x, y);
ptr[offset*4 + 0] = 255 * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
int main(void){
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
HANDLE_ERROR(cudaMalloc((void**)&dev_bitmap, bitmap.image_size()));
dim3 grid(DIM, DIM);
kernel<<<grid, 1>>>(dev_bitmap);
HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost));
bitmap.display_and_exit();
HANDLE_ERROR(cudaFree(dev_bitmap));
return 0;
}
|
1e57f39bb124c14fc552c99a4744c8fe8e2e60a9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <cudf/types.h>
#include <cudf/legacy/column.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/rmm.h>
#include <cuspatial/soa_readers.hpp>
#include <utility/utility.hpp>
namespace cuspatial
{
/**
* @brief read lon/lat from file into two columns; data type is fixed to double (GDF_FLOAT64)
*
* see soa_readers.hpp
*/
std::pair<gdf_column, gdf_column> read_lonlat_points_soa(const char *filename)
{
hipStream_t stream{0};
double * p_lon=nullptr, *p_lat=nullptr;
int num_p=read_point_lonlat<double>(filename,p_lon,p_lat);
gdf_column pnt_lon,pnt_lat;
memset(&pnt_lon,0,sizeof(gdf_column));
memset(&pnt_lat,0,sizeof(gdf_column));
double* temp_lon{nullptr};
RMM_TRY( RMM_ALLOC(&temp_lon, num_p * sizeof(double), 0) );
CUDA_TRY( hipMemcpyAsync(temp_lon, p_lon,
num_p * sizeof(double) ,
hipMemcpyHostToDevice,stream) );
gdf_column_view_augmented(&pnt_lon, temp_lon, nullptr, num_p,
GDF_FLOAT64, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "lon");
delete[] p_lon;
double* temp_lat{nullptr};
RMM_TRY( RMM_ALLOC(&temp_lat, num_p * sizeof(double), 0) );
CUDA_TRY( hipMemcpyAsync(temp_lat, p_lat,
num_p * sizeof(double) ,
hipMemcpyHostToDevice,stream) );
gdf_column_view_augmented(&pnt_lat, temp_lat, nullptr, num_p,
GDF_FLOAT64, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "lat");
delete[] p_lat;
return std::make_pair(pnt_lon,pnt_lat);
}
/**
* @brief read x/y from file into two columns; data type is fixed to double (GDF_FLOAT64)
*
* see soa_readers.hpp
*/
std::pair<gdf_column, gdf_column> read_xy_points_soa(const char *filename)
{
double * p_x=nullptr, *p_y=nullptr;
int num_p=read_point_xy<double>(filename,p_x,p_y);
gdf_column pnt_x,pnt_y;
memset(&pnt_x,0,sizeof(gdf_column));
memset(&pnt_y,0,sizeof(gdf_column));
double* temp_x{nullptr};
RMM_TRY( RMM_ALLOC(&temp_x, num_p * sizeof(double), 0) );
hipStream_t stream{0};
CUDA_TRY( hipMemcpyAsync(temp_x, p_x,
num_p * sizeof(double) ,
hipMemcpyHostToDevice,stream) );
gdf_column_view_augmented(&pnt_x, temp_x, nullptr, num_p,
GDF_FLOAT64, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "x");
delete[] p_x;
double* temp_y{nullptr};
RMM_TRY( RMM_ALLOC(&temp_y, num_p * sizeof(double), 0) );
CUDA_TRY( hipMemcpyAsync(temp_y, p_y,
num_p * sizeof(double) ,
hipMemcpyHostToDevice,stream) );
gdf_column_view_augmented(&pnt_y, temp_y, nullptr, num_p,
GDF_FLOAT64, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "y");
delete[] p_y;
return std::make_pair(pnt_x,pnt_y);
}
}
| 1e57f39bb124c14fc552c99a4744c8fe8e2e60a9.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cudf/types.h>
#include <cudf/legacy/column.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/rmm.h>
#include <cuspatial/soa_readers.hpp>
#include <utility/utility.hpp>
namespace cuspatial
{
/**
* @brief read lon/lat from file into two columns; data type is fixed to double (GDF_FLOAT64)
*
* see soa_readers.hpp
*/
std::pair<gdf_column, gdf_column> read_lonlat_points_soa(const char *filename)
{
cudaStream_t stream{0};
double * p_lon=nullptr, *p_lat=nullptr;
int num_p=read_point_lonlat<double>(filename,p_lon,p_lat);
gdf_column pnt_lon,pnt_lat;
memset(&pnt_lon,0,sizeof(gdf_column));
memset(&pnt_lat,0,sizeof(gdf_column));
double* temp_lon{nullptr};
RMM_TRY( RMM_ALLOC(&temp_lon, num_p * sizeof(double), 0) );
CUDA_TRY( cudaMemcpyAsync(temp_lon, p_lon,
num_p * sizeof(double) ,
cudaMemcpyHostToDevice,stream) );
gdf_column_view_augmented(&pnt_lon, temp_lon, nullptr, num_p,
GDF_FLOAT64, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "lon");
delete[] p_lon;
double* temp_lat{nullptr};
RMM_TRY( RMM_ALLOC(&temp_lat, num_p * sizeof(double), 0) );
CUDA_TRY( cudaMemcpyAsync(temp_lat, p_lat,
num_p * sizeof(double) ,
cudaMemcpyHostToDevice,stream) );
gdf_column_view_augmented(&pnt_lat, temp_lat, nullptr, num_p,
GDF_FLOAT64, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "lat");
delete[] p_lat;
return std::make_pair(pnt_lon,pnt_lat);
}
/**
* @brief read x/y from file into two columns; data type is fixed to double (GDF_FLOAT64)
*
* see soa_readers.hpp
*/
std::pair<gdf_column, gdf_column> read_xy_points_soa(const char *filename)
{
double * p_x=nullptr, *p_y=nullptr;
int num_p=read_point_xy<double>(filename,p_x,p_y);
gdf_column pnt_x,pnt_y;
memset(&pnt_x,0,sizeof(gdf_column));
memset(&pnt_y,0,sizeof(gdf_column));
double* temp_x{nullptr};
RMM_TRY( RMM_ALLOC(&temp_x, num_p * sizeof(double), 0) );
cudaStream_t stream{0};
CUDA_TRY( cudaMemcpyAsync(temp_x, p_x,
num_p * sizeof(double) ,
cudaMemcpyHostToDevice,stream) );
gdf_column_view_augmented(&pnt_x, temp_x, nullptr, num_p,
GDF_FLOAT64, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "x");
delete[] p_x;
double* temp_y{nullptr};
RMM_TRY( RMM_ALLOC(&temp_y, num_p * sizeof(double), 0) );
CUDA_TRY( cudaMemcpyAsync(temp_y, p_y,
num_p * sizeof(double) ,
cudaMemcpyHostToDevice,stream) );
gdf_column_view_augmented(&pnt_y, temp_y, nullptr, num_p,
GDF_FLOAT64, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "y");
delete[] p_y;
return std::make_pair(pnt_x,pnt_y);
}
}
|
ff031c33c7080b37d2f68cbe2614ea54bee29f4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Bandwidth.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include "Chrono.h"
using std::cout;
using std::endl;
/*---------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void kernelBandwidthEntrelacement(int* tabDataGM, int* tabDataGMCopy, int n);
extern __global__ void kernelBandwidthOneOne(int *tabDataGM,int*tabDataGMCopy, int n );
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*-------------------------------------*\
 |* Constructor *|
\*-------------------------------------*/
Bandwidth::Bandwidth(const Grid &grid, int *tabData, int n, TransferType type) :
n(n), type(type)
{
this->dg = grid.dg;
this->db = grid.db;
    this->sizeTabDataGM = sizeof(int) * n; // [bytes]
switch (type)
{
case TransferType::HostToDevice:
Device::malloc(&tabDataGM, sizeTabDataGM);
break;
case TransferType::HostToDeviceDMA:
Device::hostMalloc(&tabDataGM, sizeTabDataGM, HostMemoryType::MAPPED_MULTIGPU);
break;
case TransferType::DeviceToDevice:
Device::malloc(&tabDataGM, sizeTabDataGM);
Device::malloc(&tabDataGMCopy, sizeTabDataGM);
Device::memcpyHToD(tabDataGM, tabData, sizeTabDataGM);
break;
case TransferType::DeviceToDeviceMultiGPUEntrelacement:
case TransferType::DeviceToDeviceMultiGPUOneOne:
case TransferType::DeviceToDeviceMultiGPU:
Device::setDevice(0);
Device::malloc(&tabDataGM, sizeTabDataGM);
Device::setDevice(1);
Device::malloc(&tabDataGMCopy, sizeTabDataGM);
Device::setDevice(0);
Device::memcpyHToD(tabDataGM, tabData, sizeTabDataGM);
break;
case TransferType::DeviceToDeviceEntrelacement:
case TransferType::DeviceToDeviceOneOne:
Device::malloc(&tabDataGM, sizeTabDataGM);
Device::malloc(&tabDataGMCopy, sizeTabDataGM);
break;
}
int mp = Device::getMPCount();
int coreMP = Device::getCoreCountMP();
dim3 dg = dim3(mp*12, 1, 1);
dim3 db = dim3(coreMP, 1, 1);
Chrono chrono;
switch (type)
{
case TransferType::HostToDevice:
case TransferType::HostToDeviceDMA:
Device::memcpyHToD(tabDataGM, tabData, sizeTabDataGM);
break;
case TransferType::DeviceToDeviceMultiGPU:
case TransferType::DeviceToDevice:
Device::memcpyDToD(tabDataGMCopy, tabDataGM, sizeTabDataGM);
Device::synchronize();
break;
case TransferType::DeviceToDeviceMultiGPUEntrelacement:
case TransferType::DeviceToDeviceEntrelacement:
hipLaunchKernelGGL(( kernelBandwidthEntrelacement), dim3(dg),dim3(db), 0, 0, tabDataGM,tabDataGMCopy,n);
Device::synchronize();
break;
case TransferType::DeviceToDeviceMultiGPUOneOne:
case TransferType::DeviceToDeviceOneOne:
// dim3 dg = dim3(n/1024, 1, 1);
// dim3 db = dim3(1024, 1, 1);
// kernelBandwidthOneOne<<<dg,db>>>(tabDataGM,tabDataGMCopy,n);
Device::synchronize();
break;
}
chrono.stop();
elapsedTime = chrono.getElapseTimeS();
cout << elapsedTime<< endl;
}
Bandwidth::~Bandwidth(void)
{
switch (type)
{
case TransferType::HostToDevice:
Device::free(tabDataGM);
break;
case TransferType::HostToDeviceDMA:
Device::hostFree(tabDataGM);
break;
case TransferType::DeviceToDevice:
case TransferType::DeviceToDeviceEntrelacement:
case TransferType::DeviceToDeviceOneOne:
Device::free(tabDataGM);
Device::free(tabDataGMCopy);
break;
case TransferType::DeviceToDeviceMultiGPUEntrelacement:
case TransferType::DeviceToDeviceMultiGPUOneOne:
case TransferType::DeviceToDeviceMultiGPU:
Device::free(tabDataGM);
Device::setDevice(1);
Device::free(tabDataGMCopy);
Device::setDevice(0);
break;
}
}
double Bandwidth::getElapsedTime()
{
return elapsedTime;
}
/*-------------------------------------*\
 |* Method *|
\*-------------------------------------*/
void Bandwidth::run()
{
//kernelHistogramme<<<dg,db>>>(tabDataGM);
//Device::memcpyDToH(tabData, tabDataGM);
}
/*---------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| ff031c33c7080b37d2f68cbe2614ea54bee29f4c.cu | #include "Bandwidth.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include "Chrono.h"
using std::cout;
using std::endl;
/*---------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void kernelBandwidthEntrelacement(int* tabDataGM, int* tabDataGMCopy, int n);
extern __global__ void kernelBandwidthOneOne(int *tabDataGM,int*tabDataGMCopy, int n );
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*-------------------------------------*\
 |* Constructor *|
\*-------------------------------------*/
Bandwidth::Bandwidth(const Grid &grid, int *tabData, int n, TransferType type) :
n(n), type(type)
{
this->dg = grid.dg;
this->db = grid.db;
    this->sizeTabDataGM = sizeof(int) * n; // [bytes]
switch (type)
{
case TransferType::HostToDevice:
Device::malloc(&tabDataGM, sizeTabDataGM);
break;
case TransferType::HostToDeviceDMA:
Device::hostMalloc(&tabDataGM, sizeTabDataGM, HostMemoryType::MAPPED_MULTIGPU);
break;
case TransferType::DeviceToDevice:
Device::malloc(&tabDataGM, sizeTabDataGM);
Device::malloc(&tabDataGMCopy, sizeTabDataGM);
Device::memcpyHToD(tabDataGM, tabData, sizeTabDataGM);
break;
case TransferType::DeviceToDeviceMultiGPUEntrelacement:
case TransferType::DeviceToDeviceMultiGPUOneOne:
case TransferType::DeviceToDeviceMultiGPU:
Device::setDevice(0);
Device::malloc(&tabDataGM, sizeTabDataGM);
Device::setDevice(1);
Device::malloc(&tabDataGMCopy, sizeTabDataGM);
Device::setDevice(0);
Device::memcpyHToD(tabDataGM, tabData, sizeTabDataGM);
break;
case TransferType::DeviceToDeviceEntrelacement:
case TransferType::DeviceToDeviceOneOne:
Device::malloc(&tabDataGM, sizeTabDataGM);
Device::malloc(&tabDataGMCopy, sizeTabDataGM);
break;
}
int mp = Device::getMPCount();
int coreMP = Device::getCoreCountMP();
dim3 dg = dim3(mp*12, 1, 1);
dim3 db = dim3(coreMP, 1, 1);
Chrono chrono;
switch (type)
{
case TransferType::HostToDevice:
case TransferType::HostToDeviceDMA:
Device::memcpyHToD(tabDataGM, tabData, sizeTabDataGM);
break;
case TransferType::DeviceToDeviceMultiGPU:
case TransferType::DeviceToDevice:
Device::memcpyDToD(tabDataGMCopy, tabDataGM, sizeTabDataGM);
Device::synchronize();
break;
case TransferType::DeviceToDeviceMultiGPUEntrelacement:
case TransferType::DeviceToDeviceEntrelacement:
kernelBandwidthEntrelacement<<<dg,db>>>(tabDataGM,tabDataGMCopy,n);
Device::synchronize();
break;
case TransferType::DeviceToDeviceMultiGPUOneOne:
case TransferType::DeviceToDeviceOneOne:
// dim3 dg = dim3(n/1024, 1, 1);
// dim3 db = dim3(1024, 1, 1);
// kernelBandwidthOneOne<<<dg,db>>>(tabDataGM,tabDataGMCopy,n);
Device::synchronize();
break;
}
chrono.stop();
elapsedTime = chrono.getElapseTimeS();
cout << elapsedTime<< endl;
}
Bandwidth::~Bandwidth(void)
{
switch (type)
{
case TransferType::HostToDevice:
Device::free(tabDataGM);
break;
case TransferType::HostToDeviceDMA:
Device::hostFree(tabDataGM);
break;
case TransferType::DeviceToDevice:
case TransferType::DeviceToDeviceEntrelacement:
case TransferType::DeviceToDeviceOneOne:
Device::free(tabDataGM);
Device::free(tabDataGMCopy);
break;
case TransferType::DeviceToDeviceMultiGPUEntrelacement:
case TransferType::DeviceToDeviceMultiGPUOneOne:
case TransferType::DeviceToDeviceMultiGPU:
Device::free(tabDataGM);
Device::setDevice(1);
Device::free(tabDataGMCopy);
Device::setDevice(0);
break;
}
}
double Bandwidth::getElapsedTime()
{
return elapsedTime;
}
/*-------------------------------------*\
 |* Method *|
\*-------------------------------------*/
void Bandwidth::run()
{
//kernelHistogramme<<<dg,db>>>(tabDataGM);
//Device::memcpyDToH(tabData, tabDataGM);
}
/*---------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
54fc49d7c44617d5fd0bfb27e7bc1e6b931bcd64.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "common.h"
#define LOG 0
/*
* An implementation of parallel reduction using nested kernel launches from
* CUDA kernels.
*/
// Recursive Implementation of Interleaved Pair Approach
int cpuRecursiveReduce(int *data, int const size) {
// stop condition
if (size == 1) return data[0];
// renew the stride
int const stride = size / 2;
// in-place reduction
for (int i = 0; i < stride; i++) {
data[i] += data[i + stride];
}
// call recursively
return cpuRecursiveReduce(data, stride);
}
// Neighbored Pair Implementation with divergence
__global__ void reduceNeighbored(int *g_idata, int *g_odata, unsigned int n) {
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((tid % (2 * stride)) == 0) {
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
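// Nested (dynamic parallelism) reduction: each invocation folds the upper half of
// its slice onto the lower half, then thread 0 launches a child grid of one block
// with half as many threads; the recursion bottoms out when only two elements
// remain, whose sum is written to g_odata for the block.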
__global__ void gpuRecursiveReduce(int *g_idata, int *g_odata,
unsigned int isize) {
// set thread ID
unsigned int tid = threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
int *odata = &g_odata[blockIdx.x];
// stop condition
if (isize == 2 && tid == 0) {
g_odata[blockIdx.x] = idata[0] + idata[1];
return;
}
// nested invocation
int istride = isize >> 1;
if (istride > 1 && tid < istride) {
// in place reduction
idata[tid] += idata[tid + istride];
}
// sync at block level
__syncthreads();
// nested invocation to generate child grids
if (tid == 0) {
hipLaunchKernelGGL(( gpuRecursiveReduce), dim3(1), dim3(istride), 0, 0, idata, odata, istride);
// sync all child grids launched in this block
hipDeviceSynchronize();
}
// sync at block level again
__syncthreads();
}
// main from here
int main(int argc, char **argv) {
// set up device
int dev = 0, gpu_sum;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
bool bResult = false;
// set up execution configuration
int nblock = 2048;
int nthread = 512; // initial block size
if (argc > 1) {
    nblock = atoi(argv[1]); // number of blocks from command line argument
}
if (argc > 2) {
nthread = atoi(argv[2]); // block size from command line argument
}
int size = nblock * nthread; // total number of elements to reduceNeighbored
dim3 block(nthread, 1);
dim3 grid((size + block.x - 1) / block.x, 1);
printf("array %d grid %d block %d\n", size, grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *)malloc(bytes);
int *h_odata = (int *)malloc(grid.x * sizeof(int));
int *tmp = (int *)malloc(bytes);
// initialize the array
for (int i = 0; i < size; i++) {
h_idata[i] = (int)(rand() & 0xFF);
h_idata[i] = 1;
}
memcpy(tmp, h_idata, bytes);
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(hipMalloc((void **)&d_idata, bytes));
CHECK(hipMalloc((void **)&d_odata, grid.x * sizeof(int)));
double iStart, iElaps;
// cpu recursive reduction
iStart = seconds();
int cpu_sum = cpuRecursiveReduce(tmp, size);
iElaps = seconds() - iStart;
printf("cpu reduce\t\telapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum);
// gpu reduceNeighbored
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
iStart = seconds();
hipLaunchKernelGGL(( reduceNeighbored), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf(
"gpu Neighbored\t\telapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n",
iElaps, gpu_sum, grid.x, block.x);
// gpu nested reduce kernel
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
iStart = seconds();
hipLaunchKernelGGL(( gpuRecursiveReduce), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, block.x);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu nested\t\telapsed %f sec gpu_sum: %d <<<grid %d block %d>>>\n",
iElaps, gpu_sum, grid.x, block.x);
// free host memory
free(h_idata);
free(h_odata);
// free device memory
CHECK(hipFree(d_idata));
CHECK(hipFree(d_odata));
// reset device
CHECK(hipDeviceReset());
// check the results
bResult = (gpu_sum == cpu_sum);
if (!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
| 54fc49d7c44617d5fd0bfb27e7bc1e6b931bcd64.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include "common.h"
#define LOG 0
/*
* An implementation of parallel reduction using nested kernel launches from
* CUDA kernels.
*/
// Recursive Implementation of Interleaved Pair Approach
int cpuRecursiveReduce(int *data, int const size) {
// stop condition
if (size == 1) return data[0];
// renew the stride
int const stride = size / 2;
// in-place reduction
for (int i = 0; i < stride; i++) {
data[i] += data[i + stride];
}
// call recursively
return cpuRecursiveReduce(data, stride);
}
// Neighbored Pair Implementation with divergence
__global__ void reduceNeighbored(int *g_idata, int *g_odata, unsigned int n) {
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((tid % (2 * stride)) == 0) {
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void gpuRecursiveReduce(int *g_idata, int *g_odata,
unsigned int isize) {
// set thread ID
unsigned int tid = threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
int *odata = &g_odata[blockIdx.x];
// stop condition
if (isize == 2 && tid == 0) {
g_odata[blockIdx.x] = idata[0] + idata[1];
return;
}
// nested invocation
int istride = isize >> 1;
if (istride > 1 && tid < istride) {
// in place reduction
idata[tid] += idata[tid + istride];
}
// sync at block level
__syncthreads();
// nested invocation to generate child grids
if (tid == 0) {
gpuRecursiveReduce<<<1, istride>>>(idata, odata, istride);
// sync all child grids launched in this block
cudaDeviceSynchronize();
}
// sync at block level again
__syncthreads();
}
// main from here
int main(int argc, char **argv) {
// set up device
int dev = 0, gpu_sum;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
bool bResult = false;
// set up execution configuration
int nblock = 2048;
int nthread = 512; // initial block size
if (argc > 1) {
    nblock = atoi(argv[1]); // number of blocks from command line argument
}
if (argc > 2) {
nthread = atoi(argv[2]); // block size from command line argument
}
int size = nblock * nthread; // total number of elements to reduceNeighbored
dim3 block(nthread, 1);
dim3 grid((size + block.x - 1) / block.x, 1);
printf("array %d grid %d block %d\n", size, grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *)malloc(bytes);
int *h_odata = (int *)malloc(grid.x * sizeof(int));
int *tmp = (int *)malloc(bytes);
// initialize the array
for (int i = 0; i < size; i++) {
h_idata[i] = (int)(rand() & 0xFF);
h_idata[i] = 1;
}
memcpy(tmp, h_idata, bytes);
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(cudaMalloc((void **)&d_idata, bytes));
CHECK(cudaMalloc((void **)&d_odata, grid.x * sizeof(int)));
double iStart, iElaps;
// cpu recursive reduction
iStart = seconds();
int cpu_sum = cpuRecursiveReduce(tmp, size);
iElaps = seconds() - iStart;
printf("cpu reduce\t\telapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum);
// gpu reduceNeighbored
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
iStart = seconds();
reduceNeighbored<<<grid, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf(
"gpu Neighbored\t\telapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n",
iElaps, gpu_sum, grid.x, block.x);
// gpu nested reduce kernel
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
iStart = seconds();
gpuRecursiveReduce<<<grid, block>>>(d_idata, d_odata, block.x);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu nested\t\telapsed %f sec gpu_sum: %d <<<grid %d block %d>>>\n",
iElaps, gpu_sum, grid.x, block.x);
// free host memory
free(h_idata);
free(h_odata);
// free device memory
CHECK(cudaFree(d_idata));
CHECK(cudaFree(d_odata));
// reset device
CHECK(cudaDeviceReset());
// check the results
bResult = (gpu_sum == cpu_sum);
if (!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
|
a54b95c530e21a7983cca77f7bb9eaaeaa2fac37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorTopK.cu"
#else
THC_API void THCTensor_(topk)(THCState* state,
THCTensor *topK,
THCudaLongTensor *indices,
THCTensor *input_,
int64_t k, int dim, int dir, int sorted) {
THAssert(topK != NULL && indices != NULL && input_ != NULL);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, topK, indices, input_));
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, topK) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
int64_t dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
int numDims = THCTensor_(nDimensionLegacyNoScalars)(state, input_);
THArgCheck(numDims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
THArgCheck(dim >= 0 && dim < numDims, 6, "dim not in range");
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input_, dim);
THArgCheck(k >= 0 && k <= sliceSize, 5, "k not in range for dimension");
THCTensor *input = THCTensor_(newContiguous)(state, input_);
// Build the output size, which is the dim being selected set to
// size k
std::vector<int64_t> topKSize = THTensor_sizesLegacyNoScalars(input);
topKSize[dim] = k;
THCTensor_(resize)(state, topK, topKSize, {});
THCudaLongTensor_resize(state, indices, topKSize, {});
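// The RUN_* macros below expand into a single gatherTopK launch: RUN_T builds the
// TensorInfo structures and launch configuration for the chosen index type,
// RUN_DIM specializes on the collapsed dimensionality (1, 2, 3, or -1 for the
// general case), and RUN_DIR picks the selection direction from dir.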
#define RUN_K(INDEX_T, DIM, DIR) \
hipLaunchKernelGGL(( gatherTopK<real, INDEX_T, DIM, DIR>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
inputInfo, \
sliceSize, \
k, \
inputSlices, \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
inputInfo.strides[collapseInputDim], \
topKInfo, \
topKSlices, \
topKInfo.strides[collapseTopKDim], \
indicesInfo, \
indicesInfo.strides[collapseIndicesDim])
#define RUN_DIR(INDEX_T, DIM) \
if (dir) { \
RUN_K(INDEX_T, DIM, true); \
} else { \
RUN_K(INDEX_T, DIM, false); \
}
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_DIR(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_DIR(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_DIR(INDEX_T, 3); \
} else { \
RUN_DIR(INDEX_T, -1); \
}
#define RUN_T(INDEX_T) \
TensorInfo<real, INDEX_T> inputInfo = \
getTensorInfo<real, THCTensor, INDEX_T>(state, input); \
TensorInfo<real, INDEX_T> topKInfo = \
getTensorInfo<real, THCTensor, INDEX_T>(state, topK); \
TensorInfo<int64_t, INDEX_T> indicesInfo = \
getTensorInfo<int64_t, THCudaLongTensor, INDEX_T>(state, indices); \
\
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
\
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
\
int64_t inputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
inputSlices *= inputInfo.sizes[i]; \
} \
int64_t topKSlices = 1; \
for (int i = 0; i < topKInfo.dims; ++i) { \
topKSlices *= topKInfo.sizes[i]; \
} \
\
dim3 grid; \
if (!THC_getGridFromTiles(inputSlices, grid)) { \
THError("Slice to sort is too large"); \
} \
\
dim3 block(::min(THCRoundUp(sliceSize, (int64_t) 32), (int64_t) 1024)); \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T);
if (THCTensor_nElement(state, input) > 0) {
// Based on required index size, run the algorithm with the
// appropriate index type
if (THCTensor_canUse32BitIndexMath(state, input) &&
THCTensor_canUse32BitIndexMath(state, topK) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_DIR
#undef RUN_K
// Sort the results if the user wants them sorted, since our
// selection routine does not ensure sorting
if (sorted) {
// FIXME: the k/v inplace sort along slice only works for size <=
// 2048 at the moment
if (sliceSize <= 2048) {
// This avoids any memory allocations and performs all sorting
// work inplace along the slice
THCTensor_(sortKeyValueInplace)(state, topK, indices, dim, dir);
} else {
// Depend upon the backup sort that returns indices, which we
// can use in conjunction with gather to produce the original
// indices.
// This is not the most efficient implementation, especially since
// there are memory allocations performed here. If the user desires
// greater performance, they should torch.gather() the results
// themselves using the reported indices, providing previously
// allocated tensors to receive the results.
THCTensor* sortedTopK = THCTensor_(new)(state);
THCudaLongTensor* sortedIndices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sortedTopK, sortedIndices, topK, dim, dir);
THCudaLongTensor* sortedTopKIndices = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sortedTopKIndices, indices);
THCudaLongTensor_gather(state, sortedTopKIndices, indices, dim, sortedIndices);
THCTensor_(freeCopyTo)(state, sortedTopK, topK);
THCudaLongTensor_freeCopyTo(state, sortedTopKIndices, indices);
THCudaLongTensor_free(state, sortedIndices);
}
}
THCudaLongTensor_free(state, input);
THCudaCheck(hipGetLastError());
}
#endif // THC_GENERIC_FILE
| a54b95c530e21a7983cca77f7bb9eaaeaa2fac37.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorTopK.cu"
#else
THC_API void THCTensor_(topk)(THCState* state,
THCTensor *topK,
THCudaLongTensor *indices,
THCTensor *input_,
int64_t k, int dim, int dir, int sorted) {
THAssert(topK != NULL && indices != NULL && input_ != NULL);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, topK, indices, input_));
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, topK) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
int64_t dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
int numDims = THCTensor_(nDimensionLegacyNoScalars)(state, input_);
THArgCheck(numDims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
THArgCheck(dim >= 0 && dim < numDims, 6, "dim not in range");
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input_, dim);
THArgCheck(k >= 0 && k <= sliceSize, 5, "k not in range for dimension");
THCTensor *input = THCTensor_(newContiguous)(state, input_);
// Build the output size, which is the dim being selected set to
// size k
std::vector<int64_t> topKSize = THTensor_sizesLegacyNoScalars(input);
topKSize[dim] = k;
THCTensor_(resize)(state, topK, topKSize, {});
THCudaLongTensor_resize(state, indices, topKSize, {});
#define RUN_K(INDEX_T, DIM, DIR) \
gatherTopK<real, INDEX_T, DIM, DIR> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
inputInfo, \
sliceSize, \
k, \
inputSlices, \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
inputInfo.strides[collapseInputDim], \
topKInfo, \
topKSlices, \
topKInfo.strides[collapseTopKDim], \
indicesInfo, \
indicesInfo.strides[collapseIndicesDim])
#define RUN_DIR(INDEX_T, DIM) \
if (dir) { \
RUN_K(INDEX_T, DIM, true); \
} else { \
RUN_K(INDEX_T, DIM, false); \
}
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_DIR(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_DIR(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_DIR(INDEX_T, 3); \
} else { \
RUN_DIR(INDEX_T, -1); \
}
#define RUN_T(INDEX_T) \
TensorInfo<real, INDEX_T> inputInfo = \
getTensorInfo<real, THCTensor, INDEX_T>(state, input); \
TensorInfo<real, INDEX_T> topKInfo = \
getTensorInfo<real, THCTensor, INDEX_T>(state, topK); \
TensorInfo<int64_t, INDEX_T> indicesInfo = \
getTensorInfo<int64_t, THCudaLongTensor, INDEX_T>(state, indices); \
\
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
\
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
\
int64_t inputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
inputSlices *= inputInfo.sizes[i]; \
} \
int64_t topKSlices = 1; \
for (int i = 0; i < topKInfo.dims; ++i) { \
topKSlices *= topKInfo.sizes[i]; \
} \
\
dim3 grid; \
if (!THC_getGridFromTiles(inputSlices, grid)) { \
THError("Slice to sort is too large"); \
} \
\
dim3 block(std::min(THCRoundUp(sliceSize, (int64_t) 32), (int64_t) 1024)); \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T);
if (THCTensor_nElement(state, input) > 0) {
// Based on required index size, run the algorithm with the
// appropriate index type
if (THCTensor_canUse32BitIndexMath(state, input) &&
THCTensor_canUse32BitIndexMath(state, topK) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_DIR
#undef RUN_K
// Sort the results if the user wants them sorted, since our
// selection routine does not ensure sorting
if (sorted) {
// FIXME: the k/v inplace sort along slice only works for size <=
// 2048 at the moment
if (sliceSize <= 2048) {
// This avoids any memory allocations and performs all sorting
// work inplace along the slice
THCTensor_(sortKeyValueInplace)(state, topK, indices, dim, dir);
} else {
// Depend upon the backup sort that returns indices, which we
// can use in conjunction with gather to produce the original
// indices.
// This is not the most efficient implementation, especially since
// there are memory allocations performed here. If the user desires
// greater performance, they should torch.gather() the results
// themselves using the reported indices, providing previously
// allocated tensors to receive the results.
THCTensor* sortedTopK = THCTensor_(new)(state);
THCudaLongTensor* sortedIndices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sortedTopK, sortedIndices, topK, dim, dir);
THCudaLongTensor* sortedTopKIndices = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sortedTopKIndices, indices);
THCudaLongTensor_gather(state, sortedTopKIndices, indices, dim, sortedIndices);
THCTensor_(freeCopyTo)(state, sortedTopK, topK);
THCudaLongTensor_freeCopyTo(state, sortedTopKIndices, indices);
THCudaLongTensor_free(state, sortedIndices);
}
}
THCudaLongTensor_free(state, input);
THCudaCheck(cudaGetLastError());
}
#endif // THC_GENERIC_FILE
|
eb77606e6e6d45fa79b10c88485ea66c83e01375.hip | // !!! This is a file automatically generated by hipify!!!
/*
* gpu_util.cu -- GPU utility functions
*
* Copyright (C) 2010-2013, Computing Systems Laboratory (CSLab)
* Copyright (C) 2010-2013, Vasileios Karakasis
*/
#include <hip/hip_runtime.h>
#include "gpu_util.h"
void *gpu_alloc(size_t count)
{
void *ret;
if (hipMalloc(&ret, count) != hipSuccess) {
ret = NULL;
}
return ret;
}
void gpu_free(void *gpuptr)
{
hipFree(gpuptr);
}
int copy_to_gpu(const void *host, void *gpu, size_t count)
{
if (hipMemcpy(gpu, host, count, hipMemcpyHostToDevice) != hipSuccess)
return -1;
return 0;
}
int copy_from_gpu(void *host, const void *gpu, size_t count)
{
if (hipMemcpy(host, gpu, count, hipMemcpyDeviceToHost) != hipSuccess)
return -1;
return 0;
}
const char *gpu_get_errmsg(hipError_t err)
{
return hipGetErrorString(err);
}
const char *gpu_get_last_errmsg()
{
return gpu_get_errmsg(hipGetLastError());
}
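/*
 * Usage sketch (not part of the original file; the buffer name and size are
 * hypothetical). The wrappers above return NULL / -1 on failure, so a typical
 * host-side round trip looks like:
 *
 *   size_t bytes = 1024 * sizeof(float);
 *   float *h_buf = (float *) malloc(bytes);
 *   void *d_buf = gpu_alloc(bytes);
 *   if (!d_buf || copy_to_gpu(h_buf, d_buf, bytes) < 0)
 *       fprintf(stderr, "GPU error: %s\n", gpu_get_last_errmsg());
 *   // ... launch kernels that read/write d_buf ...
 *   if (copy_from_gpu(h_buf, d_buf, bytes) < 0)
 *       fprintf(stderr, "GPU error: %s\n", gpu_get_last_errmsg());
 *   gpu_free(d_buf);
 *   free(h_buf);
 */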
| eb77606e6e6d45fa79b10c88485ea66c83e01375.cu | /*
* gpu_util.cu -- GPU utility functions
*
* Copyright (C) 2010-2013, Computing Systems Laboratory (CSLab)
* Copyright (C) 2010-2013, Vasileios Karakasis
*/
#include <cuda.h>
#include "gpu_util.h"
void *gpu_alloc(size_t count)
{
void *ret;
if (cudaMalloc(&ret, count) != cudaSuccess) {
ret = NULL;
}
return ret;
}
void gpu_free(void *gpuptr)
{
cudaFree(gpuptr);
}
int copy_to_gpu(const void *host, void *gpu, size_t count)
{
if (cudaMemcpy(gpu, host, count, cudaMemcpyHostToDevice) != cudaSuccess)
return -1;
return 0;
}
int copy_from_gpu(void *host, const void *gpu, size_t count)
{
if (cudaMemcpy(host, gpu, count, cudaMemcpyDeviceToHost) != cudaSuccess)
return -1;
return 0;
}
const char *gpu_get_errmsg(cudaError_t err)
{
return cudaGetErrorString(err);
}
const char *gpu_get_last_errmsg()
{
return gpu_get_errmsg(cudaGetLastError());
}
|
1b5e9864b3979063ca27184456c4226097682846.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "axpy_kernel.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float ALPHA = 2;
float *X = NULL;
hipMalloc(&X, XSIZE*YSIZE);
int OFFX = 1;
int INCX = 1;
float *Y = NULL;
hipMalloc(&Y, XSIZE*YSIZE);
int OFFY = 1;
int INCY = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((axpy_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((axpy_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((axpy_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1b5e9864b3979063ca27184456c4226097682846.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "axpy_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float ALPHA = 2;
float *X = NULL;
cudaMalloc(&X, XSIZE*YSIZE);
int OFFX = 1;
int INCX = 1;
float *Y = NULL;
cudaMalloc(&Y, XSIZE*YSIZE);
int OFFY = 1;
int INCY = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
axpy_kernel<<<gridBlock,threadBlock>>>(N,ALPHA,X,OFFX,INCX,Y,OFFY,INCY);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
axpy_kernel<<<gridBlock,threadBlock>>>(N,ALPHA,X,OFFX,INCX,Y,OFFY,INCY);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
axpy_kernel<<<gridBlock,threadBlock>>>(N,ALPHA,X,OFFX,INCX,Y,OFFY,INCY);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d462e4bde1c1fa0eee7bae847636485ae201c213.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions mixed zc -> ds
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// CSR-SpMV kernel
__global__ void
zcgecsrmv_mixed_prec_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex * ddiagval,
magmaFloatComplex * doffdiagval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
magmaDoubleComplex dot = ddiagval[ row ] * dx[ row ];
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++){
magmaDoubleComplex val =
MAGMA_Z_MAKE( (double) MAGMA_C_REAL(doffdiagval[ j ]),
(double) MAGMA_C_IMAG(doffdiagval[ j ]) );
dot += val * dx[ dcolind[j] ];
}
dy[ row ] = dot *alpha + beta * dy[ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
A is a matrix in mixed precision, i.e. the diagonal values are stored in
high precision, the offdiagonal values in low precision.
The input format is a CSR (val, row, col) in FloatComplex storing all
offdiagonal elements and an array containing the diagonal values in
DoubleComplex.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
ddiagval magmaDoubleComplex_ptr
array containing diagonal values of A in DoubleComplex
@param[in]
doffdiagval magmaFloatComplex_ptr
array containing offdiag values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zcgecsrmv_mixed_prec(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr ddiagval,
magmaFloatComplex_ptr doffdiagval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL((zcgecsrmv_mixed_prec_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
m, n, alpha, ddiagval, doffdiagval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
| d462e4bde1c1fa0eee7bae847636485ae201c213.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions mixed zc -> ds
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// CSR-SpMV kernel
__global__ void
zcgecsrmv_mixed_prec_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex * ddiagval,
magmaFloatComplex * doffdiagval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
magmaDoubleComplex dot = ddiagval[ row ] * dx[ row ];
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++){
magmaDoubleComplex val =
MAGMA_Z_MAKE( (double) MAGMA_C_REAL(doffdiagval[ j ]),
(double) MAGMA_C_IMAG(doffdiagval[ j ]) );
dot += val * dx[ dcolind[j] ];
}
dy[ row ] = dot *alpha + beta * dy[ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
A is a matrix in mixed precision, i.e. the diagonal values are stored in
high precision, the offdiagonal values in low precision.
The input format is a CSR (val, row, col) in FloatComplex storing all
offdiagonal elements and an array containing the diagonal values in
DoubleComplex.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
ddiagval magmaDoubleComplex_ptr
array containing diagonal values of A in DoubleComplex
@param[in]
doffdiagval magmaFloatComplex_ptr
array containing offdiag values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zcgecsrmv_mixed_prec(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr ddiagval,
magmaFloatComplex_ptr doffdiagval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
zcgecsrmv_mixed_prec_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, alpha, ddiagval, doffdiagval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
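/*
    Call sketch (illustration only, not from the original source; the device
    arrays d_diag, d_offdiag, d_rowptr, d_colind, d_x, d_y and the queue are
    assumed to be allocated and populated already):

        magmaDoubleComplex alpha = MAGMA_Z_ONE;
        magmaDoubleComplex beta  = MAGMA_Z_ZERO;
        magma_zcgecsrmv_mixed_prec( MagmaNoTrans, m, n,
                                    alpha, d_diag, d_offdiag,
                                    d_rowptr, d_colind,
                                    d_x, beta, d_y, queue );

    The wrapper launches one thread per row, grouped into blocks of
    BLOCK_SIZE = 256 threads, i.e. magma_ceildiv(m, 256) blocks in total.
*/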
|
fb0a39294713267c999ee914883baf481c8b6f83.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__device__ void mul(float a, float b, float *res)
{
*res = a * b;
// almost underflow
*res = (*res) * 1e-35f;
}
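// Scale note (illustration): with inputs of order 1.0f the scaled product is
// of order 1e-35f, roughly three orders of magnitude above FLT_MIN
// (~1.175e-38f), which is why the comment above calls it "almost underflow".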
__global__ void dot_prod(float *x, float *y, int size)
{
float d = 0.0f;
for (int i=0; i < size; ++i)
{
float tmp;
mul(x[i], y[i], &tmp);
d += tmp;
}
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) {
printf("dot: %f\n", d);
}
}
| fb0a39294713267c999ee914883baf481c8b6f83.cu |
#include <stdio.h>
__device__ void mul(float a, float b, float *res)
{
*res = a * b;
// almost underflow
*res = (*res) * 1e-35f;
}
__global__ void dot_prod(float *x, float *y, int size)
{
float d = 0.0f;
for (int i=0; i < size; ++i)
{
float tmp;
mul(x[i], y[i], &tmp);
d += tmp;
}
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) {
printf("dot: %f\n", d);
}
}
|
dfbd65a826c9f6790f4d9ae84699f467e7880c61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>
#include "caffe/layers/channel_reduce_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxChannelReduceForward(const int n, const Dtype* bot_data,
const int height, const int width, const int channels, const int num_channels,
Dtype* top_data, int* mask) {
CUDA_KERNEL_LOOP(index, n) {
//Dtype val = 0;
int w = index % width;
int h = (index / width) % height;
int c = index / (width * height);
// w,h,c represent where we are in top_data
int c_to_channel_reduce = channels/num_channels;
// say we're at c=0; i want to grab chans 0..c_to_channel_reduce
int coeff_h_col = (1 - height) * width;
int coeff_w_col = 1 - height * width;
for (int chan = c*c_to_channel_reduce; chan < (c+1)*c_to_channel_reduce; ++chan) {
int offset = (chan + h + w) * height * width;
int bot_index = offset + h * coeff_h_col + w * coeff_w_col;
if (bot_data[bot_index]>top_data[index]) {
top_data[index] = bot_data[bot_index];
mask[index] = bot_index;
}
}
}
}
template <typename Dtype>
void ChannelReduceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_gpu_set(top[0]->count(), Dtype(0), top_data);
const int count = top[0]->height()*top[0]->width(); // number of pixels in a channel
int* mask = max_idx_.mutable_gpu_data();
switch (op_) {
case ChannelReduceParameter_Op_SUM:
for (int n = 0; n < bottom[0]->num(); ++n) {
for (int c = 0; c < channels_; ++c) {
int c_top = c / (channels_/num_channels_);
caffe_gpu_add(count,
bottom_data + bottom[0]->offset(n,c),
top_data + top[0]->offset(n,c_top),
top_data + top[0]->offset(n,c_top));
}
}
break;
case ChannelReduceParameter_Op_MAX:
caffe_gpu_set(top[0]->count(), Dtype(-FLT_MAX), top_data);
for (int n = 0; n < bottom[0]->num(); ++n) {
int num_kernels = num_channels_ * height_ * width_;
hipLaunchKernelGGL(( MaxChannelReduceForward<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels,
bottom_data+bottom[0]->offset(n),
height_, width_, channels_, num_channels_,
top_data+top[0]->offset(n),
mask+top[0]->offset(n));
CUDA_POST_KERNEL_CHECK;
}
break;
default:
LOG(FATAL) << "Unknown operation.";
}
// const Dtype* embs = top[0]->cpu_data();
// for (int i=0; i < 10; i++){
// LOG(ERROR) << "for example, cr[" << i << "] = " << embs[i];
// }
}
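// Channel-grouping example (illustration only): with channels_ = 8 and
// num_channels_ = 2, bottom channels 0..3 reduce into top channel 0 and
// bottom channels 4..7 into top channel 1, since c_top = c / (channels_ / num_channels_).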
template <typename Dtype>
__global__ void MaxChannelReduceBackward(const int n, const Dtype* top_diff,
const int height, const int width, const int channels, const int num_channels,
Dtype* bot_diff, const int* mask) {
CUDA_KERNEL_LOOP(index, n) {
//Dtype val = 0;
int w = index % width;
int h = (index / width) % height;
int c = index / (width * height);
// w,h,c represent where we are in bottom_data
// this index may or may not have contributed to the top data
int c_top = c / (channels/num_channels);
int top_index = (c_top * height + h) * width + w;
if (index == mask[top_index]) {
bot_diff[index] += top_diff[top_index];
}
}
}
template <typename Dtype>
void ChannelReduceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int count = bottom[0]->height()*bottom[0]->width(); // number of pixels in a channel
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int* mask = max_idx_.gpu_data();
switch (op_) {
case ChannelReduceParameter_Op_SUM:
// i need to walk through the channels in bottom_diff,
// and at each channel, copy in the appropriate top_diff
for (int n = 0; n < bottom[0]->num(); ++n) {
for (int c = 0; c < channels_; ++c) {
int c_top = c / (channels_/num_channels_);
caffe_copy(count,
top_diff+top[0]->offset(n,c_top),
bottom_diff+bottom[0]->offset(n,c));
}
}
break;
case ChannelReduceParameter_Op_MAX:
// i need to walk through the channels in bottom_diff,
// and at each channel, copy in the appropriate top_diff
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);
for (int n = 0; n < bottom[0]->num(); ++n) {
int num_kernels = channels_ * height_ * width_;
hipLaunchKernelGGL(( MaxChannelReduceBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels,
top_diff+top[0]->offset(n),
height_, width_, channels_, num_channels_,
bottom_diff+bottom[0]->offset(n),
mask+top[0]->offset(n));
CUDA_POST_KERNEL_CHECK;
}
break;
default:
LOG(FATAL) << "Unknown operation.";
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ChannelReduceLayer);
} // namespace caffe
| dfbd65a826c9f6790f4d9ae84699f467e7880c61.cu | #include <cfloat>
#include <vector>
#include "caffe/layers/channel_reduce_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxChannelReduceForward(const int n, const Dtype* bot_data,
const int height, const int width, const int channels, const int num_channels,
Dtype* top_data, int* mask) {
CUDA_KERNEL_LOOP(index, n) {
//Dtype val = 0;
int w = index % width;
int h = (index / width) % height;
int c = index / (width * height);
// w,h,c represent where we are in top_data
int c_to_channel_reduce = channels/num_channels;
// say we're at c=0; i want to grab chans 0..c_to_channel_reduce
int coeff_h_col = (1 - height) * width;
int coeff_w_col = 1 - height * width;
for (int chan = c*c_to_channel_reduce; chan < (c+1)*c_to_channel_reduce; ++chan) {
int offset = (chan + h + w) * height * width;
int bot_index = offset + h * coeff_h_col + w * coeff_w_col;
if (bot_data[bot_index]>top_data[index]) {
top_data[index] = bot_data[bot_index];
mask[index] = bot_index;
}
}
}
}
template <typename Dtype>
void ChannelReduceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_gpu_set(top[0]->count(), Dtype(0), top_data);
const int count = top[0]->height()*top[0]->width(); // number of pixels in a channel
int* mask = max_idx_.mutable_gpu_data();
switch (op_) {
case ChannelReduceParameter_Op_SUM:
for (int n = 0; n < bottom[0]->num(); ++n) {
for (int c = 0; c < channels_; ++c) {
int c_top = c / (channels_/num_channels_);
caffe_gpu_add(count,
bottom_data + bottom[0]->offset(n,c),
top_data + top[0]->offset(n,c_top),
top_data + top[0]->offset(n,c_top));
}
}
break;
case ChannelReduceParameter_Op_MAX:
caffe_gpu_set(top[0]->count(), Dtype(-FLT_MAX), top_data);
for (int n = 0; n < bottom[0]->num(); ++n) {
int num_kernels = num_channels_ * height_ * width_;
MaxChannelReduceForward<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(num_kernels,
bottom_data+bottom[0]->offset(n),
height_, width_, channels_, num_channels_,
top_data+top[0]->offset(n),
mask+top[0]->offset(n));
CUDA_POST_KERNEL_CHECK;
}
break;
default:
LOG(FATAL) << "Unknown operation.";
}
// const Dtype* embs = top[0]->cpu_data();
// for (int i=0; i < 10; i++){
// LOG(ERROR) << "for example, cr[" << i << "] = " << embs[i];
// }
}
template <typename Dtype>
__global__ void MaxChannelReduceBackward(const int n, const Dtype* top_diff,
const int height, const int width, const int channels, const int num_channels,
Dtype* bot_diff, const int* mask) {
CUDA_KERNEL_LOOP(index, n) {
//Dtype val = 0;
int w = index % width;
int h = (index / width) % height;
int c = index / (width * height);
// w,h,c represent where we are in bottom_data
// this index may or may not have contributed to the top data
int c_top = c / (channels/num_channels);
int top_index = (c_top * height + h) * width + w;
if (index == mask[top_index]) {
bot_diff[index] += top_diff[top_index];
}
}
}
template <typename Dtype>
void ChannelReduceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int count = bottom[0]->height()*bottom[0]->width(); // number of pixels in a channel
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int* mask = max_idx_.gpu_data();
switch (op_) {
case ChannelReduceParameter_Op_SUM:
// i need to walk through the channels in bottom_diff,
// and at each channel, copy in the appropriate top_diff
for (int n = 0; n < bottom[0]->num(); ++n) {
for (int c = 0; c < channels_; ++c) {
int c_top = c / (channels_/num_channels_);
caffe_copy(count,
top_diff+top[0]->offset(n,c_top),
bottom_diff+bottom[0]->offset(n,c));
}
}
break;
case ChannelReduceParameter_Op_MAX:
// i need to walk through the channels in bottom_diff,
// and at each channel, copy in the appropriate top_diff
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);
for (int n = 0; n < bottom[0]->num(); ++n) {
int num_kernels = channels_ * height_ * width_;
MaxChannelReduceBackward<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(num_kernels,
top_diff+top[0]->offset(n),
height_, width_, channels_, num_channels_,
bottom_diff+bottom[0]->offset(n),
mask+top[0]->offset(n));
CUDA_POST_KERNEL_CHECK;
}
break;
default:
LOG(FATAL) << "Unknown operation.";
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ChannelReduceLayer);
} // namespace caffe
|
2fc0af885194685f55257d4057640240594e3d29.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdint>
#include <memory>
#include <random>
#include <functional>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "cuda_helper.h"
const size_t BLOCK_SIZE = 128u;
struct SparseMat_CSR
{
uint32_t n_row = 0u;
uint32_t n_col = 0u;
uint32_t n_elem = 0u;
float* elems = nullptr;
uint32_t* col_index = nullptr;
uint32_t* row_ptr = nullptr;
SparseMat_CSR() = default;
SparseMat_CSR(uint32_t n_row, uint32_t n_col, uint32_t n_elem)
: n_row(n_row), n_col(n_col), n_elem(n_elem)
{
if (n_row * n_col * n_elem > 0)
{
elems = new float[n_elem];
col_index = new uint32_t[n_elem];
row_ptr = new uint32_t[n_row + 1];
}
}
~SparseMat_CSR()
{
if (elems) delete[] elems;
if (col_index) delete[] col_index;
if (row_ptr) delete[] row_ptr;
}
};
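// Worked example of the CSR layout (mirrors the commented-out test in main()
// below): the 4x4 matrix
//   [ 3 0 1 0 ]
//   [ 0 0 0 0 ]
//   [ 0 2 4 1 ]
//   [ 1 0 0 1 ]
// is stored as
//   elems     = { 3, 1, 2, 4, 1, 1, 1 }
//   col_index = { 0, 2, 1, 2, 3, 0, 3 }
//   row_ptr   = { 0, 2, 2, 5, 7 }   // row i occupies [row_ptr[i], row_ptr[i+1])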
struct Vector
{
uint32_t n = 0;
float* elems = nullptr;
Vector() = default;
Vector(uint32_t n)
: n(n)
{
if (n > 0)
{
elems = new float[n];
}
}
Vector(const Vector& ano)
: n(ano.n)
{
if (n > 0)
{
elems = new float[n];
std::copy(ano.elems, ano.elems + n, elems);
}
}
Vector(Vector&& ano)
: n(ano.n)
{
if (n > 0)
{
ano.n = 0;
elems = ano.elems;
ano.elems = nullptr;
}
}
~Vector()
{
if (elems) delete[] elems;
}
};
// SpMV/CSR: sparse matrix-vector multiplication using the Compressed Sparse Row (CSR) format
__global__
void kernel_spMV_CSR(const uint32_t mat_n_row, const uint32_t mat_n_col,
const float* mat_elems, const uint32_t* mat_col_index, const uint32_t* mat_row_ptr,
const float* x_elems, float* result)
{
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < mat_n_row)
{
float sum = 0.0f;
for (auto i = mat_row_ptr[tid], end = mat_row_ptr[tid + 1]; i < end; i++)
{
sum += mat_elems[i] * x_elems[mat_col_index[i]];
}
result[tid] = sum;
}
}
Vector gpu_spMV_CSR(const SparseMat_CSR& h_mat, const Vector& h_x)
{
Vector h_result(h_mat.n_row);
std::fill(h_result.elems, h_result.elems + h_mat.n_row, 0.0f);
float *d_mat_elems = nullptr;
cc(hipMalloc(&d_mat_elems, sizeof(float) * h_mat.n_elem));
cc(hipMemcpy(d_mat_elems, h_mat.elems, sizeof(float) * h_mat.n_elem, hipMemcpyHostToDevice));
uint32_t *d_mat_col_index = nullptr;
cc(hipMalloc(&d_mat_col_index, sizeof(uint32_t) * h_mat.n_elem));
cc(hipMemcpy(d_mat_col_index, h_mat.col_index, sizeof(uint32_t) * h_mat.n_elem, hipMemcpyHostToDevice));
uint32_t *d_mat_row_ptr = nullptr;
cc(hipMalloc(&d_mat_row_ptr, sizeof(uint32_t) * (h_mat.n_row + 1)));
cc(hipMemcpy(d_mat_row_ptr, h_mat.row_ptr, sizeof(uint32_t) * (h_mat.n_row + 1), hipMemcpyHostToDevice));
uint32_t d_x_n = h_x.n;
float *d_x_elems = nullptr;
cc(hipMalloc(&d_x_elems, sizeof(float) * h_x.n));
cc(hipMemcpy(d_x_elems, h_x.elems, sizeof(float) * h_x.n, hipMemcpyHostToDevice));
uint32_t d_result_n = h_result.n;
float *d_result_elems = nullptr;
cc(hipMalloc(&d_result_elems, sizeof(float) * d_result_n));
hipLaunchKernelGGL(( kernel_spMV_CSR), dim3((h_mat.n_row - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, h_mat.n_row, h_mat.n_col, d_mat_elems,
d_mat_col_index, d_mat_row_ptr, d_x_elems, d_result_elems);
cc(hipDeviceSynchronize());
cc(hipMemcpy(h_result.elems, d_result_elems, sizeof(float) * d_result_n, hipMemcpyDeviceToHost));
cc(hipFree(d_mat_elems));
cc(hipFree(d_mat_col_index));
cc(hipFree(d_mat_row_ptr));
cc(hipFree(d_x_elems));
cc(hipFree(d_result_elems));
return h_result;
}
__global__
void kernel_spMV_ELL(const uint32_t mat_n_row, const uint32_t mat_n_col,
const float* mat_elems, const uint32_t* mat_col_index, const float* x_elems, float* result)
{
auto tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < mat_n_row)
{
float sum = 0.0f;
for (auto i = 0; i < mat_n_col; i++)
{
sum += mat_elems[i * mat_n_row + tid] * x_elems[mat_col_index[i * mat_n_row + tid]];
}
result[tid] = sum;
}
}
Vector gpu_spMV_ELL(const SparseMat_CSR& h_mat, const Vector& h_x)
{
Vector h_result(h_mat.n_row);
std::fill(h_result.elems, h_result.elems + h_mat.n_row, 0.0f);
uint32_t mat_n_row = h_mat.n_row;
// number of non-zeros in the row that has the most non-zero elements
uint32_t d_row_length = 0;
for (auto i = 0; i < h_mat.n_row; i++)
{
d_row_length = ::max(d_row_length, h_mat.row_ptr[i + 1] - h_mat.row_ptr[i]);
}
float *padded_mat_elems = new float[mat_n_row * d_row_length];
uint32_t *padded_mat_col_index = new uint32_t[mat_n_row * d_row_length];
std::fill(padded_mat_elems, padded_mat_elems + mat_n_row * d_row_length, 0.0f);
std::fill(padded_mat_col_index, padded_mat_col_index + mat_n_row * d_row_length, 0u);
// store in transposed form
for (auto i = 0; i < h_mat.n_row; i++)
{
int count = 0;
for (auto j = h_mat.row_ptr[i], end = h_mat.row_ptr[i + 1]; j < end; j++)
{
padded_mat_elems[count * mat_n_row + i] = h_mat.elems[j];
padded_mat_col_index[count * mat_n_row + i] = h_mat.col_index[j];
count++;
}
}
uint32_t *d_mat_col_index = nullptr;
cc(hipMalloc(&d_mat_col_index, sizeof(uint32_t) * (mat_n_row * d_row_length)));
cc(hipMemcpy(d_mat_col_index, padded_mat_col_index, sizeof(uint32_t) * (mat_n_row * d_row_length), hipMemcpyHostToDevice));
float *d_mat_elems = nullptr;
cc(hipMalloc(&d_mat_elems, sizeof(float) * (mat_n_row * d_row_length)));
cc(hipMemcpy(d_mat_elems, padded_mat_elems, sizeof(float) * (mat_n_row * d_row_length), hipMemcpyHostToDevice));
delete[] padded_mat_col_index;
delete[] padded_mat_elems;
uint32_t d_x_n = h_x.n;
float *d_x_elems = nullptr;
cc(hipMalloc(&d_x_elems, sizeof(float) * h_x.n));
cc(hipMemcpy(d_x_elems, h_x.elems, sizeof(float) * h_x.n, hipMemcpyHostToDevice));
uint32_t d_result_n = h_result.n;
float *d_result_elems = nullptr;
cc(hipMalloc(&d_result_elems, sizeof(float) * d_result_n));
hipLaunchKernelGGL(( kernel_spMV_ELL), dim3((h_mat.n_row - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE) , 0, 0, h_mat.n_row, d_row_length, d_mat_elems, d_mat_col_index, d_x_elems, d_result_elems);
cc(hipDeviceSynchronize());
cc(hipMemcpy(h_result.elems, d_result_elems, sizeof(float) * d_result_n, hipMemcpyDeviceToHost));
cc(hipFree(d_mat_elems));
cc(hipFree(d_mat_col_index));
cc(hipFree(d_x_elems));
cc(hipFree(d_result_elems));
return h_result;
}
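// ELL layout illustration (same 4x4 example as the CSR comment above, not part
// of the original file): d_row_length = 3, and slot k of row i is stored at
// index k * n_row + i, i.e. the padded arrays are kept transposed:
//   padded_mat_elems     = { 3,0,2,1,  1,0,4,1,  0,0,1,0 }
//   padded_mat_col_index = { 0,0,1,0,  2,0,2,3,  0,0,3,0 }
// so in each iteration of the kernel loop consecutive threads (one per row)
// touch consecutive addresses, which keeps global-memory accesses coalesced.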
Vector cpu_spMV(const SparseMat_CSR& mat, const Vector& x)
{
Vector result(mat.n_row);
std::fill(result.elems, result.elems + mat.n_row, 0.0f);
for (auto i = 0; i < mat.n_row; i++)
{
float sum = 0.0f;
for (auto j = mat.row_ptr[i], end = mat.row_ptr[i + 1]; j < end; j++)
{
sum += mat.elems[j] * x.elems[mat.col_index[j]];
}
result.elems[i] = sum;
}
return std::move(result);
}
bool valid(const float* h_result, const float* d_result, const size_t len)
{
bool is_valid = true;
for (auto i = 0; i < len; i++)
{
auto delta = h_result[i] - d_result[i];
delta = delta > 0 ? delta : -delta;
if (delta > 1e-5)
{
is_valid = false;
printf("At [%d]: %f vs %f\n", i, h_result[i], d_result[i]);
}
}
if (is_valid)
{
printf("All OK\n");
}
else
{
printf("Somewhere error\n");
}
return is_valid;
}
int main()
{
//Vector v(4);
//for (auto i = 0; i < v.n; i++)
//{
// v.elems[i] = 1.0;
//}
//SparseMat_CSR mat(4, 4, 7);
//mat.elems[0] = 3;
//mat.elems[1] = 1;
//mat.elems[2] = 2;
//mat.elems[3] = 4;
//mat.elems[4] = 1;
//mat.elems[5] = 1;
//mat.elems[6] = 1;
//mat.col_index[0] = 0;
//mat.col_index[1] = 2;
//mat.col_index[2] = 1;
//mat.col_index[3] = 2;
//mat.col_index[4] = 3;
//mat.col_index[5] = 0;
//mat.col_index[6] = 3;
//
//mat.row_ptr[0] = 0;
//mat.row_ptr[1] = 2;
//mat.row_ptr[2] = 2;
//mat.row_ptr[3] = 5;
//mat.row_ptr[4] = 7;
const uint32_t n_row = 3000;
const uint32_t n_col = 2000;
std::default_random_engine rd;
std::normal_distribution<double> normal_dis(0, 3);
std::uniform_real_distribution<double> uniform_dis(0.0, 1.0);
std::vector<float> elems;
std::vector<uint32_t> col_index;
std::vector<uint32_t> row_ptr;
uint32_t count = 0;
row_ptr.emplace_back(count);
for (auto i = 0; i < n_row; i++)
{
for (auto j = 0; j < n_col; j++)
{
// used for the uniform and the non-uniform distribution, respectively
//if (((i % 15 == 4 ? 0.1 : 0) + 1.0) * normal_dis(rd) > 7.0)
if (normal_dis(rd) > 7.0)
{
elems.emplace_back(uniform_dis(rd));
col_index.emplace_back(j);
count++;
}
}
row_ptr.emplace_back(count);
}
Vector v(n_row);
SparseMat_CSR mat(n_row, n_col, count);
std::copy(elems.begin(), elems.end(), mat.elems);
std::copy(col_index.begin(), col_index.end(), mat.col_index);
std::copy(row_ptr.begin(), row_ptr.end(), mat.row_ptr);
for (auto i = 0; i < n_row; i++)
{
v.elems[i] = uniform_dis(rd);
}
printf("Row: %d Col: %d N: %d\n", n_row, n_col, count);
auto h_result = cpu_spMV(mat, v);
auto d_result_CSR = gpu_spMV_CSR(mat, v);
auto d_result_ELL = gpu_spMV_ELL(mat, v);
std::cout << "CSR: ";
valid(h_result.elems, d_result_CSR.elems, n_row);
std::cout << "ELL: ";
valid(h_result.elems, d_result_ELL.elems, n_row);
//for (int i = 0; i < h_result.n; i++)
//{
// printf("%f ", h_result.elems[i]);
//}
//printf("\n");
//for (int i = 0; i < h_result.n; i++)
//{
// printf("%f ", d_result_CSR.elems[i]);
//}
//printf("\n");
//for (int i = 0; i < h_result.n; i++)
//{
// printf("%f ", d_result_ELL.elems[i]);
//}
//printf("\n");
return 0;
} | 2fc0af885194685f55257d4057640240594e3d29.cu | #include <iostream>
#include <cstdint>
#include <memory>
#include <random>
#include <functional>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "cuda_helper.h"
const size_t BLOCK_SIZE = 128u;
struct SparseMat_CSR
{
uint32_t n_row = 0u;
uint32_t n_col = 0u;
uint32_t n_elem = 0u;
float* elems = nullptr;
uint32_t* col_index = nullptr;
uint32_t* row_ptr = nullptr;
SparseMat_CSR() = default;
SparseMat_CSR(uint32_t n_row, uint32_t n_col, uint32_t n_elem)
: n_row(n_row), n_col(n_col), n_elem(n_elem)
{
if (n_row * n_col * n_elem > 0)
{
elems = new float[n_elem];
col_index = new uint32_t[n_elem];
row_ptr = new uint32_t[n_row + 1];
}
}
~SparseMat_CSR()
{
if (elems) delete[] elems;
if (col_index) delete[] col_index;
if (row_ptr) delete[] row_ptr;
}
};
struct Vector
{
uint32_t n = 0;
float* elems = nullptr;
Vector() = default;
Vector(uint32_t n)
: n(n)
{
if (n > 0)
{
elems = new float[n];
}
}
Vector(const Vector& ano)
: n(ano.n)
{
if (n > 0)
{
elems = new float[n];
std::copy(ano.elems, ano.elems + n, elems);
}
}
Vector(Vector&& ano)
: n(ano.n)
{
if (n > 0)
{
ano.n = 0;
elems = ano.elems;
ano.elems = nullptr;
}
}
~Vector()
{
if (elems) delete[] elems;
}
};
// SpMV/CSR: sparse matrix-vector multiplication using the Compressed Sparse Row (CSR) format
__global__
void kernel_spMV_CSR(const uint32_t mat_n_row, const uint32_t mat_n_col,
const float* mat_elems, const uint32_t* mat_col_index, const uint32_t* mat_row_ptr,
const float* x_elems, float* result)
{
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < mat_n_row)
{
float sum = 0.0f;
for (auto i = mat_row_ptr[tid], end = mat_row_ptr[tid + 1]; i < end; i++)
{
sum += mat_elems[i] * x_elems[mat_col_index[i]];
}
result[tid] = sum;
}
}
Vector gpu_spMV_CSR(const SparseMat_CSR& h_mat, const Vector& h_x)
{
Vector h_result(h_mat.n_row);
std::fill(h_result.elems, h_result.elems + h_mat.n_row, 0.0f);
float *d_mat_elems = nullptr;
cc(cudaMalloc(&d_mat_elems, sizeof(float) * h_mat.n_elem));
cc(cudaMemcpy(d_mat_elems, h_mat.elems, sizeof(float) * h_mat.n_elem, cudaMemcpyHostToDevice));
uint32_t *d_mat_col_index = nullptr;
cc(cudaMalloc(&d_mat_col_index, sizeof(uint32_t) * h_mat.n_elem));
cc(cudaMemcpy(d_mat_col_index, h_mat.col_index, sizeof(uint32_t) * h_mat.n_elem, cudaMemcpyHostToDevice));
uint32_t *d_mat_row_ptr = nullptr;
cc(cudaMalloc(&d_mat_row_ptr, sizeof(uint32_t) * (h_mat.n_row + 1)));
cc(cudaMemcpy(d_mat_row_ptr, h_mat.row_ptr, sizeof(uint32_t) * (h_mat.n_row + 1), cudaMemcpyHostToDevice));
uint32_t d_x_n = h_x.n;
float *d_x_elems = nullptr;
cc(cudaMalloc(&d_x_elems, sizeof(float) * h_x.n));
cc(cudaMemcpy(d_x_elems, h_x.elems, sizeof(float) * h_x.n, cudaMemcpyHostToDevice));
uint32_t d_result_n = h_result.n;
float *d_result_elems = nullptr;
cc(cudaMalloc(&d_result_elems, sizeof(float) * d_result_n));
kernel_spMV_CSR<<<(h_mat.n_row - 1) / BLOCK_SIZE + 1, BLOCK_SIZE>>>(h_mat.n_row, h_mat.n_col, d_mat_elems,
d_mat_col_index, d_mat_row_ptr, d_x_elems, d_result_elems);
cc(cudaDeviceSynchronize());
cc(cudaMemcpy(h_result.elems, d_result_elems, sizeof(float) * d_result_n, cudaMemcpyDeviceToHost));
cc(cudaFree(d_mat_elems));
cc(cudaFree(d_mat_col_index));
cc(cudaFree(d_mat_row_ptr));
cc(cudaFree(d_x_elems));
cc(cudaFree(d_result_elems));
return h_result;
}
__global__
void kernel_spMV_ELL(const uint32_t mat_n_row, const uint32_t mat_n_col,
const float* mat_elems, const uint32_t* mat_col_index, const float* x_elems, float* result)
{
auto tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < mat_n_row)
{
float sum = 0.0f;
for (auto i = 0; i < mat_n_col; i++)
{
sum += mat_elems[i * mat_n_row + tid] * x_elems[mat_col_index[i * mat_n_row + tid]];
}
result[tid] = sum;
}
}
Vector gpu_spMV_ELL(const SparseMat_CSR& h_mat, const Vector& h_x)
{
Vector h_result(h_mat.n_row);
std::fill(h_result.elems, h_result.elems + h_mat.n_row, 0.0f);
uint32_t mat_n_row = h_mat.n_row;
// number of non-zeros in the row that has the most non-zero elements
uint32_t d_row_length = 0;
for (auto i = 0; i < h_mat.n_row; i++)
{
d_row_length = std::max(d_row_length, h_mat.row_ptr[i + 1] - h_mat.row_ptr[i]);
}
float *padded_mat_elems = new float[mat_n_row * d_row_length];
uint32_t *padded_mat_col_index = new uint32_t[mat_n_row * d_row_length];
std::fill(padded_mat_elems, padded_mat_elems + mat_n_row * d_row_length, 0.0f);
std::fill(padded_mat_col_index, padded_mat_col_index + mat_n_row * d_row_length, 0u);
// store in transposed form
for (auto i = 0; i < h_mat.n_row; i++)
{
int count = 0;
for (auto j = h_mat.row_ptr[i], end = h_mat.row_ptr[i + 1]; j < end; j++)
{
padded_mat_elems[count * mat_n_row + i] = h_mat.elems[j];
padded_mat_col_index[count * mat_n_row + i] = h_mat.col_index[j];
count++;
}
}
uint32_t *d_mat_col_index = nullptr;
cc(cudaMalloc(&d_mat_col_index, sizeof(uint32_t) * (mat_n_row * d_row_length)));
cc(cudaMemcpy(d_mat_col_index, padded_mat_col_index, sizeof(uint32_t) * (mat_n_row * d_row_length), cudaMemcpyHostToDevice));
float *d_mat_elems = nullptr;
cc(cudaMalloc(&d_mat_elems, sizeof(float) * (mat_n_row * d_row_length)));
cc(cudaMemcpy(d_mat_elems, padded_mat_elems, sizeof(float) * (mat_n_row * d_row_length), cudaMemcpyHostToDevice));
delete[] padded_mat_col_index;
delete[] padded_mat_elems;
uint32_t d_x_n = h_x.n;
float *d_x_elems = nullptr;
cc(cudaMalloc(&d_x_elems, sizeof(float) * h_x.n));
cc(cudaMemcpy(d_x_elems, h_x.elems, sizeof(float) * h_x.n, cudaMemcpyHostToDevice));
uint32_t d_result_n = h_result.n;
float *d_result_elems = nullptr;
cc(cudaMalloc(&d_result_elems, sizeof(float) * d_result_n));
kernel_spMV_ELL<<<(h_mat.n_row - 1) / BLOCK_SIZE + 1, BLOCK_SIZE >>>(h_mat.n_row, d_row_length, d_mat_elems, d_mat_col_index, d_x_elems, d_result_elems);
cc(cudaDeviceSynchronize());
cc(cudaMemcpy(h_result.elems, d_result_elems, sizeof(float) * d_result_n, cudaMemcpyDeviceToHost));
cc(cudaFree(d_mat_elems));
cc(cudaFree(d_mat_col_index));
cc(cudaFree(d_x_elems));
cc(cudaFree(d_result_elems));
return h_result;
}
Vector cpu_spMV(const SparseMat_CSR& mat, const Vector& x)
{
Vector result(mat.n_row);
std::fill(result.elems, result.elems + mat.n_row, 0.0f);
for (auto i = 0; i < mat.n_row; i++)
{
float sum = 0.0f;
for (auto j = mat.row_ptr[i], end = mat.row_ptr[i + 1]; j < end; j++)
{
sum += mat.elems[j] * x.elems[mat.col_index[j]];
}
result.elems[i] = sum;
}
return std::move(result);
}
bool valid(const float* h_result, const float* d_result, const size_t len)
{
bool is_valid = true;
for (auto i = 0; i < len; i++)
{
auto delta = h_result[i] - d_result[i];
delta = delta > 0 ? delta : -delta;
if (delta > 1e-5)
{
is_valid = false;
printf("At [%d]: %f vs %f\n", i, h_result[i], d_result[i]);
}
}
if (is_valid)
{
printf("All OK\n");
}
else
{
printf("Somewhere error\n");
}
return is_valid;
}
int main()
{
//Vector v(4);
//for (auto i = 0; i < v.n; i++)
//{
// v.elems[i] = 1.0;
//}
//SparseMat_CSR mat(4, 4, 7);
//mat.elems[0] = 3;
//mat.elems[1] = 1;
//mat.elems[2] = 2;
//mat.elems[3] = 4;
//mat.elems[4] = 1;
//mat.elems[5] = 1;
//mat.elems[6] = 1;
//mat.col_index[0] = 0;
//mat.col_index[1] = 2;
//mat.col_index[2] = 1;
//mat.col_index[3] = 2;
//mat.col_index[4] = 3;
//mat.col_index[5] = 0;
//mat.col_index[6] = 3;
//
//mat.row_ptr[0] = 0;
//mat.row_ptr[1] = 2;
//mat.row_ptr[2] = 2;
//mat.row_ptr[3] = 5;
//mat.row_ptr[4] = 7;
const uint32_t n_row = 3000;
const uint32_t n_col = 2000;
std::default_random_engine rd;
std::normal_distribution<double> normal_dis(0, 3);
std::uniform_real_distribution<double> uniform_dis(0.0, 1.0);
std::vector<float> elems;
std::vector<uint32_t> col_index;
std::vector<uint32_t> row_ptr;
uint32_t count = 0;
row_ptr.emplace_back(count);
for (auto i = 0; i < n_row; i++)
{
for (auto j = 0; j < n_col; j++)
{
// used for the uniform and the non-uniform distribution, respectively
//if (((i % 15 == 4 ? 0.1 : 0) + 1.0) * normal_dis(rd) > 7.0)
if (normal_dis(rd) > 7.0)
{
elems.emplace_back(uniform_dis(rd));
col_index.emplace_back(j);
count++;
}
}
row_ptr.emplace_back(count);
}
Vector v(n_row);
SparseMat_CSR mat(n_row, n_col, count);
std::copy(elems.begin(), elems.end(), mat.elems);
std::copy(col_index.begin(), col_index.end(), mat.col_index);
std::copy(row_ptr.begin(), row_ptr.end(), mat.row_ptr);
for (auto i = 0; i < n_row; i++)
{
v.elems[i] = uniform_dis(rd);
}
printf("Row: %d Col: %d N: %d\n", n_row, n_col, count);
auto h_result = cpu_spMV(mat, v);
auto d_result_CSR = gpu_spMV_CSR(mat, v);
auto d_result_ELL = gpu_spMV_ELL(mat, v);
std::cout << "CSR: ";
valid(h_result.elems, d_result_CSR.elems, n_row);
std::cout << "ELL: ";
valid(h_result.elems, d_result_ELL.elems, n_row);
//for (int i = 0; i < h_result.n; i++)
//{
// printf("%f ", h_result.elems[i]);
//}
//printf("\n");
//for (int i = 0; i < h_result.n; i++)
//{
// printf("%f ", d_result_CSR.elems[i]);
//}
//printf("\n");
//for (int i = 0; i < h_result.n; i++)
//{
// printf("%f ", d_result_ELL.elems[i]);
//}
//printf("\n");
return 0;
} |
fed520c00426152458e53db79d6221bb6ac0adbd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef MD_HOST_CUH
#define MD_HOST_CUH
#include "../thrust_all.cuh"
#include "./../utility/random.cuh"
#include "./particle.cuh"
#include "./list.cuh"
#include "./setting.cuh"
void h_create_list_full_search(Atoms &atoms,List &list,Setting &setting){
list.refresh(atoms.N);
hipLaunchKernelGGL(( create_list), dim3(setting.numBlocks),dim3(setting.numThreads), 0, 0,
thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
thrust::raw_pointer_cast(list.list),setting.N,setting.L,list.list_size,list.cutoff);
}
void h_update_verlet_list(Atoms &atoms,List &list,Mesh &mesh, Setting &setting){
list.refresh(atoms.N);
mesh.refresh();
mesh.check_index(atoms);
mesh.check_num_atoms_in_mesh();
hipLaunchKernelGGL(( updateVerletList), dim3(setting.numBlocks),dim3(setting.numThreads), 0, 0,
thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
thrust::raw_pointer_cast(mesh.mesh_begin),thrust::raw_pointer_cast(mesh.mesh_end),
thrust::raw_pointer_cast(mesh.atoms_id),thrust::raw_pointer_cast(list.list),setting.L,
list.cutoff,list.list_size,mesh.num_mesh_per_axis,mesh.mesh_length
);
}
void EoM(Atoms &atoms,List &list,Mesh &mesh, Setting &setting,hiprandGenerator_t &gen){
// calc_force<<<setting.numBlocks,setting.numThreads>>>(
// thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
// thrust::raw_pointer_cast(atoms.fx),thrust::raw_pointer_cast(atoms.fy),
// thrust::raw_pointer_cast(atoms.sigma),setting.N,setting.L
// );
hipLaunchKernelGGL(( calc_force_with_list), dim3(setting.numBlocks),dim3(setting.numThreads), 0, 0,
thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
thrust::raw_pointer_cast(atoms.fx),thrust::raw_pointer_cast(atoms.fy),
thrust::raw_pointer_cast(atoms.sigma),setting.L,
thrust::raw_pointer_cast(list.list),list.list_size
);
//hipLaunchKernelGGL(( calc_force_Harmonic), dim3(setting.numBlocks),dim3(setting.numThreads), 0, 0,
// thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
// thrust::raw_pointer_cast(atoms.fx),thrust::raw_pointer_cast(atoms.fy),
// thrust::raw_pointer_cast(atoms.sigma),setting.L,
// thrust::raw_pointer_cast(list.list),list.list_size
// );
atoms.updateVelo(gen,setting.dt);
atoms.updatePos(setting.dt);
atoms.set_in_box(setting);
}
void quench(Atoms &atoms,List &list,Mesh &mesh, Setting &setting){
// calc_force<<<setting.numBlocks,setting.numThreads>>>(
// thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
// thrust::raw_pointer_cast(atoms.fx),thrust::raw_pointer_cast(atoms.fy),
// thrust::raw_pointer_cast(atoms.sigma),setting.N,setting.L
// );
hipLaunchKernelGGL(( calc_force_Harmonic), dim3(setting.numBlocks),dim3(setting.numThreads), 0, 0,
thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
thrust::raw_pointer_cast(atoms.fx),thrust::raw_pointer_cast(atoms.fy),
thrust::raw_pointer_cast(atoms.sigma),setting.L,
thrust::raw_pointer_cast(list.list),list.list_size
);
//std::cout << "calc force" << '\n';
atoms.updateQuench(setting.dt);
//std::cout << "updatePos" << '\n';
atoms.set_in_box(setting);
//std::cout << "boundary" << '\n';
}
double h_calc_potential_energy(Atoms &atoms,Setting &setting,List &list){
// calc_potential<<<setting.numBlocks,setting.numThreads>>>(
// thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
// thrust::raw_pointer_cast(atoms.potential),thrust::raw_pointer_cast(atoms.sigma),
// setting.N,setting.L
// );
hipLaunchKernelGGL(( calc_potential_with_list), dim3(setting.numBlocks),dim3(setting.numThreads), 0, 0,
thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
thrust::raw_pointer_cast(atoms.potential),thrust::raw_pointer_cast(atoms.sigma),
setting.L,thrust::raw_pointer_cast(list.list),list.list_size);
double ave_pot = atoms.average_potential();
return ave_pot;
}
double h_calc_potential_energy_harmonic(Atoms &atoms,Setting &setting,List &list){
// calc_potential<<<setting.numBlocks,setting.numThreads>>>(
// thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
// thrust::raw_pointer_cast(atoms.potential),thrust::raw_pointer_cast(atoms.sigma),
// setting.N,setting.L
// );
hipLaunchKernelGGL(( calc_potential_Harmonic), dim3(setting.numBlocks),dim3(setting.numThreads), 0, 0,
thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
thrust::raw_pointer_cast(atoms.potential),thrust::raw_pointer_cast(atoms.sigma),
setting.L,thrust::raw_pointer_cast(list.list),list.list_size);
double ave_pot = atoms.average_potential();
return ave_pot;
}
#endif
| fed520c00426152458e53db79d6221bb6ac0adbd.cu | #ifndef MD_HOST_CUH
#define MD_HOST_CUH
#include "../thrust_all.cuh"
#include "./../utility/random.cuh"
#include "./particle.cuh"
#include "./list.cuh"
#include "./setting.cuh"
void h_create_list_full_search(Atoms &atoms,List &list,Setting &setting){
list.refresh(atoms.N);
create_list<<<setting.numBlocks,setting.numThreads>>>(
thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
thrust::raw_pointer_cast(list.list),setting.N,setting.L,list.list_size,list.cutoff);
}
void h_update_verlet_list(Atoms &atoms,List &list,Mesh &mesh, Setting &setting){
list.refresh(atoms.N);
mesh.refresh();
mesh.check_index(atoms);
mesh.check_num_atoms_in_mesh();
updateVerletList<<<setting.numBlocks,setting.numThreads>>>(
thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
thrust::raw_pointer_cast(mesh.mesh_begin),thrust::raw_pointer_cast(mesh.mesh_end),
thrust::raw_pointer_cast(mesh.atoms_id),thrust::raw_pointer_cast(list.list),setting.L,
list.cutoff,list.list_size,mesh.num_mesh_per_axis,mesh.mesh_length
);
}
void EoM(Atoms &atoms,List &list,Mesh &mesh, Setting &setting,curandGenerator_t &gen){
// calc_force<<<setting.numBlocks,setting.numThreads>>>(
// thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
// thrust::raw_pointer_cast(atoms.fx),thrust::raw_pointer_cast(atoms.fy),
// thrust::raw_pointer_cast(atoms.sigma),setting.N,setting.L
// );
calc_force_with_list<<<setting.numBlocks,setting.numThreads>>>(
thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
thrust::raw_pointer_cast(atoms.fx),thrust::raw_pointer_cast(atoms.fy),
thrust::raw_pointer_cast(atoms.sigma),setting.L,
thrust::raw_pointer_cast(list.list),list.list_size
);
// calc_force_Harmonic<<<setting.numBlocks,setting.numThreads>>>(
// thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
// thrust::raw_pointer_cast(atoms.fx),thrust::raw_pointer_cast(atoms.fy),
// thrust::raw_pointer_cast(atoms.sigma),setting.L,
// thrust::raw_pointer_cast(list.list),list.list_size
// );
atoms.updateVelo(gen,setting.dt);
atoms.updatePos(setting.dt);
atoms.set_in_box(setting);
}
void quench(Atoms &atoms,List &list,Mesh &mesh, Setting &setting){
// calc_force<<<setting.numBlocks,setting.numThreads>>>(
// thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
// thrust::raw_pointer_cast(atoms.fx),thrust::raw_pointer_cast(atoms.fy),
// thrust::raw_pointer_cast(atoms.sigma),setting.N,setting.L
// );
calc_force_Harmonic<<<setting.numBlocks,setting.numThreads>>>(
thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
thrust::raw_pointer_cast(atoms.fx),thrust::raw_pointer_cast(atoms.fy),
thrust::raw_pointer_cast(atoms.sigma),setting.L,
thrust::raw_pointer_cast(list.list),list.list_size
);
//std::cout << "calc force" << '\n';
atoms.updateQuench(setting.dt);
//std::cout << "updatePos" << '\n';
atoms.set_in_box(setting);
//std::cout << "boundary" << '\n';
}
double h_calc_potential_energy(Atoms &atoms,Setting &setting,List &list){
// calc_potential<<<setting.numBlocks,setting.numThreads>>>(
// thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
// thrust::raw_pointer_cast(atoms.potential),thrust::raw_pointer_cast(atoms.sigma),
// setting.N,setting.L
// );
calc_potential_with_list<<<setting.numBlocks,setting.numThreads>>>(
thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
thrust::raw_pointer_cast(atoms.potential),thrust::raw_pointer_cast(atoms.sigma),
setting.L,thrust::raw_pointer_cast(list.list),list.list_size);
double ave_pot = atoms.average_potential();
return ave_pot;
}
double h_calc_potential_energy_harmonic(Atoms &atoms,Setting &setting,List &list){
// calc_potential<<<setting.numBlocks,setting.numThreads>>>(
// thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
// thrust::raw_pointer_cast(atoms.potential),thrust::raw_pointer_cast(atoms.sigma),
// setting.N,setting.L
// );
calc_potential_Harmonic<<<setting.numBlocks,setting.numThreads>>>(
thrust::raw_pointer_cast(atoms.x),thrust::raw_pointer_cast(atoms.y),
thrust::raw_pointer_cast(atoms.potential),thrust::raw_pointer_cast(atoms.sigma),
setting.L,thrust::raw_pointer_cast(list.list),list.list_size);
double ave_pot = atoms.average_potential();
return ave_pot;
}
#endif
|
16aadcf660e3aeb8c49c7b8d5ed87709dddb665c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define NUM_THREADS SCAN_NUM_THREADS
#define VALUES_PER_THREAD SCAN_VALUES_PER_THREAD
#define BLOCKS_PER_SM SCAN_BLOCKS_PER_SM
#define NUM_WARPS (NUM_THREADS / WARP_SIZE)
#define LOG_NUM_WARPS LOG_BASE_2(NUM_WARPS)
#define VALUES_PER_WARP (WARP_SIZE * VALUES_PER_THREAD)
#define NUM_VALUES (NUM_THREADS * VALUES_PER_THREAD)
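// Overall structure, for orientation: GlobalScanUpsweep reduces each block's
// interval of the input to a single per-block total, GlobalScanReduction
// exclusive-scans those totals (appending the grand total at the end), and
// GlobalScanDownsweep re-reads the input and writes the final scan with each
// block's offset added.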
////////////////////////////////////////////////////////////////////////////////
// Multiscan utility function. Used in the first and third passes of the
// global scan function. Returns the inclusive scan of the arguments in .x and
// the sum of all arguments in .y.
// Each warp is passed a pointer to its own contiguous area of shared memory.
// There must be at least 48 slots of memory. They should also be aligned so
// that the difference between the start of consecutive warps differ by an
// interval that is relatively prime to 32 (any odd number will do).
////////////////////////////////////////////////////////////////////////////////
// GlobalScanUpsweep adds up all the values in elements_global within the
// range given by blockCount and writes to blockTotals_global[blockIdx.x].
extern "C" __launch_bounds__(NUM_THREADS, BLOCKS_PER_SM) __global__
void GlobalScanUpsweep(const uint* valuesIn_global, uint* blockTotals_global,
const int2* range_global) {
uint block = blockIdx.x;
uint tid = threadIdx.x;
int2 range = range_global[block];
// Loop through all elements in the interval, adding up values.
// There is no need to synchronize until we perform the multiscan.
uint sum = 0;
for(uint index = range.x + tid; index < range.y; index += 2 * NUM_THREADS)
sum += valuesIn_global[index] + valuesIn_global[index + NUM_THREADS];
// A full multiscan is unnecessary here - we really only need the total.
// But this is easy and won't slow us down since this kernel is already
// bandwidth limited.
uint total = Multiscan2<NUM_WARPS>(tid, sum).y;
if(!tid)
blockTotals_global[block] = total;
}
////////////////////////////////////////////////////////////////////////////////
// GlobalScanReduction performs an exclusive scan on the elements in
// blockTotals_global and writes back in-place.
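// Note: the totals are indexed by threadIdx alone, so this kernel is meant to
// run as a single block and assumes numBlocks <= REDUCTION_NUM_THREADS.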
extern "C" __global__ __launch_bounds__(REDUCTION_NUM_THREADS, 1)
void GlobalScanReduction(uint* blockTotals_global,
uint numBlocks) {
uint tid = threadIdx.x;
uint x = 0;
if(tid < numBlocks) x = blockTotals_global[tid];
// Subtract the value from the inclusive scan for the exclusive scan.
uint2 scan = Multiscan2<REDUCTION_NUM_THREADS / WARP_SIZE>(tid, x);
if(tid < numBlocks) blockTotals_global[tid] = scan.x - x;
// Have the first thread in the block set the scan total.
if(!tid) blockTotals_global[numBlocks] = scan.y;
}
////////////////////////////////////////////////////////////////////////////////
// GlobalScanDownsweep runs an exclusive scan on the same interval of data as in
// pass 1, and adds blockScan_global[blockIdx.x] to each of them, writing back
// out in-place.
extern "C" __launch_bounds__(NUM_THREADS, BLOCKS_PER_SM) __global__
void GlobalScanDownsweep(const uint* valuesIn_global, uint* valuesOut_global,
const uint* blockScan_global, const int2* range_global, int count,
int inclusive) {
uint block = blockIdx.x;
uint tid = threadIdx.x;
uint warp = tid / WARP_SIZE;
uint lane = (WARP_SIZE - 1) & tid;
uint index = VALUES_PER_WARP * warp + lane;
uint blockScan = blockScan_global[block];
int2 range = range_global[block];
const int Size = NUM_WARPS * VALUES_PER_THREAD * (WARP_SIZE + 1);
__shared__ volatile uint shared[Size];
// Use a stride of 33 slots per warp per value to allow conflict-free
// transposes from strided to thread order.
volatile uint* warpShared = shared +
warp * VALUES_PER_THREAD * (WARP_SIZE + 1);
volatile uint* threadShared = warpShared + lane;
// Transpose values into thread order.
uint offset = VALUES_PER_THREAD * lane;
offset += offset / WARP_SIZE;
while(range.x < range.y) {
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint source = range.x + index + i * WARP_SIZE;
uint x = valuesIn_global[source];
if(i * (WARP_SIZE + 1) < count) {
threadShared[i * (WARP_SIZE + 1)] = x;
}
}
// Transpose into thread order by reading from transposeValues.
// Compute the exclusive or inclusive scan of the thread values and
// their sum.
uint scan[VALUES_PER_THREAD];
uint sum = 0;
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint x = warpShared[offset + i];
scan[i] = sum;
if(inclusive) scan[i] += x;
sum += x;
}
// Multiscan for each thread's scan offset within the block. Subtract
// sum to make it an exclusive scan.
uint2 localScan = Multiscan2<NUM_WARPS>(tid, sum);
uint scanOffset = localScan.x + blockScan - sum;
// Add the scan offset to each exclusive scan and put the values back
// into the shared memory they came out of.
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint x = scan[i] + scanOffset;
warpShared[offset + i] = x;
}
// Store the scan back to global memory.
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint x = threadShared[i * (WARP_SIZE + 1)];
uint target = range.x + index + i * WARP_SIZE;
if(target < count) {
valuesOut_global[target] = x;
}
}
// Grab the last element of totals_shared, which was set in Multiscan.
// This is the total for all the values encountered in this pass.
blockScan += localScan.y;
range.x += NUM_VALUES;
}
}
#undef NUM_THREADS
#undef NUM_WARPS
#undef LOG_NUM_WARPS
#undef BLOCKS_PER_SM
#undef VALUES_PER_THREAD
#undef VALUES_PER_WARP
#undef NUM_VALUES
| 16aadcf660e3aeb8c49c7b8d5ed87709dddb665c.cu | #define NUM_THREADS SCAN_NUM_THREADS
#define VALUES_PER_THREAD SCAN_VALUES_PER_THREAD
#define BLOCKS_PER_SM SCAN_BLOCKS_PER_SM
#define NUM_WARPS (NUM_THREADS / WARP_SIZE)
#define LOG_NUM_WARPS LOG_BASE_2(NUM_WARPS)
#define VALUES_PER_WARP (WARP_SIZE * VALUES_PER_THREAD)
#define NUM_VALUES (NUM_THREADS * VALUES_PER_THREAD)
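// Overall structure, for orientation: GlobalScanUpsweep reduces each block's
// interval of the input to a single per-block total, GlobalScanReduction
// exclusive-scans those totals (appending the grand total at the end), and
// GlobalScanDownsweep re-reads the input and writes the final scan with each
// block's offset added.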
////////////////////////////////////////////////////////////////////////////////
// Multiscan utility function. Used in the first and third passes of the
// global scan function. Returns the inclusive scan of the arguments in .x and
// the sum of all arguments in .y.
// Each warp is passed a pointer to its own contiguous area of shared memory.
// There must be at least 48 slots of memory. They should also be aligned so
// that the difference between the start of consecutive warps differ by an
// interval that is relatively prime to 32 (any odd number will do).
////////////////////////////////////////////////////////////////////////////////
// GlobalScanUpsweep adds up all the values in elements_global within the
// range given by blockCount and writes to blockTotals_global[blockIdx.x].
extern "C" __launch_bounds__(NUM_THREADS, BLOCKS_PER_SM) __global__
void GlobalScanUpsweep(const uint* valuesIn_global, uint* blockTotals_global,
const int2* range_global) {
uint block = blockIdx.x;
uint tid = threadIdx.x;
int2 range = range_global[block];
// Loop through all elements in the interval, adding up values.
// There is no need to synchronize until we perform the multiscan.
uint sum = 0;
for(uint index = range.x + tid; index < range.y; index += 2 * NUM_THREADS)
sum += valuesIn_global[index] + valuesIn_global[index + NUM_THREADS];
// A full multiscan is unnecessary here - we really only need the total.
// But this is easy and won't slow us down since this kernel is already
// bandwidth limited.
uint total = Multiscan2<NUM_WARPS>(tid, sum).y;
if(!tid)
blockTotals_global[block] = total;
}
////////////////////////////////////////////////////////////////////////////////
// GlobalScanReduction performs an exclusive scan on the elements in
// blockTotals_global and writes back in-place.
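// Note: the totals are indexed by threadIdx alone, so this kernel is meant to
// run as a single block and assumes numBlocks <= REDUCTION_NUM_THREADS.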
extern "C" __global__ __launch_bounds__(REDUCTION_NUM_THREADS, 1)
void GlobalScanReduction(uint* blockTotals_global,
uint numBlocks) {
uint tid = threadIdx.x;
uint x = 0;
if(tid < numBlocks) x = blockTotals_global[tid];
// Subtract the value from the inclusive scan for the exclusive scan.
uint2 scan = Multiscan2<REDUCTION_NUM_THREADS / WARP_SIZE>(tid, x);
if(tid < numBlocks) blockTotals_global[tid] = scan.x - x;
// Have the first thread in the block set the scan total.
if(!tid) blockTotals_global[numBlocks] = scan.y;
}
////////////////////////////////////////////////////////////////////////////////
// GlobalScanDownsweep runs an exclusive scan on the same interval of data as in
// pass 1, and adds blockScan_global[blockIdx.x] to each of them, writing back
// out in-place.
extern "C" __launch_bounds__(NUM_THREADS, BLOCKS_PER_SM) __global__
void GlobalScanDownsweep(const uint* valuesIn_global, uint* valuesOut_global,
const uint* blockScan_global, const int2* range_global, int count,
int inclusive) {
uint block = blockIdx.x;
uint tid = threadIdx.x;
uint warp = tid / WARP_SIZE;
uint lane = (WARP_SIZE - 1) & tid;
uint index = VALUES_PER_WARP * warp + lane;
uint blockScan = blockScan_global[block];
int2 range = range_global[block];
const int Size = NUM_WARPS * VALUES_PER_THREAD * (WARP_SIZE + 1);
__shared__ volatile uint shared[Size];
// Use a stride of 33 slots per warp per value to allow conflict-free
// transposes from strided to thread order.
volatile uint* warpShared = shared +
warp * VALUES_PER_THREAD * (WARP_SIZE + 1);
volatile uint* threadShared = warpShared + lane;
// Transpose values into thread order.
uint offset = VALUES_PER_THREAD * lane;
offset += offset / WARP_SIZE;
while(range.x < range.y) {
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint source = range.x + index + i * WARP_SIZE;
uint x = valuesIn_global[source];
if(i * (WARP_SIZE + 1) < count) {
threadShared[i * (WARP_SIZE + 1)] = x;
}
}
// Transpose into thread order by reading from transposeValues.
// Compute the exclusive or inclusive scan of the thread values and
// their sum.
uint scan[VALUES_PER_THREAD];
uint sum = 0;
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint x = warpShared[offset + i];
scan[i] = sum;
if(inclusive) scan[i] += x;
sum += x;
}
// Multiscan for each thread's scan offset within the block. Subtract
// sum to make it an exclusive scan.
uint2 localScan = Multiscan2<NUM_WARPS>(tid, sum);
uint scanOffset = localScan.x + blockScan - sum;
// Add the scan offset to each exclusive scan and put the values back
// into the shared memory they came out of.
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint x = scan[i] + scanOffset;
warpShared[offset + i] = x;
}
// Store the scan back to global memory.
#pragma unroll
for(int i = 0; i < VALUES_PER_THREAD; ++i) {
uint x = threadShared[i * (WARP_SIZE + 1)];
uint target = range.x + index + i * WARP_SIZE;
if(target < count) {
valuesOut_global[target] = x;
}
}
// Grab the last element of totals_shared, which was set in Multiscan.
// This is the total for all the values encountered in this pass.
blockScan += localScan.y;
range.x += NUM_VALUES;
}
}
#undef NUM_THREADS
#undef NUM_WARPS
#undef LOG_NUM_WARPS
#undef BLOCKS_PER_SM
#undef VALUES_PER_THREAD
#undef VALUES_PER_WARP
#undef NUM_VALUES
|
d7d98de377c9ac8a813f139256770e12f1fcd526.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <chrono>
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
// TODO: add new attributes to your VertexOut
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, because the normal would be distorted by the perspective transformation
// glm::vec3 col;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
int texWidth, texHeight;
// ...
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
};
struct Fragment {
glm::vec3 color;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor;
VertexAttributeTexcoord texcoord0;
TextureData* dev_diffuseTex;
int texWidth, texHeight;
// ...
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int * dev_depth = NULL; // you might need this buffer when doing depth test
static int * dev_mutex = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
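// Fetch one texel; assumes a tightly packed 8-bit RGB texture (3 bytes per
// texel, no row padding) and returns the color normalized to [0, 1].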
__device__
glm::vec3 getTexColor(TextureData* tex, int width, float u, float v)
{
int index = u + v * width;
return glm::vec3(tex[index * 3], tex[index * 3 + 1], tex[index * 3 + 2]) / 255.f;
}
// for more information on bilinear filtering:
// https://en.wikipedia.org/wiki/Bilinear_filtering
// used sample code from this source
__device__
glm::vec3 getBilinearFilteredPixelColor(Fragment &fragment)
{
float u = fragment.texcoord0.x * fragment.texWidth - 0.5f;
float v = fragment.texcoord0.y * fragment.texHeight - 0.5f;
int x = glm::floor(u);
int y = glm::floor(v);
float u_ratio = u - x;
float v_ratio = v - y;
float u_opposite = 1.f - u_ratio;
float v_opposite = 1.f - v_ratio;
// retrieve texture data
glm::vec3 texXY = getTexColor(fragment.dev_diffuseTex, fragment.texWidth, x, y);
glm::vec3 texX1Y = getTexColor(fragment.dev_diffuseTex, fragment.texWidth, x + 1, y);
glm::vec3 texXY1 = getTexColor(fragment.dev_diffuseTex, fragment.texWidth, x, y + 1);
glm::vec3 texX1Y1 = getTexColor(fragment.dev_diffuseTex, fragment.texWidth, x + 1, y + 1);
return (texXY * u_opposite + texX1Y * u_ratio) * v_opposite +
(texXY1 * u_opposite + texX1Y1 * u_ratio) * v_ratio;
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h)
{
// TODO: add your fragment shader code here
Fragment fragment = fragmentBuffer[index];
#if TEXTURE == 1
if (fragment.dev_diffuseTex != NULL)
{
#if BILINEAR == 1
fragment.color = getBilinearFilteredPixelColor(fragment);
#else
int u = fragment.texcoord0.x * fragment.texWidth;
int v = fragment.texcoord0.y * fragment.texHeight;
fragment.color = getTexColor(fragment.dev_diffuseTex, fragment.texWidth, u, v);
#endif
}
#endif
framebuffer[index] = fragment.color;
#if PRIMTYPE == 3
framebuffer[index] *= glm::dot(fragment.eyeNor, glm::normalize(glm::vec3(1.0f) - fragmentBuffer[index].eyePos));
#endif
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
hipFree(dev_fragmentBuffer);
hipMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
hipFree(dev_framebuffer);
hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
hipFree(dev_depth);
hipMalloc(&dev_depth, width * height * sizeof(int));
hipFree(dev_mutex);
hipMalloc(&dev_mutex, width * height * sizeof(int));
hipMemset(dev_mutex, 0, width * height * sizeof(int));
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, int * depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
}
}
/**
* Kernel with stride support, used in place of hipMemcpy when the source buffer view is strided
* One thread is responsible for copying one component
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
} else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
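// Walk the scene graph depth-first, composing each node's local transform with
// its parent's and recording the result in n2m, keyed by node name.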
void traverseNode (
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
hipMalloc(&dev_bufferView, bufferView.byteLength);
hipMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, hipMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffers for the indices, the material, and each attribute
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
hipMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute type is 5126 (FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
hipMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
hipMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You only need to worry about this part once you start to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
hipMalloc(&dev_diffuseTex, s);
hipMemcpy(dev_diffuseTex, &image.image.at(0), s, hipMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the above code loading diffuse material as a start point
}
// ---------Node hierarchy transform--------
hipDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
hipMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, hipFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
hipFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height)
{
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices)
{
// TODO: Apply vertex transformation here
// Multiply each vertex position by the MVP matrix; this transforms everything into clip space
glm::vec4 pos = MVP * glm::vec4(primitive.dev_position[vid], 1.0f);
glm::vec3 eyePos = glm::vec3(MV * glm::vec4(primitive.dev_position[vid], 1.0f));
glm::vec3 eyeNor = glm::normalize(MV_normal * primitive.dev_normal[vid]);
// Then divide the pos by its w element to transform into NDC space
if (pos.w != 0) pos /= pos.w;
// Finally transform x and y to viewport space
pos.x = 0.5f * (float)width * (pos.x + 1.f);
pos.y = 0.5f * (float)height * (1.f - pos.y);
// pos.z = 1.f / eyePos.z;
// TODO: Apply vertex assembly here
// Assemble all attribute arrays into the primitive array
primitive.dev_verticesOut[vid].pos = pos;
primitive.dev_verticesOut[vid].eyePos = eyePos;
primitive.dev_verticesOut[vid].eyeNor = eyeNor;
// retrieve texture data
#if TEXTURE == 1
primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid];
primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex;
primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth;
primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight;
#endif
}
}
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices)
{
// TODO: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES)
{
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// TODO: other primitive types (point, line)
}
}
__device__
void _rasterizePoints(Fragment* dev_fragmentBuffer, Primitive& primitive, int width, int height)
{
VertexOut v0 = primitive.v[0];
VertexOut v1 = primitive.v[1];
VertexOut v2 = primitive.v[2];
glm::vec3 triangle[3] = { glm::vec3(v0.pos),glm::vec3(v1.pos),glm::vec3(v2.pos) };
int x, y;
for (int vertIdx = 0; vertIdx < 3; ++vertIdx)
{
x = triangle[vertIdx].x; y = triangle[vertIdx].y;
int fragmentId = x + y * width;
if ( (x >= 0 && x <= width - 1) && (y >= 0 && y <= height - 1) )
{
dev_fragmentBuffer[fragmentId].color = glm::vec3(1.f);
}
}
}
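// Draw the three triangle edges by stepping in x (a very simple line
// rasterizer). Note the assumptions: each edge must run left-to-right
// (x1 <= x2) and must not be vertical (dx != 0); edges that violate this are
// skipped or ill-defined.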
__device__
void _rasterizeLines(Fragment* dev_fragmentBuffer, Primitive& primitive, const int *indices, int width, int height)
{
VertexOut v0 = primitive.v[0];
VertexOut v1 = primitive.v[1];
VertexOut v2 = primitive.v[2];
glm::vec3 triangle[3] = { glm::vec3(v0.pos),glm::vec3(v1.pos),glm::vec3(v2.pos) };
int x1, x2, y1, y2, dx, dy, y, fragmentId;
for (int index = 0; index < 6; index += 2)
{
x1 = triangle[indices[index]].x;
y1 = triangle[indices[index]].y;
x2 = triangle[indices[index + 1]].x;
y2 = triangle[indices[index + 1]].y;
dx = x2 - x1;
dy = y2 - y1;
for (int x = x1; x <= x2; x++)
{
y = y1 + dy * (x - x1) / dx;
fragmentId = x + y * width;
if ( (x >= 0 && x <= width - 1) && (y >= 0 && y <= height - 1) )
{
dev_fragmentBuffer[fragmentId].color = glm::vec3(1.f);
}
}
}
}
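// Rasterize one triangle: loop over its screen-space bounding box, test
// barycentric coverage, interpolate the vertex attributes, and resolve depth
// with a per-pixel spin lock (atomicCAS on dev_mutex) so the depth compare and
// fragment write happen atomically.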
__device__
void _rasterizeTriangles(Fragment* dev_fragmentBuffer, Primitive& primitive, int* dev_depth, int* dev_mutex, int width, int height)
{
VertexOut v0 = primitive.v[0];
VertexOut v1 = primitive.v[1];
VertexOut v2 = primitive.v[2];
glm::vec3 triangle[3] = { glm::vec3(v0.pos),glm::vec3(v1.pos),glm::vec3(v2.pos) };
// find the min and max of triangle bounding box
AABB bBox = getAABBForTriangle(triangle);
const int minX = glm::min(glm::max((int)bBox.min.x, 0), width - 1);
const int minY = glm::min(glm::max((int)bBox.min.y, 0), height - 1);
const int maxX = glm::min(glm::max((int)bBox.max.x, 0), width - 1);
const int maxY = glm::min(glm::max((int)bBox.max.y, 0), height - 1);
for (int x = minX; x <= maxX; x++)
{
for (int y = minY; y <= maxY; y++)
{
glm::vec3 barycentricCoord = calculateBarycentricCoordinate(triangle, glm::vec2(x, y));
if (isBarycentricCoordInBounds(barycentricCoord))
{
Fragment fragment;
fragment.eyePos = v0.eyePos * barycentricCoord.x + v1.eyePos * barycentricCoord.y + v2.eyePos * barycentricCoord.z;
fragment.eyeNor = v0.eyeNor * barycentricCoord.x + v1.eyeNor * barycentricCoord.y + v2.eyeNor * barycentricCoord.z;
// use texture color
#if TEXTURE == 1
fragment.dev_diffuseTex = v0.dev_diffuseTex;
fragment.texWidth = v0.texWidth;
fragment.texHeight = v0.texHeight;
// perspective correct texture coordinates
#if PERSPECTIVE == 1
const float zCoord = 1.f / (barycentricCoord.x / v0.eyePos.z
+ barycentricCoord.y / v1.eyePos.z
+ barycentricCoord.z / v2.eyePos.z);
fragment.texcoord0 = zCoord * (barycentricCoord.x * (v0.texcoord0 / v0.eyePos.z)
+ barycentricCoord.y * (v1.texcoord0 / v1.eyePos.z)
+ barycentricCoord.z * (v2.texcoord0 / v2.eyePos.z));
// no perspective correct
#else
fragment.texcoord0 = barycentricCoord.x * v0.texcoord0 + barycentricCoord.y * v1.texcoord0 + barycentricCoord.z * v2.texcoord0;
#endif
// do not use texture color
#else
fragment.dev_diffuseTex = NULL;
// default use vertex normal as color
fragment.color = fragment.eyeNor;
#endif
const int fragIndex = x + (y * width);
bool isSet;
do
{
isSet = (atomicCAS(&dev_mutex[fragIndex], 0, 1) == 0);
if (isSet)
{
int depth = -getZAtCoordinate(barycentricCoord, triangle) * INT_MAX;
if (depth < dev_depth[fragIndex])
{
dev_depth[fragIndex] = depth;
dev_fragmentBuffer[fragIndex] = fragment;
}
//reset mutex
dev_mutex[fragIndex] = 0;
}
} while (!isSet);
}
}
}
}
__global__
void _rasterize(int totalNumPrimitives, Primitive* dev_primitives,
Fragment* dev_fragmentBuffer, int* dev_depth,
int * dev_mutex, int width, int height)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= totalNumPrimitives) return; // >= so the last thread does not read past the primitive buffer
// get the triangle vertices
Primitive primitive = dev_primitives[index];
#if PRIMTYPE == 1
_rasterizePoints(dev_fragmentBuffer, primitive, width, height);
#elif PRIMTYPE == 2
const int indices[] = { 0, 1, 1, 2, 2, 0 };
_rasterizeLines(dev_fragmentBuffer, primitive, indices, width, height);
#elif PRIMTYPE == 3
_rasterizeTriangles(dev_fragmentBuffer, primitive, dev_depth, dev_mutex, width, height);
#endif
}
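// Per-frame pipeline: vertex transform & assembly -> primitive assembly ->
// rasterization with depth test -> fragment shading (render) -> copy of the
// framebuffer to the OpenGL PBO.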
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal)
{
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
#if TIMER
using time_point_t = std::chrono::high_resolution_clock::time_point;
time_point_t start_time = std::chrono::high_resolution_clock::now();
#endif
for (; it != itEnd; ++it)
{
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
hipDeviceSynchronize();
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
#if TIMER
hipDeviceSynchronize();
time_point_t end_time = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> dur = end_time - start_time;
float elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << std::endl;
std::cout << "Vertex Processing and Primitive Assembly: " << elapsed_time << " milliseconds." << std::endl;
#endif
}
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
#if TIMER
using time_point_t = std::chrono::high_resolution_clock::time_point;
time_point_t start_time = std::chrono::high_resolution_clock::now();
#endif
// TODO: rasterize
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives = (totalNumPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x;
_rasterize << <numBlocksForPrimitives, numThreadsPerBlock >> > (totalNumPrimitives, dev_primitives, dev_fragmentBuffer, dev_depth, dev_mutex, width, height);
#if TIMER
hipDeviceSynchronize();
time_point_t end_time = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> dur = end_time - start_time;
float elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << "Rasterization: " << elapsed_time << " milliseconds." << std::endl;
#endif
#if TIMER
start_time = std::chrono::high_resolution_clock::now();
#endif
// Copy depthbuffer colors into framebuffer
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer);
checkCUDAError("fragment shader");
#if TIMER
hipDeviceSynchronize();
end_time = std::chrono::high_resolution_clock::now();
dur = end_time - start_time;
elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << "Fragment Shader: " << elapsed_time << " milliseconds." << std::endl;
#endif
// Copy framebuffer into OpenGL buffer for OpenGL previewing
hipLaunchKernelGGL(( sendImageToPBO), dim3(blockCount2d), dim3(blockSize2d), 0, 0, pbo, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
hipFree(p->dev_indices);
hipFree(p->dev_position);
hipFree(p->dev_normal);
hipFree(p->dev_texcoord0);
hipFree(p->dev_diffuseTex);
hipFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
hipFree(dev_primitives);
dev_primitives = NULL;
hipFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
hipFree(dev_framebuffer);
dev_framebuffer = NULL;
hipFree(dev_depth);
dev_depth = NULL;
hipFree(dev_mutex);
dev_mutex = NULL;
checkCUDAError("rasterize Free");
}
| d7d98de377c9ac8a813f139256770e12f1fcd526.cu | /**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <chrono>
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
// TODO: add new attributes to your VertexOut
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, because the normal would be distorted by the perspective transformation
// glm::vec3 col;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
int texWidth, texHeight;
// ...
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
};
struct Fragment {
glm::vec3 color;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor;
VertexAttributeTexcoord texcoord0;
TextureData* dev_diffuseTex;
int texWidth, texHeight;
// ...
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int * dev_depth = NULL; // you might need this buffer when doing depth test
static int * dev_mutex = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
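// Fetch one texel; assumes a tightly packed 8-bit RGB texture (3 bytes per
// texel, no row padding) and returns the color normalized to [0, 1].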
__device__
glm::vec3 getTexColor(TextureData* tex, int width, float u, float v)
{
int index = u + v * width;
return glm::vec3(tex[index * 3], tex[index * 3 + 1], tex[index * 3 + 2]) / 255.f;
}
// for more information on bilinear filtering:
// https://en.wikipedia.org/wiki/Bilinear_filtering
// used sample code from this source
__device__
glm::vec3 getBilinearFilteredPixelColor(Fragment &fragment)
{
float u = fragment.texcoord0.x * fragment.texWidth - 0.5f;
float v = fragment.texcoord0.y * fragment.texHeight - 0.5f;
int x = glm::floor(u);
int y = glm::floor(v);
float u_ratio = u - x;
float v_ratio = v - y;
float u_opposite = 1.f - u_ratio;
float v_opposite = 1.f - v_ratio;
// retrieve texture data
glm::vec3 texXY = getTexColor(fragment.dev_diffuseTex, fragment.texWidth, x, y);
glm::vec3 texX1Y = getTexColor(fragment.dev_diffuseTex, fragment.texWidth, x + 1, y);
glm::vec3 texXY1 = getTexColor(fragment.dev_diffuseTex, fragment.texWidth, x, y + 1);
glm::vec3 texX1Y1 = getTexColor(fragment.dev_diffuseTex, fragment.texWidth, x + 1, y + 1);
return (texXY * u_opposite + texX1Y * u_ratio) * v_opposite +
(texXY1 * u_opposite + texX1Y1 * u_ratio) * v_ratio;
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h)
{
// TODO: add your fragment shader code here
Fragment fragment = fragmentBuffer[index];
#if TEXTURE == 1
if (fragment.dev_diffuseTex != NULL)
{
#if BILINEAR == 1
fragment.color = getBilinearFilteredPixelColor(fragment);
#else
int u = fragment.texcoord0.x * fragment.texWidth;
int v = fragment.texcoord0.y * fragment.texHeight;
fragment.color = getTexColor(fragment.dev_diffuseTex, fragment.texWidth, u, v);
#endif
}
#endif
framebuffer[index] = fragment.color;
#if PRIMTYPE == 3
framebuffer[index] *= glm::dot(fragment.eyeNor, glm::normalize(glm::vec3(1.0f) - fragmentBuffer[index].eyePos));
#endif
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
cudaFree(dev_fragmentBuffer);
cudaMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
cudaFree(dev_framebuffer);
cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
cudaFree(dev_depth);
cudaMalloc(&dev_depth, width * height * sizeof(int));
cudaFree(dev_mutex);
cudaMalloc(&dev_mutex, width * height * sizeof(int));
cudaMemset(dev_mutex, 0, width * height * sizeof(int));
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, int * depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
}
}
/**
* Kernel with stride support, used in place of cudaMemcpy when the source buffer view is strided
* One thread is responsible for copying one component
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
} else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
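// Walk the scene graph depth-first, composing each node's local transform with
// its parent's and recording the result in n2m, keyed by node name.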
void traverseNode (
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
cudaMalloc(&dev_bufferView, bufferView.byteLength);
cudaMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, cudaMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffers for the indices, the material, and each attribute
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
cudaMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute type is 5126 (FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
cudaMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
cudaMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You only need to worry about this part once you start to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
cudaMalloc(&dev_diffuseTex, s);
cudaMemcpy(dev_diffuseTex, &image.image.at(0), s, cudaMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the above code loading diffuse material as a start point
}
// ---------Node hierarchy transform--------
cudaDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
cudaMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, cudaFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
cudaFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height)
{
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices)
{
// TODO: Apply vertex transformation here
// Multiply the MVP matrix for each vertex position, this will transform everything into clipping space
glm::vec4 pos = MVP * glm::vec4(primitive.dev_position[vid], 1.0f);
glm::vec3 eyePos = glm::vec3(MV * glm::vec4(primitive.dev_position[vid], 1.0f));
glm::vec3 eyeNor = glm::normalize(MV_normal * primitive.dev_normal[vid]);
// Then divide the pos by its w element to transform into NDC space
if (pos.w != 0) pos /= pos.w;
// Finally transform x and y to viewport space
pos.x = 0.5f * (float)width * (pos.x + 1.f);
pos.y = 0.5f * (float)height * (1.f - pos.y);
// pos.z = 1.f / eyePos.z;
// TODO: Apply vertex assembly here
// Assemble all attribute arrays into the primitive array
primitive.dev_verticesOut[vid].pos = pos;
primitive.dev_verticesOut[vid].eyePos = eyePos;
primitive.dev_verticesOut[vid].eyeNor = eyeNor;
// retrieve texture data
#if TEXTURE == 1
primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid];
primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex;
primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth;
primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight;
#endif
}
}
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices)
{
// TODO: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES)
{
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
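// Note: the divisions above use the numeric value of PrimitiveType::Triangle
// (expected to be 3), so iid / 3 picks the output triangle and iid % 3 the corner
// within it.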
// TODO: other primitive types (point, line)
}
}
__device__
void _rasterizePoints(Fragment* dev_fragmentBuffer, Primitive& primitive, int width, int height)
{
VertexOut v0 = primitive.v[0];
VertexOut v1 = primitive.v[1];
VertexOut v2 = primitive.v[2];
glm::vec3 triangle[3] = { glm::vec3(v0.pos),glm::vec3(v1.pos),glm::vec3(v2.pos) };
int x, y;
for (int vertIdx = 0; vertIdx < 3; ++vertIdx)
{
x = triangle[vertIdx].x; y = triangle[vertIdx].y;
int fragmentId = x + y * width;
if ( (x >= 0 && x <= width - 1) && (y >= 0 && y <= height - 1) )
{
dev_fragmentBuffer[fragmentId].color = glm::vec3(1.f);
}
}
}
__device__
void _rasterizeLines(Fragment* dev_fragmentBuffer, Primitive& primitive, const int *indices, int width, int height)
{
VertexOut v0 = primitive.v[0];
VertexOut v1 = primitive.v[1];
VertexOut v2 = primitive.v[2];
glm::vec3 triangle[3] = { glm::vec3(v0.pos),glm::vec3(v1.pos),glm::vec3(v2.pos) };
int x1, x2, y1, y2, dx, dy, y, fragmentId;
for (int index = 0; index < 6; index += 2)
{
x1 = triangle[indices[index]].x;
y1 = triangle[indices[index]].y;
x2 = triangle[indices[index + 1]].x;
y2 = triangle[indices[index + 1]].y;
dx = x2 - x1;
dy = y2 - y1;
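// Simple scan in x between the two endpoints; this assumes x1 <= x2 and dx != 0.
// Vertical edges (dx == 0) would divide by zero below and are not handled here.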
for (int x = x1; x <= x2; x++)
{
y = y1 + dy * (x - x1) / dx;
fragmentId = x + y * width;
if ( (x >= 0 && x <= width - 1) && (y >= 0 && y <= height - 1) )
{
dev_fragmentBuffer[fragmentId].color = glm::vec3(1.f);
}
}
}
}
__device__
void _rasterizeTriangles(Fragment* dev_fragmentBuffer, Primitive& primitive, int* dev_depth, int* dev_mutex, int width, int height)
{
VertexOut v0 = primitive.v[0];
VertexOut v1 = primitive.v[1];
VertexOut v2 = primitive.v[2];
glm::vec3 triangle[3] = { glm::vec3(v0.pos),glm::vec3(v1.pos),glm::vec3(v2.pos) };
// find the min and max of triangle bounding box
AABB bBox = getAABBForTriangle(triangle);
const int minX = glm::min(glm::max((int)bBox.min.x, 0), width - 1);
const int minY = glm::min(glm::max((int)bBox.min.y, 0), height - 1);
const int maxX = glm::min(glm::max((int)bBox.max.x, 0), width - 1);
const int maxY = glm::min(glm::max((int)bBox.max.y, 0), height - 1);
for (int x = minX; x <= maxX; x++)
{
for (int y = minY; y <= maxY; y++)
{
glm::vec3 barycentricCoord = calculateBarycentricCoordinate(triangle, glm::vec2(x, y));
if (isBarycentricCoordInBounds(barycentricCoord))
{
Fragment fragment;
fragment.eyePos = v0.eyePos * barycentricCoord.x + v1.eyePos * barycentricCoord.y + v2.eyePos * barycentricCoord.z;
fragment.eyeNor = v0.eyeNor * barycentricCoord.x + v1.eyeNor * barycentricCoord.y + v2.eyeNor * barycentricCoord.z;
// use texture color
#if TEXTURE == 1
fragment.dev_diffuseTex = v0.dev_diffuseTex;
fragment.texWidth = v0.texWidth;
fragment.texHeight = v0.texHeight;
// perspective correct texture coordinates
#if PERSPECTIVE == 1
const float zCoord = 1.f / (barycentricCoord.x / v0.eyePos.z
+ barycentricCoord.y / v1.eyePos.z
+ barycentricCoord.z / v2.eyePos.z);
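// 1/z varies linearly in screen space, so each texcoord is divided by its vertex
// depth before interpolation and rescaled by zCoord (the perspective-correct depth).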
fragment.texcoord0 = zCoord * (barycentricCoord.x * (v0.texcoord0 / v0.eyePos.z)
+ barycentricCoord.y * (v1.texcoord0 / v1.eyePos.z)
+ barycentricCoord.z * (v2.texcoord0 / v2.eyePos.z));
// no perspective correct
#else
fragment.texcoord0 = barycentricCoord.x * v0.texcoord0 + barycentricCoord.y * v1.texcoord0 + barycentricCoord.z * v2.texcoord0;
#endif
// do not use texture color
#else
fragment.dev_diffuseTex = NULL;
// default use vertex normal as color
fragment.color = fragment.eyeNor;
#endif
const int fragIndex = x + (y * width);
bool isSet;
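// The loop below is a per-pixel spin lock built on atomicCAS: a thread that wins the
// mutex performs the depth compare and fragment write atomically with respect to
// other primitives covering the same pixel. getZAtCoordinate is negated and scaled
// by INT_MAX so the depth can be stored and compared as a fixed-point int in dev_depth.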
do
{
isSet = (atomicCAS(&dev_mutex[fragIndex], 0, 1) == 0);
if (isSet)
{
int depth = -getZAtCoordinate(barycentricCoord, triangle) * INT_MAX;
if (depth < dev_depth[fragIndex])
{
dev_depth[fragIndex] = depth;
dev_fragmentBuffer[fragIndex] = fragment;
}
//reset mutex
dev_mutex[fragIndex] = 0;
}
} while (!isSet);
}
}
}
}
__global__
void _rasterize(int totalNumPrimitives, Primitive* dev_primitives,
Fragment* dev_fragmentBuffer, int* dev_depth,
int * dev_mutex, int width, int height)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= totalNumPrimitives) return;
// get the triangle vertices
Primitive primitive = dev_primitives[index];
#if PRIMTYPE == 1
_rasterizePoints(dev_fragmentBuffer, primitive, width, height);
#elif PRIMTYPE == 2
const int indices[] = { 0, 1, 1, 2, 2, 0 };
_rasterizeLines(dev_fragmentBuffer, primitive, indices, width, height);
#elif PRIMTYPE == 3
_rasterizeTriangles(dev_fragmentBuffer, primitive, dev_depth, dev_mutex, width, height);
#endif
}
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal)
{
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
#if TIMER
using time_point_t = std::chrono::high_resolution_clock::time_point;
time_point_t start_time = std::chrono::high_resolution_clock::now();
#endif
for (; it != itEnd; ++it)
{
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
cudaDeviceSynchronize();
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
#if TIMER
cudaDeviceSynchronize();
time_point_t end_time = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> dur = end_time - start_time;
float elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << std::endl;
std::cout << "Vertex Processing and Primitive Assembly: " << elapsed_time << " milliseconds." << std::endl;
#endif
}
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
#if TIMER
using time_point_t = std::chrono::high_resolution_clock::time_point;
time_point_t start_time = std::chrono::high_resolution_clock::now();
#endif
// TODO: rasterize
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives = (totalNumPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x;
_rasterize << <numBlocksForPrimitives, numThreadsPerBlock >> > (totalNumPrimitives, dev_primitives, dev_fragmentBuffer, dev_depth, dev_mutex, width, height);
#if TIMER
cudaDeviceSynchronize();
time_point_t end_time = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> dur = end_time - start_time;
float elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << "Rasterization: " << elapsed_time << " milliseconds." << std::endl;
#endif
#if TIMER
start_time = std::chrono::high_resolution_clock::now();
#endif
// Copy depthbuffer colors into framebuffer
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer);
checkCUDAError("fragment shader");
#if TIMER
cudaDeviceSynchronize();
end_time = std::chrono::high_resolution_clock::now();
dur = end_time - start_time;
elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << "Fragment Shader: " << elapsed_time << " milliseconds." << std::endl;
#endif
// Copy framebuffer into OpenGL buffer for OpenGL previewing
sendImageToPBO<<<blockCount2d, blockSize2d>>>(pbo, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
cudaFree(p->dev_indices);
cudaFree(p->dev_position);
cudaFree(p->dev_normal);
cudaFree(p->dev_texcoord0);
cudaFree(p->dev_diffuseTex);
cudaFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
cudaFree(dev_primitives);
dev_primitives = NULL;
cudaFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
cudaFree(dev_framebuffer);
dev_framebuffer = NULL;
cudaFree(dev_depth);
dev_depth = NULL;
cudaFree(dev_mutex);
dev_mutex = NULL;
checkCUDAError("rasterize Free");
}
|
d09883bc75c1c4b249f5fc5b559a52c40f594ebe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelMarkDeadTriangles(int *cmarker, short *cnewtri, int nTris) {
int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
if (x >= nTris)
return ;
cmarker[x] = (cnewtri[x] >= 0 ? 0 : 1);
} | d09883bc75c1c4b249f5fc5b559a52c40f594ebe.cu | #include "includes.h"
__global__ void kernelMarkDeadTriangles(int *cmarker, short *cnewtri, int nTris) {
int x = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
if (x >= nTris)
return ;
cmarker[x] = (cnewtri[x] >= 0 ? 0 : 1);
} |
2e464338be2ac6e01298f741d65b10939f841cf0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
using namespace std;
int threads;
__global__ void gcd_vector(int * d_out, int integer_m){
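// Each thread strides over i = idx, idx + blockDim.x, ... (idx = threadIdx.x, so a
// single block is assumed) and counts how many of those i are coprime to integer_m
// using Euclid's algorithm; d_out[idx] holds that thread's partial count, presumably
// to be summed on the host afterwards.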
int idx = threadIdx.x;
for(int i = idx; i<integer_m; i+=blockDim.x){
int u = i, v = integer_m;
while ( v != 0) {
int r = u % v;
u = v;
v = r;
}
if(u == 1){
d_out[idx]++;
}
}
} | 2e464338be2ac6e01298f741d65b10939f841cf0.cu | #include "includes.h"
using namespace std;
int threads;
__global__ void gcd_vector(int * d_out, int integer_m){
int idx = threadIdx.x;
for(int i = idx; i<integer_m; i+=blockDim.x){
int u = i, v = integer_m;
while ( v != 0) {
int r = u % v;
u = v;
v = r;
}
if(u == 1){
d_out[idx]++;
}
}
} |
0a33197cec3786c4b5ede5adaf5e6a4e39c872ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <cassert>
#include <algorithm>
/// Returns the sum of all values `a` within a warp,
/// with the correct answer returned only by the 0th thread of a warp.
__device__ double sumWarp(double a) {
// TODO: 1.a) Compute sum of all values within a warp.
// Only the threads with threadIdx.x % warpSize == 0 have to
// return the correct result.
// (although this function operates only on a single warp, it
// will be called with many threads for testing)
double sum = a;
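// Butterfly reduction: each __shfl_xor_sync exchanges partial sums between lanes whose
// IDs differ in one bit, so after log2(32) = 5 steps every lane (not just lane 0)
// holds the sum of the whole warp.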
sum += __shfl_xor_sync(0xFFFFFFFF, sum, 1);
sum += __shfl_xor_sync(0xFFFFFFFF, sum, 2);
sum += __shfl_xor_sync(0xFFFFFFFF, sum, 4);
sum += __shfl_xor_sync(0xFFFFFFFF, sum, 8);
sum += __shfl_xor_sync(0xFFFFFFFF, sum, 16);
return sum;
}
/// Returns the sum of all values `a` within a block,
/// with the correct answer returned only by the 0th thread of a block.
__device__ double sumBlock(double a) {
// TODO: 1.c) Compute the sum of values `a` for all threads within a block.
// Only threadIdx.x == 0 has to return the correct result.
// NOTE: For 1.c) implement either this or `argMaxBlock`!
// we are sure that there are 1024 threads all with meaningful data
double result = sumWarp(a);
__shared__ double sdata[32];
if (threadIdx.x % 32 == 0)
sdata[threadIdx.x / 32] = result;
__syncthreads();
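// Second level: the first warp reduces the 32 per-warp partials. Reading
// sdata[threadIdx.x] for all threadIdx.x < 32 assumes a full 1024-thread block so
// that every slot was written above (the launches in this file always use 1024).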
if (threadIdx.x < 32) {
result = sumWarp(sdata[threadIdx.x]);
}
return result;
}
__global__ void sumReduce(const double *aDev, double *bDev, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// printf("IDX = %d\n", idx);
double a = idx < N ? aDev[idx] : 0.0;
// if (gridDim.x == 1 && a > 0.0) {
// printf("Idx = %2d, value = %f\n", idx, a);
// }
// if (threadIdx.x < 32) printf("%3d my value: %f\n", threadIdx.x, a);
double sum = sumBlock(a);
if (threadIdx.x == 0) {
bDev[blockIdx.x] = sum;
}
// if (threadIdx.x == 0)
// atomicAdd(bDev, sum);
}
/// Compute the sum of all values aDev[0]..aDev[N-1] for N <= 1024^2 and store the result to bDev[0].
void sum1M(const double *aDev, double *bDev, int N) {
assert(N <= 1024 * 1024);
// TODO: 1.d) Implement either this or `argMax1M`.
// Avoid copying any data back to the host.
// Hint: The solution requires more CUDA operations than just
// calling a single kernel. Feel free to use whatever you find
// necessary.
int numBlocks = (N+1024-1)/1024;
int device;
hipDeviceProp_t prop;
hipGetDevice(&device);
hipGetDeviceProperties(&prop, device);
// printf("Cuda compute capability: %d.%d\n", prop.major, prop.minor);
if (numBlocks > prop.maxGridSize[0]) {
fprintf(stderr, "Grid size %d exceeds the device capability %d", numBlocks, prop.maxGridSize[0]);
return;
}
// bDev[0] = 0.0f;
if (numBlocks > 1) {
// need some memory to synchronize over blocks
double* bufferDev;
CUDA_CHECK(hipMalloc(&bufferDev, numBlocks * sizeof(double)));
CUDA_LAUNCH(sumReduce, numBlocks, 1024, aDev, bufferDev, N);
CUDA_LAUNCH(sumReduce, 1, 1024, bufferDev, bDev, numBlocks);
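// Two-pass reduction: the first launch leaves one partial sum per block in bufferDev,
// the second launch reduces those partials with a single 1024-thread block. This is
// valid because N <= 1024^2 guarantees numBlocks <= 1024.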
CUDA_CHECK(hipFree(bufferDev));
} else {
CUDA_LAUNCH(sumReduce, 1, 1024, aDev, bDev, N);
}
}
#include "reduction_sum.h"
int main() {
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 3);
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 32);
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 320);
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 1023123);
printf("sumWarp OK.\n");
/*
// OPTIONAL: 1a reduce-all. In case you want to try to implement it,
// implement a global function `__device__ double sumWarpAll(double x)`,
// and comment out sumWarpAll* functions in utils.h.
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 3);
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 32);
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 320);
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 1023123);
printf("sumWarpAll OK.\n");
*/
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 32);
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 1024);
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 12341);
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 1012311);
printf("sumBlock OK.\n");
testLargeSum("sum1M", sum1M, 32);
testLargeSum("sum1M", sum1M, 1024);
testLargeSum("sum1M", sum1M, 12341);
testLargeSum("sum1M", sum1M, 1012311);
printf("sum1M OK.\n");
}
| 0a33197cec3786c4b5ede5adaf5e6a4e39c872ce.cu | #include "utils.h"
#include <cassert>
#include <algorithm>
/// Returns the sum of all values `a` within a warp,
/// with the correct answer returned only by the 0th thread of a warp.
__device__ double sumWarp(double a) {
// TODO: 1.a) Compute sum of all values within a warp.
// Only the threads with threadIdx.x % warpSize == 0 have to
// return the correct result.
// (although this function operates only on a single warp, it
// will be called with many threads for testing)
double sum = a;
sum += __shfl_xor_sync(0xFFFFFFFF, sum, 1);
sum += __shfl_xor_sync(0xFFFFFFFF, sum, 2);
sum += __shfl_xor_sync(0xFFFFFFFF, sum, 4);
sum += __shfl_xor_sync(0xFFFFFFFF, sum, 8);
sum += __shfl_xor_sync(0xFFFFFFFF, sum, 16);
return sum;
}
/// Returns the sum of all values `a` within a block,
/// with the correct answer returned only by the 0th thread of a block.
__device__ double sumBlock(double a) {
// TODO: 1.c) Compute the sum of values `a` for all threads within a block.
// Only threadIdx.x == 0 has to return the correct result.
// NOTE: For 1.c) implement either this or `argMaxBlock`!
// we are sure that there are 1024 threads all with meaningful data
double result = sumWarp(a);
__shared__ double sdata[32];
if (threadIdx.x % 32 == 0)
sdata[threadIdx.x / 32] = result;
__syncthreads();
if (threadIdx.x < 32) {
result = sumWarp(sdata[threadIdx.x]);
}
return result;
}
__global__ void sumReduce(const double *aDev, double *bDev, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// printf("IDX = %d\n", idx);
double a = idx < N ? aDev[idx] : 0.0;
// if (gridDim.x == 1 && a > 0.0) {
// printf("Idx = %2d, value = %f\n", idx, a);
// }
// if (threadIdx.x < 32) printf("%3d my value: %f\n", threadIdx.x, a);
double sum = sumBlock(a);
if (threadIdx.x == 0) {
bDev[blockIdx.x] = sum;
}
// if (threadIdx.x == 0)
// atomicAdd(bDev, sum);
}
/// Compute the sum of all values aDev[0]..aDev[N-1] for N <= 1024^2 and store the result to bDev[0].
void sum1M(const double *aDev, double *bDev, int N) {
assert(N <= 1024 * 1024);
// TODO: 1.d) Implement either this or `argMax1M`.
// Avoid copying any data back to the host.
// Hint: The solution requires more CUDA operations than just
// calling a single kernel. Feel free to use whatever you find
// necessary.
int numBlocks = (N+1024-1)/1024;
int device;
cudaDeviceProp prop;
cudaGetDevice(&device);
cudaGetDeviceProperties(&prop, device);
// printf("Cuda compute capability: %d.%d\n", prop.major, prop.minor);
if (numBlocks > prop.maxGridSize[0]) {
fprintf(stderr, "Grid size %d exceeds the device capability %d", numBlocks, prop.maxGridSize[0]);
return;
}
// bDev[0] = 0.0f;
if (numBlocks > 1) {
// need some memory to synchronize over blocks
double* bufferDev;
CUDA_CHECK(cudaMalloc(&bufferDev, numBlocks * sizeof(double)));
CUDA_LAUNCH(sumReduce, numBlocks, 1024, aDev, bufferDev, N);
CUDA_LAUNCH(sumReduce, 1, 1024, bufferDev, bDev, numBlocks);
CUDA_CHECK(cudaFree(bufferDev));
} else {
CUDA_LAUNCH(sumReduce, 1, 1024, aDev, bDev, N);
}
}
#include "reduction_sum.h"
int main() {
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 3);
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 32);
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 320);
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 1023123);
printf("sumWarp OK.\n");
/*
// OPTIONAL: 1a reduce-all. In case you want to try to implement it,
// implement a global function `__device__ double sumWarpAll(double x)`,
// and comment out sumWarpAll* functions in utils.h.
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 3);
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 32);
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 320);
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 1023123);
printf("sumWarpAll OK.\n");
*/
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 32);
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 1024);
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 12341);
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 1012311);
printf("sumBlock OK.\n");
testLargeSum("sum1M", sum1M, 32);
testLargeSum("sum1M", sum1M, 1024);
testLargeSum("sum1M", sum1M, 12341);
testLargeSum("sum1M", sum1M, 1012311);
printf("sum1M OK.\n");
}
|
cabbfe08252c47c7912e97008631cfa51caca724.hip | // !!! This is a file automatically generated by hipify!!!
/***************
CS-426-Project-4
ERKAN ÍNAL
21302017
CUDA programming
***************/
#define NOMINMAX
#define PI 3.14159265
#include "hip/hip_runtime.h"
#include <iostream>
#include <string>
#include <sstream>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <time.h>
using namespace std;
float getArraySize(char *filename);
void readFile(char *filename, int arrSize, float **vector1, float **vector2);
float *arrayGenerator(float N);
float findAngle(float N, float *vector1, float *vector2);
__global__ void compute(float N, float *d_vector1, float *d_vector2, float *d_vector3)
{
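// d_vector3 accumulates three grid-wide sums: [0] the dot product of the two vectors,
// [1] the sum of squares of d_vector1, [2] the sum of squares of d_vector2. Each of
// the three passes below follows the same pattern: grid-stride loop into a per-thread
// partial, shared-memory tree reduction within the block, then one atomicAdd per block.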
extern __shared__ float sharedData[];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float tmp = 0;
while (tid < N) {
tmp = tmp + d_vector1[tid] * d_vector2[tid];
tid = tid + blockDim.x * gridDim.x;
}
//put your tmp to shared
sharedData[threadIdx.x] = tmp;
//synchronize threads
__syncthreads();
//reduction code
int i = blockDim.x / 2;
while (i != 0) {
if (threadIdx.x < i)
sharedData[threadIdx.x] = sharedData[threadIdx.x] + sharedData[threadIdx.x + i];
__syncthreads();
i = i / 2;
}
//accumulate into final result
if (threadIdx.x == 0) {
atomicAdd(d_vector3, sharedData[0]);
}
//Nominator Calculated, Now, Calculate Denominator
//*********************Calculate sqrt of first vector first*********************//
tid = threadIdx.x + blockIdx.x * blockDim.x;
tmp = 0;
while (tid < N) {
tmp = tmp + powf(d_vector1[tid], 2);
tid = tid + blockDim.x * gridDim.x;
}
//put your tmp to shared
sharedData[threadIdx.x] = tmp;
//synchronize threads
__syncthreads();
//reduction code
i = blockDim.x / 2;
while (i != 0) {
if (threadIdx.x < i)
sharedData[threadIdx.x] = sharedData[threadIdx.x] + sharedData[threadIdx.x + i];
__syncthreads();
i = i / 2;
}
//accumulate into final result
if (threadIdx.x == 0) {
atomicAdd(d_vector3 + 1, sharedData[0]);
}
//*********************Calculate sqrt of second vector*********************//
tid = threadIdx.x + blockIdx.x * blockDim.x;
tmp = 0;
while (tid < N) {
tmp = tmp + powf(d_vector2[tid], 2);
tid = tid + blockDim.x * gridDim.x;
}
//put your tmp to shared
sharedData[threadIdx.x] = tmp;
//synchronize threads
__syncthreads();
//reduction code
i = blockDim.x / 2;
while (i != 0) {
if (threadIdx.x < i)
sharedData[threadIdx.x] = sharedData[threadIdx.x] + sharedData[threadIdx.x + i];
__syncthreads();
i = i / 2;
}
//accumulate into final result
if (threadIdx.x == 0) {
atomicAdd(d_vector3 + 2, sharedData[0]);
}
}
int main(int argc, char **argv)
{
if (argc == 3) {
float CPU_result, GPU_result;
//To measure time for CPU
clock_t start, end;
float time_for_arr_gen, time_for_cpu_func, time_for_host_to_device, time_for_device_to_host, time_for_kernel_exe;
//To measure time for GPU
hipEvent_t start_gpu, stop_gpu;
hipEventCreate(&start_gpu);
hipEventCreate(&stop_gpu);
printf("Cuda Works\n");
//float N = 435090;
//float N = 20000000;
float N = atoi(argv[1]);
//float blocksize = 32;
float blocksize = atoi(argv[2]);
float blocksWillBeCreated = (N / blocksize) + 1;
//define the input/output vectors of the host and kernel
float *vector1, *vector2, *d_vector1, *d_vector2;
float *output, *d_output;
//initialize defined vectors and output
start = clock();
vector1 = arrayGenerator(N);
vector2 = arrayGenerator(N);
output = (float*)malloc(3 * sizeof(float));
output[0] = 0; output[1] = 0; output[2] = 0;
//allocate for device members
hipMalloc(&d_vector1, N * sizeof(float));
hipMalloc(&d_vector2, N * sizeof(float));
hipMalloc(&d_output, 3 * sizeof(float));
end = clock();
time_for_arr_gen = ((double)(end - start)) / CLOCKS_PER_SEC;
//host to device transfer
start = clock();
hipMemcpy(d_vector1, vector1, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_vector2, vector2, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_output, output, 3 * sizeof(float), hipMemcpyHostToDevice);
end = clock();
time_for_host_to_device = ((double)(end - start)) / CLOCKS_PER_SEC;
//run host function and measure its elapsed time
start = clock();
CPU_result = findAngle(N, vector1, vector2);
end = clock();
time_for_cpu_func = ((double)(end - start)) / CLOCKS_PER_SEC;
//run kernel function and measure its elapsed time
start = clock();
compute << <(int)blocksWillBeCreated, (int)blocksize, (blocksize * sizeof(float)) >> > (N, d_vector1, d_vector2, d_output);
hipDeviceSynchronize();
end = clock();
time_for_kernel_exe = ((double)(end - start)) / CLOCKS_PER_SEC;
//device to host transfer
start = clock();
hipMemcpy(output, d_output, 3 * sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
end = clock();
time_for_device_to_host = ((double)(end - start)) / CLOCKS_PER_SEC;
output[1] = sqrt(output[1]);
output[2] = sqrt(output[2]);
float nominator = output[0];
//printf("Device nominator is: %f\n\n", nominator);
float denominator = output[1] * output[2];
GPU_result = nominator / denominator;
float value = 180.0 / PI;
GPU_result = atan(GPU_result) * value;
//float to int
int NInt = (int)(N);
int blocksizeInt = (int)(blocksize);
int blocksWillBeCreatedInt = (int)(blocksWillBeCreated);
//
printf("Info\n");
printf("__________________\n");
printf("Number of Elements: %d\n", NInt);
printf("Number of threads per block: %d\n", blocksizeInt);
printf("Number of blocks will be created: %d\n", blocksWillBeCreatedInt);
printf("Time\n");
printf("__________________\n");
printf("Time for the array generation: %f ms\n", time_for_arr_gen);
printf("Time for the CPU function: %f ms\n", time_for_cpu_func);
printf("Time for the Host to Device transfer: %f ms\n", time_for_host_to_device / 1000);
printf("Time for the kernel execution: %f ms\n", time_for_kernel_exe / 1000);
printf("Time for the Device to Host transfer: %f\ ms \n", time_for_device_to_host / 1000);
printf("Total execution time for GPU: %f ms\n", (time_for_host_to_device + time_for_kernel_exe) / 1000);
printf("Results\n");
printf("__________________\n");
printf("CPU result: %.3f\n", CPU_result);
printf("GPU result: %.3f\n", GPU_result);
//
hipFree(d_vector1);
hipFree(d_vector2);
free(vector1);
free(vector2);
}
else if (argc == 4) {
//results
float CPU_result, GPU_result;
//To measure time for CPU
clock_t start, end;
float time_for_arr_gen, time_for_cpu_func, time_for_host_to_device, time_for_device_to_host, time_for_kernel_exe;
//To measure time for GPU
hipEvent_t start_gpu, stop_gpu;
hipEventCreate(&start_gpu);
hipEventCreate(&stop_gpu);
printf("Cuda Works\n");
//read filename
//char *filename = "data.txt";
char *filename = argv[3];
float numOfArraySize = 0;
numOfArraySize = getArraySize(filename);
float N = numOfArraySize;
//float blocksize = 512;
float blocksize = atoi(argv[2]);
float blocksWillBeCreated = (N / blocksize) + 1;
//define the input/output vectors of the host and kernel
float *vector1, *vector2, *d_vector1, *d_vector2;
float *output, *d_output;
//initialize defined vectors and output
start = clock();
vector1 = (float*)malloc(N * sizeof(float));
vector2 = (float*)malloc(N * sizeof(float));
output = (float*)malloc(3 * sizeof(float));
readFile(filename, numOfArraySize, &vector1, &vector2);
output[0] = 0; output[1] = 0; output[2] = 0;
//allocate for device members
hipMalloc(&d_vector1, N * sizeof(float));
hipMalloc(&d_vector2, N * sizeof(float));
hipMalloc(&d_output, 3 * sizeof(float));
end = clock();
time_for_arr_gen = ((double)(end - start)) / CLOCKS_PER_SEC;
//host to device transfer
start = clock();
hipMemcpy(d_vector1, vector1, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_vector2, vector2, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_output, output, 3 * sizeof(float), hipMemcpyHostToDevice);
end = clock();
time_for_host_to_device = ((double)(end - start)) / CLOCKS_PER_SEC;
//run host function and measure its elapsed time
start = clock();
CPU_result = findAngle(N, vector1, vector2);
end = clock();
time_for_cpu_func = ((double)(end - start)) / CLOCKS_PER_SEC;
//run kernel function and measure its elapsed time
start = clock();
compute << <(int)((N / blocksize) + 1), (int)blocksize, (blocksize * sizeof(float)) >> > (N, d_vector1, d_vector2, d_output);
hipDeviceSynchronize();
end = clock();
time_for_kernel_exe = ((double)(end - start)) / CLOCKS_PER_SEC;
//device to host transfer
start = clock();
hipMemcpy(output, d_output, 3 * sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
end = clock();
time_for_device_to_host = ((double)(end - start)) / CLOCKS_PER_SEC;
output[1] = sqrt(output[1]);
output[2] = sqrt(output[2]);
float nominator = output[0];
float denominator = output[1] * output[2];
GPU_result = nominator / denominator;
float value = 180.0 / PI;
GPU_result = atan(GPU_result) * value;
//float to int
int NInt = (int)(N);
int blocksizeInt = (int)(blocksize);
int blocksWillBeCreatedInt = (int)(blocksWillBeCreated);
//
printf("Info\n");
printf("__________________\n");
printf("Number of Elements: %d\n", NInt);
printf("Number of threads per block: %d\n", blocksizeInt);
printf("Number of blocks will be created: %d\n", blocksWillBeCreatedInt);
printf("Time\n");
printf("__________________\n");
printf("Time for the array generation: %f ms\n", time_for_arr_gen);
printf("Time for the CPU function: %f ms\n", time_for_cpu_func);
printf("Time for the Host to Device transfer: %f ms\n", time_for_host_to_device / 1000);
printf("Time for the kernel execution: %f ms\n", time_for_kernel_exe / 1000);
printf("Time for the Device to Host transfer: %f\ ms \n", time_for_device_to_host / 1000);
printf("Total execution time for GPU: %f ms\n", (time_for_host_to_device + time_for_kernel_exe) / 1000);
printf("Results\n");
printf("__________________\n");
printf("CPU result: %.3f\n", CPU_result);
printf("GPU result: %.3f\n", GPU_result);
//
hipFree(d_vector1);
hipFree(d_vector2);
free(vector1);
free(vector2);
}
else {
printf("Invalid number of arguements");
}
return 0;
}
float *arrayGenerator(float N) {
if (N < 0)
return NULL;
float *vector = (float*)malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
vector[i] = rand() % 20 - 20;
}
return vector;
}
float getArraySize(char *filename) {
float numOfArraySize = 0;
FILE* file = fopen(filename, "r");
fscanf(file, "%f", &numOfArraySize);
fclose(file);
return numOfArraySize;
}
void readFile(char *filename, int arrSize, float **vector1, float **vector2) {
int a = 0;
FILE* file = fopen(filename, "r");
fscanf(file, "%d", &a);
int x = 0;
int i = 0;
int j = 0;
while (!feof(file)) {
fscanf(file, "%d", &x);
if (i < arrSize) {
(*vector1)[i] = x;
}
if (i >= arrSize && i < 2 * arrSize) {
(*vector2)[j] = x;
j++;
}
i++;
}
fclose(file);
}
float findAngle(float N, float *vector1, float *vector2) {
float nominator = 0;
float length1 = 0;
float length2 = 0;
float denominator = 0;
float result = 0;
float value = 180.0 / PI;
for (int i = 0; i < N; i++) {
//printf("vector1[i]: %d and vector2[i]: %d\n", vector1[i], vector2[i]);
nominator = nominator + vector1[i] * vector2[i];
}
//printf("Host nominator: %f\n", nominator);
for (int i = 0; i < N; i++) {
length1 = length1 + pow(vector1[i], 2);
length2 = length2 + pow(vector2[i], 2);
}
length1 = sqrt(length1);
length2 = sqrt(length2);
//printf("serial result length1: %f\n", length1);
//printf("serial result length2: %f\n", length2);
denominator = length1 * length2;
//printf("Denominator: %f\n", denominator);
result = nominator / denominator;
result = atan(result) * value;
return result;
} | cabbfe08252c47c7912e97008631cfa51caca724.cu | /***************
CS-426-Project-4
ERKAN ÍNAL
21302017
CUDA programming
***************/
#define NOMINMAX
#define PI 3.14159265
#include "cuda_runtime.h"
#include <iostream>
#include <string>
#include <sstream>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <time.h>
using namespace std;
float getArraySize(char *filename);
void readFile(char *filename, int arrSize, float **vector1, float **vector2);
float *arrayGenerator(float N);
float findAngle(float N, float *vector1, float *vector2);
__global__ void compute(float N, float *d_vector1, float *d_vector2, float *d_vector3)
{
extern __shared__ float sharedData[];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float tmp = 0;
while (tid < N) {
tmp = tmp + d_vector1[tid] * d_vector2[tid];
tid = tid + blockDim.x * gridDim.x;
}
//put your tmp to shared
sharedData[threadIdx.x] = tmp;
//synchronize threads
__syncthreads();
//reduction code
int i = blockDim.x / 2;
while (i != 0) {
if (threadIdx.x < i)
sharedData[threadIdx.x] = sharedData[threadIdx.x] + sharedData[threadIdx.x + i];
__syncthreads();
i = i / 2;
}
//accumulate into final result
if (threadIdx.x == 0) {
atomicAdd(d_vector3, sharedData[0]);
}
//Nominator Calculated, Now, Calculate Denominator
//*********************Calculate sqrt of first vector first*********************//
tid = threadIdx.x + blockIdx.x * blockDim.x;
tmp = 0;
while (tid < N) {
tmp = tmp + powf(d_vector1[tid], 2);
tid = tid + blockDim.x * gridDim.x;
}
//put your tmp to shared
sharedData[threadIdx.x] = tmp;
//synchronize threads
__syncthreads();
//reduction code
i = blockDim.x / 2;
while (i != 0) {
if (threadIdx.x < i)
sharedData[threadIdx.x] = sharedData[threadIdx.x] + sharedData[threadIdx.x + i];
__syncthreads();
i = i / 2;
}
//accumulate into final result
if (threadIdx.x == 0) {
atomicAdd(d_vector3 + 1, sharedData[0]);
}
//*********************Calculate sqrt of second vector*********************//
tid = threadIdx.x + blockIdx.x * blockDim.x;
tmp = 0;
while (tid < N) {
tmp = tmp + powf(d_vector2[tid], 2);
tid = tid + blockDim.x * gridDim.x;
}
//put your tmp to shared
sharedData[threadIdx.x] = tmp;
//synchronize threads
__syncthreads();
//reduction code
i = blockDim.x / 2;
while (i != 0) {
if (threadIdx.x < i)
sharedData[threadIdx.x] = sharedData[threadIdx.x] + sharedData[threadIdx.x + i];
__syncthreads();
i = i / 2;
}
//accumulate into final result
if (threadIdx.x == 0) {
atomicAdd(d_vector3 + 2, sharedData[0]);
}
}
int main(int argc, char **argv)
{
if (argc == 3) {
float CPU_result, GPU_result;
//To measure time for CPU
clock_t start, end;
float time_for_arr_gen, time_for_cpu_func, time_for_host_to_device, time_for_device_to_host, time_for_kernel_exe;
//To measure time for GPU
cudaEvent_t start_gpu, stop_gpu;
cudaEventCreate(&start_gpu);
cudaEventCreate(&stop_gpu);
printf("Cuda Works\n");
//float N = 435090;
//float N = 20000000;
float N = atoi(argv[1]);
//float blocksize = 32;
float blocksize = atoi(argv[2]);
float blocksWillBeCreated = (N / blocksize) + 1;
//define the input/output vectors of the host and kernel
float *vector1, *vector2, *d_vector1, *d_vector2;
float *output, *d_output;
//initialize defined vectors and output
start = clock();
vector1 = arrayGenerator(N);
vector2 = arrayGenerator(N);
output = (float*)malloc(3 * sizeof(float));
output[0] = 0; output[1] = 0; output[2] = 0;
//allocate for device members
cudaMalloc(&d_vector1, N * sizeof(float));
cudaMalloc(&d_vector2, N * sizeof(float));
cudaMalloc(&d_output, 3 * sizeof(float));
end = clock();
time_for_arr_gen = ((double)(end - start)) / CLOCKS_PER_SEC;
//host to device transfer
start = clock();
cudaMemcpy(d_vector1, vector1, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_vector2, vector2, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_output, output, 3 * sizeof(float), cudaMemcpyHostToDevice);
end = clock();
time_for_host_to_device = ((double)(end - start)) / CLOCKS_PER_SEC;
//run host function and measure its elapsed time
start = clock();
CPU_result = findAngle(N, vector1, vector2);
end = clock();
time_for_cpu_func = ((double)(end - start)) / CLOCKS_PER_SEC;
//run kernel function and measure its elapsed time
start = clock();
compute << <(int)blocksWillBeCreated, (int)blocksize, (blocksize * sizeof(float)) >> > (N, d_vector1, d_vector2, d_output);
cudaThreadSynchronize();
end = clock();
time_for_kernel_exe = ((double)(end - start)) / CLOCKS_PER_SEC;
//device to host transfer
start = clock();
cudaMemcpy(output, d_output, 3 * sizeof(float), cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
end = clock();
time_for_device_to_host = ((double)(end - start)) / CLOCKS_PER_SEC;
output[1] = sqrt(output[1]);
output[2] = sqrt(output[2]);
float nominator = output[0];
//printf("Device nominator is: %f\n\n", nominator);
float denominator = output[1] * output[2];
GPU_result = nominator / denominator;
float value = 180.0 / PI;
GPU_result = atan(GPU_result) * value;
//float to int
int NInt = (int)(N);
int blocksizeInt = (int)(blocksize);
int blocksWillBeCreatedInt = (int)(blocksWillBeCreated);
//
printf("Info\n");
printf("__________________\n");
printf("Number of Elements: %d\n", NInt);
printf("Number of threads per block: %d\n", blocksizeInt);
printf("Number of blocks will be created: %d\n", blocksWillBeCreatedInt);
printf("Time\n");
printf("__________________\n");
printf("Time for the array generation: %f ms\n", time_for_arr_gen);
printf("Time for the CPU function: %f ms\n", time_for_cpu_func);
printf("Time for the Host to Device transfer: %f ms\n", time_for_host_to_device / 1000);
printf("Time for the kernel execution: %f ms\n", time_for_kernel_exe / 1000);
printf("Time for the Device to Host transfer: %f\ ms \n", time_for_device_to_host / 1000);
printf("Total execution time for GPU: %f ms\n", (time_for_host_to_device + time_for_kernel_exe) / 1000);
printf("Results\n");
printf("__________________\n");
printf("CPU result: %.3f\n", CPU_result);
printf("GPU result: %.3f\n", GPU_result);
//
cudaFree(d_vector1);
cudaFree(d_vector2);
free(vector1);
free(vector2);
}
else if (argc == 4) {
//results
float CPU_result, GPU_result;
//To measure time for CPU
clock_t start, end;
float time_for_arr_gen, time_for_cpu_func, time_for_host_to_device, time_for_device_to_host, time_for_kernel_exe;
//To measure time for GPU
cudaEvent_t start_gpu, stop_gpu;
cudaEventCreate(&start_gpu);
cudaEventCreate(&stop_gpu);
printf("Cuda Works\n");
//read filename
//char *filename = "data.txt";
char *filename = argv[3];
float numOfArraySize = 0;
numOfArraySize = getArraySize(filename);
float N = numOfArraySize;
//float blocksize = 512;
float blocksize = atoi(argv[2]);
float blocksWillBeCreated = (N / blocksize) + 1;
//define the input/output vectors of the host and kernel
float *vector1, *vector2, *d_vector1, *d_vector2;
float *output, *d_output;
//initialize defined vectors and output
start = clock();
vector1 = (float*)malloc(N * sizeof(float));
vector2 = (float*)malloc(N * sizeof(float));
output = (float*)malloc(3 * sizeof(float));
readFile(filename, numOfArraySize, &vector1, &vector2);
output[0] = 0; output[1] = 0; output[2] = 0;
//allocate for device members
cudaMalloc(&d_vector1, N * sizeof(float));
cudaMalloc(&d_vector2, N * sizeof(float));
cudaMalloc(&d_output, 3 * sizeof(float));
end = clock();
time_for_arr_gen = ((double)(end - start)) / CLOCKS_PER_SEC;
//host to device transfer
start = clock();
cudaMemcpy(d_vector1, vector1, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_vector2, vector2, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_output, output, 3 * sizeof(float), cudaMemcpyHostToDevice);
end = clock();
time_for_host_to_device = ((double)(end - start)) / CLOCKS_PER_SEC;
//run host function and measure its elapsed time
start = clock();
CPU_result = findAngle(N, vector1, vector2);
end = clock();
time_for_cpu_func = ((double)(end - start)) / CLOCKS_PER_SEC;
//run kernel function and measure its elapsed time
start = clock();
compute << <(int)((N / blocksize) + 1), (int)blocksize, (blocksize * sizeof(float)) >> > (N, d_vector1, d_vector2, d_output);
cudaThreadSynchronize();
end = clock();
time_for_kernel_exe = ((double)(end - start)) / CLOCKS_PER_SEC;
//device to host transfer
start = clock();
cudaMemcpy(output, d_output, 3 * sizeof(float), cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
end = clock();
time_for_device_to_host = ((double)(end - start)) / CLOCKS_PER_SEC;
output[1] = sqrt(output[1]);
output[2] = sqrt(output[2]);
float nominator = output[0];
float denominator = output[1] * output[2];
GPU_result = nominator / denominator;
float value = 180.0 / PI;
GPU_result = atan(GPU_result) * value;
//float to int
int NInt = (int)(N);
int blocksizeInt = (int)(blocksize);
int blocksWillBeCreatedInt = (int)(blocksWillBeCreated);
//
printf("Info\n");
printf("__________________\n");
printf("Number of Elements: %d\n", NInt);
printf("Number of threads per block: %d\n", blocksizeInt);
printf("Number of blocks will be created: %d\n", blocksWillBeCreatedInt);
printf("Time\n");
printf("__________________\n");
printf("Time for the array generation: %f ms\n", time_for_arr_gen);
printf("Time for the CPU function: %f ms\n", time_for_cpu_func);
printf("Time for the Host to Device transfer: %f ms\n", time_for_host_to_device / 1000);
printf("Time for the kernel execution: %f ms\n", time_for_kernel_exe / 1000);
printf("Time for the Device to Host transfer: %f\ ms \n", time_for_device_to_host / 1000);
printf("Total execution time for GPU: %f ms\n", (time_for_host_to_device + time_for_kernel_exe) / 1000);
printf("Results\n");
printf("__________________\n");
printf("CPU result: %.3f\n", CPU_result);
printf("GPU result: %.3f\n", GPU_result);
//
cudaFree(d_vector1);
cudaFree(d_vector2);
free(vector1);
free(vector2);
}
else {
printf("Invalid number of arguements");
}
return 0;
}
float *arrayGenerator(float N) {
if (N < 0)
return NULL;
float *vector = (float*)malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
vector[i] = rand() % 20 - 20;
}
return vector;
}
float getArraySize(char *filename) {
float numOfArraySize = 0;
FILE* file = fopen(filename, "r");
fscanf(file, "%f", &numOfArraySize);
fclose(file);
return numOfArraySize;
}
void readFile(char *filename, int arrSize, float **vector1, float **vector2) {
int a = 0;
FILE* file = fopen(filename, "r");
fscanf(file, "%d", &a);
int x = 0;
int i = 0;
int j = 0;
while (!feof(file)) {
fscanf(file, "%d", &x);
if (i < arrSize) {
(*vector1)[i] = x;
}
if (i >= arrSize && i < 2 * arrSize) {
(*vector2)[j] = x;
j++;
}
i++;
}
fclose(file);
}
float findAngle(float N, float *vector1, float *vector2) {
float nominator = 0;
float length1 = 0;
float length2 = 0;
float denominator = 0;
float result = 0;
float value = 180.0 / PI;
for (int i = 0; i < N; i++) {
//printf("vector1[i]: %d and vector2[i]: %d\n", vector1[i], vector2[i]);
nominator = nominator + vector1[i] * vector2[i];
}
//printf("Host nominator: %f\n", nominator);
for (int i = 0; i < N; i++) {
length1 = length1 + pow(vector1[i], 2);
length2 = length2 + pow(vector2[i], 2);
}
length1 = sqrt(length1);
length2 = sqrt(length2);
//printf("serial result length1: %f\n", length1);
//printf("serial result length2: %f\n", length2);
denominator = length1 * length2;
//printf("Denominator: %f\n", denominator);
result = nominator / denominator;
result = atan(result) * value;
return result;
} |
5ce608ad2132e17ed2fd309a675f30b0bc4c3680.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author George A. Shulinok <[email protected]>, created on 4/18/2019
//
#include <ops/declarable/helpers/BarnesHutTsne.h>
namespace nd4j {
namespace ops {
namespace helpers {
static __global__ void countRowsKernel(int* pRowCounts, int const* pRows, int const* pCols, Nd4jLong N) {
auto start = blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int n = threadIdx.x + start; n < N; n += step) {
int begin = pRows[n];//->e<int>(n);
int end = pRows[n + 1];//rowP->e<int>(n + 1);
for (int i = begin; i < end; i++) {
bool present = false;
for (int m = pRows[pCols[i]]; m < pRows[pCols[i] + 1]; m++)
if (pCols[m] == n) {
present = true;
break;
}
atomicAdd(&pRowCounts[n], 1);
if (!present)
atomicAdd(&pRowCounts[pCols[i]], 1);
}
}
}
Nd4jLong barnes_row_count(const NDArray* rowP, const NDArray* colP, Nd4jLong N, NDArray& rowCounts) {
int* pRowCounts = reinterpret_cast<int*>(rowCounts.specialBuffer());
int const* pRows = reinterpret_cast<int const*>(rowP->getSpecialBuffer());
int const* pCols = reinterpret_cast<int const*>(colP->getSpecialBuffer());
auto stream = rowCounts.getContext()->getCudaStream();
hipLaunchKernelGGL(( countRowsKernel), dim3(1), dim3(1), 128, *stream, pRowCounts, pRows, pCols, N);
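// Note: the kernel is launched with a single block of a single thread here; the
// grid-stride loop inside countRowsKernel still visits all N rows, just serially.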
NDArray numElementsArr = rowCounts.sumNumber(); //reduceAlongDimension(reduce::Sum, {});
//rowCounts.printBuffer("Row counts");
auto numElements = numElementsArr.e<Nd4jLong>(0);
return numElements;
}
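// fillUpsymRow builds the symmetric row-pointer array: symRowP[n] becomes the exclusive
// prefix sum of pRowCounts[0..n-1]. Every thread recomputes its own prefix with repeated
// atomicAdds, so this is a simple O(N^2) scan rather than a parallel prefix sum.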
static __global__ void fillUpsymRow(int const* pRowCounts, int* symRowP, int N) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int n = start; n < N + 1; n += step) {
symRowP[n] = 0;
for (int i = 0; i < n; i++)
atomicAdd(&symRowP[n], pRowCounts[i]);
}
}
template <typename T>
static __global__ void symmetrizeKernel(int const* pRows, int const* pCols, T const* pVals, int* symRowP, int* symColP, int* offset, T* pOutput, int N) {
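// Symmetrizes the sparse affinity matrix: for each stored entry (n, colPI) it writes
// vals[i] (or vals[i] + vals[m] when the transposed entry exists) into both (n, colPI)
// and (colPI, n), using the offset array as per-row write cursors; the caller then
// halves outputVals, yielding (P + P^T) / 2.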
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int n = start; n < N; n += step) {
int begin = pRows[n];
int bound = pRows[n + 1];
for (int i = begin; i < bound; i++) {
bool present = false;
int colPI = pCols[i];
int start = pRows[colPI];
int end = pRows[colPI + 1];
//PRAGMA_OMP_PARALLEL_FOR_ARGS(schedule(guided) firstprivate(offset))
for (int m = start; m < end; m++) {
if (pCols[m] == n) {
present = true;
if (n <= colPI) {
symColP[symRowP[n] + offset[n]] = colPI;
symColP[symRowP[colPI] + offset[colPI]] = n;
pOutput[symRowP[n] + offset[n]] = pVals[i] + pVals[m];
pOutput[symRowP[colPI] + offset[colPI]] = pVals[i] + pVals[m];
}
}
}
// If (colP[i], n) is not present, there is no addition involved
if (!present) {
//int colPI = pCols[i];
//if (n <= colPI) {
symColP[symRowP[n] + offset[n]] = colPI;
symColP[symRowP[pCols[i]] + offset[colPI]] = n;
pOutput[symRowP[n] + offset[n]] = pVals[i];
pOutput[symRowP[colPI] + offset[colPI]] = pVals[i];
//}
}
// Update offsets
if (!present || (present && n <= colPI)) {
atomicAdd(&offset[n], 1);
if (colPI != n)
atomicAdd(&offset[colPI], 1);
}
}
}
}
template <typename T>
static void barnes_symmetrize_(const NDArray* rowP, const NDArray* colP, const NDArray* valP, Nd4jLong N, NDArray* outputRows, NDArray* outputCols, NDArray* outputVals, NDArray* rowCounts) {
int const* pRows = reinterpret_cast<int const*>(rowP->getSpecialBuffer());
int* symRowP = reinterpret_cast<int*>(outputRows->specialBuffer());
int* pRowCounts = reinterpret_cast<int*>(rowCounts->specialBuffer());
auto stream = outputCols->getContext()->getCudaStream();
hipLaunchKernelGGL(( fillUpsymRow), dim3(1), dim3(N), 128, *stream, pRowCounts, symRowP, N);
outputRows->syncToHost();
// outputRows->printBuffer("output rows");
int* symColP = reinterpret_cast<int*>(outputCols->specialBuffer());
// outputRows->printBuffer("SymRows are");
int const* pCols = reinterpret_cast<int const*>(colP->getSpecialBuffer());
T const* pVals = reinterpret_cast<T const*>(valP->getSpecialBuffer());
T* pOutput = reinterpret_cast<T*>(outputVals->specialBuffer());
//std::vector<int> rowCountsV = rowCounts->getBufferAsVector<int>();
auto offsetArr = NDArrayFactory::create<int>('c', {N});
int* offset = reinterpret_cast<int*>(offsetArr.specialBuffer());
hipLaunchKernelGGL(( symmetrizeKernel<T>), dim3(1), dim3(1), 1024, *stream, pRows, pCols, pVals, symRowP, symColP, offset, pOutput, N);
//PRAGMA_OMP_PARALLEL_FOR_SIMD_ARGS(schedule(guided) shared(offset))
}
void barnes_symmetrize(const NDArray* rowP, const NDArray* colP, const NDArray* valP, Nd4jLong N, NDArray* outputRows, NDArray* outputCols, NDArray* outputVals, NDArray* rowCounts) {
BUILD_SINGLE_SELECTOR(valP->dataType(), barnes_symmetrize_, (rowP, colP, valP, N, outputRows, outputCols, outputVals, rowCounts), NUMERIC_TYPES);
*outputVals /= 2.0;
}
BUILD_SINGLE_TEMPLATE(template void barnes_symmetrize_, (const NDArray* rowP, const NDArray* colP, const NDArray* valP, Nd4jLong N, NDArray* outputRows, NDArray* outputCols, NDArray* outputVals, NDArray* rowCounts), NUMERIC_TYPES);
template <typename T>
static __global__ void edgeForcesKernel(int const* pRows, int const* pCols, T const* dataP, T const* vals, T* outputP, int N, int colCount, int rowSize) {
// std::vector<T> buffer(colCount);
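// Grid-stride loop over output rows: for every neighbour pCols[i] of point n the kernel
// computes res = vals[i] / (1 + ||y_n - y_m||^2) and atomically accumulates
// (y_n - y_m) * res into row n of the output (the attractive edge force used by
// Barnes-Hut t-SNE).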
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int n = start; n < N; n += step) {
int start = pRows[n];
int end = pRows[n + 1];
int shift = n * colCount;
for (int i = start; i < end; i++) {
T const* thisSlice = dataP + pCols[i] * colCount;
T res = 1;
for (int k = 0; k < colCount; k++) {
auto valTemp = dataP[shift + k] - thisSlice[k];//thisSlice[k];
res += valTemp * valTemp; // (dataP[shift + k] * dataP[shift + k] - 2 * dataP[shift + k] * thisSlice[k] + thisSlice[k] * thisSlice[k])
}
res = vals[i] / res;
for (int k = 0; k < colCount; k++)
math::atomics::nd4j_atomicAdd(&outputP[shift + k], T((dataP[shift + k] - thisSlice[k]) * res));
}
//atomicAdd(&shift, colCount);
}
}
template <typename T>
static void barnes_edge_forces_(const NDArray* rowP, NDArray const* colP, NDArray const* valP, int N, NDArray const* data, NDArray* output) {
NDArray::prepareSpecialUse({output}, {data, rowP, colP, valP, valP});
T const* dataP = reinterpret_cast<T const*>(data->getSpecialBuffer());
T const* vals = reinterpret_cast<T const*>(valP->getSpecialBuffer());
T* outputP = reinterpret_cast<T*>(output->specialBuffer());
int const* pRows = reinterpret_cast<int const*>(rowP->getSpecialBuffer());
int const* pCols = reinterpret_cast<int const*>(colP->getSpecialBuffer());
int colCount = data->columns();
//auto shift = 0;
auto rowSize = sizeof(T) * colCount;
auto stream = output->getContext()->getCudaStream();
hipLaunchKernelGGL(( edgeForcesKernel<T>), dim3(1), dim3(128), 1024, *stream, pRows, pCols, dataP, vals, outputP, N, colCount, rowSize);
NDArray::registerSpecialUse({output}, {rowP, colP, valP, data});
}
void barnes_edge_forces(const NDArray* rowP, NDArray const* colP, NDArray const* valP, int N, NDArray* output, NDArray const& data) {
// Loop over all edges in the graph
BUILD_SINGLE_SELECTOR(output->dataType(), barnes_edge_forces_, (rowP, colP, valP, N, &data, output), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void barnes_edge_forces_, (const NDArray* rowP, NDArray const* colP, NDArray const* valP, int N, NDArray const* data, NDArray* output), FLOAT_TYPES);
template <typename T>
void barnes_gains_(NDArray* input, NDArray* gradX, NDArray* epsilon, NDArray* output) {
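// Delta-bar-delta style gain update: when the gradient and the previous step disagree
// in sign the gain is increased by 0.2, otherwise it is scaled by 0.8, and the result
// is clamped from below at 0.01.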
auto gainsInternal = LAMBDA_TTT(x, grad, eps) {
// return T((x + 2.) * nd4j::math::nd4j_sign<T,T>(grad) != nd4j::math::nd4j_sign<T,T>(eps)) + T(x * 0.8 * nd4j::math::nd4j_sign<T,T>(grad) != nd4j::math::nd4j_sign<T,T>(eps));
//return T((x + 2.) * nd4j::math::nd4j_sign<T,T>(grad) == nd4j::math::nd4j_sign<T,T>(eps)) + T(x * 0.8 * nd4j::math::nd4j_sign<T,T>(grad) == nd4j::math::nd4j_sign<T,T>(eps));
T res = nd4j::math::nd4j_sign<T,T>(grad) != nd4j::math::nd4j_sign<T,T>(eps) ? x + T(.2) : x * T(.8);
if(res < .01) res = .01;
return res;
};
input->applyTriplewiseLambda(gradX, epsilon, gainsInternal, output);
}
void barnes_gains(NDArray* input, NDArray* gradX, NDArray* epsilon, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), barnes_gains_, (input, gradX, epsilon, output), NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void barnes_gains_, (NDArray* input, NDArray* gradX, NDArray* epsilon, NDArray* output), NUMERIC_TYPES);
bool cell_contains(NDArray* corner, NDArray* width, NDArray* point, Nd4jLong dimension) {
auto cornerMinusWidth = *corner - *width;
auto cornerPlusWidth = *corner + *width;
cornerMinusWidth.syncToHost();
cornerPlusWidth.syncToHost();
for (Nd4jLong i = 0; i < dimension; i++) {
if (cornerMinusWidth.e<double>(i) > point->e<double>(i))
return false;
if (cornerPlusWidth.e<double>(i) < point->e<double>(i))
return false;
}
return true;
}
}
}
}
| 5ce608ad2132e17ed2fd309a675f30b0bc4c3680.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author George A. Shulinok <[email protected]>, created on 4/18/2019
//
#include <ops/declarable/helpers/BarnesHutTsne.h>
namespace nd4j {
namespace ops {
namespace helpers {
static __global__ void countRowsKernel(int* pRowCounts, int const* pRows, int const* pCols, Nd4jLong N) {
auto start = blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int n = threadIdx.x + start; n < N; n += step) {
int begin = pRows[n];//->e<int>(n);
int end = pRows[n + 1];//rowP->e<int>(n + 1);
for (int i = begin; i < end; i++) {
bool present = false;
for (int m = pRows[pCols[i]]; m < pRows[pCols[i] + 1]; m++)
if (pCols[m] == n) {
present = true;
break;
}
atomicAdd(&pRowCounts[n], 1);
if (!present)
atomicAdd(&pRowCounts[pCols[i]], 1);
}
}
}
Nd4jLong barnes_row_count(const NDArray* rowP, const NDArray* colP, Nd4jLong N, NDArray& rowCounts) {
int* pRowCounts = reinterpret_cast<int*>(rowCounts.specialBuffer());
int const* pRows = reinterpret_cast<int const*>(rowP->getSpecialBuffer());
int const* pCols = reinterpret_cast<int const*>(colP->getSpecialBuffer());
auto stream = rowCounts.getContext()->getCudaStream();
countRowsKernel<<<1, 1, 128, *stream>>>(pRowCounts, pRows, pCols, N);
NDArray numElementsArr = rowCounts.sumNumber(); //reduceAlongDimension(reduce::Sum, {});
//rowCounts.printBuffer("Row counts");
auto numElements = numElementsArr.e<Nd4jLong>(0);
return numElements;
}
static __global__ void fillUpsymRow(int const* pRowCounts, int* symRowP, int N) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int n = start; n < N + 1; n += step) {
symRowP[n] = 0;
for (int i = 0; i < n; i++)
atomicAdd(&symRowP[n], pRowCounts[i]);
}
}
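// symmetrizeKernel: writes the symmetrized sparse matrix; every pair (n, colP[i]) produces an entry in both row n and row colP[i], summing the two values when the transposed entry already exists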
template <typename T>
static __global__ void symmetrizeKernel(int const* pRows, int const* pCols, T const* pVals, int* symRowP, int* symColP, int* offset, T* pOutput, int N) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int n = start; n < N; n += step) {
int begin = pRows[n];
int bound = pRows[n + 1];
for (int i = begin; i < bound; i++) {
bool present = false;
int colPI = pCols[i];
int start = pRows[colPI];
int end = pRows[colPI + 1];
//PRAGMA_OMP_PARALLEL_FOR_ARGS(schedule(guided) firstprivate(offset))
for (int m = start; m < end; m++) {
if (pCols[m] == n) {
present = true;
if (n <= colPI) {
symColP[symRowP[n] + offset[n]] = colPI;
symColP[symRowP[colPI] + offset[colPI]] = n;
pOutput[symRowP[n] + offset[n]] = pVals[i] + pVals[m];
pOutput[symRowP[colPI] + offset[colPI]] = pVals[i] + pVals[m];
}
}
}
// If (colP[i], n) is not present, there is no addition involved
if (!present) {
//int colPI = pCols[i];
//if (n <= colPI) {
symColP[symRowP[n] + offset[n]] = colPI;
symColP[symRowP[pCols[i]] + offset[colPI]] = n;
pOutput[symRowP[n] + offset[n]] = pVals[i];
pOutput[symRowP[colPI] + offset[colPI]] = pVals[i];
//}
}
// Update offsets
if (!present || (present && n <= colPI)) {
atomicAdd(&offset[n], 1);
if (colPI != n)
atomicAdd(&offset[colPI], 1);
}
}
}
}
template <typename T>
static void barnes_symmetrize_(const NDArray* rowP, const NDArray* colP, const NDArray* valP, Nd4jLong N, NDArray* outputRows, NDArray* outputCols, NDArray* outputVals, NDArray* rowCounts) {
int const* pRows = reinterpret_cast<int const*>(rowP->getSpecialBuffer());
int* symRowP = reinterpret_cast<int*>(outputRows->specialBuffer());
int* pRowCounts = reinterpret_cast<int*>(rowCounts->specialBuffer());
auto stream = outputCols->getContext()->getCudaStream();
fillUpsymRow<<<1, N, 128, *stream>>>(pRowCounts, symRowP, N);
outputRows->syncToHost();
// outputRows->printBuffer("output rows");
int* symColP = reinterpret_cast<int*>(outputCols->specialBuffer());
// outputRows->printBuffer("SymRows are");
int const* pCols = reinterpret_cast<int const*>(colP->getSpecialBuffer());
T const* pVals = reinterpret_cast<T const*>(valP->getSpecialBuffer());
T* pOutput = reinterpret_cast<T*>(outputVals->specialBuffer());
//std::vector<int> rowCountsV = rowCounts->getBufferAsVector<int>();
auto offsetArr = NDArrayFactory::create<int>('c', {N});
int* offset = reinterpret_cast<int*>(offsetArr.specialBuffer());
symmetrizeKernel<T><<<1, 1, 1024, *stream>>>(pRows, pCols, pVals, symRowP, symColP, offset, pOutput, N);
//PRAGMA_OMP_PARALLEL_FOR_SIMD_ARGS(schedule(guided) shared(offset))
}
void barnes_symmetrize(const NDArray* rowP, const NDArray* colP, const NDArray* valP, Nd4jLong N, NDArray* outputRows, NDArray* outputCols, NDArray* outputVals, NDArray* rowCounts) {
BUILD_SINGLE_SELECTOR(valP->dataType(), barnes_symmetrize_, (rowP, colP, valP, N, outputRows, outputCols, outputVals, rowCounts), NUMERIC_TYPES);
*outputVals /= 2.0;
}
BUILD_SINGLE_TEMPLATE(template void barnes_symmetrize_, (const NDArray* rowP, const NDArray* colP, const NDArray* valP, Nd4jLong N, NDArray* outputRows, NDArray* outputCols, NDArray* outputVals, NDArray* rowCounts), NUMERIC_TYPES);
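// edgeForcesKernel: accumulates the attractive edge forces; each neighbour contributes (x_n - x_m) * val / (1 + ||x_n - x_m||^2) to the output row of point n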
template <typename T>
static __global__ void edgeForcesKernel(int const* pRows, int const* pCols, T const* dataP, T const* vals, T* outputP, int N, int colCount, int rowSize) {
// std::vector<T> buffer(colCount);
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int n = start; n < N; n += step) {
int start = pRows[n];
int end = pRows[n + 1];
int shift = n * colCount;
for (int i = start; i < end; i++) {
T const* thisSlice = dataP + pCols[i] * colCount;
T res = 1;
for (int k = 0; k < colCount; k++) {
auto valTemp = dataP[shift + k] - thisSlice[k];//thisSlice[k];
res += valTemp * valTemp; // (dataP[shift + k] * dataP[shift + k] - 2 * dataP[shift + k] * thisSlice[k] + thisSlice[k] * thisSlice[k])
}
res = vals[i] / res;
for (int k = 0; k < colCount; k++)
math::atomics::nd4j_atomicAdd(&outputP[shift + k], T((dataP[shift + k] - thisSlice[k]) * res));
}
//atomicAdd(&shift, colCount);
}
}
template <typename T>
static void barnes_edge_forces_(const NDArray* rowP, NDArray const* colP, NDArray const* valP, int N, NDArray const* data, NDArray* output) {
NDArray::prepareSpecialUse({output}, {data, rowP, colP, valP, valP});
T const* dataP = reinterpret_cast<T const*>(data->getSpecialBuffer());
T const* vals = reinterpret_cast<T const*>(valP->getSpecialBuffer());
T* outputP = reinterpret_cast<T*>(output->specialBuffer());
int const* pRows = reinterpret_cast<int const*>(rowP->getSpecialBuffer());
int const* pCols = reinterpret_cast<int const*>(colP->getSpecialBuffer());
int colCount = data->columns();
//auto shift = 0;
auto rowSize = sizeof(T) * colCount;
auto stream = output->getContext()->getCudaStream();
edgeForcesKernel<T><<<1, 128, 1024, *stream>>>(pRows, pCols, dataP, vals, outputP, N, colCount, rowSize);
NDArray::registerSpecialUse({output}, {rowP, colP, valP, data});
}
void barnes_edge_forces(const NDArray* rowP, NDArray const* colP, NDArray const* valP, int N, NDArray* output, NDArray const& data) {
// Loop over all edges in the graph
BUILD_SINGLE_SELECTOR(output->dataType(), barnes_edge_forces_, (rowP, colP, valP, N, &data, output), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void barnes_edge_forces_, (const NDArray* rowP, NDArray const* colP, NDArray const* valP, int N, NDArray const* data, NDArray* output), FLOAT_TYPES);
template <typename T>
void barnes_gains_(NDArray* input, NDArray* gradX, NDArray* epsilon, NDArray* output) {
auto gainsInternal = LAMBDA_TTT(x, grad, eps) {
// return T((x + 2.) * nd4j::math::nd4j_sign<T,T>(grad) != nd4j::math::nd4j_sign<T,T>(eps)) + T(x * 0.8 * nd4j::math::nd4j_sign<T,T>(grad) != nd4j::math::nd4j_sign<T,T>(eps));
//return T((x + 2.) * nd4j::math::nd4j_sign<T,T>(grad) == nd4j::math::nd4j_sign<T,T>(eps)) + T(x * 0.8 * nd4j::math::nd4j_sign<T,T>(grad) == nd4j::math::nd4j_sign<T,T>(eps));
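            // gains update rule: grow the gain additively when the gradient and the previous step (eps) disagree in sign, otherwise shrink it multiplicatively; clamp below at 0.01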
T res = nd4j::math::nd4j_sign<T,T>(grad) != nd4j::math::nd4j_sign<T,T>(eps) ? x + T(.2) : x * T(.8);
if(res < .01) res = .01;
return res;
};
input->applyTriplewiseLambda(gradX, epsilon, gainsInternal, output);
}
void barnes_gains(NDArray* input, NDArray* gradX, NDArray* epsilon, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), barnes_gains_, (input, gradX, epsilon, output), NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void barnes_gains_, (NDArray* input, NDArray* gradX, NDArray* epsilon, NDArray* output), NUMERIC_TYPES);
bool cell_contains(NDArray* corner, NDArray* width, NDArray* point, Nd4jLong dimension) {
auto cornerMinusWidth = *corner - *width;
auto cornerPlusWidth = *corner + *width;
cornerMinusWidth.syncToHost();
cornerPlusWidth.syncToHost();
for (Nd4jLong i = 0; i < dimension; i++) {
if (cornerMinusWidth.e<double>(i) > point->e<double>(i))
return false;
if (cornerPlusWidth.e<double>(i) < point->e<double>(i))
return false;
}
return true;
}
}
}
}
|
45f77b77e5b3448d78c7e1d5e694c2638253da30.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include "main.h"
#include "NBody.cuh"
#include <algorithm>
#include <math.h>
#include <iostream>
#define BLOCKSIZE (128)
using namespace std;
/*cuda shared memory*/
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
double getRandom(double min, double max)
{
float r = (float)rand() / RAND_MAX;
return r * (max - min) + min;
}
struct body *initializeNBodyCuda(char method)
{
/**
initialize the bodies, then copy to the CUDA device memory
return the device pointer so that it can be reused in the NBodyTimestepCuda function.
*/
/*Using CUDA Unified Memory to automatically handle memory operation*/
body *bodies;
const double PI = 3.14159265;
hipMallocManaged((void **) &bodies, sizeof(body) * (NUM_BODIES+1));/*using bodies[NUM_BODIES] to store cursor body*/
if(method == '0')
{
cout<<"Initialized by random position and mass"<<endl;
for (auto i = 0; i < NUM_BODIES+1; i++)
{
bodies[i].m = getRandom(0,1.0);
bodies[i].x = getRandom(-1,1);
bodies[i].y = getRandom(-1,1);
bodies[i].vx = 0;
bodies[i].vy = 0;
}
}
else
{
cout<<"Initialized by random velocity and mass"<<endl;
for (auto i = 0; i < NUM_BODIES+1; i++)
{
bodies[i].m = getRandom(0,1.0);
bodies[i].x = 0;
bodies[i].y = 0;
auto radius = getRandom(0,0.085);
auto theta = getRandom(0,2*PI);
bodies[i].vx = radius*cos(theta);
bodies[i].vy = radius*sin(theta);
}
}
return bodies;
}
unsigned char* initCanvas()
{
    /*Using CUDA Unified Memory to automatically handle memory operations, so that the rasterize routine can run in parallel on the device*/
unsigned char* buffer;
hipMallocManaged((void **) &buffer, sizeof(unsigned char) * SCREEN_WIDTH * SCREEN_HEIGHT * 3);
return buffer;
}
__global__
void rasterizeKernel(unsigned char* buffer, body* bodies)
{
/*rasterize kernel*/
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= NUM_BODIES)
{
return;
}
int x = (int) lround(SCREEN_WIDTH * ((bodies[index].x + 1) / 2.0));
int y = (int) lround(SCREEN_HEIGHT * ((bodies[index].y + 1) / 2.0));
buffer[x * SCREEN_WIDTH * 3 + y * 3 + 0] = 0XFF;
buffer[x * SCREEN_WIDTH * 3 + y * 3 + 1] = 0XFE;
buffer[x * SCREEN_WIDTH * 3 + y * 3 + 2] = 0XE5;
}
__global__
void trailKernel(unsigned char* buffer)
{
    /*trail effect kernel: fades the previous frame so that moving bodies leave trails*/
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= SCREEN_WIDTH * SCREEN_HEIGHT * 3)
{
return;
}
buffer[index] = (unsigned char)(buffer[index] * 0.8);
}
void rasterize(struct body *bodies, unsigned char *buffer)
{
/**
    rasterize the bodies, mapping positions in [-1,1] x [-1,1] to screen pixel coordinates
Note: You can change the code for better visualization
As the following code can be parallelized, you can optimize this routine with CUDA.
\param bodies A collection of bodies (located on the device).
\param buffer the RGB buffer for screen display (located on the host).
*/
    /*trail effect*/
dim3 blockSize(BLOCKSIZE);
dim3 gridSize((SCREEN_WIDTH * SCREEN_HEIGHT * 3 + blockSize.x - 1) / blockSize.x);
hipLaunchKernelGGL(( trailKernel), dim3(gridSize), dim3(blockSize), 0, 0, buffer);
hipDeviceSynchronize();
/*rasterize*/
dim3 gridSize2((NUM_BODIES + blockSize.x - 1) / blockSize.x);
hipLaunchKernelGGL(( rasterizeKernel), dim3(gridSize2), dim3(blockSize), 0, 0, buffer,bodies);
hipDeviceSynchronize();
}
void freeCudaMem(void* p)
{
    /*free manually allocated device memory*/
hipFree(p);
}
__device__
float2 bodyBodyInteraction(body selfBody, body body2, float2 acc)
{
    /*calculate the acceleration exerted on selfBody by body2*/
float2 r;
r.x = body2.x - selfBody.x;
r.y = body2.y - selfBody.y;
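    // eps is a softening term that keeps the force finite when two bodies (or a body and the cursor) nearly coincide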
float disSquare = r.x * r.x + r.y * r.y + eps * eps;
float tmp = G * body2.m * rsqrt(disSquare * disSquare * disSquare);
acc.x += r.x * tmp;
acc.y += r.y * tmp;
return acc;
}
__device__
float2 computeBodyAccel(body selfBody, body* bodies, int numTiles)
{
    /*compute the body's acceleration due to all other bodies, one shared-memory tile at a time*/
body *sharedBodies = SharedMemory<body>();
float2 acc; acc.x =0; acc.y=0;
for (int tile = 0; tile < numTiles; tile++)
{
sharedBodies[threadIdx.x] = bodies[tile * blockDim.x + threadIdx.x];
__syncthreads();
#pragma unroll 128
for (unsigned int counter = 0; counter < blockDim.x; counter++)
{
acc = bodyBodyInteraction(selfBody, sharedBodies[counter], acc);
}
__syncthreads();
}
return acc;
}
__global__ void
integrateBodies(body* bodies, float deltaTime,int numTiles)
{
/*N-Body kernel*/
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= NUM_BODIES)
{
return;
}
bodies[index].x += bodies[index].vx *deltaTime;
bodies[index].y += bodies[index].vy *deltaTime;
body b1 = bodies[index];
float2 accel = computeBodyAccel(b1,bodies,numTiles);
bodies[index].vx += accel.x * deltaTime;
bodies[index].vy += accel.y * deltaTime;
// solve boundary problem
if (bodies[index].x > 1 || bodies[index].x < -1)
{
bodies[index].vx *= (-collision_damping);
bodies[index].x = (bodies[index].x > 0 ? 1.0 : -1.0);
}
if (bodies[index].y > 1 || bodies[index].y < -1)
{
bodies[index].vy *= (-collision_damping);
bodies[index].y = (bodies[index].y > 0 ? 1.0 : -1.0);
}
}
void NBodyTimestepCuda(struct body *bodies, float rx, float ry, bool cursor)
{
/**
Compute a time step on the CUDA device.
TODO: correctly manage the device memory, compute the time step with proper block/threads
\param bodies A collection of bodies (located on the device).
\param rx position x of the cursor.
\param ry position y of the cursor.
\param cursor Enable the mouse interaction if true (adding a weight = cursor_weight body in the computation).
*/
float timeStep = 1;
/*cursor body*/
bodies[NUM_BODIES].m = cursor_weight * (float) cursor; // convert bool condition to scale multiply to avoid branch selection
bodies[NUM_BODIES].x = rx;
bodies[NUM_BODIES].y = ry;
bodies[NUM_BODIES].vx = 0;
bodies[NUM_BODIES].vy = 0;
/*N-Body cuda kernel exec*/
int numTiles = (NUM_BODIES + BLOCKSIZE) / BLOCKSIZE;/*add 1 to store cursor body*/
dim3 blockSize(BLOCKSIZE);
dim3 gridSize(numTiles);
int sharedMemSize = BLOCKSIZE * sizeof(body);
hipLaunchKernelGGL(( integrateBodies), dim3(gridSize), dim3(blockSize), sharedMemSize , 0, bodies,timeStep,numTiles);
hipDeviceSynchronize();
}
| 45f77b77e5b3448d78c7e1d5e694c2638253da30.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include "main.h"
#include "NBody.cuh"
#include <algorithm>
#include <math.h>
#include <iostream>
#define BLOCKSIZE (128)
using namespace std;
/*cuda shared memory*/
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
double getRandom(double min, double max)
{
float r = (float)rand() / RAND_MAX;
return r * (max - min) + min;
}
struct body *initializeNBodyCuda(char method)
{
/**
initialize the bodies, then copy to the CUDA device memory
return the device pointer so that it can be reused in the NBodyTimestepCuda function.
*/
/*Using CUDA Unified Memory to automatically handle memory operation*/
body *bodies;
const double PI = 3.14159265;
cudaMallocManaged((void **) &bodies, sizeof(body) * (NUM_BODIES+1));/*using bodies[NUM_BODIES] to store cursor body*/
if(method == '0')
{
cout<<"Initialized by random position and mass"<<endl;
for (auto i = 0; i < NUM_BODIES+1; i++)
{
bodies[i].m = getRandom(0,1.0);
bodies[i].x = getRandom(-1,1);
bodies[i].y = getRandom(-1,1);
bodies[i].vx = 0;
bodies[i].vy = 0;
}
}
else
{
cout<<"Initialized by random velocity and mass"<<endl;
for (auto i = 0; i < NUM_BODIES+1; i++)
{
bodies[i].m = getRandom(0,1.0);
bodies[i].x = 0;
bodies[i].y = 0;
auto radius = getRandom(0,0.085);
auto theta = getRandom(0,2*PI);
bodies[i].vx = radius*cos(theta);
bodies[i].vy = radius*sin(theta);
}
}
return bodies;
}
unsigned char* initCanvas()
{
    /*Using CUDA Unified Memory to automatically handle memory operations, so that the rasterize routine can run in parallel on the device*/
unsigned char* buffer;
cudaMallocManaged((void **) &buffer, sizeof(unsigned char) * SCREEN_WIDTH * SCREEN_HEIGHT * 3);
return buffer;
}
__global__
void rasterizeKernel(unsigned char* buffer, body* bodies)
{
/*rasterize kernel*/
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= NUM_BODIES)
{
return;
}
int x = (int) lround(SCREEN_WIDTH * ((bodies[index].x + 1) / 2.0));
int y = (int) lround(SCREEN_HEIGHT * ((bodies[index].y + 1) / 2.0));
buffer[x * SCREEN_WIDTH * 3 + y * 3 + 0] = 0XFF;
buffer[x * SCREEN_WIDTH * 3 + y * 3 + 1] = 0XFE;
buffer[x * SCREEN_WIDTH * 3 + y * 3 + 2] = 0XE5;
}
__global__
void trailKernel(unsigned char* buffer)
{
    /*trail effect kernel: fades the previous frame so that moving bodies leave trails*/
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= SCREEN_WIDTH * SCREEN_HEIGHT * 3)
{
return;
}
buffer[index] = (unsigned char)(buffer[index] * 0.8);
}
void rasterize(struct body *bodies, unsigned char *buffer)
{
/**
    rasterize the bodies, mapping positions in [-1,1] x [-1,1] to screen pixel coordinates
Note: You can change the code for better visualization
As the following code can be parallelized, you can optimize this routine with CUDA.
\param bodies A collection of bodies (located on the device).
\param buffer the RGB buffer for screen display (located on the host).
*/
    /*trail effect*/
dim3 blockSize(BLOCKSIZE);
dim3 gridSize((SCREEN_WIDTH * SCREEN_HEIGHT * 3 + blockSize.x - 1) / blockSize.x);
trailKernel<<< gridSize, blockSize>>>(buffer);
cudaDeviceSynchronize();
/*rasterize*/
dim3 gridSize2((NUM_BODIES + blockSize.x - 1) / blockSize.x);
rasterizeKernel<<< gridSize2, blockSize>>>(buffer,bodies);
cudaDeviceSynchronize();
}
void freeCudaMem(void* p)
{
    /*free manually allocated device memory*/
cudaFree(p);
}
__device__
float2 bodyBodyInteraction(body selfBody, body body2, float2 acc)
{
    /*calculate the acceleration exerted on selfBody by body2*/
float2 r;
r.x = body2.x - selfBody.x;
r.y = body2.y - selfBody.y;
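    // eps is a softening term that keeps the force finite when two bodies (or a body and the cursor) nearly coincide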
float disSquare = r.x * r.x + r.y * r.y + eps * eps;
float tmp = G * body2.m * rsqrt(disSquare * disSquare * disSquare);
acc.x += r.x * tmp;
acc.y += r.y * tmp;
return acc;
}
__device__
float2 computeBodyAccel(body selfBody, body* bodies, int numTiles)
{
    /*compute the body's acceleration due to all other bodies, one shared-memory tile at a time*/
body *sharedBodies = SharedMemory<body>();
float2 acc; acc.x =0; acc.y=0;
for (int tile = 0; tile < numTiles; tile++)
{
sharedBodies[threadIdx.x] = bodies[tile * blockDim.x + threadIdx.x];
__syncthreads();
#pragma unroll 128
for (unsigned int counter = 0; counter < blockDim.x; counter++)
{
acc = bodyBodyInteraction(selfBody, sharedBodies[counter], acc);
}
__syncthreads();
}
return acc;
}
__global__ void
integrateBodies(body* bodies, float deltaTime,int numTiles)
{
/*N-Body kernel*/
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= NUM_BODIES)
{
return;
}
bodies[index].x += bodies[index].vx *deltaTime;
bodies[index].y += bodies[index].vy *deltaTime;
body b1 = bodies[index];
float2 accel = computeBodyAccel(b1,bodies,numTiles);
bodies[index].vx += accel.x * deltaTime;
bodies[index].vy += accel.y * deltaTime;
// solve boundary problem
if (bodies[index].x > 1 || bodies[index].x < -1)
{
bodies[index].vx *= (-collision_damping);
bodies[index].x = (bodies[index].x > 0 ? 1.0 : -1.0);
}
if (bodies[index].y > 1 || bodies[index].y < -1)
{
bodies[index].vy *= (-collision_damping);
bodies[index].y = (bodies[index].y > 0 ? 1.0 : -1.0);
}
}
void NBodyTimestepCuda(struct body *bodies, float rx, float ry, bool cursor)
{
/**
Compute a time step on the CUDA device.
TODO: correctly manage the device memory, compute the time step with proper block/threads
\param bodies A collection of bodies (located on the device).
\param rx position x of the cursor.
\param ry position y of the cursor.
\param cursor Enable the mouse interaction if true (adding a weight = cursor_weight body in the computation).
*/
float timeStep = 1;
/*cursor body*/
bodies[NUM_BODIES].m = cursor_weight * (float) cursor; // convert bool condition to scale multiply to avoid branch selection
bodies[NUM_BODIES].x = rx;
bodies[NUM_BODIES].y = ry;
bodies[NUM_BODIES].vx = 0;
bodies[NUM_BODIES].vy = 0;
/*N-Body cuda kernel exec*/
int numTiles = (NUM_BODIES + BLOCKSIZE) / BLOCKSIZE;/*add 1 to store cursor body*/
dim3 blockSize(BLOCKSIZE);
dim3 gridSize(numTiles);
int sharedMemSize = BLOCKSIZE * sizeof(body);
integrateBodies<<< gridSize, blockSize, sharedMemSize >>>(bodies,timeStep,numTiles);
cudaDeviceSynchronize();
}
|
02831d82ced79ffa2dd4a854171d0331e8733a13.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define BLOCK_SIZE 512
#define _check(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
printf("Failed to run stmt ", #stmt); \
printf("Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
int main(int argc, char** argv) {
if(argc != 2) {
printf("Usage: ./scan <input_data_file>\n");
return -1;
}
return 0;
}
| 02831d82ced79ffa2dd4a854171d0331e8733a13.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define BLOCK_SIZE 512
#define _check(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
printf("Failed to run stmt ", #stmt); \
printf("Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
int main(int argc, char** argv) {
if(argc != 2) {
printf("Usage: ./scan <input_data_file>\n");
return -1;
}
return 0;
}
|
fde0b297125d71ffdcdbd8fcfc8a5ae008ef4f0a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define m 4
#define n 4
#define k 4
extern "C" void mat_mul_func(int size, float *a, float *b, float *c){
hipError_t cudaStat; //hipMalloc status
hipblasStatus_t stat; //CUBLAS functions status
hipblasHandle_t handle; //CUBLAS context
int i, j;
//on the device
float* d_a;
float* d_b;
float* d_c;
cudaStat = hipMalloc((void**)&d_a, m*k*sizeof(a));
cudaStat = hipMalloc((void**)&d_b, k*n*sizeof(b));
cudaStat = hipMalloc((void**)&d_c, m*n*sizeof(c));
stat = hipblasCreate(&handle); //initialize CUBLAS context
//copy matrixes from the host to the device
stat = hipblasSetMatrix(m, k, sizeof(a), a, m, d_a, m);
stat = hipblasSetMatrix(k, n, sizeof(*b), b, k, d_b, k);
stat = hipblasSetMatrix(m, n, sizeof(*c), c, m, d_c, m);
float al = 1.0f;
float bet = 0.0f;
stat = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, &al, d_a, m, d_b, k, &bet, d_c, m);
stat = hipblasGetMatrix(m, n, sizeof(*c), d_c, m, c, m);
printf("c after Sgemm : \n");
for(i=0; i<m; i++){
for(j=0; j<n; j++){
printf("%7.0f", c[IDX2C(i, j, m)]);
}
printf("\n");
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipblasDestroy(handle);
}
int main(){
return 0;
}
| fde0b297125d71ffdcdbd8fcfc8a5ae008ef4f0a.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define m 4
#define n 4
#define k 4
extern "C" void mat_mul_func(int size, float *a, float *b, float *c){
cudaError_t cudaStat; //cudaMalloc status
cublasStatus_t stat; //CUBLAS functions status
cublasHandle_t handle; //CUBLAS context
int i, j;
//on the device
float* d_a;
float* d_b;
float* d_c;
cudaStat = cudaMalloc((void**)&d_a, m*k*sizeof(a));
cudaStat = cudaMalloc((void**)&d_b, k*n*sizeof(b));
cudaStat = cudaMalloc((void**)&d_c, m*n*sizeof(c));
stat = cublasCreate(&handle); //initialize CUBLAS context
//copy matrixes from the host to the device
stat = cublasSetMatrix(m, k, sizeof(a), a, m, d_a, m);
stat = cublasSetMatrix(k, n, sizeof(*b), b, k, d_b, k);
stat = cublasSetMatrix(m, n, sizeof(*c), c, m, d_c, m);
float al = 1.0f;
float bet = 0.0f;
stat = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &al, d_a, m, d_b, k, &bet, d_c, m);
stat = cublasGetMatrix(m, n, sizeof(*c), d_c, m, c, m);
printf("c after Sgemm : \n");
for(i=0; i<m; i++){
for(j=0; j<n; j++){
printf("%7.0f", c[IDX2C(i, j, m)]);
}
printf("\n");
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cublasDestroy(handle);
}
int main(){
return 0;
}
|
3fe11f7729aa16c7f41f461ae9c3c3fe1836d781.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <malloc.h>
#include <hip/hip_runtime.h>
#define SIZE 1024
__global__ void VectorAdd(int *a, int *b, int *c)
{
int i=threadIdx.x;
if(i<SIZE)
c[i]=a[i]+b[i];
}
int main()
{
clock_t start = clock();
int *a, *b, *c;
int *d_a, *d_b, *d_c;
a = (int *)malloc(SIZE*sizeof(int));
b = (int *)malloc(SIZE*sizeof(int));
c = (int *)malloc(SIZE*sizeof(int));
hipMalloc(&d_a, SIZE*sizeof(int));
hipMalloc(&d_b, SIZE*sizeof(int));
hipMalloc(&d_c, SIZE*sizeof(int));
for(int i=0;i<SIZE;i++)
{
a[i]=i;
b[i]=i;
c[i]=0;
}
hipMemcpy(d_a, a, SIZE*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, SIZE*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_c, c, SIZE*sizeof(int), hipMemcpyHostToDevice);
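    // a single block of SIZE threads is launched; this only works while SIZE stays within the per-block thread limit (1024)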
hipLaunchKernelGGL(( VectorAdd), dim3(1),dim3(SIZE), 0, 0, d_a, d_b, d_c);
hipMemcpy(c, d_c, SIZE*sizeof(int), hipMemcpyDeviceToHost);
for(int i=0;i<10; i++)
printf("%d ",c[i]);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
printf("Tiempo transcurrido: %f",((double)clock() - start) / CLOCKS_PER_SEC);
return 0;
}
| 3fe11f7729aa16c7f41f461ae9c3c3fe1836d781.cu | #include <stdio.h>
#include <malloc.h>
#include <cuda.h>
#define SIZE 1024
__global__ void VectorAdd(int *a, int *b, int *c)
{
int i=threadIdx.x;
if(i<SIZE)
c[i]=a[i]+b[i];
}
int main()
{
clock_t start = clock();
int *a, *b, *c;
int *d_a, *d_b, *d_c;
a = (int *)malloc(SIZE*sizeof(int));
b = (int *)malloc(SIZE*sizeof(int));
c = (int *)malloc(SIZE*sizeof(int));
cudaMalloc(&d_a, SIZE*sizeof(int));
cudaMalloc(&d_b, SIZE*sizeof(int));
cudaMalloc(&d_c, SIZE*sizeof(int));
for(int i=0;i<SIZE;i++)
{
a[i]=i;
b[i]=i;
c[i]=0;
}
cudaMemcpy(d_a, a, SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, SIZE*sizeof(int), cudaMemcpyHostToDevice);
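    // a single block of SIZE threads is launched; this only works while SIZE stays within the per-block thread limit (1024)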
VectorAdd<<<1,SIZE>>>(d_a, d_b, d_c);
cudaMemcpy(c, d_c, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
for(int i=0;i<10; i++)
printf("%d ",c[i]);
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
printf("Tiempo transcurrido: %f",((double)clock() - start) / CLOCKS_PER_SEC);
return 0;
}
|
68d759c221729610b3100c83912fe8518062e77b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/ndarray/ndarray_util.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void BroadcastPReluForwardGpu(const int32_t elem_cnt, const int32_t alpha_size,
const int32_t inner_size, const T* x, const T* alpha,
T* y) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T alpha_i = alpha[(i / inner_size) % alpha_size];
y[i] = x_i > 0 ? x_i : x_i * alpha_i;
}
}
template<typename T>
__global__ void BroadcastPReluBackwardGpu(const int32_t elem_cnt, const int32_t alpha_size,
const int32_t inner_size, const T* x, const T* alpha,
const T* dy, T* dx, T* alpha_diff) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T dy_i = dy[i];
const T alpha_i = alpha[(i / inner_size) % alpha_size];
T dx_i = 0;
T alpha_diff_i = 0;
if (x_i > 0) {
dx_i = dy_i;
alpha_diff_i = 0;
} else {
dx_i = dy_i * alpha_i;
alpha_diff_i = dy_i * x_i;
}
dx[i] = dx_i;
alpha_diff[i] = alpha_diff_i;
}
}
template<typename T>
__global__ void ElemwisePReluForwardGpu(const int32_t elem_cnt, const T* x, const T* alpha, T* y) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T alpha_i = alpha[i];
y[i] = x_i > 0 ? x_i : x_i * alpha_i;
}
}
template<typename T>
__global__ void ElemwisePReluBackwardGpu(const int32_t elem_cnt, const T* x, const T* alpha,
const T* dy, T* dx, T* alpha_diff) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T dy_i = dy[i];
const T alpha_i = alpha[i];
T dx_i = 0;
T alpha_diff_i = 0;
if (x_i > 0) {
dx_i = dy_i;
alpha_diff_i = 0;
} else {
dx_i = dy_i * alpha_i;
alpha_diff_i = dy_i * x_i;
}
dx[i] = dx_i;
alpha_diff[i] = alpha_diff_i;
}
}
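// Returns true when the non-unit dimensions of alpha cover one contiguous run of x's dimensions, so the broadcast can be expressed as an (outer, alpha, inner) indexing pattern without materializing alpha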
bool IsAlphaShapeContiguous(const ShapeView& alpha_shape, const ShapeView& x_shape) {
if (alpha_shape.elem_cnt() == 1) { return true; }
int64_t begin_idx = -1;
for (int64_t i = 0; i < alpha_shape.NumAxes(); ++i) {
if (alpha_shape.At(i) != 1) {
begin_idx = i;
break;
}
}
CHECK_NE(begin_idx, -1);
int64_t end_idx = -1;
for (int64_t i = alpha_shape.NumAxes(); i > 0; --i) {
if (alpha_shape.At(i - 1) != 1) {
end_idx = i;
break;
}
}
CHECK_NE(end_idx, -1);
if (alpha_shape.elem_cnt() == x_shape.Count(begin_idx + 1, end_idx + 1)) {
return true;
} else {
return false;
}
}
int32_t GetOuterSize(const ShapeView& alpha_shape, const ShapeView& x_shape) {
int32_t outer_size = x_shape.At(0);
for (int32_t i = 0; i < alpha_shape.NumAxes(); ++i) {
if (alpha_shape.At(i) == 1) {
outer_size *= x_shape.At(i + 1);
} else {
break;
}
}
return outer_size;
}
} // namespace
template<typename T>
class TfGpuPReluKernel final : public user_op::OpKernel {
public:
TfGpuPReluKernel() = default;
~TfGpuPReluKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* alpha = ctx->Tensor4ArgNameAndIndex("alpha", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const int32_t elem_cnt = x->shape().elem_cnt();
if (IsAlphaShapeContiguous(alpha->shape(), x->shape())) {
const int32_t outer_size = GetOuterSize(alpha->shape(), x->shape());
const int32_t alpha_size = alpha->shape().elem_cnt();
const int32_t inner_size = elem_cnt / outer_size / alpha_size;
hipLaunchKernelGGL(( BroadcastPReluForwardGpu<T>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(),
elem_cnt, alpha_size, inner_size, x->dptr<T>(), alpha->dptr<T>(), y->mut_dptr<T>());
} else {
user_op::Tensor* broadcasted_alpha = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const Shape& left_extended_shape =
CreateLeftExtendedShape(ShapeView(alpha->shape()), x->shape().NumAxes());
NdarrayUtil<DeviceType::kGPU, T>::BroadcastTo(
ctx->device_ctx(), XpuVarNdarray<T>(x->shape(), broadcasted_alpha->mut_dptr<T>()),
XpuVarNdarray<const T>(left_extended_shape, alpha->dptr<T>()));
hipLaunchKernelGGL(( ElemwisePReluForwardGpu<T>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(),
elem_cnt, x->dptr<T>(), broadcasted_alpha->dptr<T>(), y->mut_dptr<T>());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_TF_GPU_PRELU_KERNEL(dtype) \
REGISTER_USER_KERNEL("tf_prelu") \
.SetCreateFn<TfGpuPReluKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape& in_shape = ctx->InputShape("x", 0); \
const Shape& alpha_shape = ctx->InputShape("alpha", 0); \
const int64_t tmp_buffer_size = \
IsAlphaShapeContiguous(alpha_shape, in_shape) \
? 0 \
: GetCudaAlignedSize(in_shape.elem_cnt() * sizeof(dtype)); \
return tmp_buffer_size; \
});
REGISTER_TF_GPU_PRELU_KERNEL(float)
REGISTER_TF_GPU_PRELU_KERNEL(double)
template<typename T>
class TfGpuPReluGradKernel final : public user_op::OpKernel {
public:
TfGpuPReluGradKernel() = default;
~TfGpuPReluGradKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* alpha = ctx->Tensor4ArgNameAndIndex("alpha", 0);
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
user_op::Tensor* alpha_diff = ctx->Tensor4ArgNameAndIndex("alpha_diff", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const int32_t elem_cnt = x->shape().elem_cnt();
T* broadcasted_alpha_diff = tmp_buffer->mut_dptr<T>();
T* reduce_sum_tmp_buf = reinterpret_cast<T*>(tmp_buffer->mut_dptr<char>()
+ GetCudaAlignedSize(elem_cnt * sizeof(T)));
const Shape& left_extended_shape =
CreateLeftExtendedShape(ShapeView(alpha->shape()), x->shape().NumAxes());
if (IsAlphaShapeContiguous(alpha->shape(), x->shape())) {
const int32_t outer_size = GetOuterSize(alpha->shape(), x->shape());
const int32_t alpha_size = alpha->shape().elem_cnt();
const int32_t inner_size = elem_cnt / outer_size / alpha_size;
hipLaunchKernelGGL(( BroadcastPReluBackwardGpu<T>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(),
elem_cnt, alpha_size, inner_size, x->dptr<T>(), alpha->dptr<T>(), dy->dptr<T>(),
dx->mut_dptr<T>(), broadcasted_alpha_diff);
} else {
T* broadcasted_alpha = reinterpret_cast<T*>(tmp_buffer->mut_dptr<char>()
+ 2 * GetCudaAlignedSize(elem_cnt * sizeof(T)));
NdarrayUtil<DeviceType::kGPU, T>::BroadcastTo(
ctx->device_ctx(), XpuVarNdarray<T>(x->shape(), broadcasted_alpha),
XpuVarNdarray<const T>(left_extended_shape, alpha->dptr<T>()));
hipLaunchKernelGGL(( ElemwisePReluBackwardGpu<T>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(),
elem_cnt, x->dptr<T>(), broadcasted_alpha, dy->dptr<T>(), dx->mut_dptr<T>(),
broadcasted_alpha_diff);
}
NdarrayUtil<DeviceType::kGPU, T>::ReduceSum(
ctx->device_ctx(), XpuVarNdarray<T>(left_extended_shape, alpha_diff->mut_dptr<T>()),
XpuVarNdarray<const T>(x->shape(), broadcasted_alpha_diff),
XpuVarNdarray<T>(x->shape(), reduce_sum_tmp_buf));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_TF_GPU_PRELU_GRAD_KERNEL(dtype) \
REGISTER_USER_KERNEL("tf_prelu_grad") \
.SetCreateFn<TfGpuPReluGradKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape& in_shape = ctx->InputShape("x", 0); \
const Shape& alpha_shape = ctx->InputShape("alpha", 0); \
const int64_t tmp_buffer_size = \
IsAlphaShapeContiguous(alpha_shape, in_shape) \
? 2 * GetCudaAlignedSize(in_shape.elem_cnt() * sizeof(dtype)) \
: 3 * GetCudaAlignedSize(in_shape.elem_cnt() * sizeof(dtype)); \
return tmp_buffer_size; \
});
REGISTER_TF_GPU_PRELU_GRAD_KERNEL(float)
REGISTER_TF_GPU_PRELU_GRAD_KERNEL(double)
} // namespace oneflow
| 68d759c221729610b3100c83912fe8518062e77b.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/ndarray/ndarray_util.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void BroadcastPReluForwardGpu(const int32_t elem_cnt, const int32_t alpha_size,
const int32_t inner_size, const T* x, const T* alpha,
T* y) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T alpha_i = alpha[(i / inner_size) % alpha_size];
y[i] = x_i > 0 ? x_i : x_i * alpha_i;
}
}
template<typename T>
__global__ void BroadcastPReluBackwardGpu(const int32_t elem_cnt, const int32_t alpha_size,
const int32_t inner_size, const T* x, const T* alpha,
const T* dy, T* dx, T* alpha_diff) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T dy_i = dy[i];
const T alpha_i = alpha[(i / inner_size) % alpha_size];
T dx_i = 0;
T alpha_diff_i = 0;
if (x_i > 0) {
dx_i = dy_i;
alpha_diff_i = 0;
} else {
dx_i = dy_i * alpha_i;
alpha_diff_i = dy_i * x_i;
}
dx[i] = dx_i;
alpha_diff[i] = alpha_diff_i;
}
}
template<typename T>
__global__ void ElemwisePReluForwardGpu(const int32_t elem_cnt, const T* x, const T* alpha, T* y) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T alpha_i = alpha[i];
y[i] = x_i > 0 ? x_i : x_i * alpha_i;
}
}
template<typename T>
__global__ void ElemwisePReluBackwardGpu(const int32_t elem_cnt, const T* x, const T* alpha,
const T* dy, T* dx, T* alpha_diff) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T dy_i = dy[i];
const T alpha_i = alpha[i];
T dx_i = 0;
T alpha_diff_i = 0;
if (x_i > 0) {
dx_i = dy_i;
alpha_diff_i = 0;
} else {
dx_i = dy_i * alpha_i;
alpha_diff_i = dy_i * x_i;
}
dx[i] = dx_i;
alpha_diff[i] = alpha_diff_i;
}
}
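// Returns true when the non-unit dimensions of alpha cover one contiguous run of x's dimensions, so the broadcast can be expressed as an (outer, alpha, inner) indexing pattern without materializing alpha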
bool IsAlphaShapeContiguous(const ShapeView& alpha_shape, const ShapeView& x_shape) {
if (alpha_shape.elem_cnt() == 1) { return true; }
int64_t begin_idx = -1;
for (int64_t i = 0; i < alpha_shape.NumAxes(); ++i) {
if (alpha_shape.At(i) != 1) {
begin_idx = i;
break;
}
}
CHECK_NE(begin_idx, -1);
int64_t end_idx = -1;
for (int64_t i = alpha_shape.NumAxes(); i > 0; --i) {
if (alpha_shape.At(i - 1) != 1) {
end_idx = i;
break;
}
}
CHECK_NE(end_idx, -1);
if (alpha_shape.elem_cnt() == x_shape.Count(begin_idx + 1, end_idx + 1)) {
return true;
} else {
return false;
}
}
int32_t GetOuterSize(const ShapeView& alpha_shape, const ShapeView& x_shape) {
int32_t outer_size = x_shape.At(0);
for (int32_t i = 0; i < alpha_shape.NumAxes(); ++i) {
if (alpha_shape.At(i) == 1) {
outer_size *= x_shape.At(i + 1);
} else {
break;
}
}
return outer_size;
}
} // namespace
template<typename T>
class TfGpuPReluKernel final : public user_op::OpKernel {
public:
TfGpuPReluKernel() = default;
~TfGpuPReluKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* alpha = ctx->Tensor4ArgNameAndIndex("alpha", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const int32_t elem_cnt = x->shape().elem_cnt();
if (IsAlphaShapeContiguous(alpha->shape(), x->shape())) {
const int32_t outer_size = GetOuterSize(alpha->shape(), x->shape());
const int32_t alpha_size = alpha->shape().elem_cnt();
const int32_t inner_size = elem_cnt / outer_size / alpha_size;
BroadcastPReluForwardGpu<T><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
ctx->device_ctx()->cuda_stream()>>>(
elem_cnt, alpha_size, inner_size, x->dptr<T>(), alpha->dptr<T>(), y->mut_dptr<T>());
} else {
user_op::Tensor* broadcasted_alpha = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const Shape& left_extended_shape =
CreateLeftExtendedShape(ShapeView(alpha->shape()), x->shape().NumAxes());
NdarrayUtil<DeviceType::kGPU, T>::BroadcastTo(
ctx->device_ctx(), XpuVarNdarray<T>(x->shape(), broadcasted_alpha->mut_dptr<T>()),
XpuVarNdarray<const T>(left_extended_shape, alpha->dptr<T>()));
ElemwisePReluForwardGpu<T><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
ctx->device_ctx()->cuda_stream()>>>(
elem_cnt, x->dptr<T>(), broadcasted_alpha->dptr<T>(), y->mut_dptr<T>());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_TF_GPU_PRELU_KERNEL(dtype) \
REGISTER_USER_KERNEL("tf_prelu") \
.SetCreateFn<TfGpuPReluKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape& in_shape = ctx->InputShape("x", 0); \
const Shape& alpha_shape = ctx->InputShape("alpha", 0); \
const int64_t tmp_buffer_size = \
IsAlphaShapeContiguous(alpha_shape, in_shape) \
? 0 \
: GetCudaAlignedSize(in_shape.elem_cnt() * sizeof(dtype)); \
return tmp_buffer_size; \
});
REGISTER_TF_GPU_PRELU_KERNEL(float)
REGISTER_TF_GPU_PRELU_KERNEL(double)
template<typename T>
class TfGpuPReluGradKernel final : public user_op::OpKernel {
public:
TfGpuPReluGradKernel() = default;
~TfGpuPReluGradKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* alpha = ctx->Tensor4ArgNameAndIndex("alpha", 0);
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
user_op::Tensor* alpha_diff = ctx->Tensor4ArgNameAndIndex("alpha_diff", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const int32_t elem_cnt = x->shape().elem_cnt();
T* broadcasted_alpha_diff = tmp_buffer->mut_dptr<T>();
T* reduce_sum_tmp_buf = reinterpret_cast<T*>(tmp_buffer->mut_dptr<char>()
+ GetCudaAlignedSize(elem_cnt * sizeof(T)));
const Shape& left_extended_shape =
CreateLeftExtendedShape(ShapeView(alpha->shape()), x->shape().NumAxes());
if (IsAlphaShapeContiguous(alpha->shape(), x->shape())) {
const int32_t outer_size = GetOuterSize(alpha->shape(), x->shape());
const int32_t alpha_size = alpha->shape().elem_cnt();
const int32_t inner_size = elem_cnt / outer_size / alpha_size;
BroadcastPReluBackwardGpu<T><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
ctx->device_ctx()->cuda_stream()>>>(
elem_cnt, alpha_size, inner_size, x->dptr<T>(), alpha->dptr<T>(), dy->dptr<T>(),
dx->mut_dptr<T>(), broadcasted_alpha_diff);
} else {
T* broadcasted_alpha = reinterpret_cast<T*>(tmp_buffer->mut_dptr<char>()
+ 2 * GetCudaAlignedSize(elem_cnt * sizeof(T)));
NdarrayUtil<DeviceType::kGPU, T>::BroadcastTo(
ctx->device_ctx(), XpuVarNdarray<T>(x->shape(), broadcasted_alpha),
XpuVarNdarray<const T>(left_extended_shape, alpha->dptr<T>()));
ElemwisePReluBackwardGpu<T><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
ctx->device_ctx()->cuda_stream()>>>(
elem_cnt, x->dptr<T>(), broadcasted_alpha, dy->dptr<T>(), dx->mut_dptr<T>(),
broadcasted_alpha_diff);
}
NdarrayUtil<DeviceType::kGPU, T>::ReduceSum(
ctx->device_ctx(), XpuVarNdarray<T>(left_extended_shape, alpha_diff->mut_dptr<T>()),
XpuVarNdarray<const T>(x->shape(), broadcasted_alpha_diff),
XpuVarNdarray<T>(x->shape(), reduce_sum_tmp_buf));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_TF_GPU_PRELU_GRAD_KERNEL(dtype) \
REGISTER_USER_KERNEL("tf_prelu_grad") \
.SetCreateFn<TfGpuPReluGradKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape& in_shape = ctx->InputShape("x", 0); \
const Shape& alpha_shape = ctx->InputShape("alpha", 0); \
const int64_t tmp_buffer_size = \
IsAlphaShapeContiguous(alpha_shape, in_shape) \
? 2 * GetCudaAlignedSize(in_shape.elem_cnt() * sizeof(dtype)) \
: 3 * GetCudaAlignedSize(in_shape.elem_cnt() * sizeof(dtype)); \
return tmp_buffer_size; \
});
REGISTER_TF_GPU_PRELU_GRAD_KERNEL(float)
REGISTER_TF_GPU_PRELU_GRAD_KERNEL(double)
} // namespace oneflow
|
506116a15a8a812843b5dbd084a6b20f38d54d97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
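// computeFluxes_gpu: HLLC-style approximate Riemann flux for the shallow-water equations, with bathymetry source terms, wet/dry handling and reflective treatment of boundary edges; the largest characteristic speed across the edge is reported via maxEdgeEigenvalues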
__device__ void computeFluxes_gpu( const float *cellLeft, const float *cellRight,
const float *alphaleft, const float *alpharight,
const float *edgeLength, const float *edgeNormals,
const float *leftcellCenters, const float *rightcellCenters,
const float *edgeCenters,
const float *leftGradient, const float *rightGradient,
const int *isRightBoundary,
float *bathySource, float *out,
float *maxEdgeEigenvalues, const float *zmin) {
float leftCellValues[4];
float rightCellValues[4];
float InterfaceBathy;
float zL, zR;
float uR,vR,uL,vL;
leftCellValues[0] = cellLeft[0];
leftCellValues[1] = cellLeft[1];
leftCellValues[2] = cellLeft[2];
leftCellValues[3] = cellLeft[3];
float dxl, dyl, dxr, dyr;
dxl = (edgeCenters[0] - leftcellCenters[0]);
dyl = (edgeCenters[1] - leftcellCenters[1]);
dxr = (edgeCenters[0] - rightcellCenters[0]);
dyr = (edgeCenters[1] - rightcellCenters[1]);
leftCellValues[0] += alphaleft[0] * ((dxl * leftGradient[0])+(dyl * leftGradient[1]));
leftCellValues[1] += alphaleft[0] * ((dxl * leftGradient[2])+(dyl * leftGradient[3]));
leftCellValues[2] += alphaleft[0] * ((dxl * leftGradient[4])+(dyl * leftGradient[5]));
leftCellValues[3] += alphaleft[0] * ((dxl * leftGradient[6])+(dyl * leftGradient[7]));
if (leftCellValues[0] >= 1e-3){
uL = leftCellValues[1]/leftCellValues[0];
vL = leftCellValues[2]/leftCellValues[0];
} else {
uL = 0.0f;
vL = 0.0f;
}
zL = cellLeft[3] - cellLeft[0];
zR = cellRight[3] - cellRight[0];
if (!*isRightBoundary) {
rightCellValues[0] = cellRight[0];
rightCellValues[1] = cellRight[1];
rightCellValues[2] = cellRight[2];
rightCellValues[3] = cellRight[3];
rightCellValues[0] += alpharight[0] * ((dxr * rightGradient[0])+(dyr * rightGradient[1]));
rightCellValues[1] += alpharight[0] * ((dxr * rightGradient[2])+(dyr * rightGradient[3]));
rightCellValues[2] += alpharight[0] * ((dxr * rightGradient[4])+(dyr * rightGradient[5]));
rightCellValues[3] += alpharight[0] * ((dxr * rightGradient[6])+(dyr * rightGradient[7]));
if (rightCellValues[0] >= 1e-3){
uR = rightCellValues[1]/rightCellValues[0];
vR = rightCellValues[2]/rightCellValues[0];
} else {
uR = 0.0f;
vR = 0.0f;
}
} else {
float nx = edgeNormals[0];
float ny = edgeNormals[1];
float inNormalVelocity = uL * nx + vL * ny;
float inTangentVelocity = -1.0f * uL * ny + vL * nx;
float outNormalVelocity = 0.0f;
float outTangentVelocity = 0.0f;
float wet = (fabs(*zmin) - zL) > 0.0f ? (fabs(*zmin) - zL) : EPS_cuda;
float critical = sqrt(uL*uL + vL*vL);
outTangentVelocity = inTangentVelocity;
if (critical < sqrt(g_cuda*leftCellValues[0])){
rightCellValues[0] = wet;
rightCellValues[3] = wet + zL;
outNormalVelocity = 0.0f;
} else {
rightCellValues[0] = leftCellValues[0];
rightCellValues[3] = leftCellValues[3];
outNormalVelocity = inNormalVelocity;
}
uR = outNormalVelocity * nx - outTangentVelocity * ny;
vR = outNormalVelocity * ny + outTangentVelocity * nx;
rightCellValues[1] = uR*rightCellValues[0];
rightCellValues[2] = vR*rightCellValues[0];
zR = zL;
}
rightCellValues[3] -= rightCellValues[0];
leftCellValues[3] -= leftCellValues[0];
InterfaceBathy = leftCellValues[3] > rightCellValues[3] ? leftCellValues[3] : rightCellValues[3];
bathySource[0] =0.5f * g_cuda * (leftCellValues[0]*leftCellValues[0]);
bathySource[1] =0.5f * g_cuda * (rightCellValues[0]*rightCellValues[0]);
float hL = (leftCellValues[0] + leftCellValues[3] - InterfaceBathy);
hL = hL > 0.0f ? hL : 0.0f;
float hR = (rightCellValues[0] + rightCellValues[3] - InterfaceBathy);
hR = hR > 0.0f ? hR : 0.0f;
bathySource[0] -= .5f * g_cuda * (hL * hL);
bathySource[1] -= .5f * g_cuda * (hR * hR);
bathySource[2] = -.5f * g_cuda *(leftCellValues[0] + cellLeft[0])*(leftCellValues[3] - zL);
bathySource[3] = -.5f * g_cuda *(rightCellValues[0] + cellRight[0])*(rightCellValues[3] - zR);
bathySource[0] *= *edgeLength;
bathySource[1] *= *edgeLength;
bathySource[2] *= *edgeLength;
bathySource[3] *= *edgeLength;
float cL = sqrt(g_cuda * hL);
cL = cL > 0.0f ? cL : 0.0f;
float cR = sqrt(g_cuda * hR);
cR = cR > 0.0f ? cR : 0.0f;
float uLn = uL * edgeNormals[0] + vL * edgeNormals[1];
float uRn = uR * edgeNormals[0] + vR * edgeNormals[1];
float unStar = 0.5f * (uLn + uRn) + (cL-cR);
float cStar = 0.5f * (cL + cR) - 0.25f* (uRn-uLn);
float sL = (uLn - cL) < (unStar - cStar) ? (uLn - cL) : (unStar - cStar);
float sR = (uRn + cR) > (unStar + cStar) ? (uRn + cR) : (unStar + cStar);
float sStar;
sStar = (sL*hR*(uRn - sR) - sR*hL*(uLn - sL))/
(hR*(uRn - sR) - hL*(uLn - sL));
if ((leftCellValues[0] <= EPS_cuda) && (rightCellValues[0] > EPS_cuda)) {
sL = uRn - 2.0f*cR;
sR = uRn + cR;
sStar = sL;
}
if ((rightCellValues[0] <= EPS_cuda) && (leftCellValues[0] > EPS_cuda)) {
sR = uLn + 2.0f*cL;
sL = uLn - cL;
sStar = sR;
}
float uLp = vL*edgeNormals[0] - uL*edgeNormals[1];
float uRp = vR*edgeNormals[0] - uR*edgeNormals[1];
float LeftFluxes_H, LeftFluxes_N, LeftFluxes_U, LeftFluxes_V;
float HuDotN = (hL*uL) * edgeNormals[0] + (hL*vL) * edgeNormals[1];
LeftFluxes_H = HuDotN;
LeftFluxes_U = HuDotN * uL;
LeftFluxes_V = HuDotN * vL;
LeftFluxes_N = HuDotN * uLn;
LeftFluxes_U += (.5f * g_cuda * edgeNormals[0] ) * ( hL * hL );
LeftFluxes_V += (.5f * g_cuda * edgeNormals[1] ) * ( hL * hL );
LeftFluxes_N += (.5f * g_cuda ) * ( hL * hL );
float RightFluxes_H,RightFluxes_N, RightFluxes_U, RightFluxes_V;
HuDotN = (hR*uR) * edgeNormals[0] + (hR*vR) * edgeNormals[1];
RightFluxes_H = HuDotN;
RightFluxes_U = HuDotN * uR;
RightFluxes_V = HuDotN * vR;
RightFluxes_N = HuDotN * uRn;
RightFluxes_U += (.5f * g_cuda * edgeNormals[0] ) * ( hR * hR );
RightFluxes_V += (.5f * g_cuda * edgeNormals[1] ) * ( hR * hR );
RightFluxes_N += (.5f * g_cuda ) * ( hR * hR );
float sLMinus = sL < 0.0f ? sL : 0.0f;
float sRPlus = sR > 0.0f ? sR : 0.0f;
float sRMinussL = sRPlus - sLMinus;
sRMinussL = sRMinussL < EPS_cuda ? EPS_cuda : sRMinussL;
float t1 = sRPlus / sRMinussL;
float t2 = ( -1.0 * sLMinus ) / sRMinussL;
float t3 = ( sRPlus * sLMinus ) / sRMinussL;
float FStar[3];
FStar[0] =
( t1 * LeftFluxes_H ) +
( t2 * RightFluxes_H ) +
( t3 * ( hR - hL ) );
FStar[1] =
( t1 * LeftFluxes_N ) +
( t2 * RightFluxes_N ) +
( t3 * ( (hR * uRn) -
(hL * uLn) ) );
if( sL >= 0.0f) {
out[0] = t1*LeftFluxes_H;
out[1] = t1*LeftFluxes_U;
out[2] = t1*LeftFluxes_V;
} else if ((sL < 0.0f) && (sStar >= 0.0f)){
out[0] = FStar[0];
FStar[2] = FStar[0] * uLp;
out[1] = FStar[1]*edgeNormals[0] - FStar[2]*edgeNormals[1];
out[2] = FStar[1]*edgeNormals[1] + FStar[2]*edgeNormals[0];
} else if((sStar < 0.0f) && (sR >= 0.0f)){
out[0] = FStar[0];
FStar[2] = FStar[0] * uRp;
out[1] = FStar[1]*edgeNormals[0] - FStar[2]*edgeNormals[1];
out[2] = FStar[1]*edgeNormals[1] + FStar[2]*edgeNormals[0];
} else {
out[0] = t2*RightFluxes_H;
out[1] = t2*RightFluxes_U;
out[2] = t2*RightFluxes_V;
}
out[0] *= *edgeLength;
out[1] *= *edgeLength;
out[2] *= *edgeLength;
float maximum = fabs(uLn + cL);
maximum = maximum > fabs(uLn - cL) ? maximum : fabs(uLn - cL);
maximum = maximum > fabs(uRn + cR) ? maximum : fabs(uRn + cR);
maximum = maximum > fabs(uRn - cR) ? maximum : fabs(uRn - cR);
*maxEdgeEigenvalues = maximum;
}
// CUDA kernel function
__global__ void op_cuda_computeFluxes(
const float *__restrict ind_arg0,
const float *__restrict ind_arg1,
const float *__restrict ind_arg2,
const float *__restrict ind_arg3,
const int *__restrict opDat0Map,
const float *__restrict arg4,
const float *__restrict arg5,
const float *__restrict arg8,
const int *__restrict arg11,
float *arg12,
float *arg13,
float *arg14,
const float *arg15,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelem; n+=blockDim.x ){
int map0idx;
int map1idx;
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
//user-supplied kernel call
computeFluxes_gpu(ind_arg0+map0idx*4,
ind_arg0+map1idx*4,
ind_arg1+map0idx*4,
ind_arg1+map1idx*4,
arg4+(n+offset_b)*1,
arg5+(n+offset_b)*2,
ind_arg2+map0idx*2,
ind_arg2+map1idx*2,
arg8+(n+offset_b)*2,
ind_arg3+map0idx*8,
ind_arg3+map1idx*8,
arg11+(n+offset_b)*1,
arg12+(n+offset_b)*4,
arg13+(n+offset_b)*3,
arg14+(n+offset_b)*1,
arg15);
}
}
//host stub function
void op_par_loop_computeFluxes(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7,
op_arg arg8,
op_arg arg9,
op_arg arg10,
op_arg arg11,
op_arg arg12,
op_arg arg13,
op_arg arg14,
op_arg arg15){
float*arg15h = (float *)arg15.data;
int nargs = 16;
op_arg args[16];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
args[8] = arg8;
args[9] = arg9;
args[10] = arg10;
args[11] = arg11;
args[12] = arg12;
args[13] = arg13;
args[14] = arg14;
args[15] = arg15;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(24);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[24].name = name;
OP_kernels[24].count += 1;
int ninds = 4;
int inds[16] = {0,0,1,1,-1,-1,2,2,-1,3,3,-1,-1,-1,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: computeFluxes\n");
}
//get plan
#ifdef OP_PART_SIZE_24
int part_size = OP_PART_SIZE_24;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(float));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg15.data = OP_consts_h + consts_bytes;
arg15.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg15.data)[d] = arg15h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
mvConstArraysToDevice(consts_bytes);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_grouped(nargs, args, 2);
}
#ifdef OP_BLOCK_SIZE_24
int nthread = OP_BLOCK_SIZE_24;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
hipLaunchKernelGGL(( op_cuda_computeFluxes), dim3(nblocks),dim3(nthread), 0, 0,
(float *)arg0.data_d,
(float *)arg2.data_d,
(float *)arg6.data_d,
(float *)arg9.data_d,
arg0.map_data_d,
(float*)arg4.data_d,
(float*)arg5.data_d,
(float*)arg8.data_d,
(int*)arg11.data_d,
(float*)arg12.data_d,
(float*)arg13.data_d,
(float*)arg14.data_d,
(float*)arg15.data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[24].transfer += Plan->transfer;
OP_kernels[24].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
if (OP_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[24].time += wall_t2 - wall_t1;
}
| 506116a15a8a812843b5dbd084a6b20f38d54d97.cu | //
// auto-generated by op2.py
//
//user function
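// computeFluxes_gpu: HLLC-style approximate Riemann flux for the shallow-water equations, with bathymetry source terms, wet/dry handling and reflective treatment of boundary edges; the largest characteristic speed across the edge is reported via maxEdgeEigenvalues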
__device__ void computeFluxes_gpu( const float *cellLeft, const float *cellRight,
const float *alphaleft, const float *alpharight,
const float *edgeLength, const float *edgeNormals,
const float *leftcellCenters, const float *rightcellCenters,
const float *edgeCenters,
const float *leftGradient, const float *rightGradient,
const int *isRightBoundary,
float *bathySource, float *out,
float *maxEdgeEigenvalues, const float *zmin) {
float leftCellValues[4];
float rightCellValues[4];
float InterfaceBathy;
float zL, zR;
float uR,vR,uL,vL;
leftCellValues[0] = cellLeft[0];
leftCellValues[1] = cellLeft[1];
leftCellValues[2] = cellLeft[2];
leftCellValues[3] = cellLeft[3];
float dxl, dyl, dxr, dyr;
dxl = (edgeCenters[0] - leftcellCenters[0]);
dyl = (edgeCenters[1] - leftcellCenters[1]);
dxr = (edgeCenters[0] - rightcellCenters[0]);
dyr = (edgeCenters[1] - rightcellCenters[1]);
leftCellValues[0] += alphaleft[0] * ((dxl * leftGradient[0])+(dyl * leftGradient[1]));
leftCellValues[1] += alphaleft[0] * ((dxl * leftGradient[2])+(dyl * leftGradient[3]));
leftCellValues[2] += alphaleft[0] * ((dxl * leftGradient[4])+(dyl * leftGradient[5]));
leftCellValues[3] += alphaleft[0] * ((dxl * leftGradient[6])+(dyl * leftGradient[7]));
if (leftCellValues[0] >= 1e-3){
uL = leftCellValues[1]/leftCellValues[0];
vL = leftCellValues[2]/leftCellValues[0];
} else {
uL = 0.0f;
vL = 0.0f;
}
zL = cellLeft[3] - cellLeft[0];
zR = cellRight[3] - cellRight[0];
if (!*isRightBoundary) {
rightCellValues[0] = cellRight[0];
rightCellValues[1] = cellRight[1];
rightCellValues[2] = cellRight[2];
rightCellValues[3] = cellRight[3];
rightCellValues[0] += alpharight[0] * ((dxr * rightGradient[0])+(dyr * rightGradient[1]));
rightCellValues[1] += alpharight[0] * ((dxr * rightGradient[2])+(dyr * rightGradient[3]));
rightCellValues[2] += alpharight[0] * ((dxr * rightGradient[4])+(dyr * rightGradient[5]));
rightCellValues[3] += alpharight[0] * ((dxr * rightGradient[6])+(dyr * rightGradient[7]));
if (rightCellValues[0] >= 1e-3){
uR = rightCellValues[1]/rightCellValues[0];
vR = rightCellValues[2]/rightCellValues[0];
} else {
uR = 0.0f;
vR = 0.0f;
}
} else {
float nx = edgeNormals[0];
float ny = edgeNormals[1];
float inNormalVelocity = uL * nx + vL * ny;
float inTangentVelocity = -1.0f * uL * ny + vL * nx;
float outNormalVelocity = 0.0f;
float outTangentVelocity = 0.0f;
float wet = (fabs(*zmin) - zL) > 0.0f ? (fabs(*zmin) - zL) : EPS_cuda;
float critical = sqrt(uL*uL + vL*vL);
outTangentVelocity = inTangentVelocity;
if (critical < sqrt(g_cuda*leftCellValues[0])){
rightCellValues[0] = wet;
rightCellValues[3] = wet + zL;
outNormalVelocity = 0.0f;
} else {
rightCellValues[0] = leftCellValues[0];
rightCellValues[3] = leftCellValues[3];
outNormalVelocity = inNormalVelocity;
}
uR = outNormalVelocity * nx - outTangentVelocity * ny;
vR = outNormalVelocity * ny + outTangentVelocity * nx;
rightCellValues[1] = uR*rightCellValues[0];
rightCellValues[2] = vR*rightCellValues[0];
zR = zL;
}
rightCellValues[3] -= rightCellValues[0];
leftCellValues[3] -= leftCellValues[0];
InterfaceBathy = leftCellValues[3] > rightCellValues[3] ? leftCellValues[3] : rightCellValues[3];
bathySource[0] =0.5f * g_cuda * (leftCellValues[0]*leftCellValues[0]);
bathySource[1] =0.5f * g_cuda * (rightCellValues[0]*rightCellValues[0]);
float hL = (leftCellValues[0] + leftCellValues[3] - InterfaceBathy);
hL = hL > 0.0f ? hL : 0.0f;
float hR = (rightCellValues[0] + rightCellValues[3] - InterfaceBathy);
hR = hR > 0.0f ? hR : 0.0f;
bathySource[0] -= .5f * g_cuda * (hL * hL);
bathySource[1] -= .5f * g_cuda * (hR * hR);
bathySource[2] = -.5f * g_cuda *(leftCellValues[0] + cellLeft[0])*(leftCellValues[3] - zL);
bathySource[3] = -.5f * g_cuda *(rightCellValues[0] + cellRight[0])*(rightCellValues[3] - zR);
bathySource[0] *= *edgeLength;
bathySource[1] *= *edgeLength;
bathySource[2] *= *edgeLength;
bathySource[3] *= *edgeLength;
float cL = sqrt(g_cuda * hL);
cL = cL > 0.0f ? cL : 0.0f;
float cR = sqrt(g_cuda * hR);
cR = cR > 0.0f ? cR : 0.0f;
float uLn = uL * edgeNormals[0] + vL * edgeNormals[1];
float uRn = uR * edgeNormals[0] + vR * edgeNormals[1];
float unStar = 0.5f * (uLn + uRn) + (cL-cR);
float cStar = 0.5f * (cL + cR) - 0.25f* (uRn-uLn);
float sL = (uLn - cL) < (unStar - cStar) ? (uLn - cL) : (unStar - cStar);
float sR = (uRn + cR) > (unStar + cStar) ? (uRn + cR) : (unStar + cStar);
float sStar;
sStar = (sL*hR*(uRn - sR) - sR*hL*(uLn - sL))/
(hR*(uRn - sR) - hL*(uLn - sL));
if ((leftCellValues[0] <= EPS_cuda) && (rightCellValues[0] > EPS_cuda)) {
sL = uRn - 2.0f*cR;
sR = uRn + cR;
sStar = sL;
}
if ((rightCellValues[0] <= EPS_cuda) && (leftCellValues[0] > EPS_cuda)) {
sR = uLn + 2.0f*cL;
sL = uLn - cL;
sStar = sR;
}
float uLp = vL*edgeNormals[0] - uL*edgeNormals[1];
float uRp = vR*edgeNormals[0] - uR*edgeNormals[1];
float LeftFluxes_H, LeftFluxes_N, LeftFluxes_U, LeftFluxes_V;
float HuDotN = (hL*uL) * edgeNormals[0] + (hL*vL) * edgeNormals[1];
LeftFluxes_H = HuDotN;
LeftFluxes_U = HuDotN * uL;
LeftFluxes_V = HuDotN * vL;
LeftFluxes_N = HuDotN * uLn;
LeftFluxes_U += (.5f * g_cuda * edgeNormals[0] ) * ( hL * hL );
LeftFluxes_V += (.5f * g_cuda * edgeNormals[1] ) * ( hL * hL );
LeftFluxes_N += (.5f * g_cuda ) * ( hL * hL );
float RightFluxes_H,RightFluxes_N, RightFluxes_U, RightFluxes_V;
HuDotN = (hR*uR) * edgeNormals[0] + (hR*vR) * edgeNormals[1];
RightFluxes_H = HuDotN;
RightFluxes_U = HuDotN * uR;
RightFluxes_V = HuDotN * vR;
RightFluxes_N = HuDotN * uRn;
RightFluxes_U += (.5f * g_cuda * edgeNormals[0] ) * ( hR * hR );
RightFluxes_V += (.5f * g_cuda * edgeNormals[1] ) * ( hR * hR );
RightFluxes_N += (.5f * g_cuda ) * ( hR * hR );
float sLMinus = sL < 0.0f ? sL : 0.0f;
float sRPlus = sR > 0.0f ? sR : 0.0f;
float sRMinussL = sRPlus - sLMinus;
sRMinussL = sRMinussL < EPS_cuda ? EPS_cuda : sRMinussL;
float t1 = sRPlus / sRMinussL;
float t2 = ( -1.0 * sLMinus ) / sRMinussL;
float t3 = ( sRPlus * sLMinus ) / sRMinussL;
float FStar[3];
FStar[0] =
( t1 * LeftFluxes_H ) +
( t2 * RightFluxes_H ) +
( t3 * ( hR - hL ) );
FStar[1] =
( t1 * LeftFluxes_N ) +
( t2 * RightFluxes_N ) +
( t3 * ( (hR * uRn) -
(hL * uLn) ) );
if( sL >= 0.0f) {
out[0] = t1*LeftFluxes_H;
out[1] = t1*LeftFluxes_U;
out[2] = t1*LeftFluxes_V;
} else if ((sL < 0.0f) && (sStar >= 0.0f)){
out[0] = FStar[0];
FStar[2] = FStar[0] * uLp;
out[1] = FStar[1]*edgeNormals[0] - FStar[2]*edgeNormals[1];
out[2] = FStar[1]*edgeNormals[1] + FStar[2]*edgeNormals[0];
} else if((sStar < 0.0f) && (sR >= 0.0f)){
out[0] = FStar[0];
FStar[2] = FStar[0] * uRp;
out[1] = FStar[1]*edgeNormals[0] - FStar[2]*edgeNormals[1];
out[2] = FStar[1]*edgeNormals[1] + FStar[2]*edgeNormals[0];
} else {
out[0] = t2*RightFluxes_H;
out[1] = t2*RightFluxes_U;
out[2] = t2*RightFluxes_V;
}
out[0] *= *edgeLength;
out[1] *= *edgeLength;
out[2] *= *edgeLength;
float maximum = fabs(uLn + cL);
maximum = maximum > fabs(uLn - cL) ? maximum : fabs(uLn - cL);
maximum = maximum > fabs(uRn + cR) ? maximum : fabs(uRn + cR);
maximum = maximum > fabs(uRn - cR) ? maximum : fabs(uRn - cR);
*maxEdgeEigenvalues = maximum;
}
// CUDA kernel function
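// OP2-generated wrapper: each block processes one mini-partition of the edge set
// (blockId -> blkmap -> offset/nelems) and reaches cell data indirectly through the
// edge-to-cell map opDat0Map (two cells per edge). The colouring arrays from the
// execution plan are passed in but not used here, since all writes are direct
// per-edge data.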
__global__ void op_cuda_computeFluxes(
const float *__restrict ind_arg0,
const float *__restrict ind_arg1,
const float *__restrict ind_arg2,
const float *__restrict ind_arg3,
const int *__restrict opDat0Map,
const float *__restrict arg4,
const float *__restrict arg5,
const float *__restrict arg8,
const int *__restrict arg11,
float *arg12,
float *arg13,
float *arg14,
const float *arg15,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelem; n+=blockDim.x ){
int map0idx;
int map1idx;
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
//user-supplied kernel call
computeFluxes_gpu(ind_arg0+map0idx*4,
ind_arg0+map1idx*4,
ind_arg1+map0idx*4,
ind_arg1+map1idx*4,
arg4+(n+offset_b)*1,
arg5+(n+offset_b)*2,
ind_arg2+map0idx*2,
ind_arg2+map1idx*2,
arg8+(n+offset_b)*2,
ind_arg3+map0idx*8,
ind_arg3+map1idx*8,
arg11+(n+offset_b)*1,
arg12+(n+offset_b)*4,
arg13+(n+offset_b)*3,
arg14+(n+offset_b)*1,
arg15);
}
}
//host stub function
void op_par_loop_computeFluxes(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7,
op_arg arg8,
op_arg arg9,
op_arg arg10,
op_arg arg11,
op_arg arg12,
op_arg arg13,
op_arg arg14,
op_arg arg15){
float*arg15h = (float *)arg15.data;
int nargs = 16;
op_arg args[16];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
args[8] = arg8;
args[9] = arg9;
args[10] = arg10;
args[11] = arg11;
args[12] = arg12;
args[13] = arg13;
args[14] = arg14;
args[15] = arg15;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(24);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[24].name = name;
OP_kernels[24].count += 1;
int ninds = 4;
int inds[16] = {0,0,1,1,-1,-1,2,2,-1,3,3,-1,-1,-1,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: computeFluxes\n");
}
//get plan
#ifdef OP_PART_SIZE_24
int part_size = OP_PART_SIZE_24;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(float));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg15.data = OP_consts_h + consts_bytes;
arg15.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg15.data)[d] = arg15h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
mvConstArraysToDevice(consts_bytes);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_grouped(nargs, args, 2);
}
#ifdef OP_BLOCK_SIZE_24
int nthread = OP_BLOCK_SIZE_24;
#else
int nthread = OP_block_size;
#endif
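// More than 65535 blocks cannot fit in gridDim.x alone, so the blocks of this
// colour are spread over a 2D grid; the kernel linearises them again with
// blockIdx.x + blockIdx.y*gridDim.x.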
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
op_cuda_computeFluxes<<<nblocks,nthread>>>(
(float *)arg0.data_d,
(float *)arg2.data_d,
(float *)arg6.data_d,
(float *)arg9.data_d,
arg0.map_data_d,
(float*)arg4.data_d,
(float*)arg5.data_d,
(float*)arg8.data_d,
(int*)arg11.data_d,
(float*)arg12.data_d,
(float*)arg13.data_d,
(float*)arg14.data_d,
(float*)arg15.data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[24].transfer += Plan->transfer;
OP_kernels[24].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
if (OP_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[24].time += wall_t2 - wall_t1;
}
|
c60cf0263ef619e4b143f64146bf3bb7db4de6fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Example 3.2.2
#include <stdio.h>
__global__ void kernel(void) {
}
int main(void) {
kernel <<< 1, 1 >>> ();
printf("Hello World");
return 0;
} | c60cf0263ef619e4b143f64146bf3bb7db4de6fb.cu | //Example 3.2.2
#include <stdio.h>
__global__ void kernel(void) {
}
int main(void) {
kernel <<< 1, 1 >>> ();
printf("Hello World");
return 0;
} |
6e31b5fe505d79862ac7d69fc355580ed0c79d4c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "dscores_kernel_init.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
double *dscores = NULL;
hipMalloc(&dscores, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(dscores_kernel_init, dim3(gridBlock), dim3(threadBlock), 0, 0, y, dscores, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(dscores_kernel_init, dim3(gridBlock), dim3(threadBlock), 0, 0, y, dscores, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(dscores_kernel_init, dim3(gridBlock), dim3(threadBlock), 0, 0, y, dscores, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6e31b5fe505d79862ac7d69fc355580ed0c79d4c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "dscores_kernel_init.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
double *dscores = NULL;
cudaMalloc(&dscores, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
dscores_kernel_init<<<gridBlock,threadBlock>>>(y,dscores,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
dscores_kernel_init<<<gridBlock,threadBlock>>>(y,dscores,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
dscores_kernel_init<<<gridBlock,threadBlock>>>(y,dscores,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
961dead1edb8d874ab5a0fd6953ca1fd9ebb722d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include "structs.h"
#include <math.h>
#include <thrust/execution_policy.h>
#include <thrust/binary_search.h>
#include <hip/hip_cooperative_groups.h>
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
#include "params.h"
//namespace cg = cooperative_groups;
using namespace cooperative_groups;
__device__ void print(unsigned int tid, unsigned int value)
{
if(0 == tid)
{
printf("threadIdx.x 0, value = %d\n", value);
}
}
/******************************************************************************/
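// One thread per non-empty grid cell: estimates the cell's workload as the total
// number of points stored in its adjacent non-empty cells (using the first point of
// the cell as the representative query point), and writes the count into
// sortedCells so the cells can later be sorted by workload.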
__global__ void sortByWorkLoad(
DTYPE* database,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
schedulingCell * sortedCells,
DTYPE* sortedSet)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(*nNonEmptyCells <= tid)
{
return;
}
int cell = gridCellLookupArr[tid].idx;
int nbNeighborPoints = 0;
int tmpId = indexLookupArr[ index[cell].indexmin ];
DTYPE point[NUMINDEXEDDIM];
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for(int n = 0; n < NUMINDEXEDDIM; n++)
{
point[n] = database[tmpId * NUMINDEXEDDIM + n];
nDCellIDs[n] = (point[n] - minArr[n]) / (*epsilon);
    unsigned int nDMinCellIDs = max(0, nDCellIDs[n] - 1);
unsigned int nDMaxCellIDs = min(nCells[n] - 1, nDCellIDs[n] + 1);
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (1 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[n] = nDMinCellIDs;
rangeFilteredCellIdsMax[n] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (1 == foundMin && 0 == foundMax){
rangeFilteredCellIdsMin[n] = nDMinCellIDs;
rangeFilteredCellIdsMax[n] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (0 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[n] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[n] = nDMaxCellIDs;
//printf("\nmax not min");
}
else{
//printf("\nneither");
rangeFilteredCellIdsMin[n] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[n] = nDMinCellIDs + 1;
}
}
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
uint64_t cellID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
struct gridCellLookup tmp;
tmp.gridLinearID = cellID;
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
struct gridCellLookup * resultBinSearch = thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
nbNeighborPoints += index[GridIndex].indexmax - index[GridIndex].indexmin + 1;
}
}
sortedCells[tid].nbPoints = nbNeighborPoints;
sortedCells[tid].cellId = cell;
}
/******************************************************************************/
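// Same workload estimate as sortByWorkLoad, but for the unidirectional ("Unicomp")
// scheme: only adjacent cells whose linear ID is greater than or equal to the
// origin cell's are counted, since each pair of cells is evaluated from one side only.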
__global__ void sortByWorkLoadLidUnicomp(
DTYPE* database,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
schedulingCell * sortedCells,
DTYPE* sortedSet)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(*nNonEmptyCells <= tid)
{
return;
}
int cell = gridCellLookupArr[tid].idx;
int nbNeighborPoints = 0;
int tmpId = indexLookupArr[ index[cell].indexmin ];
DTYPE point[NUMINDEXEDDIM];
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for(int n = 0; n < NUMINDEXEDDIM; n++)
{
point[n] = database[tmpId * NUMINDEXEDDIM + n];
nDCellIDs[n] = (point[n] - minArr[n]) / (*epsilon);
    unsigned int nDMinCellIDs = max(0, nDCellIDs[n] - 1);
unsigned int nDMaxCellIDs = min(nCells[n] - 1, nDCellIDs[n] + 1);
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (1 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[n] = nDMinCellIDs;
rangeFilteredCellIdsMax[n] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (1 == foundMin && 0 == foundMax){
rangeFilteredCellIdsMin[n] = nDMinCellIDs;
rangeFilteredCellIdsMax[n] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (0 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[n] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[n] = nDMaxCellIDs;
//printf("\nmax not min");
}
else{
//printf("\nneither");
rangeFilteredCellIdsMin[n] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[n] = nDMinCellIDs + 1;
}
}
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = nDCellIDs[x];
}
uint64_t originCellID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
uint64_t cellID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
if(originCellID <= cellID)
{
struct gridCellLookup tmp;
tmp.gridLinearID = cellID;
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
struct gridCellLookup * resultBinSearch = thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
nbNeighborPoints += index[GridIndex].indexmax - index[GridIndex].indexmin + 1;
}
}
}
sortedCells[tid].nbPoints = nbNeighborPoints;
sortedCells[tid].cellId = cell;
}
/******************************************************************************/
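// Maps an n-dimensional cell coordinate to a linear cell ID with a mixed-radix
// encoding: offset = indexes[0] + indexes[1]*dimLen[0] + indexes[2]*dimLen[0]*dimLen[1] + ...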
__device__ uint64_t getLinearID_nDimensionsGPU(
unsigned int * indexes,
unsigned int * dimLen,
unsigned int nDimensions)
{
uint64_t offset = 0;
uint64_t multiplier = 1;
for (int i = 0; i < nDimensions; i++)
{
offset += (uint64_t) indexes[i] * multiplier;
multiplier *= dimLen[i];
}
return offset;
}
/******************************************************************************/
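// Recursive interval search: returns the index mid satisfying
// threadArray[mid] <= value < threadArray[mid + 1]; it assumes such an interval
// exists within [begin, end]. An earlier iterative version is kept in the comment below.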
__device__ unsigned int binary_search(
const unsigned int * threadArray,
unsigned int begin,
unsigned int end,
const unsigned int value)
{
unsigned int mid = (begin + end) / 2;
if(threadArray[mid] <= value && value < threadArray[mid + 1])
{
return mid;
}else{
if(threadArray[mid] < value)
{
return binary_search(threadArray, mid + 1, end, value);
}else{
return binary_search(threadArray, begin, mid - 1, value);
}
}
/*
while(begin <= end)
{
unsigned int mid = (begin + end) / 2;
if(threadArray[mid] <= value && value < threadArray[mid + 1])
{
(*tPerPoint) = threadArray[mid + 1] - threadArray[mid];
return mid;
}else{
if(threadArray[mid] < value)
{
begin = mid + 1;
}else{
end = mid - 1;
}
}
}
(*tPerPoint) = 1;
return end;
*/
}
/******************************************************************************/
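// Distance test for one candidate point (indexLookupArr[k]) against the query point:
// if the Euclidean distance is within epsilon, the (query, candidate) pair is appended
// to the key/value result buffers via an atomic counter. When differentCell is set the
// same pair is written a second time (note: with the same ordering, unlike
// evalPointUnicompAdjacent further below, which stores the reversed pair).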
__forceinline__ __device__ void evalPoint(
unsigned int* indexLookupArr,
int k,
DTYPE* database,
DTYPE* epsilon,
DTYPE* point,
unsigned int* cnt,
int* pointIDKey,
int* pointInDistVal,
int pointIdx,
bool differentCell)
{
DTYPE runningTotalDist = 0;
unsigned int dataIdx = indexLookupArr[k];
for(int l = 0; l < GPUNUMDIM; l++){
runningTotalDist += ( database[dataIdx * GPUNUMDIM + l] - point[l])
* (database[dataIdx * GPUNUMDIM + l] - point[l] );
}
if(sqrt(runningTotalDist) <= (*epsilon)){
//if(runningTotalDist <= ((*epsilon) * (*epsilon))){
unsigned int idx = atomicAdd(cnt, int(1));
// printf("tid = %d, tidx = %d, idx = %d\n", blockIdx.x * BLOCKSIZE + threadIdx.x, threadIdx.x, idx);
pointIDKey[idx] = pointIdx; // --> HERE
pointInDistVal[idx] = dataIdx;
if(differentCell) {
unsigned int idx = atomicAdd(cnt, int(1));
pointIDKey[idx] = pointIdx;
pointInDistVal[idx] = dataIdx;
}
}
}
/******************************************************************************/
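// Checks whether the adjacent cell described by 'indexes' is non-empty (binary search
// over the sorted gridCellLookupArr) and, if so, runs the distance test against every
// point stored in that cell.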
__device__ void evaluateCell(
unsigned int* nCells,
unsigned int* indexes,
struct gridCellLookup * gridCellLookupArr,
unsigned int* nNonEmptyCells,
DTYPE* database, DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
DTYPE* point, unsigned int* cnt,
int* pointIDKey,
int* pointInDistVal,
int pointIdx,
bool differentCell,
unsigned int* nDCellIDs)
{
//compare the linear ID with the gridCellLookupArr to determine if the cell is non-empty: this can happen because one point says
//a cell in a particular dimension is non-empty, but that's because it was related to a different point (not adjacent to the query point)
uint64_t calcLinearID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
struct gridCellLookup tmp;
tmp.gridLinearID = calcLinearID;
//find if the cell is non-empty
if(thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
//compute the neighbors for the adjacent non-empty cell
struct gridCellLookup * resultBinSearch = thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
for(int k = index[GridIndex].indexmin; k <= index[GridIndex].indexmax; k++){
evalPoint(indexLookupArr, k, database, epsilon, point, cnt, pointIDKey, pointInDistVal, pointIdx, differentCell);
}
}
}
/******************************************************************************/
__forceinline__ __device__ void evalPointUnicompOrigin(
unsigned int* indexLookupArr,
int k,
DTYPE* database,
DTYPE* epsilon,
DTYPE* point,
unsigned int* cnt,
int* pointIDKey,
int* pointInDistVal,
int pointIdx)
{
DTYPE runningTotalDist = 0;
unsigned int dataIdx = indexLookupArr[k];
for (int l = 0; l < GPUNUMDIM; l++)
{
runningTotalDist += (database[dataIdx * GPUNUMDIM + l] - point[l]) * (database[dataIdx * GPUNUMDIM + l] - point[l]);
}
if (sqrt(runningTotalDist) <= (*epsilon)){
//if(runningTotalDist <= ((*epsilon) * (*epsilon))){
unsigned int idx = atomicAdd(cnt, int(1));
    //printf("\n\nLOL THIS IS GOING TOO FAR (%d)\n\n", idx);
// assert(idx < 2000000);
pointIDKey[idx] = pointIdx; // --> HERE
pointInDistVal[idx] = dataIdx;
}
}
/******************************************************************************/
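// Variant used when several threads cooperate on one query point: the points of the
// cell [indexmin, indexmax] are split between nbThreads threads, each taking a
// contiguous chunk of floor(nbElem/nbThreads) points, with the first
// (nbElem % nbThreads) threads taking one extra point.
// Illustrative example: nbElem = 10, nbThreads = 4 gives chunks of 3, 3, 2, 2 points.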
__device__ void evaluateCellUnicompOrigin(
unsigned int* nCells,
unsigned int* indexes,
struct gridCellLookup * gridCellLookupArr,
unsigned int* nNonEmptyCells,
DTYPE* database, DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
DTYPE* point, unsigned int* cnt,
int* pointIDKey,
int* pointInDistVal,
int pointIdx,
unsigned int* nDCellIDs,
unsigned int nbThreads,
unsigned int numThread)
{
//compare the linear ID with the gridCellLookupArr to determine if the cell is non-empty: this can happen because one point says
//a cell in a particular dimension is non-empty, but that's because it was related to a different point (not adjacent to the query point)
uint64_t calcLinearID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
struct gridCellLookup tmp;
tmp.gridLinearID = calcLinearID;
//find if the cell is non-empty
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
//compute the neighbors for the adjacent non-empty cell
struct gridCellLookup * resultBinSearch = thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
int begin = index[GridIndex].indexmin;
int end = index[GridIndex].indexmax;
int nbElem = end - begin + 1;
if(numThread < nbElem)
{
int size = nbElem / nbThreads;
int oneMore = nbElem - (size * nbThreads);
if(nbElem == (size * nbThreads))
{
begin += size * numThread;
end = begin + size - 1;
}else{
begin += numThread * size + ((numThread < oneMore)?numThread:oneMore);
end = begin + size - 1 + (numThread < oneMore);
}
for(int k = begin; k <= end; k++)
{
evalPointUnicompOrigin(indexLookupArr, k, database, epsilon, point, cnt, pointIDKey, pointInDistVal, pointIdx);
}
}
}
}
/******************************************************************************/
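// Distance test used for adjacent cells under the unidirectional scheme: every match
// is stored in both directions, (query, candidate) and (candidate, query), because
// each pair of adjacent cells is visited only once.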
__forceinline__ __device__ void evalPointUnicompAdjacent(
unsigned int* indexLookupArr,
int k,
DTYPE* database,
DTYPE* epsilon,
DTYPE* point,
unsigned int* cnt,
int* pointIDKey,
int* pointInDistVal,
int pointIdx)
{
DTYPE runningTotalDist = 0;
unsigned int dataIdx = indexLookupArr[k];
for (int l = 0; l < GPUNUMDIM; l++)
{
runningTotalDist += (database[dataIdx * GPUNUMDIM + l] - point[l]) * (database[dataIdx * GPUNUMDIM + l] - point[l]);
}
if (sqrt(runningTotalDist) <= (*epsilon)){
//if(runningTotalDist <= ((*epsilon) * (*epsilon))){
unsigned int idx = atomicAdd(cnt, int(2));
pointIDKey[idx] = pointIdx;
pointInDistVal[idx] = dataIdx;
pointIDKey[idx + 1] = dataIdx;
pointInDistVal[idx + 1] = pointIdx;
}
}
/******************************************************************************/
__device__ void evaluateCellUnicompAdjacent(
unsigned int* nCells,
unsigned int* indexes,
struct gridCellLookup * gridCellLookupArr,
unsigned int* nNonEmptyCells,
DTYPE* database, DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
DTYPE* point, unsigned int* cnt,
int* pointIDKey,
int* pointInDistVal,
int pointIdx,
unsigned int* nDCellIDs,
unsigned int nbThreads,
unsigned int numThread)
{
//compare the linear ID with the gridCellLookupArr to determine if the cell is non-empty: this can happen because one point says
//a cell in a particular dimension is non-empty, but that's because it was related to a different point (not adjacent to the query point)
uint64_t calcLinearID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
struct gridCellLookup tmp;
tmp.gridLinearID = calcLinearID;
//find if the cell is non-empty
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
//compute the neighbors for the adjacent non-empty cell
struct gridCellLookup * resultBinSearch = thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
int begin = index[GridIndex].indexmin;
int end = index[GridIndex].indexmax;
int nbElem = end - begin + 1;
if(numThread < nbElem)
{
int size = nbElem / nbThreads;
int oneMore = nbElem - (size * nbThreads);
if(nbElem == (size * nbThreads))
{
begin += size * numThread;
end = begin + size - 1;
}else{
begin += numThread * size + ((numThread < oneMore)?numThread:oneMore);
end = begin + size - 1 + (numThread < oneMore);
}
for(int k = begin; k <= end; k++)
{
evalPointUnicompAdjacent(indexLookupArr, k, database, epsilon, point, cnt, pointIDKey, pointInDistVal, pointIdx);
}
}
}
}
/******************************************************************************/
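// Result-size estimator: each thread processes one sampled query point (every
// (*sampleOffset)-th point of the dataset) and only counts, via atomicAdd on cnt,
// how many neighbours lie within epsilon; the key/value writes are left commented
// out. The count is presumably used on the host to size the result batches.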
__global__ void kernelNDGridIndexBatchEstimatorOLD(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * sampleOffset,
DTYPE* database,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets)
{
unsigned int tid=threadIdx.x+ (blockIdx.x*BLOCKSIZE);
if (tid>=*N){
return;
}
unsigned int pointID = tid * (*sampleOffset) * (GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = database[pointID + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int nDMinCellIDs[NUMINDEXEDDIM];
unsigned int nDMaxCellIDs[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
nDMinCellIDs[i] = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
nDMaxCellIDs[i] = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
}
///////////////////////////
//Take the intersection of the ranges for each dimension between
//the point and the filtered set of cells in each dimension
//Ranges in a given dimension that have points in them that are non-empty in a dimension will be tested
///////////////////////////
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
//compare the point's range of cell IDs in each dimension to the filter mask
//only 2 possible values (you always find the middle point in the range), because that's the cell of the point itself
bool foundMin = 0;
bool foundMax = 0;
    //we go through each dimension and compare the range of the query points min/max cell ids to the filtered ones
//find out which ones in the range exist based on the min/max
//then determine the appropriate ranges
for (int i=0; i<NUMINDEXEDDIM; i++)
{
foundMin = 0;
foundMax = 0;
//for each dimension
//OPTIMIZE: WITH BINARY SEARCH LATER
// for (int dimFilterRng=gridCellNDMaskOffsets[(i*2)]; dimFilterRng<=gridCellNDMaskOffsets[(i*2)+1]; dimFilterRng++){
// if (gridCellNDMask[dimFilterRng]==nDMinCellIDs[i])
// foundMin=1;
// if (gridCellNDMask[dimFilterRng]==nDMaxCellIDs[i])
// foundMax=1;
// }
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMin=1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMax=1;
}
// cases:
// found the min and max
// found the min and not max
//found the max and not the min
//you don't find the min or max -- then only check the mid
//you always find the mid because it's in the cell of the point you're looking for
//NEED TO OPTIMIZE STILL
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] =nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
//printf("\nmax not min");
}
    //don't find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++)
{
indexes[x] = loopRng[x];
// if (tid==0)
// printf("\ndim: %d, indexes: %d",x, indexes[x]);
}
uint64_t calcLinearID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
//compare the linear ID with the gridCellLookupArr to determine if the cell is non-empty: this can happen because one point says
//a cell in a particular dimension is non-empty, but that's because it was related to a different point (not adjacent to the query point)
struct gridCellLookup tmp;
tmp.gridLinearID = calcLinearID;
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
//in the GPU implementation we go directly to computing neighbors so that we don't need to
//store a buffer of the cells to check
//cellsToCheck->push_back(calcLinearID);
//HERE WE COMPUTE THE NEIGHBORS FOR THE CELL
//XXXXXXXXXXXXXXXXXXXXXXXXX
struct gridCellLookup * resultBinSearch=thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr+(*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
for (int k = index[GridIndex].indexmin; k <= index[GridIndex].indexmax; k++)
{
DTYPE runningTotalDist = 0;
unsigned int dataIdx = indexLookupArr[k];
for (int l = 0; l < GPUNUMDIM; l++)
{
runningTotalDist += (database[dataIdx * GPUNUMDIM + l] -point[l])
* (database[dataIdx * GPUNUMDIM + l] - point[l]);
}
if (sqrt(runningTotalDist) <= (*epsilon)){
unsigned int idx = atomicAdd(cnt, int(1));
// pointIDKey[idx]=tid;
// pointInDistVal[idx]=i;
//neighborTableCPUPrototype[queryPoint].neighbors.push_back(dataIdx);
}
}
}
//printf("\nLinear id: %d",calcLinearID);
} //end loop body
}
__device__ int counterEstimator = 0;
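// Work-queue variant of the estimator: instead of a fixed stride, each thread pops
// the next point index from the global counterEstimator with atomicAdd and reads the
// query point's coordinates from sortedCells (the workload-sorted copy of the points).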
__global__ void kernelNDGridIndexWorkQueueBatchEstimatorOLD(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * sampleOffset,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets)
{
unsigned int tid = blockIdx.x * BLOCKSIZE + threadIdx.x;
if (*N <= tid){
return;
}
//unsigned int pointID = tid * (*sampleOffset) * (GPUNUMDIM);
unsigned int pointID = atomicAdd(&counterEstimator, int(1));
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
// point[i] = database[pointID + i];
point[i] = sortedCells[pointID * GPUNUMDIM + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int nDMinCellIDs[NUMINDEXEDDIM];
unsigned int nDMaxCellIDs[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
nDMinCellIDs[i] = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
nDMaxCellIDs[i] = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
}
///////////////////////////
//Take the intersection of the ranges for each dimension between
//the point and the filtered set of cells in each dimension
//Ranges in a given dimension that have points in them that are non-empty in a dimension will be tested
///////////////////////////
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
//compare the point's range of cell IDs in each dimension to the filter mask
//only 2 possible values (you always find the middle point in the range), because that's the cell of the point itself
bool foundMin = 0;
bool foundMax = 0;
    //we go through each dimension and compare the range of the query points min/max cell ids to the filtered ones
//find out which ones in the range exist based on the min/max
//then determine the appropriate ranges
for (int i=0; i<NUMINDEXEDDIM; i++)
{
foundMin = 0;
foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMin=1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMax=1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] =nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
}
else{
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++)
{
indexes[x] = loopRng[x];
}
uint64_t calcLinearID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
//compare the linear ID with the gridCellLookupArr to determine if the cell is non-empty: this can happen because one point says
//a cell in a particular dimension is non-empty, but that's because it was related to a different point (not adjacent to the query point)
struct gridCellLookup tmp;
tmp.gridLinearID = calcLinearID;
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
//in the GPU implementation we go directly to computing neighbors so that we don't need to
//store a buffer of the cells to check
//cellsToCheck->push_back(calcLinearID);
//HERE WE COMPUTE THE NEIGHBORS FOR THE CELL
//XXXXXXXXXXXXXXXXXXXXXXXXX
struct gridCellLookup * resultBinSearch = thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr+(*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
for (int k = index[GridIndex].indexmin; k <= index[GridIndex].indexmax; k++)
{
DTYPE runningTotalDist = 0;
unsigned int dataIdx = indexLookupArr[k];
for (int l = 0; l < GPUNUMDIM; l++)
{
runningTotalDist += (database[dataIdx * GPUNUMDIM + l] - point[l])
* (database[dataIdx * GPUNUMDIM + l] - point[l]);
}
if (sqrt(runningTotalDist) <= (*epsilon)){
unsigned int idx = atomicAdd(cnt, int(1));
// pointIDKey[idx]=tid;
// pointInDistVal[idx]=i;
//neighborTableCPUPrototype[queryPoint].neighbors.push_back(dataIdx);
}
}
}
//printf("\nLinear id: %d",calcLinearID);
} //end loop body
}
// Global memory kernel - Initial version ("GPU")
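// One query point is handled by THREADPERPOINT consecutive threads: with
// THREADPERPOINT > 1 the points of each candidate cell are divided between them via
// evaluateCellUnicompOrigin, otherwise a single thread scans the whole cell with
// evaluateCell.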
__global__ void kernelNDGridIndexGlobal(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
//printf("tid %d, working on point %d,\n", tid, pointIdx);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
#if SORT_BY_WORKLOAD
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedCells[pointOffset + i];
}
#else
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = database[pointOffset + i];
}
#endif
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int nDMinCellIDs[NUMINDEXEDDIM];
unsigned int nDMaxCellIDs[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
nDMinCellIDs[i] = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
nDMaxCellIDs[i] = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
}
///////////////////////////
//Take the intersection of the ranges for each dimension between
//the point and the filtered set of cells in each dimension
//Ranges in a given dimension that have points in them that are non-empty in a dimension will be tested
///////////////////////////
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
//compare the point's range of cell IDs in each dimension to the filter mask
//only 2 possible values (you always find the middle point in the range), because that's the cell of the point itself
bool foundMin = 0;
bool foundMax = 0;
//we go through each dimension and compare the range of the query points min/max cell ids to the filtered ones
//find out which ones in the range exist based on the min/max
//then determine the appropriate ranges
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
foundMin = 0;
foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[(i * 2)],
gridCellNDMask + gridCellNDMaskOffsets[(i * 2) + 1] + 1, nDMinCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[(i * 2)],
gridCellNDMask + gridCellNDMaskOffsets[(i * 2) + 1] + 1, nDMaxCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
// cases:
// found the min and max
// found the min and not max
//found the max and not the min
//you don't find the min or max -- then only check the mid
//you always find the mid because it's in the cell of the point you're looking for
//NEED TO OPTIMIZE STILL
if (1 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
//printf("\nmin and max");
}
else if (1 == foundMin && 0 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
//printf("\nmin not max");
}
else if (0 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
//printf("\nmax not min");
}
    //don't find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
#if THREADPERPOINT > 1
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
#else
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon,
index, indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointIdx, false, nDCellIDs);
#endif
} //end loop body
}
// Global memory kernel - Unicomp version ("Unicomp")
__global__ void kernelNDGridIndexGlobalUnicomp(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
#if SORT_BY_WORKLOAD
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedCells[pointOffset + i];
}
#else
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = database[pointOffset + i];
}
#endif
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int nDMinCellIDs[NUMINDEXEDDIM];
unsigned int nDMaxCellIDs[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
nDMinCellIDs[i] = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
nDMaxCellIDs[i] = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
}
///////////////////////////
//Take the intersection of the ranges for each dimension between
//the point and the filtered set of cells in each dimension
//Ranges in a given dimension that have points in them that are non-empty in a dimension will be tested
///////////////////////////
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
//compare the point's range of cell IDs in each dimension to the filter mask
//only 2 possible values (you always find the middle point in the range), because that's the cell of the point itself
bool foundMin = 0;
bool foundMax = 0;
//we go through each dimension and compare the range of the query points min/max cell ids to the filtered ones
//find out which ones in the range exist based on the min/max
//then determine the appropriate ranges
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
foundMin = 0;
foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
// cases:
// found the min and max
// found the min and not max
//found the max and not the min
//you don't find the min or max -- then only check the mid
//you always find the mid because it's in the cell of the point you're looking for
//NEED TO OPTIMIZE STILL
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
//printf("\nmax not min");
}
    //don't find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for(int i = 0; i < NUMINDEXEDDIM; i++) {
indexes[i] = nDCellIDs[i];
}
#if THREADPERPOINT > 1
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
#include "stamploopsV2.h"
#else
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon,
index, indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointIdx, false, nDCellIDs);
#include "stamploops.h"
#endif
}
// Global memory kernel - B-Unicomp version ("B-Unicomp")
__global__ void kernelNDGridIndexGlobalBUnicomp(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
#if SORT_BY_WORKLOAD
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedCells[pointOffset + i];
}
#else
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = database[pointOffset + i];
}
#endif
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
    //don't find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
#if NUMINDEXEDDIM==2
indexes[0] = nDCellIDs[0];
indexes[1] = nDCellIDs[1];
unsigned int colorId = nDCellIDs[0] + nDCellIDs[1];
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
for(loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for(loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
{
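        // Checkerboard-style ("B-Unicomp") pruning: the parity of colorId (the sum of the
        // origin cell's coordinates) selects which of the adjacent cells this cell is
        // responsible for, so each pair of neighbouring cells is evaluated exactly once;
        // evaluateCellUnicompAdjacent then records result pairs in both directions.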
if( ( (1 == colorId % 2) && (nDCellIDs[1] <= loopRng[1]) && (nDCellIDs[0] != loopRng[0]) )
|| ( (0 == colorId % 2) && ((nDCellIDs[1] < loopRng[1]) || (loopRng[1] < nDCellIDs[1] && loopRng[0] == nDCellIDs[0])) ) ) // ( odd => red pattern ) || ( even => green pattern )
{
indexes[0] = loopRng[0];
indexes[1] = loopRng[1];
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
}
}
#else
#if NUMINDEXEDDIM==3
indexes[0] = nDCellIDs[0];
indexes[1] = nDCellIDs[1];
indexes[2] = nDCellIDs[2];
unsigned int colorId = nDCellIDs[0] + nDCellIDs[1] + nDCellIDs[2];
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
for(loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for(loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
for(loopRng[2] = rangeFilteredCellIdsMin[2]; loopRng[2] <= rangeFilteredCellIdsMax[2]; loopRng[2]++)
{
if( ( (1 == colorId % 2) && ( (nDCellIDs[0] != loopRng[0] && nDCellIDs[1] <= loopRng[1] && nDCellIDs[2] <= loopRng[2])
|| (nDCellIDs[0] == loopRng[0] && nDCellIDs[1] <= loopRng[1] && nDCellIDs[2] < loopRng[2])
|| (nDCellIDs[1] < loopRng[1] && loopRng[2] < nDCellIDs[2]) ) )
|| ( (0 == colorId % 2) && ( (nDCellIDs[1] < loopRng[1] && nDCellIDs[2] <= loopRng[2])
|| (nDCellIDs[0] == loopRng[0] && loopRng[1] < nDCellIDs[1] && nDCellIDs[2] <= loopRng[2])
|| (nDCellIDs[0] != loopRng[0] && nDCellIDs[1] < loopRng[1] && loopRng[2] < nDCellIDs[2])
|| (nDCellIDs[1] == loopRng[1] && nDCellIDs[2] < loopRng[2]) ) ) )
{
indexes[0] = loopRng[0];
indexes[1] = loopRng[1];
indexes[2] = loopRng[2];
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
}
}
#endif
#endif
}
// Global memory kernel - Linear ID comparison (Need to find a name : L-Unicomp ? Lin-Unicomp ? LId-Unicomp ?)
__global__ void kernelNDGridIndexGlobalLinearIDUnicomp(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
#if SORT_BY_WORKLOAD
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedCells[pointOffset + i];
}
#else
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = database[pointOffset + i];
}
#endif
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
// cases:
// found the min and max
// found the min and not max
//found the max and not the min
//you don't find the min or max -- then only check the mid
//you always find the mid because it's in the cell of the point you're looking for
//NEED TO OPTIMIZE STILL
if (1 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (1 == foundMin && 0 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (0 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//don't find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
uint64_t cellID = getLinearID_nDimensionsGPU(nDCellIDs, nCells, NUMINDEXEDDIM);
for(int i = 0; i < NUMINDEXEDDIM; i++) {
indexes[i] = nDCellIDs[i];
}
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
uint64_t neighborID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
if(cellID < neighborID)
{
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
}
} //end loop body
}
// Global memory kernel - Sorting cells by workload (Need to find a name)
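// Points are read from sortedCells, i.e., in the order produced by the workload sort, so threads of
// a block work on points with similar candidate counts; each point scans its adjacent non-empty
// cells, optionally splitting each cell among THREADPERPOINT cooperating threads.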
__global__ void kernelNDGridIndexGlobalSortedCells(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedCells[pointOffset + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//don't find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
#if THREADPERPOINT > 1
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
#else
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index,
indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointIdx, false, nDCellIDs);
#endif
} //end loop body
}
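// Global memory kernel - workload-sorted cells with a per-point (dynamic) number of threads:
// sortedCells maps each thread slot to an entry of indexLookupArr (the point to work on),
// sortedCellsNbThreads[] gives how many threads cooperate on that point and
// sortedCellsNbThreadsBefore[] is used as this thread's rank within that group.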
__global__ void kernelNDGridIndexGlobalSortedCellsDynamicThreads(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
unsigned int * sortedCells,
unsigned int * sortedCellsNbThreads,
unsigned int * sortedCellsNbThreadsBefore,
unsigned int nbTotalThreads,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x);
/*if (nbTotalThreads <= tid){
return;
}*/
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
if(nbTotalThreads <= pointOffset){
return;
}
//make a local copy of the point
DTYPE point[GPUNUMDIM];
unsigned int dataIdx = indexLookupArr[ sortedCells[pointOffset] ];
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = database[ dataIdx * GPUNUMDIM + i ];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
// cases:
// found the min and max
// found the min and not max
//found the max and not the min
//you don't find the min or max -- then only check the mid
//you always find the mid because it's in the cell of the point you're looking for
//NEED TO OPTIMIZE STILL
if (1 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (1 == foundMin && 0 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (0 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//don't find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
uint64_t cellID = getLinearID_nDimensionsGPU(nDCellIDs, nCells, NUMINDEXEDDIM);
for(int i = 0; i < NUMINDEXEDDIM; i++) {
indexes[i] = nDCellIDs[i];
}
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, sortedCellsNbThreads[pointOffset], sortedCellsNbThreadsBefore[pointOffset]);
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
uint64_t neighborID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
if(cellID < neighborID)
{
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, sortedCellsNbThreads[pointOffset], sortedCellsNbThreadsBefore[pointOffset]);
}
} //end loop body
}
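// V2: same dynamic-threads scheme, but the point coordinates are read directly from sortedSet and
// the per-point thread count / rank are indexed by pointIdx instead of by the database offset.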
__global__ void kernelNDGridIndexGlobalSortedCellsDynamicThreadsV2(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE * sortedSet,
unsigned int * sortedCellsNbThreads,
unsigned int * sortedCellsNbThreadsBefore,
unsigned int nbTotalThreads,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x);
/*if (nbTotalThreads <= tid){
return;
}*/
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
if(nbTotalThreads <= pointIdx){
return;
}
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedSet[pointOffset + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
// cases:
// found the min and max
// found the min and not max
//found the max and not the min
//you don't find the min or max -- then only check the mid
//you always find the mid because it's in the cell of the point you're looking for
//NEED TO OPTIMIZE STILL
if (1 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (1 == foundMin && 0 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (0 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//don't find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
uint64_t cellID = getLinearID_nDimensionsGPU(nDCellIDs, nCells, NUMINDEXEDDIM);
for(int i = 0; i < NUMINDEXEDDIM; i++) {
indexes[i] = nDCellIDs[i];
}
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, sortedCellsNbThreads[pointIdx], sortedCellsNbThreadsBefore[pointIdx]);
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
uint64_t neighborID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
if(cellID < neighborID)
{
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, sortedCellsNbThreads[pointIdx], sortedCellsNbThreadsBefore[pointIdx]);
}
} //end loop body
}
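// "Fixed" variant: threadArray maps each launched thread directly to the point it works on,
// replacing the per-thread binary search left commented out below.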
__global__ void kernelNDGridIndexGlobalSortedCellsDynamicThreadsFixed(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int *offset,
unsigned int *batchNum,
DTYPE * database,
DTYPE * sortedCells,
unsigned int * threadArray,
unsigned int nbTotalThreads,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x);
if (nbTotalThreads <= tid){ //guard against extra threads in the last block (assumes threadArray holds nbTotalThreads entries)
return;
}
unsigned int tPerPoint = 1; //threads cooperating on this point; defaults to 1 since threadArray only yields the point id
//unsigned int pointToWork = binary_search(threadArray, 0, (*N), globalId);
unsigned int pointToWork = threadArray[tid];
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
//point[i] = sortedCells[pointOffset + i];
point[i] = sortedCells[pointToWork * GPUNUMDIM + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//don't find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
if(1 < tPerPoint)
{
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointToWork, nDCellIDs, tPerPoint, threadIdx.x % tPerPoint);
}else{
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index,
indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointToWork, false, nDCellIDs);
}
} //end loop body
}
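// V3: the number of threads cooperating on each point is passed as a kernel argument
// (nbThreadsPerPoint) instead of being read from a per-point array.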
__global__ void kernelNDGridIndexGlobalSortedCellsDynamicThreadsV3(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal,
int nbThreadsPerPoint)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / nbThreadsPerPoint;
if (*N <= tid){
return;
}
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedCells[pointOffset + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//don't find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
if(1 < nbThreadsPerPoint)
{
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, nbThreadsPerPoint, threadIdx.x % nbThreadsPerPoint);
}else{
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index,
indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointIdx, false, nDCellIDs);
}
} //end loop body
}
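// Warp-aggregated atomic increment: one thread of the coalesced group performs a single atomicAdd
// for the whole group, broadcasts the base value, and each thread adds its rank to it.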
__device__ int atomicAggInc(int *ctr) {
auto g = coalesced_threads();
int warp_res;
if(g.thread_rank() == 0)
warp_res = atomicAdd(ctr, g.size());
return g.shfl(warp_res, 0) + g.thread_rank();
}
__device__ int counter = 0;
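// Work-queue kernel: each point id is taken from the global counter with an atomicAdd instead of
// being derived from the thread id, so points are handed out in sortedCells order as threads (or
// tiles of THREADPERPOINT threads) become available; originPointIndex maps the queue position back
// to the point's id in the dataset when storing result pairs.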
__global__ void kernelNDGridIndexGlobalWorkQueue(
unsigned int * debug1,
unsigned int * debug2,
unsigned int * N,
unsigned int * offset,
unsigned int * batchNum,
DTYPE * database,
DTYPE * sortedCells,
unsigned int * originPointIndex,
DTYPE * epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal,
unsigned int * elementToWork,
unsigned int nbPoints)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
#if THREADPERPOINT == 1
unsigned int pointId = atomicAdd(&counter, int(1));
#else
// __shared__ int pointIdShared;
// if(0 == threadIdx.x)
// {
// pointIdShared = atomicAdd(&counter, int(BLOCKSIZE / THREADPERPOINT));
// }
// __syncthreads();
// unsigned int pointId = pointIdShared + (threadIdx.x / THREADPERPOINT);
// thread_block_tile<32> tile32 = tiled_partition<32>(this_thread_block());
// unsigned int pointId;
// if(0 == tile32.thread_rank())
// {
// pointId = atomicAdd(&counter, int(32 / THREADPERPOINT));
// }
// pointId = tile32.shfl(pointId, 0) + (tile32.thread_rank() / THREADPERPOINT);
// coalesced_group active = coalesced_threads();
// unsigned int pointId;
// if(0 == active.thread_rank())
// {
// pointId = atomicAdd(&counter, int(active.size() / THREADPERPOINT));
// }
// pointId = active.shfl(pointId, 0) + (active.thread_rank() / THREADPERPOINT);
// thread_block_tile<THREADPERPOINT> tile = tiled_partition<THREADPERPOINT>(coalesced_threads());
auto tile = tiled_partition(coalesced_threads(), THREADPERPOINT);
unsigned int pointId;
if(0 == tile.thread_rank())
{
pointId = atomicAdd(&counter, int(1));
}
pointId = tile.shfl(pointId, 0);
#endif
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
//point[i] = sortedCells[pointOffset + i];
point[i] = sortedCells[pointId * GPUNUMDIM + i];
//point[i] = database[pointId * GPUNUMDIM + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//don't find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
#if THREADPERPOINT > 1
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, originPointIndex[pointId], nDCellIDs, THREADPERPOINT, tile.thread_rank());
#else
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index,
indexLookupArr, point, cnt, pointIDKey, pointInDistVal, originPointIndex[pointId], false, nDCellIDs);
#endif
} //end loop body
}
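// Work-queue kernel combined with the linear-ID Unicomp scheme: the origin cell is evaluated first,
// then only adjacent cells with a larger linear ID, which store both symmetric result pairs.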
__global__ void kernelNDGridIndexGlobalWorkQueueLidUnicomp(
unsigned int * debug1,
unsigned int * debug2,
unsigned int * N,
unsigned int * offset,
unsigned int * batchNum,
DTYPE * database,
DTYPE * sortedCells,
DTYPE * epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal,
unsigned int * elementToWork,
unsigned int nbPoints)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
#if THREADPERPOINT == 1
unsigned int pointId = atomicAdd(&counter, int(1));
#else
// __shared__ int pointIdShared;
// if(0 == threadIdx.x)
// {
// pointIdShared = atomicAdd(&counter, int(BLOCKSIZE / THREADPERPOINT));
// }
// __syncthreads();
// unsigned int pointId = pointIdShared + (threadIdx.x / THREADPERPOINT);
// thread_block_tile<32> tile32 = tiled_partition<32>(this_thread_block());
// unsigned int pointId;
// if(0 == tile32.thread_rank())
// {
// pointId = atomicAdd(&counter, int(32 / THREADPERPOINT));
// }
// pointId = tile32.shfl(pointId, 0) + (tile32.thread_rank() / THREADPERPOINT);
// coalesced_group active = coalesced_threads();
// unsigned int pointId;
// if(0 == active.thread_rank())
// {
// pointId = atomicAdd(&counter, int(active.size() / THREADPERPOINT));
// }
// pointId = active.shfl(pointId, 0) + (active.thread_rank() / THREADPERPOINT);
// thread_block_tile<THREADPERPOINT> tile = tiled_partition<THREADPERPOINT>(coalesced_threads());
auto tile = tiled_partition(coalesced_threads(), THREADPERPOINT);
unsigned int pointId;
if(0 == tile.thread_rank())
{
pointId = atomicAdd(&counter, int(1));
}
pointId = tile.shfl(pointId, 0);
#endif
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
//point[i] = sortedCells[pointOffset + i];
point[i] = sortedCells[pointId * GPUNUMDIM + i];
//point[i] = database[pointId * GPUNUMDIM + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//don't find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
uint64_t cellID = getLinearID_nDimensionsGPU(nDCellIDs, nCells, NUMINDEXEDDIM);
for(int i = 0; i < NUMINDEXEDDIM; i++) {
indexes[i] = nDCellIDs[i];
}
#if THREADPERPOINT > 1
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointId, nDCellIDs, THREADPERPOINT, tile.thread_rank());
#else
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointId, nDCellIDs, 1, 0);
#endif
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
uint64_t neighborID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
if(cellID < neighborID)
{
#if THREADPERPOINT > 1
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointId, nDCellIDs, THREADPERPOINT, tile.thread_rank());
#else
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointId, nDCellIDs, 1, 0);
#endif
}
} //end loop body
}
| 961dead1edb8d874ab5a0fd6953ca1fd9ebb722d.cu | #include "kernel.h"
#include "structs.h"
#include <math.h>
#include <thrust/execution_policy.h>
#include <thrust/binary_search.h>
#include <cooperative_groups.h>
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
#include "params.h"
//namespace cg = cooperative_groups;
using namespace cooperative_groups;
__device__ void print(unsigned int tid, unsigned int value)
{
if(0 == tid)
{
printf("threadIdx.x 0, value = %d\n", value);
}
}
/******************************************************************************/
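// For each non-empty cell, take its first point as a representative, find the adjacent non-empty
// cells, and sum the number of points they contain; the (nbPoints, cellId) pairs written to
// sortedCells are the per-cell workload estimates used to sort the cells by workload.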
__global__ void sortByWorkLoad(
DTYPE* database,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
schedulingCell * sortedCells,
DTYPE* sortedSet)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(*nNonEmptyCells <= tid)
{
return;
}
int cell = gridCellLookupArr[tid].idx;
int nbNeighborPoints = 0;
int tmpId = indexLookupArr[ index[cell].indexmin ];
DTYPE point[NUMINDEXEDDIM];
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for(int n = 0; n < NUMINDEXEDDIM; n++)
{
point[n] = database[tmpId * NUMINDEXEDDIM + n];
nDCellIDs[n] = (point[n] - minArr[n]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[n] - 1);
unsigned int nDMaxCellIDs = min(nCells[n] - 1, nDCellIDs[n] + 1);
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (1 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[n] = nDMinCellIDs;
rangeFilteredCellIdsMax[n] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (1 == foundMin && 0 == foundMax){
rangeFilteredCellIdsMin[n] = nDMinCellIDs;
rangeFilteredCellIdsMax[n] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (0 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[n] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[n] = nDMaxCellIDs;
//printf("\nmax not min");
}
else{
//printf("\nneither");
rangeFilteredCellIdsMin[n] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[n] = nDMinCellIDs + 1;
}
}
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
uint64_t cellID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
struct gridCellLookup tmp;
tmp.gridLinearID = cellID;
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
struct gridCellLookup * resultBinSearch = thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
nbNeighborPoints += index[GridIndex].indexmax - index[GridIndex].indexmin + 1;
}
}
sortedCells[tid].nbPoints = nbNeighborPoints;
sortedCells[tid].cellId = cell;
}
/******************************************************************************/
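// Same workload estimate as sortByWorkLoad, but only counting adjacent cells whose linear ID is
// greater than or equal to the origin cell's, i.e., the cells the Lid-Unicomp kernels will visit.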
__global__ void sortByWorkLoadLidUnicomp(
DTYPE* database,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
schedulingCell * sortedCells,
DTYPE* sortedSet)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(*nNonEmptyCells <= tid)
{
return;
}
int cell = gridCellLookupArr[tid].idx;
int nbNeighborPoints = 0;
int tmpId = indexLookupArr[ index[cell].indexmin ];
DTYPE point[NUMINDEXEDDIM];
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for(int n = 0; n < NUMINDEXEDDIM; n++)
{
point[n] = database[tmpId * NUMINDEXEDDIM + n];
nDCellIDs[n] = (point[n] - minArr[n]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[n] - 1);
unsigned int nDMaxCellIDs = min(nCells[n] - 1, nDCellIDs[n] + 1);
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (n * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (1 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[n] = nDMinCellIDs;
rangeFilteredCellIdsMax[n] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (1 == foundMin && 0 == foundMax){
rangeFilteredCellIdsMin[n] = nDMinCellIDs;
rangeFilteredCellIdsMax[n] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (0 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[n] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[n] = nDMaxCellIDs;
//printf("\nmax not min");
}
else{
//printf("\nneither");
rangeFilteredCellIdsMin[n] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[n] = nDMinCellIDs + 1;
}
}
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = nDCellIDs[x];
}
uint64_t originCellID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
uint64_t cellID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
if(originCellID <= cellID)
{
struct gridCellLookup tmp;
tmp.gridLinearID = cellID;
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
struct gridCellLookup * resultBinSearch = thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
nbNeighborPoints += index[GridIndex].indexmax - index[GridIndex].indexmin + 1;
}
}
}
sortedCells[tid].nbPoints = nbNeighborPoints;
sortedCells[tid].cellId = cell;
}
/******************************************************************************/
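// Row-major linearization of an n-dimensional cell index:
// ID = indexes[0] + indexes[1] * dimLen[0] + indexes[2] * dimLen[0] * dimLen[1] + ...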
__device__ uint64_t getLinearID_nDimensionsGPU(
unsigned int * indexes,
unsigned int * dimLen,
unsigned int nDimensions)
{
uint64_t offset = 0;
uint64_t multiplier = 1;
for (int i = 0; i < nDimensions; i++)
{
offset += (uint64_t) indexes[i] * multiplier;
multiplier *= dimLen[i];
}
return offset;
}
/******************************************************************************/
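// Recursive binary search over the monotonically increasing threadArray: returns the slot mid such
// that threadArray[mid] <= value < threadArray[mid + 1].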
__device__ unsigned int binary_search(
const unsigned int * threadArray,
unsigned int begin,
unsigned int end,
const unsigned int value)
{
unsigned int mid = (begin + end) / 2;
if(threadArray[mid] <= value && value < threadArray[mid + 1])
{
return mid;
}else{
if(threadArray[mid] < value)
{
return binary_search(threadArray, mid + 1, end, value);
}else{
return binary_search(threadArray, begin, mid - 1, value);
}
}
/*
while(begin <= end)
{
unsigned int mid = (begin + end) / 2;
if(threadArray[mid] <= value && value < threadArray[mid + 1])
{
(*tPerPoint) = threadArray[mid + 1] - threadArray[mid];
return mid;
}else{
if(threadArray[mid] < value)
{
begin = mid + 1;
}else{
end = mid - 1;
}
}
}
(*tPerPoint) = 1;
return end;
*/
}
/******************************************************************************/
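// Distance test between the query point and candidate k: on success, a result slot is reserved with
// an atomicAdd on cnt and the (query, candidate) pair is appended to the key/value buffers.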
__forceinline__ __device__ void evalPoint(
unsigned int* indexLookupArr,
int k,
DTYPE* database,
DTYPE* epsilon,
DTYPE* point,
unsigned int* cnt,
int* pointIDKey,
int* pointInDistVal,
int pointIdx,
bool differentCell)
{
DTYPE runningTotalDist = 0;
unsigned int dataIdx = indexLookupArr[k];
for(int l = 0; l < GPUNUMDIM; l++){
runningTotalDist += ( database[dataIdx * GPUNUMDIM + l] - point[l])
* (database[dataIdx * GPUNUMDIM + l] - point[l] );
}
if(sqrt(runningTotalDist) <= (*epsilon)){
//if(runningTotalDist <= ((*epsilon) * (*epsilon))){
unsigned int idx = atomicAdd(cnt, int(1));
// printf("tid = %d, tidx = %d, idx = %d\n", blockIdx.x * BLOCKSIZE + threadIdx.x, threadIdx.x, idx);
pointIDKey[idx] = pointIdx; // --> HERE
pointInDistVal[idx] = dataIdx;
if(differentCell) {
unsigned int idx = atomicAdd(cnt, int(1));
pointIDKey[idx] = dataIdx;
pointInDistVal[idx] = pointIdx;
}
}
}
/******************************************************************************/
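// Evaluate one adjacent cell for the standard (non-Unicomp) kernels: if the cell is non-empty,
// every point it contains is tested against the query point.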
__device__ void evaluateCell(
unsigned int* nCells,
unsigned int* indexes,
struct gridCellLookup * gridCellLookupArr,
unsigned int* nNonEmptyCells,
DTYPE* database, DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
DTYPE* point, unsigned int* cnt,
int* pointIDKey,
int* pointInDistVal,
int pointIdx,
bool differentCell,
unsigned int* nDCellIDs)
{
//compare the linear ID with the gridCellLookupArr to determine if the cell is non-empty: this can happen because one point says
//a cell in a particular dimension is non-empty, but that's because it was related to a different point (not adjacent to the query point)
uint64_t calcLinearID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
struct gridCellLookup tmp;
tmp.gridLinearID = calcLinearID;
//find if the cell is non-empty
if(thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
//compute the neighbors for the adjacent non-empty cell
struct gridCellLookup * resultBinSearch = thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
for(int k = index[GridIndex].indexmin; k <= index[GridIndex].indexmax; k++){
evalPoint(indexLookupArr, k, database, epsilon, point, cnt, pointIDKey, pointInDistVal, pointIdx, differentCell);
}
}
}
/******************************************************************************/
__forceinline__ __device__ void evalPointUnicompOrigin(
unsigned int* indexLookupArr,
int k,
DTYPE* database,
DTYPE* epsilon,
DTYPE* point,
unsigned int* cnt,
int* pointIDKey,
int* pointInDistVal,
int pointIdx)
{
DTYPE runningTotalDist = 0;
unsigned int dataIdx = indexLookupArr[k];
for (int l = 0; l < GPUNUMDIM; l++)
{
runningTotalDist += (database[dataIdx * GPUNUMDIM + l] - point[l]) * (database[dataIdx * GPUNUMDIM + l] - point[l]);
}
if (sqrt(runningTotalDist) <= (*epsilon)){
//if(runningTotalDist <= ((*epsilon) * (*epsilon))){
unsigned int idx = atomicAdd(cnt, int(1));
//printf("\n\nLOL CA VA TROP LOIN (%d)\n\n", idx);
// assert(idx < 2000000);
pointIDKey[idx] = pointIdx; // --> HERE
pointInDistVal[idx] = dataIdx;
}
}
/******************************************************************************/
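// Unicomp origin-cell evaluation: the cell's points are split into contiguous chunks among the
// nbThreads cooperating threads (the first threads take one extra point when the count does not
// divide evenly), and only the (query, candidate) direction is stored; the symmetric pair is
// produced when the other point acts as the query.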
__device__ void evaluateCellUnicompOrigin(
unsigned int* nCells,
unsigned int* indexes,
struct gridCellLookup * gridCellLookupArr,
unsigned int* nNonEmptyCells,
DTYPE* database, DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
DTYPE* point, unsigned int* cnt,
int* pointIDKey,
int* pointInDistVal,
int pointIdx,
unsigned int* nDCellIDs,
unsigned int nbThreads,
unsigned int numThread)
{
//compare the linear ID with the gridCellLookupArr to determine if the cell is non-empty: this can happen because one point says
//a cell in a particular dimension is non-empty, but that's because it was related to a different point (not adjacent to the query point)
uint64_t calcLinearID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
struct gridCellLookup tmp;
tmp.gridLinearID = calcLinearID;
//find if the cell is non-empty
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
//compute the neighbors for the adjacent non-empty cell
struct gridCellLookup * resultBinSearch = thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
int begin = index[GridIndex].indexmin;
int end = index[GridIndex].indexmax;
int nbElem = end - begin + 1;
if(numThread < nbElem)
{
int size = nbElem / nbThreads;
int oneMore = nbElem - (size * nbThreads);
if(nbElem == (size * nbThreads))
{
begin += size * numThread;
end = begin + size - 1;
}else{
begin += numThread * size + ((numThread < oneMore)?numThread:oneMore);
end = begin + size - 1 + (numThread < oneMore);
}
for(int k = begin; k <= end; k++)
{
evalPointUnicompOrigin(indexLookupArr, k, database, epsilon, point, cnt, pointIDKey, pointInDistVal, pointIdx);
}
}
}
}
/******************************************************************************/
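// Adjacent-cell variant of the distance test: each match reserves two result slots and stores both
// (query, candidate) and (candidate, query), since the adjacent cell pair is visited only once.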
__forceinline__ __device__ void evalPointUnicompAdjacent(
unsigned int* indexLookupArr,
int k,
DTYPE* database,
DTYPE* epsilon,
DTYPE* point,
unsigned int* cnt,
int* pointIDKey,
int* pointInDistVal,
int pointIdx)
{
DTYPE runningTotalDist = 0;
unsigned int dataIdx = indexLookupArr[k];
for (int l = 0; l < GPUNUMDIM; l++)
{
runningTotalDist += (database[dataIdx * GPUNUMDIM + l] - point[l]) * (database[dataIdx * GPUNUMDIM + l] - point[l]);
}
if (sqrt(runningTotalDist) <= (*epsilon)){
//if(runningTotalDist <= ((*epsilon) * (*epsilon))){
unsigned int idx = atomicAdd(cnt, int(2));
pointIDKey[idx] = pointIdx;
pointInDistVal[idx] = dataIdx;
pointIDKey[idx + 1] = dataIdx;
pointInDistVal[idx + 1] = pointIdx;
}
}
/******************************************************************************/
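// Same chunked work-splitting as evaluateCellUnicompOrigin, but matches are recorded in both
// directions via evalPointUnicompAdjacent.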
__device__ void evaluateCellUnicompAdjacent(
unsigned int* nCells,
unsigned int* indexes,
struct gridCellLookup * gridCellLookupArr,
unsigned int* nNonEmptyCells,
DTYPE* database, DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
DTYPE* point, unsigned int* cnt,
int* pointIDKey,
int* pointInDistVal,
int pointIdx,
unsigned int* nDCellIDs,
unsigned int nbThreads,
unsigned int numThread)
{
//compare the linear ID with the gridCellLookupArr to determine if the cell is non-empty: this can happen because one point says
//a cell in a particular dimension is non-empty, but that's because it was related to a different point (not adjacent to the query point)
uint64_t calcLinearID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
struct gridCellLookup tmp;
tmp.gridLinearID = calcLinearID;
//find if the cell is non-empty
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
//compute the neighbors for the adjacent non-empty cell
struct gridCellLookup * resultBinSearch = thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
int begin = index[GridIndex].indexmin;
int end = index[GridIndex].indexmax;
int nbElem = end - begin + 1;
if(numThread < nbElem)
{
int size = nbElem / nbThreads;
int oneMore = nbElem - (size * nbThreads);
if(nbElem == (size * nbThreads))
{
begin += size * numThread;
end = begin + size - 1;
}else{
begin += numThread * size + ((numThread < oneMore)?numThread:oneMore);
end = begin + size - 1 + (numThread < oneMore);
}
for(int k = begin; k <= end; k++)
{
evalPointUnicompAdjacent(indexLookupArr, k, database, epsilon, point, cnt, pointIDKey, pointInDistVal, pointIdx);
}
}
}
}
/******************************************************************************/
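// Batch estimator: processes every (*sampleOffset)-th point and only increments cnt for each
// neighbor found (the key/value stores are commented out), so the total result-set size can be
// estimated before launching the batched kernels.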
__global__ void kernelNDGridIndexBatchEstimatorOLD(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * sampleOffset,
DTYPE* database,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets)
{
unsigned int tid = threadIdx.x + (blockIdx.x * BLOCKSIZE);
if (tid >= *N){
return;
}
unsigned int pointID = tid * (*sampleOffset) * (GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = database[pointID + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int nDMinCellIDs[NUMINDEXEDDIM];
unsigned int nDMaxCellIDs[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
nDMinCellIDs[i] = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
nDMaxCellIDs[i] = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
}
///////////////////////////
//Take the intersection of the ranges for each dimension between
//the point and the filtered set of cells in each dimension
//Only the cell ranges in each dimension that contain non-empty cells will be tested
///////////////////////////
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
//compare the point's range of cell IDs in each dimension to the filter mask
//only 2 possible values (you always find the middle point in the range), because that's the cell of the point itself
bool foundMin = 0;
bool foundMax = 0;
//we go through each dimension and compare the range of the query point's min/max cell ids to the filtered ones
//find out which ones in the range exist based on the min/max
//then determine the appropriate ranges
for (int i=0; i<NUMINDEXEDDIM; i++)
{
foundMin = 0;
foundMax = 0;
//for each dimension
//OPTIMIZE: WITH BINARY SEARCH LATER
// for (int dimFilterRng=gridCellNDMaskOffsets[(i*2)]; dimFilterRng<=gridCellNDMaskOffsets[(i*2)+1]; dimFilterRng++){
// if (gridCellNDMask[dimFilterRng]==nDMinCellIDs[i])
// foundMin=1;
// if (gridCellNDMask[dimFilterRng]==nDMaxCellIDs[i])
// foundMax=1;
// }
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMin=1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMax=1;
}
// cases:
// found the min and max
// found the min and not max
//found the max and not the min
//you don't find the min or max -- then only check the mid
//you always find the mid because it's in the cell of the point you're looking for
//NEED TO OPTIMIZE STILL
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
//printf("\nmax not min");
}
//don't find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++)
{
indexes[x] = loopRng[x];
// if (tid==0)
// printf("\ndim: %d, indexes: %d",x, indexes[x]);
}
uint64_t calcLinearID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
//compare the linear ID with the gridCellLookupArr to determine if the cell is non-empty: this can happen because one point says
//a cell in a particular dimension is non-empty, but that's because it was related to a different point (not adjacent to the query point)
struct gridCellLookup tmp;
tmp.gridLinearID = calcLinearID;
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
//in the GPU implementation we go directly to computing neighbors so that we don't need to
//store a buffer of the cells to check
//cellsToCheck->push_back(calcLinearID);
//HERE WE COMPUTE THE NEIGHBORS FOR THE CELL
//XXXXXXXXXXXXXXXXXXXXXXXXX
struct gridCellLookup * resultBinSearch=thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr+(*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
for (int k = index[GridIndex].indexmin; k <= index[GridIndex].indexmax; k++)
{
DTYPE runningTotalDist = 0;
unsigned int dataIdx = indexLookupArr[k];
for (int l = 0; l < GPUNUMDIM; l++)
{
runningTotalDist += (database[dataIdx * GPUNUMDIM + l] -point[l])
* (database[dataIdx * GPUNUMDIM + l] - point[l]);
}
if (sqrt(runningTotalDist) <= (*epsilon)){
unsigned int idx = atomicAdd(cnt, int(1));
// pointIDKey[idx]=tid;
// pointInDistVal[idx]=i;
//neighborTableCPUPrototype[queryPoint].neighbors.push_back(dataIdx);
}
}
}
//printf("\nLinear id: %d",calcLinearID);
} //end loop body
}
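//Illustrative sketch (not part of the original source): the per-dimension min/max/mid selection used
//above is repeated in every kernel in this file. A hedged refactoring could encapsulate it in a small
//__device__ helper like the one below; the name selectFilteredRangeSketch and its parameters are
//assumptions for illustration only. Example: with nDMinCellID = 4, nDMaxCellID = 6, foundMin = true
//and foundMax = false, the helper yields the range [4, 5] (the min cell plus the point's own cell).
__device__ inline void selectFilteredRangeSketch(bool foundMin, bool foundMax,
		unsigned int nDMinCellID, unsigned int nDMaxCellID,
		unsigned int *rangeMin, unsigned int *rangeMax)
{
	if (foundMin && foundMax){          //both boundary cells are non-empty in this dimension
		*rangeMin = nDMinCellID;
		*rangeMax = nDMaxCellID;
	}
	else if (foundMin){                 //only the lower boundary cell is non-empty
		*rangeMin = nDMinCellID;
		*rangeMax = nDMinCellID + 1;
	}
	else if (foundMax){                 //only the upper boundary cell is non-empty
		*rangeMin = nDMinCellID + 1;
		*rangeMax = nDMaxCellID;
	}
	else{                               //neither boundary is non-empty: keep only the middle (own) cell
		*rangeMin = nDMinCellID + 1;
		*rangeMax = nDMinCellID + 1;
	}
}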
__device__ int counterEstimator = 0;
__global__ void kernelNDGridIndexWorkQueueBatchEstimatorOLD(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * sampleOffset,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets)
{
unsigned int tid = blockIdx.x * BLOCKSIZE + threadIdx.x;
if (*N <= tid){
return;
}
//unsigned int pointID = tid * (*sampleOffset) * (GPUNUMDIM);
unsigned int pointID = atomicAdd(&counterEstimator, int(1));
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
// point[i] = database[pointID + i];
point[i] = sortedCells[pointID * GPUNUMDIM + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int nDMinCellIDs[NUMINDEXEDDIM];
unsigned int nDMaxCellIDs[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
nDMinCellIDs[i] = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
nDMaxCellIDs[i] = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
}
///////////////////////////
//Take the intersection of the ranges for each dimension between
//the point and the filtered set of cells in each dimension
	//Only the parts of each dimension's range that contain non-empty cells will be tested
///////////////////////////
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
//compare the point's range of cell IDs in each dimension to the filter mask
	//the middle cell of the range is always found because it is the cell containing the point itself
bool foundMin = 0;
bool foundMax = 0;
	//we go through each dimension and compare the range of the query point's min/max cell ids to the filtered ones
//find out which ones in the range exist based on the min/max
//then determine the appropriate ranges
for (int i=0; i<NUMINDEXEDDIM; i++)
{
foundMin = 0;
foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMin=1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMax=1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
}
else if (foundMin == 1 && foundMax == 0){
			rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
}
else{
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++)
{
indexes[x] = loopRng[x];
}
uint64_t calcLinearID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
			//compare the linear ID against gridCellLookupArr to confirm the cell is actually non-empty: the per-dimension masks can
			//flag a cell as non-empty because of a point that is not adjacent to the query point, so the full grid lookup is the final check
struct gridCellLookup tmp;
tmp.gridLinearID = calcLinearID;
if (thrust::binary_search(thrust::seq, gridCellLookupArr, gridCellLookupArr + (*nNonEmptyCells), gridCellLookup(tmp)))
{
//in the GPU implementation we go directly to computing neighbors so that we don't need to
//store a buffer of the cells to check
//cellsToCheck->push_back(calcLinearID);
//HERE WE COMPUTE THE NEIGHBORS FOR THE CELL
//XXXXXXXXXXXXXXXXXXXXXXXXX
struct gridCellLookup * resultBinSearch = thrust::lower_bound(thrust::seq, gridCellLookupArr, gridCellLookupArr+(*nNonEmptyCells), gridCellLookup(tmp));
unsigned int GridIndex = resultBinSearch->idx;
for (int k = index[GridIndex].indexmin; k <= index[GridIndex].indexmax; k++)
{
DTYPE runningTotalDist = 0;
unsigned int dataIdx = indexLookupArr[k];
for (int l = 0; l < GPUNUMDIM; l++)
{
runningTotalDist += (database[dataIdx * GPUNUMDIM + l] - point[l])
* (database[dataIdx * GPUNUMDIM + l] - point[l]);
}
if (sqrt(runningTotalDist) <= (*epsilon)){
unsigned int idx = atomicAdd(cnt, int(1));
// pointIDKey[idx]=tid;
// pointInDistVal[idx]=i;
//neighborTableCPUPrototype[queryPoint].neighbors.push_back(dataIdx);
}
}
}
//printf("\nLinear id: %d",calcLinearID);
} //end loop body
}
// Global memory kernel - Initial version ("GPU")
__global__ void kernelNDGridIndexGlobal(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
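	//Hedged worked example (values assumed for illustration): with *offset = 4 batches and *batchNum = 1,
	//threads tid = 0, 1, 2 work on points 1, 5, 9, and pointOffset is simply that point id scaled by
	//GPUNUMDIM to index into the flat coordinate array.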
//printf("tid %d, working on point %d,\n", tid, pointIdx);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
#if SORT_BY_WORKLOAD
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedCells[pointOffset + i];
}
#else
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = database[pointOffset + i];
}
#endif
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int nDMinCellIDs[NUMINDEXEDDIM];
unsigned int nDMaxCellIDs[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
nDMinCellIDs[i] = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
nDMaxCellIDs[i] = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
}
///////////////////////////
//Take the intersection of the ranges for each dimension between
//the point and the filtered set of cells in each dimension
	//Only the parts of each dimension's range that contain non-empty cells will be tested
///////////////////////////
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
//compare the point's range of cell IDs in each dimension to the filter mask
	//the middle cell of the range is always found because it is the cell containing the point itself
bool foundMin = 0;
bool foundMax = 0;
//we go through each dimension and compare the range of the query points min/max cell ids to the filtered ones
//find out which ones in the range exist based on the min/max
//then determine the appropriate ranges
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
foundMin = 0;
foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[(i * 2)],
gridCellNDMask + gridCellNDMaskOffsets[(i * 2) + 1] + 1, nDMinCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[(i * 2)],
gridCellNDMask + gridCellNDMaskOffsets[(i * 2) + 1] + 1, nDMaxCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
// cases:
// found the min and max
// found the min and not max
//found the max and not the min
//you don't find the min or max -- then only check the mid
//you always find the mid because it's in the cell of the point you're looking for
//NEED TO OPTIMIZE STILL
if (1 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
//printf("\nmin and max");
}
else if (1 == foundMin && 0 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
//printf("\nmin not max");
}
else if (0 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
//printf("\nmax not min");
}
//dont find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
#if THREADPERPOINT > 1
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
#else
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon,
index, indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointIdx, false, nDCellIDs);
#endif
} //end loop body
}
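//Hedged usage sketch (assumption, not taken from the original host code): a typical launch for the
//kernel above would assign THREADPERPOINT consecutive threads to each query point, e.g.:
//  unsigned int nBlocks = ceil((batchSize * THREADPERPOINT) / (float)BLOCKSIZE);
//  kernelNDGridIndexGlobal<<<nBlocks, BLOCKSIZE>>>(debug1, debug2, dN, dOffset, dBatchNum,
//      dDatabase, dSortedCells, dEpsilon, dIndex, dIndexLookupArr, dGridCellLookupArr,
//      dMinArr, dNCells, dCnt, dNNonEmptyCells, dGridCellNDMask, dGridCellNDMaskOffsets,
//      dPointIDKey, dPointInDistVal);
//where batchSize, dN, dOffset, etc. are hypothetical host-side names for the device buffers.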
// Global memory kernel - Unicomp version ("Unicomp")
__global__ void kernelNDGridIndexGlobalUnicomp(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
#if SORT_BY_WORKLOAD
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedCells[pointOffset + i];
}
#else
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = database[pointOffset + i];
}
#endif
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int nDMinCellIDs[NUMINDEXEDDIM];
unsigned int nDMaxCellIDs[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
nDMinCellIDs[i] = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
nDMaxCellIDs[i] = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
}
///////////////////////////
//Take the intersection of the ranges for each dimension between
//the point and the filtered set of cells in each dimension
	//Only the parts of each dimension's range that contain non-empty cells will be tested
///////////////////////////
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
//compare the point's range of cell IDs in each dimension to the filter mask
	//the middle cell of the range is always found because it is the cell containing the point itself
bool foundMin = 0;
bool foundMax = 0;
//we go through each dimension and compare the range of the query points min/max cell ids to the filtered ones
//find out which ones in the range exist based on the min/max
//then determine the appropriate ranges
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
foundMin = 0;
foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs[i])){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
// cases:
// found the min and max
// found the min and not max
//found the max and not the min
//you don't find the min or max -- then only check the mid
//you always find the mid because it's in the cell of the point you're looking for
//NEED TO OPTIMIZE STILL
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i];
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs[i];
//printf("\nmax not min");
}
//dont find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs[i] + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs[i] + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for(int i = 0; i < NUMINDEXEDDIM; i++) {
indexes[i] = nDCellIDs[i];
}
#if THREADPERPOINT > 1
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
#include "stamploopsV2.h"
#else
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon,
index, indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointIdx, false, nDCellIDs);
#include "stamploops.h"
#endif
}
// Global memory kernel - B-Unicomp version ("B-Unicomp")
__global__ void kernelNDGridIndexGlobalBUnicomp(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
#if SORT_BY_WORKLOAD
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedCells[pointOffset + i];
}
#else
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = database[pointOffset + i];
}
#endif
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//dont find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
#if NUMINDEXEDDIM==2
indexes[0] = nDCellIDs[0];
indexes[1] = nDCellIDs[1];
unsigned int colorId = nDCellIDs[0] + nDCellIDs[1];
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
for(loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for(loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
{
if( ( (1 == colorId % 2) && (nDCellIDs[1] <= loopRng[1]) && (nDCellIDs[0] != loopRng[0]) )
|| ( (0 == colorId % 2) && ((nDCellIDs[1] < loopRng[1]) || (loopRng[1] < nDCellIDs[1] && loopRng[0] == nDCellIDs[0])) ) ) // ( odd => red pattern ) || ( even => green pattern )
{
indexes[0] = loopRng[0];
indexes[1] = loopRng[1];
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
}
}
#else
#if NUMINDEXEDDIM==3
indexes[0] = nDCellIDs[0];
indexes[1] = nDCellIDs[1];
indexes[2] = nDCellIDs[2];
unsigned int colorId = nDCellIDs[0] + nDCellIDs[1] + nDCellIDs[2];
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
for(loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for(loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
for(loopRng[2] = rangeFilteredCellIdsMin[2]; loopRng[2] <= rangeFilteredCellIdsMax[2]; loopRng[2]++)
{
if( ( (1 == colorId % 2) && ( (nDCellIDs[0] != loopRng[0] && nDCellIDs[1] <= loopRng[1] && nDCellIDs[2] <= loopRng[2])
|| (nDCellIDs[0] == loopRng[0] && nDCellIDs[1] <= loopRng[1] && nDCellIDs[2] < loopRng[2])
|| (nDCellIDs[1] < loopRng[1] && loopRng[2] < nDCellIDs[2]) ) )
|| ( (0 == colorId % 2) && ( (nDCellIDs[1] < loopRng[1] && nDCellIDs[2] <= loopRng[2])
|| (nDCellIDs[0] == loopRng[0] && loopRng[1] < nDCellIDs[1] && nDCellIDs[2] <= loopRng[2])
|| (nDCellIDs[0] != loopRng[0] && nDCellIDs[1] < loopRng[1] && loopRng[2] < nDCellIDs[2])
|| (nDCellIDs[1] == loopRng[1] && nDCellIDs[2] < loopRng[2]) ) ) )
{
indexes[0] = loopRng[0];
indexes[1] = loopRng[1];
indexes[2] = loopRng[2];
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
}
}
#endif
#endif
}
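//Hedged reading of the 2D parity test above (illustration only): colorId is the sum of the origin
//cell's coordinates, so adjacent cells alternate parity like a checkerboard. For an origin cell
//(2,3) (colorId = 5, odd) the loop evaluates only the neighbors (1,3), (3,3), (1,4) and (3,4); for
//an origin cell (2,2) (colorId = 4, even) it evaluates (1,3), (2,3), (3,3) and (2,1). Each origin
//also evaluates itself via evaluateCellUnicompOrigin, so every pair of adjacent cells is evaluated
//from exactly one side, which is the unidirectional comparison the B-Unicomp scheme relies on.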
// Global memory kernel - Linear ID comparison (Need to find a name : L-Unicomp ? Lin-Unicomp ? LId-Unicomp ?)
__global__ void kernelNDGridIndexGlobalLinearIDUnicomp(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
#if SORT_BY_WORKLOAD
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedCells[pointOffset + i];
}
#else
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = database[pointOffset + i];
}
#endif
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
// cases:
// found the min and max
// found the min and not max
//found the max and not the min
//you don't find the min or max -- then only check the mid
//you always find the mid because it's in the cell of the point you're looking for
//NEED TO OPTIMIZE STILL
if (1 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (1 == foundMin && 0 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (0 == foundMin && 1 == foundMax){
			rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//dont find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
uint64_t cellID = getLinearID_nDimensionsGPU(nDCellIDs, nCells, NUMINDEXEDDIM);
for(int i = 0; i < NUMINDEXEDDIM; i++) {
indexes[i] = nDCellIDs[i];
}
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
uint64_t neighborID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
if(cellID < neighborID)
{
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
}
} //end loop body
}
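//Note (added for clarity): the cellID < neighborID test above keeps exactly one direction of every
//pair of adjacent non-empty cells: the origin cell is handled by evaluateCellUnicompOrigin, and a
//neighbor is only evaluated from the cell with the smaller linear id, so each symmetric cell pair
//is visited once.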
// Global memory kernel - Sorting cells by workload (Need to find a name)
__global__ void kernelNDGridIndexGlobalSortedCells(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedCells[pointOffset + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//dont find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
#if THREADPERPOINT > 1
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, THREADPERPOINT, threadIdx.x % THREADPERPOINT);
#else
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index,
indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointIdx, false, nDCellIDs);
#endif
} //end loop body
}
__global__ void kernelNDGridIndexGlobalSortedCellsDynamicThreads(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
unsigned int * sortedCells,
unsigned int * sortedCellsNbThreads,
unsigned int * sortedCellsNbThreadsBefore,
unsigned int nbTotalThreads,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x);
/*if (nbTotalThreads <= tid){
return;
}*/
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
if(nbTotalThreads <= pointOffset){
return;
}
//make a local copy of the point
DTYPE point[GPUNUMDIM];
unsigned int dataIdx = indexLookupArr[ sortedCells[pointOffset] ];
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = database[ dataIdx * GPUNUMDIM + i ];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
// cases:
// found the min and max
// found the min and not max
//found the max and not the min
//you don't find the min or max -- then only check the mid
//you always find the mid because it's in the cell of the point you're looking for
//NEED TO OPTIMIZE STILL
if (1 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (1 == foundMin && 0 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (0 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//dont find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
uint64_t cellID = getLinearID_nDimensionsGPU(nDCellIDs, nCells, NUMINDEXEDDIM);
for(int i = 0; i < NUMINDEXEDDIM; i++) {
indexes[i] = nDCellIDs[i];
}
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, sortedCellsNbThreads[pointOffset], sortedCellsNbThreadsBefore[pointOffset]);
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
uint64_t neighborID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
if(cellID < neighborID)
{
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, sortedCellsNbThreads[pointOffset], sortedCellsNbThreadsBefore[pointOffset]);
}
} //end loop body
}
__global__ void kernelNDGridIndexGlobalSortedCellsDynamicThreadsV2(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE * sortedSet,
unsigned int * sortedCellsNbThreads,
unsigned int * sortedCellsNbThreadsBefore,
unsigned int nbTotalThreads,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x);
/*if (nbTotalThreads <= tid){
return;
}*/
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
if(nbTotalThreads <= pointIdx){
return;
}
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedSet[pointOffset + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
// cases:
// found the min and max
// found the min and not max
//found the max and not the min
//you don't find the min or max -- then only check the mid
//you always find the mid because it's in the cell of the point you're looking for
//NEED TO OPTIMIZE STILL
if (1 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (1 == foundMin && 0 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (0 == foundMin && 1 == foundMax){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//dont find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
uint64_t cellID = getLinearID_nDimensionsGPU(nDCellIDs, nCells, NUMINDEXEDDIM);
for(int i = 0; i < NUMINDEXEDDIM; i++) {
indexes[i] = nDCellIDs[i];
}
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, sortedCellsNbThreads[pointIdx], sortedCellsNbThreadsBefore[pointIdx]);
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
uint64_t neighborID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
if(cellID < neighborID)
{
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, sortedCellsNbThreads[pointIdx], sortedCellsNbThreadsBefore[pointIdx]);
}
} //end loop body
}
__global__ void kernelNDGridIndexGlobalSortedCellsDynamicThreadsFixed(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int *offset,
unsigned int *batchNum,
DTYPE * database,
DTYPE * sortedCells,
unsigned int * threadArray,
unsigned int nbTotalThreads,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x);
	unsigned int tPerPoint = 1;	//assumed default of one thread per point (tPerPoint is not otherwise assigned in this kernel, so the branch below would read an uninitialized value)
//unsigned int pointToWork = binary_search(threadArray, 0, (*N), globalId);
unsigned int pointToWork = threadArray[tid];
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
//point[i] = sortedCells[pointOffset + i];
point[i] = sortedCells[pointToWork * GPUNUMDIM + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++)
{
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//dont find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
if(1 < tPerPoint)
{
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointToWork, nDCellIDs, tPerPoint, threadIdx.x % tPerPoint);
}else{
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index,
indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointToWork, false, nDCellIDs);
}
} //end loop body
}
__global__ void kernelNDGridIndexGlobalSortedCellsDynamicThreadsV3(
unsigned int *debug1,
unsigned int *debug2,
unsigned int *N,
unsigned int * offset,
unsigned int *batchNum,
DTYPE* database,
DTYPE* sortedCells,
DTYPE* epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal,
int nbThreadsPerPoint)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / nbThreadsPerPoint;
if (*N <= tid){
return;
}
//the point id in the dataset
unsigned int pointIdx = tid * (*offset) + (*batchNum);
//The offset into the database, taking into consideration the length of each dimension
unsigned int pointOffset = tid * (GPUNUMDIM) * (*offset) + (*batchNum) * (GPUNUMDIM);
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
point[i] = sortedCells[pointOffset + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//dont find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
if(1 < nbThreadsPerPoint)
{
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointIdx, nDCellIDs, nbThreadsPerPoint, threadIdx.x % nbThreadsPerPoint);
}else{
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index,
indexLookupArr, point, cnt, pointIDKey, pointInDistVal, pointIdx, false, nDCellIDs);
}
} //end loop body
}
__device__ int atomicAggInc(int *ctr) {
auto g = coalesced_threads();
int warp_res;
if(g.thread_rank() == 0)
warp_res = atomicAdd(ctr, g.size());
return g.shfl(warp_res, 0) + g.thread_rank();
}
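//Hedged note on atomicAggInc (illustration only): assuming the cooperative_groups namespace is in
//scope for coalesced_threads(), the currently active lanes of the warp form one coalesced group,
//lane 0 performs a single atomicAdd of the group size, and every lane then derives its own unique
//index from the broadcast base value. For example, if 7 lanes are active and *ctr was 100, lane 0
//adds 7 and the lanes receive 100, 101, ..., 106, replacing 7 separate atomic operations with one.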
__device__ int counter = 0;
__global__ void kernelNDGridIndexGlobalWorkQueue(
unsigned int * debug1,
unsigned int * debug2,
unsigned int * N,
unsigned int * offset,
unsigned int * batchNum,
DTYPE * database,
DTYPE * sortedCells,
unsigned int * originPointIndex,
DTYPE * epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal,
unsigned int * elementToWork,
unsigned int nbPoints)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
#if THREADPERPOINT == 1
unsigned int pointId = atomicAdd(&counter, int(1));
#else
// __shared__ int pointIdShared;
// if(0 == threadIdx.x)
// {
// pointIdShared = atomicAdd(&counter, int(BLOCKSIZE / THREADPERPOINT));
// }
// __syncthreads();
// unsigned int pointId = pointIdShared + (threadIdx.x / THREADPERPOINT);
// thread_block_tile<32> tile32 = tiled_partition<32>(this_thread_block());
// unsigned int pointId;
// if(0 == tile32.thread_rank())
// {
// pointId = atomicAdd(&counter, int(32 / THREADPERPOINT));
// }
// pointId = tile32.shfl(pointId, 0) + (tile32.thread_rank() / THREADPERPOINT);
// coalesced_group active = coalesced_threads();
// unsigned int pointId;
// if(0 == active.thread_rank())
// {
// pointId = atomicAdd(&counter, int(active.size() / THREADPERPOINT));
// }
// pointId = active.shfl(pointId, 0) + (active.thread_rank() / THREADPERPOINT);
// thread_block_tile<THREADPERPOINT> tile = tiled_partition<THREADPERPOINT>(coalesced_threads());
auto tile = tiled_partition(coalesced_threads(), THREADPERPOINT);
unsigned int pointId;
if(0 == tile.thread_rank())
{
pointId = atomicAdd(&counter, int(1));
}
pointId = tile.shfl(pointId, 0);
#endif
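	//Hedged worked example (assumption): with THREADPERPOINT = 4, each group of 4 consecutive threads
	//forms one tile, its rank-0 thread claims the next point id from the global work-queue counter,
	//and the id is broadcast with shfl so all 4 threads cooperate on the same query point.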
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
//point[i] = sortedCells[pointOffset + i];
point[i] = sortedCells[pointId * GPUNUMDIM + i];
//point[i] = database[pointId * GPUNUMDIM + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//dont find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
#if THREADPERPOINT > 1
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, originPointIndex[pointId], nDCellIDs, THREADPERPOINT, tile.thread_rank());
#else
evaluateCell(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index,
indexLookupArr, point, cnt, pointIDKey, pointInDistVal, originPointIndex[pointId], false, nDCellIDs);
#endif
} //end loop body
}
__global__ void kernelNDGridIndexGlobalWorkQueueLidUnicomp(
unsigned int * debug1,
unsigned int * debug2,
unsigned int * N,
unsigned int * offset,
unsigned int * batchNum,
DTYPE * database,
DTYPE * sortedCells,
DTYPE * epsilon,
struct grid * index,
unsigned int * indexLookupArr,
struct gridCellLookup * gridCellLookupArr,
DTYPE* minArr,
unsigned int * nCells,
unsigned int * cnt,
unsigned int * nNonEmptyCells,
unsigned int * gridCellNDMask,
unsigned int * gridCellNDMaskOffsets,
int * pointIDKey,
int * pointInDistVal,
unsigned int * elementToWork,
unsigned int nbPoints)
{
unsigned int tid = (blockIdx.x * BLOCKSIZE + threadIdx.x) / THREADPERPOINT;
if (*N <= tid){
return;
}
#if THREADPERPOINT == 1
unsigned int pointId = atomicAdd(&counter, int(1));
#else
// __shared__ int pointIdShared;
// if(0 == threadIdx.x)
// {
// pointIdShared = atomicAdd(&counter, int(BLOCKSIZE / THREADPERPOINT));
// }
// __syncthreads();
// unsigned int pointId = pointIdShared + (threadIdx.x / THREADPERPOINT);
// thread_block_tile<32> tile32 = tiled_partition<32>(this_thread_block());
// unsigned int pointId;
// if(0 == tile32.thread_rank())
// {
// pointId = atomicAdd(&counter, int(32 / THREADPERPOINT));
// }
// pointId = tile32.shfl(pointId, 0) + (tile32.thread_rank() / THREADPERPOINT);
// coalesced_group active = coalesced_threads();
// unsigned int pointId;
// if(0 == active.thread_rank())
// {
// pointId = atomicAdd(&counter, int(active.size() / THREADPERPOINT));
// }
// pointId = active.shfl(pointId, 0) + (active.thread_rank() / THREADPERPOINT);
// thread_block_tile<THREADPERPOINT> tile = tiled_partition<THREADPERPOINT>(coalesced_threads());
auto tile = tiled_partition(coalesced_threads(), THREADPERPOINT);
unsigned int pointId;
if(0 == tile.thread_rank())
{
pointId = atomicAdd(&counter, int(1));
}
pointId = tile.shfl(pointId, 0);
#endif
//make a local copy of the point
DTYPE point[GPUNUMDIM];
for (int i = 0; i < GPUNUMDIM; i++){
//point[i] = sortedCells[pointOffset + i];
point[i] = sortedCells[pointId * GPUNUMDIM + i];
//point[i] = database[pointId * GPUNUMDIM + i];
}
//calculate the coords of the Cell for the point
//and the min/max ranges in each dimension
unsigned int nDCellIDs[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMin[NUMINDEXEDDIM];
unsigned int rangeFilteredCellIdsMax[NUMINDEXEDDIM];
for (int i = 0; i < NUMINDEXEDDIM; i++){
nDCellIDs[i] = (point[i] - minArr[i]) / (*epsilon);
unsigned int nDMinCellIDs = max(0, nDCellIDs[i] - 1); //boundary conditions (don't go beyond cell 0)
unsigned int nDMaxCellIDs = min(nCells[i] - 1, nDCellIDs[i] + 1); //boundary conditions (don't go beyond the maximum number of cells)
bool foundMin = 0;
bool foundMax = 0;
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMinCellIDs)){ //extra +1 here is because we include the upper bound
foundMin = 1;
}
if(thrust::binary_search(thrust::seq, gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) ],
gridCellNDMask + gridCellNDMaskOffsets[ (i * 2) + 1 ] + 1, nDMaxCellIDs)){ //extra +1 here is because we include the upper bound
foundMax = 1;
}
if (foundMin == 1 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmin and max");
}
else if (foundMin == 1 && foundMax == 0){
rangeFilteredCellIdsMin[i] = nDMinCellIDs;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
//printf("\nmin not max");
}
else if (foundMin == 0 && foundMax == 1){
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMaxCellIDs;
//printf("\nmax not min");
}
//dont find either min or max
//get middle value only
else{
//printf("\nneither");
rangeFilteredCellIdsMin[i] = nDMinCellIDs + 1;
rangeFilteredCellIdsMax[i] = nDMinCellIDs + 1;
}
}
///////////////////////////////////////
//End taking intersection
//////////////////////////////////////
unsigned int indexes[NUMINDEXEDDIM];
unsigned int loopRng[NUMINDEXEDDIM];
uint64_t cellID = getLinearID_nDimensionsGPU(nDCellIDs, nCells, NUMINDEXEDDIM);
for(int i = 0; i < NUMINDEXEDDIM; i++) {
indexes[i] = nDCellIDs[i];
}
#if THREADPERPOINT > 1
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointId, nDCellIDs, THREADPERPOINT, tile.thread_rank());
#else
evaluateCellUnicompOrigin(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointId, nDCellIDs, 1, 0);
#endif
for (loopRng[0] = rangeFilteredCellIdsMin[0]; loopRng[0] <= rangeFilteredCellIdsMax[0]; loopRng[0]++)
for (loopRng[1] = rangeFilteredCellIdsMin[1]; loopRng[1] <= rangeFilteredCellIdsMax[1]; loopRng[1]++)
#include "kernelloops.h"
{ //beginning of loop body
for (int x = 0; x < NUMINDEXEDDIM; x++){
indexes[x] = loopRng[x];
}
uint64_t neighborID = getLinearID_nDimensionsGPU(indexes, nCells, NUMINDEXEDDIM);
if(cellID < neighborID)
{
#if THREADPERPOINT > 1
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointId, nDCellIDs, THREADPERPOINT, tile.thread_rank());
#else
evaluateCellUnicompAdjacent(nCells, indexes, gridCellLookupArr, nNonEmptyCells, database, epsilon, index, indexLookupArr,
point, cnt, pointIDKey, pointInDistVal, pointId, nDCellIDs, 1, 0);
#endif
}
} //end loop body
}
// 82e438bdeddf9af9a2ec0a7adbd3e673be2ea783.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include "common.h"
const int TILE_SIZE = 256; //Max no of threads in Block.
const int MAX_GRID_SIZE = 65535; //Max no of Blocks in a Grid.
// GPU Kernel-1 to perform row scaling.
__global__ void GaussianEliminationGPUKernelScaling(float* matrix, unsigned int numberOfRows, unsigned int numberOfColumns, float* outputMatrix, bool partialPivot, unsigned int row)
{
// Retrieve our coordinates in the block
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x + threadIdx.x; //Thread id calculation
if(threadId<numberOfColumns*numberOfRows){
if ((threadId/numberOfColumns)==row)
outputMatrix[threadId] = matrix[threadId] / matrix[numberOfColumns*row+row];
else
outputMatrix[threadId] = matrix[threadId];
}
}
// GPU Kernel-2 to perform the reduction in rows.
__global__ void GaussianEliminationGPUKernelReduction(float* Matrix, unsigned int numberOfRows, unsigned int numberOfColumns, float* outputMatrix, bool partialPivot, unsigned int row)
{
// Retrieve our coordinates in the block
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x + threadIdx.x; //threadid calculation
float pivot = Matrix[numberOfColumns*row+row]; //Calculates the pivot element for each row.
if(threadId<numberOfColumns*numberOfRows){
if ((threadId/numberOfColumns)!=row)
outputMatrix[threadId] = Matrix[threadId]- Matrix[(threadId/numberOfColumns)*numberOfColumns+row] * (Matrix[row*numberOfColumns + threadId%numberOfColumns]/pivot); //row-major stride is numberOfColumns, matching the pivot indexing above
else
outputMatrix[threadId] = Matrix[threadId];
}
}
// GPU function for the direct Gauss-Jordan elimination method.
bool GaussianEliminationGPU( float** matrix, unsigned int numberOfRows, unsigned int numberOfColumns, float** outputMatrix, bool partialPivot)
{
// Error return value
hipError_t status;
// Number of bytes in the matrix.
int bytes = numberOfColumns * numberOfRows *sizeof(float);
unsigned int numberOfRowsd, numberOfColumnsd; //To be safe copy the elements too.
numberOfColumnsd = numberOfColumns;
numberOfRowsd = numberOfRows;
// Pointers to the device arrays
float *matrixd, *outputMatrixd; //input and output matrix
// Allocate memory on the device to store each matrix
hipMalloc((void**) &matrixd, bytes);
status = hipGetLastError(); //To check the error
if (status != hipSuccess) {
std::cout << "Kernel failed2: " << hipGetErrorString(status) <<
std::endl;
hipFree(matrixd); //Free call for memory
hipFree(outputMatrixd); //Free call for memory
return false;
}
hipMalloc((void**) &outputMatrixd, bytes);
status = hipGetLastError(); //To check the error
if (status != hipSuccess) {
std::cout << "Kernel failed2: " << hipGetErrorString(status) <<
std::endl;
hipFree(matrixd); //Free call for memory
hipFree(outputMatrixd); //Free call for memory
return false;
}
float *temp1 = matrixd;
float *temp2 = outputMatrixd;
// Copy the host input data to the device
for (int i=0; i<numberOfRows; i++){
hipMemcpy((float *)temp1, matrix[i], numberOfColumns *sizeof(float), hipMemcpyHostToDevice);
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Kernel failed3: " << hipGetErrorString(status) <<
std::endl;
hipFree(matrixd); //Free call for memory
hipFree(outputMatrixd); //Free call for memory
return false;
}
hipMemcpy((float *)temp2, matrix[i], numberOfColumns *sizeof(float), hipMemcpyHostToDevice);
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Kernel failed4: " << hipGetErrorString(status) <<
std::endl;
hipFree(matrixd); //Free call for memory
hipFree(outputMatrixd); //Free call for memory
return false;
}
temp1 += numberOfColumns;
temp2 += numberOfColumns;
}
temp1= matrixd;
temp2 = outputMatrixd;
int size = numberOfColumns * numberOfRows;
dim3 dimBlock(TILE_SIZE, 1);
int gridx = 1; //Grid size calculation
int gridy = 1; //Grid size calculation
if(size/TILE_SIZE < MAX_GRID_SIZE)
gridx = ceil((float)size/TILE_SIZE); //Decide the grid size for input size.
else{
gridx = MAX_GRID_SIZE;
gridy = ceil((float)size/(TILE_SIZE*MAX_GRID_SIZE));
}
dim3 dimGrid(gridx, gridy); // grid call.
// Launch the kernel one-by-one
int rowNo = 0;
for (rowNo=0; rowNo < numberOfColumns ;rowNo++){
hipLaunchKernelGGL(( GaussianEliminationGPUKernelScaling), dim3(dimGrid), dim3(dimBlock), 0, 0, matrixd, numberOfRowsd, numberOfColumnsd, outputMatrixd, partialPivot, rowNo); //Calling kernel-1 for scaling
hipDeviceSynchronize(); //Thread sync
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Kernel failed5: " << hipGetErrorString(status) <<
std::endl;
hipFree(matrixd); //Free call for memory
hipFree(outputMatrixd); //Free call for memory
return false;
}
hipLaunchKernelGGL(( GaussianEliminationGPUKernelReduction), dim3(dimGrid), dim3(dimBlock), 0, 0, outputMatrixd, numberOfRowsd, numberOfColumnsd, matrixd, partialPivot, rowNo); //Calling kernel-2 for reduction
status = hipGetLastError(); //Error check
if (status != hipSuccess) {
std::cout << "Kernel failed6: " << hipGetErrorString(status) <<
std::endl;
hipFree(matrixd); //Free call for memory
hipFree(outputMatrixd); //Free call for memory
return false;
}
hipDeviceSynchronize(); //thread sync
}
// Check for errors
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Kernel failed7: " << hipGetErrorString(status) <<
std::endl;
hipFree(matrixd); //Free call for memory
hipFree(outputMatrixd); //Free call for memory
return false;
}
// Retrieve the result matrix
temp1 = matrixd; //walk a temporary pointer so matrixd still points at the original allocation when it is freed below
for (int i=0; i<numberOfRows; i++){
hipMemcpy(outputMatrix[i], temp1, numberOfColumns *sizeof(float), hipMemcpyDeviceToHost);
temp1 += numberOfColumns;
}
// Free device memory
hipFree(outputMatrixd); //Free call for memory
hipFree(matrixd); //Free call for memory
// Success
return true;
}
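// ---------------------------------------------------------------------------
// Hedged usage sketch (editorial addition, not part of the original file): a
// minimal host driver for GaussianEliminationGPU on a small augmented system.
// The 3x4 matrix, the GAUSSIAN_ELIMINATION_EXAMPLE guard, and the assumption
// that the reduced system's last column holds the solution are illustrative
// only.
// ---------------------------------------------------------------------------
#ifdef GAUSSIAN_ELIMINATION_EXAMPLE
int main()
{
	const unsigned int rows = 3, cols = 4;
	float data[3][4] = {
		{ 2.0f, 1.0f, -1.0f, 8.0f },
		{ -3.0f, -1.0f, 2.0f, -11.0f },
		{ -2.0f, 1.0f, 2.0f, -3.0f }
	};
	float result[3][4] = {{ 0.0f }};
	float *in[3], *out[3];
	for (unsigned int i = 0; i < rows; i++) {
		in[i] = data[i]; //row pointers into the input system
		out[i] = result[i]; //row pointers receiving the reduced system
	}
	if (GaussianEliminationGPU(in, rows, cols, out, false)) {
		for (unsigned int i = 0; i < rows; i++)
			std::cout << "x[" << i << "] = " << out[i][cols - 1] << std::endl;
	}
	return 0;
}
#endif //GAUSSIAN_ELIMINATION_EXAMPLE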
| 82e438bdeddf9af9a2ec0a7adbd3e673be2ea783.cu | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include "common.h"
const int TILE_SIZE = 256; //Max no of threads in Block.
const int MAX_GRID_SIZE = 65535; //Max no of Blocks in a Grid.
// GPU Kernel-1 to perform row scaling.
__global__ void GaussianEliminationGPUKernelScaling(float* matrix, unsigned int numberOfRows, unsigned int numberOfColumns, float* outputMatrix, bool partialPivot, unsigned int row)
{
// Retrieve our coordinates in the block
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x + threadIdx.x; //Thread id calculation
if(threadId<numberOfColumns*numberOfRows){
if ((threadId/numberOfColumns)==row)
outputMatrix[threadId] = matrix[threadId] / matrix[numberOfColumns*row+row];
else
outputMatrix[threadId] = matrix[threadId];
}
}
// GPU Kernel-2 to perform the reduction in rows.
__global__ void GaussianEliminationGPUKernelReduction(float* Matrix, unsigned int numberOfRows, unsigned int numberOfColumns, float* outputMatrix, bool partialPivot, unsigned int row)
{
// Retrieve our coordinates in the block
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x + threadIdx.x; //threadid calculation
float pivot = Matrix[numberOfColumns*row+row]; //Calculates the pivot element for each row.
if(threadId<numberOfColumns*numberOfRows){
if ((threadId/numberOfColumns)!=row)
outputMatrix[threadId] = Matrix[threadId]- Matrix[(threadId/numberOfColumns)*numberOfColumns+row] * (Matrix[row*numberOfColumns + threadId%numberOfColumns]/pivot); //row-major stride is numberOfColumns, matching the pivot indexing above
else
outputMatrix[threadId] = Matrix[threadId];
}
}
// GPU function for the direct Gauss-Jordan elimination method.
bool GaussianEliminationGPU( float** matrix, unsigned int numberOfRows, unsigned int numberOfColumns, float** outputMatrix, bool partialPivot)
{
// Error return value
cudaError_t status;
// Number of bytes in the matrix.
int bytes = numberOfColumns * numberOfRows *sizeof(float);
unsigned int numberOfRowsd, numberOfColumnsd; //To be safe copy the elements too.
numberOfColumnsd = numberOfColumns;
numberOfRowsd = numberOfRows;
// Pointers to the device arrays
float *matrixd, *outputMatrixd; //input and output matrix
// Allocate memory on the device to store each matrix
cudaMalloc((void**) &matrixd, bytes);
status = cudaGetLastError(); //To check the error
if (status != cudaSuccess) {
std::cout << "Kernel failed2: " << cudaGetErrorString(status) <<
std::endl;
cudaFree(matrixd); //Free call for memory
cudaFree(outputMatrixd); //Free call for memory
return false;
}
cudaMalloc((void**) &outputMatrixd, bytes);
status = cudaGetLastError(); //To check the error
if (status != cudaSuccess) {
std::cout << "Kernel failed2: " << cudaGetErrorString(status) <<
std::endl;
cudaFree(matrixd); //Free call for memory
cudaFree(outputMatrixd); //Free call for memory
return false;
}
float *temp1 = matrixd;
float *temp2 = outputMatrixd;
// Copy the host input data to the device
for (int i=0; i<numberOfRows; i++){
cudaMemcpy((float *)temp1, matrix[i], numberOfColumns *sizeof(float), cudaMemcpyHostToDevice);
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Kernel failed3: " << cudaGetErrorString(status) <<
std::endl;
cudaFree(matrixd); //Free call for memory
cudaFree(outputMatrixd); //Free call for memory
return false;
}
cudaMemcpy((float *)temp2, matrix[i], numberOfColumns *sizeof(float), cudaMemcpyHostToDevice);
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Kernel failed4: " << cudaGetErrorString(status) <<
std::endl;
cudaFree(matrixd); //Free call for memory
cudaFree(outputMatrixd); //Free call for memory
return false;
}
temp1 += numberOfColumns;
temp2 += numberOfColumns;
}
temp1= matrixd;
temp2 = outputMatrixd;
int size = numberOfColumns * numberOfRows;
dim3 dimBlock(TILE_SIZE, 1);
int gridx = 1; //Grid size calculation
int gridy = 1; //Grid size calculation
if(size/TILE_SIZE < MAX_GRID_SIZE)
gridx = ceil((float)size/TILE_SIZE); //Decide the grid size for input size.
else{
gridx = MAX_GRID_SIZE;
gridy = ceil((float)size/(TILE_SIZE*MAX_GRID_SIZE));
}
dim3 dimGrid(gridx, gridy); // grid call.
// Launch the kernel one-by-one
int rowNo = 0;
for (rowNo=0; rowNo < numberOfColumns ;rowNo++){
GaussianEliminationGPUKernelScaling<<<dimGrid, dimBlock>>>(matrixd, numberOfRowsd, numberOfColumnsd, outputMatrixd, partialPivot, rowNo); //Calling kernel-1 for scaling
cudaThreadSynchronize(); //Thread sync
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Kernel failed5: " << cudaGetErrorString(status) <<
std::endl;
cudaFree(matrixd); //Free call for memory
cudaFree(outputMatrixd); //Free call for memory
return false;
}
GaussianEliminationGPUKernelReduction<<<dimGrid, dimBlock>>>(outputMatrixd, numberOfRowsd, numberOfColumnsd, matrixd, partialPivot, rowNo); //Calling kernel-2 for reduction
status = cudaGetLastError(); //Error check
if (status != cudaSuccess) {
std::cout << "Kernel failed6: " << cudaGetErrorString(status) <<
std::endl;
cudaFree(matrixd); //Free call for memory
cudaFree(outputMatrixd); //Free call for memory
return false;
}
cudaThreadSynchronize(); //thread sync
}
// Check for errors
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Kernel failed7: " << cudaGetErrorString(status) <<
std::endl;
cudaFree(matrixd); //Free call for memory
cudaFree(outputMatrixd); //Free call for memory
return false;
}
// Retrieve the result matrix
temp1 = matrixd; //walk a temporary pointer so matrixd still points at the original allocation when it is freed below
for (int i=0; i<numberOfRows; i++){
cudaMemcpy(outputMatrix[i], temp1, numberOfColumns *sizeof(float), cudaMemcpyDeviceToHost);
temp1 += numberOfColumns;
}
// Free device memory
cudaFree(outputMatrixd); //Free call for memory
cudaFree(matrixd); //Free call for memory
// Success
return true;
}
|
150574022fa91f671d0d4bae5436e92e3bc0c312.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "parameter.h"
#include "array_definition.h"
#include "cuda_funclist.h"
//extern void checkCUDAError(const char *msg);
__global__ void cfl_kernel(float *cfl_u, float *cfl_b, int *cfl_nx, int *cfl_ny, int *cfl_nz, float *cfl_dt, float *cfl_c);
__device__ float max_3num(float *m3_1, float *m3_2, float *m3_3);
__device__ float max_2num(float *m2_1, float *m2_2);
__host__ void h_cfl_find_max(float *hcfm_in, int *hcfm_ny, int *hcfm_nz, float *hcfm_out);
void cuda_cfl(float *cfl_u, float *cfl_b, int *cfl_nx, int *cfl_ny, int *cfl_nz, float *cfl_dt, float *cfl_c, int *h_cfl_nx, int *h_cfl_ny, int *h_cfl_nz, float *h_cfl_dt)
{
// initialization
int Totalthreads = (*h_cfl_nx)*(*h_cfl_ny)*(*h_cfl_nz);
int numThreadsPerBlock = *h_cfl_nx;
int numBlocks = Totalthreads/numThreadsPerBlock;
size_t c_memSize = numBlocks * sizeof(float);
size_t dt_memSize = sizeof(float);
// send it to device to calculate
dim3 dimGrid(*h_cfl_ny,*h_cfl_nz);
dim3 dimBlock(*h_cfl_nx);
hipLaunchKernelGGL((cfl_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, cfl_u, cfl_b, cfl_nx, cfl_ny, cfl_nz, cfl_dt, cfl_c);
//
hipDeviceSynchronize();
//
checkCUDAError("kernel execution in cuda_cfl");
// get it from device to find the max of c
float *temp_h_cfl_c;
temp_h_cfl_c = (float *) malloc(c_memSize);
hipMemcpy( temp_h_cfl_c, cfl_c, c_memSize, hipMemcpyDeviceToHost );
//
checkCUDAError("memcpy: from device to host, in cuda_cfl");
//
float max_c;
max_c=0;
h_cfl_find_max(temp_h_cfl_c,h_cfl_ny,h_cfl_nz,&max_c);
// find it and get cfl_dt in host
(*h_cfl_dt)=1/(max_c);
// copy it to device
hipMemcpy( cfl_dt, h_cfl_dt, dt_memSize, hipMemcpyHostToDevice );
//
checkCUDAError("memcpy: from host to device, in cuda_cfl");
//
free(temp_h_cfl_c);
//
}
__global__ void cfl_kernel(float *cfl_u, float *cfl_b, int *cfl_nx, int *cfl_ny, int *cfl_nz, float *cfl_dt, float *cfl_c)
{
/*
two dimensional array of blocks on grid where each block has one dimensional array of threads:
UniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
UniqueThreadIndex = UniqueBlockIndex * blockDim.x + threadIdx.x;
*/
__shared__ float cfl_s_b1_ip[BLOCK_SIZE];
__shared__ float cfl_s_b2_jp[BLOCK_SIZE];
__shared__ float cfl_s_b3_kp[BLOCK_SIZE];
__shared__ float cfl_s_b[3*BLOCK_SIZE];
__shared__ float cfl_s_u[5*BLOCK_SIZE];
/*
i = threadIdx.x
j = blockIdx.x
k = blockIdx.y
nx = blockDim.x
ny = gridDim.x
nz = gridDim.y
*/
float gamma;
gamma=5.0/3.0;
int ii;
int kp,jp,ip;
// kp=mod(k,nz)+1
kp=(blockIdx.y+1)%(*cfl_nz);
// jp=mod(j,ny)+1
jp=(blockIdx.x+1)%(*cfl_ny);
// ip=mod(i,nx)+1
ip=(threadIdx.x+1)%(*cfl_nx);
// get cfl_s_b1_ip
cfl_s_b1_ip[threadIdx.x]=cfl_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(1-1),ip,blockIdx.x,blockIdx.y)];
// get cfl_s_b2_jp
cfl_s_b2_jp[threadIdx.x]=cfl_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(2-1),threadIdx.x,jp,blockIdx.y)];
// get cfl_s_b3_kp
cfl_s_b3_kp[threadIdx.x]=cfl_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(3-1),threadIdx.x,blockIdx.x,kp)];
// get cfl_s_u
for (ii=0;ii<5;ii++)
{
cfl_s_u[a2D_FinC(5,blockDim.x,ii,threadIdx.x)]=cfl_u[a4D_FinC(5,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)];
}
// get cfl_s_b
for (ii=0;ii<3;ii++)
{
cfl_s_b[a2D_FinC(3,blockDim.x,ii,threadIdx.x)]=cfl_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)];
}
//
__syncthreads();
//
float bx,by,bz;
// bx=(b(1,i,j,k)+b(1,ip,j,k))/2
bx=(cfl_s_b[a2D_FinC(3,blockDim.x,(1-1),threadIdx.x)]+cfl_s_b1_ip[threadIdx.x])/2.0;
// by=(b(2,i,j,k)+b(2,i,jp,k))/2
by=(cfl_s_b[a2D_FinC(3,blockDim.x,(2-1),threadIdx.x)]+cfl_s_b2_jp[threadIdx.x])/2.0;
// bz=(b(3,i,j,k)+b(3,i,j,kp))/2
bz=(cfl_s_b[a2D_FinC(3,blockDim.x,(3-1),threadIdx.x)]+cfl_s_b3_kp[threadIdx.x])/2.0;
float v;
// v=maxval(abs(u(2:4,i,j,k)/u(1,i,j,k)))
float temp1,temp2,temp3;
temp1=fabs(cfl_s_u[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)]/cfl_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]);
temp2=fabs(cfl_s_u[a2D_FinC(5,blockDim.x,(3-1),threadIdx.x)]/cfl_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]);
temp3=fabs(cfl_s_u[a2D_FinC(5,blockDim.x,(4-1),threadIdx.x)]/cfl_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]);
v=max_3num(&temp1,&temp2,&temp3);
float b2;
b2=bx*bx+by*by+bz*bz;
// ps=(u(5,i,j,k)-sum(u(2:4,i,j,k)**2,1)/u(1,i,j,k)/2)*(gamma-1)+(2-gamma)*b2/2
float ps;
ps=(cfl_s_u[a2D_FinC(5,blockDim.x,(5-1),threadIdx.x)]-(cfl_s_u[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)]*cfl_s_u[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)]+cfl_s_u[a2D_FinC(5,blockDim.x,(3-1),threadIdx.x)]*cfl_s_u[a2D_FinC(5,blockDim.x,(3-1),threadIdx.x)]+cfl_s_u[a2D_FinC(5,blockDim.x,(4-1),threadIdx.x)]*cfl_s_u[a2D_FinC(5,blockDim.x,(4-1),threadIdx.x)])/cfl_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]/2.0)*(gamma-1.0)+(2.0-gamma)*b2/2.0;
// p=ps-b2/2
float p;
p=ps-b2/2.0;
// c=max(c,v+sqrt(abs( (b2*2+gamma*p)/u(1,i,j,k))))
temp1=v+sqrt(fabs((b2*2.0+gamma*p)/cfl_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]));
// find max
__shared__ float cfl_s_c[BLOCK_SIZE];
float temp_c_max;
cfl_s_c[threadIdx.x]=temp1;
__syncthreads();
if (threadIdx.x==0)
{
temp_c_max=0.0;
for (int i=0; i<BLOCK_SIZE; i++)
{
if (cfl_s_c[i]>temp_c_max) temp_c_max=cfl_s_c[i];
}
cfl_c[blockIdx.y * gridDim.x + blockIdx.x] = temp_c_max; //one block-local maximum per block; the host then reduces over all blocks
}
//
return;
}
__device__ float max_3num(float *m3_1, float *m3_2, float *m3_3)
{
if ((*m3_1)>(*m3_2))
{
if ((*m3_1)>(*m3_3))
{
return (*m3_1);
}
else
{
return (*m3_3);
}
}
else
{
if ((*m3_2)>(*m3_3))
{
return (*m3_2);
}
else
{
return (*m3_3);
}
}
}
__device__ float max_2num(float *m2_1, float *m2_2)
{
if ((*m2_1)>(*m2_2))
{
return (*m2_1);
}
else
{
return (*m2_2);
}
}
__host__ void h_cfl_find_max(float *hcfm_in, int *hcfm_ny, int *hcfm_nz, float *hcfm_out)
{
int j,k;
(*hcfm_out)=0;
for (k=0;k<(*hcfm_nz);k++)
{
for (j=0;j<(*hcfm_ny);j++)
{
if (hcfm_in[a2D_FinC((*hcfm_ny),(*hcfm_nz),j,k)]>(*hcfm_out))
{
(*hcfm_out)=hcfm_in[a2D_FinC((*hcfm_ny),(*hcfm_nz),j,k)];
}
}
}
}
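// ---------------------------------------------------------------------------
// Hedged usage sketch (editorial addition, not from the original source): a
// minimal host driver for cuda_cfl. Grid extents, field values, and the
// CFL_EXAMPLE guard are assumptions; nx is taken equal to BLOCK_SIZE because
// the kernel's shared arrays are sized with that constant.
// ---------------------------------------------------------------------------
#ifdef CFL_EXAMPLE
int main()
{
  int nx = BLOCK_SIZE, ny = 4, nz = 4;
  size_t ncell = (size_t)nx * ny * nz;
  float *h_u = new float[5 * ncell];
  float *h_b = new float[3 * ncell];
  for (size_t i = 0; i < 5 * ncell; i++) h_u[i] = 1.0f; // uniform state
  for (size_t i = 0; i < 3 * ncell; i++) h_b[i] = 0.1f; // weak uniform field
  float *d_u, *d_b, *d_c, *d_dt;
  int *d_nx, *d_ny, *d_nz;
  hipMalloc((void **) &d_u, 5 * ncell * sizeof(float));
  hipMalloc((void **) &d_b, 3 * ncell * sizeof(float));
  hipMalloc((void **) &d_c, (size_t)ny * nz * sizeof(float)); // one value per block
  hipMalloc((void **) &d_dt, sizeof(float));
  hipMalloc((void **) &d_nx, sizeof(int));
  hipMalloc((void **) &d_ny, sizeof(int));
  hipMalloc((void **) &d_nz, sizeof(int));
  hipMemcpy(d_u, h_u, 5 * ncell * sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(d_b, h_b, 3 * ncell * sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(d_nx, &nx, sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(d_ny, &ny, sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(d_nz, &nz, sizeof(int), hipMemcpyHostToDevice);
  float dt = 0.0f;
  cuda_cfl(d_u, d_b, d_nx, d_ny, d_nz, d_dt, d_c, &nx, &ny, &nz, &dt);
  printf("CFL time step dt = %f\n", dt);
  hipFree(d_u); hipFree(d_b); hipFree(d_c); hipFree(d_dt);
  hipFree(d_nx); hipFree(d_ny); hipFree(d_nz);
  delete [] h_u; delete [] h_b;
  return 0;
}
#endif // CFL_EXAMPLE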
| 150574022fa91f671d0d4bae5436e92e3bc0c312.cu | #include <stdio.h>
#include <math.h>
#include "cuda.h"
#include "parameter.h"
#include "array_definition.h"
#include "cuda_funclist.h"
//extern void checkCUDAError(const char *msg);
__global__ void cfl_kernel(float *cfl_u, float *cfl_b, int *cfl_nx, int *cfl_ny, int *cfl_nz, float *cfl_dt, float *cfl_c);
__device__ float max_3num(float *m3_1, float *m3_2, float *m3_3);
__device__ float max_2num(float *m2_1, float *m2_2);
__host__ void h_cfl_find_max(float *hcfm_in, int *hcfm_ny, int *hcfm_nz, float *hcfm_out);
void cuda_cfl(float *cfl_u, float *cfl_b, int *cfl_nx, int *cfl_ny, int *cfl_nz, float *cfl_dt, float *cfl_c, int *h_cfl_nx, int *h_cfl_ny, int *h_cfl_nz, float *h_cfl_dt)
{
// initialization
int Totalthreads = (*h_cfl_nx)*(*h_cfl_ny)*(*h_cfl_nz);
int numThreadsPerBlock = *h_cfl_nx;
int numBlocks = Totalthreads/numThreadsPerBlock;
size_t c_memSize = numBlocks * sizeof(float);
size_t dt_memSize = sizeof(float);
// send it to device to calculate
dim3 dimGrid(*h_cfl_ny,*h_cfl_nz);
dim3 dimBlock(*h_cfl_nx);
cfl_kernel<<< dimGrid, dimBlock >>>( cfl_u, cfl_b, cfl_nx, cfl_ny, cfl_nz, cfl_dt, cfl_c);
//
cudaThreadSynchronize();
//
checkCUDAError("kernel execution in cuda_cfl");
// get it from device to find the max of c
float *temp_h_cfl_c;
temp_h_cfl_c = (float *) malloc(c_memSize);
cudaMemcpy( temp_h_cfl_c, cfl_c, c_memSize, cudaMemcpyDeviceToHost );
//
checkCUDAError("memcpy: from device to host, in cuda_cfl");
//
float max_c;
max_c=0;
h_cfl_find_max(temp_h_cfl_c,h_cfl_ny,h_cfl_nz,&max_c);
// find it and get cfl_dt in host
(*h_cfl_dt)=1/(max_c);
// copy it to device
cudaMemcpy( cfl_dt, h_cfl_dt, dt_memSize, cudaMemcpyHostToDevice );
//
checkCUDAError("memcpy: from host to device, in cuda_cfl");
//
free(temp_h_cfl_c);
//
}
__global__ void cfl_kernel(float *cfl_u, float *cfl_b, int *cfl_nx, int *cfl_ny, int *cfl_nz, float *cfl_dt, float *cfl_c)
{
/*
two dimensional array of blocks on grid where each block has one dimensional array of threads:
UniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
UniqueThreadIndex = UniqueBlockIndex * blockDim.x + threadIdx.x;
*/
__shared__ float cfl_s_b1_ip[BLOCK_SIZE];
__shared__ float cfl_s_b2_jp[BLOCK_SIZE];
__shared__ float cfl_s_b3_kp[BLOCK_SIZE];
__shared__ float cfl_s_b[3*BLOCK_SIZE];
__shared__ float cfl_s_u[5*BLOCK_SIZE];
/*
i = threadIdx.x
j = blockIdx.x
k = blockIdx.y
nx = blockDim.x
ny = gridDim.x
nz = gridDim.y
*/
float gamma;
gamma=5.0/3.0;
int ii;
int kp,jp,ip;
// kp=mod(k,nz)+1
kp=(blockIdx.y+1)%(*cfl_nz);
// jp=mod(j,ny)+1
jp=(blockIdx.x+1)%(*cfl_ny);
// ip=mod(i,nx)+1
ip=(threadIdx.x+1)%(*cfl_nx);
// get cfl_s_b1_ip
cfl_s_b1_ip[threadIdx.x]=cfl_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(1-1),ip,blockIdx.x,blockIdx.y)];
// get cfl_s_b2_jp
cfl_s_b2_jp[threadIdx.x]=cfl_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(2-1),threadIdx.x,jp,blockIdx.y)];
// get cfl_s_b3_kp
cfl_s_b3_kp[threadIdx.x]=cfl_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,(3-1),threadIdx.x,blockIdx.x,kp)];
// get cfl_s_u
for (ii=0;ii<5;ii++)
{
cfl_s_u[a2D_FinC(5,blockDim.x,ii,threadIdx.x)]=cfl_u[a4D_FinC(5,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)];
}
// get cfl_s_b
for (ii=0;ii<3;ii++)
{
cfl_s_b[a2D_FinC(3,blockDim.x,ii,threadIdx.x)]=cfl_b[a4D_FinC(3,blockDim.x,gridDim.x,gridDim.y,ii,threadIdx.x,blockIdx.x,blockIdx.y)];
}
//
__syncthreads();
//
float bx,by,bz;
// bx=(b(1,i,j,k)+b(1,ip,j,k))/2
bx=(cfl_s_b[a2D_FinC(3,blockDim.x,(1-1),threadIdx.x)]+cfl_s_b1_ip[threadIdx.x])/2.0;
// by=(b(2,i,j,k)+b(2,i,jp,k))/2
by=(cfl_s_b[a2D_FinC(3,blockDim.x,(2-1),threadIdx.x)]+cfl_s_b2_jp[threadIdx.x])/2.0;
// bz=(b(3,i,j,k)+b(3,i,j,kp))/2
bz=(cfl_s_b[a2D_FinC(3,blockDim.x,(3-1),threadIdx.x)]+cfl_s_b3_kp[threadIdx.x])/2.0;
float v;
// v=maxval(abs(u(2:4,i,j,k)/u(1,i,j,k)))
float temp1,temp2,temp3;
temp1=fabs(cfl_s_u[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)]/cfl_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]);
temp2=fabs(cfl_s_u[a2D_FinC(5,blockDim.x,(3-1),threadIdx.x)]/cfl_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]);
temp3=fabs(cfl_s_u[a2D_FinC(5,blockDim.x,(4-1),threadIdx.x)]/cfl_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]);
v=max_3num(&temp1,&temp2,&temp3);
float b2;
b2=bx*bx+by*by+bz*bz;
// ps=(u(5,i,j,k)-sum(u(2:4,i,j,k)**2,1)/u(1,i,j,k)/2)*(gamma-1)+(2-gamma)*b2/2
float ps;
ps=(cfl_s_u[a2D_FinC(5,blockDim.x,(5-1),threadIdx.x)]-(cfl_s_u[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)]*cfl_s_u[a2D_FinC(5,blockDim.x,(2-1),threadIdx.x)]+cfl_s_u[a2D_FinC(5,blockDim.x,(3-1),threadIdx.x)]*cfl_s_u[a2D_FinC(5,blockDim.x,(3-1),threadIdx.x)]+cfl_s_u[a2D_FinC(5,blockDim.x,(4-1),threadIdx.x)]*cfl_s_u[a2D_FinC(5,blockDim.x,(4-1),threadIdx.x)])/cfl_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]/2.0)*(gamma-1.0)+(2.0-gamma)*b2/2.0;
// p=ps-b2/2
float p;
p=ps-b2/2.0;
// c=max(c,v+sqrt(abs( (b2*2+gamma*p)/u(1,i,j,k))))
temp1=v+sqrt(fabs((b2*2.0+gamma*p)/cfl_s_u[a2D_FinC(5,blockDim.x,(1-1),threadIdx.x)]));
// find max
__shared__ float cfl_s_c[BLOCK_SIZE];
float temp_c_max;
cfl_s_c[threadIdx.x]=temp1;
__syncthreads();
if (threadIdx.x==0)
{
temp_c_max=0.0;
for (int i=0; i<BLOCK_SIZE; i++)
{
if (cfl_s_c[i]>temp_c_max) temp_c_max=cfl_s_c[i];
}
cfl_c[blockIdx.y * gridDim.x + blockIdx.x] = temp_c_max; //one block-local maximum per block; the host then reduces over all blocks
}
//
return;
}
__device__ float max_3num(float *m3_1, float *m3_2, float *m3_3)
{
if ((*m3_1)>(*m3_2))
{
if ((*m3_1)>(*m3_3))
{
return (*m3_1);
}
else
{
return (*m3_3);
}
}
else
{
if ((*m3_2)>(*m3_3))
{
return (*m3_2);
}
else
{
return (*m3_3);
}
}
}
__device__ float max_2num(float *m2_1, float *m2_2)
{
if ((*m2_1)>(*m2_2))
{
return (*m2_1);
}
else
{
return (*m2_2);
}
}
__host__ void h_cfl_find_max(float *hcfm_in, int *hcfm_ny, int *hcfm_nz, float *hcfm_out)
{
int j,k;
(*hcfm_out)=0;
for (k=0;k<(*hcfm_nz);k++)
{
for (j=0;j<(*hcfm_ny);j++)
{
if (hcfm_in[a2D_FinC((*hcfm_ny),(*hcfm_nz),j,k)]>(*hcfm_out))
{
(*hcfm_out)=hcfm_in[a2D_FinC((*hcfm_ny),(*hcfm_nz),j,k)];
}
}
}
}
|
a371bb1fd0b453357caf96b26d7e4e31c3481232.hip | // !!! This is a file automatically generated by hipify!!!
#include "Python.h"
#include <stdlib.h>
#define NPY_NO_DEPRECATED_API NPY_1_9_API_VERSION
#include "arrayobject.h"
#include <errno.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <helper_cuda.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/extrema.h>
#include "util.h"
#include "opencv2/opencv.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/cudaarithm.hpp"
void logsumexp(thrust::device_vector<double> &d_result,
const thrust::device_vector<double> &d_input,
int m, int n, int axis, hipblasHandle_t handle)
{
using namespace thrust::placeholders;
hipblasOperation_t op = HIPBLAS_OP_T;
int ones_size = m;
if (axis){ // Sum over columns
op = HIPBLAS_OP_N;
ones_size = n;
}
thrust::device_vector<double> d_expin = d_input;
const double *d_input_ptr = thrust::raw_pointer_cast(&d_input[0]);
double *d_expin_ptr = thrust::raw_pointer_cast(&d_expin[0]);
double *d_result_ptr = thrust::raw_pointer_cast(&d_result[0]);
thrust::device_vector<double> d_ones(ones_size, 1.0);
double *d_ones_ptr = thrust::raw_pointer_cast(&d_ones[0]);
double alpha = 1.0, beta = 0.0;
cv::cuda::GpuMat CVmat_input(n, m, CV_64FC1, (void *) d_input_ptr);
cv::cuda::GpuMat d_maxima;
double *d_maxima_ptr = NULL;
double m_alpha = -1.0;
if (axis){ //subtract max from rows
cv::cuda::reduce(CVmat_input, d_maxima, 0, CV_REDUCE_MAX, -1);
d_maxima_ptr = (double *)d_maxima.ptr();
checkCudaErrors(hipblasDger(handle, m, n, &m_alpha, d_maxima_ptr,
1, d_ones_ptr, 1, d_expin_ptr, m));
} else { // subtract max from columns
cv::cuda::reduce(CVmat_input, d_maxima, 1, CV_REDUCE_MAX, -1);
d_maxima_ptr = (double *)d_maxima.ptr();
checkCudaErrors(hipblasDger(handle, m, n, &m_alpha, d_ones_ptr,
1, d_maxima_ptr, 1, d_expin_ptr, m));
}
thrust::device_ptr<double> d_maxima_thrust = thrust::device_pointer_cast(d_maxima_ptr);
//thrust::host_vector<double> h_maxima(d_maxima_thrust, d_maxima_thrust+sums_size);
//printf("Maximum values:\n");
//for (int i=0; i<h_maxima.size(); i++){
// printf("%f ", h_maxima[i]);
//}
//printf("\n")
myExp expy;
myLog logy;
thrust::transform(d_expin.begin(), d_expin.end(), d_expin.begin(), expy);
checkCudaErrors(hipblasDgemv(handle, op, m, n, &alpha,
d_expin_ptr, m, d_ones_ptr, 1,
&beta, d_result_ptr, 1));
thrust::transform(d_result.begin(), d_result.end(), d_result.begin(), logy);
thrust::transform(d_result.begin(), d_result.end(), d_maxima_thrust,
d_result.begin(), thrust::plus<double>());
}
double infoFreeEnergy(thrust::device_vector<double> &d_pci,
thrust::device_vector<double> &d_s_ij,
double T, int N_c, int N, hipblasHandle_t handle)
{
double *d_pci_ptr = thrust::raw_pointer_cast(&d_pci[0]);
double *d_s_ij_ptr = thrust::raw_pointer_cast(&d_s_ij[0]);
thrust::device_vector<double> d_npc(N_c);
double *d_npc_ptr = thrust::raw_pointer_cast(&d_npc[0]);
thrust::device_vector<double> d_ps(N_c*N);
double *d_ps_ptr = thrust::raw_pointer_cast(&d_ps[0]);
thrust::device_vector<double> d_ones_N(N, 1.0);
double *d_ones_N_ptr = thrust::raw_pointer_cast(&d_ones_N[0]);
double alpha = 1.0, beta = 0.0;
checkCudaErrors(hipblasDgemv(handle, HIPBLAS_OP_N, N_c, N, &alpha,
d_pci_ptr, N_c, d_ones_N_ptr, 1,
&beta, d_npc_ptr, 1));
checkCudaErrors(hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
N_c, N, N, &alpha, d_pci_ptr, N_c,
d_s_ij_ptr, N, &beta, d_ps_ptr, N_c));
thrust::device_vector<double> d_pci_by_npc(N_c*N);
double *d_pci_by_npc_ptr = thrust::raw_pointer_cast(&d_pci_by_npc[0]);
//rescale rows
myInv invy;
thrust::transform(d_npc.begin(), d_npc.end(), d_npc.begin(), invy);
checkCudaErrors(hipblasDdgmm(handle, HIPBLAS_SIDE_LEFT, N_c, N,
d_pci_ptr, N_c, d_npc_ptr, 1,
d_pci_by_npc_ptr, N_c));
double S=0;
checkCudaErrors(hipblasDdot(handle, N_c*N, d_pci_by_npc_ptr,
1, d_ps_ptr, 1, &S));
myNan_logp nanLogp((double)N);
thrust::transform(d_pci_by_npc.begin(), d_pci_by_npc.end(),
d_pci_by_npc.begin(), nanLogp);
double I=0;
checkCudaErrors(hipblasDdot(handle, N_c*N, d_pci_ptr,
1, d_pci_by_npc_ptr, 1, &I));
return (S-T*I)/N;
}
void printDeviceArrayFOrder(double *d_arr, int rows, int cols)
{
int BYTES = sizeof(double)*rows*cols;
double *h_arr = (double *)malloc(BYTES);
checkCudaErrors(hipMemcpy(h_arr, d_arr, BYTES, hipMemcpyDeviceToHost));
for (int i = 0; i< rows; i++){
for (int j=0; j<cols; j++){
printf("%f ", h_arr[j*rows+i]);
}
printf("\n");
}
free(h_arr);
}
void printDeviceArrayFOrder(int *d_arr, int rows, int cols)
{
int BYTES = sizeof(int)*rows*cols;
int *h_arr = (int *)malloc(BYTES);
checkCudaErrors(hipMemcpy(h_arr, d_arr, BYTES, hipMemcpyDeviceToHost));
for (int i = 0; i< rows; i++){
for (int j=0; j<cols; j++){
printf("%i ", h_arr[j*rows+i]);
}
printf("\n");
}
free(h_arr);
}
/* Check that PyArrayObject is a double (Float) type and a vector */
int not_datatype(PyArrayObject *vec, int numpy_dtype)
{
if (PyArray_DESCR(vec)->type_num != numpy_dtype) {
PyErr_SetString(PyExc_ValueError,
"Array is incorrect datatype");
return 1;
}
return 0;
}
void die(const char *message)
{
if (errno){
perror(message);
} else {
printf("ERROR: %s\n", message);
}
}
void Copy_CarrayToFortranArray(double * FortranArray, double *Carray,
int row, int col)
{
for (int i = 0; i<row; i++){
for (int j = 0; j<col; j++){
FortranArray[j*row+i] = Carray[i*col+j];
}
}
}
//Overload for ints too, casts int to double!!
void Copy_CarrayToFortranArray(double * FortranArray, long int *Carray,
int row, int col)
{
for (int i = 0; i<row; i++){
for (int j = 0; j<col; j++){
FortranArray[j*row+i] = (double)Carray[i*col+j];
}
}
}
void Copy_FortranArrayToCarray(double *Carray, double *FortranArray,
int row, int col)
{
for (int i = 0; i<row; i++){
for (int j= 0; j<col; j++){
Carray[i*col+j] = FortranArray[j*row+i];
}
}
}
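// ---------------------------------------------------------------------------
// Hedged usage sketch (editorial addition): a minimal driver for logsumexp on
// a small column-major matrix. The 4x3 size, the fill values, and the
// LOGSUMEXP_EXAMPLE guard are assumptions for illustration only.
// ---------------------------------------------------------------------------
#ifdef LOGSUMEXP_EXAMPLE
int main()
{
    hipblasHandle_t handle;
    checkCudaErrors(hipblasCreate(&handle));
    const int m = 4, n = 3; // m x n matrix stored column-major
    thrust::device_vector<double> d_in(m * n);
    for (int i = 0; i < m * n; i++) d_in[i] = 0.1 * i; // element-wise fill is slow but fine for a toy example
    thrust::device_vector<double> d_result(n); // axis == 0: one value per column
    logsumexp(d_result, d_in, m, n, 0, handle);
    for (int i = 0; i < n; i++) {
        double v = d_result[i];
        printf("logsumexp of column %d = %f\n", i, v);
    }
    checkCudaErrors(hipblasDestroy(handle));
    return 0;
}
#endif // LOGSUMEXP_EXAMPLE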
| a371bb1fd0b453357caf96b26d7e4e31c3481232.cu | #include "Python.h"
#include <stdlib.h>
#define NPY_NO_DEPRECATED_API NPY_1_9_API_VERSION
#include "arrayobject.h"
#include <errno.h>
#include <cuda.h>
#include <cublas_v2.h>
#include <helper_cuda.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/extrema.h>
#include "util.h"
#include "opencv2/opencv.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/cudaarithm.hpp"
void logsumexp(thrust::device_vector<double> &d_result,
const thrust::device_vector<double> &d_input,
int m, int n, int axis, cublasHandle_t handle)
{
using namespace thrust::placeholders;
cublasOperation_t op = CUBLAS_OP_T;
int ones_size = m;
if (axis){ // Sum over columns
op = CUBLAS_OP_N;
ones_size = n;
}
thrust::device_vector<double> d_expin = d_input;
const double *d_input_ptr = thrust::raw_pointer_cast(&d_input[0]);
double *d_expin_ptr = thrust::raw_pointer_cast(&d_expin[0]);
double *d_result_ptr = thrust::raw_pointer_cast(&d_result[0]);
thrust::device_vector<double> d_ones(ones_size, 1.0);
double *d_ones_ptr = thrust::raw_pointer_cast(&d_ones[0]);
double alpha = 1.0, beta = 0.0;
cv::cuda::GpuMat CVmat_input(n, m, CV_64FC1, (void *) d_input_ptr);
cv::cuda::GpuMat d_maxima;
double *d_maxima_ptr = NULL;
double m_alpha = -1.0;
if (axis){ //subtract max from rows
cv::cuda::reduce(CVmat_input, d_maxima, 0, CV_REDUCE_MAX, -1);
d_maxima_ptr = (double *)d_maxima.ptr();
checkCudaErrors(cublasDger(handle, m, n, &m_alpha, d_maxima_ptr,
1, d_ones_ptr, 1, d_expin_ptr, m));
} else { // subtract max from columns
cv::cuda::reduce(CVmat_input, d_maxima, 1, CV_REDUCE_MAX, -1);
d_maxima_ptr = (double *)d_maxima.ptr();
checkCudaErrors(cublasDger(handle, m, n, &m_alpha, d_ones_ptr,
1, d_maxima_ptr, 1, d_expin_ptr, m));
}
thrust::device_ptr<double> d_maxima_thrust = thrust::device_pointer_cast(d_maxima_ptr);
//thrust::host_vector<double> h_maxima(d_maxima_thrust, d_maxima_thrust+sums_size);
//printf("Maximum values:\n");
//for (int i=0; i<h_maxima.size(); i++){
// printf("%f ", h_maxima[i]);
//}
//printf("\n")
myExp expy;
myLog logy;
thrust::transform(d_expin.begin(), d_expin.end(), d_expin.begin(), expy);
checkCudaErrors(cublasDgemv(handle, op, m, n, &alpha,
d_expin_ptr, m, d_ones_ptr, 1,
&beta, d_result_ptr, 1));
thrust::transform(d_result.begin(), d_result.end(), d_result.begin(), logy);
thrust::transform(d_result.begin(), d_result.end(), d_maxima_thrust,
d_result.begin(), thrust::plus<double>());
}
double infoFreeEnergy(thrust::device_vector<double> &d_pci,
thrust::device_vector<double> &d_s_ij,
double T, int N_c, int N, cublasHandle_t handle)
{
double *d_pci_ptr = thrust::raw_pointer_cast(&d_pci[0]);
double *d_s_ij_ptr = thrust::raw_pointer_cast(&d_s_ij[0]);
thrust::device_vector<double> d_npc(N_c);
double *d_npc_ptr = thrust::raw_pointer_cast(&d_npc[0]);
thrust::device_vector<double> d_ps(N_c*N);
double *d_ps_ptr = thrust::raw_pointer_cast(&d_ps[0]);
thrust::device_vector<double> d_ones_N(N, 1.0);
double *d_ones_N_ptr = thrust::raw_pointer_cast(&d_ones_N[0]);
double alpha = 1.0, beta = 0.0;
checkCudaErrors(cublasDgemv(handle, CUBLAS_OP_N, N_c, N, &alpha,
d_pci_ptr, N_c, d_ones_N_ptr, 1,
&beta, d_npc_ptr, 1));
checkCudaErrors(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
N_c, N, N, &alpha, d_pci_ptr, N_c,
d_s_ij_ptr, N, &beta, d_ps_ptr, N_c));
thrust::device_vector<double> d_pci_by_npc(N_c*N);
double *d_pci_by_npc_ptr = thrust::raw_pointer_cast(&d_pci_by_npc[0]);
//rescale rows
myInv invy;
thrust::transform(d_npc.begin(), d_npc.end(), d_npc.begin(), invy);
checkCudaErrors(cublasDdgmm(handle, CUBLAS_SIDE_LEFT, N_c, N,
d_pci_ptr, N_c, d_npc_ptr, 1,
d_pci_by_npc_ptr, N_c));
double S=0;
checkCudaErrors(cublasDdot(handle, N_c*N, d_pci_by_npc_ptr,
1, d_ps_ptr, 1, &S));
myNan_logp nanLogp((double)N);
thrust::transform(d_pci_by_npc.begin(), d_pci_by_npc.end(),
d_pci_by_npc.begin(), nanLogp);
double I=0;
checkCudaErrors(cublasDdot(handle, N_c*N, d_pci_ptr,
1, d_pci_by_npc_ptr, 1, &I));
return (S-T*I)/N;
}
void printDeviceArrayFOrder(double *d_arr, int rows, int cols)
{
int BYTES = sizeof(double)*rows*cols;
double *h_arr = (double *)malloc(BYTES);
checkCudaErrors(cudaMemcpy(h_arr, d_arr, BYTES, cudaMemcpyDeviceToHost));
for (int i = 0; i< rows; i++){
for (int j=0; j<cols; j++){
printf("%f ", h_arr[j*rows+i]);
}
printf("\n");
}
free(h_arr);
}
void printDeviceArrayFOrder(int *d_arr, int rows, int cols)
{
int BYTES = sizeof(int)*rows*cols;
int *h_arr = (int *)malloc(BYTES);
checkCudaErrors(cudaMemcpy(h_arr, d_arr, BYTES, cudaMemcpyDeviceToHost));
for (int i = 0; i< rows; i++){
for (int j=0; j<cols; j++){
printf("%i ", h_arr[j*rows+i]);
}
printf("\n");
}
free(h_arr);
}
/* Check that PyArrayObject is a double (Float) type and a vector */
int not_datatype(PyArrayObject *vec, int numpy_dtype)
{
if (PyArray_DESCR(vec)->type_num != numpy_dtype) {
PyErr_SetString(PyExc_ValueError,
"Array is incorrect datatype");
return 1;
}
return 0;
}
void die(const char *message)
{
if (errno){
perror(message);
} else {
printf("ERROR: %s\n", message);
}
}
void Copy_CarrayToFortranArray(double * FortranArray, double *Carray,
int row, int col)
{
for (int i = 0; i<row; i++){
for (int j = 0; j<col; j++){
FortranArray[j*row+i] = Carray[i*col+j];
}
}
}
//Overload for ints too, casts int to double!!
void Copy_CarrayToFortranArray(double * FortranArray, long int *Carray,
int row, int col)
{
for (int i = 0; i<row; i++){
for (int j = 0; j<col; j++){
FortranArray[j*row+i] = (double)Carray[i*col+j];
}
}
}
void Copy_FortranArrayToCarray(double *Carray, double *FortranArray,
int row, int col)
{
for (int i = 0; i<row; i++){
for (int j= 0; j<col; j++){
Carray[i*col+j] = FortranArray[j*row+i];
}
}
}
|
f57790d52e54c02b81cc7db18b949b83ab5c7d13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cstdint>
#ifdef WITH_CUDA
#include "oneflow/core/cuda/elementwise.cuh"
#include "oneflow/user/kernels/pooling_kernel_util.h"
namespace oneflow {
constexpr int kBlockSize = cuda::elementwise::kBlockSize;
const int GetMinThreadNum(int64_t elem_num) { return std::min<int64_t>(elem_num, kBlockSize); }
int GetNumBlocks(int64_t elem_cnt) {
int num_blocks = 0;
OF_CUDA_CHECK(cuda::elementwise::GetNumBlocks(elem_cnt, &num_blocks));
return num_blocks;
}
template<typename T>
__launch_bounds__(kBlockSize) __global__
void DoCUDAMaxPool2dForward(const NdIndexOffsetHelper<int64_t, 4> index_helper,
int64_t elem_num, const T* src, T* dest, int64_t* indice_ptr,
int32_t padding_h, int32_t padding_w, int64_t n_batch,
int64_t n_channel, int64_t x_height, int64_t x_width,
int64_t y_height, int64_t y_width, int32_t kernel_size_h,
int32_t kernel_size_w, int32_t stride_h, int32_t stride_w,
int32_t dilation_h, int32_t dilation_w) {
Maxpool2dFarwardCompute<T>(index_helper, elem_num, src, dest, indice_ptr, padding_h, padding_w,
n_batch, n_channel, x_height, x_width, y_height, y_width,
kernel_size_h, kernel_size_w, stride_h, stride_w, dilation_h,
dilation_w);
};
template<typename T>
__launch_bounds__(kBlockSize) __global__
void DoCUDAMaxPool3dForward(const NdIndexOffsetHelper<int64_t, 5> index_helper,
int64_t elem_num, const T* src, T* dest, int64_t* indice_ptr,
int32_t padding_t, int32_t padding_h, int32_t padding_w,
int64_t n_batch, int64_t n_channel, int64_t x_time,
int64_t x_height, int64_t x_width, int64_t y_time, int64_t y_height,
int64_t y_width, int32_t kernel_size_t, int32_t kernel_size_h,
int32_t kernel_size_w, int32_t stride_t, int32_t stride_h,
int32_t stride_w, int32_t dilation_t, int32_t dilation_h,
int32_t dilation_w) {
Maxpool3dFarwardCompute<T>(index_helper, elem_num, src, dest, indice_ptr, padding_t, padding_h,
padding_w, n_batch, n_channel, x_time, x_height, x_width, y_time,
y_height, y_width, kernel_size_t, kernel_size_h, kernel_size_w,
stride_t, stride_h, stride_w, dilation_t, dilation_h, dilation_w);
};
template<typename T>
__launch_bounds__(kBlockSize) __global__
void DoCUDAMaxPool2dBackward(const NdIndexOffsetHelper<int64_t, 4> index_helper,
const int64_t elem_num, const T* src, T* dest,
const int64_t* indice_ptr, const int64_t n_batch,
const int64_t n_channel, const int64_t src_height,
const int64_t src_width, const int64_t dst_height,
const int64_t dst_width) {
Maxpool2dBackwardCompute<T>(index_helper, elem_num, src, dest, indice_ptr, n_batch, n_channel,
src_height, src_width, dst_height, dst_width);
};
template<typename T>
__launch_bounds__(kBlockSize) __global__
void DoCUDAMaxPool3dBackward(const NdIndexOffsetHelper<int64_t, 5> index_helper,
const int64_t elem_num, const T* src, T* dest,
const int64_t* indice_ptr, const int64_t n_batch,
const int64_t n_channel, const int64_t src_time,
const int64_t src_height, const int64_t src_width,
const int64_t dst_time, const int64_t dst_height,
const int64_t dst_width) {
Maxpool3dBackwardCompute<T>(index_helper, elem_num, src, dest, indice_ptr, n_batch, n_channel,
src_time, src_height, src_width, dst_time, dst_height, dst_width);
};
template<typename T>
struct PoolingKernelUtil<DeviceType::kGPU, T> {
static void Maxpool2dForward(DeviceCtx* ctx, const NdIndexOffsetHelper<int64_t, 4>& index_helper,
const int64_t elem_num, const T* src, T* dest, int64_t* indice_ptr,
const PoolingParams3D& params_3d) {
hipLaunchKernelGGL(( DoCUDAMaxPool2dForward<T>)
, dim3(GetNumBlocks(elem_num)), dim3(GetMinThreadNum(elem_num)), 0, ctx->cuda_stream(),
index_helper, elem_num, src, dest, indice_ptr, params_3d.padding_before_3d()[1],
params_3d.padding_before_3d()[2], params_3d.num_batch(), params_3d.num_channel(),
params_3d.GetXShape5D().At(3), params_3d.GetXShape5D().At(4),
params_3d.GetYShape5D().At(3), params_3d.GetYShape5D().At(4),
params_3d.pooling_size_3d()[1], params_3d.pooling_size_3d()[2],
params_3d.stride_3d()[1], params_3d.stride_3d()[2], params_3d.dilation_3d()[1],
params_3d.dilation_3d()[2]);
}
static void Maxpool2dBackward(DeviceCtx* ctx, const NdIndexOffsetHelper<int64_t, 4>& index_helper,
const int64_t elem_num, const T* src, T* dest,
const int64_t* indice_ptr, const PoolingParams3D& params_3d) {
hipLaunchKernelGGL(( DoCUDAMaxPool2dBackward<T>)
, dim3(GetNumBlocks(elem_num)), dim3(GetMinThreadNum(elem_num)), 0, ctx->cuda_stream(),
index_helper, elem_num, src, dest, indice_ptr, params_3d.num_batch(),
params_3d.num_channel(), params_3d.GetYShape5D().At(3), params_3d.GetYShape5D().At(4),
params_3d.GetXShape5D().At(3), params_3d.GetXShape5D().At(4));
}
static void Maxpool3dForward(DeviceCtx* ctx, const NdIndexOffsetHelper<int64_t, 5>& index_helper,
const int64_t elem_num, const T* src, T* dest, int64_t* indice_ptr,
const PoolingParams3D& params_3d) {
hipLaunchKernelGGL(( DoCUDAMaxPool3dForward<T>)
, dim3(GetNumBlocks(elem_num)), dim3(GetMinThreadNum(elem_num)), 0, ctx->cuda_stream(),
index_helper, elem_num, src, dest, indice_ptr, params_3d.padding_before_3d()[0],
params_3d.padding_before_3d()[1], params_3d.padding_before_3d()[2],
params_3d.num_batch(), params_3d.num_channel(), params_3d.GetXShape5D().At(2),
params_3d.GetXShape5D().At(3), params_3d.GetXShape5D().At(4),
params_3d.GetYShape5D().At(2), params_3d.GetYShape5D().At(3),
params_3d.GetYShape5D().At(4), params_3d.pooling_size_3d()[0],
params_3d.pooling_size_3d()[1], params_3d.pooling_size_3d()[2],
params_3d.stride_3d()[0], params_3d.stride_3d()[1], params_3d.stride_3d()[2],
params_3d.dilation_3d()[0], params_3d.dilation_3d()[1], params_3d.dilation_3d()[2]);
}
static void Maxpool3dBackward(DeviceCtx* ctx, const NdIndexOffsetHelper<int64_t, 5>& index_helper,
const int64_t elem_num, const T* src, T* dest,
const int64_t* indice_ptr, const PoolingParams3D& params_3d) {
hipLaunchKernelGGL(( DoCUDAMaxPool3dBackward<T>)
, dim3(GetNumBlocks(elem_num)), dim3(GetMinThreadNum(elem_num)), 0, ctx->cuda_stream(),
index_helper, elem_num, src, dest, indice_ptr, params_3d.num_batch(),
params_3d.num_channel(), params_3d.GetYShape5D().At(2), params_3d.GetYShape5D().At(3),
params_3d.GetYShape5D().At(4), params_3d.GetXShape5D().At(2),
params_3d.GetXShape5D().At(3), params_3d.GetXShape5D().At(4));
}
};
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_POOLING_KERNEL_UTIL, (DeviceType::kGPU),
POOLING_DATA_TYPE_GPU_SEQ);
} // namespace oneflow
#endif // WITH_CUDA
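// Editorial note (illustrative, numbers assumed): for a 2x3x16x16 pooled
// output, elem_num = 2*3*16*16 = 1536, so the wrappers above launch
// GetMinThreadNum(1536) = min(1536, kBlockSize) threads per block and
// GetNumBlocks(1536) blocks; the per-element index arithmetic itself lives in
// the *Compute helpers declared in pooling_kernel_util.h.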
| f57790d52e54c02b81cc7db18b949b83ab5c7d13.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cstdint>
#ifdef WITH_CUDA
#include "oneflow/core/cuda/elementwise.cuh"
#include "oneflow/user/kernels/pooling_kernel_util.h"
namespace oneflow {
constexpr int kBlockSize = cuda::elementwise::kBlockSize;
const int GetMinThreadNum(int64_t elem_num) { return std::min<int64_t>(elem_num, kBlockSize); }
int GetNumBlocks(int64_t elem_cnt) {
int num_blocks = 0;
OF_CUDA_CHECK(cuda::elementwise::GetNumBlocks(elem_cnt, &num_blocks));
return num_blocks;
}
template<typename T>
__launch_bounds__(kBlockSize) __global__
void DoCUDAMaxPool2dForward(const NdIndexOffsetHelper<int64_t, 4> index_helper,
int64_t elem_num, const T* src, T* dest, int64_t* indice_ptr,
int32_t padding_h, int32_t padding_w, int64_t n_batch,
int64_t n_channel, int64_t x_height, int64_t x_width,
int64_t y_height, int64_t y_width, int32_t kernel_size_h,
int32_t kernel_size_w, int32_t stride_h, int32_t stride_w,
int32_t dilation_h, int32_t dilation_w) {
Maxpool2dFarwardCompute<T>(index_helper, elem_num, src, dest, indice_ptr, padding_h, padding_w,
n_batch, n_channel, x_height, x_width, y_height, y_width,
kernel_size_h, kernel_size_w, stride_h, stride_w, dilation_h,
dilation_w);
};
template<typename T>
__launch_bounds__(kBlockSize) __global__
void DoCUDAMaxPool3dForward(const NdIndexOffsetHelper<int64_t, 5> index_helper,
int64_t elem_num, const T* src, T* dest, int64_t* indice_ptr,
int32_t padding_t, int32_t padding_h, int32_t padding_w,
int64_t n_batch, int64_t n_channel, int64_t x_time,
int64_t x_height, int64_t x_width, int64_t y_time, int64_t y_height,
int64_t y_width, int32_t kernel_size_t, int32_t kernel_size_h,
int32_t kernel_size_w, int32_t stride_t, int32_t stride_h,
int32_t stride_w, int32_t dilation_t, int32_t dilation_h,
int32_t dilation_w) {
Maxpool3dFarwardCompute<T>(index_helper, elem_num, src, dest, indice_ptr, padding_t, padding_h,
padding_w, n_batch, n_channel, x_time, x_height, x_width, y_time,
y_height, y_width, kernel_size_t, kernel_size_h, kernel_size_w,
stride_t, stride_h, stride_w, dilation_t, dilation_h, dilation_w);
};
template<typename T>
__launch_bounds__(kBlockSize) __global__
void DoCUDAMaxPool2dBackward(const NdIndexOffsetHelper<int64_t, 4> index_helper,
const int64_t elem_num, const T* src, T* dest,
const int64_t* indice_ptr, const int64_t n_batch,
const int64_t n_channel, const int64_t src_height,
const int64_t src_width, const int64_t dst_height,
const int64_t dst_width) {
Maxpool2dBackwardCompute<T>(index_helper, elem_num, src, dest, indice_ptr, n_batch, n_channel,
src_height, src_width, dst_height, dst_width);
};
template<typename T>
__launch_bounds__(kBlockSize) __global__
void DoCUDAMaxPool3dBackward(const NdIndexOffsetHelper<int64_t, 5> index_helper,
const int64_t elem_num, const T* src, T* dest,
const int64_t* indice_ptr, const int64_t n_batch,
const int64_t n_channel, const int64_t src_time,
const int64_t src_height, const int64_t src_width,
const int64_t dst_time, const int64_t dst_height,
const int64_t dst_width) {
Maxpool3dBackwardCompute<T>(index_helper, elem_num, src, dest, indice_ptr, n_batch, n_channel,
src_time, src_height, src_width, dst_time, dst_height, dst_width);
};
template<typename T>
struct PoolingKernelUtil<DeviceType::kGPU, T> {
static void Maxpool2dForward(DeviceCtx* ctx, const NdIndexOffsetHelper<int64_t, 4>& index_helper,
const int64_t elem_num, const T* src, T* dest, int64_t* indice_ptr,
const PoolingParams3D& params_3d) {
DoCUDAMaxPool2dForward<T>
<<<GetNumBlocks(elem_num), GetMinThreadNum(elem_num), 0, ctx->cuda_stream()>>>(
index_helper, elem_num, src, dest, indice_ptr, params_3d.padding_before_3d()[1],
params_3d.padding_before_3d()[2], params_3d.num_batch(), params_3d.num_channel(),
params_3d.GetXShape5D().At(3), params_3d.GetXShape5D().At(4),
params_3d.GetYShape5D().At(3), params_3d.GetYShape5D().At(4),
params_3d.pooling_size_3d()[1], params_3d.pooling_size_3d()[2],
params_3d.stride_3d()[1], params_3d.stride_3d()[2], params_3d.dilation_3d()[1],
params_3d.dilation_3d()[2]);
}
static void Maxpool2dBackward(DeviceCtx* ctx, const NdIndexOffsetHelper<int64_t, 4>& index_helper,
const int64_t elem_num, const T* src, T* dest,
const int64_t* indice_ptr, const PoolingParams3D& params_3d) {
DoCUDAMaxPool2dBackward<T>
<<<GetNumBlocks(elem_num), GetMinThreadNum(elem_num), 0, ctx->cuda_stream()>>>(
index_helper, elem_num, src, dest, indice_ptr, params_3d.num_batch(),
params_3d.num_channel(), params_3d.GetYShape5D().At(3), params_3d.GetYShape5D().At(4),
params_3d.GetXShape5D().At(3), params_3d.GetXShape5D().At(4));
}
static void Maxpool3dForward(DeviceCtx* ctx, const NdIndexOffsetHelper<int64_t, 5>& index_helper,
const int64_t elem_num, const T* src, T* dest, int64_t* indice_ptr,
const PoolingParams3D& params_3d) {
DoCUDAMaxPool3dForward<T>
<<<GetNumBlocks(elem_num), GetMinThreadNum(elem_num), 0, ctx->cuda_stream()>>>(
index_helper, elem_num, src, dest, indice_ptr, params_3d.padding_before_3d()[0],
params_3d.padding_before_3d()[1], params_3d.padding_before_3d()[2],
params_3d.num_batch(), params_3d.num_channel(), params_3d.GetXShape5D().At(2),
params_3d.GetXShape5D().At(3), params_3d.GetXShape5D().At(4),
params_3d.GetYShape5D().At(2), params_3d.GetYShape5D().At(3),
params_3d.GetYShape5D().At(4), params_3d.pooling_size_3d()[0],
params_3d.pooling_size_3d()[1], params_3d.pooling_size_3d()[2],
params_3d.stride_3d()[0], params_3d.stride_3d()[1], params_3d.stride_3d()[2],
params_3d.dilation_3d()[0], params_3d.dilation_3d()[1], params_3d.dilation_3d()[2]);
}
static void Maxpool3dBackward(DeviceCtx* ctx, const NdIndexOffsetHelper<int64_t, 5>& index_helper,
const int64_t elem_num, const T* src, T* dest,
const int64_t* indice_ptr, const PoolingParams3D& params_3d) {
DoCUDAMaxPool3dBackward<T>
<<<GetNumBlocks(elem_num), GetMinThreadNum(elem_num), 0, ctx->cuda_stream()>>>(
index_helper, elem_num, src, dest, indice_ptr, params_3d.num_batch(),
params_3d.num_channel(), params_3d.GetYShape5D().At(2), params_3d.GetYShape5D().At(3),
params_3d.GetYShape5D().At(4), params_3d.GetXShape5D().At(2),
params_3d.GetXShape5D().At(3), params_3d.GetXShape5D().At(4));
}
};
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_POOLING_KERNEL_UTIL, (DeviceType::kGPU),
POOLING_DATA_TYPE_GPU_SEQ);
} // namespace oneflow
#endif // WITH_CUDA
|
829a1cfd68d83b819fd55231c5f8faa350f1dae1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** @file grid-points.cu a test where grid points are sorted into a grid */
#include <halloc.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/** a macro for checking CUDA calls */
#define cucheck(call) \
{ \
hipError_t cucheck_err = (call); \
if(cucheck_err != hipSuccess) { \
const char* err_str = hipGetErrorString(cucheck_err); \
fprintf(stderr, "%s (%d): %s in %s\n", __FILE__, __LINE__, err_str, #call); \
exit(-1); \
} \
}
int divup(int a, int b) { return a / b + (a % b ? 1 : 0); }
/** a simple 3d vector */
template<class T>
struct vec3 {
T x, y, z;
__host__ __device__ vec3(T x, T y, T z) {
this->x = x; this->y = y; this->z = z;
}
__host__ __device__ vec3(T r = 0) {
this->x = this->y = this->z = r;
}
};
typedef vec3<int> ivec3;
typedef vec3<float> fvec3;
/** a single point list */
struct point_list_t {
/** point index */
int ip;
/** next element in the list, or 0 if end */
point_list_t *next;
};
/** gets a random float value between 0 and 1 */
float frandom(void) {
const int rand_max = 65536;
return (double)(random() % rand_max) / rand_max;
}
/** gets a random point within [0, 1]^3 cube */
fvec3 random_point(void) {
return fvec3(frandom(), frandom(), frandom());
} // random_point
typedef unsigned long long int uint64;
/** atomicCAS wrapper for pointers (arguments same as standard atomicCAS()) */
__device__ void *atomicCAS(void **address, void *compare, void *val) {
return (void *)atomicCAS((uint64 *)address, (uint64)compare, (uint64)val);
} // atomicCAS
/** a function to insert a point into a grid on device; this function can be
called concurrently by multiple threads */
__device__ void insert_point
(point_list_t **grid, int ncells, const fvec3 * __restrict__ ps, int ip,
point_list_t *plist) {
// compute the cell
fvec3 p = ps[ip];
ivec3 cell;
cell.x = max(min((int)floorf(p.x * ncells), ncells - 1), 0);
cell.y = max(min((int)floorf(p.y * ncells), ncells - 1), 0);
cell.z = max(min((int)floorf(p.z * ncells), ncells - 1), 0);
// cell.x = (int)floorf(p.x * ncells);
// cell.y = (int)floorf(p.y * ncells);
// cell.z = (int)floorf(p.z * ncells);
//printf("point = (%lf, %lf, %lf)\n, cell = (%d, %d, %d)",
// (double)p.x, (double)p.y, (double)p.z, cell.x, cell.y, cell.z);
// get the cell pointer
point_list_t * volatile *pcell = grid + (cell.x + ncells * (cell.y + ncells *
cell.z));
//point_list_t * volatile *pcell = grid + ip % (ncells * ncells * ncells);
//point_list_t *plist = (point_list_t *)malloc(sizeof(point_list_t));
plist->ip = ip;
plist->next = 0;
// try to take over the new start
// TODO: add __threadfence() somewhere
point_list_t *old = *pcell, *assumed;
do {
assumed = old;
plist->next = assumed;
old = (point_list_t *)atomicCAS((void **)pcell, assumed, plist);
//*pcell = plist;
} while(old != assumed);
// when the loop is over, new point is there
} // insert_point
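// Editorial note (generic sketch, not source code): the loop above is the
// standard lock-free "push onto a singly linked list" idiom,
//
//   old = *head;
//   do {
//     assumed = old;
//     node->next = assumed;
//     old = atomicCAS(head, assumed, node);
//   } while (old != assumed);
//
// i.e. keep retrying until the head observed while linking the node is still
// the head at publication time.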
/** frees the grid cell; one cell can be simultaneously freed by one thread only
*/
__device__ void free_cell(point_list_t **grid, int ncells, ivec3 cell,
point_list_t *pre_chains) {
point_list_t **pcell = grid + cell.x + ncells * (cell.y + ncells * cell.z);
// free all cells
point_list_t *plist = *pcell, *pnext;
while(plist) {
pnext = plist->next;
//plist->next = 0;
if(!pre_chains) {
hafree(plist);
//delete plist;
}
//free(plist);
plist = pnext;
}
} // free_cell
/** the kernel to insert points into the grid */
__global__ void sort_points_k
(point_list_t **grid, int ncells, const fvec3 * __restrict__ ps,
point_list_t *pre_chains, int n) {
int ip = threadIdx.x + blockIdx.x * blockDim.x;
if(ip >= n)
return;
// allocate memory for list element
point_list_t *plist;
if(pre_chains)
plist = pre_chains + ip;
else {
plist = (point_list_t *)hamalloc(sizeof(point_list_t));
//plist = new point_list_t();
}
if(!plist) {
//printf("cannot allocate memory\n");
return;
}
insert_point(grid, ncells, ps, ip, plist);
} // sort_points_k
/** the kernel to free the entire grid; this is 1d kernel */
__global__ void free_grid_k
(point_list_t **grid, int ncells, point_list_t *pre_chains) {
int ncells3 = ncells * ncells * ncells;
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= ncells3)
return;
ivec3 cell;
cell.x = i % ncells;
cell.y = i % (ncells * ncells) / ncells;
cell.z = i / (ncells * ncells);
free_cell(grid, ncells, cell, pre_chains);
} // free_grid_k
// a test to fill in the grid and then free it
void grid_test(int n, int ncells, bool alloc, bool print) {
// points
size_t sz = n * sizeof(fvec3);
fvec3 *ps, *d_ps;
ps = (fvec3 *)malloc(sz);
cucheck(hipMalloc((void **)&d_ps, sz));
for(int ip = 0; ip < n; ip++) {
ps[ip] = random_point();
//printf("point = (%lf, %lf %lf)\n", (double)ps[ip].x, (double)ps[ip].y,
// (double)ps[ip].z);
}
cucheck(hipMemcpy(d_ps, ps, sz, hipMemcpyHostToDevice));
// grid
int ncells3 = ncells * ncells * ncells;
size_t grid_sz = ncells3 * sizeof(point_list_t *);
point_list_t **d_grid;
cucheck(hipMalloc((void **)&d_grid, grid_sz));
cucheck(hipMemset(d_grid, 0, grid_sz));
// pre-allocated per-point chains
point_list_t *pre_chains = 0;
if(!alloc) {
cucheck(hipMalloc((void **)&pre_chains, n * sizeof(point_list_t)));
cucheck(hipMemset(pre_chains, 0, n * sizeof(point_list_t)));
}
// fill the grid
double t1 = omp_get_wtime();
int bs = 128;
hipLaunchKernelGGL(( sort_points_k), dim3(divup(n, bs)), dim3(bs), 0, 0, d_grid, ncells, d_ps, pre_chains, n);
cucheck(hipGetLastError());
cucheck(hipStreamSynchronize(0));
double t2 = omp_get_wtime();
// free the grid
hipLaunchKernelGGL(( free_grid_k), dim3(divup(ncells3, bs)), dim3(bs), 0, 0, d_grid, ncells, pre_chains);
cucheck(hipGetLastError());
cucheck(hipStreamSynchronize(0));
double t3 = omp_get_wtime();
// free everything
//free(ps);
cucheck(hipFree(d_grid));
cucheck(hipFree(d_ps));
cucheck(hipFree(pre_chains));
// print time
if(print) {
printf("allocation time %.2lf ms\n", (t2 - t1) * 1e3);
printf("free time %.2lf ms\n", (t3 - t2) * 1e3);
printf("allocation performance %.2lf Mpoints/s\n", n / (t2 - t1) * 1e-6);
printf("free performance %.2lf Mpoints/s\n", n / (t3 - t2) * 1e-6);
} // if(print)
} // grid_test
int main(int argc, char **argv) {
srandom((int)time(0));
size_t memory = 512 * 1024 * 1024;
bool alloc = true;
//cucheck(hipSetDevice(0));
ha_init(halloc_opts_t(memory));
// warm-up run
grid_test(10000, 8, alloc, false);
// main run
grid_test(1000000, 32, alloc, true);
ha_shutdown();
} // main
| 829a1cfd68d83b819fd55231c5f8faa350f1dae1.cu | /** @file grid-points.cu a test where grid points are sorted into a grid */
#include <halloc.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/** a macro for checking CUDA calls */
#define cucheck(call) \
{ \
cudaError_t cucheck_err = (call); \
if(cucheck_err != cudaSuccess) { \
const char* err_str = cudaGetErrorString(cucheck_err); \
fprintf(stderr, "%s (%d): %s in %s\n", __FILE__, __LINE__, err_str, #call); \
exit(-1); \
} \
}
int divup(int a, int b) { return a / b + (a % b ? 1 : 0); }
/** a simple 3d vector */
template<class T>
struct vec3 {
T x, y, z;
__host__ __device__ vec3(T x, T y, T z) {
this->x = x; this->y = y; this->z = z;
}
__host__ __device__ vec3(T r = 0) {
this->x = this->y = this->z = r;
}
};
typedef vec3<int> ivec3;
typedef vec3<float> fvec3;
/** a single point list */
struct point_list_t {
/** point index */
int ip;
/** next element in the list, or 0 if end */
point_list_t *next;
};
/** gets a random float value between 0 and 1 */
float frandom(void) {
const int rand_max = 65536;
return (double)(random() % rand_max) / rand_max;
}
/** gets a random point within [0, 1]^3 cube */
fvec3 random_point(void) {
return fvec3(frandom(), frandom(), frandom());
} // random_point
typedef unsigned long long int uint64;
/** atomicCAS wrapper for pointers (arguments same as standard atomicCAS()) */
__device__ void *atomicCAS(void **address, void *compare, void *val) {
return (void *)atomicCAS((uint64 *)address, (uint64)compare, (uint64)val);
} // atomicCAS
/** a function to insert a point into a grid on device; this function can be
called concurrently by multiple threads */
__device__ void insert_point
(point_list_t **grid, int ncells, const fvec3 * __restrict__ ps, int ip,
point_list_t *plist) {
// compute the cell
fvec3 p = ps[ip];
ivec3 cell;
cell.x = max(min((int)floorf(p.x * ncells), ncells - 1), 0);
cell.y = max(min((int)floorf(p.y * ncells), ncells - 1), 0);
cell.z = max(min((int)floorf(p.z * ncells), ncells - 1), 0);
// cell.x = (int)floorf(p.x * ncells);
// cell.y = (int)floorf(p.y * ncells);
// cell.z = (int)floorf(p.z * ncells);
//printf("point = (%lf, %lf, %lf)\n, cell = (%d, %d, %d)",
// (double)p.x, (double)p.y, (double)p.z, cell.x, cell.y, cell.z);
// get the cell pointer
point_list_t * volatile *pcell = grid + (cell.x + ncells * (cell.y + ncells *
cell.z));
//point_list_t * volatile *pcell = grid + ip % (ncells * ncells * ncells);
//point_list_t *plist = (point_list_t *)malloc(sizeof(point_list_t));
plist->ip = ip;
plist->next = 0;
// try to take over the new start
// TODO: add __threadfence() somewhere
point_list_t *old = *pcell, *assumed;
do {
assumed = old;
plist->next = assumed;
old = (point_list_t *)atomicCAS((void **)pcell, assumed, plist);
//*pcell = plist;
} while(old != assumed);
// when the loop is over, new point is there
} // insert_point
/** frees the grid cell; one cell can be simultaneously freed by one thread only
*/
__device__ void free_cell(point_list_t **grid, int ncells, ivec3 cell,
point_list_t *pre_chains) {
point_list_t **pcell = grid + cell.x + ncells * (cell.y + ncells * cell.z);
// free all cells
point_list_t *plist = *pcell, *pnext;
while(plist) {
pnext = plist->next;
//plist->next = 0;
if(!pre_chains) {
hafree(plist);
//delete plist;
}
//free(plist);
plist = pnext;
}
} // free_cell
/** the kernel to insert points into the grid */
__global__ void sort_points_k
(point_list_t **grid, int ncells, const fvec3 * __restrict__ ps,
point_list_t *pre_chains, int n) {
int ip = threadIdx.x + blockIdx.x * blockDim.x;
if(ip >= n)
return;
// allocate memory for list element
point_list_t *plist;
if(pre_chains)
plist = pre_chains + ip;
else {
plist = (point_list_t *)hamalloc(sizeof(point_list_t));
//plist = new point_list_t();
}
if(!plist) {
//printf("cannot allocate memory\n");
return;
}
insert_point(grid, ncells, ps, ip, plist);
} // sort_points_k
/** the kernel to free the entire grid; this is 1d kernel */
__global__ void free_grid_k
(point_list_t **grid, int ncells, point_list_t *pre_chains) {
int ncells3 = ncells * ncells * ncells;
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= ncells3)
return;
ivec3 cell;
cell.x = i % ncells;
cell.y = i % (ncells * ncells) / ncells;
cell.z = i / (ncells * ncells);
free_cell(grid, ncells, cell, pre_chains);
} // free_grid_k
// a test to fill in the grid and then free it
void grid_test(int n, int ncells, bool alloc, bool print) {
// points
size_t sz = n * sizeof(fvec3);
fvec3 *ps, *d_ps;
ps = (fvec3 *)malloc(sz);
cucheck(cudaMalloc((void **)&d_ps, sz));
for(int ip = 0; ip < n; ip++) {
ps[ip] = random_point();
//printf("point = (%lf, %lf %lf)\n", (double)ps[ip].x, (double)ps[ip].y,
// (double)ps[ip].z);
}
cucheck(cudaMemcpy(d_ps, ps, sz, cudaMemcpyHostToDevice));
// grid
int ncells3 = ncells * ncells * ncells;
size_t grid_sz = ncells3 * sizeof(point_list_t *);
point_list_t **d_grid;
cucheck(cudaMalloc((void **)&d_grid, grid_sz));
cucheck(cudaMemset(d_grid, 0, grid_sz));
// pre-allocated per-point chains
point_list_t *pre_chains = 0;
if(!alloc) {
cucheck(cudaMalloc((void **)&pre_chains, n * sizeof(point_list_t)));
cucheck(cudaMemset(pre_chains, 0, n * sizeof(point_list_t)));
}
// fill the grid
double t1 = omp_get_wtime();
int bs = 128;
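// one thread per point: each thread obtains a list node (hamalloc, unless chains were pre-allocated)
// and pushes it onto its grid cell's list with an atomicCAS loop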
sort_points_k<<<divup(n, bs), bs>>>(d_grid, ncells, d_ps, pre_chains, n);
cucheck(cudaGetLastError());
cucheck(cudaStreamSynchronize(0));
double t2 = omp_get_wtime();
// free the grid
free_grid_k<<<divup(ncells3, bs), bs>>>(d_grid, ncells, pre_chains);
cucheck(cudaGetLastError());
cucheck(cudaStreamSynchronize(0));
double t3 = omp_get_wtime();
// free everything
//free(ps);
cucheck(cudaFree(d_grid));
cucheck(cudaFree(d_ps));
cucheck(cudaFree(pre_chains));
// print time
if(print) {
printf("allocation time %.2lf ms\n", (t2 - t1) * 1e3);
printf("free time %.2lf ms\n", (t3 - t2) * 1e3);
printf("allocation performance %.2lf Mpoints/s\n", n / (t2 - t1) * 1e-6);
printf("free performance %.2lf Mpoints/s\n", n / (t3 - t2) * 1e-6);
} // if(print)
} // grid_test
int main(int argc, char **argv) {
srandom((int)time(0));
size_t memory = 512 * 1024 * 1024;
bool alloc = true;
//cucheck(cudaSetDevice(0));
ha_init(halloc_opts_t(memory));
// warm-up run
grid_test(10000, 8, alloc, false);
// main run
grid_test(1000000, 32, alloc, true);
ha_shutdown();
} // main
|
b34f0bac34cdcd2849176e67af923a547c6ffa7c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "transposeUnroll4Col.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *in = NULL;
hipMalloc(&in, XSIZE*YSIZE);
int *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
const int nx = 1;
const int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
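// round the matrix dimensions up to the next multiple of the block dimensions so the grid covers them exactly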
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((transposeUnroll4Col), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, nx, ny);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((transposeUnroll4Col), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, nx, ny);
}
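// the 10 launches above are an untimed warm-up; the 1000 launches below are timed,
// but kernel launches are asynchronous, so without a device synchronize before taking
// 'end' the measurement may not cover the full execution time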
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((transposeUnroll4Col), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, nx, ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b34f0bac34cdcd2849176e67af923a547c6ffa7c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "transposeUnroll4Col.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE);
int *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
const int nx = 1;
const int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
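// round the matrix dimensions up to the next multiple of the block dimensions so the grid covers them exactly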
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
transposeUnroll4Col<<<gridBlock,threadBlock>>>(in,out,nx,ny);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
transposeUnroll4Col<<<gridBlock,threadBlock>>>(in,out,nx,ny);
}
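// the 10 launches above are an untimed warm-up; the 1000 launches below are timed,
// but kernel launches are asynchronous, so without a device synchronize before taking
// 'end' the measurement may not cover the full execution time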
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
transposeUnroll4Col<<<gridBlock,threadBlock>>>(in,out,nx,ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
edd0c9bcce4dd1d4431d8e932dcb87c7fd7dc92f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
csymv_upper.cu is nearly identical to chemv_upper.cu, just change names and drop MAGMA_C_CONJ.
chemv_kernel_U (upper) in chemv_upper.cu is very similar to
chemv_kernel_L (lower) in chemv.cu; diff the two files to compare.
@generated from magmablas/zhemv_upper.cu, normal z -> c, Tue Aug 30 09:38:29 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
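// NB_X x NB_Y = 64x4 threads per block; bank_shift = 33 gives the 32x32 shared-memory view
// a padded column stride to reduce shared-memory bank conflicts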
/***************************************************************************//**
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
*******************************************************************************/
__global__ void
chemv_kernel_U(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_C_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += MAGMA_C_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 )
A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X )
A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=blk+1; jj < gridDim.x; ++jj) {
partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_C_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_C_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_C_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end chemv_kernel_U
/***************************************************************************//**
Upper case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
*******************************************************************************/
__global__ void
chemv_kernel_U_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind;
magmaFloatComplex Ax = MAGMA_C_ZERO;
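// row ind lies in block row blk, so its final value is the sum of the partial results
// work(ind, 0), ..., work(ind, blk)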
for (int j = 0; j <= blk; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
| edd0c9bcce4dd1d4431d8e932dcb87c7fd7dc92f.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
csymv_upper.cu is nearly identical to chemv_upper.cu, just change names and drop MAGMA_C_CONJ.
chemv_kernel_U (upper) in chemv_upper.cu is very similar to
chemv_kernel_L (lower) in chemv.cu; diff the two files to compare.
@generated from magmablas/zhemv_upper.cu, normal z -> c, Tue Aug 30 09:38:29 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
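// NB_X x NB_Y = 64x4 threads per block; bank_shift = 33 gives the 32x32 shared-memory view
// a padded column stride to reduce shared-memory bank conflicts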
/***************************************************************************//**
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
*******************************************************************************/
__global__ void
chemv_kernel_U(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_C_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += MAGMA_C_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 )
A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X )
A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=blk+1; jj < gridDim.x; ++jj) {
partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_C_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_C_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_C_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end chemv_kernel_U
/***************************************************************************//**
Upper case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
*******************************************************************************/
__global__ void
chemv_kernel_U_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind;
magmaFloatComplex Ax = MAGMA_C_ZERO;
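// row ind lies in block row blk, so its final value is the sum of the partial results
// work(ind, 0), ..., work(ind, blk)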
for (int j = 0; j <= blk; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
|
77a22fc12891d7fb75241e3863021126ccbe5789.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define THREADS_PER_BLOCK 3
#define TOTAL_BLOCKS 1
//make sure numbers above match the matlab script
__device__ int blockSums [TOTAL_BLOCKS];
__constant__ int VECTOR_SIZE;
__global__ void dot_product (int* a, int*b, int*c)
{
__shared__ int multiplicationStorage [THREADS_PER_BLOCK];
if (threadIdx.x < VECTOR_SIZE)
multiplicationStorage[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
__syncthreads();
if (threadIdx.x == 0){
//compute sum
int tempSum = 0;
for (int i = 0; i < VECTOR_SIZE; i++){
tempSum+=multiplicationStorage[i];
}
blockSums[blockIdx.x]=tempSum;
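// NOTE: the __syncthreads() below is reached only by thread 0 (a divergent branch), which is
// formally undefined behaviour, and it does not synchronize across blocks; the cross-block
// sum that follows is only correct here because TOTAL_BLOCKS == 1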
__syncthreads();
if (blockIdx.x==0)
for (int i = 0; i < TOTAL_BLOCKS; i++)
*c+=blockSums[i];
//atomicAdd(c,tempSum);
}
}
| 77a22fc12891d7fb75241e3863021126ccbe5789.cu | #define THREADS_PER_BLOCK 3
#define TOTAL_BLOCKS 1
//make sure numbers above match the matlab script
__device__ int blockSums [TOTAL_BLOCKS];
__constant__ int VECTOR_SIZE;
__global__ void dot_product (int* a, int*b, int*c)
{
__shared__ int multiplicationStorage [THREADS_PER_BLOCK];
if (threadIdx.x < VECTOR_SIZE)
multiplicationStorage[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
__syncthreads();
if (threadIdx.x == 0){
//compute sum
int tempSum = 0;
for (int i = 0; i < VECTOR_SIZE; i++){
tempSum+=multiplicationStorage[i];
}
blockSums[blockIdx.x]=tempSum;
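// NOTE: the __syncthreads() below is reached only by thread 0 (a divergent branch), which is
// formally undefined behaviour, and it does not synchronize across blocks; the cross-block
// sum that follows is only correct here because TOTAL_BLOCKS == 1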
__syncthreads();
if (blockIdx.x==0)
for (int i = 0; i < TOTAL_BLOCKS; i++)
*c+=blockSums[i];
//atomicAdd(c,tempSum);
}
}
|
c31ffd314300f155a7bd377ab1426d8e55593de1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
__global__ void MatrixMulKernel(int m, int n, int k, float *A, float *B, float *C)
{
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
if ((Row < m) && (Col < k))
{
float Cvalue = 0.0;
for (int i = 0; i < n; ++i)
Cvalue += A[Row * n + i] * B[Col + i * k];
C[Row * k + Col] = Cvalue;
}
}
#define TILE_WIDTH 16
int main()
{
// matrices are stored as 1-D arrays in row-major order
int m = 4096, n = 4096, k = 4096;
float *A = (float *)malloc(m * n * sizeof(float));
float *B = (float *)malloc(n * k * sizeof(float));
float *C = (float *)malloc(m * k * sizeof(float));
float *result = (float *)malloc(m * k * sizeof(float));
for (int i = 0; i < m; ++i)
for (int j = 0; j < m; ++j)
{
A[i * m + j] = (i - 0.1 * j + 1) / (i + j + 1);
B[i * m + j] = (j - 0.2 * i + 1) * (i + j + 1) / (i * i + j * j + 1);
C[i * m + j] = 0.0;
}
// allocate device memory
int size = sizeof(float);
float *d_a;
float *d_b;
float *d_c;
// GPU time calculate start
hipEvent_t start, stop;
float elapsedTime = 0.0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
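// NOTE: the timed region includes the device allocations and the host<->device copies,
// not only the kernel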
hipMalloc((void **)&d_a, m * n * size);
hipMalloc((void **)&d_b, n * k * size);
hipMalloc((void **)&d_c, m * k * size);
// copy the data from host to device
hipMemcpy(d_a, A, size * m * n, hipMemcpyHostToDevice);
hipMemcpy(d_b, B, size * n * k, hipMemcpyHostToDevice);
hipMemcpy(d_c, C, size * m * k, hipMemcpyHostToDevice);
// set up the grid and block dimensions
dim3 dimGrid((k - 1) / TILE_WIDTH + 1, (m - 1) / TILE_WIDTH + 1, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
// launch the kernel
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, m, n, k, d_a, d_b, d_c);
// copy the result back to the host
hipMemcpy(C, d_c, size * m * k, hipMemcpyDeviceToHost);
// GPU time calculate end
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
cout << "GPU time: " << elapsedTime << " ms" << endl;
// compute the reference result on the CPU
clock_t begin = clock();
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < m; ++j)
{
float sum = 0;
for (int k = 0; k < m; ++k)
sum += A[i * m + k] * B[k * m + j];
result[i * m + j] = sum;
}
}
clock_t end = clock();
cout << "CPU time: " << (end - begin) * 1000 / CLOCKS_PER_SEC << " ms" << endl;
// compare the results
bool flag = true;
for (int i = 0; i < m * k; ++i)
{
if (abs(result[i] - C[i]) > 0.001)
{
flag = false;
cout << result[i] << "-" << C[i] << endl;
}
}
if (flag)
cout << "Check answer: Correct!" << endl;
else
cout << "Check answer: Error!" << endl;
// free device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(A);
free(B);
free(C);
free(result);
return 0;
} | c31ffd314300f155a7bd377ab1426d8e55593de1.cu | #include <iostream>
using namespace std;
__global__ void MatrixMulKernel(int m, int n, int k, float *A, float *B, float *C)
{
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
if ((Row < m) && (Col < k))
{
float Cvalue = 0.0;
for (int i = 0; i < n; ++i)
Cvalue += A[Row * n + i] * B[Col + i * k];
C[Row * k + Col] = Cvalue;
}
}
#define TILE_WIDTH 16
int main()
{
// matrices are stored as 1-D arrays in row-major order
int m = 4096, n = 4096, k = 4096;
float *A = (float *)malloc(m * n * sizeof(float));
float *B = (float *)malloc(n * k * sizeof(float));
float *C = (float *)malloc(m * k * sizeof(float));
float *result = (float *)malloc(m * k * sizeof(float));
for (int i = 0; i < m; ++i)
for (int j = 0; j < m; ++j)
{
A[i * m + j] = (i - 0.1 * j + 1) / (i + j + 1);
B[i * m + j] = (j - 0.2 * i + 1) * (i + j + 1) / (i * i + j * j + 1);
C[i * m + j] = 0.0;
}
// allocate device memory
int size = sizeof(float);
float *d_a;
float *d_b;
float *d_c;
// GPU time calculate start
cudaEvent_t start, stop;
float elapsedTime = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
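// NOTE: the timed region includes the device allocations and the host<->device copies,
// not only the kernel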
cudaMalloc((void **)&d_a, m * n * size);
cudaMalloc((void **)&d_b, n * k * size);
cudaMalloc((void **)&d_c, m * k * size);
// copy the data from host to device
cudaMemcpy(d_a, A, size * m * n, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, B, size * n * k, cudaMemcpyHostToDevice);
cudaMemcpy(d_c, C, size * m * k, cudaMemcpyHostToDevice);
// set up the grid and block dimensions
dim3 dimGrid((k - 1) / TILE_WIDTH + 1, (m - 1) / TILE_WIDTH + 1, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
// launch the kernel
MatrixMulKernel<<<dimGrid, dimBlock>>>(m, n, k, d_a, d_b, d_c);
// copy the result back to the host
cudaMemcpy(C, d_c, size * m * k, cudaMemcpyDeviceToHost);
// GPU time calculate end
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cout << "GPU time: " << elapsedTime << " ms" << endl;
// compute the reference result on the CPU
clock_t begin = clock();
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < m; ++j)
{
float sum = 0;
for (int k = 0; k < m; ++k)
sum += A[i * m + k] * B[k * m + j];
result[i * m + j] = sum;
}
}
clock_t end = clock();
cout << "CPU time: " << (end - begin) * 1000 / CLOCKS_PER_SEC << " ms" << endl;
// compare the results
bool flag = true;
for (int i = 0; i < m * k; ++i)
{
if (abs(result[i] - C[i]) > 0.001)
{
flag = false;
cout << result[i] << "-" << C[i] << endl;
}
}
if (flag)
cout << "Check answer: Correct!" << endl;
else
cout << "Check answer: Error!" << endl;
// free device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(A);
free(B);
free(C);
free(result);
return 0;
} |
aa1ea1a1ca33ca4ffad51dc842db747fd9e12d27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <unordered_map>
#include "paddle/fluid/memory/detail/memory_block.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
// This unit test is an example comparing the performance between using pinned
// memory and not. In general, using pinned memory will be faster.
template <typename T>
__global__ void Kernel(T* output, int dim) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < dim) {
output[tid] = output[tid] * output[tid] / 100;
}
}
template <typename Place>
float test_pinned_memory() {
Place cpu_place;
paddle::platform::CUDAPlace cuda_place;
const int data_size = 4096;
const int iteration = 10;
// create event start and end
hipEvent_t start_e, stop_e, copying_e;
float elapsedTime = 0;
hipEventCreate(&start_e);
hipEventCreate(&stop_e);
hipEventCreate(©ing_e);
// create computation stream, data copying stream
hipStream_t computation_stream, copying_stream;
hipStreamCreate(&computation_stream);
hipStreamCreate(©ing_stream);
// create record event, pinned memory, gpu memory
std::vector<hipEvent_t> record_event(iteration);
std::vector<float*> input_pinned_mem(iteration);
std::vector<float*> gpu_mem(iteration);
std::vector<float*> output_pinned_mem(iteration);
// initial data
for (int j = 0; j < iteration; ++j) {
hipEventCreateWithFlags(&record_event[j], hipEventDisableTiming);
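// NOTE: the plain hipEventCreate below overwrites this handle, so the event created
// with hipEventDisableTiming is leaked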
hipEventCreate(&(record_event[j]));
input_pinned_mem[j] = static_cast<float*>(
paddle::memory::Alloc(cpu_place, data_size * sizeof(float)));
output_pinned_mem[j] = static_cast<float*>(
paddle::memory::Alloc(cpu_place, data_size * sizeof(float)));
gpu_mem[j] = static_cast<float*>(
paddle::memory::Alloc(cuda_place, data_size * sizeof(float)));
for (int k = 0; k < data_size; ++k) {
input_pinned_mem[j][k] = k;
}
}
hipEventRecord(start_e, computation_stream);
// computation
for (int m = 0; m < 30; ++m) {
for (int i = 0; i < iteration; ++i) {
// cpu -> GPU on computation stream.
// note: this operation is async for pinned memory.
paddle::memory::Copy(cuda_place,
gpu_mem[i],
cpu_place,
input_pinned_mem[i],
data_size * sizeof(float),
computation_stream);
// call kernel on computation stream.
hipLaunchKernelGGL(( Kernel), dim3(4), dim3(1024), 0, computation_stream, gpu_mem[i], data_size);
// record event_computation on computation stream
hipEventRecord(record_event[i], computation_stream);
// wait event_computation on copy stream.
// note: this operation is async.
hipStreamWaitEvent(copying_stream, record_event[i], 0);
// copy data GPU->CPU, on copy stream.
// note: this operation is async for pinned memory.
paddle::memory::Copy(cpu_place,
output_pinned_mem[i],
cuda_place,
gpu_mem[i],
data_size * sizeof(float),
copying_stream);
}
}
hipEventRecord(copying_e, copying_stream);
hipStreamWaitEvent(computation_stream, copying_e, 0);
hipEventRecord(stop_e, computation_stream);
hipEventSynchronize(start_e);
hipEventSynchronize(stop_e);
hipEventElapsedTime(&elapsedTime, start_e, stop_e);
// std::cout << cpu_place << " "
// << "time consume:" << elapsedTime / 30 << std::endl;
for (int l = 0; l < iteration; ++l) {
for (int k = 0; k < data_size; ++k) {
float temp = input_pinned_mem[l][k];
temp = temp * temp / 100;
EXPECT_FLOAT_EQ(temp, output_pinned_mem[l][k]);
}
}
// destroy resource
hipEventDestroy(copying_e);
hipEventDestroy(start_e);
hipEventDestroy(stop_e);
for (int j = 0; j < 10; ++j) {
hipEventDestroy((record_event[j]));
paddle::memory::Free(cpu_place, input_pinned_mem[j]);
paddle::memory::Free(cpu_place, output_pinned_mem[j]);
paddle::memory::Free(cuda_place, gpu_mem[j]);
}
return elapsedTime / 30;
}
TEST(CPUANDCUDAPinned, CPUAllocatorAndCUDAPinnedAllocator) {
// Generally speaking, operation on pinned_memory is faster than that on
// unpinned-memory, but if this unit test fails frequently, please close this
// test for the time being.
float time1 = test_pinned_memory<paddle::platform::CPUPlace>();
float time2 = test_pinned_memory<paddle::platform::CUDAPinnedPlace>();
EXPECT_GT(time1, time2);
}
| aa1ea1a1ca33ca4ffad51dc842db747fd9e12d27.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <unordered_map>
#include "paddle/fluid/memory/detail/memory_block.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
// This unit test is an example comparing the performance between using pinned
// memory and not. In general, using pinned memory will be faster.
template <typename T>
__global__ void Kernel(T* output, int dim) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < dim) {
output[tid] = output[tid] * output[tid] / 100;
}
}
template <typename Place>
float test_pinned_memory() {
Place cpu_place;
paddle::platform::CUDAPlace cuda_place;
const int data_size = 4096;
const int iteration = 10;
// create event start and end
cudaEvent_t start_e, stop_e, copying_e;
float elapsedTime = 0;
cudaEventCreate(&start_e);
cudaEventCreate(&stop_e);
cudaEventCreate(©ing_e);
// create computation stream, data copying stream
cudaStream_t computation_stream, copying_stream;
cudaStreamCreate(&computation_stream);
cudaStreamCreate(©ing_stream);
// create record event, pinned memory, gpu memory
std::vector<cudaEvent_t> record_event(iteration);
std::vector<float*> input_pinned_mem(iteration);
std::vector<float*> gpu_mem(iteration);
std::vector<float*> output_pinned_mem(iteration);
// initial data
for (int j = 0; j < iteration; ++j) {
cudaEventCreateWithFlags(&record_event[j], cudaEventDisableTiming);
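// NOTE: the plain cudaEventCreate below overwrites this handle, so the event created
// with cudaEventDisableTiming is leaked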
cudaEventCreate(&(record_event[j]));
input_pinned_mem[j] = static_cast<float*>(
paddle::memory::Alloc(cpu_place, data_size * sizeof(float)));
output_pinned_mem[j] = static_cast<float*>(
paddle::memory::Alloc(cpu_place, data_size * sizeof(float)));
gpu_mem[j] = static_cast<float*>(
paddle::memory::Alloc(cuda_place, data_size * sizeof(float)));
for (int k = 0; k < data_size; ++k) {
input_pinned_mem[j][k] = k;
}
}
cudaEventRecord(start_e, computation_stream);
// computation
for (int m = 0; m < 30; ++m) {
for (int i = 0; i < iteration; ++i) {
// cpu -> GPU on computation stream.
// note: this operation is async for pinned memory.
paddle::memory::Copy(cuda_place,
gpu_mem[i],
cpu_place,
input_pinned_mem[i],
data_size * sizeof(float),
computation_stream);
// call kernel on computation stream.
Kernel<<<4, 1024, 0, computation_stream>>>(gpu_mem[i], data_size);
// record event_computation on computation stream
cudaEventRecord(record_event[i], computation_stream);
// wait event_computation on copy stream.
// note: this operation is async.
cudaStreamWaitEvent(copying_stream, record_event[i], 0);
// copy data GPU->CPU, on copy stream.
// note: this operation is async for pinned memory.
paddle::memory::Copy(cpu_place,
output_pinned_mem[i],
cuda_place,
gpu_mem[i],
data_size * sizeof(float),
copying_stream);
}
}
cudaEventRecord(copying_e, copying_stream);
cudaStreamWaitEvent(computation_stream, copying_e, 0);
cudaEventRecord(stop_e, computation_stream);
cudaEventSynchronize(start_e);
cudaEventSynchronize(stop_e);
cudaEventElapsedTime(&elapsedTime, start_e, stop_e);
// std::cout << cpu_place << " "
// << "time consume:" << elapsedTime / 30 << std::endl;
for (int l = 0; l < iteration; ++l) {
for (int k = 0; k < data_size; ++k) {
float temp = input_pinned_mem[l][k];
temp = temp * temp / 100;
EXPECT_FLOAT_EQ(temp, output_pinned_mem[l][k]);
}
}
// destroy resource
cudaEventDestroy(copying_e);
cudaEventDestroy(start_e);
cudaEventDestroy(stop_e);
for (int j = 0; j < 10; ++j) {
cudaEventDestroy((record_event[j]));
paddle::memory::Free(cpu_place, input_pinned_mem[j]);
paddle::memory::Free(cpu_place, output_pinned_mem[j]);
paddle::memory::Free(cuda_place, gpu_mem[j]);
}
return elapsedTime / 30;
}
TEST(CPUANDCUDAPinned, CPUAllocatorAndCUDAPinnedAllocator) {
// Generally speaking, operation on pinned_memory is faster than that on
// unpinned-memory, but if this unit test fails frequently, please close this
// test for the time being.
float time1 = test_pinned_memory<paddle::platform::CPUPlace>();
float time2 = test_pinned_memory<paddle::platform::CUDAPinnedPlace>();
EXPECT_GT(time1, time2);
}
|
56158b13c9b17c96cf6e4c8ded30f074bab59743.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/native/SortingUtils.h>
#include <assert.h>
#include <c10/macros/Macros.h>
#include <stdlib.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <THH/THHDeviceUtils.cuh> // only for THCRoundUp?
#include <THH/THHNumerics.cuh>
#include <THH/THHScanUtils.cuh>
#include <THH/THHTensorMathReduce.cuh> // AddOp
#include <ATen/native/hip/SortingCommon.cuh>
#include <ATen/native/hip/SortingRadixSelect.cuh>
#include <ATen/NamedTensorUtils.h>
namespace at {
namespace native {
namespace {
template <typename scalar_t, typename index_t, int Dim>
__global__ void gatherKthValue(
cuda::detail::TensorInfo<scalar_t, index_t> input,
index_t inputSliceSize,
index_t k,
index_t numInputSlices,
index_t inputWithinSliceStride,
cuda::detail::TensorInfo<scalar_t, index_t> kthValue,
cuda::detail::TensorInfo<int64_t, index_t> indices) {
// Indices are limited to integer fp precision, so counts can fit in
// int32, regardless of index_t
__shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit
index_t slice = getLinearBlockId<index_t>();
if (slice >= numInputSlices) {
return;
}
// Find the start offset for our slice
index_t sliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input);
index_t kthValueSliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, kthValue);
index_t indicesSliceStartIndex =
cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices);
scalar_t* inputSliceStart = &input.data[sliceStartIndex];
scalar_t* kthValueSliceStart = &kthValue.data[kthValueSliceStartIndex];
int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
// Find the k-th highest element in our input
scalar_t kValue = static_cast<scalar_t>(0);
radixSelect<
scalar_t,
typename TopKTypeConfig<scalar_t>::RadixType,
index_t,
false>(
inputSliceStart,
k,
inputSliceSize,
inputWithinSliceStride,
smem,
&kValue);
// Find the index of the k-th highest element
index_t kValueIndex = 0;
bool foundKValue = false;
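// scan the slice with a blockDim.x stride; any thread that finds an element equal to
// kValue (NaN compares equal to NaN here) records its index and writes the value/index pair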
for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
scalar_t v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride])
: static_cast<scalar_t>(0);
bool isKValue = inRange &&
((v == kValue) ||
(THCNumerics<scalar_t>::isnan(v) &&
THCNumerics<scalar_t>::isnan(kValue)));
if (isKValue) {
kValueIndex = i;
foundKValue = true;
break;
}
}
if (foundKValue) {
kthValueSliceStart[0] = kValue;
indicesSliceStart[0] = kValueIndex;
}
}
struct KthValueLauncher {
int64_t k;
KthValueLauncher(int64_t k) : k(k) {}
template <typename scalar_t, typename index_t, int all_dims>
inline void launch(
cuda::detail::TensorInfo<scalar_t, index_t> values_info,
int collapse_values_dim,
cuda::detail::TensorInfo<int64_t, index_t> indices_info,
int collapse_indices_dim,
cuda::detail::TensorInfo<scalar_t, index_t> self_info,
int collapse_self_dim,
int64_t num_slices,
int64_t slice_size) {
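// one block per slice: getGridFromTiles spreads num_slices over the grid (and errors out if
// there are too many), and the block size is slice_size rounded up to a warp multiple,
// capped at 1024 threads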
dim3 grid;
if (!getGridFromTiles(num_slices, grid)) {
AT_ERROR("slices are too many");
}
dim3 block(
::min(THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( gatherKthValue<scalar_t, index_t, all_dims>), dim3(grid), dim3(block), 0, stream,
self_info,
slice_size,
k,
num_slices,
/* The actual dimension that the k-selection is running in */
/* may have changed from collapseDims() */
self_info.strides[collapse_self_dim],
values_info,
indices_info);
}
};
template <typename scalar_t>
void kthvalue_cuda_template(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim_,
bool keepdim) {
int64_t dim = maybe_wrap_dim(dim_, self.dim());
int64_t slicesize = self.size(dim);
// FIXME: This seems bogus, I only do this because it was the old behaviour.
// The reductions are fine, as long as the axis being reduced along
// isn't of 0 elements (and the output has elements).
TORCH_CHECK(
self.numel() > 0,
"cannot perform reduction function kthvalue",
" on tensor with no elements because the operation does not have an identity");
TORCH_CHECK(k >= 1 && k <= slicesize, "selected number k out of range");
_reduction_with_indices_allocate_or_resize_output(
values, indices, self, dim, keepdim);
if (self.dim() == 0 && self.numel() == 1) {
values.copy_(self);
indices.zero_();
return;
}
TORCH_CHECK(
self.dim() <= MAX_TENSORINFO_DIMS,
"cannot operate on more than ",
MAX_TENSORINFO_DIMS,
" dimensions");
// Based on required index size, run the algorithm with the
// appropriate index type
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(values) &&
cuda::detail::canUse32BitIndexMath(indices)) {
run_launcher<scalar_t, uint32_t>(
values, indices, self, dim, KthValueLauncher(k));
} else {
run_launcher<scalar_t, uint64_t>(
values, indices, self, dim, KthValueLauncher(k));
}
if (!keepdim) {
values.squeeze_(dim);
indices.squeeze_(dim);
}
AT_CUDA_CHECK(hipGetLastError());
}
// this does not reduce to median with dim because we don't want to copy twice
template <typename scalar_t>
Tensor median_cuda_template(const Tensor& self) {
TORCH_CHECK(self.numel() > 0, "median cannot be called with empty tensor");
if (self.dim() == 0 && self.numel() == 1) {
return self.clone(at::MemoryFormat::Contiguous);
}
auto self_copy = self.clone(at::MemoryFormat::Contiguous).view(-1);
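  // The median is taken as the ((numel + 1) / 2)-th smallest element of the
  // flattened tensor (the lower median when numel is even).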
auto values = at::empty({1}, self.options());
auto indices = at::empty({1}, self.options().dtype(kLong));
TORCH_CHECK(
self.dim() <= MAX_TENSORINFO_DIMS,
"cannot operate on more than ",
MAX_TENSORINFO_DIMS,
" dimensions");
// Based on required index size, run the algorithm with the
// appropriate index type
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(values) &&
cuda::detail::canUse32BitIndexMath(indices)) {
run_launcher<scalar_t, uint32_t>(
values,
indices,
self_copy,
0,
KthValueLauncher((self_copy.size(0) + 1) / 2)); // KthValue is 1-based
} else {
run_launcher<scalar_t, uint64_t>(
values,
indices,
self_copy,
0,
KthValueLauncher((self_copy.size(0) + 1) / 2)); // KthValue is 1-based
}
return values.view({});
}
} // namespace
static std::tuple<Tensor&, Tensor&> kthvalue_out_impl_cuda(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim,
bool keepdim) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "kthvalue_cuda", [&] {
kthvalue_cuda_template<scalar_t>(values, indices, self, k, dim, keepdim);
});
return std::forward_as_tuple(values, indices);
}
std::tuple<Tensor&, Tensor&> kthvalue_out_cuda(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim,
bool keepdim) {
auto result = [&]() {
NoNamesGuard guard;
// `kthvalue_out_impl_cuda` expects a contiguous input `self`.
return kthvalue_out_impl_cuda(values, indices, self.contiguous(), k, dim, keepdim);
}();
namedinference::propagate_names_for_reduction(values, self, dim, keepdim);
namedinference::propagate_names_for_reduction(indices, self, dim, keepdim);
return result;
}
Tensor median_cuda(const Tensor& self) {
NoNamesGuard guard;
return AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "median", [&] {
return median_cuda_template<scalar_t>(self);
});
}
} // namespace native
} // namespace at
| 56158b13c9b17c96cf6e4c8ded30f074bab59743.cu | #include <ATen/ATen.h>
#include <ATen/native/SortingUtils.h>
#include <assert.h>
#include <c10/macros/Macros.h>
#include <stdlib.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <THC/THCDeviceUtils.cuh> // only for THCRoundUp?
#include <THC/THCNumerics.cuh>
#include <THC/THCScanUtils.cuh>
#include <THC/THCTensorMathReduce.cuh> // AddOp
#include <ATen/native/cuda/SortingCommon.cuh>
#include <ATen/native/cuda/SortingRadixSelect.cuh>
#include <ATen/NamedTensorUtils.h>
namespace at {
namespace native {
namespace {
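// One block per slice: radixSelect computes the k-th value of the slice,
// then the threads of the block scan the slice to locate an index holding it.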
template <typename scalar_t, typename index_t, int Dim>
__global__ void gatherKthValue(
cuda::detail::TensorInfo<scalar_t, index_t> input,
index_t inputSliceSize,
index_t k,
index_t numInputSlices,
index_t inputWithinSliceStride,
cuda::detail::TensorInfo<scalar_t, index_t> kthValue,
cuda::detail::TensorInfo<int64_t, index_t> indices) {
// Indices are limited to integer fp precision, so counts can fit in
// int32, regardless of index_t
__shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit
index_t slice = getLinearBlockId<index_t>();
if (slice >= numInputSlices) {
return;
}
// Find the start offset for our slice
index_t sliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input);
index_t kthValueSliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, kthValue);
index_t indicesSliceStartIndex =
cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices);
scalar_t* inputSliceStart = &input.data[sliceStartIndex];
scalar_t* kthValueSliceStart = &kthValue.data[kthValueSliceStartIndex];
int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
// Find the k-th smallest element in our input (kthvalue is 1-based)
scalar_t kValue = static_cast<scalar_t>(0);
radixSelect<
scalar_t,
typename TopKTypeConfig<scalar_t>::RadixType,
index_t,
false>(
inputSliceStart,
k,
inputSliceSize,
inputWithinSliceStride,
smem,
&kValue);
// Find the index of the k-th smallest element
index_t kValueIndex = 0;
bool foundKValue = false;
for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
scalar_t v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride])
: static_cast<scalar_t>(0);
bool isKValue = inRange &&
((v == kValue) ||
(THCNumerics<scalar_t>::isnan(v) &&
THCNumerics<scalar_t>::isnan(kValue)));
if (isKValue) {
kValueIndex = i;
foundKValue = true;
break;
}
}
if (foundKValue) {
kthValueSliceStart[0] = kValue;
indicesSliceStart[0] = kValueIndex;
}
}
struct KthValueLauncher {
int64_t k;
KthValueLauncher(int64_t k) : k(k) {}
template <typename scalar_t, typename index_t, int all_dims>
inline void launch(
cuda::detail::TensorInfo<scalar_t, index_t> values_info,
int collapse_values_dim,
cuda::detail::TensorInfo<int64_t, index_t> indices_info,
int collapse_indices_dim,
cuda::detail::TensorInfo<scalar_t, index_t> self_info,
int collapse_self_dim,
int64_t num_slices,
int64_t slice_size) {
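    // One block per slice; threads per block = slice size rounded up to a
    // warp multiple, capped at 1024.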
dim3 grid;
if (!getGridFromTiles(num_slices, grid)) {
AT_ERROR("slices are too many");
}
dim3 block(
std::min(THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024));
auto stream = at::cuda::getCurrentCUDAStream();
gatherKthValue<scalar_t, index_t, all_dims><<<grid, block, 0, stream>>>(
self_info,
slice_size,
k,
num_slices,
/* The actual dimension that the k-selection is running in */
/* may have changed from collapseDims() */
self_info.strides[collapse_self_dim],
values_info,
indices_info);
}
};
template <typename scalar_t>
void kthvalue_cuda_template(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim_,
bool keepdim) {
int64_t dim = maybe_wrap_dim(dim_, self.dim());
int64_t slicesize = self.size(dim);
// FIXME: This seems bogus, I only do this because it was the old behaviour.
// The reductions are fine, as long as the axis being reduced along
// isn't of 0 elements (and the output has elements).
TORCH_CHECK(
self.numel() > 0,
"cannot perform reduction function kthvalue",
" on tensor with no elements because the operation does not have an identity");
TORCH_CHECK(k >= 1 && k <= slicesize, "selected number k out of range");
_reduction_with_indices_allocate_or_resize_output(
values, indices, self, dim, keepdim);
if (self.dim() == 0 && self.numel() == 1) {
values.copy_(self);
indices.zero_();
return;
}
TORCH_CHECK(
self.dim() <= MAX_TENSORINFO_DIMS,
"cannot operate on more than ",
MAX_TENSORINFO_DIMS,
" dimensions");
// Based on required index size, run the algorithm with the
// appropriate index type
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(values) &&
cuda::detail::canUse32BitIndexMath(indices)) {
run_launcher<scalar_t, uint32_t>(
values, indices, self, dim, KthValueLauncher(k));
} else {
run_launcher<scalar_t, uint64_t>(
values, indices, self, dim, KthValueLauncher(k));
}
if (!keepdim) {
values.squeeze_(dim);
indices.squeeze_(dim);
}
AT_CUDA_CHECK(cudaGetLastError());
}
// this does not reduce to median with dim because we don't want to copy twice
template <typename scalar_t>
Tensor median_cuda_template(const Tensor& self) {
TORCH_CHECK(self.numel() > 0, "median cannot be called with empty tensor");
if (self.dim() == 0 && self.numel() == 1) {
return self.clone(at::MemoryFormat::Contiguous);
}
auto self_copy = self.clone(at::MemoryFormat::Contiguous).view(-1);
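  // The median is taken as the ((numel + 1) / 2)-th smallest element of the
  // flattened tensor (the lower median when numel is even).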
auto values = at::empty({1}, self.options());
auto indices = at::empty({1}, self.options().dtype(kLong));
TORCH_CHECK(
self.dim() <= MAX_TENSORINFO_DIMS,
"cannot operate on more than ",
MAX_TENSORINFO_DIMS,
" dimensions");
// Based on required index size, run the algorithm with the
// appropriate index type
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(values) &&
cuda::detail::canUse32BitIndexMath(indices)) {
run_launcher<scalar_t, uint32_t>(
values,
indices,
self_copy,
0,
KthValueLauncher((self_copy.size(0) + 1) / 2)); // KthValue is 1-based
} else {
run_launcher<scalar_t, uint64_t>(
values,
indices,
self_copy,
0,
KthValueLauncher((self_copy.size(0) + 1) / 2)); // KthValue is 1-based
}
return values.view({});
}
} // namespace
static std::tuple<Tensor&, Tensor&> kthvalue_out_impl_cuda(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim,
bool keepdim) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "kthvalue_cuda", [&] {
kthvalue_cuda_template<scalar_t>(values, indices, self, k, dim, keepdim);
});
return std::forward_as_tuple(values, indices);
}
std::tuple<Tensor&, Tensor&> kthvalue_out_cuda(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim,
bool keepdim) {
auto result = [&]() {
NoNamesGuard guard;
// `kthvalue_out_impl_cuda` expects a contiguous input `self`.
return kthvalue_out_impl_cuda(values, indices, self.contiguous(), k, dim, keepdim);
}();
namedinference::propagate_names_for_reduction(values, self, dim, keepdim);
namedinference::propagate_names_for_reduction(indices, self, dim, keepdim);
return result;
}
Tensor median_cuda(const Tensor& self) {
NoNamesGuard guard;
return AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "median", [&] {
return median_cuda_template<scalar_t>(self);
});
}
} // namespace native
} // namespace at
|
a2335bdb4a60cf7fa064390a3d90dfcee5253659.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include "hip/hip_runtime.h"
#define N 2
__global__ void foo(int* p) {
p[threadIdx.x] = 2;
__syncthreads();
}
int main(){
int *a;
int *dev_a;
int size = N*sizeof(int);
hipMalloc((void**)&dev_a, size);
a = (int*)malloc(N*size);
hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( foo), dim3(1),dim3(N), 0, 0, dev_a);
//ESBMC_verify_kernel(foo,1,N,dev_a);
hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++){
assert(a[i]==1);
}
free(a);
hipFree(dev_a);
return 0;
}
| a2335bdb4a60cf7fa064390a3d90dfcee5253659.cu | #include <stdio.h>
#include <assert.h>
#include "cuda.h"
#define N 2
__global__ void foo(int* p) {
p[threadIdx.x] = 2;
__syncthreads();
}
int main(){
int *a;
int *dev_a;
int size = N*sizeof(int);
cudaMalloc((void**)&dev_a, size);
a = (int*)malloc(N*size);
cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
foo<<<1,N>>>(dev_a);
//ESBMC_verify_kernel(foo,1,N,dev_a);
cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++){
assert(a[i]==1);
}
free(a);
cudaFree(dev_a);
return 0;
}
|
dd9f23ee870dd37a423c0c4427c8525adf0c8084.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/bernoulli_kernel.h"
#ifdef __NVCC__
#include <hiprand/hiprand_kernel.h>
#endif
#ifdef __HIPCC__
#include <hiprand_kernel.h>
#endif
#include <algorithm>
#include <vector>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/distribution_helper.h"
namespace phi {
// 'hiprand_uniform4' generates 4 random numbers at a time
template <typename T>
__global__ void bernoulli_cuda_kernel(
size_t size, uint64_t seed, uint64_t offset, const T* x_data, T* out_data) {
size_t thread_idx =
static_cast<size_t>(blockIdx.x * blockDim.x + threadIdx.x);
#if defined(__NVCC__)
hiprandStatePhilox4_32_10_t state;
hiprand_init(seed, thread_idx, offset, &state);
#else
hiprandStatePhilox4_32_10_t state;
hiprand_init(seed, thread_idx, offset, &state);
#endif
size_t total_thread = gridDim.x * blockDim.x;
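  // Grid-stride loop: each iteration draws four uniform floats and writes
  // out[idx] = (u <= x[idx]) for up to four consecutive elements.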
for (size_t i = 4 * thread_idx; i < size; i += total_thread * 4) {
funcs::uniform_distribution<float> dist;
float4 rand = dist(&state);
using MPType = typename phi::dtype::MPTypeTrait<T>::Type;
#pragma unroll
for (size_t j = 0; j < 4; j++) {
size_t idx = i + j;
if (idx < size) {
out_data[idx] =
static_cast<T>((&rand.x)[j] <= static_cast<MPType>(x_data[idx]));
}
}
}
}
template <typename T, typename Context>
void BernoulliKernel(const Context& ctx,
const DenseTensor& x,
DenseTensor* out) {
const T* x_data = x.data<T>();
T* out_data = ctx.template Alloc<T>(out);
auto numel = x.numel();
auto gen_cuda = ctx.GetGenerator();
auto seed_offset = gen_cuda->IncrementOffset(12);
uint64_t seed = seed_offset.first;
uint64_t offset = seed_offset.second;
auto gpu_config = phi::backends::gpu::GetGpuLaunchConfig1D(ctx, numel, 4);
size_t grid_size = gpu_config.GetGridSize();
size_t block_size = gpu_config.GetBlockSize();
hipLaunchKernelGGL(( bernoulli_cuda_kernel), dim3(grid_size), dim3(block_size), 0, ctx.stream(),
numel, seed, offset, x_data, out_data);
}
} // namespace phi
PD_REGISTER_KERNEL(bernoulli,
GPU,
ALL_LAYOUT,
phi::BernoulliKernel,
phi::dtype::float16,
phi::dtype::bfloat16,
float,
double) {}
| dd9f23ee870dd37a423c0c4427c8525adf0c8084.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/bernoulli_kernel.h"
#ifdef __NVCC__
#include <curand_kernel.h>
#endif
#ifdef __HIPCC__
#include <hiprand_kernel.h>
#endif
#include <algorithm>
#include <vector>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/distribution_helper.h"
namespace phi {
// 'curand_uniform4'/'hiprand_uniform4' each generate 4 random numbers at a time
template <typename T>
__global__ void bernoulli_cuda_kernel(
size_t size, uint64_t seed, uint64_t offset, const T* x_data, T* out_data) {
size_t thread_idx =
static_cast<size_t>(blockIdx.x * blockDim.x + threadIdx.x);
#if defined(__NVCC__)
curandStatePhilox4_32_10_t state;
curand_init(seed, thread_idx, offset, &state);
#else
hiprandStatePhilox4_32_10_t state;
hiprand_init(seed, thread_idx, offset, &state);
#endif
size_t total_thread = gridDim.x * blockDim.x;
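  // Grid-stride loop: each iteration draws four uniform floats and writes
  // out[idx] = (u <= x[idx]) for up to four consecutive elements.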
for (size_t i = 4 * thread_idx; i < size; i += total_thread * 4) {
funcs::uniform_distribution<float> dist;
float4 rand = dist(&state);
using MPType = typename phi::dtype::MPTypeTrait<T>::Type;
#pragma unroll
for (size_t j = 0; j < 4; j++) {
size_t idx = i + j;
if (idx < size) {
out_data[idx] =
static_cast<T>((&rand.x)[j] <= static_cast<MPType>(x_data[idx]));
}
}
}
}
template <typename T, typename Context>
void BernoulliKernel(const Context& ctx,
const DenseTensor& x,
DenseTensor* out) {
const T* x_data = x.data<T>();
T* out_data = ctx.template Alloc<T>(out);
auto numel = x.numel();
auto gen_cuda = ctx.GetGenerator();
auto seed_offset = gen_cuda->IncrementOffset(12);
uint64_t seed = seed_offset.first;
uint64_t offset = seed_offset.second;
auto gpu_config = phi::backends::gpu::GetGpuLaunchConfig1D(ctx, numel, 4);
size_t grid_size = gpu_config.GetGridSize();
size_t block_size = gpu_config.GetBlockSize();
bernoulli_cuda_kernel<<<grid_size, block_size, 0, ctx.stream()>>>(
numel, seed, offset, x_data, out_data);
}
} // namespace phi
PD_REGISTER_KERNEL(bernoulli,
GPU,
ALL_LAYOUT,
phi::BernoulliKernel,
phi::dtype::float16,
phi::dtype::bfloat16,
float,
double) {}
|
a092873005ebcba2b837a35dc6c7f8d908674083.hip | // !!! This is a file automatically generated by hipify!!!
// -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#include "core/gpu/cuda_pinned_memory.h"
#include "core/gpu/cuda_error_chk.h"
namespace bdm {
template <typename T>
void CudaAllocPinned(T** d, uint64_t elements) {
GpuErrchk(hipHostMalloc((void**)d, elements * sizeof(T)));
}
template void CudaAllocPinned<double>(double**, uint64_t);
template void CudaAllocPinned<float>(float**, uint64_t);
template void CudaAllocPinned<uint64_t>(uint64_t**, uint64_t);
template void CudaAllocPinned<int64_t>(int64_t**, uint64_t);
template void CudaAllocPinned<uint32_t>(uint32_t**, uint64_t);
template void CudaAllocPinned<int32_t>(int32_t**, uint64_t);
template void CudaAllocPinned<uint16_t>(uint16_t**, uint64_t);
template void CudaAllocPinned<int16_t>(int16_t**, uint64_t);
void CudaFreePinned(void* p) {
GpuErrchk(hipHostFree(p));
}
} // namespace bdm
| a092873005ebcba2b837a35dc6c7f8d908674083.cu | // -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#include "core/gpu/cuda_pinned_memory.h"
#include "core/gpu/cuda_error_chk.h"
namespace bdm {
template <typename T>
void CudaAllocPinned(T** d, uint64_t elements) {
GpuErrchk(cudaMallocHost((void**)d, elements * sizeof(T)));
}
template void CudaAllocPinned<double>(double**, uint64_t);
template void CudaAllocPinned<float>(float**, uint64_t);
template void CudaAllocPinned<uint64_t>(uint64_t**, uint64_t);
template void CudaAllocPinned<int64_t>(int64_t**, uint64_t);
template void CudaAllocPinned<uint32_t>(uint32_t**, uint64_t);
template void CudaAllocPinned<int32_t>(int32_t**, uint64_t);
template void CudaAllocPinned<uint16_t>(uint16_t**, uint64_t);
template void CudaAllocPinned<int16_t>(int16_t**, uint64_t);
void CudaFreePinned(void* p) {
GpuErrchk(cudaFreeHost(p));
}
} // namespace bdm
|
236a5d4d4ac7acd39f24dad349f4240b6fb299bf.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
// Functions
void CleanupResources(void);
void RandomInit_int(float*, int);
void RandomInit_fp(float*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N, int iterations)
{
int tid = blockIdx.x*blockIdx.x + threadIdx.x;
float sum = 0.0;
float mult = 2.5;
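  // Stress loop: repeated texture fetches feed a chain of dependent
  // multiplies, and the results are written back to global memory.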
if(tid < N){
for(unsigned i=0; i<iterations; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum*=mult;
sum*=mult;
sum*=mult;
sum*=mult;
sum*=mult;
sum*=mult;
A[tid*2] = tex1Dfetch(texmem2,tid)*B[tid]+sum;
B[tid] = A[tid*2]+A[tid];
}
}
__syncthreads();
}
__global__ void PowerKernalEmpty(unsigned* C, int iterations)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<iterations*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*4;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(N*sizeof(float));
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
hipMalloc((void**) &device_texture1, N*sizeof(float));
hipMalloc((void**) &device_texture2, N*sizeof(float));
hipMemcpy(device_texture1, host_texture1, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture2, host_texture1, N*sizeof(float), hipMemcpyHostToDevice);
checkCudaErrors(hipBindTexture(0, texmem1, device_texture1, N*sizeof(float)));
checkCudaErrors(hipBindTexture(0, texmem2, device_texture2, N*sizeof(float)));
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A1, size1) );
checkCudaErrors( hipMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A1, h_A1, size1, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_A2, h_A2, size1, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A1, d_A2, N, iterations);
checkCudaErrors(hipEventRecord(stop));
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, iterations);
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
checkCudaErrors(hipUnbindTexture(texmem1));
checkCudaErrors(hipUnbindTexture(texmem2));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
hipFree(d_A1);
if (d_A2)
hipFree(d_A2);
if (d_A3)
hipFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Allocates an array with random float entries.
void RandomInit_int(float* data, int n)
{
  srand((unsigned)time(0));
  for (int i = 0; i < n; ++i){
    data[i] = rand() / (float)RAND_MAX;
  }
}
void RandomInit_fp(float* data, int n)
{
  for (int i = 0; i < n; ++i){
    data[i] = rand() / (float)RAND_MAX;
  }
}
| 236a5d4d4ac7acd39f24dad349f4240b6fb299bf.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
// Functions
void CleanupResources(void);
void RandomInit_int(float*, int);
void RandomInit_fp(float*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N, int iterations)
{
int tid = blockIdx.x*blockIdx.x + threadIdx.x;
float sum = 0.0;
float mult = 2.5;
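  // Stress loop: repeated texture fetches feed a chain of dependent
  // multiplies, and the results are written back to global memory.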
if(tid < N){
for(unsigned i=0; i<iterations; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum*=mult;
sum*=mult;
sum*=mult;
sum*=mult;
sum*=mult;
sum*=mult;
A[tid*2] = tex1Dfetch(texmem2,tid)*B[tid]+sum;
B[tid] = A[tid*2]+A[tid];
}
}
__syncthreads();
}
__global__ void PowerKernalEmpty(unsigned* C, int iterations)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<iterations*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*4;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(N*sizeof(float));
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
cudaMalloc((void**) &device_texture1, N*sizeof(float));
cudaMalloc((void**) &device_texture2, N*sizeof(float));
cudaMemcpy(device_texture1, host_texture1, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture2, host_texture1, N*sizeof(float), cudaMemcpyHostToDevice);
checkCudaErrors(cudaBindTexture(0, texmem1, device_texture1, N*sizeof(float)));
checkCudaErrors(cudaBindTexture(0, texmem2, device_texture2, N*sizeof(float)));
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A1, size1) );
checkCudaErrors( cudaMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A1, h_A1, size1, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_A2, h_A2, size1, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A1, d_A2, N, iterations);
checkCudaErrors(cudaEventRecord(stop));
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, iterations);
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
checkCudaErrors(cudaUnbindTexture(texmem1));
checkCudaErrors(cudaUnbindTexture(texmem2));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
cudaFree(d_A1);
if (d_A2)
cudaFree(d_A2);
if (d_A3)
cudaFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Allocates an array with random float entries.
void RandomInit_int(float* data, int n)
{
  srand((unsigned)time(0));
  for (int i = 0; i < n; ++i){
    data[i] = rand() / (float)RAND_MAX;
  }
}
void RandomInit_fp(float* data, int n)
{
  for (int i = 0; i < n; ++i){
    data[i] = rand() / (float)RAND_MAX;
  }
}
|
f7ff235af46fabc13dbeb3c2658cf7360d93626b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/VolumetricMaxUnpooling.cu"
#else
static inline void THNN_(VolumetricMaxUnpooling_shapeCheck)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCIndexTensor *indices,
int oT,
int oW,
int oH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH) {
int inputSlices = 0;
THCUNN_check_shape_indices(state, indices, input);
THArgCheck(dT > 0 && dW > 0 && dH > 0, 10,
"stride should be greater than zero, but got dT: %d dH: %d dW: %d",
dT, dH, dW);
if (THCTensor_(nDimension)(state, input) == 4)
{
inputSlices = THCTensor_(size)(state, input, 0);
}
else if (THCTensor_(nDimension)(state, input) == 5)
{
inputSlices = THCTensor_(size)(state, input, 1);
}
else
{
THArgCheck(false, 2, "4D or 5D tensor expected, got %d",
THCTensor_(nDimension)(state, input));
}
int dimw = 3;
int dimh = 2;
int dimt = 1;
int dimn = 0;
if (input->nDimension == 5)
{
dimt++;
dimw++;
dimh++;
dimn++;
}
if (gradOutput != NULL) {
if (oT != gradOutput->size[dimt] || oW != gradOutput->size[dimw] || oH != gradOutput->size[dimh])
{
THError(
"Inconsistent gradOutput size. oT= %d, oH= %d, oW= %d, gradOutput: %dx%dx%d",
oT, oH, oW, gradOutput->size[dimt], gradOutput->size[dimh], gradOutput->size[dimw]);
}
THCUNN_check_dim_size(state, gradOutput, input->nDimension, dimn, inputSlices);
}
}
void THNN_(VolumetricMaxUnpooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int outputTime, int outputWidth, int outputHeight,
int dT, int dW, int dH,
int padT, int padW, int padH)
{
int batchSize = 0;
int inputSlices = 0;
int inputTime = 0;
int inputHeight = 0;
int inputWidth = 0;
THNN_(VolumetricMaxUnpooling_shapeCheck)(
state, input, NULL, indices,
outputTime, outputWidth, outputHeight,
dT, dW, dH, padT, padW, padH);
THCUNN_assertSameGPU(state, 3, input, indices, output);
if (THCTensor_(nDimension)(state, input) == 4)
{
/* sizes */
batchSize = 1;
inputSlices = THCTensor_(size)(state, input, 0);
inputTime = THCTensor_(size)(state, input, 1);
inputHeight = THCTensor_(size)(state, input, 2);
inputWidth = THCTensor_(size)(state, input, 3);
}
else if (THCTensor_(nDimension)(state, input) == 5)
{
/* sizes */
batchSize = THCTensor_(size)(state, input, 0);
inputSlices = THCTensor_(size)(state, input, 1);
inputTime = THCTensor_(size)(state, input, 2);
inputHeight = THCTensor_(size)(state, input, 3);
inputWidth = THCTensor_(size)(state, input, 4);
}
if (input->nDimension == 4) /* 4D */
{
/* resize output */
THCTensor_(resize4d)(state, output, inputSlices,
outputTime, outputHeight, outputWidth);
}
else
{ /* 5D */
THCTensor_(resize5d)(state, output, batchSize, inputSlices,
outputTime, outputHeight, outputWidth);
}
input = THCTensor_(newContiguous)(state, input);
indices = THCIndexTensor_(newContiguous)(state, indices);
THCTensor_(zero)(state, output);
// Collapse batch and feature dimensions
THCDeviceTensor<real, 4> cudaInput;
THCDeviceTensor<real, 4> cudaOutput;
THCDeviceTensor<THCIndex_t, 4> cudaIndices;
if (THCTensor_(nDimension)(state, input) == 4)
{
cudaInput = toDeviceTensor<real, 4>(state, input);
cudaOutput = toDeviceTensor<real, 4>(state, output);
cudaIndices = toDeviceTensor<THCIndex_t, 4>(state, indices);
}
else
{
cudaInput = toDeviceTensor<real, 5>(state, input).downcastOuter<4>();
cudaOutput = toDeviceTensor<real, 5>(state, output).downcastOuter<4>();
cudaIndices = toDeviceTensor<THCIndex_t, 5>(state, indices).downcastOuter<4>();
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
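  // The grid z-dimension is capped at 65535 blocks, so the
  // batch x feature x time planes are processed in chunks via offsetZ.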
while (totalZ > 0) {
dim3 grid(THCCeilDiv(inputWidth, static_cast<int>(block.x)),
THCCeilDiv(inputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( cuda_VolumetricMaxUnpooling_updateOutput), dim3(grid), dim3(block),
0, THCState_getCurrentStream(state),
cudaInput, cudaIndices, cudaOutput,
dT, dH, dW,
padT, padH, padW, offsetZ);
THCudaCheck(hipGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, indices);
}
void THNN_(VolumetricMaxUnpooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices,
int outputTime, int outputWidth, int outputHeight,
int dT, int dW, int dH,
int padT, int padW, int padH)
{
int batchSize = 0;
int inputSlices = 0;
int inputTime = 0;
int inputHeight = 0;
int inputWidth = 0;
THNN_(VolumetricMaxUnpooling_shapeCheck)(
state, input, gradOutput, indices,
outputTime, outputWidth, outputHeight,
dT, dW, dH, padT, padW, padH);
THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput);
if (THCTensor_(nDimension)(state, input) == 4) /* 4D */
{
batchSize = 1;
inputSlices = THCTensor_(size)(state, input, 0);
inputTime = THCTensor_(size)(state, input, 1);
inputHeight = THCTensor_(size)(state, input, 2);
inputWidth = THCTensor_(size)(state, input, 3);
}
else
{
batchSize = THCTensor_(size)(state, input, 0);
inputSlices = THCTensor_(size)(state, input, 1);
inputTime = THCTensor_(size)(state, input, 2);
inputHeight = THCTensor_(size)(state, input, 3);
inputWidth = THCTensor_(size)(state, input, 4);
}
input = THCTensor_(newContiguous)(state, input);
indices = THCIndexTensor_(newContiguous)(state, indices);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
// Collapse batch and feature dimensions
THCDeviceTensor<real, 4> cudaGradInput;
THCDeviceTensor<real, 4> cudaGradOutput;
THCDeviceTensor<THCIndex_t, 4> cudaIndices;
if (THCTensor_(nDimension)(state, input) == 4)
{
cudaGradInput = toDeviceTensor<real, 4>(state, gradInput);
cudaGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
cudaIndices = toDeviceTensor<THCIndex_t, 4>(state, indices);
}
else
{
cudaGradInput =
toDeviceTensor<real, 5>(state, gradInput).downcastOuter<4>();
cudaGradOutput =
toDeviceTensor<real, 5>(state, gradOutput).downcastOuter<4>();
cudaIndices =
toDeviceTensor<THCIndex_t, 5>(state, indices).downcastOuter<4>();
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(THCCeilDiv(inputWidth, static_cast<int>(block.x)),
THCCeilDiv(inputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( cuda_VolumetricMaxUnpooling_updateGradInput), dim3(grid), dim3(block),
0, THCState_getCurrentStream(state),
cudaGradOutput,
cudaIndices,
cudaGradInput,
dT, dH, dW,
padT, padH, padW, offsetZ);
THCudaCheck(hipGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
// cleanup
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
THCIndexTensor_(free)(state, indices);
}
#endif
| f7ff235af46fabc13dbeb3c2658cf7360d93626b.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/VolumetricMaxUnpooling.cu"
#else
static inline void THNN_(VolumetricMaxUnpooling_shapeCheck)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCIndexTensor *indices,
int oT,
int oW,
int oH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH) {
int inputSlices = 0;
THCUNN_check_shape_indices(state, indices, input);
THArgCheck(dT > 0 && dW > 0 && dH > 0, 10,
"stride should be greater than zero, but got dT: %d dH: %d dW: %d",
dT, dH, dW);
if (THCTensor_(nDimension)(state, input) == 4)
{
inputSlices = THCTensor_(size)(state, input, 0);
}
else if (THCTensor_(nDimension)(state, input) == 5)
{
inputSlices = THCTensor_(size)(state, input, 1);
}
else
{
THArgCheck(false, 2, "4D or 5D tensor expected, got %d",
THCTensor_(nDimension)(state, input));
}
int dimw = 3;
int dimh = 2;
int dimt = 1;
int dimn = 0;
if (input->nDimension == 5)
{
dimt++;
dimw++;
dimh++;
dimn++;
}
if (gradOutput != NULL) {
if (oT != gradOutput->size[dimt] || oW != gradOutput->size[dimw] || oH != gradOutput->size[dimh])
{
THError(
"Inconsistent gradOutput size. oT= %d, oH= %d, oW= %d, gradOutput: %dx%dx%d",
oT, oH, oW, gradOutput->size[dimt], gradOutput->size[dimh], gradOutput->size[dimw]);
}
THCUNN_check_dim_size(state, gradOutput, input->nDimension, dimn, inputSlices);
}
}
void THNN_(VolumetricMaxUnpooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int outputTime, int outputWidth, int outputHeight,
int dT, int dW, int dH,
int padT, int padW, int padH)
{
int batchSize = 0;
int inputSlices = 0;
int inputTime = 0;
int inputHeight = 0;
int inputWidth = 0;
THNN_(VolumetricMaxUnpooling_shapeCheck)(
state, input, NULL, indices,
outputTime, outputWidth, outputHeight,
dT, dW, dH, padT, padW, padH);
THCUNN_assertSameGPU(state, 3, input, indices, output);
if (THCTensor_(nDimension)(state, input) == 4)
{
/* sizes */
batchSize = 1;
inputSlices = THCTensor_(size)(state, input, 0);
inputTime = THCTensor_(size)(state, input, 1);
inputHeight = THCTensor_(size)(state, input, 2);
inputWidth = THCTensor_(size)(state, input, 3);
}
else if (THCTensor_(nDimension)(state, input) == 5)
{
/* sizes */
batchSize = THCTensor_(size)(state, input, 0);
inputSlices = THCTensor_(size)(state, input, 1);
inputTime = THCTensor_(size)(state, input, 2);
inputHeight = THCTensor_(size)(state, input, 3);
inputWidth = THCTensor_(size)(state, input, 4);
}
if (input->nDimension == 4) /* 4D */
{
/* resize output */
THCTensor_(resize4d)(state, output, inputSlices,
outputTime, outputHeight, outputWidth);
}
else
{ /* 5D */
THCTensor_(resize5d)(state, output, batchSize, inputSlices,
outputTime, outputHeight, outputWidth);
}
input = THCTensor_(newContiguous)(state, input);
indices = THCIndexTensor_(newContiguous)(state, indices);
THCTensor_(zero)(state, output);
// Collapse batch and feature dimensions
THCDeviceTensor<real, 4> cudaInput;
THCDeviceTensor<real, 4> cudaOutput;
THCDeviceTensor<THCIndex_t, 4> cudaIndices;
if (THCTensor_(nDimension)(state, input) == 4)
{
cudaInput = toDeviceTensor<real, 4>(state, input);
cudaOutput = toDeviceTensor<real, 4>(state, output);
cudaIndices = toDeviceTensor<THCIndex_t, 4>(state, indices);
}
else
{
cudaInput = toDeviceTensor<real, 5>(state, input).downcastOuter<4>();
cudaOutput = toDeviceTensor<real, 5>(state, output).downcastOuter<4>();
cudaIndices = toDeviceTensor<THCIndex_t, 5>(state, indices).downcastOuter<4>();
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
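  // The grid z-dimension is capped at 65535 blocks, so the
  // batch x feature x time planes are processed in chunks via offsetZ.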
while (totalZ > 0) {
dim3 grid(THCCeilDiv(inputWidth, static_cast<int>(block.x)),
THCCeilDiv(inputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
cuda_VolumetricMaxUnpooling_updateOutput<<<grid, block,
0, THCState_getCurrentStream(state)>>>(
cudaInput, cudaIndices, cudaOutput,
dT, dH, dW,
padT, padH, padW, offsetZ);
THCudaCheck(cudaGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, indices);
}
void THNN_(VolumetricMaxUnpooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices,
int outputTime, int outputWidth, int outputHeight,
int dT, int dW, int dH,
int padT, int padW, int padH)
{
int batchSize = 0;
int inputSlices = 0;
int inputTime = 0;
int inputHeight = 0;
int inputWidth = 0;
THNN_(VolumetricMaxUnpooling_shapeCheck)(
state, input, gradOutput, indices,
outputTime, outputWidth, outputHeight,
dT, dW, dH, padT, padW, padH);
THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput);
if (THCTensor_(nDimension)(state, input) == 4) /* 4D */
{
batchSize = 1;
inputSlices = THCTensor_(size)(state, input, 0);
inputTime = THCTensor_(size)(state, input, 1);
inputHeight = THCTensor_(size)(state, input, 2);
inputWidth = THCTensor_(size)(state, input, 3);
}
else
{
batchSize = THCTensor_(size)(state, input, 0);
inputSlices = THCTensor_(size)(state, input, 1);
inputTime = THCTensor_(size)(state, input, 2);
inputHeight = THCTensor_(size)(state, input, 3);
inputWidth = THCTensor_(size)(state, input, 4);
}
input = THCTensor_(newContiguous)(state, input);
indices = THCIndexTensor_(newContiguous)(state, indices);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
// Collapse batch and feature dimensions
THCDeviceTensor<real, 4> cudaGradInput;
THCDeviceTensor<real, 4> cudaGradOutput;
THCDeviceTensor<THCIndex_t, 4> cudaIndices;
if (THCTensor_(nDimension)(state, input) == 4)
{
cudaGradInput = toDeviceTensor<real, 4>(state, gradInput);
cudaGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
cudaIndices = toDeviceTensor<THCIndex_t, 4>(state, indices);
}
else
{
cudaGradInput =
toDeviceTensor<real, 5>(state, gradInput).downcastOuter<4>();
cudaGradOutput =
toDeviceTensor<real, 5>(state, gradOutput).downcastOuter<4>();
cudaIndices =
toDeviceTensor<THCIndex_t, 5>(state, indices).downcastOuter<4>();
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(THCCeilDiv(inputWidth, static_cast<int>(block.x)),
THCCeilDiv(inputHeight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
cuda_VolumetricMaxUnpooling_updateGradInput<<<grid, block,
0, THCState_getCurrentStream(state)>>>(
cudaGradOutput,
cudaIndices,
cudaGradInput,
dT, dH, dW,
padT, padH, padW, offsetZ);
THCudaCheck(cudaGetLastError());
totalZ -= 65535;
offsetZ += 65535;
}
// cleanup
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
THCIndexTensor_(free)(state, indices);
}
#endif
|
3088bb20054028632d3cdf9e9f60447525666dce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Inverse Discrete Sine Transform, column wise (DST type IV)
 * DST_IV_Column_Inverse
 * This CUDA code works with any type of input mxArray,
 * either a gpuArray or a standard MATLAB CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_IV_Column_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#define DEFAULT_DIM 32
const double PI_d = 3.141592653589793238462643383279502884; //pi
__global__ void DSTIV_Column_Inverse_Kernel_GPUA(double const * const A, double const * const B, double * const C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
__global__ void DSTIV_Column_Inverse_Kernel(double *A, double *B, double *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void CalculateTransform(double * A, double * B, double * C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
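  // Copy A and B to the device, run the matrix-multiply kernel,
  // and copy the resulting product back into C on the host.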
double * hostA = A; // The A matrix
double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA=0;
double * deviceB=0;
double * deviceC=0;
//hostA = (double *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
// Allocate GPU buffers for three vectors (two input, one output) .
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
hipMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns);
hipMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns);
hipMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns);
hipMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, hipMemcpyHostToDevice);
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTIV_Column_Inverse_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
hipDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost);
C = hostC;
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode =hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
/// input standard GPUarray
if (mxIsGPUArray(prhs[0])) {
//mexErrMsgIdAndTxt(errId, errMsg);
/* Declare all variables.*/
mxGPUArray const *A;
mxGPUArray const *DCOS;
mxGPUArray *B;
double const *d_A, *d_DCOS;
double *d_B;
// mxArray * hostcos;
//test
// double * hostcos, *pointer;
double *pointer;
//int N;
int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
if ((nrhs!=1)) {
mexErrMsgIdAndTxt(errId, errMsg);
}
A = mxGPUCreateFromMxArray(prhs[0]);
const mwSize *dims;
dims=mxGPUGetDimensions(A);
numARows = (int)dims[0]; /* gets number of rows of A */
numAColumns = (int)dims[1]; /* gets number of columns of A */
numDCOSRows=numDCOSColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n");
return;
}
// numDCOSRows=numDCOSColumns = numARows;
// numCRows = numARows;
//
// numCColumns = numAColumns;
// numDCOSRows=numDCOSColumns=numAColumns;
// numCRows = numARows;
// numCColumns = numDCOSColumns;
mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
pointer = mxGetPr(COS);
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
//hostBinv[i * numBColumns + j] = 1;
//hostBinv[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
//hostBinvL[i* numBColumns + j] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));//DST I Column
//hostBinv[i + j* numBColumns] = sin(((j + 1)*3.14*(i + 0.5)) / (numBColumns))*sqrt(2.0 / (numBColumns))*sqrt(1.0 / (1 + DELTA(numBColumns, j + 1)));
//hostBinvL[i* numBColumns + j] = sin(((j + 0.5)*PI_d*(i + 1)) / (numBColumns))*sqrt((2.0 - DELTA(i + 1, numBRows)) / (numBColumns)); //DST II and III Column
pointer[i* numDCOSColumns + j] = sin(((j + 0.5)*PI_d*(i + 0.5)) / (numDCOSColumns))*sqrt(2.0 / (numDCOSColumns));
//hostB[i + j* numBColumns] = 1;
//hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns);
}
}
// for (int i = 0; i < numDCOSRows; i++){
// for (int j = 0; j < numDCOSColumns; j++){
// //hostB[i * numBColumns + j] = i + j* numAColumns;
// //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
// //hostBinv[i * numBColumns + j] = 1;
// // hostBinv[i + j* numBColumns] = cos(((2 * j + 1)*3.14*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
// pointer[i* numDCOSColumns + j] = cos(((2 * j + 1)*PI_d*(2 * i + 1) / (4.0 * numDCOSColumns)))*sqrt(2.0 / numDCOSColumns);
//
//
// }
// }
DCOS=mxGPUCreateFromMxArray(COS);
// DCOS=mxGPUCreateFromMxArray(hostcos);
if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_A = (double const *)(mxGPUGetDataReadOnly(A));
d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS));
B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
mxGPUGetDimensions(A),
mxGPUGetClassID(A),
mxGPUGetComplexity(A),
MX_GPU_DO_NOT_INITIALIZE);
d_B = (double *)(mxGPUGetData(B));
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
//(hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//DCTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
DSTIV_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_DCOS, d_A, d_B, numDCOSRows, numDCOSColumns, numARows, numAColumns, numCRows, numCColumns);
// hipError_t err1 = hipPeekAtLastError();//To capture last error in function call
//hipDeviceSynchronize();//To synchronize the device
plhs[0] = mxGPUCreateMxArrayOnGPU(B);
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(DCOS);
mxGPUDestroyGPUArray(B);
}
/// input standard array
else if (!(mxIsGPUArray(prhs[0]))){
int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A
int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
numBRows = numBColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
// numBRows = numBColumns = numAColumns;
// numCRows = numARows;
//
// numCColumns = numBColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n");
return;
}
//char const * const errId = "parallel:gpu:DCTTWO:InvalidInput";
//char const * const errMsg = "Invalid input to MEX file.";
double * hostA ; // The A matrix
double * hostB ; // The B matrix
/* Initialize the MathWorks GPU API. */
//mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) {
//mexErrMsgIdAndTxt(errId, errMsg);
//}
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns);
hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
//const mxArray *G =prhs[0];
// if ((nrhs != 1) || (mxIsGPUArray(G))) {
//mexErrMsgIdAndTxt(errId, errMsg);
// G = gather(G);
// }
hostA = (double *)mxGetData(prhs[0]);
// hostA = (double *)mxGetData(G);
// Inverse Discrete Sine Transform, column wise
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
//hostBinv[i * numBColumns + j] = 1;
//hostBinv[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
//hostBinvL[i* numBColumns + j] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));//DST I Column
//hostBinv[i + j* numBColumns] = sin(((j + 1)*3.14*(i + 0.5)) / (numBColumns))*sqrt(2.0 / (numBColumns))*sqrt(1.0 / (1 + DELTA(numBColumns, j + 1)));
//hostBinvL[i* numBColumns + j] = sin(((j + 0.5)*PI_d*(i + 1)) / (numBColumns))*sqrt((2.0 - DELTA(i + 1, numBRows)) / (numBColumns)); //DST II and III Column
hostB[i* numBColumns + j] = sin(((j + 0.5)*PI_d*(i + 0.5)) / (numBColumns))*sqrt(2.0 / (numBColumns));
//hostB[i + j* numBColumns] = 1;
//hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns);
}
}
// for (int i = 0; i < numBRows; i++){
// for (int j = 0; j < numBColumns; j++){
// //hostB[i * numBColumns + j] = i + j* numAColumns;
// //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
// //hostBinv[i * numBColumns + j] = 1;
// // hostBinv[i + j* numBColumns] = cos(((2 * j + 1)*3.14*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
// hostB[i* numBColumns + j] = cos(((2 * j + 1)*PI_d*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
//
//
// }
// }
//
//plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL);
//hostC = (double*)mxGetData(plhs[0]);
plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
double *pointer = mxGetPr(plhs[0]);
// (hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
// CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
CalculateTransform( hostB, hostA, pointer, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double));
free(hostB);
}
}
| 3088bb20054028632d3cdf9e9f60447525666dce.cu | /*
* Inverse Discrete Sine Transform, column-wise (DST-IV)
* DST_IV_Column_Inverse
* This CUDA code can handle/work with any type of input mxArray,
* gpuArray or standard MATLAB CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B = DST_IV_Column_Inverse(A) = mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#define DEFAULT_DIM 32
const double PI_d = 3.141592653589793238462643383279502884; //pi
__global__ void DSTIV_Column_Inverse_Kernel_GPUA(double const * const A, double const * const B, double * const C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
__global__ void DSTIV_Column_Inverse_Kernel(double *A, double *B, double *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
// Matrix multiplication - Host code
// The kernel bounds-checks its indices, so matrix dimensions need not be multiples of DEFAULT_DIM
void CalculateTransform(double * A, double * B, double * C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA=0;
double * deviceB=0;
double * deviceC=0;
//hostA = (double *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
// Allocate GPU buffers for three vectors (two input, one output) .
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
cudaMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns);
cudaMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns);
cudaMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns);
cudaMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, cudaMemcpyHostToDevice);
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTIV_Column_Inverse_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
cudaDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost);
C = hostC;
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
}
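// --------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original MEX
// source): shows how CalculateTransform could be exercised from a plain host
// program on a small, hypothetical 4x3 input. The guard macro
// DST_IV_STANDALONE_EXAMPLE and all test values are assumptions.
#ifdef DST_IV_STANDALONE_EXAMPLE
#include <cstdlib>
#include <cmath>
int main()
{
const int N = 4; // rows of A, and the size of the square DST-IV basis B
const int M = 3; // columns of A
double *A = (double *)malloc(sizeof(double) * N * M); // input matrix
double *B = (double *)malloc(sizeof(double) * N * N); // DST-IV basis
double *C = (double *)malloc(sizeof(double) * N * M); // result matrix
for (int i = 0; i < N * M; i++) A[i] = (double)(i + 1);
// same basis construction as the MEX branches below: B(i,j) = sin((j+0.5)*pi*(i+0.5)/N)*sqrt(2/N)
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
B[i * N + j] = sin(((j + 0.5) * PI_d * (i + 0.5)) / N) * sqrt(2.0 / N);
// C = B * A, i.e. the column-wise inverse DST-IV of A (same call pattern as the MEX code)
CalculateTransform(B, A, C, N, N, N, M, N, M);
free(A); free(B); free(C);
return 0;
}
#endif
// --------------------------------------------------------------------------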
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
cudaError_t errCode =cudaGetDeviceCount(&nDevices);
//int nDevices;
//cudaGetDeviceCount(&nDevices);
if (errCode != cudaSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
/// input standard GPUarray
if (mxIsGPUArray(prhs[0])) {
//mexErrMsgIdAndTxt(errId, errMsg);
/* Declare all variables.*/
mxGPUArray const *A;
mxGPUArray const *DCOS;
mxGPUArray *B;
double const *d_A, *d_DCOS;
double *d_B;
// mxArray * hostcos;
//test
// double * hostcos, *pointer;
double *pointer;
//int N;
int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
if ((nrhs!=1)) {
mexErrMsgIdAndTxt(errId, errMsg);
}
A = mxGPUCreateFromMxArray(prhs[0]);
const mwSize *dims;
dims=mxGPUGetDimensions(A);
numARows = (int)dims[0]; /* gets number of rows of A */
numAColumns = (int)dims[1]; /* gets number of columns of A */
numDCOSRows=numDCOSColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n");
return;
}
// numDCOSRows=numDCOSColumns = numARows;
// numCRows = numARows;
//
// numCColumns = numAColumns;
// numDCOSRows=numDCOSColumns=numAColumns;
// numCRows = numARows;
// numCColumns = numDCOSColumns;
mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
pointer = mxGetPr(COS);
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
//hostBinv[i * numBColumns + j] = 1;
//hostBinv[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
//hostBinvL[i* numBColumns + j] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));//DST I Column
//hostBinv[i + j* numBColumns] = sin(((j + 1)*3.14*(i + 0.5)) / (numBColumns))*sqrt(2.0 / (numBColumns))*sqrt(1.0 / (1 + DELTA(numBColumns, j + 1)));
//hostBinvL[i* numBColumns + j] = sin(((j + 0.5)*PI_d*(i + 1)) / (numBColumns))*sqrt((2.0 - DELTA(i + 1, numBRows)) / (numBColumns)); //DST II and III Column
pointer[i* numDCOSColumns + j] = sin(((j + 0.5)*PI_d*(i + 0.5)) / (numDCOSColumns))*sqrt(2.0 / (numDCOSColumns));
//hostB[i + j* numBColumns] = 1;
//hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns);
}
}
// for (int i = 0; i < numDCOSRows; i++){
// for (int j = 0; j < numDCOSColumns; j++){
// //hostB[i * numBColumns + j] = i + j* numAColumns;
// //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
// //hostBinv[i * numBColumns + j] = 1;
// // hostBinv[i + j* numBColumns] = cos(((2 * j + 1)*3.14*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
// pointer[i* numDCOSColumns + j] = cos(((2 * j + 1)*PI_d*(2 * i + 1) / (4.0 * numDCOSColumns)))*sqrt(2.0 / numDCOSColumns);
//
//
// }
// }
DCOS=mxGPUCreateFromMxArray(COS);
// DCOS=mxGPUCreateFromMxArray(hostcos);
if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_A = (double const *)(mxGPUGetDataReadOnly(A));
d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS));
B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
mxGPUGetDimensions(A),
mxGPUGetClassID(A),
mxGPUGetComplexity(A),
MX_GPU_DO_NOT_INITIALIZE);
d_B = (double *)(mxGPUGetData(B));
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
//(hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//DCTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
DSTIV_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_DCOS, d_A, d_B, numDCOSRows, numDCOSColumns, numARows, numAColumns, numCRows, numCColumns);
// cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call
//cudaDeviceSynchronize();//To synchronize the device
plhs[0] = mxGPUCreateMxArrayOnGPU(B);
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(DCOS);
mxGPUDestroyGPUArray(B);
}
/// input standard array
else if (!(mxIsGPUArray(prhs[0]))){
int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A
int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
numBRows = numBColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
// numBRows = numBColumns = numAColumns;
// numCRows = numARows;
//
// numCColumns = numBColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n");
return;
}
//char const * const errId = "parallel:gpu:DCTTWO:InvalidInput";
//char const * const errMsg = "Invalid input to MEX file.";
double * hostA ; // The A matrix
double * hostB ; // The B matrix
/* Initialize the MathWorks GPU API. */
//mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) {
//mexErrMsgIdAndTxt(errId, errMsg);
//}
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns);
hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
//const mxArray *G =prhs[0];
// if ((nrhs != 1) || (mxIsGPUArray(G))) {
//mexErrMsgIdAndTxt(errId, errMsg);
// G = gather(G);
// }
hostA = (double *)mxGetData(prhs[0]);
// hostA = (double *)mxGetData(G);
//Inverse Discrete Sine Transform, column-wise
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
//hostBinv[i * numBColumns + j] = 1;
//hostBinv[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
//hostBinvL[i* numBColumns + j] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));//DST I Column
//hostBinv[i + j* numBColumns] = sin(((j + 1)*3.14*(i + 0.5)) / (numBColumns))*sqrt(2.0 / (numBColumns))*sqrt(1.0 / (1 + DELTA(numBColumns, j + 1)));
//hostBinvL[i* numBColumns + j] = sin(((j + 0.5)*PI_d*(i + 1)) / (numBColumns))*sqrt((2.0 - DELTA(i + 1, numBRows)) / (numBColumns)); //DST II and III Column
hostB[i* numBColumns + j] = sin(((j + 0.5)*PI_d*(i + 0.5)) / (numBColumns))*sqrt(2.0 / (numBColumns));
//hostB[i + j* numBColumns] = 1;
//hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns);
}
}
// for (int i = 0; i < numBRows; i++){
// for (int j = 0; j < numBColumns; j++){
// //hostB[i * numBColumns + j] = i + j* numAColumns;
// //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
// //hostBinv[i * numBColumns + j] = 1;
// // hostBinv[i + j* numBColumns] = cos(((2 * j + 1)*3.14*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
// hostB[i* numBColumns + j] = cos(((2 * j + 1)*PI_d*(2 * i + 1) / (4.0 * numBColumns)))*sqrt(2.0 / numBColumns);
//
//
// }
// }
//
//plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL);
//hostC = (double*)mxGetData(plhs[0]);
plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
double *pointer = mxGetPr(plhs[0]);
// (hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
// CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
CalculateTransform( hostB, hostA, pointer, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double));
free(hostB);
}
}
|
8deef7d68c385916b85a6ba3e0d2adb8e5110a19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void InvertPermutationKernel(float* input, float* output, int size)
{
int id = blockDim.x*blockIdx.y*gridDim.x
+ blockDim.x*blockIdx.x
+ threadIdx.x;
if (id >= size)
return;
int temp = __float2int_rn(input[id]);
if (input == output)
__syncthreads();
output[temp] = id;
} | 8deef7d68c385916b85a6ba3e0d2adb8e5110a19.cu | #include "includes.h"
__global__ void InvertPermutationKernel(float* input, float* output, int size)
{
int id = blockDim.x*blockIdx.y*gridDim.x
+ blockDim.x*blockIdx.x
+ threadIdx.x;
if (id >= size)
return;
int temp = __float2int_rn(input[id]);
if (input == output)
__syncthreads();
output[temp] = id;
} |
9299c72cf5f6b122398f216cbc3cc960a530b57c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include <opencv2\opencv.hpp>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
#if CUDNN_VERSION_MIN(4, 0, 0)
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#else
CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#endif
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
/////////////////////////temp vis//////////////////////////
//for (int i = 0; i < 64 * 2; i++){
// Dtype weightFilter[49];
// hipMemcpy(weightFilter, &this->blobs_[0]->gpu_data()[49 * i], sizeof(Dtype) * 49, hipMemcpyDeviceToHost);
// cv::Mat filter(7, 7, CV_32FC1);
// float min = FLT_MAX, max = -FLT_MIN;
// for (int j = 0; j < 49; j++){
// if (min > weightFilter[j]) min = weightFilter[j];
// if (max < weightFilter[j]) max = weightFilter[j];
// }
// for (int h = 0; h < 7; h++){
// for (int w = 0; w < 7; w++){
// filter.at<float>(h, w) = (weightFilter[h * 7 + w] - min) / (max - min) * 255.f;
// }
// }
// char name[32];
// itoa(i, name, 10);
// strcat(name, ".jpg");
// cv::imwrite(name, filter);
//}
//printf("complete\n");
//////////////////////////////////////////////////////////
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter_v3(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData_v3(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
| 9299c72cf5f6b122398f216cbc3cc960a530b57c.cu | #ifdef USE_CUDNN
#include <vector>
#include <opencv2\opencv.hpp>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
#if CUDNN_VERSION_MIN(4, 0, 0)
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#else
CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#endif
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
/////////////////////////temp vis//////////////////////////
//for (int i = 0; i < 64 * 2; i++){
// Dtype weightFilter[49];
// cudaMemcpy(weightFilter, &this->blobs_[0]->gpu_data()[49 * i], sizeof(Dtype) * 49, cudaMemcpyDeviceToHost);
// cv::Mat filter(7, 7, CV_32FC1);
// float min = FLT_MAX, max = -FLT_MIN;
// for (int j = 0; j < 49; j++){
// if (min > weightFilter[j]) min = weightFilter[j];
// if (max < weightFilter[j]) max = weightFilter[j];
// }
// for (int h = 0; h < 7; h++){
// for (int w = 0; w < 7; w++){
// filter.at<float>(h, w) = (weightFilter[h * 7 + w] - min) / (max - min) * 255.f;
// }
// }
// char name[32];
// itoa(i, name, 10);
// strcat(name, ".jpg");
// cv::imwrite(name, filter);
//}
//printf("complete\n");
//////////////////////////////////////////////////////////
}
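// --------------------------------------------------------------------------
// Editorial note on the empty-kernel idiom used above (not part of the
// original layer code): a launch into the legacy default (null) stream does
// not start until previously issued work in all blocking streams has
// finished, and later blocking-stream work waits for it in turn. That is why
// sync_conv_groups<<<1, 1>>>() orders the per-group streams without blocking
// the host the way cudaDeviceSynchronize() would. A minimal standalone sketch
// (the stream handles s0/s1 are hypothetical):
//
// __global__ void noop() { }
// void order_after_streams(cudaStream_t s0, cudaStream_t s1) {
// // ... kernels previously enqueued on s0 and s1 ...
// noop<<<1, 1>>>(); // null-stream launch: waits for s0 and s1,
// // and later null-stream work waits for it
// }
// --------------------------------------------------------------------------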
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter_v3(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData_v3(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
a14adbdab2be4b98abb31a35a1cebd6b9a16a0ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_macros.h"
#include "active_set_buffers.h"
#define BLOCK_DIM_X 128
#define GRID_DIM_X 64
#define BLOCK_DIM_BATCH_X 32
#define BLOCK_DIM_BATCH_Y 32
#define BLOCK_DIM_NORM_X 128
#define BLOCK_DIM_NORM_Y 1
#define MAT_IJ_TO_LINEAR(i, j, dim) ((i) + (j)*(dim))
extern "C" void construct_active_set_buffers(ActiveSetBuffers *buffers, int dim_input, int dim_target, int max_active) {
// assign params
buffers->max_active = max_active;
buffers->num_active = 0;
buffers->dim_input = dim_input;
buffers->dim_target = dim_target;
// allocate buffers
cudaSafeCall(hipMalloc((void**)&(buffers->active_inputs), dim_input * max_active * sizeof(float)));
cudaSafeCall(hipMalloc((void**)&(buffers->active_targets), dim_target * max_active * sizeof(float)));
cudaSafeCall(hipMalloc((void**)&(buffers->active_kernel_matrix), max_active * max_active * sizeof(float)));
// set kernel matrix to all zeros
cudaSafeCall(hipMemset(buffers->active_targets, 0, max_active * sizeof(float)));
cudaSafeCall(hipMemset(buffers->active_kernel_matrix, 0, max_active * max_active * sizeof(float)));
}
extern "C" void free_active_set_buffers(ActiveSetBuffers *buffers) {
// free everything
cudaSafeCall(hipFree(buffers->active_inputs));
cudaSafeCall(hipFree(buffers->active_targets));
cudaSafeCall(hipFree(buffers->active_kernel_matrix));
}
__device__ float exponential_kernel(float* x, float* y, int dim, int sigma)
{
float sum = 0;
for (int i = 0; i < dim; i++) {
sum += __fmul_rn(__fadd_rn(x[i], -y[i]), __fadd_rn(x[i], -y[i]));
// printf("sum %f\n", sum);
}
return __expf(-sum / (2 * sigma));
}
__global__ void compute_kernel_vector_kernel(float* active_inputs, float* all_inputs, float* kernel_vector, int index, float sigma, int dim_input, int num_pts, int num_active, int max_active)
{
float local_new_input[MAX_DIM_INPUT];
float local_active_input[MAX_DIM_INPUT];
int global_x = threadIdx.x + blockDim.x * blockIdx.x;
float kernel_val = 0.0f;
if (global_x >= max_active)
return;
// float test = all_inputs[1];
// if (threadIdx.x == 0 && blockIdx.x == 0)
// printf("Test kernel %f\n", test);
__syncthreads();
if (global_x < num_active) {
// read new input into local memory
for (int i = 0; i < dim_input; i++) {
local_new_input[i] = all_inputs[index + i*num_pts];
// printf("KV New %d %d %f \n", i, index, local_new_input[i]);
}
// coalesced read of active input to compute kernel with
for (int i = 0; i < dim_input; i++) {
local_active_input[i] = active_inputs[global_x + i*num_pts];
// printf("Active %d %d %f \n", i, global_x, local_active_input[i]);
}
kernel_val = exponential_kernel(local_new_input, local_active_input, dim_input, sigma);
// printf("Kernel val %d %f\n", index, kernel_val/*, local_new_input[0], local_new_input[1], local_active_input[0], local_active_input[1]*/);
}
// coalesced value write to vector
__syncthreads();
kernel_vector[global_x] = kernel_val;
}
extern "C" void compute_kernel_vector(ActiveSetBuffers *active_buffers, MaxSubsetBuffers *subset_buffers, int index, float* kernel_vector, GaussianProcessHyperparams hypers)
{
dim3 block_dim(BLOCK_DIM_X, 1, 1);
dim3 grid_dim(ceilf((float)(active_buffers->num_active)/(float)(block_dim.x)), 1, 1);
hipLaunchKernelGGL(( cudaSafeCall((compute_kernel_vector_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, active_buffers->active_inputs, subset_buffers->inputs, kernel_vector, index, hypers.sigma, active_buffers->dim_input, subset_buffers->num_pts, active_buffers->num_active, active_buffers->max_active)));
}
__global__ void compute_kernel_vector_batch_kernel(float* active_inputs, float* all_inputs, float* kernel_vectors, int index, int batch_size, float sigma, int dim_input, int num_pts, int num_active, int max_active)
{
float local_new_input[MAX_DIM_INPUT];
float local_active_input[MAX_DIM_INPUT];
int global_x = threadIdx.x + blockDim.x * blockIdx.x; // active point to grab
int global_y = threadIdx.y + blockDim.y * blockIdx.y; // point to operate on (offset from index)
float kernel_val = 0.0f;
if (global_x >= max_active || global_y >= num_pts - index || global_y >= batch_size)
return;
__syncthreads();
if (global_x < num_active) {
// read new input into local memory
for (int i = 0; i < dim_input; i++) {
local_new_input[i] = all_inputs[global_y + index + i*num_pts];
// printf("KV New %d %d %f \n", i, index, local_new_input[i]);
}
// coalesced read of active input to compute kernel with
for (int i = 0; i < dim_input; i++) {
local_active_input[i] = active_inputs[global_x + i*max_active];
//printf("Active %d %d %f \n", i, global_x, local_active_input[i]);
}
kernel_val = exponential_kernel(local_new_input, local_active_input, dim_input, sigma);
// printf("Kernel val %d %d %d %f\n", num_active, global_x, global_y, kernel_val/*, local_new_input[0], local_new_input[1], local_active_input[0], local_active_input[1]*/);
}
// coalesced value write to vector
__syncthreads();
kernel_vectors[global_x + global_y*max_active] = kernel_val;
}
extern "C" void compute_kernel_vector_batch(ActiveSetBuffers *active_buffers, MaxSubsetBuffers* subset_buffers, int index, int batch_size, float* kernel_vectors, GaussianProcessHyperparams hypers)
{
// x corresponds to the active point to compute the kernel with
// y corresponds to the query point
dim3 block_dim(BLOCK_DIM_BATCH_X, BLOCK_DIM_BATCH_Y, 1);
dim3 grid_dim(ceilf((float)(active_buffers->num_active)/(float)(block_dim.x)),
ceilf((float)(batch_size)/(float)(block_dim.y)),
1);
hipLaunchKernelGGL(( cudaSafeCall((compute_kernel_vector_batch_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, active_buffers->active_inputs, subset_buffers->inputs, kernel_vectors, index, batch_size, hypers.sigma, active_buffers->dim_input, subset_buffers->num_pts, active_buffers->num_active, active_buffers->max_active)));
}
__global__ void update_kernel_matrix_kernel(float* kernel_matrix, float* active_inputs, float* active_targets, float* all_inputs, float* all_targets, float beta, float sigma, int* g_index, int dim_input, int dim_target, int num_pts, int num_active, int max_active)
{
// parameters
__shared__ int segment_size;
__shared__ int index;
float local_new_input[MAX_DIM_INPUT];
float local_active_input[MAX_DIM_INPUT];
float local_new_target[MAX_DIM_INPUT];
// read global variables into shared memory
if (threadIdx.x == 0) {
segment_size = max((int)ceilf((float)(num_active+1)/(float)GRID_DIM_X), 1);
index = g_index[0];
}
int global_x = 0;
float kernel = 0.0f;
__syncthreads();
for (int i = 0; i * blockDim.x < segment_size; i++) {
global_x = threadIdx.x + i * blockDim.x + segment_size * blockIdx.x;
// fetch new data from global memory
for (int j = 0; j < dim_input; j++) {
local_new_input[j] = all_inputs[index + j*num_pts];
}
for (int j = 0; j < dim_target; j++) {
local_new_target[j] = all_targets[index + j*num_pts];
}
// fetch active points from global memory
if (global_x < segment_size * (blockIdx.x + 1) && global_x < num_active) {
for (int j = 0; j < dim_input; j++) {
local_active_input[j] = active_inputs[global_x + j*max_active];
}
kernel = exponential_kernel(local_new_input, local_active_input, dim_input, sigma);
}
// coalesced write to new column and row
__syncthreads();
if (global_x < segment_size * (blockIdx.x + 1) && global_x < num_active) {
kernel_matrix[MAT_IJ_TO_LINEAR(global_x, num_active, max_active)] = kernel;
kernel_matrix[MAT_IJ_TO_LINEAR(num_active, global_x, max_active)] = kernel;
}
// coalesced write to active inputs
__syncthreads();
if (i == 0 && global_x < dim_input && global_x < segment_size * (blockIdx.x + 1)) {
active_inputs[num_active + global_x*max_active] = local_new_input[global_x];
// printf("new input %d %d %f\n", num_active, global_x, local_new_input[global_x]);
}
// coalesced write to active targets
__syncthreads();
if (i == 0 && global_x < dim_target && global_x < segment_size * (blockIdx.x + 1)) {
active_targets[num_active + global_x*max_active] = local_new_target[global_x];
// printf("new target %d %f\n", global_x, local_new_target[global_x]);
}
// write diagonal term
__syncthreads();
if (i == 0 && global_x == 0) {
float diag_val = exponential_kernel(local_new_input, local_new_input, dim_input, sigma);
kernel_matrix[MAT_IJ_TO_LINEAR(num_active, num_active, max_active)] = diag_val + beta;
// printf("new diag %d %d %f\n", global_x, MAT_IJ_TO_LINEAR(num_active, num_active, max_active), kernel_matrix[MAT_IJ_TO_LINEAR(num_active, num_active, max_active)]);
}
__syncthreads();
}
}
extern "C" void update_active_set_buffers(ActiveSetBuffers *active_buffers, MaxSubsetBuffers *subset_buffers, GaussianProcessHyperparams hypers) {
int dim_input = subset_buffers->dim_input;
int dim_target = subset_buffers->dim_target;
if (dim_input > MAX_DIM_INPUT) {
printf("Error: Input is too high dimensional for update. Aborting...");
return;
}
if (dim_target > MAX_DIM_INPUT) {
printf("Error: Target is too high dimensional for update. Aborting...");
return;
}
dim3 block_dim(BLOCK_DIM_X, 1, 1);
dim3 grid_dim(GRID_DIM_X, 1, 1);
hipLaunchKernelGGL(( cudaSafeCall((update_kernel_matrix_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, active_buffers->active_kernel_matrix,
active_buffers->active_inputs,
active_buffers->active_targets,
subset_buffers->inputs,
subset_buffers->targets,
hypers.beta, hypers.sigma,
subset_buffers->d_next_index,
dim_input, dim_target,
subset_buffers->num_pts,
active_buffers->num_active,
active_buffers->max_active)));
active_buffers->num_active++;
}
__global__ void norm_columns_kernel(float* A, float* x, int m, int n)
{
// max score for each thread
__shared__ float s_sums[BLOCK_DIM_BATCH_X * BLOCK_DIM_BATCH_Y];
// parameters
__shared__ int segment_size;
if (threadIdx.x == 0 && threadIdx.y == 0) {
segment_size = m;
}
// initialize scores and count
int local_x = threadIdx.x + blockDim.x*threadIdx.y;
s_sums[local_x] = 0;
__syncthreads();
int global_x = 0;
int global_y = threadIdx.y + blockIdx.y*blockDim.y;
float val = 0.0f;
// keep reading the values and squaring them
for (int i = 0; i * blockDim.x < segment_size; i++) {
global_x = threadIdx.x + i * blockDim.x;
// read from global memory
__syncthreads();
if (global_x < segment_size && global_y < n) {
val = A[global_x + m * global_y];
// printf("Read %f at %d, %d\n", val, global_x, global_y);
s_sums[local_x] += val * val;
}
}
// reduce the squared sum
global_x = threadIdx.x;
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (global_x < stride && (global_x + stride) < blockDim.x) {
s_sums[global_x + blockDim.x * threadIdx.y] += s_sums[global_x + blockDim.x * threadIdx.y + stride];
}
}
// write result to global x vector
__syncthreads();
if (threadIdx.x == 0) {
// printf("Sig at %d: %f\n", threadIdx.y, s_sums[blockDim.x * threadIdx.y]);
x[threadIdx.y + blockIdx.y * blockDim.y] = s_sums[blockDim.x * threadIdx.y];
}
}
// square norm the columns of A, store in x
extern "C" void norm_columns(float* A, float* x, int m, int n)
{
// y for each column of the matrix
dim3 block_dim(BLOCK_DIM_NORM_X, BLOCK_DIM_NORM_Y, 1);
dim3 grid_dim(1,
ceilf((float)(n)/(float)(block_dim.y)),
1);
hipLaunchKernelGGL(( cudaSafeCall((norm_columns_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, A, x, m, n)));
}
| a14adbdab2be4b98abb31a35a1cebd6b9a16a0ab.cu | #include "cuda_macros.h"
#include "active_set_buffers.h"
#define BLOCK_DIM_X 128
#define GRID_DIM_X 64
#define BLOCK_DIM_BATCH_X 32
#define BLOCK_DIM_BATCH_Y 32
#define BLOCK_DIM_NORM_X 128
#define BLOCK_DIM_NORM_Y 1
#define MAT_IJ_TO_LINEAR(i, j, dim) ((i) + (j)*(dim))
extern "C" void construct_active_set_buffers(ActiveSetBuffers *buffers, int dim_input, int dim_target, int max_active) {
// assign params
buffers->max_active = max_active;
buffers->num_active = 0;
buffers->dim_input = dim_input;
buffers->dim_target = dim_target;
// allocate buffers
cudaSafeCall(cudaMalloc((void**)&(buffers->active_inputs), dim_input * max_active * sizeof(float)));
cudaSafeCall(cudaMalloc((void**)&(buffers->active_targets), dim_target * max_active * sizeof(float)));
cudaSafeCall(cudaMalloc((void**)&(buffers->active_kernel_matrix), max_active * max_active * sizeof(float)));
// set kernel matrix to all zeros
cudaSafeCall(cudaMemset(buffers->active_targets, 0, max_active * sizeof(float)));
cudaSafeCall(cudaMemset(buffers->active_kernel_matrix, 0, max_active * max_active * sizeof(float)));
}
extern "C" void free_active_set_buffers(ActiveSetBuffers *buffers) {
// free everything
cudaSafeCall(cudaFree(buffers->active_inputs));
cudaSafeCall(cudaFree(buffers->active_targets));
cudaSafeCall(cudaFree(buffers->active_kernel_matrix));
}
__device__ float exponential_kernel(float* x, float* y, int dim, int sigma)
{
float sum = 0;
for (int i = 0; i < dim; i++) {
sum += __fmul_rn(__fadd_rn(x[i], -y[i]), __fadd_rn(x[i], -y[i]));
// printf("sum %f\n", sum);
}
return __expf(-sum / (2 * sigma));
}
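// --------------------------------------------------------------------------
// Editorial note (not part of the original source): exponential_kernel
// evaluates the squared-exponential (RBF) kernel
// k(x, y) = exp(-||x - y||^2 / (2 * sigma)),
// with sigma used directly as the denominator scale rather than squared.
// Note also that sigma is declared int here, so the float sigma passed by the
// callers below is truncated toward zero. A plain host reference for checking
// the device results could look like this sketch (the name
// exponential_kernel_host is an assumption):
//
// inline float exponential_kernel_host(const float* x, const float* y, int dim, float sigma) {
// float sum = 0.0f;
// for (int i = 0; i < dim; i++) {
// float d = x[i] - y[i];
// sum += d * d; // accumulate squared distance
// }
// return expf(-sum / (2.0f * sigma)); // same scaling as the device code
// }
// --------------------------------------------------------------------------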
__global__ void compute_kernel_vector_kernel(float* active_inputs, float* all_inputs, float* kernel_vector, int index, float sigma, int dim_input, int num_pts, int num_active, int max_active)
{
float local_new_input[MAX_DIM_INPUT];
float local_active_input[MAX_DIM_INPUT];
int global_x = threadIdx.x + blockDim.x * blockIdx.x;
float kernel_val = 0.0f;
if (global_x >= max_active)
return;
// float test = all_inputs[1];
// if (threadIdx.x == 0 && blockIdx.x == 0)
// printf("Test kernel %f\n", test);
__syncthreads();
if (global_x < num_active) {
// read new input into local memory
for (int i = 0; i < dim_input; i++) {
local_new_input[i] = all_inputs[index + i*num_pts];
// printf("KV New %d %d %f \n", i, index, local_new_input[i]);
}
// coalesced read of active input to compute kernel with
for (int i = 0; i < dim_input; i++) {
local_active_input[i] = active_inputs[global_x + i*num_pts];
// printf("Active %d %d %f \n", i, global_x, local_active_input[i]);
}
kernel_val = exponential_kernel(local_new_input, local_active_input, dim_input, sigma);
// printf("Kernel val %d %f\n", index, kernel_val/*, local_new_input[0], local_new_input[1], local_active_input[0], local_active_input[1]*/);
}
// coalesced value write to vector
__syncthreads();
kernel_vector[global_x] = kernel_val;
}
extern "C" void compute_kernel_vector(ActiveSetBuffers *active_buffers, MaxSubsetBuffers *subset_buffers, int index, float* kernel_vector, GaussianProcessHyperparams hypers)
{
dim3 block_dim(BLOCK_DIM_X, 1, 1);
dim3 grid_dim(ceilf((float)(active_buffers->num_active)/(float)(block_dim.x)), 1, 1);
cudaSafeCall((compute_kernel_vector_kernel<<<grid_dim, block_dim>>>(active_buffers->active_inputs, subset_buffers->inputs, kernel_vector, index, hypers.sigma, active_buffers->dim_input, subset_buffers->num_pts, active_buffers->num_active, active_buffers->max_active)));
}
__global__ void compute_kernel_vector_batch_kernel(float* active_inputs, float* all_inputs, float* kernel_vectors, int index, int batch_size, float sigma, int dim_input, int num_pts, int num_active, int max_active)
{
float local_new_input[MAX_DIM_INPUT];
float local_active_input[MAX_DIM_INPUT];
int global_x = threadIdx.x + blockDim.x * blockIdx.x; // active point to grab
int global_y = threadIdx.y + blockDim.y * blockIdx.y; // point to operate on (offset from index)
float kernel_val = 0.0f;
if (global_x >= max_active || global_y >= num_pts - index || global_y >= batch_size)
return;
__syncthreads();
if (global_x < num_active) {
// read new input into local memory
for (int i = 0; i < dim_input; i++) {
local_new_input[i] = all_inputs[global_y + index + i*num_pts];
// printf("KV New %d %d %f \n", i, index, local_new_input[i]);
}
// coalesced read of active input to compute kernel with
for (int i = 0; i < dim_input; i++) {
local_active_input[i] = active_inputs[global_x + i*max_active];
//printf("Active %d %d %f \n", i, global_x, local_active_input[i]);
}
kernel_val = exponential_kernel(local_new_input, local_active_input, dim_input, sigma);
// printf("Kernel val %d %d %d %f\n", num_active, global_x, global_y, kernel_val/*, local_new_input[0], local_new_input[1], local_active_input[0], local_active_input[1]*/);
}
// coalesced value write to vector
__syncthreads();
kernel_vectors[global_x + global_y*max_active] = kernel_val;
}
extern "C" void compute_kernel_vector_batch(ActiveSetBuffers *active_buffers, MaxSubsetBuffers* subset_buffers, int index, int batch_size, float* kernel_vectors, GaussianProcessHyperparams hypers)
{
// x corresponds to the active point to compute the kernel with
// y corresponds to the query point
dim3 block_dim(BLOCK_DIM_BATCH_X, BLOCK_DIM_BATCH_Y, 1);
dim3 grid_dim(ceilf((float)(active_buffers->num_active)/(float)(block_dim.x)),
ceilf((float)(batch_size)/(float)(block_dim.y)),
1);
cudaSafeCall((compute_kernel_vector_batch_kernel<<<grid_dim, block_dim>>>(active_buffers->active_inputs, subset_buffers->inputs, kernel_vectors, index, batch_size, hypers.sigma, active_buffers->dim_input, subset_buffers->num_pts, active_buffers->num_active, active_buffers->max_active)));
}
__global__ void update_kernel_matrix_kernel(float* kernel_matrix, float* active_inputs, float* active_targets, float* all_inputs, float* all_targets, float beta, float sigma, int* g_index, int dim_input, int dim_target, int num_pts, int num_active, int max_active)
{
// parameters
__shared__ int segment_size;
__shared__ int index;
float local_new_input[MAX_DIM_INPUT];
float local_active_input[MAX_DIM_INPUT];
float local_new_target[MAX_DIM_INPUT];
// read global variables into shared memory
if (threadIdx.x == 0) {
segment_size = max((int)ceilf((float)(num_active+1)/(float)GRID_DIM_X), 1);
index = g_index[0];
}
int global_x = 0;
float kernel = 0.0f;
__syncthreads();
for (int i = 0; i * blockDim.x < segment_size; i++) {
global_x = threadIdx.x + i * blockDim.x + segment_size * blockIdx.x;
// fetch new data from global memory
for (int j = 0; j < dim_input; j++) {
local_new_input[j] = all_inputs[index + j*num_pts];
}
for (int j = 0; j < dim_target; j++) {
local_new_target[j] = all_targets[index + j*num_pts];
}
// fetch active points from global memory
if (global_x < segment_size * (blockIdx.x + 1) && global_x < num_active) {
for (int j = 0; j < dim_input; j++) {
local_active_input[j] = active_inputs[global_x + j*max_active];
}
kernel = exponential_kernel(local_new_input, local_active_input, dim_input, sigma);
}
// coalesced write to new column and row
__syncthreads();
if (global_x < segment_size * (blockIdx.x + 1) && global_x < num_active) {
kernel_matrix[MAT_IJ_TO_LINEAR(global_x, num_active, max_active)] = kernel;
kernel_matrix[MAT_IJ_TO_LINEAR(num_active, global_x, max_active)] = kernel;
}
// coalesced write to active inputs
__syncthreads();
if (i == 0 && global_x < dim_input && global_x < segment_size * (blockIdx.x + 1)) {
active_inputs[num_active + global_x*max_active] = local_new_input[global_x];
// printf("new input %d %d %f\n", num_active, global_x, local_new_input[global_x]);
}
// coalesced write to active targets
__syncthreads();
if (i == 0 && global_x < dim_target && global_x < segment_size * (blockIdx.x + 1)) {
active_targets[num_active + global_x*max_active] = local_new_target[global_x];
// printf("new target %d %f\n", global_x, local_new_target[global_x]);
}
// write diagonal term
__syncthreads();
if (i == 0 && global_x == 0) {
float diag_val = exponential_kernel(local_new_input, local_new_input, dim_input, sigma);
kernel_matrix[MAT_IJ_TO_LINEAR(num_active, num_active, max_active)] = diag_val + beta;
// printf("new diag %d %d %f\n", global_x, MAT_IJ_TO_LINEAR(num_active, num_active, max_active), kernel_matrix[MAT_IJ_TO_LINEAR(num_active, num_active, max_active)]);
}
__syncthreads();
}
}
extern "C" void update_active_set_buffers(ActiveSetBuffers *active_buffers, MaxSubsetBuffers *subset_buffers, GaussianProcessHyperparams hypers) {
int dim_input = subset_buffers->dim_input;
int dim_target = subset_buffers->dim_target;
if (dim_input > MAX_DIM_INPUT) {
printf("Error: Input is too high dimensional for update. Aborting...");
return;
}
if (dim_target > MAX_DIM_INPUT) {
printf("Error: Target is too high dimensional for update. Aborting...");
return;
}
dim3 block_dim(BLOCK_DIM_X, 1, 1);
dim3 grid_dim(GRID_DIM_X, 1, 1);
cudaSafeCall((update_kernel_matrix_kernel<<<grid_dim, block_dim>>>(active_buffers->active_kernel_matrix,
active_buffers->active_inputs,
active_buffers->active_targets,
subset_buffers->inputs,
subset_buffers->targets,
hypers.beta, hypers.sigma,
subset_buffers->d_next_index,
dim_input, dim_target,
subset_buffers->num_pts,
active_buffers->num_active,
active_buffers->max_active)));
active_buffers->num_active++;
}
__global__ void norm_columns_kernel(float* A, float* x, int m, int n)
{
// max score for each thread
__shared__ float s_sums[BLOCK_DIM_BATCH_X * BLOCK_DIM_BATCH_Y];
// parameters
__shared__ int segment_size;
if (threadIdx.x == 0 && threadIdx.y == 0) {
segment_size = m;
}
// initialize scores and count
int local_x = threadIdx.x + blockDim.x*threadIdx.y;
s_sums[local_x] = 0;
__syncthreads();
int global_x = 0;
int global_y = threadIdx.y + blockIdx.y*blockDim.y;
float val = 0.0f;
// keep reading the values and squaring them
for (int i = 0; i * blockDim.x < segment_size; i++) {
global_x = threadIdx.x + i * blockDim.x;
// read from global memory
__syncthreads();
if (global_x < segment_size && global_y < n) {
val = A[global_x + m * global_y];
// printf("Read %f at %d, %d\n", val, global_x, global_y);
s_sums[local_x] += val * val;
}
}
// reduce the squared sum
global_x = threadIdx.x;
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (global_x < stride && (global_x + stride) < blockDim.x) {
s_sums[global_x + blockDim.x * threadIdx.y] += s_sums[global_x + blockDim.x * threadIdx.y + stride];
}
}
// write result to global x vector
__syncthreads();
if (threadIdx.x == 0) {
// printf("Sig at %d: %f\n", threadIdx.y, s_sums[blockDim.x * threadIdx.y]);
x[threadIdx.y + blockIdx.y * blockDim.y] = s_sums[blockDim.x * threadIdx.y];
}
}
// square norm the columns of A, store in x
extern "C" void norm_columns(float* A, float* x, int m, int n)
{
// y for each column of the matrix
dim3 block_dim(BLOCK_DIM_NORM_X, BLOCK_DIM_NORM_Y, 1);
dim3 grid_dim(1,
ceilf((float)(n)/(float)(block_dim.y)),
1);
cudaSafeCall((norm_columns_kernel<<<grid_dim, block_dim>>>(A, x, m, n)));
}
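// --------------------------------------------------------------------------
// Editorial reference (not part of the original source): norm_columns and its
// kernel compute the scalar loop
// x[j] = sum_i A[i + m*j]^2 for j = 0..n-1
// over the column-major m x n matrix A. For example, a 3x2 matrix with
// columns (1,2,3) and (4,5,6) yields x = {14, 77}.
// --------------------------------------------------------------------------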
|
7c5b996b0fcdb17dc055f952311731c256d1cbc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include <stdio.h>
__device__ __forceinline__ uint bfe(uint val, int pos)
{
uint bit;
asm ("bfe.u32 %0, %1, %2, 1;" : "=r"(bit) : "r"(val), "r"(pos) );
return bit;
}
typedef struct __align__(8) KeyVal
{
uint key;
float val;
} KeyVal;
template <typename T>
__global__ void top_k(T* Y, uint* A, const T* __restrict__ X, uint Exp, uint topK, uint K, uint rect, uint rebase)
{
extern __shared__ KeyVal data[];
uint tid = threadIdx.x;
uint n = blockIdx.x;
uint offset = n*K + tid;
KeyVal init;
init.key = tid;
init.val = tid < K ? load(add_ptr_u(X, offset)) : -FLT_MAX;
data[tid] = init;
__syncthreads();
for (int i = 1; i <= Exp; ++i)
{
int j;
#pragma unroll 1
for (j = i - 1; j >= 5; --j)
{
// when the comparison stride is 32 or greater,
// use half of warps and uniform shared memory access to make comparisons
if (tid < blockDim.x/2)
{
// figure out the a and b indexes for the "butterfly" compare operation
uint m = (tid >> j) << (j + 1);
uint r = tid & ((1 << j) - 1);
uint a = m + r;
uint b = a + (1 << j);
bool d = bfe(a, i) != 0;
KeyVal A = data[a];
KeyVal B = data[b];
if((B.val > A.val) ^ d)
{
KeyVal t = A;
A = B;
B = t;
}
data[a] = A;
data[b] = B;
}
__syncthreads();
}
// When the comparison stride is less than 32,
// use all warps and shfl_xor operations to make comparisons in registers
// Load shared to registers
KeyVal A = data[tid];
#pragma unroll 5
while (j >= 0)
{
KeyVal B;
B.val = shfl_xor(A.val, 1 << j);
B.key = shfl_xor(A.key, 1 << j);
bool d = bfe(tid, i) != bfe(tid, j--);
// in the case of equality we want both shuffle lanes to not swap
if(((B.val > A.val) ^ d) && B.val != A.val)
A = B;
}
// Load final register values back to shared.
data[tid] = A;
__syncthreads();
}
if (rect)
{
// avoid extra __syncthreads by coalescing to unused shared
float* coalesce = (float*)&data[blockDim.x];
// Output same size as input, with zeros for non-topK values.
// rebase sets the zero line to the min value of the topK
KeyVal out = data[tid];
float base = rebase ? fmaxf(data[topK-1].val, 0.0f) : 0.0f;
float val = tid < topK ? out.val : 0.0f;
//if (tid == 0 && n == 0)
// printf("base: %f %d\n", base, data[topK-1].key);
// apply the rectification and coalesce the output
coalesce[out.key] = fmaxf(val, base) - base;
__syncthreads();
if (tid < K)
store(add_ptr_u(Y, offset), coalesce[tid]);
}
else
{
// output just the top values and their indices.
if (tid < topK)
{
KeyVal out = data[tid];
offset = n*topK + tid;
store(add_ptr_u(Y, offset), out.val);
__stg(add_ptr_u(A, offset), out.key);
}
}
}
template <typename T>
bool TopK(hipStream_t stream, T* y, uint* a, const T* x, uint topK, uint N, uint K, uint rebase)
{
uint exp;
if (K > 512) exp = 10;
else if (K > 256) exp = 9;
else if (K > 128) exp = 8;
else if (K > 64) exp = 7;
else if (K > 32) exp = 6;
else exp = 5;
uint threads = 1 << exp;
uint shared = threads * 16;
hipLaunchKernelGGL(( top_k<T>), dim3(N),dim3(threads),shared,stream, y, a, x, exp, topK, K, a == NULL, rebase);
return true;
}
template bool TopK<float>(hipStream_t stream, float* y, uint* a, const float* x, uint topK, uint N, uint K, uint rebase);
template bool TopK<ehalf>(hipStream_t stream, ehalf* y, uint* a, const ehalf* x, uint topK, uint N, uint K, uint rebase);
template bool TopK<bhalf>(hipStream_t stream, bhalf* y, uint* a, const bhalf* x, uint topK, uint N, uint K, uint rebase);
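// --------------------------------------------------------------------------
// Illustrative call sketch (editorial addition, not part of the original
// source): invoking TopK<float> on N rows of length K, keeping the top 8
// values per row. One block handles one row, so K is assumed to be at most
// 1024. Buffer names and sizes below are hypothetical.
//
// uint N = 64, K = 256, topK = 8;
// float *d_x, *d_y; uint *d_a;
// hipMalloc(&d_x, N * K * sizeof(float)); // input rows
// hipMalloc(&d_y, N * topK * sizeof(float)); // top values per row
// hipMalloc(&d_a, N * topK * sizeof(uint)); // indices of those values
// // ... fill d_x ...
// TopK<float>(stream, d_y, d_a, d_x, topK, N, K, /*rebase=*/0);
// // Passing a == NULL instead selects "rect" mode: the output is a full
// // K-wide row with non-topK entries zeroed (optionally rebased).
// --------------------------------------------------------------------------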
template <typename T>
__global__ void masked_top_k_softmax(T* Y, const float* __restrict__ M, const T* __restrict__ X, uint Exp, uint topK, uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale)
{
extern __shared__ KeyVal block[];
extern __shared__ float stage[];
// x: D0, D1, D2, D3
// m: 1, D1, D2, D3
// m: 1, 1, D2, D3
// m: 1, 1, 1, D3
uint tid = threadIdx.x;
uint d0 = blockIdx.x;
uint d1 = blockIdx.y;
uint d2 = blockIdx.z;
uint offsetX = d0*D123 + d1*D23 + d2*D3 + tid;
uint offsetM = d1*M1 + d2*M2 + tid;
M = add_ptr_u(M, offsetM);
X = add_ptr_u(X, offsetX);
float mask = tid < D3 ? (use_mask ? __ldg(M) : 1.0f) : 0.0f;
float xval = mask != 0.0 ? load(X) * mask * scale : -FLT_MAX;
KeyVal init;
init.key = tid;
init.val = xval;
block[tid] = init;
__syncthreads();
for (int i = 1; i <= Exp; ++i)
{
int j;
#pragma unroll 1
for (j = i - 1; j >= 5; --j)
{
// when the comparison stride is 32 or greater,
// use half of warps and uniform shared memory access to make comparisons
if (tid < blockDim.x/2)
{
// figure out the a and b indexes for the "butterfly" compare operation
uint m = (tid >> j) << (j + 1);
uint r = tid & ((1 << j) - 1);
uint a = m + r;
uint b = a + (1 << j);
bool d = bfe(a, i) != 0;
KeyVal A = block[a];
KeyVal B = block[b];
if((B.val > A.val) ^ d)
{
KeyVal t = A;
A = B;
B = t;
}
block[a] = A;
block[b] = B;
}
__syncthreads();
}
// When the comparison stride is less than 32,
// use all warps and shfl_xor operations to make comparisons in registers
// Load shared to registers
KeyVal A = block[tid];
#pragma unroll 5
while (j >= 0)
{
KeyVal B;
B.val = shfl_xor(A.val, 1 << j);
B.key = shfl_xor(A.key, 1 << j);
bool d = bfe(tid, i) != bfe(tid, j--);
// in the case of equality we want both shuffle lanes to not swap
if(((B.val > A.val) ^ d) && B.val != A.val)
A = B;
}
// Load final register values back to shared.
block[tid] = A;
__syncthreads();
}
float* vals = &stage[blockDim.x*2];
float* reds = &vals[blockDim.x];
KeyVal out = block[tid];
float val = 0.0f;
if (tid < topK)
val = expf(out.val - block[0].val);
vals[out.key] = val;
// reduce within warp
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
val += shfl_xor(val, i);
// first thread of each warp store to shared
if ((tid & 31) == 0)
reds[tid/32] = val;
__syncthreads();
if (tid < blockDim.x/32)
{
// first warp loads all prior reductions
val = reds[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = blockDim.x/64; i > 0; i >>= 1)
val += shfl_xor(val, i);
// rcp final reduction to shared
reds[tid] = 1.0f / val;
}
__syncthreads();
if (tid < D3)
store(add_ptr_u(Y, offsetX), vals[tid] * reds[0]);
}
template <typename T>
bool MaskedTopKSoftmax(hipStream_t stream, T* y, const float* m, const T* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale)
{
uint exp;
if (D3 > 512) exp = 10;
else if (D3 > 256) exp = 9;
else if (D3 > 128) exp = 8;
else if (D3 > 64) exp = 7;
else if (D3 > 32) exp = 6;
else exp = 5;
uint threads = 1 << exp;
uint shared = threads * 16;
hipLaunchKernelGGL(( masked_top_k_softmax<T>), dim3(dim3(D0,D1,D2)),dim3(threads),shared,stream, y, m, x, exp, topK, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale);
return true;
}
template bool MaskedTopKSoftmax<float>(hipStream_t stream, float* y, const float* m, const float* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedTopKSoftmax<ehalf>(hipStream_t stream, ehalf* y, const float* m, const ehalf* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedTopKSoftmax<bhalf>(hipStream_t stream, bhalf* y, const float* m, const bhalf* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
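// --------------------------------------------------------------------------
// Editorial note (assumption, not stated in the original source): M1 and M2
// appear to be the d1- and d2-strides of the broadcast mask, since the
// kernels index it as m[d1*M1 + d2*M2 + d3]. A caller could derive them from
// a mask of shape (1, m1, m2, D3), with m1 in {1, D1} and m2 in {1, D2}, as:
//
// uint M2 = (m2 == 1) ? 0 : D3;
// uint M1 = (m1 == 1) ? 0 : m2 * D3;
// --------------------------------------------------------------------------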
// x *= mask * scale
// y = exp(x - max(x)) / sum( exp(x - max(x)) )
template <typename T, int U>
__global__ void masked_softmax(
T* Y,
const T* __restrict__ X,
const float* __restrict__ M,
uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale, int threads_pow2)
{
__shared__ float Max[32];
__shared__ float Sum[32];
// x: D0, D1, D2, D3
// m: 1, D1, D2, D3
// m: 1, 1, D2, D3
// m: 1, 1, 1, D3
uint tid = threadIdx.x;
uint d0 = blockIdx.x;
uint d1 = blockIdx.y;
uint d2 = blockIdx.z;
if (blockDim.x > 32)
{
if (tid < 32)
{
// Allows non-power of 2 threads to work
Max[tid] = -FLT_MAX;
Sum[tid] = 0.0f;
}
__syncthreads();
}
uint ti = (tid & 0x3fe0)*U + (tid & 31); // 0x3fe0 acts as -32 (~31) here: clear the low 5 bits of tid
uint offsetX = d0*D123 + d1*D23 + d2*D3 + ti;
uint offsetM = d1*M1 + d2*M2 + ti;
M = add_ptr_u(M, offsetM);
X = add_ptr_u(X, offsetX);
// Load mask
float mask[U]; for (int i = 0; i < U; i++) mask[i]= 1.0f;
if (use_mask)
{
for (int i = 0; i < U; i++)
{
mask[i] = 0.0f;
if (ti + i*32 < D3)
mask[i] = __ldg(M + i*32);
}
}
// Load X
float xval[U]; for (int i = 0; i < U; i++) xval[i] = -FLT_MAX;
for (int i = 0; i < U; i++)
if (mask[i] != 0.0 && ti + i*32 < D3)
xval[i] = load(X, i*32) * mask[i] * scale;
// reduce within thread
float Xmax[U];
for (int i = 0; i < U; i++)
Xmax[i] = xval[i];
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
Xmax[i] = fmaxf(Xmax[i], Xmax[i+j]);
float xmax = Xmax[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Max[tid/32] = xmax;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
xmax = Max[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = threads_pow2/64; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
// final reduction to shared
Max[tid] = xmax;
}
__syncthreads();
xmax = Max[0];
}
// compute exponent of softmax
float Xsum[U];
for (int i = 0; i < U; i++)
Xsum[i] = xval[i] = expf(xval[i] - xmax);
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
Xsum[i] = Xsum[i] + Xsum[i+j];
float exp_sum = Xsum[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = exp_sum;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
exp_sum = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = threads_pow2/64; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
// final reduction to shared
Sum[tid] = exp_sum;
}
__syncthreads();
exp_sum = Sum[0];
}
float rcp_exp_sum = 1.0f / exp_sum;
Y = add_ptr_u(Y, offsetX);
for (int i = 0; i < U; i++)
store(Y, xval[i] * rcp_exp_sum, i*32, ti + i*32 < D3);
}
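// --------------------------------------------------------------------------
// Editorial reference (not part of the original source): a scalar sketch of
// what one row of masked_softmax computes, matching the formula above
// (x *= mask * scale; y = exp(x - max(x)) / sum(exp(x - max(x)))).
// Masked-out positions act like -inf logits and therefore produce 0.
//
// void masked_softmax_row_ref(float* y, const float* x, const float* mask, uint D3, float scale) {
// float mx = -FLT_MAX;
// for (uint i = 0; i < D3; i++)
// mx = fmaxf(mx, mask[i] != 0.0f ? x[i] * mask[i] * scale : -FLT_MAX);
// float sum = 0.0f;
// for (uint i = 0; i < D3; i++)
// sum += expf((mask[i] != 0.0f ? x[i] * mask[i] * scale : -FLT_MAX) - mx);
// for (uint i = 0; i < D3; i++)
// y[i] = expf((mask[i] != 0.0f ? x[i] * mask[i] * scale : -FLT_MAX) - mx) / sum;
// }
// --------------------------------------------------------------------------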
// x *= mask * scale
// y = exp(x - max(x)) / sum( exp(x - max(x)) )
template <typename T>
__global__ void __launch_bounds__(32) masked_softmax2(
T* Y,
const T* __restrict__ X,
const float* __restrict__ M,
uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale)
{
// x: D0, D1, D2, D3
// m: 1, D1, D2, D3
// m: 1, 1, D2, D3
// m: 1, 1, 1, D3
uint tid = threadIdx.x;
uint d0 = blockIdx.x;
uint d1 = blockIdx.y;
uint d2 = blockIdx.z;
uint offsetX = d0*D123 + d1*D23 + d2*D3 + tid;
uint offsetM = d1*M1 + d2*M2 + tid;
// max(x, axis-1)
float max_x = -FLT_MAX;
#pragma unroll 2
for (uint d3 = tid, xi = offsetX, mi = offsetM; d3 < D3; d3 += 32, xi += 32, mi += 32)
{
float m = use_mask ? __ldg(add_ptr_u(M, mi)) : 1.0f;
float x = m != 0.0 ? load(add_ptr_u(X, xi)) * m * scale : -FLT_MAX;
max_x = fmaxf(max_x, x);
}
for (int i = 16; i > 0; i >>= 1)
max_x = fmaxf(max_x, shfl_xor(max_x, i));
float exp_sum = 0.0f;
#pragma unroll 2
for (uint d3 = tid, xi = offsetX, mi = offsetM; d3 < D3; d3 += 32, xi += 32, mi += 32)
{
float m = use_mask ? __ldg(add_ptr_u(M, mi)) : 1.0f;
float x = m != 0.0 ? load(add_ptr_u(X, xi)) * m * scale : -FLT_MAX;
exp_sum += expf(x - max_x);
}
for (int i = 16; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
float rcp_exp_sum = 1.0f / exp_sum;
#pragma unroll 2
for (uint d3 = tid, xi = offsetX, mi = offsetM; d3 < D3; d3 += 32, xi += 32, mi += 32)
{
float m = use_mask ? __ldg(add_ptr_u(M, mi)) : 1.0f;
float x = m != 0.0 ? load(add_ptr_u(X, xi)) * m * scale : -FLT_MAX;
float y = expf(x - max_x) * rcp_exp_sum;
store(add_ptr_u(Y, xi), y);
}
}
// dx = (dy - sum(dy * y, axis=-1)) * y * m * scale
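// Note (illustrative): for y = softmax(z) with z = x * m * scale, the softmax
// Jacobian gives dL/dz_i = y_i * (dy_i - sum_j dy_j * y_j), and dz_i/dx_i = m_i * scale,
// which is exactly the dx expression above.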
template <typename T, int U>
__global__ void masked_softmax_grad(
T* DX,
const T* __restrict__ DY,
const T* __restrict__ Y,
const float* __restrict__ M,
uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale, int threads_pow2)
{
__shared__ float Sum[32];
// x: D0, D1, D2, D3
// m: 1, D1, D2, D3
// m: 1, 1, D2, D3
// m: 1, 1, 1, D3
uint tid = threadIdx.x;
uint d0 = blockIdx.x;
uint d1 = blockIdx.y;
uint d2 = blockIdx.z;
if (blockDim.x > 32)
{
// Allows non-power of 2 threads to work
if (tid < 32)
Sum[tid] = 0.0f;
__syncthreads();
}
uint ti = (tid & 0x3fe0)*U + (tid & 31);
uint offsetY = d0*D123 + d1*D23 + d2*D3 + ti;
uint offsetM = d1*M1 + d2*M2 + ti;
DY = add_ptr_u(DY, offsetY);
Y = add_ptr_u( Y, offsetY);
M = add_ptr_u( M, offsetM);
// Load mask
float mask[U]; for (int i = 0; i < U; i++) mask[i]= 1.0f;
if (use_mask)
{
for (int i = 0; i < U; i++)
{
mask[i] = 0.0f;
if (ti + i*32 < D3)
mask[i] = __ldg(M + i*32);
}
}
// Load DY
float dy[U]; for (int i = 0; i < U; i++) dy[i]= 0.0f;
for (int i = 0; i < U; i++)
if (mask[i] != 0.0 && ti + i*32 < D3)
dy[i] = load(DY, i*32);
// Load Y
float y[U]; for (int i = 0; i < U; i++) y[i]= 0.0f;
for (int i = 0; i < U; i++)
if (mask[i] != 0.0 && ti + i*32 < D3)
y[i] = load(Y, i*32);
// compute dy * y and y * mask * scale
float dyy[U];
for (int i = 0; i < U; i++)
{
dyy[i] = dy[i] * y[i];
y[i] *= mask[i] * scale;
}
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
dyy[i] = dyy[i] + dyy[i+j];
float sum_dyy = dyy[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = sum_dyy;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sum_dyy = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = threads_pow2/64; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
// final reduction to shared
Sum[tid] = sum_dyy;
}
__syncthreads();
sum_dyy = Sum[0];
}
// dx = (dy - sum_dyy) * y * mask* scale
DX = add_ptr_u(DX, offsetY);
for (int i = 0; i < U; i++)
store(DX, (dy[i] - sum_dyy) * y[i], i*32, ti + i*32 < D3);
}
// dx = (dy - sum(dy * y, axis=-1)) * y * m * scale
template <typename T>
__global__ void __launch_bounds__(32) masked_softmax_grad2(
T* DX,
const T* __restrict__ DY,
const T* __restrict__ Y,
const float* __restrict__ M,
uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale)
{
// x: D0, D1, D2, D3
// m: 1, D1, D2, D3
// m: 1, 1, D2, D3
// m: 1, 1, 1, D3
uint tid = threadIdx.x;
uint d0 = blockIdx.x;
uint d1 = blockIdx.y;
uint d2 = blockIdx.z;
uint offsetY = d0*D123 + d1*D23 + d2*D3 + tid;
uint offsetM = d1*M1 + d2*M2 + tid;
    // sum(dy * y, axis=-1)
float sum_dy_y = 0.0f;
#pragma unroll 2
for (uint d3 = tid, offset = offsetY; d3 < D3; d3 += 32, offset += 32)
{
float dy = load(add_ptr_u(DY, offset));
float y = load(add_ptr_u(Y, offset));
sum_dy_y += dy * y;
}
for (int i = 16; i > 0; i >>= 1)
sum_dy_y += shfl_xor(sum_dy_y, i);
#pragma unroll 2
for (uint d3 = tid; d3 < D3; d3 += 32, offsetY += 32, offsetM += 32)
{
float dy = load(add_ptr_u(DY, offsetY));
float y = load(add_ptr_u(Y, offsetY));
float m = use_mask ? __ldg(add_ptr_u(M, offsetM)) : 1.0f;
float dx = (dy - sum_dy_y) * y * m * scale;
store(add_ptr_u(DX, offsetY), dx);
}
}
template <typename T>
bool MaskedSoftmax(hipStream_t stream, T* y, const T* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale)
{
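    // Note (illustrative): the masked_softmax<T,U> kernels hold U elements of the
    // softmax axis per thread in registers, so with up to 1024 threads and U=8 they
    // cover D3 <= 8192; longer rows fall back to masked_softmax2, where a single
    // warp streams over D3 in three passes (max, sum, normalize).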
if (D3 > 1024*8)
hipLaunchKernelGGL(( masked_softmax2<T>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale);
else
{
if (D3 > 32*4)
{
uint threads = CEIL_DIV(D3, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
hipLaunchKernelGGL(( masked_softmax<T,8>), dim3(dim3(D0,D1,D2)),dim3(threads),0,stream, y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale, thread2);
}
else if (D3 > 32*2)
hipLaunchKernelGGL(( masked_softmax<T,4>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32);
else if (D3 > 32*1)
hipLaunchKernelGGL(( masked_softmax<T,2>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32);
else
hipLaunchKernelGGL(( masked_softmax<T,1>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32);
}
return true;
}
template <typename T>
bool MaskedSoftmaxGrad(hipStream_t stream, T* dx, const T* dy, const T* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale)
{
if (D3 > 1024*4)
hipLaunchKernelGGL(( masked_softmax_grad2<T>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale);
else
{
if (D3 > 32*2)
{
uint threads = CEIL_DIV(D3, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
hipLaunchKernelGGL(( masked_softmax_grad<T,4>), dim3(dim3(D0,D1,D2)),dim3(threads),0,stream, dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale, thread2);
}
else if (D3 > 32*1)
hipLaunchKernelGGL(( masked_softmax_grad<T,2>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32);
else
hipLaunchKernelGGL(( masked_softmax_grad<T,1>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32);
}
return true;
}
template bool MaskedSoftmax<float>(hipStream_t stream, float* y, const float* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedSoftmax<ehalf>(hipStream_t stream, ehalf* y, const ehalf* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedSoftmax<bhalf>(hipStream_t stream, bhalf* y, const bhalf* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedSoftmaxGrad<float>(hipStream_t stream, float* dx, const float* dy, const float* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedSoftmaxGrad<ehalf>(hipStream_t stream, ehalf* dx, const ehalf* dy, const ehalf* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedSoftmaxGrad<bhalf>(hipStream_t stream, bhalf* dx, const bhalf* dy, const bhalf* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
// split_heads: (batch, pixel, head, state) -> (batch, head, pixel, state)
// merge_heads: (batch, head, pixel, state) -> (batch, pixel, head, state)
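// Note (illustrative): with shapes written as (D0, D1, D2, D3), the kernel below
// computes Y[d0][d2][d1][d3] = X[d0][d1][d2][d3]; e.g. split_heads moves the
// element at (batch b, pixel p, head h, state s) of X to (b, h, p, s) of Y.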
template <typename T, uint U>
__global__ void __launch_bounds__(32) transpose_0213(T* Y, const T* X, uint D123, uint D23, uint D13, uint D2, uint D3)
{
uint tid = threadIdx.x;
uint d2 = blockIdx.x;
uint d1 = blockIdx.y;
uint d0 = blockIdx.z;
uint offset = d0*D123 + tid;
uint offsetX = d1*D23 + d2*D3 + offset;
uint offsetY = d2*D13 + d1*D3 + offset;
#pragma unroll 1
while (d2 < D2)
{
#pragma unroll 1
for (uint d3 = tid, xi = offsetX, yi = offsetY; d3 < D3; d3 += U*32, xi += U*32, yi += U*32)
{
const T* Xi = add_ptr_u(X, xi);
T* Yi = add_ptr_u(Y, yi);
float x[U];
for (uint i = 0; i < U; i++)
x[i] = load(Xi, i*32, d3 + i*32 < D3);
for (uint i = 0; i < U; i++)
store(Yi, x[i], i*32, d3 + i*32 < D3);
}
offsetX += gridDim.x*D3;
offsetY += gridDim.x*D13;
d2 += gridDim.x;
}
}
template <typename T>
bool Transpose_0213(hipStream_t stream, T* y, const T* x, uint D0, uint D1, uint D2, uint D3)
{
// make sure each block has enough work to cover launch overhead
uint gridX = CEIL_DIV(D2, 4);
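    // each block then covers up to 4 values of d2 (strided by gridDim.x) via the
    // "while (d2 < D2)" loop in the kernel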
if (D3 <= 64)
hipLaunchKernelGGL(( transpose_0213<T,2>), dim3(dim3(gridX,D1,D0)),dim3(32),0,stream, y, x, D1*D2*D3, D2*D3, D1*D3, D2, D3);
else
hipLaunchKernelGGL(( transpose_0213<T,4>), dim3(dim3(gridX,D1,D0)),dim3(32),0,stream, y, x, D1*D2*D3, D2*D3, D1*D3, D2, D3);
return true;
}
template bool Transpose_0213<float>(hipStream_t stream, float* y, const float* x, uint D0, uint D1, uint D2, uint D3);
template bool Transpose_0213<ehalf>(hipStream_t stream, ehalf* y, const ehalf* x, uint D0, uint D1, uint D2, uint D3);
template bool Transpose_0213<bhalf>(hipStream_t stream, bhalf* y, const bhalf* x, uint D0, uint D1, uint D2, uint D3);
#endif
| 7c5b996b0fcdb17dc055f952311731c256d1cbc5.cu | #if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include <stdio.h>
__device__ __forceinline__ uint bfe(uint val, int pos)
{
uint bit;
asm ("bfe.u32 %0, %1, %2, 1;" : "=r"(bit) : "r"(val), "r"(pos) );
return bit;
}
typedef struct __align__(8) KeyVal
{
uint key;
float val;
} KeyVal;
template <typename T>
__global__ void top_k(T* Y, uint* A, const T* __restrict__ X, uint Exp, uint topK, uint K, uint rect, uint rebase)
{
extern __shared__ KeyVal data[];
uint tid = threadIdx.x;
uint n = blockIdx.x;
uint offset = n*K + tid;
KeyVal init;
init.key = tid;
init.val = tid < K ? load(add_ptr_u(X, offset)) : -FLT_MAX;
data[tid] = init;
__syncthreads();
for (int i = 1; i <= Exp; ++i)
{
int j;
#pragma unroll 1
for (j = i - 1; j >= 5; --j)
{
// when the comparison stride is 32 or greater,
// use half of warps and uniform shared memory access to make comparisons
if (tid < blockDim.x/2)
{
// figure out the a and b indexes for the "butterfly" compare operation
uint m = (tid >> j) << (j + 1);
uint r = tid & ((1 << j) - 1);
uint a = m + r;
uint b = a + (1 << j);
bool d = bfe(a, i) != 0;
KeyVal A = data[a];
KeyVal B = data[b];
if((B.val > A.val) ^ d)
{
KeyVal t = A;
A = B;
B = t;
}
data[a] = A;
data[b] = B;
}
__syncthreads();
}
// When the comparison stride is less than 32,
// use all warps and shfl_xor operations to make comparisons in registers
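      // Note (illustrative): shfl_xor(v, 1 << j) pairs each lane with the lane whose
      // index differs only in bit j, so the loop below performs the same butterfly
      // exchange as the shared-memory path above, but entirely in registers.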
// Load shared to registers
KeyVal A = data[tid];
#pragma unroll 5
while (j >= 0)
{
KeyVal B;
B.val = shfl_xor(A.val, 1 << j);
B.key = shfl_xor(A.key, 1 << j);
bool d = bfe(tid, i) != bfe(tid, j--);
// in the case of equality we want both shuffle lanes to not swap
if(((B.val > A.val) ^ d) && B.val != A.val)
A = B;
}
// Load final register values back to shared.
data[tid] = A;
__syncthreads();
}
if (rect)
{
// avoid extra __syncthreads by coalescing to unused shared
float* coalesce = (float*)&data[blockDim.x];
// Output same size as input, with zeros for non-topK values.
// rebase sets the zero line to the min value of the topK
KeyVal out = data[tid];
float base = rebase ? fmaxf(data[topK-1].val, 0.0f) : 0.0f;
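      // Note (illustrative): after the descending sort, data[topK-1] appears to hold the
      // smallest of the top-K values, so with rebase enabled each kept value is shifted
      // down by that threshold (clamped at 0) and everything below it becomes 0.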
float val = tid < topK ? out.val : 0.0f;
//if (tid == 0 && n == 0)
// printf("base: %f %d\n", base, data[topK-1].key);
// apply the rectification and coalesce the output
coalesce[out.key] = fmaxf(val, base) - base;
__syncthreads();
if (tid < K)
store(add_ptr_u(Y, offset), coalesce[tid]);
}
else
{
    // output just top values and their indices.
if (tid < topK)
{
KeyVal out = data[tid];
offset = n*topK + tid;
store(add_ptr_u(Y, offset), out.val);
__stg(add_ptr_u(A, offset), out.key);
}
}
}
template <typename T>
bool TopK(CUstream stream, T* y, uint* a, const T* x, uint topK, uint N, uint K, uint rebase)
{
uint exp;
if (K > 512) exp = 10;
else if (K > 256) exp = 9;
else if (K > 128) exp = 8;
else if (K > 64) exp = 7;
else if (K > 32) exp = 6;
else exp = 5;
uint threads = 1 << exp;
uint shared = threads * 16;
top_k<T><<<N,threads,shared,stream>>>(y, a, x, exp, topK, K, a == NULL, rebase);
return true;
}
template bool TopK<float>(CUstream stream, float* y, uint* a, const float* x, uint topK, uint N, uint K, uint rebase);
template bool TopK<ehalf>(CUstream stream, ehalf* y, uint* a, const ehalf* x, uint topK, uint N, uint K, uint rebase);
template bool TopK<bhalf>(CUstream stream, bhalf* y, uint* a, const bhalf* x, uint topK, uint N, uint K, uint rebase);
template <typename T>
__global__ void masked_top_k_softmax(T* Y, const float* __restrict__ M, const T* __restrict__ X, uint Exp, uint topK, uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale)
{
extern __shared__ KeyVal block[];
extern __shared__ float stage[];
// x: D0, D1, D2, D3
// m: 1, D1, D2, D3
// m: 1, 1, D2, D3
// m: 1, 1, 1, D3
uint tid = threadIdx.x;
uint d0 = blockIdx.x;
uint d1 = blockIdx.y;
uint d2 = blockIdx.z;
uint offsetX = d0*D123 + d1*D23 + d2*D3 + tid;
uint offsetM = d1*M1 + d2*M2 + tid;
M = add_ptr_u(M, offsetM);
X = add_ptr_u(X, offsetX);
float mask = tid < D3 ? (use_mask ? __ldg(M) : 1.0f) : 0.0f;
float xval = mask != 0.0 ? load(X) * mask * scale : -FLT_MAX;
KeyVal init;
init.key = tid;
init.val = xval;
block[tid] = init;
__syncthreads();
for (int i = 1; i <= Exp; ++i)
{
int j;
#pragma unroll 1
for (j = i - 1; j >= 5; --j)
{
// when the comparison stride is 32 or greater,
// use half of warps and uniform shared memory access to make comparisons
if (tid < blockDim.x/2)
{
// figure out the a and b indexes for the "butterfly" compare operation
uint m = (tid >> j) << (j + 1);
uint r = tid & ((1 << j) - 1);
uint a = m + r;
uint b = a + (1 << j);
bool d = bfe(a, i) != 0;
KeyVal A = block[a];
KeyVal B = block[b];
if((B.val > A.val) ^ d)
{
KeyVal t = A;
A = B;
B = t;
}
block[a] = A;
block[b] = B;
}
__syncthreads();
}
// When the comparison stride is less than 32,
// use all warps and shfl_xor operations to make comparisons in registers
// Load shared to registers
KeyVal A = block[tid];
#pragma unroll 5
while (j >= 0)
{
KeyVal B;
B.val = shfl_xor(A.val, 1 << j);
B.key = shfl_xor(A.key, 1 << j);
bool d = bfe(tid, i) != bfe(tid, j--);
// in the case of equality we want both shuffle lanes to not swap
if(((B.val > A.val) ^ d) && B.val != A.val)
A = B;
}
// Load final register values back to shared.
block[tid] = A;
__syncthreads();
}
float* vals = &stage[blockDim.x*2];
float* reds = &vals[blockDim.x];
KeyVal out = block[tid];
float val = 0.0f;
if (tid < topK)
val = expf(out.val - block[0].val);
vals[out.key] = val;
// reduce within warp
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
val += shfl_xor(val, i);
// first thread of each warp store to shared
if ((tid & 31) == 0)
reds[tid/32] = val;
__syncthreads();
if (tid < blockDim.x/32)
{
// first warp loads all prior reductions
val = reds[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = blockDim.x/64; i > 0; i >>= 1)
val += shfl_xor(val, i);
// rcp final reduction to shared
reds[tid] = 1.0f / val;
}
__syncthreads();
if (tid < D3)
store(add_ptr_u(Y, offsetX), vals[tid] * reds[0]);
}
template <typename T>
bool MaskedTopKSoftmax(CUstream stream, T* y, const float* m, const T* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale)
{
uint exp;
if (D3 > 512) exp = 10;
else if (D3 > 256) exp = 9;
else if (D3 > 128) exp = 8;
else if (D3 > 64) exp = 7;
else if (D3 > 32) exp = 6;
else exp = 5;
uint threads = 1 << exp;
uint shared = threads * 16;
masked_top_k_softmax<T><<<dim3(D0,D1,D2),threads,shared,stream>>>(y, m, x, exp, topK, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale);
return true;
}
template bool MaskedTopKSoftmax<float>(CUstream stream, float* y, const float* m, const float* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedTopKSoftmax<ehalf>(CUstream stream, ehalf* y, const float* m, const ehalf* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedTopKSoftmax<bhalf>(CUstream stream, bhalf* y, const float* m, const bhalf* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
// x *= mask * scale
// y = exp(x - max(x)) / sum( exp(x - max(x)) )
template <typename T, int U>
__global__ void masked_softmax(
T* Y,
const T* __restrict__ X,
const float* __restrict__ M,
uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale, int threads_pow2)
{
__shared__ float Max[32];
__shared__ float Sum[32];
// x: D0, D1, D2, D3
// m: 1, D1, D2, D3
// m: 1, 1, D2, D3
// m: 1, 1, 1, D3
uint tid = threadIdx.x;
uint d0 = blockIdx.x;
uint d1 = blockIdx.y;
uint d2 = blockIdx.z;
if (blockDim.x > 32)
{
if (tid < 32)
{
// Allows non-power of 2 threads to work
Max[tid] = -FLT_MAX;
Sum[tid] = 0.0f;
}
__syncthreads();
}
uint ti = (tid & 0x3fe0)*U + (tid & 31); // 0x3fe0 = -32
uint offsetX = d0*D123 + d1*D23 + d2*D3 + ti;
uint offsetM = d1*M1 + d2*M2 + ti;
M = add_ptr_u(M, offsetM);
X = add_ptr_u(X, offsetX);
// Load mask
float mask[U]; for (int i = 0; i < U; i++) mask[i]= 1.0f;
if (use_mask)
{
for (int i = 0; i < U; i++)
{
mask[i] = 0.0f;
if (ti + i*32 < D3)
mask[i] = __ldg(M + i*32);
}
}
// Load X
float xval[U]; for (int i = 0; i < U; i++) xval[i] = -FLT_MAX;
for (int i = 0; i < U; i++)
if (mask[i] != 0.0 && ti + i*32 < D3)
xval[i] = load(X, i*32) * mask[i] * scale;
// reduce within thread
float Xmax[U];
for (int i = 0; i < U; i++)
Xmax[i] = xval[i];
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
Xmax[i] = fmaxf(Xmax[i], Xmax[i+j]);
float xmax = Xmax[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Max[tid/32] = xmax;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
xmax = Max[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = threads_pow2/64; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
// final reduction to shared
Max[tid] = xmax;
}
__syncthreads();
xmax = Max[0];
}
// compute exponent of softmax
float Xsum[U];
for (int i = 0; i < U; i++)
Xsum[i] = xval[i] = expf(xval[i] - xmax);
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
Xsum[i] = Xsum[i] + Xsum[i+j];
float exp_sum = Xsum[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = exp_sum;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
exp_sum = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = threads_pow2/64; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
// final reduction to shared
Sum[tid] = exp_sum;
}
__syncthreads();
exp_sum = Sum[0];
}
float rcp_exp_sum = 1.0f / exp_sum;
Y = add_ptr_u(Y, offsetX);
for (int i = 0; i < U; i++)
store(Y, xval[i] * rcp_exp_sum, i*32, ti + i*32 < D3);
}
// x *= mask * scale
// y = exp(x - max(x)) / sum( exp(x - max(x)) )
template <typename T>
__global__ void __launch_bounds__(32) masked_softmax2(
T* Y,
const T* __restrict__ X,
const float* __restrict__ M,
uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale)
{
// x: D0, D1, D2, D3
// m: 1, D1, D2, D3
// m: 1, 1, D2, D3
// m: 1, 1, 1, D3
uint tid = threadIdx.x;
uint d0 = blockIdx.x;
uint d1 = blockIdx.y;
uint d2 = blockIdx.z;
uint offsetX = d0*D123 + d1*D23 + d2*D3 + tid;
uint offsetM = d1*M1 + d2*M2 + tid;
    // max(x, axis=-1)
float max_x = -FLT_MAX;
#pragma unroll 2
for (uint d3 = tid, xi = offsetX, mi = offsetM; d3 < D3; d3 += 32, xi += 32, mi += 32)
{
float m = use_mask ? __ldg(add_ptr_u(M, mi)) : 1.0f;
float x = m != 0.0 ? load(add_ptr_u(X, xi)) * m * scale : -FLT_MAX;
max_x = fmaxf(max_x, x);
}
for (int i = 16; i > 0; i >>= 1)
max_x = fmaxf(max_x, shfl_xor(max_x, i));
float exp_sum = 0.0f;
#pragma unroll 2
for (uint d3 = tid, xi = offsetX, mi = offsetM; d3 < D3; d3 += 32, xi += 32, mi += 32)
{
float m = use_mask ? __ldg(add_ptr_u(M, mi)) : 1.0f;
float x = m != 0.0 ? load(add_ptr_u(X, xi)) * m * scale : -FLT_MAX;
exp_sum += expf(x - max_x);
}
for (int i = 16; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
float rcp_exp_sum = 1.0f / exp_sum;
#pragma unroll 2
for (uint d3 = tid, xi = offsetX, mi = offsetM; d3 < D3; d3 += 32, xi += 32, mi += 32)
{
float m = use_mask ? __ldg(add_ptr_u(M, mi)) : 1.0f;
float x = m != 0.0 ? load(add_ptr_u(X, xi)) * m * scale : -FLT_MAX;
float y = expf(x - max_x) * rcp_exp_sum;
store(add_ptr_u(Y, xi), y);
}
}
// dx = (dy - sum(dy * y, axis=-1)) * y * m * scale
template <typename T, int U>
__global__ void masked_softmax_grad(
T* DX,
const T* __restrict__ DY,
const T* __restrict__ Y,
const float* __restrict__ M,
uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale, int threads_pow2)
{
__shared__ float Sum[32];
// x: D0, D1, D2, D3
// m: 1, D1, D2, D3
// m: 1, 1, D2, D3
// m: 1, 1, 1, D3
uint tid = threadIdx.x;
uint d0 = blockIdx.x;
uint d1 = blockIdx.y;
uint d2 = blockIdx.z;
if (blockDim.x > 32)
{
// Allows non-power of 2 threads to work
if (tid < 32)
Sum[tid] = 0.0f;
__syncthreads();
}
uint ti = (tid & 0x3fe0)*U + (tid & 31);
uint offsetY = d0*D123 + d1*D23 + d2*D3 + ti;
uint offsetM = d1*M1 + d2*M2 + ti;
DY = add_ptr_u(DY, offsetY);
Y = add_ptr_u( Y, offsetY);
M = add_ptr_u( M, offsetM);
// Load mask
float mask[U]; for (int i = 0; i < U; i++) mask[i]= 1.0f;
if (use_mask)
{
for (int i = 0; i < U; i++)
{
mask[i] = 0.0f;
if (ti + i*32 < D3)
mask[i] = __ldg(M + i*32);
}
}
// Load DY
float dy[U]; for (int i = 0; i < U; i++) dy[i]= 0.0f;
for (int i = 0; i < U; i++)
if (mask[i] != 0.0 && ti + i*32 < D3)
dy[i] = load(DY, i*32);
// Load Y
float y[U]; for (int i = 0; i < U; i++) y[i]= 0.0f;
for (int i = 0; i < U; i++)
if (mask[i] != 0.0 && ti + i*32 < D3)
y[i] = load(Y, i*32);
// compute dy * y and y * mask * scale
float dyy[U];
for (int i = 0; i < U; i++)
{
dyy[i] = dy[i] * y[i];
y[i] *= mask[i] * scale;
}
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
dyy[i] = dyy[i] + dyy[i+j];
float sum_dyy = dyy[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = sum_dyy;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sum_dyy = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = threads_pow2/64; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
// final reduction to shared
Sum[tid] = sum_dyy;
}
__syncthreads();
sum_dyy = Sum[0];
}
// dx = (dy - sum_dyy) * y * mask* scale
DX = add_ptr_u(DX, offsetY);
for (int i = 0; i < U; i++)
store(DX, (dy[i] - sum_dyy) * y[i], i*32, ti + i*32 < D3);
}
// dx = (dy - sum(dy * y, axis=-1)) * y * m * scale
template <typename T>
__global__ void __launch_bounds__(32) masked_softmax_grad2(
T* DX,
const T* __restrict__ DY,
const T* __restrict__ Y,
const float* __restrict__ M,
uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale)
{
// x: D0, D1, D2, D3
// m: 1, D1, D2, D3
// m: 1, 1, D2, D3
// m: 1, 1, 1, D3
uint tid = threadIdx.x;
uint d0 = blockIdx.x;
uint d1 = blockIdx.y;
uint d2 = blockIdx.z;
uint offsetY = d0*D123 + d1*D23 + d2*D3 + tid;
uint offsetM = d1*M1 + d2*M2 + tid;
    // sum(dy * y, axis=-1)
float sum_dy_y = 0.0f;
#pragma unroll 2
for (uint d3 = tid, offset = offsetY; d3 < D3; d3 += 32, offset += 32)
{
float dy = load(add_ptr_u(DY, offset));
float y = load(add_ptr_u(Y, offset));
sum_dy_y += dy * y;
}
for (int i = 16; i > 0; i >>= 1)
sum_dy_y += shfl_xor(sum_dy_y, i);
#pragma unroll 2
for (uint d3 = tid; d3 < D3; d3 += 32, offsetY += 32, offsetM += 32)
{
float dy = load(add_ptr_u(DY, offsetY));
float y = load(add_ptr_u(Y, offsetY));
float m = use_mask ? __ldg(add_ptr_u(M, offsetM)) : 1.0f;
float dx = (dy - sum_dy_y) * y * m * scale;
store(add_ptr_u(DX, offsetY), dx);
}
}
template <typename T>
bool MaskedSoftmax(CUstream stream, T* y, const T* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale)
{
if (D3 > 1024*8)
masked_softmax2<T><<<dim3(D0,D1,D2),32,0,stream>>>(y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale);
else
{
if (D3 > 32*4)
{
uint threads = CEIL_DIV(D3, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
masked_softmax<T,8><<<dim3(D0,D1,D2),threads,0,stream>>>(y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale, thread2);
}
else if (D3 > 32*2)
masked_softmax<T,4><<<dim3(D0,D1,D2),32,0,stream>>>(y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32);
else if (D3 > 32*1)
masked_softmax<T,2><<<dim3(D0,D1,D2),32,0,stream>>>(y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32);
else
masked_softmax<T,1><<<dim3(D0,D1,D2),32,0,stream>>>(y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32);
}
return true;
}
template <typename T>
bool MaskedSoftmaxGrad(CUstream stream, T* dx, const T* dy, const T* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale)
{
if (D3 > 1024*4)
masked_softmax_grad2<T><<<dim3(D0,D1,D2),32,0,stream>>>(dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale);
else
{
if (D3 > 32*2)
{
uint threads = CEIL_DIV(D3, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
masked_softmax_grad<T,4><<<dim3(D0,D1,D2),threads,0,stream>>>(dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale, thread2);
}
else if (D3 > 32*1)
masked_softmax_grad<T,2><<<dim3(D0,D1,D2),32,0,stream>>>(dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32);
else
masked_softmax_grad<T,1><<<dim3(D0,D1,D2),32,0,stream>>>(dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32);
}
return true;
}
template bool MaskedSoftmax<float>(CUstream stream, float* y, const float* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedSoftmax<ehalf>(CUstream stream, ehalf* y, const ehalf* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedSoftmax<bhalf>(CUstream stream, bhalf* y, const bhalf* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedSoftmaxGrad<float>(CUstream stream, float* dx, const float* dy, const float* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedSoftmaxGrad<ehalf>(CUstream stream, ehalf* dx, const ehalf* dy, const ehalf* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
template bool MaskedSoftmaxGrad<bhalf>(CUstream stream, bhalf* dx, const bhalf* dy, const bhalf* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale);
// split_heads: (batch, pixel, head, state) -> (batch, head, pixel, state)
// merge_heads: (batch, head, pixel, state) -> (batch, pixel, head, state)
template <typename T, uint U>
__global__ void __launch_bounds__(32) transpose_0213(T* Y, const T* X, uint D123, uint D23, uint D13, uint D2, uint D3)
{
uint tid = threadIdx.x;
uint d2 = blockIdx.x;
uint d1 = blockIdx.y;
uint d0 = blockIdx.z;
uint offset = d0*D123 + tid;
uint offsetX = d1*D23 + d2*D3 + offset;
uint offsetY = d2*D13 + d1*D3 + offset;
#pragma unroll 1
while (d2 < D2)
{
#pragma unroll 1
for (uint d3 = tid, xi = offsetX, yi = offsetY; d3 < D3; d3 += U*32, xi += U*32, yi += U*32)
{
const T* Xi = add_ptr_u(X, xi);
T* Yi = add_ptr_u(Y, yi);
float x[U];
for (uint i = 0; i < U; i++)
x[i] = load(Xi, i*32, d3 + i*32 < D3);
for (uint i = 0; i < U; i++)
store(Yi, x[i], i*32, d3 + i*32 < D3);
}
offsetX += gridDim.x*D3;
offsetY += gridDim.x*D13;
d2 += gridDim.x;
}
}
template <typename T>
bool Transpose_0213(CUstream stream, T* y, const T* x, uint D0, uint D1, uint D2, uint D3)
{
// make sure each block has enough work to cover launch overhead
uint gridX = CEIL_DIV(D2, 4);
if (D3 <= 64)
transpose_0213<T,2><<<dim3(gridX,D1,D0),32,0,stream>>>(y, x, D1*D2*D3, D2*D3, D1*D3, D2, D3);
else
transpose_0213<T,4><<<dim3(gridX,D1,D0),32,0,stream>>>(y, x, D1*D2*D3, D2*D3, D1*D3, D2, D3);
return true;
}
template bool Transpose_0213<float>(CUstream stream, float* y, const float* x, uint D0, uint D1, uint D2, uint D3);
template bool Transpose_0213<ehalf>(CUstream stream, ehalf* y, const ehalf* x, uint D0, uint D1, uint D2, uint D3);
template bool Transpose_0213<bhalf>(CUstream stream, bhalf* y, const bhalf* x, uint D0, uint D1, uint D2, uint D3);
#endif
|
20955041023f73313feb0ae8042e53cb89f3d5f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernels {
namespace {
template <typename T, int D>
__global__ void _Assign(
const int N,
const int num_dims,
const SimpleArray<int, D> X_dims,
const SimpleArray<int, D> Y_strides,
const SimpleArray<int, D> X_starts,
const T* x,
T* y) {
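  // Note (illustrative): each thread takes a flat index xi into the source block,
  // unravels it into per-dimension coordinates via X_dims, offsets them by X_starts,
  // and re-linearizes with Y_strides; in effect x is copied into the sub-region of y
  // that starts at X_starts.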
CUDA_1D_KERNEL_LOOP(xi, N) {
int yi = 0, tmp = xi;
for (int d = num_dims - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(X_dims.data[d], tmp, &tmp, &r);
yi += (r + X_starts.data[d]) * Y_strides.data[d];
}
y[yi] = x[xi];
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void Assign<T, CUDAContext>( \
const int num_dims, \
const int64_t* x_dims, \
const int64_t* y_strides, \
const int64_t* starts, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
CUDA_TENSOR_DIMS_CHECK(num_dims); \
SimpleArray<int, CUDA_TENSOR_MAX_DIMS> X_dims, Y_strides, X_starts; \
const auto N = std::accumulate( \
x_dims, x_dims + num_dims, 1, std::multiplies<int64_t>()); \
for (int i = 0; i < num_dims; ++i) { \
X_dims.data[i] = x_dims[i]; \
Y_strides.data[i] = y_strides[i]; \
X_starts.data[i] = starts[i]; \
} \
hipLaunchKernelGGL(( _Assign), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
N, num_dims, X_dims, Y_strides, X_starts, x, y); \
}
DEFINE_KERNEL_LAUNCHER(bool);
DEFINE_KERNEL_LAUNCHER(uint8_t);
DEFINE_KERNEL_LAUNCHER(int8_t);
DEFINE_KERNEL_LAUNCHER(int);
DEFINE_KERNEL_LAUNCHER(int64_t);
DEFINE_KERNEL_LAUNCHER(float16);
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernels
} // namespace dragon
#endif // USE_ROCM
| 20955041023f73313feb0ae8042e53cb89f3d5f8.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernels {
namespace {
template <typename T, int D>
__global__ void _Assign(
const int N,
const int num_dims,
const SimpleArray<int, D> X_dims,
const SimpleArray<int, D> Y_strides,
const SimpleArray<int, D> X_starts,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(xi, N) {
int yi = 0, tmp = xi;
for (int d = num_dims - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(X_dims.data[d], tmp, &tmp, &r);
yi += (r + X_starts.data[d]) * Y_strides.data[d];
}
y[yi] = x[xi];
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void Assign<T, CUDAContext>( \
const int num_dims, \
const int64_t* x_dims, \
const int64_t* y_strides, \
const int64_t* starts, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
CUDA_TENSOR_DIMS_CHECK(num_dims); \
SimpleArray<int, CUDA_TENSOR_MAX_DIMS> X_dims, Y_strides, X_starts; \
const auto N = std::accumulate( \
x_dims, x_dims + num_dims, 1, std::multiplies<int64_t>()); \
for (int i = 0; i < num_dims; ++i) { \
X_dims.data[i] = x_dims[i]; \
Y_strides.data[i] = y_strides[i]; \
X_starts.data[i] = starts[i]; \
} \
_Assign<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
N, num_dims, X_dims, Y_strides, X_starts, x, y); \
}
DEFINE_KERNEL_LAUNCHER(bool);
DEFINE_KERNEL_LAUNCHER(uint8_t);
DEFINE_KERNEL_LAUNCHER(int8_t);
DEFINE_KERNEL_LAUNCHER(int);
DEFINE_KERNEL_LAUNCHER(int64_t);
DEFINE_KERNEL_LAUNCHER(float16);
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernels
} // namespace dragon
#endif // USE_CUDA
|
95815d288ed73d5f9aa5b361e9700c35582d764e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#define BLOCK_SIZE 8
#define GRID_SIZE 8
__global__ void ising_kernel(int *G,int *newG,double *w,int n){
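  // Note (illustrative): the lattice is n x n with periodic boundaries; each thread
  // updates the cells it owns at a stride of GRID_SIZE*BLOCK_SIZE in both directions,
  // summing the 5x5 neighbourhood weighted by w (wrap-around via (idx - off + n) % n)
  // and writing sign(weight) to newG, keeping the old spin when the sum is ~0.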
int id=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int xBlock = blockDim.x * blockIdx.x;
unsigned int yBlock = blockDim.y * blockIdx.y;
unsigned int xIndex = xBlock + threadIdx.x;
unsigned int yIndex = yBlock + threadIdx.y;
unsigned int tempX = xBlock + threadIdx.x;
unsigned int tempY = yBlock + threadIdx.y;
int iterations;
if (n%(BLOCK_SIZE*GRID_SIZE)==0){
iterations=n/(BLOCK_SIZE*GRID_SIZE);
}else{
iterations=n/(BLOCK_SIZE*GRID_SIZE)+1;
}
for(int i=0;i<iterations;i++){
xIndex=tempX+GRID_SIZE*BLOCK_SIZE*(i);
for(int j=0;j<iterations;j++){
yIndex=tempY+GRID_SIZE*BLOCK_SIZE*(j);
if(xIndex<n&&yIndex<n){
double weight=0;
for(int ibor=-2;ibor<3;ibor++){
for(int jbor=-2;jbor<3;jbor++){
weight+=w[(ibor+2)*5+jbor+2]*G[((xIndex-ibor+n)%n)*n +(yIndex-jbor+n)%n ];
}
}
if(weight<1e-4&&weight>-(1e-4)){
newG[xIndex*n+yIndex]=G[xIndex*n+yIndex];
}else if(weight>0){
newG[xIndex*n+yIndex]=1;
}else{
newG[xIndex*n+yIndex]=-1;
}
}
}
}
}
void ising( int *G, double *w, int k, int n){
int *newG,*G2;
double *w2;
hipMallocManaged(&newG,n*n*sizeof(int));
hipMallocManaged(&G2,n*n*sizeof(int));
hipMallocManaged(&w2,25*sizeof(double));
hipMemcpy( w2, w, 25*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy( G2, G, n*n*sizeof(int),hipMemcpyHostToDevice);
// double total_time=0;
for(int iter=0;iter<k;iter++){
bool repeat=true;
dim3 grid(GRID_SIZE, GRID_SIZE);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
// struct timeval startwtime, endwtime;
// gettimeofday (&startwtime, NULL);
hipLaunchKernelGGL(( ising_kernel), dim3(grid),dim3(block), 0, 0, G2,newG,w2,n);
hipDeviceSynchronize();
//gettimeofday (&endwtime, NULL);
//double time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6+ endwtime.tv_sec - startwtime.tv_sec);
// total_time+=time;
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
if(repeat&&newG[i*n+j]!=G2[i*n+j]){
repeat=false;
}
int temp=newG[i*n+j];
newG[i*n+j]=G2[i*n+j];
G2[i*n+j]=temp;
}
}
if(repeat){
break;
}
}
hipMemcpy(G, G2, n*n*sizeof(int),hipMemcpyDeviceToHost);
// printf("Seconds are %lf",total_time);
}
int main()
{
printf("=================START=========================\n");
double weight[]={0.004,0.016,0.026,0.016,0.004,0.016,0.071,0.117,0.071,0.016,0.026,0.117,0,0.117,0.026,0.016,0.071,0.117,0.071,0.016,0.004,0.016,0.026,0.016,0.004};
int n=517;
int X[n*n];
size_t size;
FILE *fp = fopen("conf-init.bin", "rb");
size = fread(X, sizeof(int), n * n, fp);
if(size!=n*n) exit(EXIT_FAILURE);
fclose(fp);
int k=11;
ising(X,weight,k,n);
int checkX[n*n];
FILE *fp2 = fopen("conf-11.bin", "rb");
size = fread(checkX, sizeof(int), n * n, fp2);
if(size!=n*n) exit(EXIT_FAILURE);
fclose(fp2);
bool flag=true;
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
if(checkX[i*n+j]!=X[i*n+j]){
printf("\nWRONG IMPLEMENTATION\n");
flag=false;
break;
}
}
if(!flag){
break;
}
}
if(flag){
printf("\nCORRECT IMPLEMENTATION\n");
}
printf("\n================END==============\n");
return 0;
} | 95815d288ed73d5f9aa5b361e9700c35582d764e.cu |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#define BLOCK_SIZE 8
#define GRID_SIZE 8
__global__ void ising_kernel(int *G,int *newG,double *w,int n){
int id=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int xBlock = blockDim.x * blockIdx.x;
unsigned int yBlock = blockDim.y * blockIdx.y;
unsigned int xIndex = xBlock + threadIdx.x;
unsigned int yIndex = yBlock + threadIdx.y;
unsigned int tempX = xBlock + threadIdx.x;
unsigned int tempY = yBlock + threadIdx.y;
int iterations;
if (n%(BLOCK_SIZE*GRID_SIZE)==0){
iterations=n/(BLOCK_SIZE*GRID_SIZE);
}else{
iterations=n/(BLOCK_SIZE*GRID_SIZE)+1;
}
for(int i=0;i<iterations;i++){
xIndex=tempX+GRID_SIZE*BLOCK_SIZE*(i);
for(int j=0;j<iterations;j++){
yIndex=tempY+GRID_SIZE*BLOCK_SIZE*(j);
if(xIndex<n&&yIndex<n){
double weight=0;
for(int ibor=-2;ibor<3;ibor++){
for(int jbor=-2;jbor<3;jbor++){
weight+=w[(ibor+2)*5+jbor+2]*G[((xIndex-ibor+n)%n)*n +(yIndex-jbor+n)%n ];
}
}
if(weight<1e-4&&weight>-(1e-4)){
newG[xIndex*n+yIndex]=G[xIndex*n+yIndex];
}else if(weight>0){
newG[xIndex*n+yIndex]=1;
}else{
newG[xIndex*n+yIndex]=-1;
}
}
}
}
}
void ising( int *G, double *w, int k, int n){
int *newG,*G2;
double *w2;
cudaMallocManaged(&newG,n*n*sizeof(int));
cudaMallocManaged(&G2,n*n*sizeof(int));
cudaMallocManaged(&w2,25*sizeof(double));
cudaMemcpy( w2, w, 25*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy( G2, G, n*n*sizeof(int),cudaMemcpyHostToDevice);
// double total_time=0;
for(int iter=0;iter<k;iter++){
bool repeat=true;
dim3 grid(GRID_SIZE, GRID_SIZE);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
// struct timeval startwtime, endwtime;
// gettimeofday (&startwtime, NULL);
ising_kernel<<<grid,block>>>(G2,newG,w2,n);
cudaDeviceSynchronize();
//gettimeofday (&endwtime, NULL);
//double time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6+ endwtime.tv_sec - startwtime.tv_sec);
// total_time+=time;
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
if(repeat&&newG[i*n+j]!=G2[i*n+j]){
repeat=false;
}
int temp=newG[i*n+j];
newG[i*n+j]=G2[i*n+j];
G2[i*n+j]=temp;
}
}
if(repeat){
break;
}
}
cudaMemcpy(G, G2, n*n*sizeof(int),cudaMemcpyDeviceToHost);
// printf("Seconds are %lf",total_time);
}
int main()
{
printf("=================START=========================\n");
double weight[]={0.004,0.016,0.026,0.016,0.004,0.016,0.071,0.117,0.071,0.016,0.026,0.117,0,0.117,0.026,0.016,0.071,0.117,0.071,0.016,0.004,0.016,0.026,0.016,0.004};
int n=517;
int X[n*n];
size_t size;
FILE *fp = fopen("conf-init.bin", "rb");
size = fread(X, sizeof(int), n * n, fp);
if(size!=n*n) exit(EXIT_FAILURE);
fclose(fp);
int k=11;
ising(X,weight,k,n);
int checkX[n*n];
FILE *fp2 = fopen("conf-11.bin", "rb");
size = fread(checkX, sizeof(int), n * n, fp2);
if(size!=n*n) exit(EXIT_FAILURE);
fclose(fp2);
bool flag=true;
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
if(checkX[i*n+j]!=X[i*n+j]){
printf("\nWRONG IMPLEMENTATION\n");
flag=false;
break;
}
}
if(!flag){
break;
}
}
if(flag){
printf("\nCORRECT IMPLEMENTATION\n");
}
printf("\n================END==============\n");
return 0;
} |
f9bc079e608a8fa2117294753ffc5a422733d5b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_plugin_helper.h"
using namespace himan;
template <typename Type>
__device__ Type VVMS(Type VV, Type T, Type P)
{
ASSERT(P < 1200 || IsMissing(P));
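	// Note (illustrative): converts pressure-coordinate vertical velocity omega (Pa/s)
	// to geometric w (m/s) via w ~ -omega * R * T / (g * p); 287 is the dry-air gas
	// constant and the factor 100 turns P from hPa into Pa.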
return 287 * -VV * T / (static_cast<Type>(himan::constants::kG) * 100 * P);
}
template <typename T>
__global__ void VVMSKernel(const T* __restrict__ d_t, const T* __restrict__ d_vv, const T* __restrict__ d_p,
T* __restrict__ d_vv_ms, T vv_scale, size_t N)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
d_vv_ms[idx] = vv_scale * VVMS<T>(d_vv[idx], d_t[idx], d_p[idx]);
}
}
template <typename T>
__global__ void VVMSKernel(const T* __restrict__ d_t, const T* __restrict__ d_vv, const T P, T* __restrict__ d_vv_ms,
T vv_scale, size_t N)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
d_vv_ms[idx] = vv_scale * VVMS<T>(d_vv[idx], d_t[idx], P);
}
}
namespace vvmsgpu
{
void Process(std::shared_ptr<const plugin_configuration> conf, std::shared_ptr<info<float>> myTargetInfo)
{
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
// Allocate device arrays
float* d_t = 0;
float* d_p = 0;
float* d_vv = 0;
float* d_vv_ms = 0;
const size_t N = myTargetInfo->SizeLocations();
const size_t memsize = N * sizeof(float);
auto TInfo = cuda::Fetch<float>(conf, myTargetInfo->Time(), myTargetInfo->Level(), param("T-K"),
myTargetInfo->ForecastType());
auto VVInfo = cuda::Fetch<float>(conf, myTargetInfo->Time(), myTargetInfo->Level(), param("VV-PAS"),
myTargetInfo->ForecastType());
if (!TInfo || !VVInfo)
{
return;
}
CUDA_CHECK(hipMalloc((void**)&d_vv_ms, memsize));
CUDA_CHECK(hipMalloc((void**)&d_t, memsize));
CUDA_CHECK(hipMalloc((void**)&d_vv, memsize));
cuda::PrepareInfo<float>(TInfo, d_t, stream, conf->UseCacheForReads());
cuda::PrepareInfo<float>(VVInfo, d_vv, stream, conf->UseCacheForReads());
// dims
const int blockSize = 512;
const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1);
bool isPressureLevel = (myTargetInfo->Level().Type() == kPressure);
const float vv_scale = (myTargetInfo->Param().Name() == "VV-MMS") ? 1000. : 1.;
// "SetAB"
if (myTargetInfo->Level().Type() == kHybrid)
{
const size_t paramIndex = myTargetInfo->Index<param>();
for (myTargetInfo->Reset<param>(); myTargetInfo->Next<param>();)
{
myTargetInfo->Set<level>(TInfo->Level());
}
myTargetInfo->Index<param>(paramIndex);
}
if (isPressureLevel == false)
{
CUDA_CHECK(hipMalloc((void**)&d_p, memsize));
auto PInfo = cuda::Fetch<float>(conf, myTargetInfo->Time(), myTargetInfo->Level(), param("P-HPA"),
myTargetInfo->ForecastType());
if (!PInfo)
{
return;
}
cuda::PrepareInfo(PInfo, d_p, stream, conf->UseCacheForReads());
hipLaunchKernelGGL(( VVMSKernel<float>), dim3(gridSize), dim3(blockSize), 0, stream, d_t, d_vv, d_p, d_vv_ms, vv_scale, N);
}
else
{
hipLaunchKernelGGL(( VVMSKernel<float>)
, dim3(gridSize), dim3(blockSize), 0, stream, d_t, d_vv, myTargetInfo->Level().Value(), d_vv_ms, vv_scale, N);
}
cuda::ReleaseInfo<float>(myTargetInfo, d_vv_ms, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipFree(d_t));
CUDA_CHECK(hipFree(d_vv));
CUDA_CHECK(hipFree(d_vv_ms));
if (d_p)
{
// himan::ReleaseInfo(opts.p);
CUDA_CHECK(hipFree(d_p));
}
CUDA_CHECK(hipStreamDestroy(stream));
}
}
| f9bc079e608a8fa2117294753ffc5a422733d5b5.cu | #include "cuda_plugin_helper.h"
using namespace himan;
template <typename Type>
__device__ Type VVMS(Type VV, Type T, Type P)
{
ASSERT(P < 1200 || IsMissing(P));
return 287 * -VV * T / (static_cast<Type>(himan::constants::kG) * 100 * P);
}
template <typename T>
__global__ void VVMSKernel(const T* __restrict__ d_t, const T* __restrict__ d_vv, const T* __restrict__ d_p,
T* __restrict__ d_vv_ms, T vv_scale, size_t N)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
d_vv_ms[idx] = vv_scale * VVMS<T>(d_vv[idx], d_t[idx], d_p[idx]);
}
}
template <typename T>
__global__ void VVMSKernel(const T* __restrict__ d_t, const T* __restrict__ d_vv, const T P, T* __restrict__ d_vv_ms,
T vv_scale, size_t N)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
d_vv_ms[idx] = vv_scale * VVMS<T>(d_vv[idx], d_t[idx], P);
}
}
namespace vvmsgpu
{
void Process(std::shared_ptr<const plugin_configuration> conf, std::shared_ptr<info<float>> myTargetInfo)
{
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
// Allocate device arrays
float* d_t = 0;
float* d_p = 0;
float* d_vv = 0;
float* d_vv_ms = 0;
const size_t N = myTargetInfo->SizeLocations();
const size_t memsize = N * sizeof(float);
auto TInfo = cuda::Fetch<float>(conf, myTargetInfo->Time(), myTargetInfo->Level(), param("T-K"),
myTargetInfo->ForecastType());
auto VVInfo = cuda::Fetch<float>(conf, myTargetInfo->Time(), myTargetInfo->Level(), param("VV-PAS"),
myTargetInfo->ForecastType());
if (!TInfo || !VVInfo)
{
return;
}
CUDA_CHECK(cudaMalloc((void**)&d_vv_ms, memsize));
CUDA_CHECK(cudaMalloc((void**)&d_t, memsize));
CUDA_CHECK(cudaMalloc((void**)&d_vv, memsize));
cuda::PrepareInfo<float>(TInfo, d_t, stream, conf->UseCacheForReads());
cuda::PrepareInfo<float>(VVInfo, d_vv, stream, conf->UseCacheForReads());
// dims
const int blockSize = 512;
const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1);
bool isPressureLevel = (myTargetInfo->Level().Type() == kPressure);
const float vv_scale = (myTargetInfo->Param().Name() == "VV-MMS") ? 1000. : 1.;
// "SetAB"
if (myTargetInfo->Level().Type() == kHybrid)
{
const size_t paramIndex = myTargetInfo->Index<param>();
for (myTargetInfo->Reset<param>(); myTargetInfo->Next<param>();)
{
myTargetInfo->Set<level>(TInfo->Level());
}
myTargetInfo->Index<param>(paramIndex);
}
if (isPressureLevel == false)
{
CUDA_CHECK(cudaMalloc((void**)&d_p, memsize));
auto PInfo = cuda::Fetch<float>(conf, myTargetInfo->Time(), myTargetInfo->Level(), param("P-HPA"),
myTargetInfo->ForecastType());
if (!PInfo)
{
return;
}
cuda::PrepareInfo(PInfo, d_p, stream, conf->UseCacheForReads());
VVMSKernel<float><<<gridSize, blockSize, 0, stream>>>(d_t, d_vv, d_p, d_vv_ms, vv_scale, N);
}
else
{
VVMSKernel<float>
<<<gridSize, blockSize, 0, stream>>>(d_t, d_vv, myTargetInfo->Level().Value(), d_vv_ms, vv_scale, N);
}
cuda::ReleaseInfo<float>(myTargetInfo, d_vv_ms, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaFree(d_t));
CUDA_CHECK(cudaFree(d_vv));
CUDA_CHECK(cudaFree(d_vv_ms));
if (d_p)
{
// himan::ReleaseInfo(opts.p);
CUDA_CHECK(cudaFree(d_p));
}
CUDA_CHECK(cudaStreamDestroy(stream));
}
}
|
bb6b93c3e98def518d08720fb08fc0d0f665d31c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define BLOCKSIZE 1024
__global__ void dkernel() {
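    // Note (illustrative): threads 1 and 100 each add to the block-shared counter and
    // the __syncthreads() barriers order init -> +1 -> +2 -> print, so each of the two
    // blocks is expected to print s=3.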
__shared__ unsigned s;
if (threadIdx.x == 0) s = 0;
__syncthreads();
if (threadIdx.x == 1) s += 1;
__syncthreads();
if (threadIdx.x == 100) s += 2;
__syncthreads();
if (threadIdx.x == 0) printf("s=%d\n", s);
}
int main() {
int i;
for (i = 0; i < 10; ++i) {
hipLaunchKernelGGL(( dkernel), dim3(2), dim3(BLOCKSIZE), 0, 0, );
hipDeviceSynchronize();
}
}
| bb6b93c3e98def518d08720fb08fc0d0f665d31c.cu | #include <stdio.h>
#include <cuda.h>
#define BLOCKSIZE 1024
__global__ void dkernel() {
__shared__ unsigned s;
if (threadIdx.x == 0) s = 0;
__syncthreads();
if (threadIdx.x == 1) s += 1;
__syncthreads();
if (threadIdx.x == 100) s += 2;
__syncthreads();
if (threadIdx.x == 0) printf("s=%d\n", s);
}
int main() {
int i;
for (i = 0; i < 10; ++i) {
dkernel<<<2, BLOCKSIZE>>>();
cudaDeviceSynchronize();
}
}
|
58ddfc4b45cd9d1c1051a38dde94373c57caf12d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* author gumboshi <[email protected]> */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include "pzdr_def.h"
#include "pzdr_saidai.h"
#define LOCALRANKINGLENGTH 5
#ifndef NUM_BLOCK
#define NUM_BLOCK 52
#endif
#define NUM_THREAD 256
#define CUDA_SAFE_CALL(func) \
do { \
hipError_t err = (func); \
if (err != hipSuccess) { \
fprintf(stderr, "[Error] %s (error code: %d) at %s line %d\n", hipGetErrorString(err), err, __FILE__, __LINE__); \
exit(err); \
} \
} while(0)
// __device__ inline void init_combo_info(int *color_combo, int *num_drops_combo, int *isLine_combo, int combo_length){
// int i;
// for(i = 0;i < combo_length;i++){
// color_combo[i] = 0;
// num_drops_combo[i] = 0;
// isLine_combo[i] = 0;
// }
// }
__device__ void print_table_dev(unsigned long long *color_table, int width, int hight){
int i, j;
for(i = 1;i <= hight;i++){
for(j = 1;j <= width;j++){
unsigned long long p = (1L << ((width*2)*i+j));
if((color_table[0] & p) == p)
printf("G ");
else if((color_table[1] & p) == p)
printf("Y ");
else
printf("? ");
}
printf("\n");
}
printf("\n");
}
__device__ void print_table2_dev(unsigned long long color_table, int width, int hight){
int i, j;
for(i = 1;i <= hight;i++){
for(j = 1;j <= width;j++){
unsigned long long p = (1L << ((width*2)*i+j));
printf("%d ", (color_table & p) == p);
}
printf("\n");
}
printf("\n");
}
#if NUM_COLORS==2
#define WID 7
__device__ inline void generate_table_small_dev(unsigned long long tableID, unsigned long long *color_table){
unsigned long long b0, b1, b2, b3;
unsigned long long ID = tableID;
b0 = ID & 31;
b1 = (ID >> 5 ) & 31;
b2 = (ID >> 10) & 31;
b3 = (ID >> 15) & 31;
color_table[0] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1));
ID = ~ID;
b0 = ID & 31;
b1 = (ID >> 5 ) & 31;
b2 = (ID >> 10) & 31;
b3 = (ID >> 15) & 31;
color_table[1] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1));
}
#undef WID
#define WID 8
__device__ inline void generate_table_normal_dev(unsigned long long tableID, unsigned long long *color_table){
unsigned long long b0, b1, b2, b3, b4;
unsigned long long ID = tableID;
b0 = ID & 63;
b1 = (ID >> 6 ) & 63;
b2 = (ID >> 12) & 63;
b3 = (ID >> 18) & 63;
b4 = (ID >> 24) & 63;
color_table[0] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1)) | (b4 << (WID*5+1));
ID = ~ID;
b0 = ID & 63;
b1 = (ID >> 6 ) & 63;
b2 = (ID >> 12) & 63;
b3 = (ID >> 18) & 63;
b4 = (ID >> 24) & 63;
color_table[1] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1)) | (b4 << (WID*5+1));
}
#undef WID
#define WID 9
__device__ inline void generate_table_big_dev(unsigned long long tableID, unsigned long long *color_table){
unsigned long long b0, b1, b2, b3, b4, b5;
unsigned long long ID = tableID;
b0 = ID & 127;
b1 = (ID >> 7 ) & 127;
b2 = (ID >> 14) & 127;
b3 = (ID >> 21) & 127;
b4 = (ID >> 28) & 127;
b5 = (ID >> 35) & 127;
color_table[0] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1))
| (b4 << (WID*5+1)) | (b5 << (WID*6+1));
ID = ~ID;
b0 = ID & 127;
b1 = (ID >> 7 ) & 127;
b2 = (ID >> 14) & 127;
b3 = (ID >> 21) & 127;
b4 = (ID >> 28) & 127;
b5 = (ID >> 35) & 127;
color_table[1] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1))
| (b4 << (WID*5+1)) | (b5 << (WID*6+1));
}
#undef WID
#endif
#if NUM_COLORS==2
#define WID 7
__device__ inline int one_step_small_dev(unsigned long long *color_table, int *color_combo, int *num_drops_combo, int *isLine_combo, int finish){
  // bit-board layout: one bit per cell, row stride = WID ("width" runs left to
  // right, "height" top to bottom); example bit pattern (a cross-shaped group):
// 000000000
// 000000000
// 000000000
// 000000000
// 000000000
// 000000010
// 000000111
// 000000010
unsigned long long isErase_tables[NUM_COLORS];
int combo_counter = finish;
int num_c;
unsigned long long tmp, tmp2;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long n, w, s, e;
n = color >> WID;
w = color >> 1;
s = color << WID;
e = color << 1;
tmp = (color & n & s);
tmp = tmp | (tmp >> WID) | (tmp << WID);
tmp2 = (color & w & e);
tmp2 = tmp2 | (tmp2 >> 1 ) | (tmp2 << 1 );
isErase_tables[num_c] = (color & tmp) | (color & tmp2);
}
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long isErase_table = isErase_tables[num_c];
color_table[num_c] = color_table[num_c] & (~isErase_table);
unsigned long long p = 1L << (WID+1);
while(isErase_table) {
while(!(isErase_table & p)){
p = p << 1;
}
tmp = p;
color_combo[combo_counter] = num_c;
unsigned long long tmp_old;
do{
tmp_old = tmp;
tmp = (tmp | (tmp << 1) | (tmp >> 1) | (tmp << WID) | (tmp >> WID)) & isErase_table;
}while(tmp_old != tmp);
isErase_table = isErase_table & (~tmp);
// int b1, b2, b3, b4, b5, b6;
// b1 = tmp >> (WID*1+1) & 127;
// b2 = tmp >> (WID*2+1) & 127;
// b3 = tmp >> (WID*3+1) & 127;
// b4 = tmp >> (WID*4+1) & 127;
// b5 = tmp >> (WID*5+1) & 127;
// b6 = tmp >> (WID*6+1) & 127;
// num_drops_combo[combo_counter] = bit_count_table[b1] + bit_count_table[b2]
// + bit_count_table[b3] + bit_count_table[b4] + bit_count_table[b5] + bit_count_table[b6];
unsigned long long bits = tmp;
bits = (bits & 0x5555555555555555LU) + (bits >> 1 & 0x5555555555555555LU);
bits = (bits & 0x3333333333333333LU) + (bits >> 2 & 0x3333333333333333LU);
bits = bits + (bits >> 4) & 0x0F0F0F0F0F0F0F0FLU;
bits = bits + (bits >> 8);
bits = bits + (bits >> 16);
bits = bits + (bits >> 32) & 0x0000007F;
num_drops_combo[combo_counter] = bits;
isLine_combo[combo_counter] = ((tmp >> (WID +1)) & 31) == 31
|| ((tmp >> (WID*2+1)) & 31) == 31
|| ((tmp >> (WID*3+1)) & 31) == 31
|| ((tmp >> (WID*4+1)) & 31) == 31;
// bits = tmp;
// bits = bits & (bits >> 1);
// bits = bits & (bits >> 2);
// bits = bits & (bits >> 3);
// isLine_combo[combo_counter] = ((bits & 36099303471055872L) != 0);
combo_counter++;
}
}
if(finish != combo_counter){
unsigned long long exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
unsigned long long exist_org;
do{
exist_org = exist_table;
unsigned long long exist_u = (exist_table >> WID) | 16642998272L;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long color_u = color & exist_u;
unsigned long long color_d = (color << WID) & (~exist_table) & (~2130303778816L);
color_table[num_c] = color_u | color_d;
}
exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
}while(exist_org != exist_table);
}
return combo_counter;
}
#undef WID
#define WID 8
__device__ inline int one_step_normal_dev(unsigned long long *color_table, int *color_combo, int *num_drops_combo, int *isLine_combo, int finish){
  // bit-board layout: one bit per cell, row stride = WID ("width" runs left to
  // right, "height" top to bottom); example bit pattern (a cross-shaped group):
// 000000000
// 000000000
// 000000000
// 000000000
// 000000000
// 000000010
// 000000111
// 000000010
unsigned long long isErase_tables[NUM_COLORS];
int combo_counter = finish;
int num_c;
unsigned long long tmp, tmp2;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long n, w, s, e;
n = color >> WID;
w = color >> 1;
s = color << WID;
e = color << 1;
tmp = (color & n & s);
tmp = tmp | (tmp >> WID) | (tmp << WID);
tmp2 = (color & w & e);
tmp2 = tmp2 | (tmp2 >> 1 ) | (tmp2 << 1 );
isErase_tables[num_c] = (color & tmp) | (color & tmp2);
}
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long isErase_table = isErase_tables[num_c];
color_table[num_c] = color_table[num_c] & (~isErase_table);
unsigned long long p = 1L << (WID+1);
while(isErase_table) {
while(!(isErase_table & p)){
p = p << 1;
}
tmp = p;
color_combo[combo_counter] = num_c;
unsigned long long tmp_old;
do{
tmp_old = tmp;
tmp = (tmp | (tmp << 1) | (tmp >> 1) | (tmp << WID) | (tmp >> WID)) & isErase_table;
}while(tmp_old != tmp);
isErase_table = isErase_table & (~tmp);
unsigned long long bits = tmp;
bits = (bits & 0x5555555555555555LU) + (bits >> 1 & 0x5555555555555555LU);
bits = (bits & 0x3333333333333333LU) + (bits >> 2 & 0x3333333333333333LU);
bits = bits + (bits >> 4) & 0x0F0F0F0F0F0F0F0FLU;
bits = bits + (bits >> 8);
bits = bits + (bits >> 16);
bits = bits + (bits >> 32) & 0x0000007F;
num_drops_combo[combo_counter] = bits;
isLine_combo[combo_counter] = ((tmp >> (WID +1)) & 63) == 63
|| ((tmp >> (WID*2+1)) & 63) == 63
|| ((tmp >> (WID*3+1)) & 63) == 63
|| ((tmp >> (WID*4+1)) & 63) == 63
|| ((tmp >> (WID*5+1)) & 63) == 63;
combo_counter++;
}
}
if(finish != combo_counter){
unsigned long long exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
unsigned long long exist_org;
do{
exist_org = exist_table;
unsigned long long exist_u = (exist_table >> WID) | 138538465099776L;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long color_u = color & exist_u;
unsigned long long color_d = (color << WID) & (~exist_table) & (~35465847065542656L);
color_table[num_c] = color_u | color_d;
}
exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
}while(exist_org != exist_table);
}
return combo_counter;
}
#undef WID
#define WID 9
__device__ inline int one_step_big_dev(unsigned long long *color_table, int *color_combo, int *num_drops_combo, int *isLine_combo, int finish){
  // bit-board layout: one bit per cell, row stride = WID ("width" runs left to
  // right, "height" top to bottom); example bit pattern (a cross-shaped group):
// 000000000
// 000000000
// 000000000
// 000000000
// 000000000
// 000000010
// 000000111
// 000000010
unsigned long long isErase_tables[NUM_COLORS];
int combo_counter = finish;
int num_c;
unsigned long long tmp, tmp2;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
//
unsigned long long n, w, s, e;
n = color >> WID;
w = color >> 1;
s = color << WID;
e = color << 1;
tmp = (color & n & s);
tmp = tmp | (tmp >> WID) | (tmp << WID);
tmp2 = (color & w & e);
tmp2 = tmp2 | (tmp2 >> 1 ) | (tmp2 << 1 );
isErase_tables[num_c] = (color & tmp) | (color & tmp2);
//isErase_table = (color & tmp) | (color & tmp2);
}
// #if NUM_COLORS==2
// if(isErase_tables[0] == isErase_tables[1])
// return combo_counter;
  //   // early exit when every isErase_tables[i] is empty (all bits 0);
  //   // left disabled: the extra branch causes warp divergence on the GPU
// #endif
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long isErase_table = isErase_tables[num_c];
color_table[num_c] = color_table[num_c] & (~isErase_table);
unsigned long long p = 1L << (WID+1);
while(isErase_table) {
while(!(isErase_table & p)){
p = p << 1;
}
tmp = p;
color_combo[combo_counter] = num_c;
unsigned long long tmp_old;
do{
tmp_old = tmp;
tmp = (tmp | (tmp << 1) | (tmp >> 1) | (tmp << WID) | (tmp >> WID)) & isErase_table;
}while(tmp_old != tmp);
isErase_table = isErase_table & (~tmp);
// int b1, b2, b3, b4, b5, b6;
// b1 = tmp >> (WID*1+1) & 127;
// b2 = tmp >> (WID*2+1) & 127;
// b3 = tmp >> (WID*3+1) & 127;
// b4 = tmp >> (WID*4+1) & 127;
// b5 = tmp >> (WID*5+1) & 127;
// b6 = tmp >> (WID*6+1) & 127;
// num_drops_combo[combo_counter] = bit_count_table[b1] + bit_count_table[b2]
// + bit_count_table[b3] + bit_count_table[b4] + bit_count_table[b5] + bit_count_table[b6];
// unsigned long long bits = tmp;
// bits = (bits & 0x5555555555555555) + (bits >> 1 & 0x5555555555555555);
// bits = (bits & 0x3333333333333333) + (bits >> 2 & 0x3333333333333333);
// bits = (bits & 0x0f0f0f0f0f0f0f0f) + (bits >> 4 & 0x0f0f0f0f0f0f0f0f);
// bits = (bits & 0x00ff00ff00ff00ff) + (bits >> 8 & 0x00ff00ff00ff00ff);
// bits = (bits & 0x0000ffff0000ffff) + (bits >>16 & 0x0000ffff0000ffff);
// num_drops_combo[combo_counter] = (bits & 0x00000000ffffffff) + (bits >>32 & 0x00000000ffffffff);
// bits = (bits & 0x5555555555555555LU) + (bits >> 1 & 0x5555555555555555LU);
// bits = (bits & 0x3333333333333333LU) + (bits >> 2 & 0x3333333333333333LU);
// bits = bits + (bits >> 4) & 0x0F0F0F0F0F0F0F0FLU;
// bits = bits + (bits >> 8);
// bits = bits + (bits >> 16);
// bits = bits + (bits >> 32) & 0x0000007F;
// num_drops_combo[combo_counter] = bits;
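      // Population count done on two 32-bit halves; the full 64-bit SWAR version and the __popcll intrinsic are kept above/below as commented-out alternatives.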
unsigned int u = tmp >> 32;
unsigned int l = tmp;
u = (u & 0x55555555) + (u >> 1 & 0x55555555);
u = (u & 0x33333333) + (u >> 2 & 0x33333333);
u = u + (u >> 4) & 0x0F0F0F0F;
u = u + (u >> 8);
u = u + (u >> 16) & 0x0000007F;
l = (l & 0x55555555) + (l >> 1 & 0x55555555);
l = (l & 0x33333333) + (l >> 2 & 0x33333333);
l = l + (l >> 4) & 0x0F0F0F0F;
l = l + (l >> 8);
l = l + (l >> 16) & 0x0000007F;
num_drops_combo[combo_counter] = u + l;
// num_drops_combo[combo_counter] = __popcll(tmp);
isLine_combo[combo_counter] = ((tmp >> (WID +1)) & 127) == 127
|| ((tmp >> (WID*2+1)) & 127) == 127
|| ((tmp >> (WID*3+1)) & 127) == 127
|| ((tmp >> (WID*4+1)) & 127) == 127
|| ((tmp >> (WID*5+1)) & 127) == 127
|| ((tmp >> (WID*6+1)) & 127) == 127;
// bits = tmp;
// bits = bits & (bits >> 1);
// bits = bits & (bits >> 2);
// bits = bits & (bits >> 3);
// isLine_combo[combo_counter] = ((bits & 36099303471055872L) != 0);
combo_counter++;
}
}
if(finish != combo_counter){
unsigned long long exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
unsigned long long exist_org;
do{
exist_org = exist_table;
unsigned long long exist_u = (exist_table >> WID) | 4575657221408423936L;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long color_u = color & exist_u;
unsigned long long color_d = (color << WID) & (~exist_table);
color_table[num_c] = color_u | color_d;
}
exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
}while(exist_org != exist_table);
}
return combo_counter;
}
#undef WID
#endif
__device__ inline float return_attack_dev(int combo_counter, int *color_combo, int *num_drops_combo, int *isLine_combo, int LS, int strong, float line, float way){
// used for simulation mode
// [FIXME] check only Green attack
int num_line = 0;
float AT = 1.0;
float attack = 0;
float l = 1.0;
int i;
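  // Base damage per main-colour combo: 1 + 0.25*(drops-3); an exactly-4-drop match is multiplied by the two-pronged-attack bonus "way", and enhanced drops ("strong") add 6% per drop.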
for(i = 0;i < combo_counter;i++){
int color = color_combo[i];
float drop_pwr;
switch(color){
case MAINCOLOR:
drop_pwr = num_drops_combo[i]==4 ? (1+0.25*(num_drops_combo[i]-3))*way : 1+0.25*(num_drops_combo[i]-3);
if(strong)
drop_pwr = drop_pwr * (1+0.06*num_drops_combo[i]);
attack += drop_pwr;
if(isLine_combo[i]) num_line++;
break;
default:
break;
}
}
int count;
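  // Leader-skill multiplier l, applied once to the total depending on the leader (HERO/SONIA/KRISHNA/BASTET/LAKU_PARU).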
switch(LS){
case HERO:
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 8){
l = 16;
}else if(num_drops == 7 && l < 12.25){
l = 12.25;
}else if(num_drops == 6 && l < 9){
l = 9;
}
}
}
break;
case SONIA:
if(combo_counter < 6)
l = 6.25;
else
l = 2.75*2.75;
break;
case KRISHNA:
count = 0;
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR == color_combo[i]){
count++;
int num_drops = num_drops_combo[i];
if(num_drops == 5)
l = 2.25;
}
}
if(count == 2)
l = l * 3 * 3;
else if(count >= 3)
l = l * 4.5 * 4.5;
else
l = 1;
break;
case BASTET:
if(combo_counter == 5)
l = 3.0*3.0;
else if(combo_counter == 6)
l = 3.5*3.5;
else if(combo_counter >= 7)
l = 4.0*4.0;
else
l = 1.0;
break;
case LAKU_PARU:
l = 6.25;
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR != color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 5)
l = 25;
}
}
break;
default:
break;
}
attack = attack * (1+0.25*(combo_counter-1)) * AT * l * (1+0.1*line*num_line) ;
return attack;
}
__device__ inline void return_attack_double_dev(float *power, const int combo_counter, int *const color_combo, int * const num_drops_combo, int * const isLine_combo, const int LS, const int strong, const float line, const float way){
// used for simulation mode
// [FIXME] check only Green attack
const float AT = 1.0;
int num_line_m = 0;
float attack_m = 0;
int num_line_s = 0;
float attack_s = 0;
float l_m = 1.0;
float l_s = 1.0;
int i;
float drop_pwr;
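  // Same scoring as return_attack_dev, but the main colour and the other (sub) colour are accumulated separately, so a single pass scores both the board and its colour inverse.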
for(i = 0;i < combo_counter;i++){
int color = color_combo[i];
if(color == MAINCOLOR){
drop_pwr = num_drops_combo[i]==4 ? (1+0.25*(num_drops_combo[i]-3))*way : 1+0.25*(num_drops_combo[i]-3);
if(strong)
drop_pwr = drop_pwr * (1+0.06*num_drops_combo[i]);
attack_m += drop_pwr;
if(isLine_combo[i]) num_line_m++;
}else{
drop_pwr = num_drops_combo[i]==4 ? (1+0.25*(num_drops_combo[i]-3))*way : 1+0.25*(num_drops_combo[i]-3);
if(strong)
drop_pwr = drop_pwr * (1+0.06*num_drops_combo[i]);
attack_s += drop_pwr;
if(isLine_combo[i]) num_line_s++;
}
}
int count_m;
int count_s;
switch(LS){
case HERO:
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 8){
l_m = 16;
}else if(num_drops == 7 && l_m < 12.25){
l_m = 12.25;
}else if(num_drops == 6 && l_m < 9){
l_m = 9;
}
}
if(SUBCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 8){
l_s = 16;
}else if(num_drops == 7 && l_s < 12.25){
l_s = 12.25;
}else if(num_drops == 6 && l_s < 9){
l_s = 9;
}
}
}
break;
case SONIA:
if(combo_counter < 6){
l_m = 6.25;
l_s = 6.25;
}else{
l_m = 2.75*2.75;
l_s = 2.75*2.75;
}
break;
case KRISHNA:
count_m = 0;
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR == color_combo[i]){
count_m++;
int num_drops = num_drops_combo[i];
if(num_drops == 5)
l_m = 2.25;
}
}
if(count_m == 2)
l_m = l_m * 3 * 3;
else if(count_m >= 3)
l_m = l_m * 4.5 * 4.5;
else
l_m = 1;
count_s = 0;
for(i = 0;i < combo_counter;i++){
if(SUBCOLOR == color_combo[i]){
count_s++;
int num_drops = num_drops_combo[i];
if(num_drops == 5)
l_s = 2.25;
}
}
if(count_s == 2)
l_s = l_s * 3 * 3;
else if(count_s >= 3)
l_s = l_s * 4.5 * 4.5;
else
l_s = 1;
break;
case BASTET:
if(combo_counter == 5){
l_m = 3.0*3.0;
l_s = 3.0*3.0;
}else if(combo_counter == 6){
l_m = 3.5*3.5;
l_s = 3.5*3.5;
}else if(combo_counter >= 7){
l_m = 4.0*4.0;
l_s = 4.0*4.0;
}else{
l_m = 1.0;
l_s = 1.0;
}
break;
case LAKU_PARU:
l_m = 6.25;
l_s = 6.25;
for(i = 0;i < combo_counter;i++){
if(SUBCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 5)
l_m = 25;
}
if(MAINCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 5)
l_s = 25;
}
}
break;
default:
break;
}
power[0] = attack_m * (1+0.25*(combo_counter-1)) * AT * l_m * (1+0.1*line*num_line_m);
power[1] = attack_s * (1+0.25*(combo_counter-1)) * AT * l_s * (1+0.1*line*num_line_s);
}
#define COMBO_LENGTH 7
#define REVERSE_LENGTH 32
// __global__ void simulate_all_kernel_small(int num_attacks, const int * __restrict__ num_patterns10, unsigned long long *maxID, float *maxPower, float line, float way, const int * __restrict__ tableID_prefix10, int *tableID_table10, const int * __restrict__ reversed_bit_table, /*const int * __restrict__ bit_count_table,*/ int LS, int strong){
// int tid = threadIdx.x;
// int bid = blockIdx.x;
// int gdim = gridDim.x;
// int bdim = blockDim.x;
// int color_combo[COMBO_LENGTH];
// int num_drops_combo[COMBO_LENGTH];
// int isLine_combo[COMBO_LENGTH];
// int i,j,k;
// int rank = LOCALRANKINGLENGTH;
// float MP[LOCALRANKINGLENGTH];
// unsigned long long MID[LOCALRANKINGLENGTH];
// unsigned long long tableID = 0;
// unsigned long long color_table[NUM_COLORS];
// int num_c;
// for(num_c = 0;num_c < NUM_COLORS;num_c++){
// color_table[num_c] = 0;
// }
// for(i = 0;i < rank;i++){
// MID[i] = 0;
// MP[i] = 0.0;
// }
// int u, l, uu, ll;
// int bit_num[4];
// for(u = 0;u <= num_attacks;u++){
// l = num_attacks - u;
// if(u <= 10 && l <= 10){
// int uoffset = tableID_prefix10[u];
// int loffset = tableID_prefix10[l];
// for(uu = bid;uu < num_patterns10[u];uu+=gdim){
// unsigned long long upperID = (unsigned long long)tableID_table10[uu+uoffset];
// for(ll = tid;ll < num_patterns10[l];ll+=bdim){
// unsigned long long lowerID = (unsigned long long)tableID_table10[ll+loffset];
// tableID = (upperID << 10) | lowerID;
// unsigned long long reversed = 0;
// for(i = 0;i < 4; i++){
// bit_num[i] = (int)((tableID >> (5*i) ) & (REVERSE_LENGTH-1));
// reversed += ((unsigned long long)reversed_bit_table[bit_num[i]]) << (5*i);
// }
// if(tableID <= reversed){
// //init_combo_info(color_combo, num_drops_combo, isLine_combo, COMBO_LENGTH);
// int combo_counter = 0;
// //tableID = 1103874885640L;
// //tableID = 42656280L;
// generate_table_small_dev(tableID, color_table);
// int returned_combo_counter = 0;
// do{
// // if(blockDim.x * blockIdx.x + threadIdx.x == 0){
// // printf("ID %lld\n",tableID);
// // print_table(color_table);
// // print_table2(color_table[0]);
// // print_table2(color_table[1]);
// // }
// combo_counter = returned_combo_counter;
// returned_combo_counter = one_step_small_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
// //printf("combo = %d\n", returned_combo_counter);
// }while(returned_combo_counter != combo_counter);
// float power = return_attack_dev(combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
// if(MP[rank-1] < power){
// for(j = 0;j < rank;j++){
// if(MP[j] < power){
// for(k = rank-2;k >= j;k--){
// MID[k+1] = MID[k];
// MP[k+1] = MP[k];
// }
// MID[j] = tableID;
// MP[j] = power;
// break;
// }
// }
// }
// }
// }
// }
// }
// }
// int id = blockDim.x * blockIdx.x + threadIdx.x;
// int step = blockDim.x * gridDim.x;
// for(i = 0;i < rank;i++){
// maxPower[id + step*i] = MP[i];
// maxID[id + step*i] = MID[i];
// }
// }
__global__ void simulate_all_kernel_small(const int num_attacks, const int * __restrict__ num_patterns10, unsigned long long *maxID, float *maxPower, const float line, const float way, const int * __restrict__ tableID_prefix10, const int * __restrict__ tableID_table10, const int * __restrict__ reversed_bit_table, /*const int * __restrict__ bit_count_table,*/ const int LS, const int strong){
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bdim = blockDim.x;
int color_combo[COMBO_LENGTH];
int num_drops_combo[COMBO_LENGTH];
int isLine_combo[COMBO_LENGTH];
int i,j,k;
int rank = LOCALRANKINGLENGTH;
float MP[2][LOCALRANKINGLENGTH];
unsigned long long MID[2][LOCALRANKINGLENGTH];
unsigned long long tableID = 0;
int ms;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
MID[ms][i] = 0;
MP[ms][i] = 0.0;
}
}
int u, l, uu, ll;
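  // Enumerate every board with exactly num_attacks main-colour drops: u of them in the upper 10 cells and l = num_attacks - u in the lower 10; blocks stride over upper-half patterns, threads over lower-half patterns.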
for(u = 0;u <= num_attacks;u++){
l = num_attacks - u;
if(u <= 10 && l <= 10){
int uoffset = tableID_prefix10[u];
int loffset = tableID_prefix10[l];
for(uu = bid;uu < num_patterns10[u];uu+=gdim){
unsigned long long upperID = (unsigned long long)tableID_table10[uu+uoffset];
for(ll = tid;ll < num_patterns10[l];ll+=bdim){
unsigned long long lowerID = (unsigned long long)tableID_table10[ll+loffset];
tableID = (upperID << 10) | lowerID;
unsigned long long reversed = 0;
int reversed_bit[4];
for(i = 0;i < 4; i++){
reversed_bit[i] = ((tableID >> (5*i) ) & (REVERSE_LENGTH-1));
reversed = reversed | ((unsigned long long)reversed_bit_table[reversed_bit[i]]) << (5*i);
}
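          // Mirror pruning: evaluate a board only if its ID is not larger than the ID of its horizontal reflection, since the reflected board scores identically.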
if(tableID <= reversed){
//init_combo_info(color_combo, num_drops_combo, isLine_combo, COMBO_LENGTH);
unsigned long long color_table[NUM_COLORS];
generate_table_small_dev(tableID, color_table);
int combo_counter;
int returned_combo_counter = 0;
do{
combo_counter = returned_combo_counter;
returned_combo_counter = one_step_small_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
}while(returned_combo_counter != combo_counter);
//float power = return_attack_dev(combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
float power[2];
return_attack_double_dev(power, combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
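            // Maintain per-thread top-LOCALRANKINGLENGTH lists by insertion: index 0 ranks the board for the main colour, index 1 ranks the complemented board (low 20 bits inverted) for the sub colour.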
if(MP[0][rank-1] < power[0]){
for(j = 0;j < rank;j++){
if(MP[0][j] < power[0]){
for(k = rank-2;k >= j;k--){
MID[0][k+1] = MID[0][k];
MP[0][k+1] = MP[0][k];
}
MID[0][j] = tableID;
MP[0][j] = power[0];
break;
}
}
}
if(MP[1][rank-1] < power[1]){
for(j = 0;j < rank;j++){
if(MP[1][j] < power[1]){
for(k = rank-2;k >= j;k--){
MID[1][k+1] = MID[1][k];
MP[1][k+1] = MP[1][k];
}
MID[1][j] = (~tableID) & 0x000FFFFF;
MP[1][j] = power[1];
break;
}
}
}
}
}
}
}
}
int id = blockDim.x * blockIdx.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
maxPower[id + step*i + ms*step*rank] = MP [ms][i];
maxID [id + step*i + ms*step*rank] = MID[ms][i];
}
}
}
#undef COMBO_LENGTH
#undef REVERSE_LENGTH
#define COMBO_LENGTH 10
#define REVERSE_LENGTH 64
// __global__ void simulate_all_kernel_normal(int num_attacks, const int * __restrict__ num_patterns15, unsigned long long *maxID, float *maxPower, float line, float way, const int * __restrict__ tableID_prefix15, int *tableID_table15, const int * __restrict__ reversed_bit_table, /*const int * __restrict__ bit_count_table,*/ int LS, int strong){
// int tid = threadIdx.x;
// int bid = blockIdx.x;
// int gdim = gridDim.x;
// int bdim = blockDim.x;
// int color_combo[COMBO_LENGTH];
// int num_drops_combo[COMBO_LENGTH];
// int isLine_combo[COMBO_LENGTH];
// int i,j,k;
// int rank = LOCALRANKINGLENGTH;
// float MP[LOCALRANKINGLENGTH];
// unsigned long long MID[LOCALRANKINGLENGTH];
// unsigned long long tableID = 0;
// unsigned long long color_table[NUM_COLORS];
// int num_c;
// for(num_c = 0;num_c < NUM_COLORS;num_c++){
// color_table[num_c] = 0;
// }
// for(i = 0;i < rank;i++){
// MID[i] = 0;
// MP[i] = 0.0;
// }
// int u, l, uu, ll;
// int bit_num[5];
// for(u = 0;u <= num_attacks;u++){
// l = num_attacks - u;
// if(u <= 15 && l <= 15){
// int uoffset = tableID_prefix15[u];
// int loffset = tableID_prefix15[l];
// for(uu = bid;uu < num_patterns15[u];uu+=gdim){
// unsigned long long upperID = (unsigned long long)tableID_table15[uu+uoffset];
// for(ll = tid;ll < num_patterns15[l];ll+=bdim){
// unsigned long long lowerID = (unsigned long long)tableID_table15[ll+loffset];
// tableID = (upperID << 15) | lowerID;
// unsigned long long reversed = 0;
// for(i = 0;i < 5; i++){
// bit_num[i] = (int)((tableID >> (6*i) ) & (REVERSE_LENGTH-1));
// reversed += ((unsigned long long)reversed_bit_table[bit_num[i]]) << (6*i);
// }
// if(tableID <= reversed){
// //init_combo_info(color_combo, num_drops_combo, isLine_combo, COMBO_LENGTH);
// int combo_counter = 0;
// //tableID = 1103874885640L;
// //tableID = 42656280L;
// generate_table_normal_dev(tableID, color_table);
// int returned_combo_counter = 0;
// do{
// // if(blockDim.x * blockIdx.x + threadIdx.x == 0){
// // printf("ID %lld\n",tableID);
// // print_table(color_table);
// // print_table2(color_table[0]);
// // print_table2(color_table[1]);
// // }
// combo_counter = returned_combo_counter;
// returned_combo_counter = one_step_normal_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
// //printf("combo = %d\n", returned_combo_counter);
// }while(returned_combo_counter != combo_counter);
// float power = return_attack_dev(combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
// if(MP[rank-1] < power){
// for(j = 0;j < rank;j++){
// if(MP[j] < power){
// for(k = rank-2;k >= j;k--){
// MID[k+1] = MID[k];
// MP[k+1] = MP[k];
// }
// MID[j] = tableID;
// MP[j] = power;
// break;
// }
// }
// }
// }
// }
// }
// }
// }
// int id = blockDim.x * blockIdx.x + threadIdx.x;
// int step = blockDim.x * gridDim.x;
// for(i = 0;i < rank;i++){
// maxPower[id + step*i] = MP[i];
// maxID[id + step*i] = MID[i];
// }
// }
__global__ void simulate_all_kernel_normal(const int num_attacks, const int * __restrict__ num_patterns15, unsigned long long *maxID, float *maxPower, const float line, const float way, const int * __restrict__ tableID_prefix15, const int * __restrict__ tableID_table15, const int * __restrict__ reversed_bit_table, /*const int * __restrict__ bit_count_table,*/ const int LS, const int strong){
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bdim = blockDim.x;
int color_combo[COMBO_LENGTH];
int num_drops_combo[COMBO_LENGTH];
int isLine_combo[COMBO_LENGTH];
int i,j,k;
int rank = LOCALRANKINGLENGTH;
float MP[2][LOCALRANKINGLENGTH];
unsigned long long MID[2][LOCALRANKINGLENGTH];
unsigned long long tableID = 0;
int ms;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
MID[ms][i] = 0;
MP[ms][i] = 0.0;
}
}
int u, l, uu, ll;
for(u = 0;u <= num_attacks;u++){
l = num_attacks - u;
if(u <= 15 && l <= 15){
int uoffset = tableID_prefix15[u];
int loffset = tableID_prefix15[l];
for(uu = bid;uu < num_patterns15[u];uu+=gdim){
unsigned long long upperID = (unsigned long long)tableID_table15[uu+uoffset];
for(ll = tid;ll < num_patterns15[l];ll+=bdim){
unsigned long long lowerID = (unsigned long long)tableID_table15[ll+loffset];
tableID = (upperID << 15) | lowerID;
unsigned long long reversed = 0;
int reversed_bit[5];
for(i = 0;i < 5; i++){
reversed_bit[i] = ((tableID >> (6*i) ) & (REVERSE_LENGTH-1));
reversed = reversed | ((unsigned long long)reversed_bit_table[reversed_bit[i]]) << (6*i);
}
if(tableID <= reversed){
//init_combo_info(color_combo, num_drops_combo, isLine_combo, COMBO_LENGTH);
unsigned long long color_table[NUM_COLORS];
generate_table_normal_dev(tableID, color_table);
int combo_counter;
int returned_combo_counter = 0;
do{
combo_counter = returned_combo_counter;
returned_combo_counter = one_step_normal_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
}while(returned_combo_counter != combo_counter);
//float power = return_attack_dev(combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
float power[2];
return_attack_double_dev(power, combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
if(MP[0][rank-1] < power[0]){
for(j = 0;j < rank;j++){
if(MP[0][j] < power[0]){
for(k = rank-2;k >= j;k--){
MID[0][k+1] = MID[0][k];
MP[0][k+1] = MP[0][k];
}
MID[0][j] = tableID;
MP[0][j] = power[0];
break;
}
}
}
if(MP[1][rank-1] < power[1]){
for(j = 0;j < rank;j++){
if(MP[1][j] < power[1]){
for(k = rank-2;k >= j;k--){
MID[1][k+1] = MID[1][k];
MP[1][k+1] = MP[1][k];
}
MID[1][j] = (~tableID) & 0x3FFFFFFF;
MP[1][j] = power[1];
break;
}
}
}
}
}
}
}
}
int id = blockDim.x * blockIdx.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
maxPower[id + step*i + ms*step*rank] = MP [ms][i];
maxID [id + step*i + ms*step*rank] = MID[ms][i];
}
}
}
#undef COMBO_LENGTH
#undef REVERSE_LENGTH
#define COMBO_LENGTH 14
#define REVERSE_LENGTH 128
__global__ void simulate_all_kernel_big(const int num_attacks, const int * __restrict__ num_patterns21, unsigned long long *maxID, float *maxPower, const float line, const float way, const int * __restrict__ tableID_prefix21, const int * __restrict__ tableID_table21, const int * __restrict__ reversed_bit_table, /*const int * __restrict__ bit_count_table,*/ const int LS, const int strong){
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bdim = blockDim.x;
int color_combo[COMBO_LENGTH];
int num_drops_combo[COMBO_LENGTH];
int isLine_combo[COMBO_LENGTH];
int i,j,k;
int rank = LOCALRANKINGLENGTH;
float MP[2][LOCALRANKINGLENGTH];
unsigned long long MID[2][LOCALRANKINGLENGTH];
unsigned long long tableID = 0;
int ms;
// unsigned long long color_table[NUM_COLORS];
// int num_c;
// for(num_c = 0;num_c < NUM_COLORS;num_c++){
// color_table[num_c] = 0;
// }
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
MID[ms][i] = 0;
MP[ms][i] = 0.0;
}
}
int u, l, uu, ll;
for(u = 0;u <= num_attacks;u++){
l = num_attacks - u;
if(u <= 21 && l <= 21){
int uoffset = tableID_prefix21[u];
int loffset = tableID_prefix21[l];
for(uu = bid;uu < num_patterns21[u];uu+=gdim){
unsigned long long upperID = (unsigned long long)tableID_table21[uu+uoffset];
for(ll = tid;ll < num_patterns21[l];ll+=bdim){
unsigned long long lowerID = (unsigned long long)tableID_table21[ll+loffset];
tableID = (upperID << 21) | lowerID;
unsigned long long reversed = 0;
int reversed_bit[6];
for(i = 0;i < 6; i++){
reversed_bit[i] = ((tableID >> (7*i) ) & (REVERSE_LENGTH-1));
reversed = reversed | ((unsigned long long)reversed_bit_table[reversed_bit[i]]) << (7*i);
}
if(tableID <= reversed){
//init_combo_info(color_combo, num_drops_combo, isLine_combo, COMBO_LENGTH);
unsigned long long color_table[NUM_COLORS];
generate_table_big_dev(tableID, color_table);
int combo_counter;
int returned_combo_counter = 0;
do{
combo_counter = returned_combo_counter;
returned_combo_counter = one_step_big_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
}while(returned_combo_counter != combo_counter);
//float power = return_attack_dev(combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
float power[2];
return_attack_double_dev(power, combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
if(MP[0][rank-1] < power[0]){
for(j = 0;j < rank;j++){
if(MP[0][j] < power[0]){
for(k = rank-2;k >= j;k--){
MID[0][k+1] = MID[0][k];
MP[0][k+1] = MP[0][k];
}
MID[0][j] = tableID;
MP[0][j] = power[0];
break;
}
}
}
if(MP[1][rank-1] < power[1]){
for(j = 0;j < rank;j++){
if(MP[1][j] < power[1]){
for(k = rank-2;k >= j;k--){
MID[1][k+1] = MID[1][k];
MP[1][k+1] = MP[1][k];
}
MID[1][j] = (~tableID) & 0x000003FFFFFFFFFFLU;
MP[1][j] = power[1];
break;
}
}
}
}
}
}
}
}
int id = blockDim.x * blockIdx.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
maxPower[id + step*i + ms*step*rank] = MP [ms][i];
maxID [id + step*i + ms*step*rank] = MID[ms][i];
}
}
}
__global__ void simulate_all_kernel_big_21(const int * __restrict__ num_patterns21, unsigned long long *maxID, float *maxPower, const float line, const float way, const int * __restrict__ tableID_prefix21, const int * __restrict__ tableID_table21, const int * __restrict__ reversed_bit_table, /*const int * __restrict__ bit_count_table,*/ const int LS, const int strong){
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bdim = blockDim.x;
int color_combo[COMBO_LENGTH];
int num_drops_combo[COMBO_LENGTH];
int isLine_combo[COMBO_LENGTH];
int i,j,k;
int rank = LOCALRANKINGLENGTH;
float MP[2][LOCALRANKINGLENGTH];
unsigned long long MID[2][LOCALRANKINGLENGTH];
unsigned long long tableID = 0;
int ms;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
MID[ms][i] = 0;
MP[ms][i] = 0.0;
}
}
int u, l, uu, ll;
for(u = 0;u <= 21;u++){
l = 21 - u;
int uoffset = tableID_prefix21[u];
int loffset = tableID_prefix21[l];
for(uu = bid;uu < num_patterns21[u];uu+=gdim){
unsigned long long upperID = (unsigned long long)tableID_table21[uu+uoffset];
for(ll = tid;ll < num_patterns21[l];ll+=bdim){
unsigned long long lowerID = (unsigned long long)tableID_table21[ll+loffset];
tableID = (upperID << 21) | lowerID;
unsigned long long reversed = 0;
int reversed_bit;
for(i = 0;i < 6; i++){
reversed_bit = ((tableID >> (7*i) ) & (REVERSE_LENGTH-1));
reversed = reversed | ((unsigned long long)reversed_bit_table[reversed_bit]) << (7*i);
}
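        // For the 21-21 split the colour complement of a board is itself a candidate, so boards whose complement has a smaller ID are skipped as well.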
unsigned long long inversed = (~tableID) & 0x000003FFFFFFFFFFLU;
if(tableID <= reversed && tableID <= inversed){
unsigned long long color_table[NUM_COLORS];
generate_table_big_dev(tableID, color_table);
int combo_counter;
int returned_combo_counter = 0;
do{
combo_counter = returned_combo_counter;
returned_combo_counter = one_step_big_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
}while(returned_combo_counter != combo_counter);
float power[2];
return_attack_double_dev(power, combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
if(MP[0][rank-1] < power[0]){
for(j = 0;j < rank;j++){
if(MP[0][j] < power[0]){
for(k = rank-2;k >= j;k--){
MID[0][k+1] = MID[0][k];
MP[0][k+1] = MP[0][k];
}
MID[0][j] = tableID;
MP[0][j] = power[0];
break;
}
}
}
if(MP[1][rank-1] < power[1]){
for(j = 0;j < rank;j++){
if(MP[1][j] < power[1]){
for(k = rank-2;k >= j;k--){
MID[1][k+1] = MID[1][k];
MP[1][k+1] = MP[1][k];
}
MID[1][j] = (~tableID) & 0x000003FFFFFFFFFFLU;
MP[1][j] = power[1];
break;
}
}
}
}
}
}
}
int id = blockDim.x * blockIdx.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
maxPower[id + step*i + ms*step*rank] = MP [ms][i];
maxID [id + step*i + ms*step*rank] = MID[ms][i];
}
}
}
#define WID 9
__global__ void simulate_all_kernel_big_inlined(const int num_attacks, const int * __restrict__ num_patterns21, unsigned long long *maxID, float *maxPower, const float line, const float way, const int * __restrict__ tableID_prefix21, const int * __restrict__ tableID_table21, const int * __restrict__ reversed_bit_table, const int LS, const int strong){
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bdim = blockDim.x;
int color_combo[COMBO_LENGTH];
int num_drops_combo[COMBO_LENGTH];
int isLine_combo[COMBO_LENGTH];
int i,j,k;
const int rank = LOCALRANKINGLENGTH;
float MP[2][LOCALRANKINGLENGTH];
unsigned long long MID[2][LOCALRANKINGLENGTH];
unsigned long long tableID = 0;
int ms;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
MID[ms][i] = 0;
MP[ms][i] = 0.0;
}
}
int u, l, uu, ll;
for(u = 0;u <= num_attacks;u++){
l = num_attacks - u;
if(u <= 21 && l <= 21){
int uoffset = tableID_prefix21[u];
int loffset = tableID_prefix21[l];
for(uu = bid;uu < num_patterns21[u];uu+=gdim){
unsigned long long upperID = (unsigned long long)tableID_table21[uu+uoffset];
for(ll = tid;ll < num_patterns21[l];ll+=bdim){
unsigned long long lowerID = (unsigned long long)tableID_table21[ll+loffset];
tableID = (upperID << 21) | lowerID;
unsigned long long reversed = 0;
int reversed_bit[6];
for(i = 0;i < 6; i++){
reversed_bit[i] = ((tableID >> (7*i) ) & (REVERSE_LENGTH-1));
reversed = reversed | ((unsigned long long)reversed_bit_table[reversed_bit[i]]) << (7*i);
}
if(tableID <= reversed){
unsigned long long color_table[NUM_COLORS];
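            // Inlined generate_table_big_dev: unpack six 7-bit rows of the ID into a 9-wide padded bitboard; colour 1 is the bitwise complement of colour 0.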
unsigned long long b0, b1, b2, b3, b4, b5;
unsigned long long ID = tableID;
b0 = ID & 127;
b1 = (ID >> 7 ) & 127;
b2 = (ID >> 14) & 127;
b3 = (ID >> 21) & 127;
b4 = (ID >> 28) & 127;
b5 = (ID >> 35) & 127;
color_table[0] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1))
| (b4 << (WID*5+1)) | (b5 << (WID*6+1));
ID = ~ID;
b0 = ID & 127;
b1 = (ID >> 7 ) & 127;
b2 = (ID >> 14) & 127;
b3 = (ID >> 21) & 127;
b4 = (ID >> 28) & 127;
b5 = (ID >> 35) & 127;
color_table[1] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1))
| (b4 << (WID*5+1)) | (b5 << (WID*6+1));
int combo_counter = 0;
int combo_counter_org;
//returned_combo_counter = one_step_big_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
do{
combo_counter_org = combo_counter;
unsigned long long isErase_tables[NUM_COLORS];
int num_c;
unsigned long long tmp, tmp2;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long n, w, s, e;
n = color >> WID;
w = color >> 1;
s = color << WID;
e = color << 1;
tmp = (color & n & s);
tmp = tmp | (tmp >> WID) | (tmp << WID);
tmp2 = (color & w & e);
tmp2 = tmp2 | (tmp2 >> 1 ) | (tmp2 << 1 );
isErase_tables[num_c] = (color & tmp) | (color & tmp2);
}
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long isErase_table = isErase_tables[num_c];
color_table[num_c] = color_table[num_c] & (~isErase_table);
unsigned long long p = 1L << (WID+1);
while(isErase_table) {
while(!(isErase_table & p)){
p = p << 1;
}
tmp = p;
color_combo[combo_counter] = num_c;
unsigned long long tmp_old;
do{
tmp_old = tmp;
tmp = (tmp | (tmp << 1) | (tmp >> 1) | (tmp << WID) | (tmp >> WID)) & isErase_table;
}while(tmp_old != tmp);
isErase_table = isErase_table & (~tmp);
unsigned int ubits = tmp >> 32;
unsigned int lbits = tmp;
ubits = (ubits & 0x55555555) + (ubits >> 1 & 0x55555555);
ubits = (ubits & 0x33333333) + (ubits >> 2 & 0x33333333);
ubits = ubits + (ubits >> 4) & 0x0F0F0F0F;
ubits = ubits + (ubits >> 8);
ubits = ubits + (ubits >> 16) & 0x0000007F;
lbits = (lbits & 0x55555555) + (lbits >> 1 & 0x55555555);
lbits = (lbits & 0x33333333) + (lbits >> 2 & 0x33333333);
lbits = lbits + (lbits >> 4) & 0x0F0F0F0F;
lbits = lbits + (lbits >> 8);
lbits = lbits + (lbits >> 16) & 0x0000007F;
num_drops_combo[combo_counter] = ubits + lbits;
isLine_combo[combo_counter] = ((tmp >> (WID +1)) & 127) == 127
|| ((tmp >> (WID*2+1)) & 127) == 127
|| ((tmp >> (WID*3+1)) & 127) == 127
|| ((tmp >> (WID*4+1)) & 127) == 127
|| ((tmp >> (WID*5+1)) & 127) == 127
|| ((tmp >> (WID*6+1)) & 127) == 127;
combo_counter++;
}
}
if(combo_counter_org != combo_counter){
unsigned long long exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
unsigned long long exist_org;
do{
exist_org = exist_table;
unsigned long long exist_u = (exist_table >> WID) | 4575657221408423936L;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long color_u = color & exist_u;
unsigned long long color_d = (color << WID) & (~exist_table);
color_table[num_c] = color_u | color_d;
}
exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
}while(exist_org != exist_table);
}
}while(combo_counter_org != combo_counter);
float power[2];
const float AT = 1.0;
int num_line_m = 0;
float attack_m = 0;
int num_line_s = 0;
float attack_s = 0;
float l_m = 1.0;
float l_s = 1.0;
int i;
float drop_pwr;
for(i = 0;i < combo_counter;i++){
int color = color_combo[i];
if(color == MAINCOLOR){
drop_pwr = num_drops_combo[i]==4 ? (1+0.25*(num_drops_combo[i]-3))*way : 1+0.25*(num_drops_combo[i]-3);
if(strong)
drop_pwr = drop_pwr * (1+0.06*num_drops_combo[i]);
attack_m += drop_pwr;
if(isLine_combo[i]) num_line_m++;
}else{
drop_pwr = num_drops_combo[i]==4 ? (1+0.25*(num_drops_combo[i]-3))*way : 1+0.25*(num_drops_combo[i]-3);
if(strong)
drop_pwr = drop_pwr * (1+0.06*num_drops_combo[i]);
attack_s += drop_pwr;
if(isLine_combo[i]) num_line_s++;
}
}
int count_m;
int count_s;
switch(LS){
case HERO:
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 8){
l_m = 16;
}else if(num_drops == 7 && l_m < 12.25){
l_m = 12.25;
}else if(num_drops == 6 && l_m < 9){
l_m = 9;
}
}
if(SUBCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 8){
l_s = 16;
}else if(num_drops == 7 && l_s < 12.25){
l_s = 12.25;
}else if(num_drops == 6 && l_s < 9){
l_s = 9;
}
}
}
break;
case SONIA:
if(combo_counter < 6){
l_m = 6.25;
l_s = 6.25;
}else{
l_m = 2.75*2.75;
l_s = 2.75*2.75;
}
break;
case KRISHNA:
count_m = 0;
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR == color_combo[i]){
count_m++;
int num_drops = num_drops_combo[i];
if(num_drops == 5)
l_m = 2.25;
}
}
if(count_m == 2)
l_m = l_m * 3 * 3;
else if(count_m >= 3)
l_m = l_m * 4.5 * 4.5;
else
l_m = 1;
count_s = 0;
for(i = 0;i < combo_counter;i++){
if(SUBCOLOR == color_combo[i]){
count_s++;
int num_drops = num_drops_combo[i];
if(num_drops == 5)
l_s = 2.25;
}
}
if(count_s == 2)
l_s = l_s * 3 * 3;
else if(count_s >= 3)
l_s = l_s * 4.5 * 4.5;
else
l_s = 1;
break;
case BASTET:
if(combo_counter == 5){
l_m = 3.0*3.0;
l_s = 3.0*3.0;
}else if(combo_counter == 6){
l_m = 3.5*3.5;
l_s = 3.5*3.5;
}else if(combo_counter >= 7){
l_m = 4.0*4.0;
l_s = 4.0*4.0;
}else{
l_m = 1.0;
l_s = 1.0;
}
break;
case LAKU_PARU:
l_m = 6.25;
l_s = 6.25;
for(i = 0;i < combo_counter;i++){
if(SUBCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 5)
l_m = 25;
}
if(MAINCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 5)
l_s = 25;
}
}
break;
default:
break;
}
power[0] = attack_m * (1+0.25*(combo_counter-1)) * AT * l_m * (1+0.1*line*num_line_m);
              power[1] = attack_s * (1+0.25*(combo_counter-1)) * AT * l_s * (1+0.1*line*num_line_s); // sub-colour line count (matches return_attack_double_dev)
if(MP[0][rank-1] < power[0]){
for(j = 0;j < rank;j++){
if(MP[0][j] < power[0]){
for(k = rank-2;k >= j;k--){
MID[0][k+1] = MID[0][k];
MP[0][k+1] = MP[0][k];
}
MID[0][j] = tableID;
MP[0][j] = power[0];
break;
}
}
}
if(MP[1][rank-1] < power[1]){
for(j = 0;j < rank;j++){
if(MP[1][j] < power[1]){
for(k = rank-2;k >= j;k--){
MID[1][k+1] = MID[1][k];
MP[1][k+1] = MP[1][k];
}
MID[1][j] = (~tableID) & 0x000003FFFFFFFFFFLU;
MP[1][j] = power[1];
break;
}
}
}
}
}
}
}
}
int id = blockDim.x * blockIdx.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
maxPower[id + step*i + ms*step*rank] = MP [ms][i];
maxID [id + step*i + ms*step*rank] = MID[ms][i];
}
}
}
#undef WID
#undef COMBO_LENGTH
#undef REVERSE_LENGTH
extern "C"
{
void simulate_all_cuda(const int table_size, const int start, const int end, /*int *bit_count_table,*/ int *const reversed_bit_table, int *const tableID_half_table, int *const tableID_half_prefix, /*unsigned long long *const num_patterns,*/ int *const num_patterns_half, const int width, const int hight, const int combo_length, const int LS, const int isStrong, const int line, const int way, const int simuave){
int rank = RANKINGLENGTH;
int i, j, k;
unsigned long long *max_powerID_dev;
float *max_power_dev;
int tsize = NUM_THREAD;
//int gsize = ((num_patterns_omitted[num_attacks]-1)/128+1);
int gsize = NUM_BLOCK;
const int length = gsize*tsize*LOCALRANKINGLENGTH;
unsigned long long max_powerID[2*length];
float max_power[2*length];
unsigned long long final_MID[43][rank];
float final_MP[43][rank];
int reverse_length = 1 << width;
int *tableID_half_table_dev, *tableID_half_prefix_dev, *num_patterns_half_dev;
//int *bit_count_table_dev, *reversed_bit_table_dev;
int *reversed_bit_table_dev;
const float pline = (float)line;
const float pway = pow(1.5,way);
const int half_table_size = width*hight/2;
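  // Host driver: copy the lookup tables to the device once, launch one kernel per main/sub drop split, then merge the per-thread rankings on the host.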
for(i = 0;i < 43;i++){
final_MID[i][0] = 0xFFFFFFFFFFFFFFFFLU;
}
//CUDA_SAFE_CALL(hipMalloc((void**)&bit_count_table_dev, sizeof(int) * 256));
CUDA_SAFE_CALL(hipMalloc((void**)&reversed_bit_table_dev, sizeof(int) * reverse_length));
CUDA_SAFE_CALL(hipMalloc((void**)&tableID_half_table_dev, sizeof(int) * (1 << (width*hight/2))));
CUDA_SAFE_CALL(hipMalloc((void**)&num_patterns_half_dev, sizeof(int) * (width*hight/2+1)));
CUDA_SAFE_CALL(hipMalloc((void**)&tableID_half_prefix_dev,sizeof(int) * (width*hight/2+1)));
//CUDA_SAFE_CALL(hipMemcpy(bit_count_table_dev, bit_count_table, sizeof(int) * 256, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(reversed_bit_table_dev,reversed_bit_table, sizeof(int) * reverse_length, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(tableID_half_table_dev,tableID_half_table, sizeof(int) * (1 << (width*hight/2)), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(num_patterns_half_dev, num_patterns_half, sizeof(int) * (width*hight/2+1), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(tableID_half_prefix_dev,tableID_half_prefix,sizeof(int) * (width*hight/2+1), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMalloc((void**)&max_powerID_dev, sizeof(unsigned long long) * 2 * length));;
CUDA_SAFE_CALL(hipMalloc((void**)&max_power_dev, sizeof(float) * 2 * length));;
//fprintf(stdout,"%d\n",__LINE__);
int num_attacks;
for(num_attacks = start;num_attacks <= end;num_attacks++){
if(half_table_size < num_attacks && num_attacks <= width*hight-start) break;
printf("calculating %2d-%2d & %2d-%2d ...\n", num_attacks, width*hight-num_attacks, width*hight-num_attacks, num_attacks);
//printf("%2d-%2d, line %d, way %d\n", num_attacks, width*hight-num_attacks, line, way);
dim3 grid(gsize,1,1);
dim3 block(tsize,1,1);
#ifdef TIME
hipDeviceSynchronize();
double t1 = gettimeofday_sec();
#endif
switch(table_size){
case SMALL_TABLE:
hipLaunchKernelGGL(( simulate_all_kernel_small), dim3(grid), dim3(block), 0, 0, num_attacks, num_patterns_half_dev, max_powerID_dev, max_power_dev, pline, pway, tableID_half_prefix_dev, tableID_half_table_dev, reversed_bit_table_dev, /*bit_count_table_dev,*/ LS, isStrong);
break;
case NORMAL_TABLE:
hipLaunchKernelGGL(( simulate_all_kernel_normal), dim3(grid), dim3(block), 0, 0, num_attacks, num_patterns_half_dev, max_powerID_dev, max_power_dev, pline, pway, tableID_half_prefix_dev, tableID_half_table_dev, reversed_bit_table_dev, /*bit_count_table_dev,*/ LS, isStrong);
break;
case BIG_TABLE:
if(num_attacks == 21){
hipLaunchKernelGGL(( simulate_all_kernel_big_21), dim3(grid), dim3(block), 0, 0, num_patterns_half_dev, max_powerID_dev, max_power_dev, pline, pway, tableID_half_prefix_dev, tableID_half_table_dev, reversed_bit_table_dev, /*bit_count_table_dev,*/ LS, isStrong);
}else{
hipLaunchKernelGGL(( simulate_all_kernel_big), dim3(grid), dim3(block), 0, 0, num_attacks, num_patterns_half_dev, max_powerID_dev, max_power_dev, pline, pway, tableID_half_prefix_dev, tableID_half_table_dev, reversed_bit_table_dev, /*bit_count_table_dev,*/ LS, isStrong);
}
break;
}
#ifdef TIME
hipDeviceSynchronize();
double t2 = gettimeofday_sec();
printf("num %d,time,%f\n",num_attacks,t2-t1);
#endif
//fprintf(stdout,"%d\n",__LINE__);
hipMemcpy(max_powerID, max_powerID_dev, sizeof(unsigned long long) * 2 * length, hipMemcpyDeviceToHost);
hipMemcpy(max_power , max_power_dev , sizeof(float) * 2 * length, hipMemcpyDeviceToHost);
//fprintf(stdout,"%d\n",__LINE__);
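    // Reduce the per-thread top-LOCALRANKINGLENGTH lists returned by the kernel into one global top-"rank" list for the main colour (ms==0) and one for the sub colour (ms==1).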
float MP[2][rank];
unsigned long long MID[2][rank];
int ms;
for(ms = 0; ms < 2; ms++){
for(i = 0;i < rank;i++){
MP[ms][i] = 0.0;
MID[ms][i]= 0;
}
for(i = 0;i < length;i++){
float power = max_power[i + length*ms];
unsigned long long tableID = max_powerID[i + length*ms];
if(MP[ms][rank-1] < power){
for(k = 0;k < rank;k++){
if(MP[ms][k] < power){
for(j = rank-2;j >= k;j--){
MID[ms][j+1] = MID[ms][j];
MP[ms][j+1] = MP[ms][j];
}
MID[ms][k] = tableID;
MP[ms][k] = power;
break;
}
}
}
}
//fprintf(stdout,"%d\n",__LINE__);
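      // Among entries with equal power, move the smallest board ID to the front so that ties are reported in a consistent order.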
for(i = 0;i < rank;i++){
float power = MP[ms][i];
unsigned long long tmp = MID[ms][i];
unsigned long long minID = tmp;
int index = i;
for(j = i+1;j < rank;j++){
if(power == MP[ms][j]){
if(minID > MID[ms][j]){
minID = MID[ms][j];
index = j;
}
}else{
break;
}
}
MID[ms][index] = tmp;
MID[ms][i] = minID;
}
}
//fprintf(stdout,"%d\n",__LINE__);
if(num_attacks == half_table_size){
int mc = 0;
int sc = 0;
for(i = 0;i < rank;i++){
if(MP[0][mc] < MP[1][sc]){
final_MID[num_attacks][i] = MID[1][sc];
final_MP [num_attacks][i] = MP [1][sc];
sc++;
}else if(MP[0][mc] == MP[1][sc]){
if(MID[0][mc] < MID[1][sc]){
final_MID[num_attacks][i] = MID[0][mc];
final_MP [num_attacks][i] = MP [0][mc];
mc++;
}else{
final_MID[num_attacks][i] = MID[1][sc];
final_MP [num_attacks][i] = MP [1][sc];
sc++;
}
}else{
final_MID[num_attacks][i] = MID[0][mc];
final_MP [num_attacks][i] = MP [0][mc];
mc++;
}
}
}else{
for(i = 0;i < rank;i++){
final_MID[num_attacks][i] = MID[0][i];
final_MP [num_attacks][i] = MP [0][i];
final_MID[width*hight-num_attacks][i] = MID[1][i];
final_MP [width*hight-num_attacks][i] = MP [1][i];
}
}
}
//fprintf(stdout,"%d\n",__LINE__);
for(num_attacks = 0;num_attacks <= width*hight;num_attacks++){
if(final_MID[num_attacks][0] != 0xFFFFFFFFFFFFFFFFLU){
printf("%2d-%2d, line %d, way %d\n", num_attacks, width*hight-num_attacks, line, way);
if(simuave){
simulate_average(table_size, final_MID[num_attacks], final_MP[num_attacks], num_attacks, width, hight, LS, isStrong, pline, pway);
}else{
for(i = 0;i < rank;i++){
printf("%d,max ID,%lld,power,%f\n",i,final_MID[num_attacks][i],final_MP[num_attacks][i]);
}
}
}
}
CUDA_SAFE_CALL(hipFree(max_powerID_dev));
CUDA_SAFE_CALL(hipFree(max_power_dev));
CUDA_SAFE_CALL(hipFree(tableID_half_table_dev));
//CUDA_SAFE_CALL(hipFree(num_patterns_half_dev));
CUDA_SAFE_CALL(hipFree(tableID_half_prefix_dev));
//CUDA_SAFE_CALL(hipFree(bit_count_table_dev));
CUDA_SAFE_CALL(hipFree(reversed_bit_table_dev));
}
}
| 58ddfc4b45cd9d1c1051a38dde94373c57caf12d.cu | /* author gumboshi <[email protected]> */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include "pzdr_def.h"
#include "pzdr_saidai.h"
#define LOCALRANKINGLENGTH 5
#ifndef NUM_BLOCK
#define NUM_BLOCK 52
#endif
#define NUM_THREAD 256
#define CUDA_SAFE_CALL(func) \
do { \
cudaError_t err = (func); \
if (err != cudaSuccess) { \
fprintf(stderr, "[Error] %s (error code: %d) at %s line %d\n", cudaGetErrorString(err), err, __FILE__, __LINE__); \
exit(err); \
} \
} while(0)
// __device__ inline void init_combo_info(int *color_combo, int *num_drops_combo, int *isLine_combo, int combo_length){
// int i;
// for(i = 0;i < combo_length;i++){
// color_combo[i] = 0;
// num_drops_combo[i] = 0;
// isLine_combo[i] = 0;
// }
// }
__device__ void print_table_dev(unsigned long long *color_table, int width, int hight){
int i, j;
for(i = 1;i <= hight;i++){
for(j = 1;j <= width;j++){
unsigned long long p = (1L << ((width*2)*i+j));
if((color_table[0] & p) == p)
printf("G ");
else if((color_table[1] & p) == p)
printf("Y ");
else
printf("? ");
}
printf("\n");
}
printf("\n");
}
__device__ void print_table2_dev(unsigned long long color_table, int width, int hight){
int i, j;
for(i = 1;i <= hight;i++){
for(j = 1;j <= width;j++){
unsigned long long p = (1L << ((width*2)*i+j));
printf("%d ", (color_table & p) == p);
}
printf("\n");
}
printf("\n");
}
#if NUM_COLORS==2
#define WID 7
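// Unpack a 20-bit board ID (4 rows x 5 columns) into a WID(=7)-wide padded bitboard: row i of the ID lands at bit offset WID*(i+1)+1, leaving a one-cell border so neighbour shifts never wrap; colour 1 is the complement of colour 0.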
__device__ inline void generate_table_small_dev(unsigned long long tableID, unsigned long long *color_table){
unsigned long long b0, b1, b2, b3;
unsigned long long ID = tableID;
b0 = ID & 31;
b1 = (ID >> 5 ) & 31;
b2 = (ID >> 10) & 31;
b3 = (ID >> 15) & 31;
color_table[0] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1));
ID = ~ID;
b0 = ID & 31;
b1 = (ID >> 5 ) & 31;
b2 = (ID >> 10) & 31;
b3 = (ID >> 15) & 31;
color_table[1] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1));
}
#undef WID
#define WID 8
__device__ inline void generate_table_normal_dev(unsigned long long tableID, unsigned long long *color_table){
unsigned long long b0, b1, b2, b3, b4;
unsigned long long ID = tableID;
b0 = ID & 63;
b1 = (ID >> 6 ) & 63;
b2 = (ID >> 12) & 63;
b3 = (ID >> 18) & 63;
b4 = (ID >> 24) & 63;
color_table[0] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1)) | (b4 << (WID*5+1));
ID = ~ID;
b0 = ID & 63;
b1 = (ID >> 6 ) & 63;
b2 = (ID >> 12) & 63;
b3 = (ID >> 18) & 63;
b4 = (ID >> 24) & 63;
color_table[1] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1)) | (b4 << (WID*5+1));
}
#undef WID
#define WID 9
__device__ inline void generate_table_big_dev(unsigned long long tableID, unsigned long long *color_table){
unsigned long long b0, b1, b2, b3, b4, b5;
unsigned long long ID = tableID;
b0 = ID & 127;
b1 = (ID >> 7 ) & 127;
b2 = (ID >> 14) & 127;
b3 = (ID >> 21) & 127;
b4 = (ID >> 28) & 127;
b5 = (ID >> 35) & 127;
color_table[0] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1))
| (b4 << (WID*5+1)) | (b5 << (WID*6+1));
ID = ~ID;
b0 = ID & 127;
b1 = (ID >> 7 ) & 127;
b2 = (ID >> 14) & 127;
b3 = (ID >> 21) & 127;
b4 = (ID >> 28) & 127;
b5 = (ID >> 35) & 127;
color_table[1] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1))
| (b4 << (WID*5+1)) | (b5 << (WID*6+1));
}
#undef WID
#endif
#if NUM_COLORS==2
#define WID 7
__device__ inline int one_step_small_dev(unsigned long long *color_table, int *color_combo, int *num_drops_combo, int *isLine_combo, int finish){
// 0 → width
// ↓
// hight
// 000000000
// 000000000
// 000000000
// 000000000
// 000000000
// 000000010
// 000000111
// 000000010
unsigned long long isErase_tables[NUM_COLORS];
int combo_counter = finish;
int num_c;
unsigned long long tmp, tmp2;
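  // Erase detection: a drop is erasable if it is the middle of a vertical 3-in-a-row (color & up & down) or a horizontal 3-in-a-row (color & left & right); the two matching neighbours are then added back in.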
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long n, w, s, e;
n = color >> WID;
w = color >> 1;
s = color << WID;
e = color << 1;
tmp = (color & n & s);
tmp = tmp | (tmp >> WID) | (tmp << WID);
tmp2 = (color & w & e);
tmp2 = tmp2 | (tmp2 >> 1 ) | (tmp2 << 1 );
isErase_tables[num_c] = (color & tmp) | (color & tmp2);
}
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long isErase_table = isErase_tables[num_c];
color_table[num_c] = color_table[num_c] & (~isErase_table);
unsigned long long p = 1L << (WID+1);
while(isErase_table) {
while(!(isErase_table & p)){
p = p << 1;
}
tmp = p;
color_combo[combo_counter] = num_c;
unsigned long long tmp_old;
do{
tmp_old = tmp;
tmp = (tmp | (tmp << 1) | (tmp >> 1) | (tmp << WID) | (tmp >> WID)) & isErase_table;
}while(tmp_old != tmp);
isErase_table = isErase_table & (~tmp);
// int b1, b2, b3, b4, b5, b6;
// b1 = tmp >> (WID*1+1) & 127;
// b2 = tmp >> (WID*2+1) & 127;
// b3 = tmp >> (WID*3+1) & 127;
// b4 = tmp >> (WID*4+1) & 127;
// b5 = tmp >> (WID*5+1) & 127;
// b6 = tmp >> (WID*6+1) & 127;
// num_drops_combo[combo_counter] = bit_count_table[b1] + bit_count_table[b2]
// + bit_count_table[b3] + bit_count_table[b4] + bit_count_table[b5] + bit_count_table[b6];
unsigned long long bits = tmp;
bits = (bits & 0x5555555555555555LU) + (bits >> 1 & 0x5555555555555555LU);
bits = (bits & 0x3333333333333333LU) + (bits >> 2 & 0x3333333333333333LU);
bits = bits + (bits >> 4) & 0x0F0F0F0F0F0F0F0FLU;
bits = bits + (bits >> 8);
bits = bits + (bits >> 16);
bits = bits + (bits >> 32) & 0x0000007F;
num_drops_combo[combo_counter] = bits;
isLine_combo[combo_counter] = ((tmp >> (WID +1)) & 31) == 31
|| ((tmp >> (WID*2+1)) & 31) == 31
|| ((tmp >> (WID*3+1)) & 31) == 31
|| ((tmp >> (WID*4+1)) & 31) == 31;
// bits = tmp;
// bits = bits & (bits >> 1);
// bits = bits & (bits >> 2);
// bits = bits & (bits >> 3);
// isLine_combo[combo_counter] = ((bits & 36099303471055872L) != 0);
combo_counter++;
}
}
if(finish != combo_counter){
unsigned long long exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
unsigned long long exist_org;
do{
exist_org = exist_table;
unsigned long long exist_u = (exist_table >> WID) | 16642998272L;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long color_u = color & exist_u;
unsigned long long color_d = (color << WID) & (~exist_table) & (~2130303778816L);
color_table[num_c] = color_u | color_d;
}
exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
}while(exist_org != exist_table);
}
return combo_counter;
}
#undef WID
#define WID 8
__device__ inline int one_step_normal_dev(unsigned long long *color_table, int *color_combo, int *num_drops_combo, int *isLine_combo, int finish){
// 0 → width
// ↓
// hight
// 000000000
// 000000000
// 000000000
// 000000000
// 000000000
// 000000010
// 000000111
// 000000010
unsigned long long isErase_tables[NUM_COLORS];
int combo_counter = finish;
int num_c;
unsigned long long tmp, tmp2;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long n, w, s, e;
n = color >> WID;
w = color >> 1;
s = color << WID;
e = color << 1;
tmp = (color & n & s);
tmp = tmp | (tmp >> WID) | (tmp << WID);
tmp2 = (color & w & e);
tmp2 = tmp2 | (tmp2 >> 1 ) | (tmp2 << 1 );
isErase_tables[num_c] = (color & tmp) | (color & tmp2);
}
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long isErase_table = isErase_tables[num_c];
color_table[num_c] = color_table[num_c] & (~isErase_table);
unsigned long long p = 1L << (WID+1);
while(isErase_table) {
while(!(isErase_table & p)){
p = p << 1;
}
tmp = p;
color_combo[combo_counter] = num_c;
unsigned long long tmp_old;
do{
tmp_old = tmp;
tmp = (tmp | (tmp << 1) | (tmp >> 1) | (tmp << WID) | (tmp >> WID)) & isErase_table;
}while(tmp_old != tmp);
isErase_table = isErase_table & (~tmp);
unsigned long long bits = tmp;
bits = (bits & 0x5555555555555555LU) + (bits >> 1 & 0x5555555555555555LU);
bits = (bits & 0x3333333333333333LU) + (bits >> 2 & 0x3333333333333333LU);
bits = bits + (bits >> 4) & 0x0F0F0F0F0F0F0F0FLU;
bits = bits + (bits >> 8);
bits = bits + (bits >> 16);
bits = bits + (bits >> 32) & 0x0000007F;
num_drops_combo[combo_counter] = bits;
isLine_combo[combo_counter] = ((tmp >> (WID +1)) & 63) == 63
|| ((tmp >> (WID*2+1)) & 63) == 63
|| ((tmp >> (WID*3+1)) & 63) == 63
|| ((tmp >> (WID*4+1)) & 63) == 63
|| ((tmp >> (WID*5+1)) & 63) == 63;
combo_counter++;
}
}
if(finish != combo_counter){
unsigned long long exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
unsigned long long exist_org;
do{
exist_org = exist_table;
unsigned long long exist_u = (exist_table >> WID) | 138538465099776L;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long color_u = color & exist_u;
unsigned long long color_d = (color << WID) & (~exist_table) & (~35465847065542656L);
color_table[num_c] = color_u | color_d;
}
exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
}while(exist_org != exist_table);
}
return combo_counter;
}
#undef WID
#define WID 9
__device__ inline int one_step_big_dev(unsigned long long *color_table, int *color_combo, int *num_drops_combo, int *isLine_combo, int finish){
// 0 → width
// ↓
// hight
// 000000000
// 000000000
// 000000000
// 000000000
// 000000000
// 000000010
// 000000111
// 000000010
unsigned long long isErase_tables[NUM_COLORS];
int combo_counter = finish;
int num_c;
unsigned long long tmp, tmp2;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
    // AND the board with its own vertical and horizontal shifts; the matching cells and their up/down and left/right neighbours are the bits to erase
unsigned long long n, w, s, e;
n = color >> WID;
w = color >> 1;
s = color << WID;
e = color << 1;
tmp = (color & n & s);
tmp = tmp | (tmp >> WID) | (tmp << WID);
tmp2 = (color & w & e);
tmp2 = tmp2 | (tmp2 >> 1 ) | (tmp2 << 1 );
isErase_tables[num_c] = (color & tmp) | (color & tmp2);
//isErase_table = (color & tmp) | (color & tmp2);
}
// #if NUM_COLORS==2
// if(isErase_tables[0] == isErase_tables[1])
// return combo_counter;
  //   // If isErase_table[0..N] == 0, i.e. there are no drops to erase, the rest of the processing is unnecessary.
  //   // However, probably because of warp divergence, it is faster without this check (at least on the GPU).
  //   // In that case there would be no need to split the loop via the isErase table, but splitting is likely faster thanks to compiler optimization.
// #endif
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long isErase_table = isErase_tables[num_c];
color_table[num_c] = color_table[num_c] & (~isErase_table);
unsigned long long p = 1L << (WID+1);
while(isErase_table) {
while(!(isErase_table & p)){
p = p << 1;
}
tmp = p;
color_combo[combo_counter] = num_c;
unsigned long long tmp_old;
do{
tmp_old = tmp;
tmp = (tmp | (tmp << 1) | (tmp >> 1) | (tmp << WID) | (tmp >> WID)) & isErase_table;
}while(tmp_old != tmp);
isErase_table = isErase_table & (~tmp);
// int b1, b2, b3, b4, b5, b6;
// b1 = tmp >> (WID*1+1) & 127;
// b2 = tmp >> (WID*2+1) & 127;
// b3 = tmp >> (WID*3+1) & 127;
// b4 = tmp >> (WID*4+1) & 127;
// b5 = tmp >> (WID*5+1) & 127;
// b6 = tmp >> (WID*6+1) & 127;
// num_drops_combo[combo_counter] = bit_count_table[b1] + bit_count_table[b2]
// + bit_count_table[b3] + bit_count_table[b4] + bit_count_table[b5] + bit_count_table[b6];
// unsigned long long bits = tmp;
// bits = (bits & 0x5555555555555555) + (bits >> 1 & 0x5555555555555555);
// bits = (bits & 0x3333333333333333) + (bits >> 2 & 0x3333333333333333);
// bits = (bits & 0x0f0f0f0f0f0f0f0f) + (bits >> 4 & 0x0f0f0f0f0f0f0f0f);
// bits = (bits & 0x00ff00ff00ff00ff) + (bits >> 8 & 0x00ff00ff00ff00ff);
// bits = (bits & 0x0000ffff0000ffff) + (bits >>16 & 0x0000ffff0000ffff);
// num_drops_combo[combo_counter] = (bits & 0x00000000ffffffff) + (bits >>32 & 0x00000000ffffffff);
// bits = (bits & 0x5555555555555555LU) + (bits >> 1 & 0x5555555555555555LU);
// bits = (bits & 0x3333333333333333LU) + (bits >> 2 & 0x3333333333333333LU);
// bits = bits + (bits >> 4) & 0x0F0F0F0F0F0F0F0FLU;
// bits = bits + (bits >> 8);
// bits = bits + (bits >> 16);
// bits = bits + (bits >> 32) & 0x0000007F;
// num_drops_combo[combo_counter] = bits;
unsigned int u = tmp >> 32;
unsigned int l = tmp;
u = (u & 0x55555555) + (u >> 1 & 0x55555555);
u = (u & 0x33333333) + (u >> 2 & 0x33333333);
u = u + (u >> 4) & 0x0F0F0F0F;
u = u + (u >> 8);
u = u + (u >> 16) & 0x0000007F;
l = (l & 0x55555555) + (l >> 1 & 0x55555555);
l = (l & 0x33333333) + (l >> 2 & 0x33333333);
l = l + (l >> 4) & 0x0F0F0F0F;
l = l + (l >> 8);
l = l + (l >> 16) & 0x0000007F;
num_drops_combo[combo_counter] = u + l;
// num_drops_combo[combo_counter] = __popcll(tmp);
isLine_combo[combo_counter] = ((tmp >> (WID +1)) & 127) == 127
|| ((tmp >> (WID*2+1)) & 127) == 127
|| ((tmp >> (WID*3+1)) & 127) == 127
|| ((tmp >> (WID*4+1)) & 127) == 127
|| ((tmp >> (WID*5+1)) & 127) == 127
|| ((tmp >> (WID*6+1)) & 127) == 127;
// bits = tmp;
// bits = bits & (bits >> 1);
// bits = bits & (bits >> 2);
// bits = bits & (bits >> 3);
// isLine_combo[combo_counter] = ((bits & 36099303471055872L) != 0);
combo_counter++;
}
}
if(finish != combo_counter){
unsigned long long exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
unsigned long long exist_org;
do{
exist_org = exist_table;
unsigned long long exist_u = (exist_table >> WID) | 4575657221408423936L;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long color_u = color & exist_u;
unsigned long long color_d = (color << WID) & (~exist_table);
color_table[num_c] = color_u | color_d;
}
exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
}while(exist_org != exist_table);
}
return combo_counter;
}
#undef WID
#endif
__device__ inline float return_attack_dev(int combo_counter, int *color_combo, int *num_drops_combo, int *isLine_combo, int LS, int strong, float line, float way){
// used for simulation mode
// [FIXME] check only Green attack
int num_line = 0;
float AT = 1.0;
float attack = 0;
float l = 1.0;
int i;
for(i = 0;i < combo_counter;i++){
int color = color_combo[i];
float drop_pwr;
switch(color){
case MAINCOLOR:
drop_pwr = num_drops_combo[i]==4 ? (1+0.25*(num_drops_combo[i]-3))*way : 1+0.25*(num_drops_combo[i]-3);
if(strong)
drop_pwr = drop_pwr * (1+0.06*num_drops_combo[i]);
attack += drop_pwr;
if(isLine_combo[i]) num_line++;
break;
default:
break;
}
}
int count;
switch(LS){
case HERO:
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 8){
l = 16;
}else if(num_drops == 7 && l < 12.25){
l = 12.25;
}else if(num_drops == 6 && l < 9){
l = 9;
}
}
}
break;
case SONIA:
if(combo_counter < 6)
l = 6.25;
else
l = 2.75*2.75;
break;
case KRISHNA:
count = 0;
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR == color_combo[i]){
count++;
int num_drops = num_drops_combo[i];
if(num_drops == 5)
l = 2.25;
}
}
if(count == 2)
l = l * 3 * 3;
else if(count >= 3)
l = l * 4.5 * 4.5;
else
l = 1;
break;
case BASTET:
if(combo_counter == 5)
l = 3.0*3.0;
else if(combo_counter == 6)
l = 3.5*3.5;
else if(combo_counter >= 7)
l = 4.0*4.0;
else
l = 1.0;
break;
case LAKU_PARU:
l = 6.25;
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR != color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 5)
l = 25;
}
}
break;
default:
break;
}
attack = attack * (1+0.25*(combo_counter-1)) * AT * l * (1+0.1*line*num_line) ;
return attack;
}
__device__ inline void return_attack_double_dev(float *power, const int combo_counter, int *const color_combo, int * const num_drops_combo, int * const isLine_combo, const int LS, const int strong, const float line, const float way){
// used for simulation mode
// [FIXME] check only Green attack
const float AT = 1.0;
int num_line_m = 0;
float attack_m = 0;
int num_line_s = 0;
float attack_s = 0;
float l_m = 1.0;
float l_s = 1.0;
int i;
float drop_pwr;
for(i = 0;i < combo_counter;i++){
int color = color_combo[i];
if(color == MAINCOLOR){
drop_pwr = num_drops_combo[i]==4 ? (1+0.25*(num_drops_combo[i]-3))*way : 1+0.25*(num_drops_combo[i]-3);
if(strong)
drop_pwr = drop_pwr * (1+0.06*num_drops_combo[i]);
attack_m += drop_pwr;
if(isLine_combo[i]) num_line_m++;
}else{
drop_pwr = num_drops_combo[i]==4 ? (1+0.25*(num_drops_combo[i]-3))*way : 1+0.25*(num_drops_combo[i]-3);
if(strong)
drop_pwr = drop_pwr * (1+0.06*num_drops_combo[i]);
attack_s += drop_pwr;
if(isLine_combo[i]) num_line_s++;
}
}
int count_m;
int count_s;
switch(LS){
case HERO:
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 8){
l_m = 16;
}else if(num_drops == 7 && l_m < 12.25){
l_m = 12.25;
}else if(num_drops == 6 && l_m < 9){
l_m = 9;
}
}
if(SUBCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 8){
l_s = 16;
}else if(num_drops == 7 && l_s < 12.25){
l_s = 12.25;
}else if(num_drops == 6 && l_s < 9){
l_s = 9;
}
}
}
break;
case SONIA:
if(combo_counter < 6){
l_m = 6.25;
l_s = 6.25;
}else{
l_m = 2.75*2.75;
l_s = 2.75*2.75;
}
break;
case KRISHNA:
count_m = 0;
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR == color_combo[i]){
count_m++;
int num_drops = num_drops_combo[i];
if(num_drops == 5)
l_m = 2.25;
}
}
if(count_m == 2)
l_m = l_m * 3 * 3;
else if(count_m >= 3)
l_m = l_m * 4.5 * 4.5;
else
l_m = 1;
count_s = 0;
for(i = 0;i < combo_counter;i++){
if(SUBCOLOR == color_combo[i]){
count_s++;
int num_drops = num_drops_combo[i];
if(num_drops == 5)
l_s = 2.25;
}
}
if(count_s == 2)
l_s = l_s * 3 * 3;
else if(count_s >= 3)
l_s = l_s * 4.5 * 4.5;
else
l_s = 1;
break;
case BASTET:
if(combo_counter == 5){
l_m = 3.0*3.0;
l_s = 3.0*3.0;
}else if(combo_counter == 6){
l_m = 3.5*3.5;
l_s = 3.5*3.5;
}else if(combo_counter >= 7){
l_m = 4.0*4.0;
l_s = 4.0*4.0;
}else{
l_m = 1.0;
l_s = 1.0;
}
break;
case LAKU_PARU:
l_m = 6.25;
l_s = 6.25;
for(i = 0;i < combo_counter;i++){
if(SUBCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 5)
l_m = 25;
}
if(MAINCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 5)
l_s = 25;
}
}
break;
default:
break;
}
power[0] = attack_m * (1+0.25*(combo_counter-1)) * AT * l_m * (1+0.1*line*num_line_m);
power[1] = attack_s * (1+0.25*(combo_counter-1)) * AT * l_s * (1+0.1*line*num_line_s);
}
#define COMBO_LENGTH 7
#define REVERSE_LENGTH 32
// __global__ void simulate_all_kernel_small(int num_attacks, const int * __restrict__ num_patterns10, unsigned long long *maxID, float *maxPower, float line, float way, const int * __restrict__ tableID_prefix10, int *tableID_table10, const int * __restrict__ reversed_bit_table, /*const int * __restrict__ bit_count_table,*/ int LS, int strong){
// int tid = threadIdx.x;
// int bid = blockIdx.x;
// int gdim = gridDim.x;
// int bdim = blockDim.x;
// int color_combo[COMBO_LENGTH];
// int num_drops_combo[COMBO_LENGTH];
// int isLine_combo[COMBO_LENGTH];
// int i,j,k;
// int rank = LOCALRANKINGLENGTH;
// float MP[LOCALRANKINGLENGTH];
// unsigned long long MID[LOCALRANKINGLENGTH];
// unsigned long long tableID = 0;
// unsigned long long color_table[NUM_COLORS];
// int num_c;
// for(num_c = 0;num_c < NUM_COLORS;num_c++){
// color_table[num_c] = 0;
// }
// for(i = 0;i < rank;i++){
// MID[i] = 0;
// MP[i] = 0.0;
// }
// int u, l, uu, ll;
// int bit_num[4];
// for(u = 0;u <= num_attacks;u++){
// l = num_attacks - u;
// if(u <= 10 && l <= 10){
// int uoffset = tableID_prefix10[u];
// int loffset = tableID_prefix10[l];
// for(uu = bid;uu < num_patterns10[u];uu+=gdim){
// unsigned long long upperID = (unsigned long long)tableID_table10[uu+uoffset];
// for(ll = tid;ll < num_patterns10[l];ll+=bdim){
// unsigned long long lowerID = (unsigned long long)tableID_table10[ll+loffset];
// tableID = (upperID << 10) | lowerID;
// unsigned long long reversed = 0;
// for(i = 0;i < 4; i++){
// bit_num[i] = (int)((tableID >> (5*i) ) & (REVERSE_LENGTH-1));
// reversed += ((unsigned long long)reversed_bit_table[bit_num[i]]) << (5*i);
// }
// if(tableID <= reversed){
// //init_combo_info(color_combo, num_drops_combo, isLine_combo, COMBO_LENGTH);
// int combo_counter = 0;
// //tableID = 1103874885640L;
// //tableID = 42656280L;
// generate_table_small_dev(tableID, color_table);
// int returned_combo_counter = 0;
// do{
// // if(blockDim.x * blockIdx.x + threadIdx.x == 0){
// // printf("ID %lld\n",tableID);
// // print_table(color_table);
// // print_table2(color_table[0]);
// // print_table2(color_table[1]);
// // }
// combo_counter = returned_combo_counter;
// returned_combo_counter = one_step_small_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
// //printf("combo = %d\n", returned_combo_counter);
// }while(returned_combo_counter != combo_counter);
// float power = return_attack_dev(combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
// if(MP[rank-1] < power){
// for(j = 0;j < rank;j++){
// if(MP[j] < power){
// for(k = rank-2;k >= j;k--){
// MID[k+1] = MID[k];
// MP[k+1] = MP[k];
// }
// MID[j] = tableID;
// MP[j] = power;
// break;
// }
// }
// }
// }
// }
// }
// }
// }
// int id = blockDim.x * blockIdx.x + threadIdx.x;
// int step = blockDim.x * gridDim.x;
// for(i = 0;i < rank;i++){
// maxPower[id + step*i] = MP[i];
// maxID[id + step*i] = MID[i];
// }
// }
__global__ void simulate_all_kernel_small(const int num_attacks, const int * __restrict__ num_patterns10, unsigned long long *maxID, float *maxPower, const float line, const float way, const int * __restrict__ tableID_prefix10, const int * __restrict__ tableID_table10, const int * __restrict__ reversed_bit_table, /*const int * __restrict__ bit_count_table,*/ const int LS, const int strong){
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bdim = blockDim.x;
int color_combo[COMBO_LENGTH];
int num_drops_combo[COMBO_LENGTH];
int isLine_combo[COMBO_LENGTH];
int i,j,k;
int rank = LOCALRANKINGLENGTH;
float MP[2][LOCALRANKINGLENGTH];
unsigned long long MID[2][LOCALRANKINGLENGTH];
unsigned long long tableID = 0;
int ms;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
MID[ms][i] = 0;
MP[ms][i] = 0.0;
}
}
int u, l, uu, ll;
for(u = 0;u <= num_attacks;u++){
l = num_attacks - u;
if(u <= 10 && l <= 10){
int uoffset = tableID_prefix10[u];
int loffset = tableID_prefix10[l];
for(uu = bid;uu < num_patterns10[u];uu+=gdim){
unsigned long long upperID = (unsigned long long)tableID_table10[uu+uoffset];
for(ll = tid;ll < num_patterns10[l];ll+=bdim){
unsigned long long lowerID = (unsigned long long)tableID_table10[ll+loffset];
tableID = (upperID << 10) | lowerID;
unsigned long long reversed = 0;
int reversed_bit[4];
for(i = 0;i < 4; i++){
reversed_bit[i] = ((tableID >> (5*i) ) & (REVERSE_LENGTH-1));
reversed = reversed | ((unsigned long long)reversed_bit_table[reversed_bit[i]]) << (5*i);
}
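          // 'reversed' is the board ID with each row's bits reversed (horizontal mirror); mirrored boards produce identical combos, so only the smaller ID of each pair is evaluated.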
if(tableID <= reversed){
//init_combo_info(color_combo, num_drops_combo, isLine_combo, COMBO_LENGTH);
unsigned long long color_table[NUM_COLORS];
generate_table_small_dev(tableID, color_table);
int combo_counter;
int returned_combo_counter = 0;
do{
combo_counter = returned_combo_counter;
returned_combo_counter = one_step_small_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
}while(returned_combo_counter != combo_counter);
//float power = return_attack_dev(combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
float power[2];
return_attack_double_dev(power, combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
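            // Insert the result into the thread-local top-'rank' lists (simple insertion into a sorted array), index 0 for the main color and index 1 for the sub color.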
if(MP[0][rank-1] < power[0]){
for(j = 0;j < rank;j++){
if(MP[0][j] < power[0]){
for(k = rank-2;k >= j;k--){
MID[0][k+1] = MID[0][k];
MP[0][k+1] = MP[0][k];
}
MID[0][j] = tableID;
MP[0][j] = power[0];
break;
}
}
}
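            // Sub-color ranking: the sub-color result corresponds to the complemented board (main/sub swapped), so store the inverted 20-bit ID.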
if(MP[1][rank-1] < power[1]){
for(j = 0;j < rank;j++){
if(MP[1][j] < power[1]){
for(k = rank-2;k >= j;k--){
MID[1][k+1] = MID[1][k];
MP[1][k+1] = MP[1][k];
}
MID[1][j] = (~tableID) & 0x000FFFFF;
MP[1][j] = power[1];
break;
}
}
}
}
}
}
}
}
int id = blockDim.x * blockIdx.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
maxPower[id + step*i + ms*step*rank] = MP [ms][i];
maxID [id + step*i + ms*step*rank] = MID[ms][i];
}
}
}
#undef COMBO_LENGTH
#undef REVERSE_LENGTH
#define COMBO_LENGTH 10
#define REVERSE_LENGTH 64
// __global__ void simulate_all_kernel_normal(int num_attacks, const int * __restrict__ num_patterns15, unsigned long long *maxID, float *maxPower, float line, float way, const int * __restrict__ tableID_prefix15, int *tableID_table15, const int * __restrict__ reversed_bit_table, /*const int * __restrict__ bit_count_table,*/ int LS, int strong){
// int tid = threadIdx.x;
// int bid = blockIdx.x;
// int gdim = gridDim.x;
// int bdim = blockDim.x;
// int color_combo[COMBO_LENGTH];
// int num_drops_combo[COMBO_LENGTH];
// int isLine_combo[COMBO_LENGTH];
// int i,j,k;
// int rank = LOCALRANKINGLENGTH;
// float MP[LOCALRANKINGLENGTH];
// unsigned long long MID[LOCALRANKINGLENGTH];
// unsigned long long tableID = 0;
// unsigned long long color_table[NUM_COLORS];
// int num_c;
// for(num_c = 0;num_c < NUM_COLORS;num_c++){
// color_table[num_c] = 0;
// }
// for(i = 0;i < rank;i++){
// MID[i] = 0;
// MP[i] = 0.0;
// }
// int u, l, uu, ll;
// int bit_num[5];
// for(u = 0;u <= num_attacks;u++){
// l = num_attacks - u;
// if(u <= 15 && l <= 15){
// int uoffset = tableID_prefix15[u];
// int loffset = tableID_prefix15[l];
// for(uu = bid;uu < num_patterns15[u];uu+=gdim){
// unsigned long long upperID = (unsigned long long)tableID_table15[uu+uoffset];
// for(ll = tid;ll < num_patterns15[l];ll+=bdim){
// unsigned long long lowerID = (unsigned long long)tableID_table15[ll+loffset];
// tableID = (upperID << 15) | lowerID;
// unsigned long long reversed = 0;
// for(i = 0;i < 5; i++){
// bit_num[i] = (int)((tableID >> (6*i) ) & (REVERSE_LENGTH-1));
// reversed += ((unsigned long long)reversed_bit_table[bit_num[i]]) << (6*i);
// }
// if(tableID <= reversed){
// //init_combo_info(color_combo, num_drops_combo, isLine_combo, COMBO_LENGTH);
// int combo_counter = 0;
// //tableID = 1103874885640L;
// //tableID = 42656280L;
// generate_table_normal_dev(tableID, color_table);
// int returned_combo_counter = 0;
// do{
// // if(blockDim.x * blockIdx.x + threadIdx.x == 0){
// // printf("ID %lld\n",tableID);
// // print_table(color_table);
// // print_table2(color_table[0]);
// // print_table2(color_table[1]);
// // }
// combo_counter = returned_combo_counter;
// returned_combo_counter = one_step_normal_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
// //printf("combo = %d\n", returned_combo_counter);
// }while(returned_combo_counter != combo_counter);
// float power = return_attack_dev(combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
// if(MP[rank-1] < power){
// for(j = 0;j < rank;j++){
// if(MP[j] < power){
// for(k = rank-2;k >= j;k--){
// MID[k+1] = MID[k];
// MP[k+1] = MP[k];
// }
// MID[j] = tableID;
// MP[j] = power;
// break;
// }
// }
// }
// }
// }
// }
// }
// }
// int id = blockDim.x * blockIdx.x + threadIdx.x;
// int step = blockDim.x * gridDim.x;
// for(i = 0;i < rank;i++){
// maxPower[id + step*i] = MP[i];
// maxID[id + step*i] = MID[i];
// }
// }
__global__ void simulate_all_kernel_normal(const int num_attacks, const int * __restrict__ num_patterns15, unsigned long long *maxID, float *maxPower, const float line, const float way, const int * __restrict__ tableID_prefix15, const int * __restrict__ tableID_table15, const int * __restrict__ reversed_bit_table, /*const int * __restrict__ bit_count_table,*/ const int LS, const int strong){
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bdim = blockDim.x;
int color_combo[COMBO_LENGTH];
int num_drops_combo[COMBO_LENGTH];
int isLine_combo[COMBO_LENGTH];
int i,j,k;
int rank = LOCALRANKINGLENGTH;
float MP[2][LOCALRANKINGLENGTH];
unsigned long long MID[2][LOCALRANKINGLENGTH];
unsigned long long tableID = 0;
int ms;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
MID[ms][i] = 0;
MP[ms][i] = 0.0;
}
}
int u, l, uu, ll;
for(u = 0;u <= num_attacks;u++){
l = num_attacks - u;
if(u <= 15 && l <= 15){
int uoffset = tableID_prefix15[u];
int loffset = tableID_prefix15[l];
for(uu = bid;uu < num_patterns15[u];uu+=gdim){
unsigned long long upperID = (unsigned long long)tableID_table15[uu+uoffset];
for(ll = tid;ll < num_patterns15[l];ll+=bdim){
unsigned long long lowerID = (unsigned long long)tableID_table15[ll+loffset];
tableID = (upperID << 15) | lowerID;
unsigned long long reversed = 0;
int reversed_bit[5];
for(i = 0;i < 5; i++){
reversed_bit[i] = ((tableID >> (6*i) ) & (REVERSE_LENGTH-1));
reversed = reversed | ((unsigned long long)reversed_bit_table[reversed_bit[i]]) << (6*i);
}
if(tableID <= reversed){
//init_combo_info(color_combo, num_drops_combo, isLine_combo, COMBO_LENGTH);
unsigned long long color_table[NUM_COLORS];
generate_table_normal_dev(tableID, color_table);
int combo_counter;
int returned_combo_counter = 0;
do{
combo_counter = returned_combo_counter;
returned_combo_counter = one_step_normal_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
}while(returned_combo_counter != combo_counter);
//float power = return_attack_dev(combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
float power[2];
return_attack_double_dev(power, combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
if(MP[0][rank-1] < power[0]){
for(j = 0;j < rank;j++){
if(MP[0][j] < power[0]){
for(k = rank-2;k >= j;k--){
MID[0][k+1] = MID[0][k];
MP[0][k+1] = MP[0][k];
}
MID[0][j] = tableID;
MP[0][j] = power[0];
break;
}
}
}
if(MP[1][rank-1] < power[1]){
for(j = 0;j < rank;j++){
if(MP[1][j] < power[1]){
for(k = rank-2;k >= j;k--){
MID[1][k+1] = MID[1][k];
MP[1][k+1] = MP[1][k];
}
MID[1][j] = (~tableID) & 0x3FFFFFFF;
MP[1][j] = power[1];
break;
}
}
}
}
}
}
}
}
int id = blockDim.x * blockIdx.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
maxPower[id + step*i + ms*step*rank] = MP [ms][i];
maxID [id + step*i + ms*step*rank] = MID[ms][i];
}
}
}
#undef COMBO_LENGTH
#undef REVERSE_LENGTH
#define COMBO_LENGTH 14
#define REVERSE_LENGTH 128
__global__ void simulate_all_kernel_big(const int num_attacks, const int * __restrict__ num_patterns21, unsigned long long *maxID, float *maxPower, const float line, const float way, const int * __restrict__ tableID_prefix21, const int * __restrict__ tableID_table21, const int * __restrict__ reversed_bit_table, /*const int * __restrict__ bit_count_table,*/ const int LS, const int strong){
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bdim = blockDim.x;
int color_combo[COMBO_LENGTH];
int num_drops_combo[COMBO_LENGTH];
int isLine_combo[COMBO_LENGTH];
int i,j,k;
int rank = LOCALRANKINGLENGTH;
float MP[2][LOCALRANKINGLENGTH];
unsigned long long MID[2][LOCALRANKINGLENGTH];
unsigned long long tableID = 0;
int ms;
// unsigned long long color_table[NUM_COLORS];
// int num_c;
// for(num_c = 0;num_c < NUM_COLORS;num_c++){
// color_table[num_c] = 0;
// }
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
MID[ms][i] = 0;
MP[ms][i] = 0.0;
}
}
int u, l, uu, ll;
for(u = 0;u <= num_attacks;u++){
l = num_attacks - u;
if(u <= 21 && l <= 21){
int uoffset = tableID_prefix21[u];
int loffset = tableID_prefix21[l];
for(uu = bid;uu < num_patterns21[u];uu+=gdim){
unsigned long long upperID = (unsigned long long)tableID_table21[uu+uoffset];
for(ll = tid;ll < num_patterns21[l];ll+=bdim){
unsigned long long lowerID = (unsigned long long)tableID_table21[ll+loffset];
tableID = (upperID << 21) | lowerID;
unsigned long long reversed = 0;
int reversed_bit[6];
for(i = 0;i < 6; i++){
reversed_bit[i] = ((tableID >> (7*i) ) & (REVERSE_LENGTH-1));
reversed = reversed | ((unsigned long long)reversed_bit_table[reversed_bit[i]]) << (7*i);
}
if(tableID <= reversed){
//init_combo_info(color_combo, num_drops_combo, isLine_combo, COMBO_LENGTH);
unsigned long long color_table[NUM_COLORS];
generate_table_big_dev(tableID, color_table);
int combo_counter;
int returned_combo_counter = 0;
do{
combo_counter = returned_combo_counter;
returned_combo_counter = one_step_big_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
}while(returned_combo_counter != combo_counter);
//float power = return_attack_dev(combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
float power[2];
return_attack_double_dev(power, combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
if(MP[0][rank-1] < power[0]){
for(j = 0;j < rank;j++){
if(MP[0][j] < power[0]){
for(k = rank-2;k >= j;k--){
MID[0][k+1] = MID[0][k];
MP[0][k+1] = MP[0][k];
}
MID[0][j] = tableID;
MP[0][j] = power[0];
break;
}
}
}
if(MP[1][rank-1] < power[1]){
for(j = 0;j < rank;j++){
if(MP[1][j] < power[1]){
for(k = rank-2;k >= j;k--){
MID[1][k+1] = MID[1][k];
MP[1][k+1] = MP[1][k];
}
MID[1][j] = (~tableID) & 0x000003FFFFFFFFFFLU;
MP[1][j] = power[1];
break;
}
}
}
}
}
}
}
}
int id = blockDim.x * blockIdx.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
maxPower[id + step*i + ms*step*rank] = MP [ms][i];
maxID [id + step*i + ms*step*rank] = MID[ms][i];
}
}
}
__global__ void simulate_all_kernel_big_21(const int * __restrict__ num_patterns21, unsigned long long *maxID, float *maxPower, const float line, const float way, const int * __restrict__ tableID_prefix21, const int * __restrict__ tableID_table21, const int * __restrict__ reversed_bit_table, /*const int * __restrict__ bit_count_table,*/ const int LS, const int strong){
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bdim = blockDim.x;
int color_combo[COMBO_LENGTH];
int num_drops_combo[COMBO_LENGTH];
int isLine_combo[COMBO_LENGTH];
int i,j,k;
int rank = LOCALRANKINGLENGTH;
float MP[2][LOCALRANKINGLENGTH];
unsigned long long MID[2][LOCALRANKINGLENGTH];
unsigned long long tableID = 0;
int ms;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
MID[ms][i] = 0;
MP[ms][i] = 0.0;
}
}
int u, l, uu, ll;
for(u = 0;u <= 21;u++){
l = 21 - u;
int uoffset = tableID_prefix21[u];
int loffset = tableID_prefix21[l];
for(uu = bid;uu < num_patterns21[u];uu+=gdim){
unsigned long long upperID = (unsigned long long)tableID_table21[uu+uoffset];
for(ll = tid;ll < num_patterns21[l];ll+=bdim){
unsigned long long lowerID = (unsigned long long)tableID_table21[ll+loffset];
tableID = (upperID << 21) | lowerID;
unsigned long long reversed = 0;
int reversed_bit;
for(i = 0;i < 6; i++){
reversed_bit = ((tableID >> (7*i) ) & (REVERSE_LENGTH-1));
reversed = reversed | ((unsigned long long)reversed_bit_table[reversed_bit]) << (7*i);
}
unsigned long long inversed = (~tableID) & 0x000003FFFFFFFFFFLU;
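        // For the 21-21 split the complemented board is the same position with main/sub swapped, so also require tableID <= inversed to avoid evaluating each position twice.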
if(tableID <= reversed && tableID <= inversed){
unsigned long long color_table[NUM_COLORS];
generate_table_big_dev(tableID, color_table);
int combo_counter;
int returned_combo_counter = 0;
do{
combo_counter = returned_combo_counter;
returned_combo_counter = one_step_big_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
}while(returned_combo_counter != combo_counter);
float power[2];
return_attack_double_dev(power, combo_counter, color_combo, num_drops_combo, isLine_combo, LS, strong, line, way);
if(MP[0][rank-1] < power[0]){
for(j = 0;j < rank;j++){
if(MP[0][j] < power[0]){
for(k = rank-2;k >= j;k--){
MID[0][k+1] = MID[0][k];
MP[0][k+1] = MP[0][k];
}
MID[0][j] = tableID;
MP[0][j] = power[0];
break;
}
}
}
if(MP[1][rank-1] < power[1]){
for(j = 0;j < rank;j++){
if(MP[1][j] < power[1]){
for(k = rank-2;k >= j;k--){
MID[1][k+1] = MID[1][k];
MP[1][k+1] = MP[1][k];
}
MID[1][j] = (~tableID) & 0x000003FFFFFFFFFFLU;
MP[1][j] = power[1];
break;
}
}
}
}
}
}
}
int id = blockDim.x * blockIdx.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
maxPower[id + step*i + ms*step*rank] = MP [ms][i];
maxID [id + step*i + ms*step*rank] = MID[ms][i];
}
}
}
#define WID 9
__global__ void simulate_all_kernel_big_inlined(const int num_attacks, const int * __restrict__ num_patterns21, unsigned long long *maxID, float *maxPower, const float line, const float way, const int * __restrict__ tableID_prefix21, const int * __restrict__ tableID_table21, const int * __restrict__ reversed_bit_table, const int LS, const int strong){
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bdim = blockDim.x;
int color_combo[COMBO_LENGTH];
int num_drops_combo[COMBO_LENGTH];
int isLine_combo[COMBO_LENGTH];
int i,j,k;
const int rank = LOCALRANKINGLENGTH;
float MP[2][LOCALRANKINGLENGTH];
unsigned long long MID[2][LOCALRANKINGLENGTH];
unsigned long long tableID = 0;
int ms;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
MID[ms][i] = 0;
MP[ms][i] = 0.0;
}
}
int u, l, uu, ll;
for(u = 0;u <= num_attacks;u++){
l = num_attacks - u;
if(u <= 21 && l <= 21){
int uoffset = tableID_prefix21[u];
int loffset = tableID_prefix21[l];
for(uu = bid;uu < num_patterns21[u];uu+=gdim){
unsigned long long upperID = (unsigned long long)tableID_table21[uu+uoffset];
for(ll = tid;ll < num_patterns21[l];ll+=bdim){
unsigned long long lowerID = (unsigned long long)tableID_table21[ll+loffset];
tableID = (upperID << 21) | lowerID;
unsigned long long reversed = 0;
int reversed_bit[6];
for(i = 0;i < 6; i++){
reversed_bit[i] = ((tableID >> (7*i) ) & (REVERSE_LENGTH-1));
reversed = reversed | ((unsigned long long)reversed_bit_table[reversed_bit[i]]) << (7*i);
}
if(tableID <= reversed){
unsigned long long color_table[NUM_COLORS];
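            // Inlined generate_table_big_dev: unpack the six 7-bit rows of the 42-bit ID into a
            // 9-wide padded bitboard (empty border cells absorb the neighbour shifts below);
            // color 1 is simply the complement of color 0.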
unsigned long long b0, b1, b2, b3, b4, b5;
unsigned long long ID = tableID;
b0 = ID & 127;
b1 = (ID >> 7 ) & 127;
b2 = (ID >> 14) & 127;
b3 = (ID >> 21) & 127;
b4 = (ID >> 28) & 127;
b5 = (ID >> 35) & 127;
color_table[0] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1))
| (b4 << (WID*5+1)) | (b5 << (WID*6+1));
ID = ~ID;
b0 = ID & 127;
b1 = (ID >> 7 ) & 127;
b2 = (ID >> 14) & 127;
b3 = (ID >> 21) & 127;
b4 = (ID >> 28) & 127;
b5 = (ID >> 35) & 127;
color_table[1] = (b0 << (WID+1)) | (b1 << (WID*2+1))
| (b2 << (WID*3+1)) | (b3 << (WID*4+1))
| (b4 << (WID*5+1)) | (b5 << (WID*6+1));
int combo_counter = 0;
int combo_counter_org;
//returned_combo_counter = one_step_big_dev(color_table, color_combo, num_drops_combo, isLine_combo, combo_counter);
do{
combo_counter_org = combo_counter;
unsigned long long isErase_tables[NUM_COLORS];
int num_c;
unsigned long long tmp, tmp2;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long n, w, s, e;
n = color >> WID;
w = color >> 1;
s = color << WID;
e = color << 1;
tmp = (color & n & s);
tmp = tmp | (tmp >> WID) | (tmp << WID);
tmp2 = (color & w & e);
tmp2 = tmp2 | (tmp2 >> 1 ) | (tmp2 << 1 );
isErase_tables[num_c] = (color & tmp) | (color & tmp2);
}
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long isErase_table = isErase_tables[num_c];
color_table[num_c] = color_table[num_c] & (~isErase_table);
unsigned long long p = 1L << (WID+1);
while(isErase_table) {
while(!(isErase_table & p)){
p = p << 1;
}
tmp = p;
color_combo[combo_counter] = num_c;
unsigned long long tmp_old;
do{
tmp_old = tmp;
tmp = (tmp | (tmp << 1) | (tmp >> 1) | (tmp << WID) | (tmp >> WID)) & isErase_table;
}while(tmp_old != tmp);
isErase_table = isErase_table & (~tmp);
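                  // Population count of the erased cluster: count set bits in the upper and lower 32-bit halves to get the number of drops in this combo.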
unsigned int ubits = tmp >> 32;
unsigned int lbits = tmp;
ubits = (ubits & 0x55555555) + (ubits >> 1 & 0x55555555);
ubits = (ubits & 0x33333333) + (ubits >> 2 & 0x33333333);
ubits = ubits + (ubits >> 4) & 0x0F0F0F0F;
ubits = ubits + (ubits >> 8);
ubits = ubits + (ubits >> 16) & 0x0000007F;
lbits = (lbits & 0x55555555) + (lbits >> 1 & 0x55555555);
lbits = (lbits & 0x33333333) + (lbits >> 2 & 0x33333333);
lbits = lbits + (lbits >> 4) & 0x0F0F0F0F;
lbits = lbits + (lbits >> 8);
lbits = lbits + (lbits >> 16) & 0x0000007F;
num_drops_combo[combo_counter] = ubits + lbits;
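                  // A combo counts as a 'line' (row clear) when it removes all 7 drops of some row.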
isLine_combo[combo_counter] = ((tmp >> (WID +1)) & 127) == 127
|| ((tmp >> (WID*2+1)) & 127) == 127
|| ((tmp >> (WID*3+1)) & 127) == 127
|| ((tmp >> (WID*4+1)) & 127) == 127
|| ((tmp >> (WID*5+1)) & 127) == 127
|| ((tmp >> (WID*6+1)) & 127) == 127;
combo_counter++;
}
}
if(combo_counter_org != combo_counter){
unsigned long long exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
unsigned long long exist_org;
do{
exist_org = exist_table;
unsigned long long exist_u = (exist_table >> WID) | 4575657221408423936L;
for(num_c = 0;num_c < NUM_COLORS;num_c++){
unsigned long long color = color_table[num_c];
unsigned long long color_u = color & exist_u;
unsigned long long color_d = (color << WID) & (~exist_table);
color_table[num_c] = color_u | color_d;
}
exist_table = color_table[0];
for(num_c = 1;num_c < NUM_COLORS;num_c++){
exist_table = exist_table | color_table[num_c];
}
}while(exist_org != exist_table);
}
}while(combo_counter_org != combo_counter);
float power[2];
const float AT = 1.0;
int num_line_m = 0;
float attack_m = 0;
int num_line_s = 0;
float attack_s = 0;
float l_m = 1.0;
float l_s = 1.0;
int i;
float drop_pwr;
for(i = 0;i < combo_counter;i++){
int color = color_combo[i];
if(color == MAINCOLOR){
drop_pwr = num_drops_combo[i]==4 ? (1+0.25*(num_drops_combo[i]-3))*way : 1+0.25*(num_drops_combo[i]-3);
if(strong)
drop_pwr = drop_pwr * (1+0.06*num_drops_combo[i]);
attack_m += drop_pwr;
if(isLine_combo[i]) num_line_m++;
}else{
drop_pwr = num_drops_combo[i]==4 ? (1+0.25*(num_drops_combo[i]-3))*way : 1+0.25*(num_drops_combo[i]-3);
if(strong)
drop_pwr = drop_pwr * (1+0.06*num_drops_combo[i]);
attack_s += drop_pwr;
if(isLine_combo[i]) num_line_s++;
}
}
int count_m;
int count_s;
switch(LS){
case HERO:
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 8){
l_m = 16;
}else if(num_drops == 7 && l_m < 12.25){
l_m = 12.25;
}else if(num_drops == 6 && l_m < 9){
l_m = 9;
}
}
if(SUBCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 8){
l_s = 16;
}else if(num_drops == 7 && l_s < 12.25){
l_s = 12.25;
}else if(num_drops == 6 && l_s < 9){
l_s = 9;
}
}
}
break;
case SONIA:
if(combo_counter < 6){
l_m = 6.25;
l_s = 6.25;
}else{
l_m = 2.75*2.75;
l_s = 2.75*2.75;
}
break;
case KRISHNA:
count_m = 0;
for(i = 0;i < combo_counter;i++){
if(MAINCOLOR == color_combo[i]){
count_m++;
int num_drops = num_drops_combo[i];
if(num_drops == 5)
l_m = 2.25;
}
}
if(count_m == 2)
l_m = l_m * 3 * 3;
else if(count_m >= 3)
l_m = l_m * 4.5 * 4.5;
else
l_m = 1;
count_s = 0;
for(i = 0;i < combo_counter;i++){
if(SUBCOLOR == color_combo[i]){
count_s++;
int num_drops = num_drops_combo[i];
if(num_drops == 5)
l_s = 2.25;
}
}
if(count_s == 2)
l_s = l_s * 3 * 3;
else if(count_s >= 3)
l_s = l_s * 4.5 * 4.5;
else
l_s = 1;
break;
case BASTET:
if(combo_counter == 5){
l_m = 3.0*3.0;
l_s = 3.0*3.0;
}else if(combo_counter == 6){
l_m = 3.5*3.5;
l_s = 3.5*3.5;
}else if(combo_counter >= 7){
l_m = 4.0*4.0;
l_s = 4.0*4.0;
}else{
l_m = 1.0;
l_s = 1.0;
}
break;
case LAKU_PARU:
l_m = 6.25;
l_s = 6.25;
for(i = 0;i < combo_counter;i++){
if(SUBCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 5)
l_m = 25;
}
if(MAINCOLOR == color_combo[i]){
int num_drops = num_drops_combo[i];
if(num_drops >= 5)
l_s = 25;
}
}
break;
default:
break;
}
power[0] = attack_m * (1+0.25*(combo_counter-1)) * AT * l_m * (1+0.1*line*num_line_m);
            power[1] = attack_s * (1+0.25*(combo_counter-1)) * AT * l_s * (1+0.1*line*num_line_s);
if(MP[0][rank-1] < power[0]){
for(j = 0;j < rank;j++){
if(MP[0][j] < power[0]){
for(k = rank-2;k >= j;k--){
MID[0][k+1] = MID[0][k];
MP[0][k+1] = MP[0][k];
}
MID[0][j] = tableID;
MP[0][j] = power[0];
break;
}
}
}
if(MP[1][rank-1] < power[1]){
for(j = 0;j < rank;j++){
if(MP[1][j] < power[1]){
for(k = rank-2;k >= j;k--){
MID[1][k+1] = MID[1][k];
MP[1][k+1] = MP[1][k];
}
MID[1][j] = (~tableID) & 0x000003FFFFFFFFFFLU;
MP[1][j] = power[1];
break;
}
}
}
}
}
}
}
}
int id = blockDim.x * blockIdx.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for(ms = 0;ms < 2;ms++){
for(i = 0;i < rank;i++){
maxPower[id + step*i + ms*step*rank] = MP [ms][i];
maxID [id + step*i + ms*step*rank] = MID[ms][i];
}
}
}
#undef WID
#undef COMBO_LENGTH
#undef REVERSE_LENGTH
extern "C"
{
void simulate_all_cuda(const int table_size, const int start, const int end, /*int *bit_count_table,*/ int *const reversed_bit_table, int *const tableID_half_table, int *const tableID_half_prefix, /*unsigned long long *const num_patterns,*/ int *const num_patterns_half, const int width, const int hight, const int combo_length, const int LS, const int isStrong, const int line, const int way, const int simuave){
int rank = RANKINGLENGTH;
int i, j, k;
unsigned long long *max_powerID_dev;
float *max_power_dev;
int tsize = NUM_THREAD;
//int gsize = ((num_patterns_omitted[num_attacks]-1)/128+1);
int gsize = NUM_BLOCK;
const int length = gsize*tsize*LOCALRANKINGLENGTH;
unsigned long long max_powerID[2*length];
float max_power[2*length];
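  // One ranking table per possible number of main-color drops; 43 covers the
  // largest board (7x6 = 42 cells) plus zero.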
unsigned long long final_MID[43][rank];
float final_MP[43][rank];
int reverse_length = 1 << width;
int *tableID_half_table_dev, *tableID_half_prefix_dev, *num_patterns_half_dev;
//int *bit_count_table_dev, *reversed_bit_table_dev;
int *reversed_bit_table_dev;
const float pline = (float)line;
const float pway = pow(1.5,way);
const int half_table_size = width*hight/2;
for(i = 0;i < 43;i++){
final_MID[i][0] = 0xFFFFFFFFFFFFFFFFLU;
}
//CUDA_SAFE_CALL(cudaMalloc((void**)&bit_count_table_dev, sizeof(int) * 256));
CUDA_SAFE_CALL(cudaMalloc((void**)&reversed_bit_table_dev, sizeof(int) * reverse_length));
CUDA_SAFE_CALL(cudaMalloc((void**)&tableID_half_table_dev, sizeof(int) * (1 << (width*hight/2))));
CUDA_SAFE_CALL(cudaMalloc((void**)&num_patterns_half_dev, sizeof(int) * (width*hight/2+1)));
CUDA_SAFE_CALL(cudaMalloc((void**)&tableID_half_prefix_dev,sizeof(int) * (width*hight/2+1)));
//CUDA_SAFE_CALL(cudaMemcpy(bit_count_table_dev, bit_count_table, sizeof(int) * 256, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(reversed_bit_table_dev,reversed_bit_table, sizeof(int) * reverse_length, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(tableID_half_table_dev,tableID_half_table, sizeof(int) * (1 << (width*hight/2)), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(num_patterns_half_dev, num_patterns_half, sizeof(int) * (width*hight/2+1), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(tableID_half_prefix_dev,tableID_half_prefix,sizeof(int) * (width*hight/2+1), cudaMemcpyHostToDevice));
  CUDA_SAFE_CALL(cudaMalloc((void**)&max_powerID_dev, sizeof(unsigned long long) * 2 * length));
  CUDA_SAFE_CALL(cudaMalloc((void**)&max_power_dev, sizeof(float) * 2 * length));
//fprintf(stdout,"%d\n",__LINE__);
int num_attacks;
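  // Each launch ranks boards with 'num_attacks' main-color drops; the sub-color results of
  // the same launch cover the complementary (width*hight - num_attacks) count, so iterations
  // past the half-board point are skipped once their complement has already been covered.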
for(num_attacks = start;num_attacks <= end;num_attacks++){
if(half_table_size < num_attacks && num_attacks <= width*hight-start) break;
printf("calculating %2d-%2d & %2d-%2d ...\n", num_attacks, width*hight-num_attacks, width*hight-num_attacks, num_attacks);
//printf("%2d-%2d, line %d, way %d\n", num_attacks, width*hight-num_attacks, line, way);
dim3 grid(gsize,1,1);
dim3 block(tsize,1,1);
#ifdef TIME
cudaDeviceSynchronize();
double t1 = gettimeofday_sec();
#endif
switch(table_size){
case SMALL_TABLE:
simulate_all_kernel_small<<<grid, block>>>(num_attacks, num_patterns_half_dev, max_powerID_dev, max_power_dev, pline, pway, tableID_half_prefix_dev, tableID_half_table_dev, reversed_bit_table_dev, /*bit_count_table_dev,*/ LS, isStrong);
break;
case NORMAL_TABLE:
simulate_all_kernel_normal<<<grid, block>>>(num_attacks, num_patterns_half_dev, max_powerID_dev, max_power_dev, pline, pway, tableID_half_prefix_dev, tableID_half_table_dev, reversed_bit_table_dev, /*bit_count_table_dev,*/ LS, isStrong);
break;
case BIG_TABLE:
if(num_attacks == 21){
simulate_all_kernel_big_21<<<grid, block>>>(num_patterns_half_dev, max_powerID_dev, max_power_dev, pline, pway, tableID_half_prefix_dev, tableID_half_table_dev, reversed_bit_table_dev, /*bit_count_table_dev,*/ LS, isStrong);
}else{
simulate_all_kernel_big<<<grid, block>>>(num_attacks, num_patterns_half_dev, max_powerID_dev, max_power_dev, pline, pway, tableID_half_prefix_dev, tableID_half_table_dev, reversed_bit_table_dev, /*bit_count_table_dev,*/ LS, isStrong);
}
break;
}
#ifdef TIME
cudaDeviceSynchronize();
double t2 = gettimeofday_sec();
printf("num %d,time,%f\n",num_attacks,t2-t1);
#endif
//fprintf(stdout,"%d\n",__LINE__);
cudaMemcpy(max_powerID, max_powerID_dev, sizeof(unsigned long long) * 2 * length, cudaMemcpyDeviceToHost);
cudaMemcpy(max_power , max_power_dev , sizeof(float) * 2 * length, cudaMemcpyDeviceToHost);
//fprintf(stdout,"%d\n",__LINE__);
float MP[2][rank];
unsigned long long MID[2][rank];
int ms;
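    // Host-side reduction: merge the per-thread local rankings returned by the kernel into one
    // global top-'rank' list for the main (ms==0) and sub (ms==1) colors, then order equal-power
    // entries by smallest board ID.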
for(ms = 0; ms < 2; ms++){
for(i = 0;i < rank;i++){
MP[ms][i] = 0.0;
MID[ms][i]= 0;
}
for(i = 0;i < length;i++){
float power = max_power[i + length*ms];
unsigned long long tableID = max_powerID[i + length*ms];
if(MP[ms][rank-1] < power){
for(k = 0;k < rank;k++){
if(MP[ms][k] < power){
for(j = rank-2;j >= k;j--){
MID[ms][j+1] = MID[ms][j];
MP[ms][j+1] = MP[ms][j];
}
MID[ms][k] = tableID;
MP[ms][k] = power;
break;
}
}
}
}
//fprintf(stdout,"%d\n",__LINE__);
for(i = 0;i < rank;i++){
float power = MP[ms][i];
unsigned long long tmp = MID[ms][i];
unsigned long long minID = tmp;
int index = i;
for(j = i+1;j < rank;j++){
if(power == MP[ms][j]){
if(minID > MID[ms][j]){
minID = MID[ms][j];
index = j;
}
}else{
break;
}
}
MID[ms][index] = tmp;
MID[ms][i] = minID;
}
}
//fprintf(stdout,"%d\n",__LINE__);
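    // When the split is exactly half/half the main and sub rankings describe the same set of
    // boards, so the two lists are merged into a single entry; otherwise they fill the symmetric
    // entries for num_attacks and width*hight-num_attacks.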
if(num_attacks == half_table_size){
int mc = 0;
int sc = 0;
for(i = 0;i < rank;i++){
if(MP[0][mc] < MP[1][sc]){
final_MID[num_attacks][i] = MID[1][sc];
final_MP [num_attacks][i] = MP [1][sc];
sc++;
}else if(MP[0][mc] == MP[1][sc]){
if(MID[0][mc] < MID[1][sc]){
final_MID[num_attacks][i] = MID[0][mc];
final_MP [num_attacks][i] = MP [0][mc];
mc++;
}else{
final_MID[num_attacks][i] = MID[1][sc];
final_MP [num_attacks][i] = MP [1][sc];
sc++;
}
}else{
final_MID[num_attacks][i] = MID[0][mc];
final_MP [num_attacks][i] = MP [0][mc];
mc++;
}
}
}else{
for(i = 0;i < rank;i++){
final_MID[num_attacks][i] = MID[0][i];
final_MP [num_attacks][i] = MP [0][i];
final_MID[width*hight-num_attacks][i] = MID[1][i];
final_MP [width*hight-num_attacks][i] = MP [1][i];
}
}
}
//fprintf(stdout,"%d\n",__LINE__);
for(num_attacks = 0;num_attacks <= width*hight;num_attacks++){
if(final_MID[num_attacks][0] != 0xFFFFFFFFFFFFFFFFLU){
printf("%2d-%2d, line %d, way %d\n", num_attacks, width*hight-num_attacks, line, way);
if(simuave){
simulate_average(table_size, final_MID[num_attacks], final_MP[num_attacks], num_attacks, width, hight, LS, isStrong, pline, pway);
}else{
for(i = 0;i < rank;i++){
printf("%d,max ID,%lld,power,%f\n",i,final_MID[num_attacks][i],final_MP[num_attacks][i]);
}
}
}
}
CUDA_SAFE_CALL(cudaFree(max_powerID_dev));
CUDA_SAFE_CALL(cudaFree(max_power_dev));
CUDA_SAFE_CALL(cudaFree(tableID_half_table_dev));
  CUDA_SAFE_CALL(cudaFree(num_patterns_half_dev));
CUDA_SAFE_CALL(cudaFree(tableID_half_prefix_dev));
//CUDA_SAFE_CALL(cudaFree(bit_count_table_dev));
CUDA_SAFE_CALL(cudaFree(reversed_bit_table_dev));
}
}
|
ea17498a50497506e95bae4cff4bc29af158a873.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
# define M 5
# define N 4
__global__ void matrix_add(int* A, int* B, int* C)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < M && j < N) {
C[i * N + j] = A[i * N + j] + B[i * N + j];
}
}
void print_matrix(int* matrix)
{
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
{
printf("%d ", matrix[i * N + j]);
}
printf("\n");
}
}
void check_cuda_error(hipError_t status)
{
if (status != hipSuccess)
{
printf("error: %s\n", hipGetErrorString(status));
exit(1);
}
}
int main()
{
dim3 threads_per_block(2, 3);
int n_blocks_x = (int)ceil(float(M) / threads_per_block.x);
int n_blocks_y = (int)ceil(float(N) / threads_per_block.y);
dim3 n_blocks(n_blocks_x, n_blocks_y);
int* A = (int*)malloc(M * N * sizeof(int));
int* B = (int*)malloc(M * N * sizeof(int));
int* C = (int*)malloc(M * N * sizeof(int));
for (int i = 0; i < M;i++)
{
for (int j = 0; j < N;j++)
{
A[i * N + j] = B[i * N + j] = rand() % 10;
}
}
int* cuda_A;
int* cuda_B;
int* cuda_C;
hipError_t cuda_status;
cuda_status = hipMalloc((void**)&cuda_A, M * N * sizeof(int));
check_cuda_error(cuda_status);
cuda_status = hipMalloc((void**)&cuda_B, M * N * sizeof(int));
check_cuda_error(cuda_status);
cuda_status = hipMalloc((void**)&cuda_C, M * N * sizeof(int));
check_cuda_error(cuda_status);
hipMemcpy(cuda_A, A, M * N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cuda_B, B, M * N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cuda_C, C, M * N * sizeof(int), hipMemcpyHostToDevice);
    matrix_add<<<n_blocks, threads_per_block>>>(cuda_A, cuda_B, cuda_C);
hipMemcpy(C, cuda_C, M * N * sizeof(int), hipMemcpyDeviceToHost);
printf("A\n");
print_matrix(A);
printf("B\n");
print_matrix(B);
printf("C\n");
print_matrix(C);
hipFree(cuda_A);
hipFree(cuda_B);
hipFree(cuda_C);
free(A);
free(B);
free(C);
return 0;
}
| ea17498a50497506e95bae4cff4bc29af158a873.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
# define M 5
# define N 4
__global__ void matrix_add(int* A, int* B, int* C)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
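    // i indexes rows (x dimension), j indexes columns (y dimension); the guard below skips threads of the padded grid that fall outside the M x N matrix.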
if (i < M && j < N) {
C[i * N + j] = A[i * N + j] + B[i * N + j];
}
}
void print_matrix(int* matrix)
{
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
{
printf("%d ", matrix[i * N + j]);
}
printf("\n");
}
}
void check_cuda_error(cudaError_t status)
{
if (status != cudaSuccess)
{
printf("error: %s\n", cudaGetErrorString(status));
exit(1);
}
}
int main()
{
dim3 threads_per_block(2, 3);
int n_blocks_x = (int)ceil(float(M) / threads_per_block.x);
int n_blocks_y = (int)ceil(float(N) / threads_per_block.y);
dim3 n_blocks(n_blocks_x, n_blocks_y);
int* A = (int*)malloc(M * N * sizeof(int));
int* B = (int*)malloc(M * N * sizeof(int));
int* C = (int*)malloc(M * N * sizeof(int));
for (int i = 0; i < M;i++)
{
for (int j = 0; j < N;j++)
{
A[i * N + j] = B[i * N + j] = rand() % 10;
}
}
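    // Note: the chained assignment above fills A and B with identical values, so C should come back as 2*A.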
int* cuda_A;
int* cuda_B;
int* cuda_C;
cudaError_t cuda_status;
cuda_status = cudaMalloc((void**)&cuda_A, M * N * sizeof(int));
check_cuda_error(cuda_status);
cuda_status = cudaMalloc((void**)&cuda_B, M * N * sizeof(int));
check_cuda_error(cuda_status);
cuda_status = cudaMalloc((void**)&cuda_C, M * N * sizeof(int));
check_cuda_error(cuda_status);
cudaMemcpy(cuda_A, A, M * N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_B, B, M * N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_C, C, M * N * sizeof(int), cudaMemcpyHostToDevice);
matrix_add << <n_blocks, threads_per_block >> > (cuda_A, cuda_B, cuda_C);
cudaMemcpy(C, cuda_C, M * N * sizeof(int), cudaMemcpyDeviceToHost);
printf("A\n");
print_matrix(A);
printf("B\n");
print_matrix(B);
printf("C\n");
print_matrix(C);
cudaFree(cuda_A);
cudaFree(cuda_B);
cudaFree(cuda_C);
free(A);
free(B);
free(C);
return 0;
}
|