hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars)
---|---|---|---|
0186b09b7a6cba91e97dd63c37ba757010d2a1d6.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
Sample for Mobile CUDA
Simple Adding Vectors Application.
Author @ Taichirou Suzuki
**/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
/**
Simple Kernel.
**/
__global__ void ___add(float* a,float* b,unsigned long size){
int _x = blockDim.x * blockIdx.x + threadIdx.x;
int _y = blockDim.y * blockIdx.y + threadIdx.y;
unsigned long id = _x + _y * size;
a[id] += b[id];
}
static float elapsed(struct timeval tv0,struct timeval tv1){
return (float)(tv1.tv_sec - tv0.tv_sec)
+ (float)(tv1.tv_usec - tv0.tv_usec)
* 0.000001f;
}
int main(void){
struct timeval t0,t1;
gettimeofday(&t0,NULL);
/**
Define Vector Size.
**/
// unsigned long _hen = 11000;
unsigned long _hen = 18000;
// unsigned long _hen = 18000;
unsigned long size = _hen * _hen;
printf("gyouretu size : %lu\n",size);
/**
Number of kernel launches.
**/
int numOfLaunchKernel = 1;
//int numOfLaunchKernel = 1;
hipSetDevice(0);
// float* h_a = (float*)malloc(sizeof(float)*size);
// float* h_b = (float*)malloc(sizeof(float)*size);
float* d_a = NULL;
float* d_b = NULL;
// float* d_c = NULL;
hipMalloc((void**)&d_a,sizeof(float)*size);
hipMalloc((void**)&d_b,sizeof(float)*size);
// hipMalloc((void**)&d_c,sizeof(float)*size);
float* h_a = NULL;
float* h_b = NULL;
/*
hipError_t res;
res = hipHostMalloc((void **)&h_a,sizeof(float)*size,0);
printf("hipHostMalloc : %d\n",res);
res = hipHostMalloc((void **)&h_b,sizeof(float)*size,0);
printf("hipHostMalloc : %d\n",res);
*/
h_a = (float*)malloc(sizeof(float)*size);
h_b = (float*)malloc(sizeof(float)*size);
// float* h_c = (float*)malloc(sizeof(float)*size);
printf("This Sample Application Uses %d[Mbyte] per vector.(Total : %d[Mbyte])\n",sizeof(float)*size >> 20,sizeof(float)*size*2 >> 20);
for(int i = 0 ; i < size ; i ++){
h_a[i] = 0.0f;
h_b[i] = 1.0f;
}
// int ite = 140;
int ite = 155;
// int ite = 1000000;
for(int j = 0 ; j < ite ; j ++){
hipMemcpy(d_a,h_a,sizeof(float)*size,hipMemcpyHostToDevice);
hipMemcpy(d_b,h_b,sizeof(float)*size,hipMemcpyHostToDevice);
int _size = 10;
dim3 threads(_size,_size,1);
dim3 grid(_hen/_size,_hen/_size,1);
for(int i = 0 ; i < numOfLaunchKernel ; i ++){
//__add<<<grid,threads>>>(d_c,d_a,d_b,_hen);
hipLaunchKernelGGL(( ___add), dim3(grid),dim3(threads), 0, 0, d_a,d_b,_hen);
/**
Main thread can sleep here.
**/
// sleep(1);
}
// hipMemcpy(h_c,d_c,sizeof(float)*size,hipMemcpyDeviceToHost);
hipMemcpy(h_a,d_a,sizeof(float)*size,hipMemcpyDeviceToHost);
}
int pass = 1;
for(int i = 0 ; i < size ; i ++){
// if(h_c[i] != numOfLaunchKernel){
// if(h_a[i] != numOfLaunchKernel){
// printf("H_A[%d] : %d",i,h_a[i]);
if(h_a[i] != ite){
pass = 0;
}
}
if(pass){
printf(">Result TEST : PASS\n");
}else{
printf(">Result TEST : FAILED\n");
}
hipFree(d_a);
hipFree(d_b);
// hipFree(d_c);
free(h_a);
free(h_b);
// hipHostFree(h_a);
// hipHostFree(h_b);
// free(h_c);
printf("Application Closed...\n");
gettimeofday(&t1,NULL);
printf("My RESULT : %f\n",elapsed(t0,t1));
return 0;
}
|
0186b09b7a6cba91e97dd63c37ba757010d2a1d6.cu
|
/**
Sample for Mobile CUDA
Simple Adding Vectors Application.
Author @ Taichirou Suzuki
**/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
/**
Simple Kernel.
**/
__global__ void ___add(float* a,float* b,unsigned long size){
int _x = blockDim.x * blockIdx.x + threadIdx.x;
int _y = blockDim.y * blockIdx.y + threadIdx.y;
unsigned long id = _x + _y * size;
a[id] += b[id];
}
static float elapsed(struct timeval tv0,struct timeval tv1){
return (float)(tv1.tv_sec - tv0.tv_sec)
+ (float)(tv1.tv_usec - tv0.tv_usec)
* 0.000001f;
}
int main(void){
struct timeval t0,t1;
gettimeofday(&t0,NULL);
/**
Define Vector Size.
**/
// unsigned long _hen = 11000;
unsigned long _hen = 18000;
// unsigned long _hen = 18000;
unsigned long size = _hen * _hen;
printf("gyouretu size : %lu\n",size);
/**
Number of kernel launches.
**/
int numOfLaunchKernel = 1;
//int numOfLaunchKernel = 1;
cudaSetDevice(0);
// float* h_a = (float*)malloc(sizeof(float)*size);
// float* h_b = (float*)malloc(sizeof(float)*size);
float* d_a = NULL;
float* d_b = NULL;
// float* d_c = NULL;
cudaMalloc((void**)&d_a,sizeof(float)*size);
cudaMalloc((void**)&d_b,sizeof(float)*size);
// cudaMalloc((void**)&d_c,sizeof(float)*size);
float* h_a = NULL;
float* h_b = NULL;
/*
cudaError_t res;
res = cudaHostAlloc((void **)&h_a,sizeof(float)*size,0);
printf("cudaHostAlloc : %d\n",res);
res = cudaHostAlloc((void **)&h_b,sizeof(float)*size,0);
printf("cudaHostAlloc : %d\n",res);
*/
h_a = (float*)malloc(sizeof(float)*size);
h_b = (float*)malloc(sizeof(float)*size);
// float* h_c = (float*)malloc(sizeof(float)*size);
printf("This Sample Application Uses %d[Mbyte] per vector.(Total : %d[Mbyte])\n",sizeof(float)*size >> 20,sizeof(float)*size*2 >> 20);
for(int i = 0 ; i < size ; i ++){
h_a[i] = 0.0f;
h_b[i] = 1.0f;
}
// int ite = 140;
int ite = 155;
// int ite = 1000000;
for(int j = 0 ; j < ite ; j ++){
cudaMemcpy(d_a,h_a,sizeof(float)*size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,h_b,sizeof(float)*size,cudaMemcpyHostToDevice);
int _size = 10;
dim3 threads(_size,_size,1);
dim3 grid(_hen/_size,_hen/_size,1);
for(int i = 0 ; i < numOfLaunchKernel ; i ++){
//__add<<<grid,threads>>>(d_c,d_a,d_b,_hen);
___add<<<grid,threads>>>(d_a,d_b,_hen);
/**
Main thread can sleep here.
**/
// sleep(1);
}
// cudaMemcpy(h_c,d_c,sizeof(float)*size,cudaMemcpyDeviceToHost);
cudaMemcpy(h_a,d_a,sizeof(float)*size,cudaMemcpyDeviceToHost);
}
int pass = 1;
for(int i = 0 ; i < size ; i ++){
// if(h_c[i] != numOfLaunchKernel){
// if(h_a[i] != numOfLaunchKernel){
// printf("H_A[%d] : %d",i,h_a[i]);
if(h_a[i] != ite){
pass = 0;
}
}
if(pass){
printf(">Result TEST : PASS\n");
}else{
printf(">Result TEST : FAILED\n");
}
cudaFree(d_a);
cudaFree(d_b);
// cudaFree(d_c);
free(h_a);
free(h_b);
// cudaFreeHost(h_a);
// cudaFreeHost(h_b);
// free(h_c);
printf("Application Closed...\n");
gettimeofday(&t1,NULL);
printf("My RESULT : %f\n",elapsed(t0,t1));
return 0;
}
|
b7129aa040effdcf10f128cd924a505098d7e219.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "distconv/util/nvshmem.hpp"
#include "distconv/util/util_mpi.hpp"
#include "distconv/util/util_cuda.hpp"
namespace distconv {
namespace util {
namespace nvshmem {
#ifdef DISTCONV_HAS_NVSHMEM
void initialize(MPI_Comm comm) {
util::MPIRootPrintStreamInfo() << "Initializing NVSHMEM with MPI";
nvshmemx_init_attr_t attr;
attr.mpi_comm = &comm;
DISTCONV_CHECK_NVSHMEM(nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr));
}
void finalize() {
util::MPIRootPrintStreamInfo() << "Finalizing NVSHMEM";
nvshmem_finalize();
}
void barrier() {
nvshmem_barrier_all();
}
namespace internal {
__global__ void sync_pairwise_kernel(int peer,
bool notify, bool wait,
SyncType sync_type,
PairwiseSyncDevice sync) {
if (notify) {
sync.notify(peer, sync_type);
}
if (wait) {
sync.wait();
}
sync.inc_counter();
}
__global__ void notify_kernel(int peer,
SyncType sync_type,
PairwiseSyncDevice sync) {
sync.notify(peer, sync_type);
}
__global__ void wait_kernel(PairwiseSyncDevice sync) {
sync.wait();
}
__global__ void inc_counter_kernel(PairwiseSyncDevice sync) {
sync.inc_counter();
}
} // namespace internal
void PairwiseSync::sync(int peer, bool notify, bool wait,
SyncType sync_type, hipStream_t stream) {
if (peer == MPI_PROC_NULL) return;
hipLaunchKernelGGL(( internal::sync_pairwise_kernel), dim3(1), dim3(1), 0, stream,
peer, notify, wait, sync_type, get_for_device());
}
void PairwiseSync::notify(int peer, SyncType sync_type,
hipStream_t stream) {
if (peer == MPI_PROC_NULL) return;
hipLaunchKernelGGL(( internal::notify_kernel), dim3(1), dim3(1), 0, stream,
peer, sync_type, get_for_device());
}
void PairwiseSync::wait(hipStream_t stream) {
hipLaunchKernelGGL(( internal::wait_kernel), dim3(1), dim3(1), 0, stream, get_for_device());
}
void PairwiseSync::inc_counter(hipStream_t stream) {
hipLaunchKernelGGL(( internal::inc_counter_kernel), dim3(1), dim3(1), 0, stream, get_for_device());
}
void PairwiseSync::alloc_buffers() {
CounterType *shmem_counter = static_cast<CounterType*>(
nvshmem_malloc(sizeof(CounterType)));
//util::MPIPrintStreamDebug() << "shmem flag: " << p;
if (shmem_counter == nullptr) {
util::MPIPrintStreamError() << "Allocation of shmem buffer failed";
throw std::exception();
}
DISTCONV_CHECK_CUDA(hipMemset(shmem_counter, 0, sizeof(CounterType)));
// Make sure the memset is completed
DISTCONV_CHECK_CUDA(hipStreamSynchronize(0));
barrier();
m_shmem_counter = std::shared_ptr<CounterType>(
shmem_counter, [](CounterType *ptr) { nvshmem_free(ptr); });
// Setup the device counter variable
CounterType *local_counter = nullptr;
DISTCONV_CHECK_CUDA(hipMalloc(&local_counter, sizeof(CounterType)));
CounterType counter_init = 1;
DISTCONV_CHECK_CUDA(hipMemcpy(
local_counter, &counter_init,
sizeof(CounterType), hipMemcpyHostToDevice));
m_local_counter = std::shared_ptr<CounterType>(
local_counter, [](CounterType *ptr) {
DISTCONV_CHECK_CUDA(hipFree(ptr)); });
}
PairwiseSyncDevice PairwiseSync::get_for_device() {
return PairwiseSyncDevice(m_local_counter.get(), m_shmem_counter.get());
}
#endif // DISTCONV_HAS_NVSHMEM
} // namespace nvshmem
} // namespace util
} // namespace distconv
|
b7129aa040effdcf10f128cd924a505098d7e219.cu
|
#include "distconv/util/nvshmem.hpp"
#include "distconv/util/util_mpi.hpp"
#include "distconv/util/util_cuda.hpp"
namespace distconv {
namespace util {
namespace nvshmem {
#ifdef DISTCONV_HAS_NVSHMEM
void initialize(MPI_Comm comm) {
util::MPIRootPrintStreamInfo() << "Initializing NVSHMEM with MPI";
nvshmemx_init_attr_t attr;
attr.mpi_comm = &comm;
DISTCONV_CHECK_NVSHMEM(nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr));
}
void finalize() {
util::MPIRootPrintStreamInfo() << "Finalizing NVSHMEM";
nvshmem_finalize();
}
void barrier() {
nvshmem_barrier_all();
}
namespace internal {
__global__ void sync_pairwise_kernel(int peer,
bool notify, bool wait,
SyncType sync_type,
PairwiseSyncDevice sync) {
if (notify) {
sync.notify(peer, sync_type);
}
if (wait) {
sync.wait();
}
sync.inc_counter();
}
__global__ void notify_kernel(int peer,
SyncType sync_type,
PairwiseSyncDevice sync) {
sync.notify(peer, sync_type);
}
__global__ void wait_kernel(PairwiseSyncDevice sync) {
sync.wait();
}
__global__ void inc_counter_kernel(PairwiseSyncDevice sync) {
sync.inc_counter();
}
} // namespace internal
void PairwiseSync::sync(int peer, bool notify, bool wait,
SyncType sync_type, cudaStream_t stream) {
if (peer == MPI_PROC_NULL) return;
internal::sync_pairwise_kernel<<<1, 1, 0, stream>>>(
peer, notify, wait, sync_type, get_for_device());
}
void PairwiseSync::notify(int peer, SyncType sync_type,
cudaStream_t stream) {
if (peer == MPI_PROC_NULL) return;
internal::notify_kernel<<<1, 1, 0, stream>>>(
peer, sync_type, get_for_device());
}
void PairwiseSync::wait(cudaStream_t stream) {
internal::wait_kernel<<<1, 1, 0, stream>>>(get_for_device());
}
void PairwiseSync::inc_counter(cudaStream_t stream) {
internal::inc_counter_kernel<<<1, 1, 0, stream>>>(get_for_device());
}
void PairwiseSync::alloc_buffers() {
CounterType *shmem_counter = static_cast<CounterType*>(
nvshmem_malloc(sizeof(CounterType)));
//util::MPIPrintStreamDebug() << "shmem flag: " << p;
if (shmem_counter == nullptr) {
util::MPIPrintStreamError() << "Allocation of shmem buffer failed";
throw std::exception();
}
DISTCONV_CHECK_CUDA(cudaMemset(shmem_counter, 0, sizeof(CounterType)));
// Make sure the memset is completed
DISTCONV_CHECK_CUDA(cudaStreamSynchronize(0));
barrier();
m_shmem_counter = std::shared_ptr<CounterType>(
shmem_counter, [](CounterType *ptr) { nvshmem_free(ptr); });
// Setup the device counter variable
CounterType *local_counter = nullptr;
DISTCONV_CHECK_CUDA(cudaMalloc(&local_counter, sizeof(CounterType)));
CounterType counter_init = 1;
DISTCONV_CHECK_CUDA(cudaMemcpy(
local_counter, &counter_init,
sizeof(CounterType), cudaMemcpyHostToDevice));
m_local_counter = std::shared_ptr<CounterType>(
local_counter, [](CounterType *ptr) {
DISTCONV_CHECK_CUDA(cudaFree(ptr)); });
}
PairwiseSyncDevice PairwiseSync::get_for_device() {
return PairwiseSyncDevice(m_local_counter.get(), m_shmem_counter.get());
}
#endif // DISTCONV_HAS_NVSHMEM
} // namespace nvshmem
} // namespace util
} // namespace distconv
|
4536d944363df06ebbc81743abc159cf0c988259.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/core/tensor_op.h"
#include <limits>
namespace anakin{
namespace saber{
template <typename Dtype>
__global__ void set_device_data(Dtype* data_ptr, Dtype value, int size){
CUDA_KERNEL_LOOP(index, size){
data_ptr[index] = value;
}
}
template <typename Dtype>
__global__ void print_device_data(const Dtype* data_ptr, int size, int width){
for (int i = 0; i < size; i++){
printf("%.2f ", static_cast<float>(data_ptr[i]));
if ((i + 1) % width == 0){
printf("\n");
}
}
printf("\n");
}
template <typename Dtype>
__global__ void cuda_cvt_data(const float* src, Dtype* dst, Dtype scale, int size){
CUDA_KERNEL_LOOP(index, size){
dst[index] = static_cast<Dtype>(src[index] * scale);
}
}
template <class Tensor_t>
void fill_tensor_device_const(Tensor_t& tensor, \
typename Tensor_t::Dtype value, \
typename Tensor_t::API::stream_t stream){
typedef typename Tensor_t::Dtype Dtype;
Dtype* data_ptr = static_cast<Dtype*>(tensor.get_buf()->get_data_mutable());
int size = tensor.size();
hipLaunchKernelGGL(( set_device_data), dim3(CUDA_GET_BLOCKS(size)), dim3(CUDA_NUM_THREADS), 0, stream, data_ptr, value, size);
CUDA_POST_KERNEL_CHECK;
};
template <class Tensor_t>
void fill_tensor_device_rand(Tensor_t& tensor, typename Tensor_t::API::stream_t stream) {
typedef typename Tensor_t::Dtype Dtype;
Dtype* data_ptr = static_cast<Dtype*>(tensor.get_buf()->get_data_mutable());
int size = tensor.size();
float* data_f;
hipMalloc(&data_f, size * sizeof(float));
hiprandGenerator_t gen;
CHECK_EQ(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT), HIPRAND_STATUS_SUCCESS);
CHECK_EQ(hiprandSetPseudoRandomGeneratorSeed(gen, rand()), HIPRAND_STATUS_SUCCESS);
CHECK_EQ(hiprandGenerateUniform(gen, data_f, size), HIPRAND_STATUS_SUCCESS);
CHECK_EQ(hiprandDestroyGenerator(gen), HIPRAND_STATUS_SUCCESS);
Dtype scale = std::numeric_limits<Dtype>::max();
hipLaunchKernelGGL(( cuda_cvt_data), dim3(CUDA_GET_BLOCKS(size)), dim3(CUDA_NUM_THREADS), 0, stream, data_f, data_ptr, scale, size);
hipDeviceSynchronize();
hipFree(data_f);
CUDA_POST_KERNEL_CHECK;
};
template <class Tensor_t>
void fill_tensor_device_rand(Tensor_t& tensor, typename Tensor_t::Dtype vstart, \
typename Tensor_t::Dtype vend, typename Tensor_t::API::stream_t stream) {
typedef typename Tensor_t::Dtype Dtype;
Dtype* data_ptr = static_cast<Dtype*>(tensor.get_buf()->get_data_mutable());
int size = tensor.size();
float* data_f;
hipMalloc(&data_f, size * sizeof(float));
hiprandGenerator_t gen;
CHECK_EQ(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT), HIPRAND_STATUS_SUCCESS);
CHECK_EQ(hiprandSetPseudoRandomGeneratorSeed(gen, rand()), HIPRAND_STATUS_SUCCESS);
CHECK_EQ(hiprandGenerateUniform(gen, data_f, size), HIPRAND_STATUS_SUCCESS);
CHECK_EQ(hiprandDestroyGenerator(gen), HIPRAND_STATUS_SUCCESS);
Dtype scale = vend - vstart;
hipLaunchKernelGGL(( cuda_cvt_data), dim3(CUDA_GET_BLOCKS(size)), dim3(CUDA_NUM_THREADS), 0, stream, data_f, data_ptr, scale, size);
hipDeviceSynchronize();
hipFree(data_f);
CUDA_POST_KERNEL_CHECK;
};
template <class Tensor_t>
void print_tensor_device(Tensor_t& tensor, typename Tensor_t::API::stream_t stream){
typedef typename Tensor_t::Dtype Dtype;
LOG(INFO) << "device tensor size: " << tensor.size();
const Dtype* data_ptr = static_cast<const Dtype*>(tensor.get_buf()->get_data());
int size = tensor.size();
hipLaunchKernelGGL(( print_device_data), dim3(1), dim3(1), 0, stream, data_ptr, size, tensor.width());
hipDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
};
#define FILL_TENSOR_NV(type, layout) \
template void fill_tensor_device_const<Tensor<NV, type, layout>>\
(Tensor<NV, type, layout>& tensor, DataTrait<type>::dtype value, \
typename TargetWrapper<NV>::stream_t stream); \
template void fill_tensor_device_rand<Tensor<NV, type, layout>>\
(Tensor<NV, type, layout>& tensor, typename TargetWrapper<NV>::stream_t stream); \
template void fill_tensor_device_rand<Tensor<NV, type, layout>>\
(Tensor<NV, type, layout>& tensor, DataTrait<type>::dtype vstart, \
DataTrait<type>::dtype vend, typename TargetWrapper<NV>::stream_t stream); \
template void print_tensor_device<Tensor<NV, type, layout>>\
(Tensor<NV, type, layout>& tensor, typename TargetWrapper<NV>::stream_t stream);
FILL_TENSOR_NV(AK_FLOAT, NCHW);
FILL_TENSOR_NV(AK_FLOAT, NHWC);
FILL_TENSOR_NV(AK_FLOAT, NHW);
FILL_TENSOR_NV(AK_FLOAT, NW);
FILL_TENSOR_NV(AK_FLOAT, HW);
FILL_TENSOR_NV(AK_FLOAT, W);
FILL_TENSOR_NV(AK_INT8, NCHW);
FILL_TENSOR_NV(AK_INT8, NHWC);
FILL_TENSOR_NV(AK_INT8, NHW);
FILL_TENSOR_NV(AK_INT8, NW);
FILL_TENSOR_NV(AK_INT8, HW);
FILL_TENSOR_NV(AK_INT8, W);
// INT8 NCHW_C4
template void fill_tensor_device_const<Tensor<NV, AK_INT8, NCHW_C4>>(Tensor<NV, AK_INT8, NCHW_C4>& tensor, \
char value, typename TargetWrapper<NV>::stream_t stream);
template void fill_tensor_device_rand<Tensor<NV, AK_INT8, NCHW_C4>>(Tensor<NV, AK_INT8, NCHW_C4>& tensor, \
typename TargetWrapper<NV>::stream_t stream);
template <>
void print_tensor_device<Tensor<NV, AK_INT8, NCHW_C4>>(Tensor<NV, AK_INT8, NCHW_C4>& tensor, \
typename TargetWrapper<NV>::stream_t stream) {
typedef typename Tensor<NV, AK_INT8, NCHW_C4>::Dtype Dtype;
LOG(INFO) << "device tensor size: " << tensor.size();
const Dtype* data_ptr = (const Dtype*)tensor.get_buf()->get_data();
int size = tensor.size();
hipLaunchKernelGGL(( print_device_data), dim3(1), dim3(1), 0, stream, data_ptr, size, tensor.width() * 4);
CUDA_POST_KERNEL_CHECK;
};
// use BLOCKCOUNT and THREADNUM
__global__
void int8nchwc4_fp32nchw(float* out_data, const char* in_data,
int valid_num, int valid_channel_4, int valid_height, int valid_width,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
float* scale, int count) {
float load0, load1, load2, load3;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int read_w = (gid) % valid_width;
int read_h = (gid / (in_h_stride)) % valid_height;
int read_c = (gid / (in_c_stride)) % valid_channel_4;
int read_n = (gid / (in_n_stride)) % valid_num;
int scale_index = read_c << 2;
int in_offset = read_n * in_n_stride
+ read_c * in_c_stride
+ read_h * in_h_stride
+ read_w;
int out_offset = read_n * out_n_stride
+ read_c * (out_c_stride << 2)
+ read_h * out_h_stride
+ read_w * out_w_stride;
if (gid < count) {
char4 readin = __ldg(&((const char4*)in_data)[in_offset]);
load0 = static_cast<float>(readin.x);
load1 = static_cast<float>(readin.y);
load2 = static_cast<float>(readin.z);
load3 = static_cast<float>(readin.w);
out_data[out_offset] = load0 * scale[scale_index]; out_offset += out_c_stride;
out_data[out_offset] = load1 * scale[scale_index + 1]; out_offset += out_c_stride;
out_data[out_offset] = load2 * scale[scale_index + 2]; out_offset += out_c_stride;
out_data[out_offset] = load3 * scale[scale_index + 3];
}
}
template<>
SaberStatus DataTensorTransformHelper::transform<Tensor<NV, AK_FLOAT, NCHW>, Tensor<NV, AK_INT8, NCHW_C4> >(
Tensor<NV, AK_FLOAT, NCHW> &out_tensor,
const Tensor<NV, AK_INT8, NCHW_C4> &in_tensor, Context<NV> ctx){
Shape out_stride = out_tensor.get_stride();
Shape in_shape = in_tensor.valid_shape();
Shape out_shape = out_tensor.valid_shape();
int count = in_shape[0] * in_shape[1] * in_shape[2] * in_shape[3];
const char * in_data = in_tensor.data();
float * out_data = out_tensor.mutable_data();
hipStream_t cuda_stream = ctx.get_compute_stream();
hipLaunchKernelGGL(( int8nchwc4_fp32nchw), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, out_data, in_data,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
in_shape[1] * in_shape[2] * in_shape[3],
in_shape[2] * in_shape[3],
in_shape[3], 1,
out_stride[0], out_stride[1], out_stride[2], out_stride[3],
_weight_scale, count);
return SaberSuccess;
}
__global__
void transform_nchw_2_c4(char* out_data, const float* in_data,
int valid_num, int valid_channel_4, int valid_height, int valid_width,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
float scale,
int count) {
int load0, load1, load2, load3;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int write_w = (gid) % valid_width;
int write_h = (gid / (out_h_stride)) % valid_height;
int write_c = (gid / (out_c_stride)) % valid_channel_4;
int write_n = (gid / (out_n_stride)) % valid_num;
int in_offset = write_n * in_n_stride
+ write_c * (in_c_stride << 2)
+ write_h * in_h_stride
+ write_w * in_w_stride;
int out_offset = write_n * out_n_stride
+ write_c * out_c_stride
+ write_h * out_h_stride
+ write_w;
if (gid < count) {
char4 write;
load0 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.x = static_cast<char>(load0);
in_offset += in_c_stride;
load1 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.y = static_cast<char>(load1);
in_offset += in_c_stride;
load2 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.z = static_cast<char>(load2);
in_offset += in_c_stride;
load3 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.w = static_cast<char>(load3);
((char4*)out_data)[out_offset] = write;
}
}
template<>
SaberStatus DataTensorTransformHelper::transform<Tensor<NV, AK_INT8, NCHW_C4>, Tensor<NV, AK_FLOAT, NCHW> >(
Tensor<NV, AK_INT8, NCHW_C4> &out_tensor,
const Tensor<NV, AK_FLOAT, NCHW> &in_tensor, Context<NV> ctx){
const float * in_data = in_tensor.data();
char * out_data = out_tensor.mutable_data();
Shape in_stride = in_tensor.get_stride();
Shape in_shape = in_tensor.valid_shape();
Shape out_shape = out_tensor.valid_shape();
int count = out_shape[0] * out_shape[1] * out_shape[2] * out_shape[3];
hipStream_t cuda_stream = ctx.get_compute_stream();
hipLaunchKernelGGL(( transform_nchw_2_c4), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, out_data, in_data,
out_shape[0], out_shape[1], out_shape[2], out_shape[3],
in_stride[0], in_stride[1], in_stride[2], in_stride[3],
out_shape[1] * out_shape[2] * out_shape[3],
out_shape[2] * out_shape[3], out_shape[3], 1,
(1.f / _in_scale), count);
return SaberSuccess;
}
__global__ void transform_nchw_2_nchw(float * out_data,
const float* in_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n, int out_c, int out_h, int out_w,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
float *scale) {
CUDA_KERNEL_LOOP(tid, count){
int read_w = tid % in_w;
int read_h = (tid / (in_w)) % in_h;
int read_c = (tid / (in_h * in_w)) % in_c;
int read_n = (tid / (in_c * in_h * in_w)) % in_n;
int write_w = tid % out_w;
int write_h = (tid / (out_w)) % out_h;
int write_c = (tid / (out_h * out_w)) % out_c;
int write_n = (tid / (out_c * out_h * out_w)) % out_n;
int in_idx = read_n * in_n_stride
+ read_c * in_c_stride
+ read_h * in_h_stride
+ read_w * in_w_stride;
int out_idx = write_n * out_n_stride
+ write_c * out_c_stride
+ write_h * out_h_stride
+ write_w * out_w_stride;
float in_var = in_data[in_idx];
float in_scale = scale[read_c];
out_data[out_idx] = in_var * in_scale;
}
}
template<>
SaberStatus DataTensorTransformHelper::transform<Tensor<NV, AK_FLOAT, NCHW>, Tensor<NV, AK_FLOAT, NCHW> >(
Tensor<NV, AK_FLOAT, NCHW> &out_tensor,
const Tensor<NV, AK_FLOAT, NCHW> &in_tensor, Context<NV> ctx){
Shape in_shape = in_tensor.valid_shape();
Shape out_shape = out_tensor.valid_shape();
Shape stride_in = in_tensor.get_stride();
Shape stride_out = out_tensor.get_stride();
const float *in_data = (const float*)in_tensor.data();
float *out_data = (float*)out_tensor.mutable_data();
const int count = in_tensor.valid_size();
hipStream_t cuda_stream = ctx.get_compute_stream();
hipLaunchKernelGGL(( transform_nchw_2_nchw)
, dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream,
out_data, in_data, count,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
out_shape[0], out_shape[1], out_shape[2], out_shape[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3],
_weight_scale);
return SaberSuccess;
}
} //namespace saber
} //namespace anakin
|
4536d944363df06ebbc81743abc159cf0c988259.cu
|
#include "saber/core/tensor_op.h"
#include <limits>
namespace anakin{
namespace saber{
template <typename Dtype>
__global__ void set_device_data(Dtype* data_ptr, Dtype value, int size){
CUDA_KERNEL_LOOP(index, size){
data_ptr[index] = value;
}
}
template <typename Dtype>
__global__ void print_device_data(const Dtype* data_ptr, int size, int width){
for (int i = 0; i < size; i++){
printf("%.2f ", static_cast<float>(data_ptr[i]));
if ((i + 1) % width == 0){
printf("\n");
}
}
printf("\n");
}
template <typename Dtype>
__global__ void cuda_cvt_data(const float* src, Dtype* dst, Dtype scale, int size){
CUDA_KERNEL_LOOP(index, size){
dst[index] = static_cast<Dtype>(src[index] * scale);
}
}
template <class Tensor_t>
void fill_tensor_device_const(Tensor_t& tensor, \
typename Tensor_t::Dtype value, \
typename Tensor_t::API::stream_t stream){
typedef typename Tensor_t::Dtype Dtype;
Dtype* data_ptr = static_cast<Dtype*>(tensor.get_buf()->get_data_mutable());
int size = tensor.size();
set_device_data<<<CUDA_GET_BLOCKS(size), CUDA_NUM_THREADS, 0, stream>>>(data_ptr, value, size);
CUDA_POST_KERNEL_CHECK;
};
template <class Tensor_t>
void fill_tensor_device_rand(Tensor_t& tensor, typename Tensor_t::API::stream_t stream) {
typedef typename Tensor_t::Dtype Dtype;
Dtype* data_ptr = static_cast<Dtype*>(tensor.get_buf()->get_data_mutable());
int size = tensor.size();
float* data_f;
cudaMalloc(&data_f, size * sizeof(float));
curandGenerator_t gen;
CHECK_EQ(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT), CURAND_STATUS_SUCCESS);
CHECK_EQ(curandSetPseudoRandomGeneratorSeed(gen, rand()), CURAND_STATUS_SUCCESS);
CHECK_EQ(curandGenerateUniform(gen, data_f, size), CURAND_STATUS_SUCCESS);
CHECK_EQ(curandDestroyGenerator(gen), CURAND_STATUS_SUCCESS);
Dtype scale = std::numeric_limits<Dtype>::max();
cuda_cvt_data<<<CUDA_GET_BLOCKS(size), CUDA_NUM_THREADS, 0, stream>>>(data_f, data_ptr, scale, size);
cudaDeviceSynchronize();
cudaFree(data_f);
CUDA_POST_KERNEL_CHECK;
};
template <class Tensor_t>
void fill_tensor_device_rand(Tensor_t& tensor, typename Tensor_t::Dtype vstart, \
typename Tensor_t::Dtype vend, typename Tensor_t::API::stream_t stream) {
typedef typename Tensor_t::Dtype Dtype;
Dtype* data_ptr = static_cast<Dtype*>(tensor.get_buf()->get_data_mutable());
int size = tensor.size();
float* data_f;
cudaMalloc(&data_f, size * sizeof(float));
curandGenerator_t gen;
CHECK_EQ(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT), CURAND_STATUS_SUCCESS);
CHECK_EQ(curandSetPseudoRandomGeneratorSeed(gen, rand()), CURAND_STATUS_SUCCESS);
CHECK_EQ(curandGenerateUniform(gen, data_f, size), CURAND_STATUS_SUCCESS);
CHECK_EQ(curandDestroyGenerator(gen), CURAND_STATUS_SUCCESS);
Dtype scale = vend - vstart;
cuda_cvt_data<<<CUDA_GET_BLOCKS(size), CUDA_NUM_THREADS, 0, stream>>>(data_f, data_ptr, scale, size);
cudaDeviceSynchronize();
cudaFree(data_f);
CUDA_POST_KERNEL_CHECK;
};
template <class Tensor_t>
void print_tensor_device(Tensor_t& tensor, typename Tensor_t::API::stream_t stream){
typedef typename Tensor_t::Dtype Dtype;
LOG(INFO) << "device tensor size: " << tensor.size();
const Dtype* data_ptr = static_cast<const Dtype*>(tensor.get_buf()->get_data());
int size = tensor.size();
print_device_data<<<1, 1, 0, stream>>>(data_ptr, size, tensor.width());
cudaDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
};
#define FILL_TENSOR_NV(type, layout) \
template void fill_tensor_device_const<Tensor<NV, type, layout>>\
(Tensor<NV, type, layout>& tensor, DataTrait<type>::dtype value, \
typename TargetWrapper<NV>::stream_t stream); \
template void fill_tensor_device_rand<Tensor<NV, type, layout>>\
(Tensor<NV, type, layout>& tensor, typename TargetWrapper<NV>::stream_t stream); \
template void fill_tensor_device_rand<Tensor<NV, type, layout>>\
(Tensor<NV, type, layout>& tensor, DataTrait<type>::dtype vstart, \
DataTrait<type>::dtype vend, typename TargetWrapper<NV>::stream_t stream); \
template void print_tensor_device<Tensor<NV, type, layout>>\
(Tensor<NV, type, layout>& tensor, typename TargetWrapper<NV>::stream_t stream);
FILL_TENSOR_NV(AK_FLOAT, NCHW);
FILL_TENSOR_NV(AK_FLOAT, NHWC);
FILL_TENSOR_NV(AK_FLOAT, NHW);
FILL_TENSOR_NV(AK_FLOAT, NW);
FILL_TENSOR_NV(AK_FLOAT, HW);
FILL_TENSOR_NV(AK_FLOAT, W);
FILL_TENSOR_NV(AK_INT8, NCHW);
FILL_TENSOR_NV(AK_INT8, NHWC);
FILL_TENSOR_NV(AK_INT8, NHW);
FILL_TENSOR_NV(AK_INT8, NW);
FILL_TENSOR_NV(AK_INT8, HW);
FILL_TENSOR_NV(AK_INT8, W);
// INT8 NCHW_C4
template void fill_tensor_device_const<Tensor<NV, AK_INT8, NCHW_C4>>(Tensor<NV, AK_INT8, NCHW_C4>& tensor, \
char value, typename TargetWrapper<NV>::stream_t stream);
template void fill_tensor_device_rand<Tensor<NV, AK_INT8, NCHW_C4>>(Tensor<NV, AK_INT8, NCHW_C4>& tensor, \
typename TargetWrapper<NV>::stream_t stream);
template <>
void print_tensor_device<Tensor<NV, AK_INT8, NCHW_C4>>(Tensor<NV, AK_INT8, NCHW_C4>& tensor, \
typename TargetWrapper<NV>::stream_t stream) {
typedef typename Tensor<NV, AK_INT8, NCHW_C4>::Dtype Dtype;
LOG(INFO) << "device tensor size: " << tensor.size();
const Dtype* data_ptr = (const Dtype*)tensor.get_buf()->get_data();
int size = tensor.size();
print_device_data<<<1, 1, 0, stream>>>(data_ptr, size, tensor.width() * 4);
CUDA_POST_KERNEL_CHECK;
};
// use BLOCKCOUNT and THREADNUM
__global__
void int8nchwc4_fp32nchw(float* out_data, const char* in_data,
int valid_num, int valid_channel_4, int valid_height, int valid_width,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
float* scale, int count) {
float load0, load1, load2, load3;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int read_w = (gid) % valid_width;
int read_h = (gid / (in_h_stride)) % valid_height;
int read_c = (gid / (in_c_stride)) % valid_channel_4;
int read_n = (gid / (in_n_stride)) % valid_num;
int scale_index = read_c << 2;
int in_offset = read_n * in_n_stride
+ read_c * in_c_stride
+ read_h * in_h_stride
+ read_w;
int out_offset = read_n * out_n_stride
+ read_c * (out_c_stride << 2)
+ read_h * out_h_stride
+ read_w * out_w_stride;
if (gid < count) {
char4 readin = __ldg(&((const char4*)in_data)[in_offset]);
load0 = static_cast<float>(readin.x);
load1 = static_cast<float>(readin.y);
load2 = static_cast<float>(readin.z);
load3 = static_cast<float>(readin.w);
out_data[out_offset] = load0 * scale[scale_index]; out_offset += out_c_stride;
out_data[out_offset] = load1 * scale[scale_index + 1]; out_offset += out_c_stride;
out_data[out_offset] = load2 * scale[scale_index + 2]; out_offset += out_c_stride;
out_data[out_offset] = load3 * scale[scale_index + 3];
}
}
template<>
SaberStatus DataTensorTransformHelper::transform<Tensor<NV, AK_FLOAT, NCHW>, Tensor<NV, AK_INT8, NCHW_C4> >(
Tensor<NV, AK_FLOAT, NCHW> &out_tensor,
const Tensor<NV, AK_INT8, NCHW_C4> &in_tensor, Context<NV> ctx){
Shape out_stride = out_tensor.get_stride();
Shape in_shape = in_tensor.valid_shape();
Shape out_shape = out_tensor.valid_shape();
int count = in_shape[0] * in_shape[1] * in_shape[2] * in_shape[3];
const char * in_data = in_tensor.data();
float * out_data = out_tensor.mutable_data();
cudaStream_t cuda_stream = ctx.get_compute_stream();
int8nchwc4_fp32nchw<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(out_data, in_data,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
in_shape[1] * in_shape[2] * in_shape[3],
in_shape[2] * in_shape[3],
in_shape[3], 1,
out_stride[0], out_stride[1], out_stride[2], out_stride[3],
_weight_scale, count);
return SaberSuccess;
}
__global__
void transform_nchw_2_c4(char* out_data, const float* in_data,
int valid_num, int valid_channel_4, int valid_height, int valid_width,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
float scale,
int count) {
int load0, load1, load2, load3;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
int write_w = (gid) % valid_width;
int write_h = (gid / (out_h_stride)) % valid_height;
int write_c = (gid / (out_c_stride)) % valid_channel_4;
int write_n = (gid / (out_n_stride)) % valid_num;
int in_offset = write_n * in_n_stride
+ write_c * (in_c_stride << 2)
+ write_h * in_h_stride
+ write_w * in_w_stride;
int out_offset = write_n * out_n_stride
+ write_c * out_c_stride
+ write_h * out_h_stride
+ write_w;
if (gid < count) {
char4 write;
load0 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.x = static_cast<char>(load0);
in_offset += in_c_stride;
load1 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.y = static_cast<char>(load1);
in_offset += in_c_stride;
load2 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.z = static_cast<char>(load2);
in_offset += in_c_stride;
load3 = __float2int_rn(__ldg(&in_data[in_offset]) * scale);
write.w = static_cast<char>(load3);
((char4*)out_data)[out_offset] = write;
}
}
template<>
SaberStatus DataTensorTransformHelper::transform<Tensor<NV, AK_INT8, NCHW_C4>, Tensor<NV, AK_FLOAT, NCHW> >(
Tensor<NV, AK_INT8, NCHW_C4> &out_tensor,
const Tensor<NV, AK_FLOAT, NCHW> &in_tensor, Context<NV> ctx){
const float * in_data = in_tensor.data();
char * out_data = out_tensor.mutable_data();
Shape in_stride = in_tensor.get_stride();
Shape in_shape = in_tensor.valid_shape();
Shape out_shape = out_tensor.valid_shape();
int count = out_shape[0] * out_shape[1] * out_shape[2] * out_shape[3];
cudaStream_t cuda_stream = ctx.get_compute_stream();
transform_nchw_2_c4<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(out_data, in_data,
out_shape[0], out_shape[1], out_shape[2], out_shape[3],
in_stride[0], in_stride[1], in_stride[2], in_stride[3],
out_shape[1] * out_shape[2] * out_shape[3],
out_shape[2] * out_shape[3], out_shape[3], 1,
(1.f / _in_scale), count);
return SaberSuccess;
}
__global__ void transform_nchw_2_nchw(float * out_data,
const float* in_data, const int count,
int in_n, int in_c, int in_h, int in_w,
int in_n_stride, int in_c_stride, int in_h_stride, int in_w_stride,
int out_n, int out_c, int out_h, int out_w,
int out_n_stride, int out_c_stride, int out_h_stride, int out_w_stride,
float *scale) {
CUDA_KERNEL_LOOP(tid, count){
int read_w = tid % in_w;
int read_h = (tid / (in_w)) % in_h;
int read_c = (tid / (in_h * in_w)) % in_c;
int read_n = (tid / (in_c * in_h * in_w)) % in_n;
int write_w = tid % out_w;
int write_h = (tid / (out_w)) % out_h;
int write_c = (tid / (out_h * out_w)) % out_c;
int write_n = (tid / (out_c * out_h * out_w)) % out_n;
int in_idx = read_n * in_n_stride
+ read_c * in_c_stride
+ read_h * in_h_stride
+ read_w * in_w_stride;
int out_idx = write_n * out_n_stride
+ write_c * out_c_stride
+ write_h * out_h_stride
+ write_w * out_w_stride;
float in_var = in_data[in_idx];
float in_scale = scale[read_c];
out_data[out_idx] = in_var * in_scale;
}
}
template<>
SaberStatus DataTensorTransformHelper::transform<Tensor<NV, AK_FLOAT, NCHW>, Tensor<NV, AK_FLOAT, NCHW> >(
Tensor<NV, AK_FLOAT, NCHW> &out_tensor,
const Tensor<NV, AK_FLOAT, NCHW> &in_tensor, Context<NV> ctx){
Shape in_shape = in_tensor.valid_shape();
Shape out_shape = out_tensor.valid_shape();
Shape stride_in = in_tensor.get_stride();
Shape stride_out = out_tensor.get_stride();
const float *in_data = (const float*)in_tensor.data();
float *out_data = (float*)out_tensor.mutable_data();
const int count = in_tensor.valid_size();
cudaStream_t cuda_stream = ctx.get_compute_stream();
transform_nchw_2_nchw
<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data, in_data, count,
in_shape[0], in_shape[1], in_shape[2], in_shape[3],
stride_in[0], stride_in[1], stride_in[2], stride_in[3],
out_shape[0], out_shape[1], out_shape[2], out_shape[3],
stride_out[0], stride_out[1], stride_out[2], stride_out[3],
_weight_scale);
return SaberSuccess;
}
} //namespace saber
} //namespace anakin
|
214f0d33cba20ceb38ac061638e1e7d02559d1c6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <rocblas.h>
#include "learn_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
inline bool checkCUDAError() {
hipError_t err = hipGetLastError();
if (hipSuccess != err)
printf("%s\n", hipGetErrorString( err));
return hipSuccess != err;
}
EXPORT int mult_by_sigmoid_deriv(cudamat* target, cudamat* acts) {
int len = acts->size[0]*acts->size[1];
if (acts->is_trans != target->is_trans)
return ERROR_TRANSPOSED;
if (acts->size[0] != target->size[0] || acts->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultiplyBySigmoidGrad), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, acts->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
}
|
214f0d33cba20ceb38ac061638e1e7d02559d1c6.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cublas.h>
#include "learn_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
inline bool checkCUDAError() {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
printf("%s\n", cudaGetErrorString( err));
return cudaSuccess != err;
}
EXPORT int mult_by_sigmoid_deriv(cudamat* target, cudamat* acts) {
int len = acts->size[0]*acts->size[1];
if (acts->is_trans != target->is_trans)
return ERROR_TRANSPOSED;
if (acts->size[0] != target->size[0] || acts->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultiplyBySigmoidGrad<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(acts->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
}
|
ce4085e4786f0c5b36d06f62ef3c001d30c333eb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include "bitonic_kernel.cu"
//
// A sorting network is a sorting algorithm in which the sequence of comparisons
// is not data-dependent. That makes sorting networks suitable for parallel implementations.
//
// Bitonic sort is one of the fastest sorting networks, consisting of O(n log^2 n)
// comparators. It has a simple implementation and it's very efficient when sorting
// a small number of elements:
//
// http://citeseer.ist.psu.edu/blelloch98experimental.html
//
// This implementation is based on:
//
// http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm
//
int main(int argc, char** argv)
{
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
int values[NUM];
for(int i = 0; i < NUM; i++)
{
values[i] = rand();
}
int * dvalues;
cutilSafeCall(hipMalloc((void**)&dvalues, sizeof(int) * NUM));
cutilSafeCall(hipMemcpy(dvalues, values, sizeof(int) * NUM, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( bitonicSort), dim3(1), dim3(NUM), sizeof(int) * NUM, 0, dvalues);
// check for any errors
cutilCheckMsg("Kernel execution failed");
cutilSafeCall(hipMemcpy(values, dvalues, sizeof(int) * NUM, hipMemcpyDeviceToHost));
cutilSafeCall(hipFree(dvalues));
bool passed = true;
for(int i = 1; i < NUM; i++)
{
if (values[i-1] > values[i])
{
passed = false;
}
}
printf( "Test %s\n", passed ? "PASSED" : "FAILED");
hipDeviceReset();
cutilExit(argc, argv);
}
|
ce4085e4786f0c5b36d06f62ef3c001d30c333eb.cu
|
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include "bitonic_kernel.cu"
//
// A sorting network is a sorting algorithm in which the sequence of comparisons
// is not data-dependent. That makes sorting networks suitable for parallel implementations.
//
// Bitonic sort is one of the fastest sorting networks, consisting of O(n log^2 n)
// comparators. It has a simple implementation and it's very efficient when sorting
// a small number of elements:
//
// http://citeseer.ist.psu.edu/blelloch98experimental.html
//
// This implementation is based on:
//
// http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm
//
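// For intuition, a minimal host-side reference of the bitonic network described
// above. This is an illustrative sketch only (not taken from bitonic_kernel.cu);
// it assumes n is a power of two, as NUM is for the GPU path launched below.
void bitonicSortHostReference(int* a, int n)
{
    for (int k = 2; k <= n; k <<= 1) {           // length of the bitonic sequences being merged
        for (int j = k >> 1; j > 0; j >>= 1) {   // compare-exchange distance within each sequence
            for (int i = 0; i < n; ++i) {
                int ixj = i ^ j;                 // partner element of i at this stage
                if (ixj > i) {
                    bool ascending = ((i & k) == 0);
                    if ((ascending && a[i] > a[ixj]) || (!ascending && a[i] < a[ixj])) {
                        int t = a[i]; a[i] = a[ixj]; a[ixj] = t;
                    }
                }
            }
        }
    }
}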
int main(int argc, char** argv)
{
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
int values[NUM];
for(int i = 0; i < NUM; i++)
{
values[i] = rand();
}
int * dvalues;
cutilSafeCall(cudaMalloc((void**)&dvalues, sizeof(int) * NUM));
cutilSafeCall(cudaMemcpy(dvalues, values, sizeof(int) * NUM, cudaMemcpyHostToDevice));
bitonicSort<<<1, NUM, sizeof(int) * NUM>>>(dvalues);
// check for any errors
cutilCheckMsg("Kernel execution failed");
cutilSafeCall(cudaMemcpy(values, dvalues, sizeof(int) * NUM, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaFree(dvalues));
bool passed = true;
for(int i = 1; i < NUM; i++)
{
if (values[i-1] > values[i])
{
passed = false;
}
}
printf( "Test %s\n", passed ? "PASSED" : "FAILED");
cudaThreadExit();
cutilExit(argc, argv);
}
|
3aafcfd3492747cd36a461b04e6434e369cdcc88.hip
|
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_pr_nibble.cu
*
* @brief Simple test driver program for Gunrock template.
*/
#include <iostream>
#include <gunrock/app/pr_nibble/pr_nibble_app.cu>
#include <gunrock/app/test_base.cuh>
using namespace gunrock;
namespace APP_NAMESPACE = app::pr_nibble;
/******************************************************************************
* Main
******************************************************************************/
/**
* @brief Enclosure to the main function
*/
struct main_struct {
/**
* @brief the actual main function, after type switching
* @tparam VertexT Type of vertex identifier
* @tparam SizeT Type of graph size, i.e. type of edge identifier
* @tparam ValueT Type of edge values
* @param parameters Command line parameters
* @param v,s,val Placeholders for type deduction
* \return hipError_t error message(s), if any
*/
template <typename VertexT, // Use int as the vertex identifier
typename SizeT, // Use int as the graph size type
typename ValueT> // Use int as the value type
hipError_t
operator()(util::Parameters ¶meters, VertexT v, SizeT s, ValueT val) {
hipError_t retval = hipSuccess;
// CLI parameters
bool quick = parameters.Get<bool>("quick");
bool quiet = parameters.Get<bool>("quiet");
std::string validation = parameters.Get<std::string>("validation");
if (quick && (parameters.UseDefault("validation") == false && validation != "none")) {
util::PrintMsg("Invalid options --quick and --validation=" + validation +
", no CPU reference result to validate");
return retval;
}
typedef typename app::TestGraph<VertexT, SizeT, ValueT, graph::HAS_CSR>
GraphT;
util::CpuTimer cpu_timer;
GraphT graph;
cpu_timer.Start();
GUARD_CU(graphio::LoadGraph(parameters, graph));
cpu_timer.Stop();
parameters.Set("load-time", cpu_timer.ElapsedMillis());
// Problem specific variables
GUARD_CU(app::Set_Srcs(parameters, graph));
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT> >("srcs");
int num_srcs = srcs.size();
ValueT **ref_values = NULL;
if (!quick) {
ref_values = new ValueT *[num_srcs];
for (int i = 0; i < num_srcs; i++) {
VertexT src = srcs[i];
ref_values[i] = new ValueT[graph.nodes];
util::PrintMsg("__________________________", !quiet);
float elapsed = app::pr_nibble::CPU_Reference(
graph.csr(), parameters, src, ref_values[i], quiet);
util::PrintMsg(
"--------------------------\n Elapsed: " + std::to_string(elapsed),
!quiet);
}
}
std::vector<std::string> switches{"advance-mode"};
GUARD_CU(app::Switch_Parameters(
parameters, graph, switches,
[ref_values](util::Parameters ¶meters, GraphT &graph) {
return app::pr_nibble::RunTests(parameters, graph, ref_values,
util::DEVICE);
}));
if (!quick) {
for (int i = 0; i < num_srcs; i++) {
delete[] ref_values[i];
ref_values[i] = NULL;
}
delete[] ref_values;
ref_values = NULL;
}
return retval;
}
};
int main(int argc, char **argv) {
hipError_t retval = hipSuccess;
util::Parameters parameters("test pr_nibble");
GUARD_CU(graphio::UseParameters(parameters));
GUARD_CU(app::pr_nibble::UseParameters(parameters));
GUARD_CU(app::UseParameters_test(parameters));
GUARD_CU(parameters.Parse_CommandLine(argc, argv));
if (parameters.Get<bool>("help")) {
parameters.Print_Help();
return hipSuccess;
}
GUARD_CU(parameters.Check_Required());
return app::Switch_Types<app::VERTEXT_U32B | app::VERTEXT_U64B |
app::SIZET_U32B | app::SIZET_U64B |
app::VALUET_F64B | app::UNDIRECTED>(parameters,
main_struct());
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
3aafcfd3492747cd36a461b04e6434e369cdcc88.cu
|
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_pr_nibble.cu
*
* @brief Simple test driver program for Gunrock template.
*/
#include <iostream>
#include <gunrock/app/pr_nibble/pr_nibble_app.cu>
#include <gunrock/app/test_base.cuh>
using namespace gunrock;
namespace APP_NAMESPACE = app::pr_nibble;
/******************************************************************************
* Main
******************************************************************************/
/**
* @brief Enclosure to the main function
*/
struct main_struct {
/**
* @brief the actual main function, after type switching
* @tparam VertexT Type of vertex identifier
* @tparam SizeT Type of graph size, i.e. type of edge identifier
* @tparam ValueT Type of edge values
* @param parameters Command line parameters
* @param v,s,val Placeholders for type deduction
* \return cudaError_t error message(s), if any
*/
template <typename VertexT, // Use int as the vertex identifier
typename SizeT, // Use int as the graph size type
typename ValueT> // Use int as the value type
cudaError_t
operator()(util::Parameters ¶meters, VertexT v, SizeT s, ValueT val) {
cudaError_t retval = cudaSuccess;
// CLI parameters
bool quick = parameters.Get<bool>("quick");
bool quiet = parameters.Get<bool>("quiet");
std::string validation = parameters.Get<std::string>("validation");
if (quick && (parameters.UseDefault("validation") == false && validation != "none")) {
util::PrintMsg("Invalid options --quick and --validation=" + validation +
", no CPU reference result to validate");
return retval;
}
typedef typename app::TestGraph<VertexT, SizeT, ValueT, graph::HAS_CSR>
GraphT;
util::CpuTimer cpu_timer;
GraphT graph;
cpu_timer.Start();
GUARD_CU(graphio::LoadGraph(parameters, graph));
cpu_timer.Stop();
parameters.Set("load-time", cpu_timer.ElapsedMillis());
// Problem specific variables
GUARD_CU(app::Set_Srcs(parameters, graph));
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT> >("srcs");
int num_srcs = srcs.size();
ValueT **ref_values = NULL;
if (!quick) {
ref_values = new ValueT *[num_srcs];
for (int i = 0; i < num_srcs; i++) {
VertexT src = srcs[i];
ref_values[i] = new ValueT[graph.nodes];
util::PrintMsg("__________________________", !quiet);
float elapsed = app::pr_nibble::CPU_Reference(
graph.csr(), parameters, src, ref_values[i], quiet);
util::PrintMsg(
"--------------------------\n Elapsed: " + std::to_string(elapsed),
!quiet);
}
}
std::vector<std::string> switches{"advance-mode"};
GUARD_CU(app::Switch_Parameters(
parameters, graph, switches,
[ref_values](util::Parameters ¶meters, GraphT &graph) {
return app::pr_nibble::RunTests(parameters, graph, ref_values,
util::DEVICE);
}));
if (!quick) {
for (int i = 0; i < num_srcs; i++) {
delete[] ref_values[i];
ref_values[i] = NULL;
}
delete[] ref_values;
ref_values = NULL;
}
return retval;
}
};
int main(int argc, char **argv) {
cudaError_t retval = cudaSuccess;
util::Parameters parameters("test pr_nibble");
GUARD_CU(graphio::UseParameters(parameters));
GUARD_CU(app::pr_nibble::UseParameters(parameters));
GUARD_CU(app::UseParameters_test(parameters));
GUARD_CU(parameters.Parse_CommandLine(argc, argv));
if (parameters.Get<bool>("help")) {
parameters.Print_Help();
return cudaSuccess;
}
GUARD_CU(parameters.Check_Required());
return app::Switch_Types<app::VERTEXT_U32B | app::VERTEXT_U64B |
app::SIZET_U32B | app::SIZET_U64B |
app::VALUET_F64B | app::UNDIRECTED>(parameters,
main_struct());
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
e6df69e9637f06fb1c9676194f3da0dddfb682a4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
namespace at { namespace native {
void bitwise_not_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a) {
return !a;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ~a;
});
});
}
}
void exp_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exp_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "exp_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::exp(a);
});
});
});
}
void expm1_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "expm1_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::expm1(a);
});
});
}
// We manually overload rsqrt because std::rsqrt does not work with complex types.
template<typename scalar_t>
__host__ __device__ static inline scalar_t rsqrt_wrapper(scalar_t v) {
return ::rsqrt(v);
}
template<typename T>
__host__ __device__ static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) {
const c10::complex<T> one = c10::complex<T>(1.0, 0);
// std::sqrt for c10::complex is overloaded in c10/util/complex_math.h
return one / ::sqrt(v);
}
void rsqrt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "rsqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
// In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float.
return rsqrt_wrapper(a);
});
});
}
void sqrt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "sqrt_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "sqrt_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sqrt(a);
});
});
});
}
void sigmoid_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "sigmoid_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "sigmoid_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
scalar_t one = scalar_t(1);
return one / (one + ::exp(- a));
});
});
});
}
void erf_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "erf_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "erf_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erf(a);
});
});
});
}
void erfc_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "erfc_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfc(a);
});
});
}
void erfinv_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "erfinv_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfinv(a);
});
});
}
void clamp_kernel_cuda(TensorIterator& iter, Scalar min_value, Scalar max_value) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "clamp_cuda", [&]() {
auto lower = min_value.to<scalar_t>();
auto upper = max_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
return (v < lower) ? lower : (v > upper ? upper : v);
});
});
}
void clamp_min_kernel_cuda(TensorIterator& iter, Scalar min_value) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "clamp_min_cuda", [&]() {
auto lower = min_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
return v < lower ? lower : v;
});
});
}
void clamp_max_kernel_cuda(TensorIterator& iter, Scalar max_value) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "clamp_max_cuda", [&]() {
auto upper = max_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
return v > upper ? upper : v;
});
});
}
REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda);
REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda);
REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda);
REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda);
REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda);
REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda);
REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda);
REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(clamp_stub, &clamp_kernel_cuda);
REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_cuda);
REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_cuda);
}}
|
e6df69e9637f06fb1c9676194f3da0dddfb682a4.cu
|
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
namespace at { namespace native {
void bitwise_not_kernel_cuda(TensorIterator& iter) {
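  // Booleans get logical negation: applying ~ to a bool would promote it to int
  // and always yield a non-zero (truthy) value, so !a is the correct "not" here.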
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a) {
return !a;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ~a;
});
});
}
}
void exp_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exp_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "exp_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::exp(a);
});
});
});
}
void expm1_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "expm1_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::expm1(a);
});
});
}
// We manually overload rsqrt because std::rsqrt does not work with complex types.
template<typename scalar_t>
__host__ __device__ static inline scalar_t rsqrt_wrapper(scalar_t v) {
return ::rsqrt(v);
}
template<typename T>
__host__ __device__ static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) {
const c10::complex<T> one = c10::complex<T>(1.0, 0);
// std::sqrt for c10::complex is overloaded in c10/util/complex_math.h
return one / ::sqrt(v);
}
void rsqrt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "rsqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
// In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float.
return rsqrt_wrapper(a);
});
});
}
void sqrt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "sqrt_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "sqrt_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sqrt(a);
});
});
});
}
void sigmoid_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "sigmoid_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "sigmoid_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
scalar_t one = scalar_t(1);
return one / (one + std::exp(- a));
});
});
});
}
void erf_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "erf_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "erf_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erf(a);
});
});
});
}
void erfc_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "erfc_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfc(a);
});
});
}
void erfinv_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "erfinv_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfinv(a);
});
});
}
void clamp_kernel_cuda(TensorIterator& iter, Scalar min_value, Scalar max_value) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "clamp_cuda", [&]() {
auto lower = min_value.to<scalar_t>();
auto upper = max_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
return (v < lower) ? lower : (v > upper ? upper : v);
});
});
}
void clamp_min_kernel_cuda(TensorIterator& iter, Scalar min_value) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "clamp_min_cuda", [&]() {
auto lower = min_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
return v < lower ? lower : v;
});
});
}
void clamp_max_kernel_cuda(TensorIterator& iter, Scalar max_value) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "clamp_max_cuda", [&]() {
auto upper = max_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
return v > upper ? upper : v;
});
});
}
REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda);
REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda);
REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda);
REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda);
REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda);
REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda);
REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda);
REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(clamp_stub, &clamp_kernel_cuda);
REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_cuda);
REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_cuda);
}}
|
3f3a4b317607f47804764591098b04d45d553388.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorSort.hip"
#else
// In alignment with default sort on a c++ map, this function
// will permute key and value tensors identically, and
// in such a way that the 'key' tensor is ordered numerically
void THCTensor_(sortKeyValueInplace)(THCState* state,
THCTensor* key,
THCudaLongTensor* value,
int dim, bool dir) {
THArgCheck(key->sizes().equals(value->sizes()), 2,
"Key tensor must have same size as value tensor");
int dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, value);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimensionLegacyNoScalars)(state, key);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
ptrdiff_t inElements = THCTensor_(nElement)(state, key);
if (inElements == 0) {
return;
}
int64_t keySliceSize = THCTensor_(sizeLegacyNoScalars)(state, key, dim);
ptrdiff_t keySlices = inElements / keySliceSize;
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize);
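  // nextHighestPowerOf2 rounds the slice size up to a power of two
  // (e.g. 600 -> 1024; an exact power of two maps to itself); conceptually:
  // p = 1; while (p < n) p <<= 1;. That rounded size selects which bitonic
  // network instantiation is launched below.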
// FIXME: We'd have to find some other trick with Thrust to perform a
// vectorized (key, value) sort by slice segment
if (ceilPowerOf2 > 2048) {
THError("sortKeyValueInplace only works for sizes <= 2048 at present");
}
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
if (!THC_getGridFromTiles(keySlices, grid)) {
THError("Slice to sort is too large");
}
#define HANDLE_CASE(TYPE, A, SIZE) \
do { \
int blockSize = SIZE / 2; \
if (blockSize < 1) { \
blockSize = 1; \
} \
\
dim3 block(blockSize); \
\
if (dir) { \
hipLaunchKernelGGL(( bitonicSortKVInPlace<scalar_t, int64_t, A, -1, GTComp<scalar_t, true>, TYPE, SIZE>) \
, dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
GTComp<scalar_t, true>()); \
} else { \
hipLaunchKernelGGL(( bitonicSortKVInPlace<scalar_t, int64_t, A, -1, LTComp<scalar_t, true>, TYPE, SIZE>) \
, dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
LTComp<scalar_t, true>()); \
} \
} while (0)
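// HANDLE_SORT_CASE maps the rounded slice size to the smallest instantiated
// bitonic network (32, 128, 1024 or 2048) that can hold it; a slice of size 1
// is already sorted and needs no kernel launch.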
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
case 512: \
case 256: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 128: \
case 64: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 32: \
case 16: \
case 8: \
case 4: \
case 2: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 1: \
/* Nothing to do, data already sorted */ \
break; \
default: \
assert(false); \
} \
}
// The constructed key/value tensor info is used to select the slice
// we are sorting on a per-block basis
if (THCTensor_canUse32BitIndexMath(state, key)) {
TensorInfo<scalar_t, unsigned int> keyInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, key);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
TensorInfo<int64_t, unsigned int> valueInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, value);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
if (keyInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned int, -2);
} else {
switch (keyInfo.dims) {
case 2:
HANDLE_SORT_CASE(unsigned int, 2);
break;
default:
HANDLE_SORT_CASE(unsigned int, -1);
break;
}
}
} else {
TensorInfo<scalar_t, uint64_t> keyInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, key);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
TensorInfo<int64_t, uint64_t> valueInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, value);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
// int64_t case is rare, just instantiate the generic version
HANDLE_SORT_CASE(uint64_t, -1);
}
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
THCudaCheck(hipGetLastError());
}
void THCTensor_(sortViaThrust)(THCState* state,
THCTensor* sorted,
THCudaLongTensor* indices,
THCTensor* input,
int dim, bool dir) {
int nDims = THCTensor_(nDimensionLegacyAll)(state, input);
ptrdiff_t totalElements = THCTensor_(nElement)(state, input);
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dim);
int64_t sliceStride = THTensor_strideLegacyNoScalars(input, dim);
// We perform a vectorized segmented sort in Thrust.
// Say we are sorting a (2, 3) tensor. We have in flattened form:
// values 0.4 1.2 5.3 6.2 1.3 2.3
// indices 0 1 2 3 4 5
// where indices is a global index (across all slices)
// First we sort by values, globally:
// values 6.2 5.3 2.3 1.2 1.3 0.4
// indices 3 2 5 1 4 0
// Then we stable sort by segment, which is index / 3:
// values 5.3 1.2 0.4 6.2 2.3 1.3
// indices 2 1 0 3 5 4
// Then we translate the global index to a per-slice Lua index
// (index % 3) + 1:
// values 5.3 1.2 0.4 6.2 2.3 1.3
// indices 3 2 1 1 3 2
// This method can only work if the slice we are sorting (`dim`) is
// innermost, and both values and indices are contiguous. We do this
// by re-arranging the input into this form as needed, which will
// unfortunately allocate memory if the request is not in this form.
// Vectorized sort is slower than iterated sort if the number of
// slices is small (since we're sorting twice, instead of invoking a
// smaller sort `numSlices` times), but the Thrust sort
// implementation here is a catch-all, so we're not looking for
// efficiency, but instead correctness.
THCTensor_(copy)(state, sorted, input);
THCTensor* trKeys = THCTensor_(newWithTensor)(state, sorted);
THCudaLongTensor* trIndices = THCudaLongTensor_newWithTensor(state, indices);
// Transpose dim to innermost
if (dim != nDims - 1) {
THCTensor_(transpose)(state, trKeys, NULL, dim, nDims - 1);
THCudaLongTensor_transpose(state, trIndices, NULL, dim, nDims - 1);
}
// Thrust must operate on a contiguous layout
THCTensor* trContigKey = THCTensor_(newContiguous)(state, trKeys);
THCudaLongTensor* trContigIndices = THCudaLongTensor_newContiguous(state, trIndices);
THCTensor_(free)(state, trKeys);
THCudaLongTensor_free(state, trIndices);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<scalar_t> keyIter(THCTensor_(data)(state, trContigKey));
// Since we are composing a global index across all segments rather
// than a per-segment index, we treat the memory as int so we don't
// have problems sorting slices < 2^24 but where the entire tensor
// has more than 2^24 elements
thrust::device_ptr<int64_t>
indexIter((int64_t*) THCudaLongTensor_data(state, trContigIndices));
// Fill the indices with a global index across all slices
thrust::counting_iterator<int64_t> countIter(0);
thrust::copy(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
#endif
countIter, countIter + totalElements, indexIter);
auto begin = thrust::make_zip_iterator(thrust::make_tuple(indexIter, keyIter));
if (dir){
if (totalElements < INT_MAX)
thrust::sort(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
#endif
begin, begin + totalElements, ThrustSliceGTOp<scalar_t, int, true>(sliceSize));
else
thrust::sort(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
#endif
begin, begin + totalElements, ThrustSliceGTOp<scalar_t, int64_t, true>(sliceSize));
} else {
if (totalElements < INT_MAX)
thrust::sort(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
#endif
begin, begin + totalElements, ThrustSliceLTOp<scalar_t, int, true>(sliceSize));
else
thrust::sort(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
#endif
begin, begin + totalElements, ThrustSliceLTOp<scalar_t, int64_t, true>(sliceSize));
}
// Translate the global integer 0-based index to a per-slice real
// Lua index
thrust::for_each(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
#endif
indexIter, indexIter + totalElements,
GlobalIndexToPerSliceIndex(sliceSize));
// Reverse the transposition as needed
if (dim != nDims - 1) {
THCTensor_(transpose)(state, trContigKey, NULL, dim, nDims - 1);
THCudaLongTensor_transpose(state, trContigIndices, NULL, dim, nDims - 1);
}
// Then copy back to the expected output
THCTensor_(freeCopyTo)(state, trContigKey, sorted);
THCudaLongTensor_freeCopyTo(state, trContigIndices, indices);
}
void THCTensor_(sort)(THCState* state,
THCTensor *sorted,
THCudaLongTensor *indices,
THCTensor *input,
int dim, int order) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, sorted, input));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
dim = at::maybe_wrap_dim(dim, input);
int64_t dims = THCTensor_(nDimensionLegacyNoScalars)(state, sorted);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
// Make sure sufficient output space is allocated
THCTensor_(resizeAs)(state, sorted, input);
THCudaLongTensor_resize(state, indices, input->sizes(), {});
// How large are the slices that we are sorting?
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dim);
// Workaround:
// CUDA 8 uses more shared memory than 7.5 for bitonicSortKVInPlace,
// and so for the double word types,
// we get "too many resources requested for launch" in the 2048 case
#if TORCH_HIP_VERSION >= 8000
#if defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_LONG)
int maxSliceSize = 1024;
#else
int maxSliceSize = 2048;
#endif
#else
int maxSliceSize = 2048;
#endif
if (sliceSize <= maxSliceSize) {
// Fill `indices` (the values) with the
// slice-relative index.
THCudaLongTensor_fillSliceWithIndex(state, indices, dim);
// We sort k/v pairs in-place; copy unsorted input to output
THCTensor_(copy)(state, sorted, input);
// Sort using our in-place k/v kernel that supports arbitrary
// layout
THCTensor_(sortKeyValueInplace)(state, sorted, indices, dim, order);
} else {
// Otherwise, fall back upon Thrust, which handles all other cases
// (potentially slowly, with extra copies/memory allocations)
THCTensor_(sortViaThrust)(state, sorted, indices, input, dim, (bool) order);
}
THCudaCheck(hipGetLastError());
}
#endif
|
3f3a4b317607f47804764591098b04d45d553388.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorSort.cu"
#else
// In alignment with default sort on a c++ map, this function
// will permute key and value tensors identically, and
// in such a way that the 'key' tensor is ordered numerically
void THCTensor_(sortKeyValueInplace)(THCState* state,
THCTensor* key,
THCudaLongTensor* value,
int dim, bool dir) {
THArgCheck(key->sizes().equals(value->sizes()), 2,
"Key tensor must have same size as value tensor");
int dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, value);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimensionLegacyNoScalars)(state, key);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
ptrdiff_t inElements = THCTensor_(nElement)(state, key);
if (inElements == 0) {
return;
}
int64_t keySliceSize = THCTensor_(sizeLegacyNoScalars)(state, key, dim);
ptrdiff_t keySlices = inElements / keySliceSize;
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize);
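  // nextHighestPowerOf2 rounds the slice size up to a power of two
  // (e.g. 600 -> 1024; an exact power of two maps to itself); conceptually:
  // p = 1; while (p < n) p <<= 1;. That rounded size selects which bitonic
  // network instantiation is launched below.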
// FIXME: We'd have to find some other trick with Thrust to perform a
// vectorized (key, value) sort by slice segment
if (ceilPowerOf2 > 2048) {
THError("sortKeyValueInplace only works for sizes <= 2048 at present");
}
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
if (!THC_getGridFromTiles(keySlices, grid)) {
THError("Slice to sort is too large");
}
#define HANDLE_CASE(TYPE, A, SIZE) \
do { \
int blockSize = SIZE / 2; \
if (blockSize < 1) { \
blockSize = 1; \
} \
\
dim3 block(blockSize); \
\
if (dir) { \
bitonicSortKVInPlace<scalar_t, int64_t, A, -1, GTComp<scalar_t, true>, TYPE, SIZE> \
<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
GTComp<scalar_t, true>()); \
} else { \
bitonicSortKVInPlace<scalar_t, int64_t, A, -1, LTComp<scalar_t, true>, TYPE, SIZE> \
<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
LTComp<scalar_t, true>()); \
} \
} while (0)
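// HANDLE_SORT_CASE maps the rounded slice size to the smallest instantiated
// bitonic network (32, 128, 1024 or 2048) that can hold it; a slice of size 1
// is already sorted and needs no kernel launch.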
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
case 512: \
case 256: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 128: \
case 64: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 32: \
case 16: \
case 8: \
case 4: \
case 2: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 1: \
/* Nothing to do, data already sorted */ \
break; \
default: \
assert(false); \
} \
}
// The constructed key/value tensor info is used to select the slice
// we are sorting on a per-block basis
if (THCTensor_canUse32BitIndexMath(state, key)) {
TensorInfo<scalar_t, unsigned int> keyInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, key);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
TensorInfo<int64_t, unsigned int> valueInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, value);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
if (keyInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned int, -2);
} else {
switch (keyInfo.dims) {
case 2:
HANDLE_SORT_CASE(unsigned int, 2);
break;
default:
HANDLE_SORT_CASE(unsigned int, -1);
break;
}
}
} else {
TensorInfo<scalar_t, uint64_t> keyInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, key);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
TensorInfo<int64_t, uint64_t> valueInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, value);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
// int64_t case is rare, just instantiate the generic version
HANDLE_SORT_CASE(uint64_t, -1);
}
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
THCudaCheck(cudaGetLastError());
}
void THCTensor_(sortViaThrust)(THCState* state,
THCTensor* sorted,
THCudaLongTensor* indices,
THCTensor* input,
int dim, bool dir) {
int nDims = THCTensor_(nDimensionLegacyAll)(state, input);
ptrdiff_t totalElements = THCTensor_(nElement)(state, input);
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dim);
int64_t sliceStride = THTensor_strideLegacyNoScalars(input, dim);
// We perform a vectorized segmented sort in Thrust.
// Say we are sorting a (2, 3) tensor. We have in flattened form:
// values 0.4 1.2 5.3 6.2 1.3 2.3
// indices 0 1 2 3 4 5
// where indices is a global index (across all slices)
// First we sort by values, globally:
// values 6.2 5.3 2.3 1.2 1.3 0.4
// indices 3 2 5 1 4 0
// Then we stable sort by segment, which is index / 3:
// values 5.3 1.2 0.4 6.2 2.3 1.3
// indices 2 1 0 3 5 4
// Then we translate the global index to a per-slice Lua index
// (index % 3) + 1:
// values 5.3 1.2 0.4 6.2 2.3 1.3
// indices 3 2 1 1 3 2
// This method can only work if the slice we are sorting (`dim`) is
// innermost, and both values and indices are contiguous. We do this
// by re-arranging the input into this form as needed, which will
// unfortunately allocate memory if the request is not in this form.
// Vectorized sort is slower than iterated sort if the number of
// slices is small (since we're sorting twice, instead of invoking a
// smaller sort `numSlices` times), but the Thrust sort
// implementation here is a catch-all, so we're not looking for
// efficiency, but instead correctness.
THCTensor_(copy)(state, sorted, input);
THCTensor* trKeys = THCTensor_(newWithTensor)(state, sorted);
THCudaLongTensor* trIndices = THCudaLongTensor_newWithTensor(state, indices);
// Transpose dim to innermost
if (dim != nDims - 1) {
THCTensor_(transpose)(state, trKeys, NULL, dim, nDims - 1);
THCudaLongTensor_transpose(state, trIndices, NULL, dim, nDims - 1);
}
// Thrust must operate on a contiguous layout
THCTensor* trContigKey = THCTensor_(newContiguous)(state, trKeys);
THCudaLongTensor* trContigIndices = THCudaLongTensor_newContiguous(state, trIndices);
THCTensor_(free)(state, trKeys);
THCudaLongTensor_free(state, trIndices);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<scalar_t> keyIter(THCTensor_(data)(state, trContigKey));
// Since we are composing a global index across all segments rather
// than a per-segment index, we treat the memory as int so we don't
// have problems sorting slices < 2^24 but where the entire tensor
// has more than 2^24 elements
thrust::device_ptr<int64_t>
indexIter((int64_t*) THCudaLongTensor_data(state, trContigIndices));
// Fill the indices with a global index across all slices
thrust::counting_iterator<int64_t> countIter(0);
thrust::copy(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()),
#endif
countIter, countIter + totalElements, indexIter);
auto begin = thrust::make_zip_iterator(thrust::make_tuple(indexIter, keyIter));
if (dir){
if (totalElements < INT_MAX)
thrust::sort(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()),
#endif
begin, begin + totalElements, ThrustSliceGTOp<scalar_t, int, true>(sliceSize));
else
thrust::sort(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()),
#endif
begin, begin + totalElements, ThrustSliceGTOp<scalar_t, int64_t, true>(sliceSize));
} else {
if (totalElements < INT_MAX)
thrust::sort(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()),
#endif
begin, begin + totalElements, ThrustSliceLTOp<scalar_t, int, true>(sliceSize));
else
thrust::sort(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()),
#endif
begin, begin + totalElements, ThrustSliceLTOp<scalar_t, int64_t, true>(sliceSize));
}
// Translate the global integer 0-based index to a per-slice real
// Lua index
thrust::for_each(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()),
#endif
indexIter, indexIter + totalElements,
GlobalIndexToPerSliceIndex(sliceSize));
// Reverse the transposition as needed
if (dim != nDims - 1) {
THCTensor_(transpose)(state, trContigKey, NULL, dim, nDims - 1);
THCudaLongTensor_transpose(state, trContigIndices, NULL, dim, nDims - 1);
}
// Then copy back to the expected output
THCTensor_(freeCopyTo)(state, trContigKey, sorted);
THCudaLongTensor_freeCopyTo(state, trContigIndices, indices);
}
void THCTensor_(sort)(THCState* state,
THCTensor *sorted,
THCudaLongTensor *indices,
THCTensor *input,
int dim, int order) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, sorted, input));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
dim = at::maybe_wrap_dim(dim, input);
int64_t dims = THCTensor_(nDimensionLegacyNoScalars)(state, sorted);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
// Make sure sufficient output space is allocated
THCTensor_(resizeAs)(state, sorted, input);
THCudaLongTensor_resize(state, indices, input->sizes(), {});
// How large are the slices that we are sorting?
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dim);
// Workaround:
// CUDA 8 uses more shared memory than 7.5 for bitonicSortKVInPlace,
// and so for the double word types,
// we get "too many resources requested for launch" in the 2048 case
#if CUDA_VERSION >= 8000
#if defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_LONG)
int maxSliceSize = 1024;
#else
int maxSliceSize = 2048;
#endif
#else
int maxSliceSize = 2048;
#endif
if (sliceSize <= maxSliceSize) {
// Fill `indices` (the values) with the
// slice-relative index.
THCudaLongTensor_fillSliceWithIndex(state, indices, dim);
// We sort k/v pairs in-place; copy unsorted input to output
THCTensor_(copy)(state, sorted, input);
// Sort using our in-place k/v kernel that supports arbitrary
// layout
THCTensor_(sortKeyValueInplace)(state, sorted, indices, dim, order);
} else {
// Otherwise, fall back upon Thrust, which handles all other cases
// (potentially slowly, with extra copies/memory allocations)
THCTensor_(sortViaThrust)(state, sorted, indices, input, dim, (bool) order);
}
THCudaCheck(cudaGetLastError());
}
#endif
|
3f2651b17dd64c9ed5d5f2cbd7a9916c36c5e999.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <hip/hip_runtime_api.h>
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("different on %dth element: host %f gpu %f\n", i, hostRef[i],
gpuRef[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
void initialData(float *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip[i] = (float)( rand() & 0xFF ) / 100.0f;
}
return;
}
void sumArraysOnHost(float *A, float *B, float *C, const int n, int offset)
{
for (int idx = offset, k = 0; idx < n; idx++, k++)
{
C[k] = A[idx] + B[idx];
}
}
__global__ void warmup(float *A, float *B, float *C, const int n, int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[i] = A[k] + B[k];
}
// offset > 0 causes misaligned global memory reads
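// When offset is not a multiple of a warp's coalesced access width
// (32 threads x 4-byte floats = 128 bytes), each warp's loads of A[k] and B[k]
// typically straddle an extra memory segment, which is the effect this sample measures.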
__global__ void readOffset(float *A, float *B, float *C, const int n,
int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[i] = A[k] + B[k];
}
int main(int argc, char **argv)
{
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("%s starting read-offset test at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
int nElem = 1 << 20;
printf(" with array size %d\n", nElem);
size_t nBytes = nElem * sizeof(float);
int blocksize = 512;
int offset = 0;
if (argc > 1) offset = atoi(argv[1]);
if (argc > 2) blocksize = atoi(argv[2]);
dim3 block (blocksize, 1);
dim3 grid ((nElem + block.x - 1) / block.x, 1);
float *h_A = (float *)malloc(nBytes);
float *h_B = (float *)malloc(nBytes);
float *hostRef = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
initialData(h_A, nElem);
memcpy(h_B, h_A, nBytes);
sumArraysOnHost(h_A, h_B, hostRef, nElem, offset);
float *d_A, *d_B, *d_C;
CHECK(hipMalloc((float**)&d_A, nBytes));
CHECK(hipMalloc((float**)&d_B, nBytes));
CHECK(hipMalloc((float**)&d_C, nBytes));
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_A, nBytes, hipMemcpyHostToDevice));
double iStart = seconds();
hipLaunchKernelGGL(( warmup), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
CHECK(hipDeviceSynchronize());
double iElaps = seconds() - iStart;
printf("warmup <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
block.x, offset, iElaps);
CHECK(hipGetLastError());
iStart = seconds();
CHECK(hipProfilerStart());
hipLaunchKernelGGL(( readOffset), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
CHECK(hipProfilerStop());
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("readOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
block.x, offset, iElaps);
CHECK(hipGetLastError());
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
checkResult(hostRef, gpuRef, nElem - offset);
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
free(h_A);
free(h_B);
// CHECK(hipProfilerStop());
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
|
3f2651b17dd64c9ed5d5f2cbd7a9916c36c5e999.cu
|
#include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <cuda_profiler_api.h>
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("different on %dth element: host %f gpu %f\n", i, hostRef[i],
gpuRef[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
void initialData(float *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip[i] = (float)( rand() & 0xFF ) / 100.0f;
}
return;
}
void sumArraysOnHost(float *A, float *B, float *C, const int n, int offset)
{
for (int idx = offset, k = 0; idx < n; idx++, k++)
{
C[k] = A[idx] + B[idx];
}
}
__global__ void warmup(float *A, float *B, float *C, const int n, int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[i] = A[k] + B[k];
}
// offset > 0 causes misaligned global memory reads
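// When offset is not a multiple of a warp's coalesced access width
// (32 threads x 4-byte floats = 128 bytes), each warp's loads of A[k] and B[k]
// typically straddle an extra memory segment, which is the effect this sample measures.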
__global__ void readOffset(float *A, float *B, float *C, const int n,
int offset)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int k = i + offset;
if (k < n) C[i] = A[k] + B[k];
}
int main(int argc, char **argv)
{
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s starting read-offset test at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
int nElem = 1 << 20;
printf(" with array size %d\n", nElem);
size_t nBytes = nElem * sizeof(float);
int blocksize = 512;
int offset = 0;
if (argc > 1) offset = atoi(argv[1]);
if (argc > 2) blocksize = atoi(argv[2]);
dim3 block (blocksize, 1);
dim3 grid ((nElem + block.x - 1) / block.x, 1);
float *h_A = (float *)malloc(nBytes);
float *h_B = (float *)malloc(nBytes);
float *hostRef = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
initialData(h_A, nElem);
memcpy(h_B, h_A, nBytes);
sumArraysOnHost(h_A, h_B, hostRef, nElem, offset);
float *d_A, *d_B, *d_C;
CHECK(cudaMalloc((float**)&d_A, nBytes));
CHECK(cudaMalloc((float**)&d_B, nBytes));
CHECK(cudaMalloc((float**)&d_C, nBytes));
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_A, nBytes, cudaMemcpyHostToDevice));
double iStart = seconds();
warmup<<<grid, block>>>(d_A, d_B, d_C, nElem, offset);
CHECK(cudaDeviceSynchronize());
double iElaps = seconds() - iStart;
printf("warmup <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
block.x, offset, iElaps);
CHECK(cudaGetLastError());
iStart = seconds();
CHECK(cudaProfilerStart());
readOffset<<<grid, block>>>(d_A, d_B, d_C, nElem, offset);
CHECK(cudaProfilerStop());
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("readOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x,
block.x, offset, iElaps);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
checkResult(hostRef, gpuRef, nElem - offset);
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
free(h_A);
free(h_B);
// CHECK(cuProfilerStop());
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
90eaddab0e08b0322ec2a8934dfca22686da5410.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void checkIndex(void) {
printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d %d)"
"blockDim:(%d, %d, %d) gridDim:(%d, %d, %d)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z,
gridDim.x, gridDim.y, gridDim.z);
}
int main(int argc, char **argv)
{
    // Define the total number of data elements
int nElem = 6;
    // Define the grid and block structure
    // The block size (number of threads) is 3
dim3 block(3);
    // Round the grid size up to a multiple of the block size
    // (6 + (3-1)) / 3 = 8 / 3 = 2
    // i.e., the grid size is 2
dim3 grid((nElem + block.x - 1) / block.x);
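    // With 2 blocks of 3 threads each, the launch covers all 6 elements,
    // and each of the 6 threads prints its coordinates exactly once.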
    // Check the grid and block dimensions from the device side
hipLaunchKernelGGL(( checkIndex), dim3(grid), dim3(block), 0, 0, );
    // Reset the device
hipDeviceReset();
return(0);
}
|
90eaddab0e08b0322ec2a8934dfca22686da5410.cu
|
#include <cuda_runtime.h>
#include <stdio.h>
__global__ void checkIndex(void) {
printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d %d)"
"blockDim:(%d, %d, %d) gridDim:(%d, %d, %d)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z,
gridDim.x, gridDim.y, gridDim.z);
}
int main(int argc, char **argv)
{
    // Define the total number of data elements
int nElem = 6;
    // Define the grid and block structure
    // The block size (number of threads) is 3
dim3 block(3);
    // Round the grid size up to a multiple of the block size
    // (6 + (3-1)) / 3 = 8 / 3 = 2
    // In other words, the grid size is 2.
dim3 grid((nElem + block.x - 1) / block.x);
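    // With 2 blocks of 3 threads each, the launch covers all 6 elements,
    // and each of the 6 threads prints its coordinates exactly once.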
    // Check the grid and block dimensions from the device side
checkIndex<<<grid, block>>>();
    // Reset the device
cudaDeviceReset();
return(0);
}
|
dc45516fdd4a113ae841c3fd9d4699ce47bae1d3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief The cugraph Jaccard core functionality
*
* @file jaccard.cu
* ---------------------------------------------------------------------------**/
#include "utilities/graph_utils.cuh"
#include "cugraph.h"
#include "rmm_utils.h"
#include "utilities/error_utils.h"
namespace cugraph {
namespace detail {
// Volume of neighbors (*weight_s)
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_row_sum(IdxType n,
IdxType *csrPtr,
IdxType *csrInd,
ValType *v,
ValType *work) {
IdxType row, start, end, length;
ValType sum;
for (row = threadIdx.y + blockIdx.y * blockDim.y;
row < n;
row += gridDim.y * blockDim.y) {
start = csrPtr[row];
end = csrPtr[row + 1];
length = end - start;
//compute row sums
if (weighted) {
sum = parallel_prefix_sum(length, csrInd + start, v);
if (threadIdx.x == 0)
work[row] = sum;
}
else {
work[row] = (ValType) length;
}
}
}
// Volume of intersections (*weight_i) and cumulative volume of neighbors (*weight_s)
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_is(IdxType n,
IdxType *csrPtr,
IdxType *csrInd,
ValType *v,
ValType *work,
ValType *weight_i,
ValType *weight_s) {
IdxType i, j, row, col, Ni, Nj;
IdxType ref, cur, ref_col, cur_col, match;
ValType ref_val;
for (row = threadIdx.z + blockIdx.z * blockDim.z;
row < n;
row += gridDim.z * blockDim.z) {
for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y;
j < csrPtr[row + 1];
j += gridDim.y * blockDim.y) {
col = csrInd[j];
//find which row has least elements (and call it reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
//compute new sum weights
weight_s[j] = work[row] + work[col];
//compute new intersection weights
//search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
}
else {
ref_val = 1.0;
}
//binary search (column indices are sorted within each row)
IdxType left = csrPtr[cur];
IdxType right = csrPtr[cur + 1] - 1;
while (left <= right) {
IdxType middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
}
else if (cur_col < ref_col) {
left = middle + 1;
}
else {
match = middle;
break;
}
}
//if the element with the same column index in the reference row has been found
if (match != -1) {
atomicAdd(&weight_i[j], ref_val);
}
}
}
}
}
// Volume of intersections (*weight_i) and cumulative volume of neighbors (*weight_s)
// Using list of node pairs
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_is_pairs(IdxType num_pairs,
IdxType *csrPtr,
IdxType *csrInd,
IdxType *first_pair,
IdxType *second_pair,
ValType *v,
ValType *work,
ValType *weight_i,
ValType *weight_s) {
IdxType i, idx, row, col, Ni, Nj;
IdxType ref, cur, ref_col, cur_col, match;
ValType ref_val;
for (idx = threadIdx.z + blockIdx.z * blockDim.z;
idx < num_pairs;
idx += gridDim.z * blockDim.z) {
row = first_pair[idx];
col = second_pair[idx];
//find which row has least elements (and call it reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
//compute new sum weights
weight_s[idx] = work[row] + work[col];
//compute new intersection weights
//search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x;
i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
}
else {
ref_val = 1.0;
}
//binary search (column indices are sorted within each row)
IdxType left = csrPtr[cur];
IdxType right = csrPtr[cur + 1] - 1;
while (left <= right) {
IdxType middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
}
else if (cur_col < ref_col) {
left = middle + 1;
}
else {
match = middle;
break;
}
}
//if the element with the same column index in the reference row has been found
if (match != -1) {
atomicAdd(&weight_i[idx], ref_val);
}
}
}
}
//Jaccard weights (*weight)
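// For an edge (u,v): weight_s holds |N(u)| + |N(v)| (or their weighted sums) and
// weight_i holds the intersection volume, so by inclusion-exclusion the union
// volume is Ws - Wi and the coefficient computed below is Wi / (Ws - Wi).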
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_jw(IdxType e,
IdxType *csrPtr,
IdxType *csrInd,
ValType *weight_i,
ValType *weight_s,
ValType *weight_j) {
IdxType j;
ValType Wi, Ws, Wu;
for (j = threadIdx.x + blockIdx.x * blockDim.x;
j < e;
j += gridDim.x * blockDim.x) {
Wi = weight_i[j];
Ws = weight_s[j];
Wu = Ws - Wi;
weight_j[j] = (Wi / Wu);
}
}
template<bool weighted, typename IdxType, typename ValType>
int jaccard(IdxType n,
IdxType e,
IdxType *csrPtr,
IdxType *csrInd,
ValType *weight_in,
ValType *work,
ValType *weight_i,
ValType *weight_s,
ValType *weight_j) {
dim3 nthreads, nblocks;
int y = 4;
//setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, (IdxType) CUDA_MAX_BLOCKS);
nblocks.z = 1;
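  // jaccard_row_sum strides over rows with the y dimension; the 32 x-threads
  // are used by parallel_prefix_sum for the weighted row reduction (only
  // lane 0 writes the per-row result).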
//launch kernel
hipLaunchKernelGGL(( jaccard_row_sum<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, n,
csrPtr,
csrInd,
weight_in,
work);
hipDeviceSynchronize();
fill(e, weight_i, (ValType) 0.0);
//setup launch configuration
nthreads.x = 32 / y;
nthreads.y = y;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, (IdxType) CUDA_MAX_BLOCKS); //1;
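  // In jaccard_is, z strides over rows, y over each row's neighbors, and x over
  // the reference row during the intersection search, matching the kernel's
  // three nested grid-stride loops.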
//launch kernel
hipLaunchKernelGGL(( jaccard_is<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, n,
csrPtr,
csrInd,
weight_in,
work,
weight_i,
weight_s);
//setup launch configuration
nthreads.x = min(e, (IdxType) CUDA_MAX_KERNEL_THREADS);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((e + nthreads.x - 1) / nthreads.x, (IdxType) CUDA_MAX_BLOCKS);
nblocks.y = 1;
nblocks.z = 1;
//launch kernel
hipLaunchKernelGGL(( jaccard_jw<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, e,
csrPtr,
csrInd,
weight_i,
weight_s,
weight_j);
return 0;
}
template<bool weighted, typename IdxType, typename ValType>
int jaccard_pairs(IdxType n,
IdxType num_pairs,
IdxType *csrPtr,
IdxType *csrInd,
IdxType *first_pair,
IdxType *second_pair,
ValType *weight_in,
ValType *work,
ValType *weight_i,
ValType *weight_s,
ValType *weight_j) {
dim3 nthreads, nblocks;
int y = 4;
//setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, (IdxType) CUDA_MAX_BLOCKS);
nblocks.z = 1;
//launch kernel
hipLaunchKernelGGL(( jaccard_row_sum<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, n,
csrPtr,
csrInd,
weight_in,
work);
hipDeviceSynchronize();
fill(num_pairs, weight_i, (ValType) 0.0);
//setup launch configuration
nthreads.x = 32;
nthreads.y = 1;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, (IdxType) CUDA_MAX_BLOCKS); //1;
//launch kernel
hipLaunchKernelGGL(( jaccard_is_pairs<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, num_pairs,
csrPtr,
csrInd,
first_pair,
second_pair,
weight_in,
work,
weight_i,
weight_s);
//setup launch configuration
nthreads.x = min(num_pairs, (IdxType) CUDA_MAX_KERNEL_THREADS);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((num_pairs + nthreads.x - 1) / nthreads.x, (IdxType) CUDA_MAX_BLOCKS);
nblocks.y = 1;
nblocks.z = 1;
//launch kernel
hipLaunchKernelGGL(( jaccard_jw<weighted, IdxType, ValType>) , dim3(nblocks), dim3(nthreads), 0, 0, num_pairs,
csrPtr,
csrInd,
weight_i,
weight_s,
weight_j);
return 0;
}
} //namespace detail
void jaccard(Graph *graph, gdf_column *weights, gdf_column *result) {
CUGRAPH_EXPECTS(graph != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(graph->adjList != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!result->valid, "Column must be valid");
bool weighted = (weights != nullptr);
gdf_dtype ValueType = result->dtype;
gdf_dtype IndexType = graph->adjList->offsets->dtype;
void *csrPtr = graph->adjList->offsets->data;
void *csrInd = graph->adjList->indices->data;
void *weight_i = nullptr;
void *weight_s = nullptr;
void *weight_j = result->data;
void *work = nullptr;
void *weight_in = nullptr;
if (weighted)
weight_in = weights->data;
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard<true, int32_t, float>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard<false, int32_t, float>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard<true, int32_t, double>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard<false, int32_t, double>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard<true, int64_t, float>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard<false, int64_t, float>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard<true, int64_t, double>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard<false, int64_t, double>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
// Clean up temp arrays
ALLOC_FREE_TRY(weight_i, nullptr);
ALLOC_FREE_TRY(weight_s, nullptr);
ALLOC_FREE_TRY(work, nullptr);
}
void jaccard_list(Graph* graph,
gdf_column* weights,
gdf_column* first,
gdf_column* second,
gdf_column* result) {
CUGRAPH_EXPECTS(graph != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(graph->adjList != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!result->valid, "Column must be valid");
CUGRAPH_EXPECTS(first != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(first->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!first->valid, "Column must be valid");
CUGRAPH_EXPECTS(second != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(second->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!second->valid, "Column must be valid");
bool weighted = (weights != nullptr);
gdf_dtype ValueType = result->dtype;
gdf_dtype IndexType = graph->adjList->offsets->dtype;
CUGRAPH_EXPECTS(first->dtype == IndexType, "Invalid API parameter");
CUGRAPH_EXPECTS(second->dtype == IndexType, "Invalid API parameter");
void *first_pair = first->data;
void *second_pair = second->data;
void *csrPtr = graph->adjList->offsets->data;
void *csrInd = graph->adjList->indices->data;
void *weight_i = nullptr;
void *weight_s = nullptr;
void *weight_j = result->data;
void *work = nullptr;
void *weight_in = nullptr;
if (weighted)
weight_in = weights->data;
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard_pairs<true, int32_t, float>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard_pairs<false, int32_t, float>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard_pairs<true, int32_t, double>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard_pairs<false, int32_t, double>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard_pairs<true, int64_t, float>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard_pairs<false, int64_t, float>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard_pairs<true, int64_t, double>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard_pairs<false, int64_t, double>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
// Clean up temp arrays
ALLOC_FREE_TRY(weight_i, nullptr);
ALLOC_FREE_TRY(weight_s, nullptr);
ALLOC_FREE_TRY(work, nullptr);
}
} //namespace cugraph
|
dc45516fdd4a113ae841c3fd9d4699ce47bae1d3.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief The cugraph Jaccard core functionality
*
* @file jaccard.cu
* ---------------------------------------------------------------------------**/
#include "utilities/graph_utils.cuh"
#include "cugraph.h"
#include "rmm_utils.h"
#include "utilities/error_utils.h"
namespace cugraph {
namespace detail {
// Volume of neighbors (*weight_s)
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_row_sum(IdxType n,
IdxType *csrPtr,
IdxType *csrInd,
ValType *v,
ValType *work) {
IdxType row, start, end, length;
ValType sum;
for (row = threadIdx.y + blockIdx.y * blockDim.y;
row < n;
row += gridDim.y * blockDim.y) {
start = csrPtr[row];
end = csrPtr[row + 1];
length = end - start;
//compute row sums
if (weighted) {
sum = parallel_prefix_sum(length, csrInd + start, v);
if (threadIdx.x == 0)
work[row] = sum;
}
else {
work[row] = (ValType) length;
}
}
}
// Volume of intersections (*weight_i) and cumulative volume of neighbors (*weight_s)
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_is(IdxType n,
IdxType *csrPtr,
IdxType *csrInd,
ValType *v,
ValType *work,
ValType *weight_i,
ValType *weight_s) {
IdxType i, j, row, col, Ni, Nj;
IdxType ref, cur, ref_col, cur_col, match;
ValType ref_val;
for (row = threadIdx.z + blockIdx.z * blockDim.z;
row < n;
row += gridDim.z * blockDim.z) {
for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y;
j < csrPtr[row + 1];
j += gridDim.y * blockDim.y) {
col = csrInd[j];
//find which row has the fewest elements (and call it the reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
//compute new sum weights
weight_s[j] = work[row] + work[col];
//compute new intersection weights
//search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
}
else {
ref_val = 1.0;
}
//binary search (column indices are sorted within each row)
IdxType left = csrPtr[cur];
IdxType right = csrPtr[cur + 1] - 1;
while (left <= right) {
IdxType middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
}
else if (cur_col < ref_col) {
left = middle + 1;
}
else {
match = middle;
break;
}
}
//if the element with the same column index in the reference row has been found
if (match != -1) {
atomicAdd(&weight_i[j], ref_val);
}
}
}
}
}
// Volume of intersections (*weight_i) and cumulative volume of neighbors (*weight_s)
// Using list of node pairs
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_is_pairs(IdxType num_pairs,
IdxType *csrPtr,
IdxType *csrInd,
IdxType *first_pair,
IdxType *second_pair,
ValType *v,
ValType *work,
ValType *weight_i,
ValType *weight_s) {
IdxType i, idx, row, col, Ni, Nj;
IdxType ref, cur, ref_col, cur_col, match;
ValType ref_val;
for (idx = threadIdx.z + blockIdx.z * blockDim.z;
idx < num_pairs;
idx += gridDim.z * blockDim.z) {
row = first_pair[idx];
col = second_pair[idx];
//find which row has the fewest elements (and call it the reference row)
Ni = csrPtr[row + 1] - csrPtr[row];
Nj = csrPtr[col + 1] - csrPtr[col];
ref = (Ni < Nj) ? row : col;
cur = (Ni < Nj) ? col : row;
//compute new sum weights
weight_s[idx] = work[row] + work[col];
//compute new intersection weights
//search for the element with the same column index in the reference row
for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x;
i < csrPtr[ref + 1];
i += gridDim.x * blockDim.x) {
match = -1;
ref_col = csrInd[i];
if (weighted) {
ref_val = v[ref_col];
}
else {
ref_val = 1.0;
}
//binary search (column indices are sorted within each row)
IdxType left = csrPtr[cur];
IdxType right = csrPtr[cur + 1] - 1;
while (left <= right) {
IdxType middle = (left + right) >> 1;
cur_col = csrInd[middle];
if (cur_col > ref_col) {
right = middle - 1;
}
else if (cur_col < ref_col) {
left = middle + 1;
}
else {
match = middle;
break;
}
}
//if the element with the same column index in the reference row has been found
if (match != -1) {
atomicAdd(&weight_i[idx], ref_val);
}
}
}
}
//Jaccard weights (*weight)
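// For each edge (or requested pair) j, weight_i[j] holds the (possibly weighted)
// volume of the intersection of the two endpoint neighborhoods and weight_s[j]
// the sum of the two neighborhood volumes, so Wu = Ws - Wi is the union volume
// and the ratio Wi / Wu computed below is the Jaccard coefficient.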
template<bool weighted, typename IdxType, typename ValType>
__global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS)
jaccard_jw(IdxType e,
IdxType *csrPtr,
IdxType *csrInd,
ValType *weight_i,
ValType *weight_s,
ValType *weight_j) {
IdxType j;
ValType Wi, Ws, Wu;
for (j = threadIdx.x + blockIdx.x * blockDim.x;
j < e;
j += gridDim.x * blockDim.x) {
Wi = weight_i[j];
Ws = weight_s[j];
Wu = Ws - Wi;
weight_j[j] = (Wi / Wu);
}
}
template<bool weighted, typename IdxType, typename ValType>
int jaccard(IdxType n,
IdxType e,
IdxType *csrPtr,
IdxType *csrInd,
ValType *weight_in,
ValType *work,
ValType *weight_i,
ValType *weight_s,
ValType *weight_j) {
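// Three phases: (1) jaccard_row_sum writes each row's neighborhood volume into
// work[], (2) jaccard_is accumulates per-edge intersection (weight_i) and summed
// (weight_s) volumes, (3) jaccard_jw turns them into the final Jaccard ratio.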
dim3 nthreads, nblocks;
int y = 4;
//setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, (IdxType) CUDA_MAX_BLOCKS);
nblocks.z = 1;
//launch kernel
jaccard_row_sum<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(n,
csrPtr,
csrInd,
weight_in,
work);
cudaDeviceSynchronize();
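// weight_i must start at zero because jaccard_is accumulates it with atomicAdd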
fill(e, weight_i, (ValType) 0.0);
//setup launch configuration
nthreads.x = 32 / y;
nthreads.y = y;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, (IdxType) CUDA_MAX_BLOCKS); //1;
//launch kernel
jaccard_is<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(n,
csrPtr,
csrInd,
weight_in,
work,
weight_i,
weight_s);
//setup launch configuration
nthreads.x = min(e, (IdxType) CUDA_MAX_KERNEL_THREADS);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((e + nthreads.x - 1) / nthreads.x, (IdxType) CUDA_MAX_BLOCKS);
nblocks.y = 1;
nblocks.z = 1;
//launch kernel
jaccard_jw<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(e,
csrPtr,
csrInd,
weight_i,
weight_s,
weight_j);
return 0;
}
template<bool weighted, typename IdxType, typename ValType>
int jaccard_pairs(IdxType n,
IdxType num_pairs,
IdxType *csrPtr,
IdxType *csrInd,
IdxType *first_pair,
IdxType *second_pair,
ValType *weight_in,
ValType *work,
ValType *weight_i,
ValType *weight_s,
ValType *weight_j) {
dim3 nthreads, nblocks;
int y = 4;
//setup launch configuration
nthreads.x = 32;
nthreads.y = y;
nthreads.z = 1;
nblocks.x = 1;
nblocks.y = min((n + nthreads.y - 1) / nthreads.y, (IdxType) CUDA_MAX_BLOCKS);
nblocks.z = 1;
//launch kernel
jaccard_row_sum<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(n,
csrPtr,
csrInd,
weight_in,
work);
cudaDeviceSynchronize();
fill(num_pairs, weight_i, (ValType) 0.0);
//setup launch configuration
nthreads.x = 32;
nthreads.y = 1;
nthreads.z = 8;
nblocks.x = 1;
nblocks.y = 1;
nblocks.z = min((n + nthreads.z - 1) / nthreads.z, (IdxType) CUDA_MAX_BLOCKS); //1;
//launch kernel
jaccard_is_pairs<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(num_pairs,
csrPtr,
csrInd,
first_pair,
second_pair,
weight_in,
work,
weight_i,
weight_s);
//setup launch configuration
nthreads.x = min(num_pairs, (IdxType) CUDA_MAX_KERNEL_THREADS);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min((num_pairs + nthreads.x - 1) / nthreads.x, (IdxType) CUDA_MAX_BLOCKS);
nblocks.y = 1;
nblocks.z = 1;
//launch kernel
jaccard_jw<weighted, IdxType, ValType> <<<nblocks, nthreads>>>(num_pairs,
csrPtr,
csrInd,
weight_i,
weight_s,
weight_j);
return 0;
}
} //namespace detail
void jaccard(Graph *graph, gdf_column *weights, gdf_column *result) {
CUGRAPH_EXPECTS(graph != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(graph->adjList != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!result->valid, "Column must be valid");
bool weighted = (weights != nullptr);
gdf_dtype ValueType = result->dtype;
gdf_dtype IndexType = graph->adjList->offsets->dtype;
void *csrPtr = graph->adjList->offsets->data;
void *csrInd = graph->adjList->indices->data;
void *weight_i = nullptr;
void *weight_s = nullptr;
void *weight_j = result->data;
void *work = nullptr;
void *weight_in = nullptr;
if (weighted)
weight_in = weights->data;
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard<true, int32_t, float>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard<false, int32_t, float>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard<true, int32_t, double>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard<false, int32_t, double>(n,
e,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard<true, int64_t, float>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(float) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * e, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard<false, int64_t, float>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard<true, int64_t, double>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t e = graph->adjList->indices->size;
ALLOC_TRY(&weight_i, sizeof(double) * e, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * e, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard<false, int64_t, double>(n,
e,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
// Clean up temp arrays
ALLOC_FREE_TRY(weight_i, nullptr);
ALLOC_FREE_TRY(weight_s, nullptr);
ALLOC_FREE_TRY(work, nullptr);
}
void jaccard_list(Graph* graph,
gdf_column* weights,
gdf_column* first,
gdf_column* second,
gdf_column* result) {
CUGRAPH_EXPECTS(graph != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(graph->adjList != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(result->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!result->valid, "Column must be valid");
CUGRAPH_EXPECTS(first != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(first->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!first->valid, "Column must be valid");
CUGRAPH_EXPECTS(second != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(second->data != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(!second->valid, "Column must be valid");
bool weighted = (weights != nullptr);
gdf_dtype ValueType = result->dtype;
gdf_dtype IndexType = graph->adjList->offsets->dtype;
CUGRAPH_EXPECTS(first->dtype == IndexType, "Invalid API parameter");
CUGRAPH_EXPECTS(second->dtype == IndexType, "Invalid API parameter");
void *first_pair = first->data;
void *second_pair = second->data;
void *csrPtr = graph->adjList->offsets->data;
void *csrInd = graph->adjList->indices->data;
void *weight_i = nullptr;
void *weight_s = nullptr;
void *weight_j = result->data;
void *work = nullptr;
void *weight_in = nullptr;
if (weighted)
weight_in = weights->data;
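// Same type-dispatch ladder as jaccard(), but the temporaries are sized by the
// number of requested vertex pairs rather than by the edge count, and the
// jaccard_pairs variant is launched instead.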
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard_pairs<true, int32_t, float>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard_pairs<false, int32_t, float>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard_pairs<true, int32_t, double>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT32 && !weighted) {
int32_t n = graph->adjList->offsets->size - 1;
int32_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard_pairs<false, int32_t, double>(n,
num_pairs,
(int32_t*) csrPtr,
(int32_t*) csrInd,
(int32_t*) first_pair,
(int32_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard_pairs<true, int64_t, float>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT32 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(float) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(float) * n, nullptr);
cugraph::detail::jaccard_pairs<false, int64_t, float>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(float*) weight_in,
(float*) work,
(float*) weight_i,
(float*) weight_s,
(float*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard_pairs<true, int64_t, double>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
if (ValueType == GDF_FLOAT64 && IndexType == GDF_INT64 && !weighted) {
int64_t n = graph->adjList->offsets->size - 1;
int64_t num_pairs = first->size;
ALLOC_TRY(&weight_i, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&weight_s, sizeof(double) * num_pairs, nullptr);
ALLOC_TRY(&work, sizeof(double) * n, nullptr);
cugraph::detail::jaccard_pairs<false, int64_t, double>(n,
num_pairs,
(int64_t*) csrPtr,
(int64_t*) csrInd,
(int64_t*) first_pair,
(int64_t*) second_pair,
(double*) weight_in,
(double*) work,
(double*) weight_i,
(double*) weight_s,
(double*) weight_j);
}
// Clean up temp arrays
ALLOC_FREE_TRY(weight_i, nullptr);
ALLOC_FREE_TRY(weight_s, nullptr);
ALLOC_FREE_TRY(work, nullptr);
}
} //namespace cugraph
|
35644b15fd5abe796f610799d7b56e6cf9f9f50a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <iostream>
#include <set>
#include <hip/hip_runtime.h>
#include "gpu/gpu_memory.hpp"
#include "queries/data/data_structures.hpp"
#include "queries/algorithms/algorithm.cuh"
#include "queries/algorithms/distance_type.hpp"
#include "queries/algorithms/gpu_common.cuh"
#define SHARED_MEM_ELEMENTS 1024
__constant__ sl::queries::data::Point device_input_q[MAX_CONST_MEM_ELEMENTS];
template<class Comparator>
__device__ void _ComputePartialSkyline(
const sl::queries::data::WeightedPoint *input_p,
size_t input_p_size,
int input_q_size,
Comparator comparator_function,
sl::queries::data::Statistics *statistics,
float *result) {
__shared__ sl::queries::data::WeightedPoint shared_input_p[SHARED_MEM_ELEMENTS];
int block_offset = blockIdx.x * blockDim.x; // we just have one dimension grids
size_t global_pos = block_offset + threadIdx.x;
sl::queries::data::WeightedPoint skyline_candidate(input_p[global_pos]);
bool is_skyline = global_pos < input_p_size;
sl::queries::data::Statistics thread_statistics;
for (size_t current_input_p_pos = 0; current_input_p_pos < input_p_size; current_input_p_pos += SHARED_MEM_ELEMENTS) {
//all threads in the block load the current tile into shared memory
shared_input_p[threadIdx.x] = input_p[threadIdx.x + current_input_p_pos];
__syncthreads();
if (is_skyline) {
for (int i = 0; i < SHARED_MEM_ELEMENTS; i++) {
if (current_input_p_pos + i != global_pos && current_input_p_pos + i < input_p_size) { // do not check against the same point
if (sl::queries::algorithms::IsDominated(skyline_candidate, shared_input_p[i], device_input_q, input_q_size, &thread_statistics.num_comparisions_, comparator_function)) {
is_skyline = false;
break;
}
}
}
}
__syncthreads();
}
if (is_skyline) {
float max_distance = 0;
for (int i = 0; i < input_q_size; i++) {
float distance = skyline_candidate.SquaredDistance(device_input_q[i]);
if (distance > max_distance) {
max_distance = distance;
}
}
result[global_pos] = max_distance;
}
else {
result[global_pos] = -1;
}
atomicAdd(&statistics->num_comparisions_, thread_statistics.num_comparisions_);
}
__global__ void ComputePartialSkyline(
const sl::queries::data::WeightedPoint *input_p,
size_t input_p_size,
int input_q_size,
sl::queries::algorithms::DistanceType distance_type,
sl::queries::data::Statistics *statistics,
float *result) {
switch (distance_type) {
case sl::queries::algorithms::DistanceType::Nearest:
_ComputePartialSkyline(input_p, input_p_size, input_q_size, NearestFunc, statistics, result);
break;
case sl::queries::algorithms::DistanceType::Furthest:
_ComputePartialSkyline(input_p, input_p_size, input_q_size, FurthestFunc, statistics, result);
break;
default:
break;
}
}
extern "C" void ComputeGPUSkyline(
const std::vector<sl::queries::data::WeightedPoint> &input_p,
const std::vector<sl::queries::data::Point> &input_q,
std::vector<sl::queries::data::WeightedPoint> *output,
sl::queries::algorithms::DistanceType distance_type,
size_t top_k,
sl::queries::data::Statistics *stadistics_results) {
sl::gpu::GPUStream gpu_stream;
//copy to const memory the input Q
hipMemcpyToSymbolAsync(device_input_q, input_q.data(), sizeof(sl::queries::data::Point) * input_q.size(), 0, hipMemcpyKind::hipMemcpyHostToDevice, gpu_stream());
size_t input_p_size = input_p.size();
int input_q_size = static_cast<int>(input_q.size());
size_t input_p_size_SHARED_MEM_SIZE_multiple = roundUp<size_t>(input_p.size(), SHARED_MEM_ELEMENTS);
//copy to global memory the input P
sl::gpu::GPUMemory<sl::queries::data::WeightedPoint> input_p_d(input_p_size_SHARED_MEM_SIZE_multiple);
input_p_d.UploadToDeviceAsync(input_p, gpu_stream); //the trailing (padded) values may be left empty
//copy statistics
sl::gpu::GPUMemory<sl::queries::data::Statistics> statistics_d(1);
statistics_d.UploadToDeviceAsync(stadistics_results, 1, gpu_stream);
sl::gpu::GPUMemory<float> result_d(input_p_size_SHARED_MEM_SIZE_multiple);
/*
MAX number of threads per SM is 2048.
MAX number of threads per block is 1024 => max blockDim.y = 1
*/
dim3 threadsPerBlock(SHARED_MEM_ELEMENTS, 1);
int total_numBlocks = static_cast<int>(divUp(input_p_size, static_cast<size_t>(threadsPerBlock.x * threadsPerBlock.y)));
dim3 grid(total_numBlocks, 1);
hipLaunchKernelGGL(( ComputePartialSkyline), dim3(grid), dim3(threadsPerBlock), 0, gpu_stream(), input_p_d(), input_p_size, input_q_size, distance_type, statistics_d(), result_d());
hipError_t e = hipGetLastError();
if (e != hipSuccess) {
std::cout << hipGetErrorString(e) << '\n';
}
std::vector<float> result(input_p_size);
result_d.DownloadToHostAsync(result.data(), input_p_size, gpu_stream);
statistics_d.DownloadToHostAsync(stadistics_results, gpu_stream);
e = gpu_stream.Syncronize();
if (e != hipSuccess) {
std::cout << hipGetErrorString(e) << '\n';
}
std::set<sl::queries::algorithms::PointStatistics> points;
float max_distance_in_set = 99999;
for (size_t i = 0; i < result.size(); i++) {
float distance = result[i];
if (distance != -1) {
//it's a skyline
if (points.size() < top_k || distance < max_distance_in_set) {
points.insert(sl::queries::algorithms::PointStatistics(input_p[i], std::make_pair(0.f, distance)));
if (points.size() > top_k)
points.erase(points.begin());
max_distance_in_set = points.begin()->s_.second;
}
}
}
for (const sl::queries::algorithms::PointStatistics &ps : points) {
output->emplace_back(ps.wp_);
}
stadistics_results->output_size_ = output->size();
}
|
35644b15fd5abe796f610799d7b56e6cf9f9f50a.cu
|
#include <vector>
#include <iostream>
#include <set>
#include <cuda_runtime.h>
#include "gpu/gpu_memory.hpp"
#include "queries/data/data_structures.hpp"
#include "queries/algorithms/algorithm.cuh"
#include "queries/algorithms/distance_type.hpp"
#include "queries/algorithms/gpu_common.cuh"
#define SHARED_MEM_ELEMENTS 1024
__constant__ sl::queries::data::Point device_input_q[MAX_CONST_MEM_ELEMENTS];
template<class Comparator>
__device__ void _ComputePartialSkyline(
const sl::queries::data::WeightedPoint *input_p,
size_t input_p_size,
int input_q_size,
Comparator comparator_function,
sl::queries::data::Statistics *statistics,
float *result) {
__shared__ sl::queries::data::WeightedPoint shared_input_p[SHARED_MEM_ELEMENTS];
int block_offset = blockIdx.x * blockDim.x; // we just have one dimension grids
size_t global_pos = block_offset + threadIdx.x;
sl::queries::data::WeightedPoint skyline_candidate(input_p[global_pos]);
bool is_skyline = global_pos < input_p_size;
sl::queries::data::Statistics thread_statistics;
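// Tile over the candidate set: each block stages SHARED_MEM_ELEMENTS points into
// shared memory and every thread tests its own candidate against the staged
// tile, amortising global reads of input_p across the whole block.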
for (size_t current_input_p_pos = 0; current_input_p_pos < input_p_size; current_input_p_pos += SHARED_MEM_ELEMENTS) {
//all threads in the block load the current tile into shared memory
shared_input_p[threadIdx.x] = input_p[threadIdx.x + current_input_p_pos];
__syncthreads();
if (is_skyline) {
for (int i = 0; i < SHARED_MEM_ELEMENTS; i++) {
if (current_input_p_pos + i != global_pos && current_input_p_pos + i < input_p_size) { // do not check against the same point
if (sl::queries::algorithms::IsDominated(skyline_candidate, shared_input_p[i], device_input_q, input_q_size, &thread_statistics.num_comparisions_, comparator_function)) {
is_skyline = false;
break;
}
}
}
}
__syncthreads();
}
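// Surviving candidates record their furthest squared distance to any query
// point (used on the host for top-k selection); dominated candidates are
// marked with -1.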
if (is_skyline) {
float max_distance = 0;
for (int i = 0; i < input_q_size; i++) {
float distance = skyline_candidate.SquaredDistance(device_input_q[i]);
if (distance > max_distance) {
max_distance = distance;
}
}
result[global_pos] = max_distance;
}
else {
result[global_pos] = -1;
}
atomicAdd(&statistics->num_comparisions_, thread_statistics.num_comparisions_);
}
__global__ void ComputePartialSkyline(
const sl::queries::data::WeightedPoint *input_p,
size_t input_p_size,
int input_q_size,
sl::queries::algorithms::DistanceType distance_type,
sl::queries::data::Statistics *statistics,
float *result) {
switch (distance_type) {
case sl::queries::algorithms::DistanceType::Nearest:
_ComputePartialSkyline(input_p, input_p_size, input_q_size, NearestFunc, statistics, result);
break;
case sl::queries::algorithms::DistanceType::Furthest:
_ComputePartialSkyline(input_p, input_p_size, input_q_size, FurthestFunc, statistics, result);
break;
default:
break;
}
}
extern "C" void ComputeGPUSkyline(
const std::vector<sl::queries::data::WeightedPoint> &input_p,
const std::vector<sl::queries::data::Point> &input_q,
std::vector<sl::queries::data::WeightedPoint> *output,
sl::queries::algorithms::DistanceType distance_type,
size_t top_k,
sl::queries::data::Statistics *stadistics_results) {
sl::gpu::GPUStream gpu_stream;
//copy to const memory the input Q
cudaMemcpyToSymbolAsync(device_input_q, input_q.data(), sizeof(sl::queries::data::Point) * input_q.size(), 0, cudaMemcpyKind::cudaMemcpyHostToDevice, gpu_stream());
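// Note: device_input_q is a fixed-size __constant__ buffer of
// MAX_CONST_MEM_ELEMENTS points; input_q.size() is assumed not to exceed that
// bound, since no check is performed on the copy above.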
size_t input_p_size = input_p.size();
int input_q_size = static_cast<int>(input_q.size());
size_t input_p_size_SHARED_MEM_SIZE_multiple = roundUp<size_t>(input_p.size(), SHARED_MEM_ELEMENTS);
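// Round the candidate buffers up to a multiple of the tile size so the shared
// memory load inside the kernel never reads past the end of input_p_d.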
//copy to global memory the input P
sl::gpu::GPUMemory<sl::queries::data::WeightedPoint> input_p_d(input_p_size_SHARED_MEM_SIZE_multiple);
input_p_d.UploadToDeviceAsync(input_p, gpu_stream); //the trailing (padded) values may be left empty
//copy statistics
sl::gpu::GPUMemory<sl::queries::data::Statistics> statistics_d(1);
statistics_d.UploadToDeviceAsync(stadistics_results, 1, gpu_stream);
sl::gpu::GPUMemory<float> result_d(input_p_size_SHARED_MEM_SIZE_multiple);
/*
MAX number of threads per SM is 2048.
MAX number of threads per block is 1024 => max blockDim.y = 1
*/
dim3 threadsPerBlock(SHARED_MEM_ELEMENTS, 1);
int total_numBlocks = static_cast<int>(divUp(input_p_size, static_cast<size_t>(threadsPerBlock.x * threadsPerBlock.y)));
dim3 grid(total_numBlocks, 1);
ComputePartialSkyline<<<grid, threadsPerBlock, 0, gpu_stream()>>>(input_p_d(), input_p_size, input_q_size, distance_type, statistics_d(), result_d());
cudaError_t e = cudaGetLastError();
if (e != cudaSuccess) {
std::cout << cudaGetErrorString(e) << '\n';
}
std::vector<float> result(input_p_size);
result_d.DownloadToHostAsync(result.data(), input_p_size, gpu_stream);
statistics_d.DownloadToHostAsync(stadistics_results, gpu_stream);
e = gpu_stream.Syncronize();
if (e != cudaSuccess) {
std::cout << cudaGetErrorString(e) << '\n';
}
std::set<sl::queries::algorithms::PointStatistics> points;
float max_distance_in_set = 99999;
for (size_t i = 0; i < result.size(); i++) {
float distance = result[i];
if (distance != -1) {
//it's a skyline
if (points.size() < top_k || distance < max_distance_in_set) {
points.insert(sl::queries::algorithms::PointStatistics(input_p[i], std::make_pair(0.f, distance)));
if (points.size() > top_k)
points.erase(points.begin());
max_distance_in_set = points.begin()->s_.second;
}
}
}
for (const sl::queries::algorithms::PointStatistics &ps : points) {
output->emplace_back(ps.wp_);
}
stadistics_results->output_size_ = output->size();
}
|
34bb21140704ea87588448fb76834f4cd181dafb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[32,32,1] --blockDim=[16,16,1]
#include "common.h"
__global__ void
d_renderBicubic(uchar4 *d_output, uint width, uint height, float tx, float ty, float scale, float cx, float cy)
{
__requires(width == 512);
__requires(height == 512);
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint i = __umul24(y, width) + x;
float u = (x-cx)*scale+cx + tx;
float v = (y-cy)*scale+cy + ty;
if ((x < width) && (y < height))
{
// write output color
float c = tex2DBicubic<uchar, float>(tex, u, v);
d_output[i] = make_uchar4(c * 0xff, c * 0xff, c * 0xff, 0);
}
}
|
34bb21140704ea87588448fb76834f4cd181dafb.cu
|
//pass
//--gridDim=[32,32,1] --blockDim=[16,16,1]
#include "common.h"
__global__ void
d_renderBicubic(uchar4 *d_output, uint width, uint height, float tx, float ty, float scale, float cx, float cy)
{
__requires(width == 512);
__requires(height == 512);
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint i = __umul24(y, width) + x;
float u = (x-cx)*scale+cx + tx;
float v = (y-cy)*scale+cy + ty;
if ((x < width) && (y < height))
{
// write output color
float c = tex2DBicubic<uchar, float>(tex, u, v);
d_output[i] = make_uchar4(c * 0xff, c * 0xff, c * 0xff, 0);
}
}
|
f0f73f0c2c079f83650f2ee8ede229aa2b9f22cf.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Flood fill
*
* The basic idea is very simple; each iteration we examine each pixel;
* it the pixel borders a filled pixel, we test it, and if the test
* succeeds, then we fill it, also setting a global flag that something
* has changed. We repeat this until nothing changes.
*
* That implementation turned out to be very slow... One problem is
* that unfilled pixels have to examine all of their neighbors.
* We will try a second implementation in which when a pixel is
* filled, it marks its unfilled neighbors.
*
* No difference. Eliminating the flag checks after each kernel
* launch reduces the time (for 100 iterations) from 11 msec to 7 msec!
* This could probably be speeded up quite a bit if the control
* logic could be run on the device instead of on the host...
*
* But can we launch a thread array from a device function?
* Or should we launch the whole grid and have one special thread
* which is the master?
* The slow implementation has one thread per pixel in the image;
* but many iterations are required... better perhaps to have one
* thread per filled pixel with unchecked neighbors?
*
* We can only synchronize threads within a block, so we would have to
* do this with a single block. Let's say we have one thread per
* filled pixel... Each pixel has up to 4 fillable neighbors (although
* only the first seed pixel will have all 4 unfilled). So we have
* an array in shared memory that we fill with the pixel values. (Need
* to check how to avoid bank conflicts!) Then we have a table of
* future pixels. Each thread gets 4 slots. After these have
* been filled, we would like to prune duplicates; we won't have many
* when filling parallel to a coordinate axis, but there will be lots
* for an oblique front... we could use a hash function? Or use the
* flag image. We could use these values:
* 0 - unchecked
* 1 - filled
* 2 - queued
* 3 - rejected
*
* 0 0 0 0 0 0 0 0 0 0 0 0 2 0 0
* 0 0 0 0 0 0 0 2 0 0 0 2 1 2 0
* 0 0 2 0 0 -> 0 2 1 2 0 -> 2 1 1 1 2
* 0 0 0 0 0 0 0 2 0 0 0 2 1 2 0
* 0 0 0 0 0 0 0 0 0 0 0 0 2 0 0
*
* Shared memory per block is only 16k, so we can't put the whole image
* there...
*
* We have an array of pixels to check, sized 4 times the max number
* of threads in a block. We have an array of active pixels, sized
* the max number of threads. After syncing the threads, we need to make
* up the new active pixel list. We may not have enough threads to do all
* of the pixels, so we have several lists. After processing each list,
* we transfer new pixels to be checked to the list, marking them as queued.
* If we run out of space, we will have to set a flag that says we
* have unrecorded pixels that need to be queued; if that is set when
* we are all done, we should scan the entire image again looking for them,
* maybe using a special flag value to indicate an unfulfilled queue request?
* If we can allocate 2048 queue request slots it ought to be enough
* for a 512x512 image...
*
* We probably want to have the shared memory allocated at launch time...
*/
#include "quip_config.h"
#ifdef HAVE_CUDA
#define BUILD_FOR_CUDA
#include <stdio.h>
#include <hiprand/hiprand.h>
#include "quip_prot.h"
#include "my_cuda.h"
#include "cuda_supp.h" // describe_cuda_error
#include "veclib/gpu_call_utils.h"
#include "veclib/slow_defs.h"
#include "../cu2/cu2_host_call_defs.h"
#include "../cu2/cu2_kern_call_defs.h"
//#include "my_vector_functions.h" // max_threads_per_block
#define CHECK_CUDA_RETURN_VAL(msg) \
if( e != hipSuccess ){ \
NWARN(msg); \
}
// The fill routine kernel
#define FILL_IF \
if( fabs( dst - v ) < tol ){ \
src1 = 1; \
dst = fill_val; \
*flag = 1; \
return; \
}
// why not use vset ???
KERNEL_FUNC_QUALIFIER void zeroit(dim5 szarr, unsigned char* a, dim5 len )
{
//int x,y;
DECL_INDICES_1
//x = blockIdx.x * blockDim.x + threadIdx.x;
//y = blockIdx.y * blockDim.y + threadIdx.y;
SET_INDICES_1
/*
if( x < len.x && y < len.y ){
a[x+y*len.y] = 0;
}
*/
dst = 0;
}
KERNEL_FUNC_QUALIFIER void g_sp_ifl_incs(dim5 szarr, float* a, dim5 inc1,
unsigned char* b, dim5 inc2,
dim5 len,
float v, float tol, float fill_val, int *flag)
{
/* BLOCK_VARS_DECLS */
//INIT_INDICES_XYZ_2
DECL_INDICES_2
SET_INDICES_2
//if (index1.x < len.x && index1.y < len.y ) {
//SCALE_INDICES_XYZ_2
if( src1 == 0 ){ // not filled yet
// check each neighbor if filled
if( index2.d5_dim[1] > 0 ){ // in-bounds
index2.d5_dim[1] -= inc2.d5_dim[1];
if( src1 > 0 ){ // neighbor filled?
index2.d5_dim[1] += inc2.d5_dim[1];
FILL_IF
}
index2.d5_dim[1] += inc2.d5_dim[1];
}
if( index2.d5_dim[1] < (len.d5_dim[1]-1)*inc2.d5_dim[1] ){
index2.d5_dim[1] += inc2.d5_dim[1];
if( src1 > 0 ){
index2.d5_dim[1] -= inc2.d5_dim[1];
FILL_IF
}
index2.d5_dim[1] -= inc2.d5_dim[1];
}
if( index2.d5_dim[2] > 0 ){
index2.d5_dim[2] -= inc2.d5_dim[2];
if( src1 > 0 ){
index2.d5_dim[2] += inc2.d5_dim[2];
FILL_IF
}
index2.d5_dim[2] += inc2.d5_dim[2];
}
if( index2.d5_dim[2] < (len.d5_dim[2]-1)*inc2.d5_dim[2] ){
index2.d5_dim[2] += inc2.d5_dim[2];
if( src1 > 0 ){
index2.d5_dim[2] -= inc2.d5_dim[2];
FILL_IF
}
index2.d5_dim[2] -= inc2.d5_dim[2];
}
}
//}
}
__constant__ float test_value[1];
__constant__ float tolerance[1];
__constant__ float fill_value[1];
#define FILL_IF2 \
if( fabs( dst - test_value[0] ) < tolerance[0] ){ \
src1 = 1; \
dst = fill_value[0]; \
return; \
}
KERNEL_FUNC_QUALIFIER void g_sp_ifl2_incs(dim5 szarr, float* a, dim5 inc1,
unsigned char* b, dim5 inc2,
dim5 len)
{
/* BLOCK_VARS_DECLS */
//INIT_INDICES_XYZ_2
DECL_INDICES_2
SET_INDICES_2
if (index1.d5_dim[1] < len.d5_dim[1] && index1.d5_dim[2] < len.d5_dim[2] ) {
//SCALE_INDICES_XYZ_2
if( src1 == 0 ){ // not filled yet
// check each neighbor if filled
if( index2.d5_dim[1] > 0 ){ // in-bounds
index2.d5_dim[1] -= inc2.d5_dim[1];
if( src1 > 0 ){ // neighbor filled?
index2.d5_dim[1] += inc2.d5_dim[1];
FILL_IF2
}
index2.d5_dim[1] += inc2.d5_dim[1];
}
if( index2.d5_dim[1] < (len.d5_dim[1]-1)*inc2.d5_dim[1] ){
index2.d5_dim[1] += inc2.d5_dim[1];
if( src1 > 0 ){
index2.d5_dim[1] -= inc2.d5_dim[1];
FILL_IF2
}
index2.d5_dim[1] -= inc2.d5_dim[1];
}
if( index2.d5_dim[2] > 0 ){
index2.d5_dim[2] -= inc2.d5_dim[2];
if( src1 > 0 ){
index2.d5_dim[2] += inc2.d5_dim[2];
FILL_IF2
}
index2.d5_dim[2] += inc2.d5_dim[2];
}
if( index2.d5_dim[2] < (len.d5_dim[2]-1)*inc2.d5_dim[2] ){
index2.d5_dim[2] += inc2.d5_dim[2];
if( src1 > 0 ){
index2.d5_dim[2] -= inc2.d5_dim[2];
FILL_IF2
}
index2.d5_dim[2] -= inc2.d5_dim[2];
}
}
}
}
void h_sp_ifl( Data_Obj *dp, int x, int y, float tol, float fill_val )
{
BLOCK_VARS_DECLS
hipError_t e;
dim5 len, inc1, inc2;
unsigned char *filled, b_one;
float *f_p, v;
int h_flag, *flag_p;
int n_iterations;
Vector_Args va1, *vap=(&va1);
dim5 szarr;
len.d5_dim[1] = OBJ_COLS(dp);
len.d5_dim[2] = OBJ_ROWS(dp);
//GET_MAX_THREADS(dp)
SETUP_BLOCKS_XYZ(OBJ_PFDEV(dp))
inc1.d5_dim[1] = OBJ_TYPE_INC(dp,1);
inc1.d5_dim[2] = OBJ_TYPE_INC(dp,2);
inc1.d5_dim[0] = inc1.d5_dim[3] = inc1.d5_dim[4] = 0;
inc2 = inc1;
if( hipMalloc(&flag_p,sizeof(*flag_p)) != hipSuccess ){
NERROR1("cuda malloc error getting flag word");
}
/* use 2d allocator for better stride? */
if( hipMalloc(&filled,len.d5_dim[1]*len.d5_dim[2]) != hipSuccess ){
NERROR1("cuda malloc error getting filled array");
}
/* set filled to zero */
//CLEAR_CUDA_ERROR2("h_sp_ifl","zeroit")
CLEAR_CUDA_ERROR("zeroit")
zeroit<<< NN_GPU >>>(szarr,filled,len);
//CHECK_CUDA_ERROR("h_sp_ifl","zeroit")
CHECK_CUDA_ERROR(h_sp_ifl: zeroit)
// Get the value at the seed point
f_p = (float *)OBJ_DATA_PTR(dp);
f_p += x + y * inc1.d5_dim[2];
e = hipMemcpy(&v, f_p, sizeof(v), hipMemcpyDeviceToHost);
CHECK_CUDA_RETURN_VAL("hipMemcpy device to host");
// Fill the seed point
b_one = 1;
e = hipMemcpy(filled+x+y*len.d5_dim[1], &b_one, sizeof(b_one),
hipMemcpyHostToDevice);
CHECK_CUDA_RETURN_VAL("hipMemcpy host to device");
e = hipMemcpy(f_p, &fill_val, sizeof(fill_val),
hipMemcpyHostToDevice);
CHECK_CUDA_RETURN_VAL("hipMemcpy host to device");
n_iterations=0;
do {
/* Clear the flag */
h_flag = 0;
e = hipMemcpy(flag_p, &h_flag, sizeof(h_flag),
hipMemcpyHostToDevice);
CHECK_CUDA_RETURN_VAL("hipMemcpy host to device");
//CLEAR_CUDA_ERROR2("h_sp_ifl","g_sp_ifl_incs")
CLEAR_CUDA_ERROR("g_sp_ifl_incs")
g_sp_ifl_incs<<< NN_GPU >>>
(szarr,(float *)OBJ_DATA_PTR(dp),inc1,filled,inc2,len,v,tol,fill_val,flag_p);
//CHECK_CUDA_ERROR("h_sp_ifl","g_sp_ifl_incs")
CHECK_CUDA_ERROR(h_sp_ifl: g_sp_ifl_incs)
// download flag to see what happened.
e = hipMemcpy(&h_flag, flag_p, 1,
hipMemcpyDeviceToHost);
CHECK_CUDA_RETURN_VAL("hipMemcpy device to host");
n_iterations++;
} while( h_flag );
if( verbose ){
sprintf(DEFAULT_ERROR_STRING,"Fill completed after %d iterations",n_iterations);
NADVISE(DEFAULT_ERROR_STRING);
}
}
void h_sp_ifl2( Data_Obj *dp, int seed_x, int seed_y, float tol, float fill_val )
{
BLOCK_VARS_DECLS
hipError_t e;
dim5 len, inc1, inc2;
unsigned char *filled, b_one;
float *f_p, v;
int n_iterations;
Vector_Args va1, *vap=(&va1);
dim5 szarr;
len.d5_dim[1] = OBJ_COLS(dp);
len.d5_dim[2] = OBJ_ROWS(dp);
//GET_MAX_THREADS(dp)
SETUP_BLOCKS_XYZ(OBJ_PFDEV(dp))
inc1.d5_dim[1] = OBJ_TYPE_INC(dp,1);
inc1.d5_dim[2] = OBJ_TYPE_INC(dp,2);
inc1.d5_dim[0] = inc1.d5_dim[3] = inc1.d5_dim[4] = 0;
inc2 = inc1;
/* use 2d allocator for better stride? */
if( hipMalloc(&filled,len.d5_dim[1]*len.d5_dim[2]) != hipSuccess ){
NERROR1("cuda malloc error getting filled array");
}
/* set filled to zero */
//CLEAR_CUDA_ERROR2("h_sp_ifl2","zeroit")
CLEAR_CUDA_ERROR("zeroit")
zeroit<<< NN_GPU >>>(szarr,filled,len);
//CHECK_CUDA_ERROR("h_sp_ifl2","zeroit")
CHECK_CUDA_ERROR(h_sp_ifl2: zeroit)
// Get the value at the seed point
f_p = (float *)OBJ_DATA_PTR(dp);
f_p += seed_x + seed_y * inc1.d5_dim[2];
e = hipMemcpy(&v, f_p, sizeof(v), hipMemcpyDeviceToHost);
CHECK_CUDA_RETURN_VAL("hipMemcpy device to host");
// Fill the seed point
b_one = 1;
e = hipMemcpy(filled+seed_x+seed_y*len.d5_dim[1], &b_one, sizeof(b_one),
hipMemcpyHostToDevice);
CHECK_CUDA_RETURN_VAL("hipMemcpy host to device");
e = hipMemcpy(f_p, &fill_val, sizeof(fill_val),
hipMemcpyHostToDevice);
CHECK_CUDA_RETURN_VAL("hipMemcpy host to device");
e = hipMemcpyToSymbol(fill_value, &fill_val, sizeof(float));
CHECK_CUDA_RETURN_VAL("hipMemcpyToSymbol");
e = hipMemcpyToSymbol(tolerance, &tol, sizeof(float));
CHECK_CUDA_RETURN_VAL("hipMemcpyToSymbol");
e = hipMemcpyToSymbol(test_value, &v, sizeof(float));
CHECK_CUDA_RETURN_VAL("hipMemcpyToSymbol");
n_iterations=0;
for( n_iterations = 0 ; n_iterations < 300 ; n_iterations++ ){
//CLEAR_CUDA_ERROR2("h_sp_ifl2","g_sp_ifl2_incs")
CLEAR_CUDA_ERROR("g_sp_ifl2_incs")
g_sp_ifl2_incs<<< NN_GPU >>>
(szarr,(float *)OBJ_DATA_PTR(dp),inc1,filled,inc2,len);
//CHECK_CUDA_ERROR("h_sp_ifl2","g_sp_ifl2_incs")
CHECK_CUDA_ERROR(h_sp_ifl2: g_sp_ifl2_incs)
}
if( verbose ){
sprintf(DEFAULT_ERROR_STRING,"Fill completed after %d iterations",n_iterations);
NADVISE(DEFAULT_ERROR_STRING);
}
}
#endif /* HAVE_CUDA */
|
f0f73f0c2c079f83650f2ee8ede229aa2b9f22cf.cu
|
/* Flood fill
*
* The basic idea is very simple; each iteration we examine each pixel;
* it the pixel borders a filled pixel, we test it, and if the test
* succeeds, then we fill it, also setting a global flag that something
* has changed. We repeat this until nothing changes.
*
* That implementation turned out to be very slow... One problem is
* that unfilled pixels have to examine all of their neighbors.
* We will try a second implementation in which when a pixel is
* filled, it marks its unfilled neighbors.
*
* No difference. Eliminating the flag checks after each kernel
* launch reduces the time (for 100 iterations) from 11 msec to 7 msec!
* This could probably be speeded up quite a bit if the control
* logic could be run on the device instead of on the host...
*
* But can we launch a thread array from a device function?
* Or should we launch the whole grid and have one special thread
* which is the master?
* The slow implementation has one thread per pixel in the image;
* but many iterations are required... better perhaps to have one
* thread per filled pixel with unchecked neighbors?
*
* We can only synchronize threads within a block, so we would have to
* do this with a single block. Let's say we have one thread per
* filled pixel... Each pixel has up to 4 fillable neighbors (although
* only the first seed pixel will have all 4 unfilled). So we have
* an array in shared memory that we fill with the pixel values. (Need
* to check how to avoid bank conflicts!) Then we have a table of
* future pixels. Each thread gets 4 slots. After these have
* been filled, we would like to prune duplicates; we won't have many
* when filling parallel to a coordinate axis, but there will be lots
* for an oblique front... we could use a hash function? Or use the
* flag image. We could use these values:
* 0 - unchecked
* 1 - filled
* 2 - queued
* 3 - rejected
*
* 0 0 0 0 0 0 0 0 0 0 0 0 2 0 0
* 0 0 0 0 0 0 0 2 0 0 0 2 1 2 0
* 0 0 2 0 0 -> 0 2 1 2 0 -> 2 1 1 1 2
* 0 0 0 0 0 0 0 2 0 0 0 2 1 2 0
* 0 0 0 0 0 0 0 0 0 0 0 0 2 0 0
*
* Shared memory per block is only 16k, so we can't put the whole image
* there...
*
* We have an array of pixels to check, sized 4 times the max number
* of threads in a block. We have an array of active pixels, sized
* the max number of threads. After syncing the threads, we need to make
* up the new active pixel list. We may not have enough threads to do all
* of the pixels, so we have several lists. After processing each list,
* we transfer new pixels to be checked to the list, marking them as queued.
* If we run out of space, we will have to set a flag that says we
* have unrecorded pixels that need to be queued; if that is set when
* we are all done, we should scan the entire image again looking for them,
* maybe using a special flag value to indicate an unfulfilled queue request?
* If we can allocate 2048 queue request slots it ought to be enough
* for a 512x512 image...
*
* We probably want to have the shared memory allocated at launch time...
*/
#include "quip_config.h"
#ifdef HAVE_CUDA
#define BUILD_FOR_CUDA
#include <stdio.h>
#include <curand.h>
#include "quip_prot.h"
#include "my_cuda.h"
#include "cuda_supp.h" // describe_cuda_error
#include "veclib/gpu_call_utils.h"
#include "veclib/slow_defs.h"
#include "../cu2/cu2_host_call_defs.h"
#include "../cu2/cu2_kern_call_defs.h"
//#include "my_vector_functions.h" // max_threads_per_block
#define CHECK_CUDA_RETURN_VAL(msg) \
if( e != cudaSuccess ){ \
NWARN(msg); \
}
// The fill routine kernel
#define FILL_IF \
if( fabs( dst - v ) < tol ){ \
src1 = 1; \
dst = fill_val; \
*flag = 1; \
return; \
}
// why not use vset ???
KERNEL_FUNC_QUALIFIER void zeroit(dim5 szarr, unsigned char* a, dim5 len )
{
//int x,y;
DECL_INDICES_1
//x = blockIdx.x * blockDim.x + threadIdx.x;
//y = blockIdx.y * blockDim.y + threadIdx.y;
SET_INDICES_1
/*
if( x < len.x && y < len.y ){
a[x+y*len.y] = 0;
}
*/
dst = 0;
}
KERNEL_FUNC_QUALIFIER void g_sp_ifl_incs(dim5 szarr, float* a, dim5 inc1,
unsigned char* b, dim5 inc2,
dim5 len,
float v, float tol, float fill_val, int *flag)
{
/* BLOCK_VARS_DECLS */
//INIT_INDICES_XYZ_2
DECL_INDICES_2
SET_INDICES_2
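// src1 and dst appear to expand (via the veclib index macros) to the mask and
// image elements addressed by index2 and index1; shifting index2 by an
// increment, re-reading src1, and shifting back peeks at a neighbor's mask value.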
//if (index1.x < len.x && index1.y < len.y ) {
//SCALE_INDICES_XYZ_2
if( src1 == 0 ){ // not filled yet
// check each neighbor if filled
if( index2.d5_dim[1] > 0 ){ // in-bounds
index2.d5_dim[1] -= inc2.d5_dim[1];
if( src1 > 0 ){ // neighbor filled?
index2.d5_dim[1] += inc2.d5_dim[1];
FILL_IF
}
index2.d5_dim[1] += inc2.d5_dim[1];
}
if( index2.d5_dim[1] < (len.d5_dim[1]-1)*inc2.d5_dim[1] ){
index2.d5_dim[1] += inc2.d5_dim[1];
if( src1 > 0 ){
index2.d5_dim[1] -= inc2.d5_dim[1];
FILL_IF
}
index2.d5_dim[1] -= inc2.d5_dim[1];
}
if( index2.d5_dim[2] > 0 ){
index2.d5_dim[2] -= inc2.d5_dim[2];
if( src1 > 0 ){
index2.d5_dim[2] += inc2.d5_dim[2];
FILL_IF
}
index2.d5_dim[2] += inc2.d5_dim[2];
}
if( index2.d5_dim[2] < (len.d5_dim[2]-1)*inc2.d5_dim[2] ){
index2.d5_dim[2] += inc2.d5_dim[2];
if( src1 > 0 ){
index2.d5_dim[2] -= inc2.d5_dim[2];
FILL_IF
}
index2.d5_dim[2] -= inc2.d5_dim[2];
}
}
//}
}
__constant__ float test_value[1];
__constant__ float tolerance[1];
__constant__ float fill_value[1];
#define FILL_IF2 \
if( fabs( dst - test_value[0] ) < tolerance[0] ){ \
src1 = 1; \
dst = fill_value[0]; \
return; \
}
KERNEL_FUNC_QUALIFIER void g_sp_ifl2_incs(dim5 szarr, float* a, dim5 inc1,
unsigned char* b, dim5 inc2,
dim5 len)
{
/* BLOCK_VARS_DECLS */
//INIT_INDICES_XYZ_2
DECL_INDICES_2
SET_INDICES_2
if (index1.d5_dim[1] < len.d5_dim[1] && index1.d5_dim[2] < len.d5_dim[2] ) {
//SCALE_INDICES_XYZ_2
if( src1 == 0 ){ // not filled yet
// check each neighbor if filled
if( index2.d5_dim[1] > 0 ){ // in-bounds
index2.d5_dim[1] -= inc2.d5_dim[1];
if( src1 > 0 ){ // neighbor filled?
index2.d5_dim[1] += inc2.d5_dim[1];
FILL_IF2
}
index2.d5_dim[1] += inc2.d5_dim[1];
}
if( index2.d5_dim[1] < (len.d5_dim[1]-1)*inc2.d5_dim[1] ){
index2.d5_dim[1] += inc2.d5_dim[1];
if( src1 > 0 ){
index2.d5_dim[1] -= inc2.d5_dim[1];
FILL_IF2
}
index2.d5_dim[1] -= inc2.d5_dim[1];
}
if( index2.d5_dim[2] > 0 ){
index2.d5_dim[2] -= inc2.d5_dim[2];
if( src1 > 0 ){
index2.d5_dim[2] += inc2.d5_dim[2];
FILL_IF2
}
index2.d5_dim[2] += inc2.d5_dim[2];
}
if( index2.d5_dim[2] < (len.d5_dim[2]-1)*inc2.d5_dim[2] ){
index2.d5_dim[2] += inc2.d5_dim[2];
if( src1 > 0 ){
index2.d5_dim[2] -= inc2.d5_dim[2];
FILL_IF2
}
index2.d5_dim[2] -= inc2.d5_dim[2];
}
}
}
}
void h_sp_ifl( Data_Obj *dp, int x, int y, float tol, float fill_val )
{
BLOCK_VARS_DECLS
cudaError_t e;
dim5 len, inc1, inc2;
unsigned char *filled, b_one;
float *f_p, v;
int h_flag, *flag_p;
int n_iterations;
Vector_Args va1, *vap=(&va1);
dim5 szarr;
len.d5_dim[1] = OBJ_COLS(dp);
len.d5_dim[2] = OBJ_ROWS(dp);
//GET_MAX_THREADS(dp)
SETUP_BLOCKS_XYZ(OBJ_PFDEV(dp))
inc1.d5_dim[1] = OBJ_TYPE_INC(dp,1);
inc1.d5_dim[2] = OBJ_TYPE_INC(dp,2);
inc1.d5_dim[0] = inc1.d5_dim[3] = inc1.d5_dim[4] = 0;
inc2 = inc1;
if( cudaMalloc(&flag_p,sizeof(*flag_p)) != cudaSuccess ){
NERROR1("cuda malloc error getting flag word");
}
/* use 2d allocator for better stride? */
if( cudaMalloc(&filled,len.d5_dim[1]*len.d5_dim[2]) != cudaSuccess ){
NERROR1("cuda malloc error getting filled array");
}
/* set filled to zero */
//CLEAR_CUDA_ERROR2("h_sp_ifl","zeroit")
CLEAR_CUDA_ERROR("zeroit")
zeroit<<< NN_GPU >>>(szarr,filled,len);
//CHECK_CUDA_ERROR("h_sp_ifl","zeroit")
CHECK_CUDA_ERROR(h_sp_ifl: zeroit)
// Get the value at the seed point
f_p = (float *)OBJ_DATA_PTR(dp);
f_p += x + y * inc1.d5_dim[2];
e = cudaMemcpy(&v, f_p, sizeof(v), cudaMemcpyDeviceToHost);
CHECK_CUDA_RETURN_VAL("cudaMemcpy device to host");
// Fill the seed point
b_one = 1;
e = cudaMemcpy(filled+x+y*len.d5_dim[1], &b_one, sizeof(b_one),
cudaMemcpyHostToDevice);
CHECK_CUDA_RETURN_VAL("cudaMemcpy host to device");
e = cudaMemcpy(f_p, &fill_val, sizeof(fill_val),
cudaMemcpyHostToDevice);
CHECK_CUDA_RETURN_VAL("cudaMemcpy host to device");
n_iterations=0;
do {
/* Clear the flag */
h_flag = 0;
e = cudaMemcpy(flag_p, &h_flag, sizeof(h_flag),
cudaMemcpyHostToDevice);
CHECK_CUDA_RETURN_VAL("cudaMemcpy host to device");
//CLEAR_CUDA_ERROR2("h_sp_ifl","g_sp_ifl_incs")
CLEAR_CUDA_ERROR("g_sp_ifl_incs")
g_sp_ifl_incs<<< NN_GPU >>>
(szarr,(float *)OBJ_DATA_PTR(dp),inc1,filled,inc2,len,v,tol,fill_val,flag_p);
//CHECK_CUDA_ERROR("h_sp_ifl","g_sp_ifl_incs")
CHECK_CUDA_ERROR(h_sp_ifl: g_sp_ifl_incs)
// download flag to see what happened.
e = cudaMemcpy(&h_flag, flag_p, 1,
cudaMemcpyDeviceToHost);
CHECK_CUDA_RETURN_VAL("cudaMemcpy device to host");
n_iterations++;
} while( h_flag );
if( verbose ){
sprintf(DEFAULT_ERROR_STRING,"Fill completed after %d iterations",n_iterations);
NADVISE(DEFAULT_ERROR_STRING);
}
}
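/* h_sp_ifl2 is the "no flag check" variant described in the header comment: the
* fill value, tolerance and seed value are placed in __constant__ memory and the
* kernel is simply launched a fixed number of times (300) with no host
* read-back between iterations.
*/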
void h_sp_ifl2( Data_Obj *dp, int seed_x, int seed_y, float tol, float fill_val )
{
BLOCK_VARS_DECLS
cudaError_t e;
dim5 len, inc1, inc2;
unsigned char *filled, b_one;
float *f_p, v;
int n_iterations;
Vector_Args va1, *vap=(&va1);
dim5 szarr;
len.d5_dim[1] = OBJ_COLS(dp);
len.d5_dim[2] = OBJ_ROWS(dp);
//GET_MAX_THREADS(dp)
SETUP_BLOCKS_XYZ(OBJ_PFDEV(dp))
inc1.d5_dim[1] = OBJ_TYPE_INC(dp,1);
inc1.d5_dim[2] = OBJ_TYPE_INC(dp,2);
inc1.d5_dim[0] = inc1.d5_dim[3] = inc1.d5_dim[4] = 0;
inc2 = inc1;
/* use 2d allocator for better stride? */
if( cudaMalloc(&filled,len.d5_dim[1]*len.d5_dim[2]) != cudaSuccess ){
NERROR1("cuda malloc error getting filled array");
}
/* set filled to zero */
//CLEAR_CUDA_ERROR2("h_sp_ifl2","zeroit")
CLEAR_CUDA_ERROR("zeroit")
zeroit<<< NN_GPU >>>(szarr,filled,len);
//CHECK_CUDA_ERROR("h_sp_ifl2","zeroit")
CHECK_CUDA_ERROR(h_sp_ifl2: zeroit)
// Get the value at the seed point
f_p = (float *)OBJ_DATA_PTR(dp);
f_p += seed_x + seed_y * inc1.d5_dim[2];
e = cudaMemcpy(&v, f_p, sizeof(v), cudaMemcpyDeviceToHost);
CHECK_CUDA_RETURN_VAL("cudaMemcpy device to host");
// Fill the seed point
b_one = 1;
e = cudaMemcpy(filled+seed_x+seed_y*len.d5_dim[1], &b_one, sizeof(b_one),
cudaMemcpyHostToDevice);
CHECK_CUDA_RETURN_VAL("cudaMemcpy host to device");
e = cudaMemcpy(f_p, &fill_val, sizeof(fill_val),
cudaMemcpyHostToDevice);
CHECK_CUDA_RETURN_VAL("cudaMemcpy host to device");
e = cudaMemcpyToSymbol(fill_value, &fill_val, sizeof(float));
CHECK_CUDA_RETURN_VAL("cudaMemcpyToSymbol");
e = cudaMemcpyToSymbol(tolerance, &tol, sizeof(float));
CHECK_CUDA_RETURN_VAL("cudaMemcpyToSymbol");
e = cudaMemcpyToSymbol(test_value, &v, sizeof(float));
CHECK_CUDA_RETURN_VAL("cudaMemcpyToSymbol");
n_iterations=0;
for( n_iterations = 0 ; n_iterations < 300 ; n_iterations++ ){
//CLEAR_CUDA_ERROR2("h_sp_ifl2","g_sp_ifl2_incs")
CLEAR_CUDA_ERROR("g_sp_ifl2_incs")
g_sp_ifl2_incs<<< NN_GPU >>>
(szarr,(float *)OBJ_DATA_PTR(dp),inc1,filled,inc2,len);
//CHECK_CUDA_ERROR("h_sp_ifl2","g_sp_ifl2_incs")
CHECK_CUDA_ERROR(h_sp_ifl2: g_sp_ifl2_incs)
}
if( verbose ){
sprintf(DEFAULT_ERROR_STRING,"Fill completed after %d iterations",n_iterations);
NADVISE(DEFAULT_ERROR_STRING);
}
}
#endif /* HAVE_CUDA */
|
6194bb654e95bceab53a184c91b6a0d8deda8d63.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file saxpy.c
* @author Alessandro Capotondi
* @date 12 May 2020
* @brief Saxpy
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <assert.h>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
#define TWO02 (1 << 2)
#define TWO04 (1 << 4)
#define TWO08 (1 << 8)
#ifndef N
#define N (1 << 27)
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE (512)
#endif
#ifndef N_STREAMS
#define N_STREAMS (16)
#endif
/*
*SAXPY (host implementation)
* y := a * x + y
*/
void host_saxpy(float * __restrict__ y, float a, float * __restrict__ x, int n)
{
#pragma omp parallel for simd schedule(simd: static)
for (int i = 0; i < n; i++)
{
y[i] = a * x[i] + y[i];
}
}
__global__ void gpu_saxpy(float * __restrict__ y, float a, float * __restrict__ x, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
y[i] = a * x[i] + y[i];
}
int main(int argc, const char **argv)
{
int iret = 0;
int n = N;
float *h_x, *d_x;
float *h_y, *d_y;
float *h_z;
float a = 101.0f / TWO02,
b, c;
if (argc > 1)
n = atoi(argv[1]);
if (NULL == (h_x = (float *)malloc(sizeof(float) * n)))
{
printf("error: memory allocation for 'x'\n");
iret = -1;
}
if (NULL == (h_y = (float *)malloc(sizeof(float) * n)))
{
printf("error: memory allocation for 'y'\n");
iret = -1;
}
if (NULL == (h_z = (float *)malloc(sizeof(float) * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (0 != iret)
{
free(h_x);
free(h_y);
free(h_z);
exit(EXIT_FAILURE);
}
//Init Data
b = rand() % TWO04;
c = rand() % TWO08;
for (int i = 0; i < n; i++)
{
h_x[i] = b / (float)TWO02;
h_y[i] = h_z[i] = c / (float)TWO04;
}
//CUDA Buffer Allocation
gpuErrchk(hipMalloc((void **)&d_x, sizeof(float) * n));
gpuErrchk(hipMalloc((void **)&d_y, sizeof(float) * n));
start_timer();
int TILE = n / N_STREAMS;
hipStream_t stream[N_STREAMS];
for(int i = 0; i < N_STREAMS; i++)
hipStreamCreate(&stream[i]);
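/* Pipeline sketch: the vectors are split into N_STREAMS contiguous tiles and
* each tile's H2D copies, kernel launch and D2H copy are issued on its own
* stream, so transfers of one tile can overlap with compute of another. Two
* caveats in this exercise code: n is assumed to be a multiple of N_STREAMS,
* and the host buffers come from malloc() (pageable memory), so the async
* copies cannot fully overlap unless the buffers are allocated with
* hipHostMalloc instead.
*/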
//TODO Loop over the Tiles
for (int i = 0; i < n; i += TILE)
{
//TODO Copy in Tile i (stream i)
gpuErrchk(hipMemcpyAsync(&d_x[i], &h_x[i], sizeof(float) * TILE, hipMemcpyHostToDevice, stream[i/TILE]));
gpuErrchk(hipMemcpyAsync(&d_y[i], &h_y[i], sizeof(float) * TILE, hipMemcpyHostToDevice, stream[i/TILE]));
//TODO Kernel Tile i (stream i)
hipLaunchKernelGGL(( gpu_saxpy), dim3(((TILE + BLOCK_SIZE - 1) / BLOCK_SIZE)), dim3(BLOCK_SIZE),0,stream[i/TILE], &d_y[i], a, &d_x[i], TILE);
//TODO Copy out Tile i (stream i)
gpuErrchk(hipMemcpyAsync(&h_y[i], &d_y[i], sizeof(float) * TILE, hipMemcpyDeviceToHost,stream[i/TILE]));
}
//TODO Wait all the streams...
hipDeviceSynchronize();
stop_timer();
printf("saxpy (GPU): %9.3f sec %9.1f GFLOPS\n", elapsed_ns() / 1.0e9, 2 * n / ((float) elapsed_ns()));
//Check Mathematical Consistency
start_timer();
host_saxpy(h_z, a, h_x, n);
stop_timer();
printf("saxpy (Host): %9.3f sec %9.1f GFLOPS\n", elapsed_ns() / 1.0e9, 2 * n / ((float) elapsed_ns()));
for (int i = 0; i < n; ++i)
{
iret = *(int *)(h_y + i) ^ *(int *)(h_z + i);
assert(iret == 0);
}
free(h_x);
gpuErrchk(hipFree(d_x));
free(h_y);
gpuErrchk(hipFree(d_y));
free(h_z);
for (int i=0; i<N_STREAMS; ++i)
hipStreamDestroy(stream[i]);
// CUDA exit -- needed to flush printf write buffer
hipDeviceReset();
return 0;
}
|
6194bb654e95bceab53a184c91b6a0d8deda8d63.cu
|
/*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file saxpy.c
* @author Alessandro Capotondi
* @date 12 May 2020
* @brief Saxpy
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <assert.h>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
#define TWO02 (1 << 2)
#define TWO04 (1 << 4)
#define TWO08 (1 << 8)
#ifndef N
#define N (1 << 27)
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE (512)
#endif
#ifndef N_STREAMS
#define N_STREAMS (16)
#endif
/*
*SAXPY (host implementation)
* y := a * x + y
*/
void host_saxpy(float * __restrict__ y, float a, float * __restrict__ x, int n)
{
#pragma omp parallel for simd schedule(simd: static)
for (int i = 0; i < n; i++)
{
y[i] = a * x[i] + y[i];
}
}
__global__ void gpu_saxpy(float * __restrict__ y, float a, float * __restrict__ x, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
y[i] = a * x[i] + y[i];
}
int main(int argc, const char **argv)
{
int iret = 0;
int n = N;
float *h_x, *d_x;
float *h_y, *d_y;
float *h_z;
float a = 101.0f / TWO02,
b, c;
if (argc > 1)
n = atoi(argv[1]);
if (NULL == (h_x = (float *)malloc(sizeof(float) * n)))
{
printf("error: memory allocation for 'x'\n");
iret = -1;
}
if (NULL == (h_y = (float *)malloc(sizeof(float) * n)))
{
printf("error: memory allocation for 'y'\n");
iret = -1;
}
if (NULL == (h_z = (float *)malloc(sizeof(float) * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (0 != iret)
{
free(h_x);
free(h_y);
free(h_z);
exit(EXIT_FAILURE);
}
//Init Data
b = rand() % TWO04;
c = rand() % TWO08;
for (int i = 0; i < n; i++)
{
h_x[i] = b / (float)TWO02;
h_y[i] = h_z[i] = c / (float)TWO04;
}
//CUDA Buffer Allocation
gpuErrchk(cudaMalloc((void **)&d_x, sizeof(float) * n));
gpuErrchk(cudaMalloc((void **)&d_y, sizeof(float) * n));
start_timer();
int TILE = n / N_STREAMS;
cudaStream_t stream[N_STREAMS];
for(int i = 0; i < N_STREAMS; i++)
cudaStreamCreate(&stream[i]);
//TODO Loop over the Tiles
for (int i = 0; i < n; i += TILE)
{
//TODO Copy in Tile i (stream i)
gpuErrchk(cudaMemcpyAsync(&d_x[i], &h_x[i], sizeof(float) * TILE, cudaMemcpyHostToDevice, stream[i/TILE]));
gpuErrchk(cudaMemcpyAsync(&d_y[i], &h_y[i], sizeof(float) * TILE, cudaMemcpyHostToDevice, stream[i/TILE]));
//TODO Kernel Tile i (stream i)
gpu_saxpy<<<((TILE + BLOCK_SIZE - 1) / BLOCK_SIZE), BLOCK_SIZE,0,stream[i/TILE]>>>(&d_y[i], a, &d_x[i], TILE);
//TODO Copy out Tile i (stream i)
gpuErrchk(cudaMemcpyAsync(&h_y[i], &d_y[i], sizeof(float) * TILE, cudaMemcpyDeviceToHost,stream[i/TILE]));
}
//TODO Wait all the streams...
cudaDeviceSynchronize();
stop_timer();
printf("saxpy (GPU): %9.3f sec %9.1f GFLOPS\n", elapsed_ns() / 1.0e9, 2 * n / ((float) elapsed_ns()));
//Check Mathematical Consistency
start_timer();
host_saxpy(h_z, a, h_x, n);
stop_timer();
printf("saxpy (Host): %9.3f sec %9.1f GFLOPS\n", elapsed_ns() / 1.0e9, 2 * n / ((float) elapsed_ns()));
for (int i = 0; i < n; ++i)
{
iret = *(int *)(h_y + i) ^ *(int *)(h_z + i);
assert(iret == 0);
}
free(h_x);
gpuErrchk(cudaFree(d_x));
free(h_y);
gpuErrchk(cudaFree(d_y));
free(h_z);
for (int i=0; i<N_STREAMS; ++i)
cudaStreamDestroy(stream[i]);
// CUDA exit -- needed to flush printf write buffer
cudaDeviceReset();
return 0;
}
|
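The tiled copy/compute/copy loop in the pair above assumes n is an exact multiple of N_STREAMS, and its host buffers come from plain malloc, so the async copies fall back to synchronous behaviour (pinned allocations would be needed for real overlap). Below is a minimal sketch, assuming the gpu_saxpy kernel and the device/host buffers defined above, of how a ragged last tile could be handled; the helper name saxpy_tiled is illustrative and not part of the original exercise.
#include <cuda_runtime.h>

static void saxpy_tiled(float *d_y, float *d_x, float *h_y, float *h_x,
                        float a, int n, int n_streams, int block_size,
                        cudaStream_t *streams)
{
    int tile = (n + n_streams - 1) / n_streams;               // ceiling division
    for (int s = 0; s < n_streams; ++s) {
        int off = s * tile;
        if (off >= n) break;                                   // no work left for this stream
        int len = (off + tile <= n) ? tile : (n - off);        // shrink the final tile
        cudaMemcpyAsync(d_x + off, h_x + off, len * sizeof(float), cudaMemcpyHostToDevice, streams[s]);
        cudaMemcpyAsync(d_y + off, h_y + off, len * sizeof(float), cudaMemcpyHostToDevice, streams[s]);
        gpu_saxpy<<<(len + block_size - 1) / block_size, block_size, 0, streams[s]>>>(d_y + off, a, d_x + off, len);
        cudaMemcpyAsync(h_y + off, d_y + off, len * sizeof(float), cudaMemcpyDeviceToHost, streams[s]);
    }
    cudaDeviceSynchronize();                                   // wait for every stream to finish
}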
24e7bd119b4d7a746b1bd97fb0755b7b6e42e52c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mesh.h"
#include "constant.h"
__device__ double
source(int i, int j, double t){
return (j==200)?8*sin((t*1e10*2*pi)):0;
}
//H
__global__ void
updateH(mesh* m, int W, int H)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int k = idy*W+idx;
//update
if(idx<W-1 && idy<H-1){
m[k].Hzx = m[k].DHx1*m[k].Hzx - m[k].DHx2 * (m[k+H].Ey - m[k].Ey);
m[k].Hzy = m[k].DHy1*m[k].Hzy + m[k].DHy2 * (m[k+1].Ex - m[k].Ex);
}
}
//E
__global__ void
updateE(mesh *m, int W, int H)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int k = idy*W+idx;
//update
if(idx<W && idx>0 && idy>0 && idy<H){
m[k].Ex = m[k].CEx1*m[k].Ex + m[k].CEx2 * (m[k].Hzx + m[k].Hzy - m[k-1].Hzx - m[k-1].Hzy);
m[k].Ey = m[k].CEy1*m[k].Ey - m[k].CEy2 * (m[k].Hzx + m[k].Hzy - m[k-H].Hzx - m[k-H].Hzy);
}
}
__global__ void
updateSource(mesh *m, int W, int H, double time){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int k = idy*W+idx;
if(idx<W && idx>0 && idy>0 && idy<H){
double s = source(idx, idy, time);
//double Hs = source(idx, idy, time);
//double Es = source(idx, idy, time);
m[k].Hzy+= m[k].DHx2* s;
m[k+1].Ey += m[k+1].CEy2*s/(120*pi);
}
}
#define BLOCKSIZ 16 /* 16x16 = 256 threads per block; 128x128 would exceed the 1024-thread-per-block limit */
extern "C"
void cudaUpdateKernel(mesh* d_m, int Nx, int Ny, double t){
dim3 dimBlock(BLOCKSIZ,BLOCKSIZ);
	dim3 dimGrid((Nx + BLOCKSIZ - 1)/BLOCKSIZ, (Ny + BLOCKSIZ - 1)/BLOCKSIZ); /* integer ceiling division */
hipLaunchKernelGGL(( updateH), dim3(dimGrid), dim3(dimBlock), 0, 0, d_m, Nx, Ny);
hipLaunchKernelGGL(( updateE), dim3(dimGrid), dim3(dimBlock), 0, 0, d_m, Nx, Ny);
hipLaunchKernelGGL(( updateSource), dim3(dimGrid), dim3(dimBlock), 0, 0, d_m, Nx, Ny, t);
}
|
24e7bd119b4d7a746b1bd97fb0755b7b6e42e52c.cu
|
#include "mesh.h"
#include "constant.h"
__device__ double
source(int i, int j, double t){
return (j==200)?8*sin((t*1e10*2*pi)):0;
}
//H
__global__ void
updateH(mesh* m, int W, int H)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int k = idy*W+idx;
//update
if(idx<W-1 && idy<H-1){
m[k].Hzx = m[k].DHx1*m[k].Hzx - m[k].DHx2 * (m[k+H].Ey - m[k].Ey);
m[k].Hzy = m[k].DHy1*m[k].Hzy + m[k].DHy2 * (m[k+1].Ex - m[k].Ex);
}
}
//E
__global__ void
updateE(mesh *m, int W, int H)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int k = idy*W+idx;
//update
if(idx<W && idx>0 && idy>0 && idy<H){
m[k].Ex = m[k].CEx1*m[k].Ex + m[k].CEx2 * (m[k].Hzx + m[k].Hzy - m[k-1].Hzx - m[k-1].Hzy);
m[k].Ey = m[k].CEy1*m[k].Ey - m[k].CEy2 * (m[k].Hzx + m[k].Hzy - m[k-H].Hzx - m[k-H].Hzy);
}
}
__global__ void
updateSource(mesh *m, int W, int H, double time){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int k = idy*W+idx;
if(idx<W && idx>0 && idy>0 && idy<H){
double s = source(idx, idy, time);
//double Hs = source(idx, idy, time);
//double Es = source(idx, idy, time);
m[k].Hzy+= m[k].DHx2* s;
m[k+1].Ey += m[k+1].CEy2*s/(120*pi);
}
}
#define BLOCKSIZ 16 /* 16x16 = 256 threads per block; 128x128 would exceed the 1024-thread-per-block limit */
extern "C"
void cudaUpdateKernel(mesh* d_m, int Nx, int Ny, double t){
dim3 dimBlock(BLOCKSIZ,BLOCKSIZ);
	dim3 dimGrid((Nx + BLOCKSIZ - 1)/BLOCKSIZ, (Ny + BLOCKSIZ - 1)/BLOCKSIZ); /* integer ceiling division */
updateH<<<dimGrid, dimBlock>>>(d_m, Nx, Ny);
updateE<<<dimGrid, dimBlock>>>(d_m, Nx, Ny);
updateSource<<<dimGrid, dimBlock>>>(d_m, Nx, Ny, t);
}
|
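The 2D launch in the pair above sizes its grid with integer ceiling division and keeps the block at 16 x 16 = 256 threads, since a block may not exceed 1024 threads in total and ceil() applied to an already-truncated integer division has no effect. A small standalone sketch of that arithmetic; the 400 x 400 domain size is illustrative only, not taken from the file.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    const int Nx = 400, Ny = 400;   // illustrative FDTD domain size
    const int BS = 16;              // 16 x 16 = 256 threads per block, within the 1024 limit
    dim3 block(BS, BS);
    dim3 grid((Nx + BS - 1) / BS, (Ny + BS - 1) / BS);   // integer ceiling division
    printf("grid = (%u, %u), block = (%u, %u), threads per block = %u\n",
           grid.x, grid.y, block.x, block.y, block.x * block.y);
    return 0;
}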
bf4cce3af43bd151f2fb88317daa209012517a6e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __LONG_SHORT_TERM_MEMORY_BACK_PROPAGATION_BACKWARD_PASS_KERNEL__
#define __LONG_SHORT_TERM_MEMORY_BACK_PROPAGATION_BACKWARD_PASS_KERNEL__
#include "FullyConnectedLeakyIntegrateAndFireKernel.cu"
#include "FullyConnectedAdaptiveLeakyIntegrateAndFireKernel.cu"
#include "FullyConnectedBackPropagationInputOutputKernel.cu"
#include "FullyConnectedInputSpikePropagationKernel.cu"
#include "FullyConnectedHiddenSpikePropagationKernel.cu"
#include "LongShortTermMemoryLearnSignalKernel.cu"
#include "FullyConnectedDeltaErrorKernel.cu"
#include "FullyConnectedFastFiringRateKernel.cu"
#include "LongShortTermMemoryLeakyReadoutGradientKernel.cu"
#include "LongShortTermMemoryInputEligibilityLearnSignalErrorKernel.cu"
/**
* Parallel kernel for a Long Short Term Spiking Network
*/
namespace SNN {
namespace Kernels {
namespace GPU {
#define ERROR_MODE_REGRESSION 0
#define ERROR_MODE_CLASSIFICATION 1
#define ERROR_MODE_INFINITY 2
#define ERROR_MODE_LEARNSIGNAL 3
__device__ void longShortTermMemoryBackPropagationBackwardPassKernel(
/* the number of input neurons */
unsigned numInputs,
            /* the number of (leaky integrate and fire) hidden neurons */
unsigned numStandartHidden,
/* the number of (adaptive leaky integrate and fire) hidden neurons */
unsigned numAdaptiveHidden,
/* the number of output neurons */
unsigned numOutputs,
            /* the number of simulation time steps */
unsigned numSimulationTimesteps,
/* the simulation start and end time */
int startTime,
int endTime,
/* the error mode of this */
unsigned errorMode,
/* the simulation timestep length */
FloatType timeStepLength,
/* neuron spike threshold */
FloatType spikeThreshold,
            /* neuron refractory period */
FloatType refactoryPeriod,
/* the hidden voltage decay factor */
FloatType hiddenDecayFactor,
/* the readout voltage decay factor */
FloatType readoutDecayFactor,
/* the decay factor for the adaptive threshold */
FloatType adaptationDecayFactor,
/* the factor about which the base threshold increases */
FloatType thresholdIncreaseConstant,
/* the target firing rate */
FloatType targetFiringRate,
            /* the firing rate gradient scaling factor */
FloatType firingRateScallingFactor,
/* the derivative dumping factor */
FloatType derivativeDumpingFactor,
/* the input neuron spikes over one simulation run */
FloatType *inputSpikesOverTime,
/* the input and hidden neuron spikes over one simulation run */
FloatType *spikesOverTime,
/* the hidden neurons firing rates */
FloatType *firingRates,
/* the number of spikes per neuron */
FloatType *numSpikes,
/* the synaptic input weights */
FloatType *inputWeights,
/* the synaptic hidden weights */
FloatType *hiddenWeights,
/* the synaptic output weights */
FloatType *outputWeights,
/* the feedback weights */
FloatType *feedbackWeights,
/* the network target weights */
FloatType *targetWeights,
            /* the network targets for one simulation run */
FloatType *targetsOverTime,
            /* the network outputs for one simulation run */
FloatType *outputsOverTime,
/* the network output errors for one simulation run */
FloatType *outputErrorsOverTime,
/* the network derivatives for one simulation run */
FloatType *derivativesOverTime,
/* the network derivatives for the last simulation run */
FloatType *oldDerivativesOverTime,
/* the network hidden voltage for the last simulation run */
FloatType *voltageOverTime,
/* time since last spike for hidden neurons over time */
FloatType *timeStepsSinceLastSpikeOverTime,
/* the neurons adaptation values over time */
FloatType *thresholdAdaptationOverTime,
/* the network error mask for one simulation run */
FloatType *errorMaskOverTime,
/* the network error factors for one simulation run */
FloatType *outputErrorFactorOverTime,
/* the back propagation gradients for input synapses */
FloatType *inputBackPropagationGradients,
/* the firing rate gradients for input synapses */
FloatType *inputFiringRateGradients,
/* the back propagation gradients for hidden synapses */
FloatType *hiddenBackPropagationGradients,
/* the firing rate gradients for hidden synapses */
FloatType *hiddenFiringRateGradients,
/* the leaky readout gradients */
FloatType *leakyReadoutGradients,
/* the networks squared summed error */
FloatType *networkError,
/* the network summed target for each output */
FloatType *summedTargets,
/* the network squared summed target for each output */
FloatType *squaredSummedTargets,
/* the number of values summed for each output */
FloatType *numSummedValues,
/* the networks classification accuracy error */
FloatType *classificationAccuracy,
/* the networks number of classification samples */
FloatType *classificationSamples,
/***** content managed by kernel ******/
/* the filtered hidden spikes */
FloatType *filteredSpikes,
/* the filtered hidden spikes (by the readout decay factor) */
FloatType *readoutDecayFilteredSpikes,
/* the neurons adaptation values */
FloatType *thresholdAdaptation,
/* hidden derivatives */
FloatType *derivatives,
/* input current for hidden and output neurons */
FloatType *I,
/* hidden and readout voltage */
FloatType *v,
/* hidden spikes */
FloatType *hiddenSpikes,
/* time since last spike for hidden neurons */
FloatType *timeStepsSinceLastSpike,
/* hidden neuron delta errors (voltage component) */
FloatType *deltaErrorsVoltage,
/* hidden neuron delta errors (adaptation component) */
FloatType *deltaErrorsAdaption,
            /* the input errors for one simulation run */
FloatType *inputErrorsOverTime,
/* the filtered (back propagated) output errors */
FloatType *filteredOutputErrors,
/* summed network output for classification */
FloatType *summedActivation
) {
const unsigned numHidden = numStandartHidden + numAdaptiveHidden;
cudaAssert(max(max(numInputs, numHidden), numOutputs) == blockDim.x);
inputSpikesOverTime += blockIdx.x * numInputs * numSimulationTimesteps;
spikesOverTime += blockIdx.x * (numInputs + numHidden) * numSimulationTimesteps;
numSpikes += blockIdx.x * (numHidden + numInputs);
targetsOverTime += blockIdx.x * numOutputs * numSimulationTimesteps;
outputsOverTime += blockIdx.x * numOutputs * numSimulationTimesteps;
outputErrorsOverTime += blockIdx.x * numOutputs * numSimulationTimesteps;
derivativesOverTime += blockIdx.x * numHidden * numSimulationTimesteps;
oldDerivativesOverTime += blockIdx.x * numHidden * numSimulationTimesteps;
voltageOverTime += blockIdx.x * numHidden * numSimulationTimesteps;
timeStepsSinceLastSpikeOverTime += blockIdx.x * numHidden * numSimulationTimesteps;
thresholdAdaptationOverTime += blockIdx.x * numAdaptiveHidden * numSimulationTimesteps;
errorMaskOverTime += blockIdx.x * numSimulationTimesteps;
outputErrorFactorOverTime += blockIdx.x * numOutputs * numSimulationTimesteps;
inputBackPropagationGradients += blockIdx.x * numInputs * numHidden;
inputFiringRateGradients += blockIdx.x * numInputs * numHidden;
hiddenBackPropagationGradients += blockIdx.x * numHidden * numHidden;
hiddenFiringRateGradients += blockIdx.x * numHidden * numHidden;
leakyReadoutGradients += blockIdx.x * numHidden * numOutputs;
networkError += blockIdx.x;
summedTargets += blockIdx.x;
squaredSummedTargets += blockIdx.x;
numSummedValues += blockIdx.x;
readoutDecayFilteredSpikes += blockIdx.x * numHidden;
thresholdAdaptation += blockIdx.x * numAdaptiveHidden;
derivatives += blockIdx.x * numHidden;
I += blockIdx.x * (numHidden + numOutputs);
v += blockIdx.x * (numHidden + numOutputs);
hiddenSpikes += blockIdx.x * numHidden;
timeStepsSinceLastSpike += blockIdx.x * numHidden;
deltaErrorsVoltage += blockIdx.x * numHidden;
deltaErrorsAdaption += blockIdx.x * numAdaptiveHidden;
inputErrorsOverTime += blockIdx.x * numInputs * numSimulationTimesteps;
filteredOutputErrors += blockIdx.x * numOutputs;
summedActivation += blockIdx.x * numOutputs;
classificationAccuracy += blockIdx.x;
classificationSamples += blockIdx.x;
if (startTime < 0) startTime = 0;
if (endTime < 0) endTime = numSimulationTimesteps;
__syncthreads();
const int i = threadIdx.x;
for (unsigned index = 0; index + i < numInputs * numHidden; index += blockDim.x) {
inputFiringRateGradients[index + i] = 0;
inputBackPropagationGradients[index + i] = 0;
}
for (unsigned index = 0; index + i < numHidden * numHidden; index += blockDim.x) {
hiddenFiringRateGradients[index + i] = 0;
hiddenBackPropagationGradients[index + i] = 0;
}
for (unsigned index = 0; index + i < numHidden * numOutputs; index += blockDim.x) {
leakyReadoutGradients[index + i] = 0;
}
if (startTime > 0) {
startTime = startTime % numSimulationTimesteps;
endTime = ((endTime - 1) % numSimulationTimesteps) + 1;
}
/* backpropagation loop */
for (int t = endTime - 1; t >= startTime; t--) {
if (i < numOutputs) {
if (errorMode != ERROR_MODE_LEARNSIGNAL) {
filteredOutputErrors[i] = filteredOutputErrors[i] * readoutDecayFactor +
(outputsOverTime[t * numOutputs + i] - targetsOverTime[t * numOutputs + i]) *
errorMaskOverTime[t];
} else {
filteredOutputErrors[i] = filteredOutputErrors[i] * readoutDecayFactor +
outputErrorsOverTime[t * numOutputs + i];
}
}
__syncthreads();
fullyConnectedDeltaErrorKernel(
numInputs,
numHidden,
numOutputs,
numStandartHidden,
numAdaptiveHidden,
spikeThreshold,
thresholdIncreaseConstant,
adaptationDecayFactor,
hiddenDecayFactor,
timeStepLength,
filteredOutputErrors,
targetWeights,
outputErrorFactorOverTime + t * numOutputs,
deltaErrorsVoltage,
deltaErrorsAdaption,
spikesOverTime + t * (numInputs + numHidden) + numInputs,
derivativesOverTime + t * numHidden,
voltageOverTime + t * numHidden,
hiddenWeights,
outputWeights
);
__syncthreads();
if (i < numInputs) {
FloatType inputError = 0;
/* compute input errors */
for (unsigned h = 0; h < numHidden; h++)
inputError += inputWeights[i * numHidden + h] * deltaErrorsVoltage[h];
inputErrorsOverTime[t * numInputs + i] = inputError;
}
/* add errors to input gradients */
for (unsigned index = i; index < numInputs * numHidden; index += blockDim.x) {
const unsigned h = index / numInputs;
const unsigned i = index % numInputs;
inputBackPropagationGradients[index] +=
deltaErrorsVoltage[h] * inputSpikesOverTime[t * numInputs + i];
}
/* add errors to hidden gradients */
if (t > 0) {
for (unsigned index = i; index < numHidden * numHidden; index += blockDim.x) {
const unsigned ho = index / numHidden;
const unsigned hi = index % numHidden;
if (spikesOverTime[(t - 1) * (numInputs + numHidden) + numInputs + hi] != 0)
hiddenBackPropagationGradients[index] += deltaErrorsVoltage[ho];
}
}
/* add errors to output gradients */
for (unsigned index = i; index < numHidden * numOutputs; index += blockDim.x) {
const unsigned o = index / numHidden;
const unsigned h = index % numHidden;
if (spikesOverTime[t * (numInputs + numHidden) + numInputs + h] != 0)
leakyReadoutGradients[index] += filteredOutputErrors[o];
}
__syncthreads();
}
fullyConnectedFastFiringRateKernel(
numInputs,
numHidden,
targetFiringRate,
firingRateScallingFactor,
numSpikes,
numSpikes + numInputs,
firingRates,
inputFiringRateGradients
);
fullyConnectedFastFiringRateKernel(
numHidden,
numHidden,
targetFiringRate,
firingRateScallingFactor,
numSpikes + numInputs,
numSpikes + numInputs,
firingRates,
hiddenFiringRateGradients
);
__syncthreads();
}
}
}
}
#endif /* __LONG_SHORT_TERM_MEMORY_BACK_PROPAGATION_BACKWARD_PASS_KERNEL__ */
|
bf4cce3af43bd151f2fb88317daa209012517a6e.cu
|
#ifndef __LONG_SHORT_TERM_MEMORY_BACK_PROPAGATION_BACKWARD_PASS_KERNEL__
#define __LONG_SHORT_TERM_MEMORY_BACK_PROPAGATION_BACKWARD_PASS_KERNEL__
#include "FullyConnectedLeakyIntegrateAndFireKernel.cu"
#include "FullyConnectedAdaptiveLeakyIntegrateAndFireKernel.cu"
#include "FullyConnectedBackPropagationInputOutputKernel.cu"
#include "FullyConnectedInputSpikePropagationKernel.cu"
#include "FullyConnectedHiddenSpikePropagationKernel.cu"
#include "LongShortTermMemoryLearnSignalKernel.cu"
#include "FullyConnectedDeltaErrorKernel.cu"
#include "FullyConnectedFastFiringRateKernel.cu"
#include "LongShortTermMemoryLeakyReadoutGradientKernel.cu"
#include "LongShortTermMemoryInputEligibilityLearnSignalErrorKernel.cu"
/**
* Parallel kernel for a Long Short Term Spiking Network
*/
namespace SNN {
namespace Kernels {
namespace GPU {
#define ERROR_MODE_REGRESSION 0
#define ERROR_MODE_CLASSIFICATION 1
#define ERROR_MODE_INFINITY 2
#define ERROR_MODE_LEARNSIGNAL 3
__device__ void longShortTermMemoryBackPropagationBackwardPassKernel(
/* the number of input neurons */
unsigned numInputs,
            /* the number of (leaky integrate and fire) hidden neurons */
unsigned numStandartHidden,
/* the number of (adaptive leaky integrate and fire) hidden neurons */
unsigned numAdaptiveHidden,
/* the number of output neurons */
unsigned numOutputs,
            /* the number of simulation time steps */
unsigned numSimulationTimesteps,
/* the simulation start and end time */
int startTime,
int endTime,
/* the error mode of this */
unsigned errorMode,
/* the simulation timestep length */
FloatType timeStepLength,
/* neuron spike threshold */
FloatType spikeThreshold,
            /* neuron refractory period */
FloatType refactoryPeriod,
/* the hidden voltage decay factor */
FloatType hiddenDecayFactor,
/* the readout voltage decay factor */
FloatType readoutDecayFactor,
/* the decay factor for the adaptive threshold */
FloatType adaptationDecayFactor,
/* the factor about which the base threshold increases */
FloatType thresholdIncreaseConstant,
/* the target firing rate */
FloatType targetFiringRate,
            /* the firing rate gradient scaling factor */
FloatType firingRateScallingFactor,
/* the derivative dumping factor */
FloatType derivativeDumpingFactor,
/* the input neuron spikes over one simulation run */
FloatType *inputSpikesOverTime,
/* the input and hidden neuron spikes over one simulation run */
FloatType *spikesOverTime,
/* the hidden neurons firing rates */
FloatType *firingRates,
/* the number of spikes per neuron */
FloatType *numSpikes,
/* the synaptic input weights */
FloatType *inputWeights,
/* the synaptic hidden weights */
FloatType *hiddenWeights,
/* the synaptic output weights */
FloatType *outputWeights,
/* the feedback weights */
FloatType *feedbackWeights,
/* the network target weights */
FloatType *targetWeights,
            /* the network targets for one simulation run */
FloatType *targetsOverTime,
            /* the network outputs for one simulation run */
FloatType *outputsOverTime,
/* the network output errors for one simulation run */
FloatType *outputErrorsOverTime,
/* the network derivatives for one simulation run */
FloatType *derivativesOverTime,
/* the network derivatives for the last simulation run */
FloatType *oldDerivativesOverTime,
/* the network hidden voltage for the last simulation run */
FloatType *voltageOverTime,
/* time since last spike for hidden neurons over time */
FloatType *timeStepsSinceLastSpikeOverTime,
/* the neurons adaptation values over time */
FloatType *thresholdAdaptationOverTime,
/* the network error mask for one simulation run */
FloatType *errorMaskOverTime,
/* the network error factors for one simulation run */
FloatType *outputErrorFactorOverTime,
/* the back propagation gradients for input synapses */
FloatType *inputBackPropagationGradients,
/* the firing rate gradients for input synapses */
FloatType *inputFiringRateGradients,
/* the back propagation gradients for hidden synapses */
FloatType *hiddenBackPropagationGradients,
/* the firing rate gradients for hidden synapses */
FloatType *hiddenFiringRateGradients,
/* the leaky readout gradients */
FloatType *leakyReadoutGradients,
/* the networks squared summed error */
FloatType *networkError,
/* the network summed target for each output */
FloatType *summedTargets,
/* the network squared summed target for each output */
FloatType *squaredSummedTargets,
/* the number of values summed for each output */
FloatType *numSummedValues,
/* the networks classification accuracy error */
FloatType *classificationAccuracy,
/* the networks number of classification samples */
FloatType *classificationSamples,
/***** content managed by kernel ******/
/* the filtered hidden spikes */
FloatType *filteredSpikes,
/* the filtered hidden spikes (by the readout decay factor) */
FloatType *readoutDecayFilteredSpikes,
/* the neurons adaptation values */
FloatType *thresholdAdaptation,
/* hidden derivatives */
FloatType *derivatives,
/* input current for hidden and output neurons */
FloatType *I,
/* hidden and readout voltage */
FloatType *v,
/* hidden spikes */
FloatType *hiddenSpikes,
/* time since last spike for hidden neurons */
FloatType *timeStepsSinceLastSpike,
/* hidden neuron delta errors (voltage component) */
FloatType *deltaErrorsVoltage,
/* hidden neuron delta errors (adaptation component) */
FloatType *deltaErrorsAdaption,
            /* the input errors for one simulation run */
FloatType *inputErrorsOverTime,
/* the filtered (back propagated) output errors */
FloatType *filteredOutputErrors,
/* summed network output for classification */
FloatType *summedActivation
) {
const unsigned numHidden = numStandartHidden + numAdaptiveHidden;
cudaAssert(max(max(numInputs, numHidden), numOutputs) == blockDim.x);
inputSpikesOverTime += blockIdx.x * numInputs * numSimulationTimesteps;
spikesOverTime += blockIdx.x * (numInputs + numHidden) * numSimulationTimesteps;
numSpikes += blockIdx.x * (numHidden + numInputs);
targetsOverTime += blockIdx.x * numOutputs * numSimulationTimesteps;
outputsOverTime += blockIdx.x * numOutputs * numSimulationTimesteps;
outputErrorsOverTime += blockIdx.x * numOutputs * numSimulationTimesteps;
derivativesOverTime += blockIdx.x * numHidden * numSimulationTimesteps;
oldDerivativesOverTime += blockIdx.x * numHidden * numSimulationTimesteps;
voltageOverTime += blockIdx.x * numHidden * numSimulationTimesteps;
timeStepsSinceLastSpikeOverTime += blockIdx.x * numHidden * numSimulationTimesteps;
thresholdAdaptationOverTime += blockIdx.x * numAdaptiveHidden * numSimulationTimesteps;
errorMaskOverTime += blockIdx.x * numSimulationTimesteps;
outputErrorFactorOverTime += blockIdx.x * numOutputs * numSimulationTimesteps;
inputBackPropagationGradients += blockIdx.x * numInputs * numHidden;
inputFiringRateGradients += blockIdx.x * numInputs * numHidden;
hiddenBackPropagationGradients += blockIdx.x * numHidden * numHidden;
hiddenFiringRateGradients += blockIdx.x * numHidden * numHidden;
leakyReadoutGradients += blockIdx.x * numHidden * numOutputs;
networkError += blockIdx.x;
summedTargets += blockIdx.x;
squaredSummedTargets += blockIdx.x;
numSummedValues += blockIdx.x;
readoutDecayFilteredSpikes += blockIdx.x * numHidden;
thresholdAdaptation += blockIdx.x * numAdaptiveHidden;
derivatives += blockIdx.x * numHidden;
I += blockIdx.x * (numHidden + numOutputs);
v += blockIdx.x * (numHidden + numOutputs);
hiddenSpikes += blockIdx.x * numHidden;
timeStepsSinceLastSpike += blockIdx.x * numHidden;
deltaErrorsVoltage += blockIdx.x * numHidden;
deltaErrorsAdaption += blockIdx.x * numAdaptiveHidden;
inputErrorsOverTime += blockIdx.x * numInputs * numSimulationTimesteps;
filteredOutputErrors += blockIdx.x * numOutputs;
summedActivation += blockIdx.x * numOutputs;
classificationAccuracy += blockIdx.x;
classificationSamples += blockIdx.x;
if (startTime < 0) startTime = 0;
if (endTime < 0) endTime = numSimulationTimesteps;
__syncthreads();
const int i = threadIdx.x;
for (unsigned index = 0; index + i < numInputs * numHidden; index += blockDim.x) {
inputFiringRateGradients[index + i] = 0;
inputBackPropagationGradients[index + i] = 0;
}
for (unsigned index = 0; index + i < numHidden * numHidden; index += blockDim.x) {
hiddenFiringRateGradients[index + i] = 0;
hiddenBackPropagationGradients[index + i] = 0;
}
for (unsigned index = 0; index + i < numHidden * numOutputs; index += blockDim.x) {
leakyReadoutGradients[index + i] = 0;
}
if (startTime > 0) {
startTime = startTime % numSimulationTimesteps;
endTime = ((endTime - 1) % numSimulationTimesteps) + 1;
}
/* backpropagation loop */
for (int t = endTime - 1; t >= startTime; t--) {
if (i < numOutputs) {
if (errorMode != ERROR_MODE_LEARNSIGNAL) {
filteredOutputErrors[i] = filteredOutputErrors[i] * readoutDecayFactor +
(outputsOverTime[t * numOutputs + i] - targetsOverTime[t * numOutputs + i]) *
errorMaskOverTime[t];
} else {
filteredOutputErrors[i] = filteredOutputErrors[i] * readoutDecayFactor +
outputErrorsOverTime[t * numOutputs + i];
}
}
__syncthreads();
fullyConnectedDeltaErrorKernel(
numInputs,
numHidden,
numOutputs,
numStandartHidden,
numAdaptiveHidden,
spikeThreshold,
thresholdIncreaseConstant,
adaptationDecayFactor,
hiddenDecayFactor,
timeStepLength,
filteredOutputErrors,
targetWeights,
outputErrorFactorOverTime + t * numOutputs,
deltaErrorsVoltage,
deltaErrorsAdaption,
spikesOverTime + t * (numInputs + numHidden) + numInputs,
derivativesOverTime + t * numHidden,
voltageOverTime + t * numHidden,
hiddenWeights,
outputWeights
);
__syncthreads();
if (i < numInputs) {
FloatType inputError = 0;
/* compute input errors */
for (unsigned h = 0; h < numHidden; h++)
inputError += inputWeights[i * numHidden + h] * deltaErrorsVoltage[h];
inputErrorsOverTime[t * numInputs + i] = inputError;
}
/* add errors to input gradients */
for (unsigned index = i; index < numInputs * numHidden; index += blockDim.x) {
const unsigned h = index / numInputs;
const unsigned i = index % numInputs;
inputBackPropagationGradients[index] +=
deltaErrorsVoltage[h] * inputSpikesOverTime[t * numInputs + i];
}
/* add errors to hidden gradients */
if (t > 0) {
for (unsigned index = i; index < numHidden * numHidden; index += blockDim.x) {
const unsigned ho = index / numHidden;
const unsigned hi = index % numHidden;
if (spikesOverTime[(t - 1) * (numInputs + numHidden) + numInputs + hi] != 0)
hiddenBackPropagationGradients[index] += deltaErrorsVoltage[ho];
}
}
/* add errors to output gradients */
for (unsigned index = i; index < numHidden * numOutputs; index += blockDim.x) {
const unsigned o = index / numHidden;
const unsigned h = index % numHidden;
if (spikesOverTime[t * (numInputs + numHidden) + numInputs + h] != 0)
leakyReadoutGradients[index] += filteredOutputErrors[o];
}
__syncthreads();
}
fullyConnectedFastFiringRateKernel(
numInputs,
numHidden,
targetFiringRate,
firingRateScallingFactor,
numSpikes,
numSpikes + numInputs,
firingRates,
inputFiringRateGradients
);
fullyConnectedFastFiringRateKernel(
numHidden,
numHidden,
targetFiringRate,
firingRateScallingFactor,
numSpikes + numInputs,
numSpikes + numInputs,
firingRates,
hiddenFiringRateGradients
);
__syncthreads();
}
}
}
}
#endif /* __LONG_SHORT_TERM_MEMORY_BACK_PROPAGATION_BACKWARD_PASS_KERNEL__ */
|
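The long kernel in the pair above is organised around one idea: every per-example buffer is advanced by blockIdx.x times its per-example stride, so each thread block processes one batch element independently. A minimal standalone sketch of that pattern follows; the names are illustrative and not taken from the file.
#include <cuda_runtime.h>

__global__ void perBatchScale(float *data, int elemsPerBatch, float factor) {
    data += blockIdx.x * elemsPerBatch;                // each block works on its own slice
    for (int i = threadIdx.x; i < elemsPerBatch; i += blockDim.x)
        data[i] *= factor;                             // block-strided loop over the slice
}

int main() {
    const int batches = 4, elems = 1000;
    float *d = nullptr;
    cudaMalloc(&d, batches * elems * sizeof(float));
    cudaMemset(d, 0, batches * elems * sizeof(float));
    perBatchScale<<<batches, 128>>>(d, elems, 2.0f);   // gridDim.x equals the batch size
    cudaDeviceSynchronize();
    cudaFree(d);
    return 0;
}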
565e4ba82b679beecae556c02c3e9378aecb549e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Rashid Hafez
*/
#include "ISplit.h"
dim3 GRID;
dim3 BLOCK;
static hipDeviceProp_t PROPS;
/******************************
Increment Kernel
*******************************/
__global__ void Incr(float * aC, unsigned long n, unsigned long long it){
unsigned long long x = (unsigned long long)threadIdx.x + (unsigned long long)blockIdx.x * (unsigned long long)blockDim.x;
unsigned long long y = (unsigned long long) threadIdx.y + (unsigned long long)blockIdx.y * (unsigned long long)blockDim.y;
	unsigned long long offset = x + y * (unsigned long long)blockDim.x * (unsigned long long)gridDim.x; // global linear offset, valid for 1D and 2D launch configurations
	if(offset<n){ // valid indices are 0..n-1
aC[offset]*=3.3;
}
}
void ISplit(float * & arr, unsigned long sz, hipDeviceProp_t* prop){
}
void setProp(int d){
gpuErrchk(hipSetDevice(d));
gpuErrchk(hipGetDeviceProperties(&PROPS,d));
}
hipDeviceProp_t getProp(){
return(PROPS);
}
|
565e4ba82b679beecae556c02c3e9378aecb549e.cu
|
/*
Rashid Hafez
*/
#include "ISplit.h"
dim3 GRID;
dim3 BLOCK;
static cudaDeviceProp PROPS;
/******************************
Increment Kernel
*******************************/
__global__ void Incr(float * aC, unsigned long n, unsigned long long it){
unsigned long long x = (unsigned long long)threadIdx.x + (unsigned long long)blockIdx.x * (unsigned long long)blockDim.x;
unsigned long long y = (unsigned long long) threadIdx.y + (unsigned long long)blockIdx.y * (unsigned long long)blockDim.y;
	unsigned long long offset = x + y * (unsigned long long)blockDim.x * (unsigned long long)gridDim.x; // global linear offset, valid for 1D and 2D launch configurations
	if(offset<n){ // valid indices are 0..n-1
aC[offset]*=3.3;
}
}
void ISplit(float * & arr, unsigned long sz, cudaDeviceProp* prop){
}
void setProp(int d){
gpuErrchk(cudaSetDevice(d));
gpuErrchk(cudaGetDeviceProperties(&PROPS,d));
}
cudaDeviceProp getProp(){
return(PROPS);
}
|
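ISplit is left as an empty stub in both versions of the pair above, so only the Incr kernel is usable. A minimal host-side sketch, assuming the Incr kernel defined above, of launching it with a 1D configuration over an array of sz elements; the helper name launchIncr and the block size are illustrative.
#include <cuda_runtime.h>

static void launchIncr(float *d_arr, unsigned long sz) {
    const unsigned block = 256;
    unsigned long grid = (sz + block - 1) / block;       // ceiling division
    Incr<<<(unsigned)grid, block>>>(d_arr, sz, 0ULL);    // the third argument is unused by the kernel body
    cudaDeviceSynchronize();
}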
5428ca8234f089baf18d63d016783313a7bd4bbd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sumaVectores.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_a = NULL;
hipMalloc(&d_a, XSIZE*YSIZE);
float *d_b = NULL;
hipMalloc(&d_b, XSIZE*YSIZE);
float *d_c = NULL;
hipMalloc(&d_c, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((sumaVectores), dim3(gridBlock), dim3(threadBlock), 0, 0, d_a, d_b, d_c);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL((sumaVectores), dim3(gridBlock), dim3(threadBlock), 0, 0, d_a, d_b, d_c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL((sumaVectores), dim3(gridBlock), dim3(threadBlock), 0, 0, d_a, d_b, d_c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
5428ca8234f089baf18d63d016783313a7bd4bbd.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sumaVectores.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_a = NULL;
cudaMalloc(&d_a, XSIZE*YSIZE);
float *d_b = NULL;
cudaMalloc(&d_b, XSIZE*YSIZE);
float *d_c = NULL;
cudaMalloc(&d_c, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sumaVectores<<<gridBlock,threadBlock>>>(d_a,d_b,d_c);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sumaVectores<<<gridBlock,threadBlock>>>(d_a,d_b,d_c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sumaVectores<<<gridBlock,threadBlock>>>(d_a,d_b,d_c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
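The benchmark in the pair above launches each configuration 1000 times and reads a host steady_clock without a final synchronisation, so it largely measures launch/enqueue overhead rather than kernel execution time. A standalone sketch of event-based timing that waits for the queued work; the dummy kernel is illustrative and is not the sumaVectores kernel, whose source lives in the included sumaVectores.cu.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy(float *a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) a[i] += 1.0f;
}

int main() {
    const int n = 1 << 20, iters = 1000;
    float *d = nullptr;
    cudaMalloc(&d, n * sizeof(float));
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < iters; ++i)
        dummy<<<(n + 255) / 256, 256>>>(d, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                    // wait until all queued kernels have finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("average kernel time: %.3f us\n", 1000.0f * ms / iters);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d);
    return 0;
}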
d715e6756651d51fa52dff12293a4f8e32eabf77.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void kernalNaiveScan(int n, int d, int* input, int* output) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n) return;
// if k >= 2 ^ (d - 1) <-- see example 2 in Ch 39 Patch
if (k >= (1 << (d - 1))) {
output[k] = input[k - (1 << (d - 1))] + input[k];
}
else {
output[k] = input[k];
}
}
__global__ void kernalInc2Exc(int n, int* input, int* output) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n) return;
// shift everything to the right
// the default is 0
if (k == 0) {
output[k] = 0;
}
else {
output[k] = input[k - 1];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// set up the blocks and grids
int blockSize = 64;
dim3 blocksPerGrid((n + blockSize - 1) / blockSize);
dim3 threadsPerBlock(blockSize);
// initialize some temporary buffers to write in place
int* temp_input;
hipMalloc((void**)&temp_input, n * sizeof(int));
// fill temp input buffer with the original input
hipMemcpy(temp_input, idata, n * sizeof(int), hipMemcpyHostToDevice);
int* temp_output;
hipMalloc((void**)&temp_output, n * sizeof(int));
timer().startGpuTimer();
// iterate through for d = 1 to d = ilog2ceil(n)
for (int d = 1; d <= ilog2ceil(n); d++) {
// during each time, we want to call kernel to parallel scan
// from input to output
hipLaunchKernelGGL(( kernalNaiveScan), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, n, d, temp_input, temp_output);
// remember to swap the buffers!
std::swap(temp_input, temp_output);
}
// we want an exclusive scan so we have to convert
hipLaunchKernelGGL(( kernalInc2Exc), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, n, temp_input, temp_output);
timer().endGpuTimer();
// now we want to write everything to our real output buffer
hipMemcpy(odata, temp_output, n * sizeof(int), hipMemcpyDeviceToHost);
// cleanup
hipFree(temp_input);
hipFree(temp_output);
}
}
}
|
d715e6756651d51fa52dff12293a4f8e32eabf77.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void kernalNaiveScan(int n, int d, int* input, int* output) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n) return;
// if k >= 2 ^ (d - 1) <-- see example 2 in Ch 39 Patch
if (k >= (1 << (d - 1))) {
output[k] = input[k - (1 << (d - 1))] + input[k];
}
else {
output[k] = input[k];
}
}
__global__ void kernalInc2Exc(int n, int* input, int* output) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n) return;
// shift everything to the right
// the default is 0
if (k == 0) {
output[k] = 0;
}
else {
output[k] = input[k - 1];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// set up the blocks and grids
int blockSize = 64;
dim3 blocksPerGrid((n + blockSize - 1) / blockSize);
dim3 threadsPerBlock(blockSize);
// initialize some temporary buffers to write in place
int* temp_input;
cudaMalloc((void**)&temp_input, n * sizeof(int));
// fill temp input buffer with the original input
cudaMemcpy(temp_input, idata, n * sizeof(int), cudaMemcpyHostToDevice);
int* temp_output;
cudaMalloc((void**)&temp_output, n * sizeof(int));
timer().startGpuTimer();
// iterate through for d = 1 to d = ilog2ceil(n)
for (int d = 1; d <= ilog2ceil(n); d++) {
// during each time, we want to call kernel to parallel scan
// from input to output
kernalNaiveScan<<<blocksPerGrid, threadsPerBlock>>>(n, d, temp_input, temp_output);
// remember to swap the buffers!
std::swap(temp_input, temp_output);
}
// we want an exclusive scan so we have to convert
kernalInc2Exc<<<blocksPerGrid, threadsPerBlock>>>(n, temp_input, temp_output);
timer().endGpuTimer();
// now we want to write everything to our real output buffer
cudaMemcpy(odata, temp_output, n * sizeof(int), cudaMemcpyDeviceToHost);
// cleanup
cudaFree(temp_input);
cudaFree(temp_output);
}
}
}
|
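A minimal host-side usage sketch for the scan in the pair above, assuming the surrounding project's headers (common.h / naive.h) declare StreamCompaction::Naive::scan exactly as defined here; both arrays live in ordinary host memory because scan() performs the device copies internally.
#include <cstdio>
#include "naive.h"

int main() {
    const int n = 8;
    int in[n]  = {3, 1, 7, 0, 4, 1, 6, 3};
    int out[n] = {0};
    StreamCompaction::Naive::scan(n, out, in);           // exclusive prefix sum
    for (int i = 0; i < n; ++i)
        printf("%d ", out[i]);                           // expected: 0 3 4 11 11 15 16 22
    printf("\n");
    return 0;
}
This naive Hillis-Steele formulation performs O(n log n) additions; a work-efficient Blelloch scan would bring that down to O(n).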
4d8c3716b50141faf451d52095c939ee84444f56.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "squeezenet_params.h"
///#include "dog.h"
#include "hourglass.h"
//#include "tiger.h"
//#include "truck.h"
__global__ void maxpool(
int input_size, int output_size,
float *input_im, float *output_im){
int channels = blockIdx.x * blockDim.x + threadIdx.x;
input_im += channels*input_size*input_size;
output_im += channels*output_size*output_size;
for(int i=0; i<output_size; i++){
for(int j=0; j<output_size; j++){
float tmp = 0.0;
for(int k =0; k<3; k++){
for(int l = 0; l<3; l++){
float value = input_im[(i * 2 + k) * input_size + j *2 +l];
if(value > tmp)
tmp = value;
}
}
output_im[i*output_size + j] = tmp;
}
}
}
__global__ void conv3x3(
int input_channels, int input_size,
int pad, int stride, int start_channel,
int output_size, float* input_im, float* filter_weight,
float* filter_bias, float* output_im){
int filter_index = blockIdx.x * blockDim.x + threadIdx.x;
filter_weight += filter_index * input_channels * 9;
float bias = filter_bias[filter_index];
output_im += (start_channel + filter_index) * output_size * output_size;
//loop over output feature map
for(int i = 0; i < output_size; i++)
{
for(int j = 0; j < output_size; j++)
{
//compute one element in the output feature map
float tmp = bias;
//dot product of the filter with the input_channels x 3 x 3 input window
for(int k = 0; k < input_channels; k++)
{
for(int l = 0; l < 3; l++)
{
int h = i * stride + l - pad;
for(int m = 0; m < 3; m++)
{
int w = j * stride + m - pad;
if((h >= 0) && (h < input_size) && (w >= 0) && (w < input_size))
{
tmp += input_im[k * input_size * input_size + (i * stride + l - pad) * input_size + j * stride + m - pad] \
* filter_weight[9 * k + 3 * l + m];
}
}
}
}
//add relu activation after conv
output_im[i * output_size + j] = (tmp > 0.0) ? tmp : 0.0;
}
}
}
__global__ void conv1x1(
int input_channels, int input_size,int threads,
float* input_im, float* filter_weight,
float* filter_bias, float* output_im){
int filter_index = blockIdx.x * blockDim.x + threadIdx.x;
if(filter_index<threads){
filter_weight += filter_index * input_channels;
float bias = filter_bias[filter_index];
output_im += filter_index * input_size * input_size;//start_channel is for 1x1 feature map in fire layer
//loop over output feature map
//out
for(int i = 0; i < input_size; i++)
{
for(int j = 0; j < input_size; j++)
{
float tmp = bias;
for(int k = 0; k < input_channels; k++)
{
tmp += input_im[k * input_size * input_size + i * input_size + j] * filter_weight[k];
}
//add relu after conv
output_im[i * input_size + j] = (tmp > 0.0) ? tmp : 0.0;
}
}
}
}
//last layer uses a 13 x 13 average pool as the classifier
//one class score per thread
__global__ void avgpool(
int threads,
float* input_im,
float* output_im)
{
int class_index = blockIdx.x * blockDim.x + threadIdx.x;//get class score index
if(class_index<threads){
input_im += 169 * class_index;
float tmp = 0.0f;
for(int i = 0; i < 169; i++)
{
tmp += input_im[i];
}
output_im[class_index] = tmp / 169.0;
}
}
void getLabel(unsigned int class_index);
void cleanup();
float * h_result_classifier = (float*)malloc((1000)*sizeof(float));
char class_label[201];
float * d_sample;
float * d_conv1_weight;
float * d_conv1_bias;
float * d_result_conv;
float * d_result_pool1;
float * d_result_block1_squeeze;
float * d_result_block1_expand;
float * d_result_pool2;
float * d_fire1_squeeze_weight;
float * d_fire1_squeeze_bias;
float * d_fire1_expand1x1_weight;
float * d_fire1_expand1x1_bias;
float * d_fire1_expand3x3_weight;
float * d_fire1_expand3x3_bias;
float * d_fire2_squeeze_weight;
float * d_fire2_squeeze_bias;
float * d_fire2_expand1x1_weight;
float * d_fire2_expand1x1_bias;
float * d_fire2_expand3x3_weight;
float * d_fire2_expand3x3_bias;
float * d_result_block2_squeeze;
float * d_result_block2_expand;
float * d_result_pool3;
float * d_fire3_squeeze_weight;
float * d_fire3_squeeze_bias;
float * d_fire3_expand1x1_weight;
float * d_fire3_expand1x1_bias;
float * d_fire3_expand3x3_weight;
float * d_fire3_expand3x3_bias;
float * d_fire4_squeeze_weight;
float * d_fire4_squeeze_bias;
float * d_fire4_expand1x1_weight;
float * d_fire4_expand1x1_bias;
float * d_fire4_expand3x3_weight;
float * d_fire4_expand3x3_bias;
float * d_result_block3_squeeze1;
float * d_result_block3_expand1;
float * d_result_block3_squeeze2;
float * d_result_block3_expand2;
float * d_fire5_squeeze_weight;
float * d_fire5_squeeze_bias;
float * d_fire5_expand1x1_weight;
float * d_fire5_expand1x1_bias;
float * d_fire5_expand3x3_weight;
float * d_fire5_expand3x3_bias;
float * d_fire6_squeeze_weight;
float * d_fire6_squeeze_bias;
float * d_fire6_expand1x1_weight;
float * d_fire6_expand1x1_bias;
float * d_fire6_expand3x3_weight;
float * d_fire6_expand3x3_bias;
float * d_fire7_squeeze_weight;
float * d_fire7_squeeze_bias;
float * d_fire7_expand1x1_weight;
float * d_fire7_expand1x1_bias;
float * d_fire7_expand3x3_weight;
float * d_fire7_expand3x3_bias;
float * d_fire8_squeeze_weight;
float * d_fire8_squeeze_bias;
float * d_fire8_expand1x1_weight;
float * d_fire8_expand1x1_bias;
float * d_fire8_expand3x3_weight;
float * d_fire8_expand3x3_bias;
float * d_classifier_conv_weight;
float * d_classifier_conv_bias;
float * d_result_classifier_conv;
float * d_result_classifier;
int main(){
float time,total_time=0.0;
hipEvent_t start, stop;
// conv1 and fire 1
hipMalloc(&d_sample,3*224*224*sizeof(float));
hipMalloc(&d_conv1_weight,sizeof(conv1_weight));
hipMalloc(&d_conv1_bias,sizeof(conv1_bias));
hipMalloc(&d_result_conv,sizeof(float) * (1 * 64 * 111 * 111));
hipMalloc(&d_result_pool1, sizeof(float) * (1 * 64 * 55 * 55));
hipMalloc(&d_result_block1_squeeze,sizeof(float) * (1 * 16* 55 * 55));
hipMalloc(&d_result_block1_expand,sizeof(float) * (1 * 128 * 55 * 55));
hipMalloc(&d_result_pool2,sizeof(float) * (1 * 128 * 27 * 27));
hipMalloc(&d_fire1_squeeze_weight,sizeof(fire1_squeeze_weight));
//printf("%d\n",sizeof(fire1_squeeze_weight)/sizeof(float));
//printf("%d\n",sizeof(fire1_squeeze_bias)/sizeof(float));
hipMalloc(&d_fire1_squeeze_bias,sizeof(fire1_squeeze_bias));
hipMalloc(&d_fire1_expand1x1_weight,sizeof(fire1_expand1x1_weight));
//printf("fire1_expand1x1_weight:%d\n",sizeof(fire1_expand1x1_weight)/sizeof(float));
hipMalloc(&d_fire1_expand1x1_bias,sizeof(fire1_expand1x1_bias));
//printf("fire1_expand1x1_bias:%d\n",sizeof(fire1_expand1x1_bias)/sizeof(float));
hipMalloc(&d_fire1_expand3x3_weight,sizeof(fire1_expand3x3_weight));
//printf("fire1_expand3x3_weight:%d\n",sizeof(fire1_expand3x3_weight)/sizeof(float));
hipMalloc(&d_fire1_expand3x3_bias,sizeof(fire1_expand3x3_bias));
//printf("fire1_expand3x3_bias:%d\n",sizeof(fire1_expand3x3_bias)/sizeof(float));
//fire2
hipMalloc(&d_fire2_squeeze_weight,sizeof(fire2_squeeze_weight));
//printf("fire2_squeeze_weight:%d\n",sizeof(fire2_squeeze_weight)/sizeof(float));
hipMalloc(&d_fire2_squeeze_bias,sizeof(fire2_squeeze_bias));
//printf("fire2_squeeze_bias:%d\n",sizeof(fire2_squeeze_bias)/sizeof(float));
hipMalloc(&d_fire2_expand1x1_weight,sizeof(fire2_expand1x1_weight));
//printf("fire2_expand1x1_weight:%d\n",sizeof(fire2_expand1x1_weight)/sizeof(float));
hipMalloc(&d_fire2_expand1x1_bias,sizeof(fire2_expand1x1_bias));
//printf("fire2_expand1x1_bias:%d\n",sizeof(fire2_expand1x1_bias)/sizeof(float));
hipMalloc(&d_fire2_expand3x3_weight,sizeof(fire2_expand3x3_weight));
//printf("fire2_expand3x3_weight:%d\n",sizeof(fire2_expand3x3_weight)/sizeof(float));
hipMalloc(&d_fire2_expand3x3_bias,sizeof(fire2_expand3x3_bias));
//printf("fire2_expand3x3_bias:%d\n",sizeof(fire2_expand3x3_bias)/sizeof(float));
//block2
hipMalloc(&d_result_block2_squeeze,sizeof(float) * (1 * 32 * 27 * 27));
hipMalloc(&d_result_block2_expand,sizeof(float) * (1 * 256 * 27 *27));
hipMalloc(&d_result_pool3,sizeof(float) * (1 * 256 * 13 * 13));
//fire3
hipMalloc(&d_fire3_squeeze_weight,sizeof(fire3_squeeze_weight));
//printf("fire3_squeeze_weight:%d\n",sizeof(fire3_squeeze_weight)/sizeof(float));
hipMalloc(&d_fire3_squeeze_bias,sizeof(fire3_squeeze_bias));
//printf("fire3_squeeze_bias:%d\n",sizeof(fire3_squeeze_bias)/sizeof(float));
hipMalloc(&d_fire3_expand1x1_weight,sizeof(fire3_expand1x1_weight));
//printf("fire3_expand1x1_weight:%d\n",sizeof(fire3_expand1x1_weight)/sizeof(float));
hipMalloc(&d_fire3_expand1x1_bias,sizeof(fire3_expand1x1_bias));
//printf("fire3_expand1x1_bias:%d\n",sizeof(fire3_expand1x1_bias)/sizeof(float));
hipMalloc(&d_fire3_expand3x3_weight,sizeof(fire3_expand3x3_weight));
//printf("fire3_expand3x3_weight:%d\n",sizeof(fire3_expand3x3_weight)/sizeof(float));
hipMalloc(&d_fire3_expand3x3_bias,sizeof(fire3_expand3x3_bias));
//printf("fire3_expand3x3_bias:%d\n",sizeof(fire3_expand3x3_bias)/sizeof(float));
//fire4
hipMalloc(&d_fire4_squeeze_weight,sizeof(fire4_squeeze_weight));
//printf("fire4_squeeze_weight:%d\n",sizeof(fire4_squeeze_weight)/sizeof(float));
hipMalloc(&d_fire4_squeeze_bias,sizeof(fire4_squeeze_bias));
//printf("fire4_squeeze_bias:%d\n",sizeof(fire4_squeeze_bias)/sizeof(float));
hipMalloc(&d_fire4_expand1x1_weight,sizeof(fire4_expand1x1_weight));
//printf("fire4_expand1x1_weight:%d\n",sizeof(fire4_expand1x1_weight)/sizeof(float));
hipMalloc(&d_fire4_expand1x1_bias,sizeof(fire4_expand1x1_bias));
//printf("fire4_expand1x1_bias:%d\n",sizeof(fire4_expand1x1_bias)/sizeof(float));
hipMalloc(&d_fire4_expand3x3_weight,sizeof(fire4_expand3x3_weight));
//printf("fire4_expand3x3_weight:%d\n",sizeof(fire4_expand3x3_weight)/sizeof(float));
hipMalloc(&d_fire4_expand3x3_bias,sizeof(fire4_expand3x3_bias));
//printf("fire4_expand3x3_bias:%d\n",sizeof(fire4_expand3x3_bias)/sizeof(float));
hipMalloc(&d_result_block3_squeeze1,sizeof(float) * (1 * 48 * 13 * 13));
hipMalloc(&d_result_block3_expand1,sizeof(float) * (1 * 384 * 13 * 13));
hipMalloc(&d_result_block3_squeeze2,sizeof(float) * (1 * 64 * 13 * 13));
hipMalloc(&d_result_block3_expand2,sizeof(float) * (1 * 512 * 13 * 13));
//fire5
hipMalloc(&d_fire5_squeeze_weight,sizeof(fire5_squeeze_weight));
//printf("fire5_squeeze_weight:%d\n",sizeof(fire5_squeeze_weight)/sizeof(float));
hipMalloc(&d_fire5_squeeze_bias,sizeof(fire5_squeeze_bias));
//printf("fire5_squeeze_bias:%d\n",sizeof(fire5_squeeze_bias)/sizeof(float));
hipMalloc(&d_fire5_expand1x1_weight,sizeof(fire5_expand1x1_weight));
//printf("fire5_expand1x1_weight:%d\n",sizeof(fire5_expand1x1_weight)/sizeof(float));
hipMalloc(&d_fire5_expand1x1_bias,sizeof(fire5_expand1x1_bias));
//printf("fire5_expand1x1_bias:%d\n",sizeof(fire5_expand1x1_bias)/sizeof(float));
hipMalloc(&d_fire5_expand3x3_weight,sizeof(fire5_expand3x3_weight));
//printf("fire5_expand3x3_weight:%d\n",sizeof(fire5_expand3x3_weight)/sizeof(float));
hipMalloc(&d_fire5_expand3x3_bias,sizeof(fire5_expand3x3_bias));
//printf("fire5_expand3x3_bias:%d\n",sizeof(fire5_expand3x3_bias)/sizeof(float));
//fire 6
hipMalloc(&d_fire6_squeeze_weight,sizeof(fire6_squeeze_weight));
//printf("fire6_squeeze_weight:%d\n",sizeof(fire6_squeeze_weight)/sizeof(float));
hipMalloc(&d_fire6_squeeze_bias,sizeof(fire6_squeeze_bias));
//printf("fire6_squeeze_bias:%d\n",sizeof(fire6_squeeze_bias)/sizeof(float));
hipMalloc(&d_fire6_expand1x1_weight,sizeof(fire6_expand1x1_weight));
//printf("fire6_expand1x1_weight:%d\n",sizeof(fire6_expand1x1_weight)/sizeof(float));
hipMalloc(&d_fire6_expand1x1_bias,sizeof(fire6_expand1x1_bias));
//printf("fire6_expand1x1_bias:%d\n",sizeof(fire6_expand1x1_bias)/sizeof(float));
hipMalloc(&d_fire6_expand3x3_weight,sizeof(fire6_expand3x3_weight));
//printf("fire6_expand3x3_weight:%d\n",sizeof(fire6_expand3x3_weight)/sizeof(float));
hipMalloc(&d_fire6_expand3x3_bias,sizeof(fire6_expand3x3_bias));
//printf("fire6_expand3x3_bias:%d\n",sizeof(fire6_expand3x3_bias)/sizeof(float));
//fire 7
hipMalloc(&d_fire7_squeeze_weight,sizeof(fire7_squeeze_weight));
//printf("fire7_squeeze_weight:%d\n",sizeof(fire7_squeeze_weight)/sizeof(float));
hipMalloc(&d_fire7_squeeze_bias,sizeof(fire7_squeeze_bias));
//printf("fire7_squeeze_bias:%d\n",sizeof(fire7_squeeze_bias)/sizeof(float));
hipMalloc(&d_fire7_expand1x1_weight,sizeof(fire7_expand1x1_weight));
//printf("fire7_expand1x1_weight:%d\n",sizeof(fire7_expand1x1_weight)/sizeof(float));
hipMalloc(&d_fire7_expand1x1_bias,sizeof(fire7_expand1x1_bias));
//printf("fire7_expand1x1_bias:%d\n",sizeof(fire7_expand1x1_bias)/sizeof(float));
hipMalloc(&d_fire7_expand3x3_weight,sizeof(fire7_expand3x3_weight));
//printf("fire7_expand3x3_weight:%d\n",sizeof(fire7_expand3x3_weight)/sizeof(float));
hipMalloc(&d_fire7_expand3x3_bias,sizeof(fire7_expand3x3_bias));
//printf("fire7_expand3x3_bias:%d\n",sizeof(fire7_expand3x3_bias)/sizeof(float));
//fire 8
hipMalloc(&d_fire8_squeeze_weight,sizeof(fire8_squeeze_weight));
//printf("fire8_squeeze_weight:%d\n",sizeof(fire8_squeeze_weight)/sizeof(float));
hipMalloc(&d_fire8_squeeze_bias,sizeof(fire8_squeeze_bias));
//printf("fire8_squeeze_bias:%d\n",sizeof(fire8_squeeze_bias)/sizeof(float));
hipMalloc(&d_fire8_expand1x1_weight,sizeof(fire8_expand1x1_weight));
//printf("fire8_expand1x1_weight:%d\n",sizeof(fire8_expand1x1_weight)/sizeof(float));
hipMalloc(&d_fire8_expand1x1_bias,sizeof(fire8_expand1x1_bias));
//printf("fire8_expand1x1_bias:%d\n",sizeof(fire8_expand1x1_bias)/sizeof(float));
hipMalloc(&d_fire8_expand3x3_weight,sizeof(fire8_expand3x3_weight));
//printf("fire8_expand3x3_weight:%d\n",sizeof(fire8_expand3x3_weight)/sizeof(float));
hipMalloc(&d_fire8_expand3x3_bias,sizeof(fire8_expand3x3_bias));
//printf("fire8_expand3x3_bias:%d\n",sizeof(fire8_expand3x3_bias)/sizeof(float));
//classifier
hipMalloc(&d_classifier_conv_weight,sizeof(classifier_conv_weight));
//printf("%d\n",sizeof(classifier_conv_weight)/sizeof(float));
hipMalloc(&d_classifier_conv_bias,sizeof(classifier_conv_bias));
//printf("%d\n",sizeof(classifier_conv_bias)/sizeof(float));
hipMalloc(&d_result_classifier_conv,sizeof(float) * (1 * 1000 * 13 * 13));
hipMalloc(&d_result_classifier,sizeof(float) * 1000);
printf("squeezenet starting\n");
printf("conv1\n");
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_sample,sample,3*224*224*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(d_conv1_weight,conv1_weight,sizeof(conv1_weight),hipMemcpyHostToDevice);
hipMemcpy(d_conv1_bias,conv1_bias,sizeof(conv1_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv3x3), dim3(1),dim3(64), 0, 0, 3,224,0,2,0,111,d_sample,d_conv1_weight,d_conv1_bias,d_result_conv);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipLaunchKernelGGL(( maxpool), dim3(1),dim3(64), 0, 0, 111,55,d_result_conv,d_result_pool1);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire1_squeeze_weight,fire1_squeeze_weight,sizeof(fire1_squeeze_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire1_squeeze_bias,fire1_squeeze_bias,sizeof(fire1_squeeze_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(16), 0, 0, 64,55,16,d_result_pool1,d_fire1_squeeze_weight,d_fire1_squeeze_bias,d_result_block1_squeeze);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire1_expand1x1_weight,fire1_expand1x1_weight,sizeof(fire1_expand1x1_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire1_expand1x1_bias,fire1_expand1x1_bias,sizeof(fire1_expand1x1_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(64), 0, 0, 16,55,64,d_result_block1_squeeze,d_fire1_expand1x1_weight,d_fire1_expand1x1_bias,d_result_block1_expand);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire1_expand3x3_weight,fire1_expand3x3_weight,sizeof(fire1_expand3x3_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire1_expand3x3_bias,fire1_expand3x3_bias,sizeof(fire1_expand3x3_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv3x3), dim3(1),dim3(64), 0, 0, 16,55,1,1,64,55,d_result_block1_squeeze,d_fire1_expand3x3_weight,d_fire1_expand3x3_bias,d_result_block1_expand);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire2_squeeze_weight,fire2_squeeze_weight,sizeof(fire2_squeeze_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire2_squeeze_bias,fire2_squeeze_bias,sizeof(fire2_squeeze_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(16), 0, 0, 128,55,16,d_result_block1_expand,d_fire2_squeeze_weight,d_fire2_squeeze_bias,d_result_block1_squeeze);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
///
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire2_expand1x1_weight,fire2_expand1x1_weight,sizeof(fire2_expand1x1_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire2_expand1x1_bias,fire2_expand1x1_bias,sizeof(fire2_expand1x1_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(64), 0, 0, 16,55,64,d_result_block1_squeeze,d_fire2_expand1x1_weight,d_fire2_expand1x1_bias,d_result_block1_expand);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire2_expand3x3_weight,fire2_expand3x3_weight,sizeof(fire2_expand3x3_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire2_expand3x3_bias,fire2_expand3x3_bias,sizeof(fire2_expand3x3_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv3x3), dim3(1),dim3(64), 0, 0, 16,55,1,1,64,55,d_result_block1_squeeze,d_fire2_expand3x3_weight,d_fire2_expand3x3_bias,d_result_block1_expand);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
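//maxpool2: 128 channels, 55 x 55 -> 27 x 27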
hipLaunchKernelGGL(( maxpool), dim3(1),dim3(128), 0, 0, 55,27,d_result_block1_expand,d_result_pool2);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
//Block2
//fire3
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire3_squeeze_weight,fire3_squeeze_weight,sizeof(fire3_squeeze_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire3_squeeze_bias,fire3_squeeze_bias,sizeof(fire3_squeeze_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(32), 0, 0, 128,27,32,d_result_pool2,d_fire3_squeeze_weight,d_fire3_squeeze_bias,d_result_block2_squeeze);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire3_expand1x1_weight,fire3_expand1x1_weight,sizeof(fire3_expand1x1_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire3_expand1x1_bias,fire3_expand1x1_bias,sizeof(fire3_expand1x1_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(128), 0, 0, 32,27,128,d_result_block2_squeeze,d_fire3_expand1x1_weight,d_fire3_expand1x1_bias,d_result_block2_expand);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire3_expand3x3_weight,fire3_expand3x3_weight,sizeof(fire3_expand3x3_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire3_expand3x3_bias,fire3_expand3x3_bias,sizeof(fire3_expand3x3_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv3x3), dim3(1),dim3(128), 0, 0, 32,27,1,1,128,27,d_result_block2_squeeze,d_fire3_expand3x3_weight,d_fire3_expand3x3_bias,d_result_block2_expand);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
//fire4
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire4_squeeze_weight,fire4_squeeze_weight,sizeof(fire4_squeeze_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire4_squeeze_bias,fire4_squeeze_bias,sizeof(fire4_squeeze_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(32), 0, 0, 256,27,32,d_result_block2_expand,d_fire4_squeeze_weight,d_fire4_squeeze_bias,d_result_block2_squeeze);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire4_expand1x1_weight,fire4_expand1x1_weight,sizeof(fire4_expand1x1_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire4_expand1x1_bias,fire4_expand1x1_bias,sizeof(fire4_expand1x1_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(128), 0, 0, 32,27,128,d_result_block2_squeeze,d_fire4_expand1x1_weight,d_fire4_expand1x1_bias,d_result_block2_expand);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire4_expand3x3_weight,fire4_expand3x3_weight,sizeof(fire4_expand3x3_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire4_expand3x3_bias,fire4_expand3x3_bias,sizeof(fire4_expand3x3_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv3x3), dim3(1),dim3(128), 0, 0, 32,27,1,1,128,27,d_result_block2_squeeze,d_fire4_expand3x3_weight,d_fire4_expand3x3_bias,d_result_block2_expand);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
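//maxpool3: 256 channels, 27 x 27 -> 13 x 13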
hipLaunchKernelGGL(( maxpool), dim3(1),dim3(256), 0, 0, 27,13,d_result_block2_expand,d_result_pool3);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
//block3
//fire5_squeeze
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire5_squeeze_weight,fire5_squeeze_weight,sizeof(fire5_squeeze_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire5_squeeze_bias,fire5_squeeze_bias,sizeof(fire5_squeeze_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(48), 0, 0, 256,13,48,d_result_pool3,d_fire5_squeeze_weight,d_fire5_squeeze_bias,d_result_block3_squeeze1);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire5_expand1x1_weight,fire5_expand1x1_weight,sizeof(fire5_expand1x1_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire5_expand1x1_bias,fire5_expand1x1_bias,sizeof(fire5_expand1x1_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(192), 0, 0, 48,13,192,d_result_block3_squeeze1,d_fire5_expand1x1_weight,d_fire5_expand1x1_bias,d_result_block3_expand1);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire5_expand3x3_weight,fire5_expand3x3_weight,sizeof(fire5_expand3x3_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire5_expand3x3_bias,fire5_expand3x3_bias,sizeof(fire5_expand3x3_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv3x3), dim3(1),dim3(192), 0, 0, 48,13,1,1,192,13,d_result_block3_squeeze1,d_fire5_expand3x3_weight,d_fire5_expand3x3_bias,d_result_block3_expand1);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
//fire6
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire6_squeeze_weight,fire6_squeeze_weight,sizeof(fire6_squeeze_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire6_squeeze_bias,fire6_squeeze_bias,sizeof(fire6_squeeze_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(48), 0, 0, 384,13,48,d_result_block3_expand1,d_fire6_squeeze_weight,d_fire6_squeeze_bias,d_result_block3_squeeze1);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire6_expand1x1_weight,fire6_expand1x1_weight,sizeof(fire6_expand1x1_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire6_expand1x1_bias,fire6_expand1x1_bias,sizeof(fire6_expand1x1_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(192), 0, 0, 48,13,192,d_result_block3_squeeze1,d_fire6_expand1x1_weight,d_fire6_expand1x1_bias,d_result_block3_expand1);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire6_expand3x3_weight,fire6_expand3x3_weight,sizeof(fire6_expand3x3_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire6_expand3x3_bias,fire6_expand3x3_bias,sizeof(fire6_expand3x3_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv3x3), dim3(1),dim3(192), 0, 0, 48,13,1,1,192,13,d_result_block3_squeeze1,d_fire6_expand3x3_weight,d_fire6_expand3x3_bias,d_result_block3_expand1);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
//fire7
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire7_squeeze_weight,fire7_squeeze_weight,sizeof(fire7_squeeze_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire7_squeeze_bias,fire7_squeeze_bias,sizeof(fire7_squeeze_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(64), 0, 0, 384,13,64,d_result_block3_expand1,d_fire7_squeeze_weight,d_fire7_squeeze_bias,d_result_block3_squeeze2);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire7_expand1x1_weight,fire7_expand1x1_weight,sizeof(fire7_expand1x1_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire7_expand1x1_bias,fire7_expand1x1_bias,sizeof(fire7_expand1x1_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(256), 0, 0, 64,13,256,d_result_block3_squeeze2,d_fire7_expand1x1_weight,d_fire7_expand1x1_bias,d_result_block3_expand2);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire7_expand3x3_weight,fire7_expand3x3_weight,sizeof(fire7_expand3x3_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire7_expand3x3_bias,fire7_expand3x3_bias,sizeof(fire7_expand3x3_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv3x3), dim3(1),dim3(256), 0, 0, 64,13,1,1,256,13,d_result_block3_squeeze2,d_fire7_expand3x3_weight,d_fire7_expand3x3_bias,d_result_block3_expand2);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
//fire8
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire8_squeeze_weight,fire8_squeeze_weight,sizeof(fire8_squeeze_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire8_squeeze_bias,fire8_squeeze_bias,sizeof(fire8_squeeze_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(64), 0, 0, 512,13,64,d_result_block3_expand2,d_fire8_squeeze_weight,d_fire8_squeeze_bias,d_result_block3_squeeze2);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire8_expand1x1_weight,fire8_expand1x1_weight,sizeof(fire8_expand1x1_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire8_expand1x1_bias,fire8_expand1x1_bias,sizeof(fire8_expand1x1_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(1),dim3(256), 0, 0, 64,13,256,d_result_block3_squeeze2,d_fire8_expand1x1_weight,d_fire8_expand1x1_bias,d_result_block3_expand2);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_fire8_expand3x3_weight,fire8_expand3x3_weight,sizeof(fire8_expand3x3_weight),hipMemcpyHostToDevice);
hipMemcpy(d_fire8_expand3x3_bias,fire8_expand3x3_bias,sizeof(fire8_expand3x3_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv3x3), dim3(1),dim3(256), 0, 0, 64,13,1,1,256,13,d_result_block3_squeeze2,d_fire8_expand3x3_weight,d_fire8_expand3x3_bias,d_result_block3_expand2);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
//Classifier
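//the 1x1 classifier conv produces a 1000 x 13 x 13 score map (1024 threads launched, 1000 active);
//the map is copied back and averaged on the host below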
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipMemcpy(d_classifier_conv_weight,classifier_conv_weight,sizeof(classifier_conv_weight),hipMemcpyHostToDevice);
hipMemcpy(d_classifier_conv_bias,classifier_conv_bias,sizeof(classifier_conv_bias),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( conv1x1), dim3(4),dim3(256), 0, 0, 512,13,1000,d_result_block3_expand2,d_classifier_conv_weight,d_classifier_conv_bias,d_result_classifier_conv);
float * result = (float*)malloc(sizeof(float) * (1 * 1000 * 13 * 13));
hipMemcpy(result,d_result_classifier_conv,sizeof(float) * (1 * 1000 * 13 * 13),hipMemcpyDeviceToHost);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
total_time += time;
hipEventDestroy( start );
hipEventDestroy( stop );
//host-side global average pool: reduce each 13 x 13 (=169) class map to a single score
for(int i=0;i<1000;++i){
float tmp =0;
for(int j=0;j<169;++j){
tmp = tmp + result[i * 169 + j];
}
h_result_classifier[i] = tmp/169;
}
free(result); //indexed access keeps the original pointer, so the host buffer can be freed
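//argmax: pick the class with the highest averaged score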
float tmp = 0.0f;
unsigned int class_index = 0;
for(int j = 0; j < 1000; j++)
{
if(h_result_classifier[j] > tmp)
{
tmp = h_result_classifier[j];
class_index = j;
}
}
getLabel(class_index);
printf("\r\npredicted label: %s\r\n", class_label);
cleanup();
printf("Total Kernel execution time: %f\n",total_time);
printf("done\n");
return 0;
}
void getLabel(unsigned int class_index)
{
unsigned int i;
FILE *fp;
fp = fopen("synset_words.txt", "r");
if(fp == NULL)
{
printf("could not open synset_words.txt\n");
class_label[0] = '\0';
return;
}
for(i = 0; i < class_index + 1; i++)
{
fgets(class_label, sizeof(class_label), fp);
}
fclose(fp);
}
void cleanup(){
hipFree(d_sample);
hipFree(d_conv1_weight);
hipFree(d_conv1_bias);
hipFree(d_result_conv);
hipFree(d_result_pool1);
hipFree(d_result_block1_squeeze);
hipFree(d_result_block1_expand);
hipFree(d_fire1_squeeze_weight);
hipFree(d_fire1_squeeze_bias);
hipFree(d_fire1_expand1x1_weight);
hipFree(d_fire1_expand1x1_bias);
hipFree(d_fire1_expand3x3_weight);
hipFree(d_fire1_expand3x3_bias);
hipFree(d_fire2_squeeze_weight);
hipFree(d_fire2_squeeze_bias);
hipFree(d_fire2_expand1x1_weight);
hipFree(d_fire2_expand1x1_bias);
hipFree(d_fire2_expand3x3_weight);
hipFree(d_fire2_expand3x3_bias);
hipFree(d_result_block2_squeeze);
hipFree(d_result_block2_expand);
hipFree(d_result_pool2);
hipFree(d_fire3_squeeze_weight);
hipFree(d_fire3_squeeze_bias);
hipFree(d_fire3_expand1x1_weight);
hipFree(d_fire3_expand1x1_bias);
hipFree(d_fire3_expand3x3_weight);
hipFree(d_fire3_expand3x3_bias);
hipFree(d_fire4_squeeze_weight);
hipFree(d_fire4_squeeze_bias);
hipFree(d_fire4_expand1x1_weight);
hipFree(d_fire4_expand1x1_bias);
hipFree(d_fire4_expand3x3_weight);
hipFree(d_fire4_expand3x3_bias);
hipFree(d_result_pool3);
hipFree(d_result_block3_squeeze1);
hipFree(d_result_block3_expand1);
hipFree(d_result_block3_squeeze2);
hipFree(d_fire5_squeeze_weight);
hipFree(d_fire5_squeeze_bias);
hipFree(d_fire5_expand1x1_weight);
hipFree(d_fire5_expand1x1_bias);
hipFree(d_fire5_expand3x3_weight);
hipFree(d_fire5_expand3x3_bias);
hipFree(d_fire6_squeeze_weight);
hipFree(d_fire6_squeeze_bias);
hipFree(d_fire6_expand1x1_weight);
hipFree(d_fire6_expand1x1_bias);
hipFree(d_fire6_expand3x3_weight);
hipFree(d_fire6_expand3x3_bias);
hipFree(d_fire7_squeeze_weight);
hipFree(d_fire7_squeeze_bias);
hipFree(d_fire7_expand1x1_weight);
hipFree(d_fire7_expand1x1_bias);
hipFree(d_fire7_expand3x3_weight);
hipFree(d_fire7_expand3x3_bias);
hipFree(d_fire8_squeeze_weight);
hipFree(d_fire8_squeeze_bias);
hipFree(d_fire8_expand1x1_weight);
hipFree(d_fire8_expand1x1_bias);
hipFree(d_fire8_expand3x3_weight);
hipFree(d_fire8_expand3x3_bias);
hipFree(d_result_block3_expand2);
hipFree(d_result_classifier_conv);
hipFree(d_classifier_conv_weight);
hipFree(d_classifier_conv_bias);
hipFree(d_result_classifier);
free(h_result_classifier);
}
|
4d8c3716b50141faf451d52095c939ee84444f56.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "squeezenet_params.h"
///#include "dog.h"
#include "hourglass.h"
//#include "tiger.h"
//#include "truck.h"
__global__ void maxpool(
int input_size, int output_size,
float *input_im, float *output_im){
int channels = blockIdx.x * blockDim.x + threadIdx.x;
input_im += channels*input_size*input_size;
output_im += channels*output_size*output_size;
for(int i=0; i<output_size; i++){
for(int j=0; j<output_size; j++){
float tmp = 0.0;
for(int k =0; k<3; k++){
for(int l = 0; l<3; l++){
float value = input_im[(i * 2 + k) * input_size + j *2 +l];
if(value > tmp)
tmp = value;
}
}
output_im[i*output_size + j] = tmp;
}
}
}
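//3x3 convolution followed by ReLU; one thread computes one output channel (filter).
//start_channel offsets the output so the 1x1 and 3x3 expand results of a fire layer share one buffer.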
__global__ void conv3x3(
int input_channels, int input_size,
int pad, int stride, int start_channel,
int output_size, float* input_im, float* filter_weight,
float* filter_bias, float* output_im){
int filter_index = blockIdx.x * blockDim.x + threadIdx.x;
filter_weight += filter_index * input_channels * 9;
float bias = filter_bias[filter_index];
output_im += (start_channel + filter_index) * output_size * output_size;
//loop over output feature map
for(int i = 0; i < output_size; i++)
{
for(int j = 0; j < output_size; j++)
{
//compute one element in the output feature map
float tmp = bias;
//compute the dot product of the input_channels x 3 x 3 input patch with the filter
for(int k = 0; k < input_channels; k++)
{
for(int l = 0; l < 3; l++)
{
int h = i * stride + l - pad;
for(int m = 0; m < 3; m++)
{
int w = j * stride + m - pad;
if((h >= 0) && (h < input_size) && (w >= 0) && (w < input_size))
{
tmp += input_im[k * input_size * input_size + h * input_size + w] * filter_weight[9 * k + 3 * l + m];
}
}
}
}
//add relu activation after conv
output_im[i * output_size + j] = (tmp > 0.0) ? tmp : 0.0;
}
}
}
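//1x1 convolution followed by ReLU; one thread per output channel.
//The `threads` guard covers launches where gridDim*blockDim exceeds the filter count (e.g. the 1000-class classifier).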
__global__ void conv1x1(
int input_channels, int input_size,int threads,
float* input_im, float* filter_weight,
float* filter_bias, float* output_im){
int filter_index = blockIdx.x * blockDim.x + threadIdx.x;
if(filter_index<threads){
filter_weight += filter_index * input_channels;
float bias = filter_bias[filter_index];
output_im += filter_index * input_size * input_size; //each thread writes one full output channel
//loop over output feature map
for(int i = 0; i < input_size; i++)
{
for(int j = 0; j < input_size; j++)
{
float tmp = bias;
for(int k = 0; k < input_channels; k++)
{
tmp += input_im[k * input_size * input_size + i * input_size + j] * filter_weight[k];
}
//add relu after conv
output_im[i * input_size + j] = (tmp > 0.0) ? tmp : 0.0;
}
}
}
}
//the last layer uses a 13 x 13 average pool as the classifier
//one class score per thread
__global__ void avgpool(
int threads,
float* input_im,
float* output_im)
{
int class_index = blockIdx.x * blockDim.x + threadIdx.x;//get class score index
if(class_index<threads){
input_im += 169 * class_index;
float tmp = 0.0f;
for(int i = 0; i < 169; i++)
{
tmp += input_im[i];
}
output_im[class_index] = tmp / 169.0;
}
}
void getLabel(unsigned int class_index);
void cleanup();
float * h_result_classifier = (float*)malloc((1000)*sizeof(float));
char class_label[201];
float * d_sample;
float * d_conv1_weight;
float * d_conv1_bias;
float * d_result_conv;
float * d_result_pool1;
float * d_result_block1_squeeze;
float * d_result_block1_expand;
float * d_result_pool2;
float * d_fire1_squeeze_weight;
float * d_fire1_squeeze_bias;
float * d_fire1_expand1x1_weight;
float * d_fire1_expand1x1_bias;
float * d_fire1_expand3x3_weight;
float * d_fire1_expand3x3_bias;
float * d_fire2_squeeze_weight;
float * d_fire2_squeeze_bias;
float * d_fire2_expand1x1_weight;
float * d_fire2_expand1x1_bias;
float * d_fire2_expand3x3_weight;
float * d_fire2_expand3x3_bias;
float * d_result_block2_squeeze;
float * d_result_block2_expand;
float * d_result_pool3;
float * d_fire3_squeeze_weight;
float * d_fire3_squeeze_bias;
float * d_fire3_expand1x1_weight;
float * d_fire3_expand1x1_bias;
float * d_fire3_expand3x3_weight;
float * d_fire3_expand3x3_bias;
float * d_fire4_squeeze_weight;
float * d_fire4_squeeze_bias;
float * d_fire4_expand1x1_weight;
float * d_fire4_expand1x1_bias;
float * d_fire4_expand3x3_weight;
float * d_fire4_expand3x3_bias;
float * d_result_block3_squeeze1;
float * d_result_block3_expand1;
float * d_result_block3_squeeze2;
float * d_result_block3_expand2;
float * d_fire5_squeeze_weight;
float * d_fire5_squeeze_bias;
float * d_fire5_expand1x1_weight;
float * d_fire5_expand1x1_bias;
float * d_fire5_expand3x3_weight;
float * d_fire5_expand3x3_bias;
float * d_fire6_squeeze_weight;
float * d_fire6_squeeze_bias;
float * d_fire6_expand1x1_weight;
float * d_fire6_expand1x1_bias;
float * d_fire6_expand3x3_weight;
float * d_fire6_expand3x3_bias;
float * d_fire7_squeeze_weight;
float * d_fire7_squeeze_bias;
float * d_fire7_expand1x1_weight;
float * d_fire7_expand1x1_bias;
float * d_fire7_expand3x3_weight;
float * d_fire7_expand3x3_bias;
float * d_fire8_squeeze_weight;
float * d_fire8_squeeze_bias;
float * d_fire8_expand1x1_weight;
float * d_fire8_expand1x1_bias;
float * d_fire8_expand3x3_weight;
float * d_fire8_expand3x3_bias;
float * d_classifier_conv_weight;
float * d_classifier_conv_bias;
float * d_result_classifier_conv;
float * d_result_classifier;
int main(){
float time,total_time=0.0;
cudaEvent_t start, stop;
// conv1 and fire 1
cudaMalloc(&d_sample,3*224*224*sizeof(float));
cudaMalloc(&d_conv1_weight,sizeof(conv1_weight));
cudaMalloc(&d_conv1_bias,sizeof(conv1_bias));
cudaMalloc(&d_result_conv,sizeof(float) * (1 * 64 * 111 * 111));
cudaMalloc(&d_result_pool1, sizeof(float) * (1 * 64 * 55 * 55));
cudaMalloc(&d_result_block1_squeeze,sizeof(float) * (1 * 16* 55 * 55));
cudaMalloc(&d_result_block1_expand,sizeof(float) * (1 * 128 * 55 * 55));
cudaMalloc(&d_result_pool2,sizeof(float) * (1 * 128 * 27 * 27));
cudaMalloc(&d_fire1_squeeze_weight,sizeof(fire1_squeeze_weight));
//printf("%d\n",sizeof(fire1_squeeze_weight)/sizeof(float));
//printf("%d\n",sizeof(fire1_squeeze_bias)/sizeof(float));
cudaMalloc(&d_fire1_squeeze_bias,sizeof(fire1_squeeze_bias));
cudaMalloc(&d_fire1_expand1x1_weight,sizeof(fire1_expand1x1_weight));
//printf("fire1_expand1x1_weight:%d\n",sizeof(fire1_expand1x1_weight)/sizeof(float));
cudaMalloc(&d_fire1_expand1x1_bias,sizeof(fire1_expand1x1_bias));
//printf("fire1_expand1x1_bias:%d\n",sizeof(fire1_expand1x1_bias)/sizeof(float));
cudaMalloc(&d_fire1_expand3x3_weight,sizeof(fire1_expand3x3_weight));
//printf("fire1_expand3x3_weight:%d\n",sizeof(fire1_expand3x3_weight)/sizeof(float));
cudaMalloc(&d_fire1_expand3x3_bias,sizeof(fire1_expand3x3_bias));
//printf("fire1_expand3x3_bias:%d\n",sizeof(fire1_expand3x3_bias)/sizeof(float));
//fire2
cudaMalloc(&d_fire2_squeeze_weight,sizeof(fire2_squeeze_weight));
//printf("fire2_squeeze_weight:%d\n",sizeof(fire2_squeeze_weight)/sizeof(float));
cudaMalloc(&d_fire2_squeeze_bias,sizeof(fire2_squeeze_bias));
//printf("fire2_squeeze_bias:%d\n",sizeof(fire2_squeeze_bias)/sizeof(float));
cudaMalloc(&d_fire2_expand1x1_weight,sizeof(fire2_expand1x1_weight));
//printf("fire2_expand1x1_weight:%d\n",sizeof(fire2_expand1x1_weight)/sizeof(float));
cudaMalloc(&d_fire2_expand1x1_bias,sizeof(fire2_expand1x1_bias));
//printf("fire2_expand1x1_bias:%d\n",sizeof(fire2_expand1x1_bias)/sizeof(float));
cudaMalloc(&d_fire2_expand3x3_weight,sizeof(fire2_expand3x3_weight));
//printf("fire2_expand3x3_weight:%d\n",sizeof(fire2_expand3x3_weight)/sizeof(float));
cudaMalloc(&d_fire2_expand3x3_bias,sizeof(fire2_expand3x3_bias));
//printf("fire2_expand3x3_bias:%d\n",sizeof(fire2_expand3x3_bias)/sizeof(float));
//block2
cudaMalloc(&d_result_block2_squeeze,sizeof(float) * (1 * 32 * 27 * 27));
cudaMalloc(&d_result_block2_expand,sizeof(float) * (1 * 256 * 27 *27));
cudaMalloc(&d_result_pool3,sizeof(float) * (1 * 256 * 13 * 13));
//fire3
cudaMalloc(&d_fire3_squeeze_weight,sizeof(fire3_squeeze_weight));
//printf("fire3_squeeze_weight:%d\n",sizeof(fire3_squeeze_weight)/sizeof(float));
cudaMalloc(&d_fire3_squeeze_bias,sizeof(fire3_squeeze_bias));
//printf("fire3_squeeze_bias:%d\n",sizeof(fire3_squeeze_bias)/sizeof(float));
cudaMalloc(&d_fire3_expand1x1_weight,sizeof(fire3_expand1x1_weight));
//printf("fire3_expand1x1_weight:%d\n",sizeof(fire3_expand1x1_weight)/sizeof(float));
cudaMalloc(&d_fire3_expand1x1_bias,sizeof(fire3_expand1x1_bias));
//printf("fire3_expand1x1_bias:%d\n",sizeof(fire3_expand1x1_bias)/sizeof(float));
cudaMalloc(&d_fire3_expand3x3_weight,sizeof(fire3_expand3x3_weight));
//printf("fire3_expand3x3_weight:%d\n",sizeof(fire3_expand3x3_weight)/sizeof(float));
cudaMalloc(&d_fire3_expand3x3_bias,sizeof(fire3_expand3x3_bias));
//printf("fire3_expand3x3_bias:%d\n",sizeof(fire3_expand3x3_bias)/sizeof(float));
//fire4
cudaMalloc(&d_fire4_squeeze_weight,sizeof(fire4_squeeze_weight));
//printf("fire4_squeeze_weight:%d\n",sizeof(fire4_squeeze_weight)/sizeof(float));
cudaMalloc(&d_fire4_squeeze_bias,sizeof(fire4_squeeze_bias));
//printf("fire4_squeeze_bias:%d\n",sizeof(fire4_squeeze_bias)/sizeof(float));
cudaMalloc(&d_fire4_expand1x1_weight,sizeof(fire4_expand1x1_weight));
//printf("fire4_expand1x1_weight:%d\n",sizeof(fire4_expand1x1_weight)/sizeof(float));
cudaMalloc(&d_fire4_expand1x1_bias,sizeof(fire4_expand1x1_bias));
//printf("fire4_expand1x1_bias:%d\n",sizeof(fire4_expand1x1_bias)/sizeof(float));
cudaMalloc(&d_fire4_expand3x3_weight,sizeof(fire4_expand3x3_weight));
//printf("fire4_expand3x3_weight:%d\n",sizeof(fire4_expand3x3_weight)/sizeof(float));
cudaMalloc(&d_fire4_expand3x3_bias,sizeof(fire4_expand3x3_bias));
//printf("fire4_expand3x3_bias:%d\n",sizeof(fire4_expand3x3_bias)/sizeof(float));
cudaMalloc(&d_result_block3_squeeze1,sizeof(float) * (1 * 48 * 13 * 13));
cudaMalloc(&d_result_block3_expand1,sizeof(float) * (1 * 384 * 13 * 13));
cudaMalloc(&d_result_block3_squeeze2,sizeof(float) * (1 * 64 * 13 * 13));
cudaMalloc(&d_result_block3_expand2,sizeof(float) * (1 * 512 * 13 * 13));
//fire5
cudaMalloc(&d_fire5_squeeze_weight,sizeof(fire5_squeeze_weight));
//printf("fire5_squeeze_weight:%d\n",sizeof(fire5_squeeze_weight)/sizeof(float));
cudaMalloc(&d_fire5_squeeze_bias,sizeof(fire5_squeeze_bias));
//printf("fire5_squeeze_bias:%d\n",sizeof(fire5_squeeze_bias)/sizeof(float));
cudaMalloc(&d_fire5_expand1x1_weight,sizeof(fire5_expand1x1_weight));
//printf("fire5_expand1x1_weight:%d\n",sizeof(fire5_expand1x1_weight)/sizeof(float));
cudaMalloc(&d_fire5_expand1x1_bias,sizeof(fire5_expand1x1_bias));
//printf("fire5_expand1x1_bias:%d\n",sizeof(fire5_expand1x1_bias)/sizeof(float));
cudaMalloc(&d_fire5_expand3x3_weight,sizeof(fire5_expand3x3_weight));
//printf("fire5_expand3x3_weight:%d\n",sizeof(fire5_expand3x3_weight)/sizeof(float));
cudaMalloc(&d_fire5_expand3x3_bias,sizeof(fire5_expand3x3_bias));
//printf("fire5_expand3x3_bias:%d\n",sizeof(fire5_expand3x3_bias)/sizeof(float));
//fire 6
cudaMalloc(&d_fire6_squeeze_weight,sizeof(fire6_squeeze_weight));
//printf("fire6_squeeze_weight:%d\n",sizeof(fire6_squeeze_weight)/sizeof(float));
cudaMalloc(&d_fire6_squeeze_bias,sizeof(fire6_squeeze_bias));
//printf("fire6_squeeze_bias:%d\n",sizeof(fire6_squeeze_bias)/sizeof(float));
cudaMalloc(&d_fire6_expand1x1_weight,sizeof(fire6_expand1x1_weight));
//printf("fire6_expand1x1_weight:%d\n",sizeof(fire6_expand1x1_weight)/sizeof(float));
cudaMalloc(&d_fire6_expand1x1_bias,sizeof(fire6_expand1x1_bias));
//printf("fire6_expand1x1_bias:%d\n",sizeof(fire6_expand1x1_bias)/sizeof(float));
cudaMalloc(&d_fire6_expand3x3_weight,sizeof(fire6_expand3x3_weight));
//printf("fire6_expand3x3_weight:%d\n",sizeof(fire6_expand3x3_weight)/sizeof(float));
cudaMalloc(&d_fire6_expand3x3_bias,sizeof(fire6_expand3x3_bias));
//printf("fire6_expand3x3_bias:%d\n",sizeof(fire6_expand3x3_bias)/sizeof(float));
//fire 7
cudaMalloc(&d_fire7_squeeze_weight,sizeof(fire7_squeeze_weight));
//printf("fire7_squeeze_weight:%d\n",sizeof(fire7_squeeze_weight)/sizeof(float));
cudaMalloc(&d_fire7_squeeze_bias,sizeof(fire7_squeeze_bias));
//printf("fire7_squeeze_bias:%d\n",sizeof(fire7_squeeze_bias)/sizeof(float));
cudaMalloc(&d_fire7_expand1x1_weight,sizeof(fire7_expand1x1_weight));
//printf("fire7_expand1x1_weight:%d\n",sizeof(fire7_expand1x1_weight)/sizeof(float));
cudaMalloc(&d_fire7_expand1x1_bias,sizeof(fire7_expand1x1_bias));
//printf("fire7_expand1x1_bias:%d\n",sizeof(fire7_expand1x1_bias)/sizeof(float));
cudaMalloc(&d_fire7_expand3x3_weight,sizeof(fire7_expand3x3_weight));
//printf("fire7_expand3x3_weight:%d\n",sizeof(fire7_expand3x3_weight)/sizeof(float));
cudaMalloc(&d_fire7_expand3x3_bias,sizeof(fire7_expand3x3_bias));
//printf("fire7_expand3x3_bias:%d\n",sizeof(fire7_expand3x3_bias)/sizeof(float));
//fire 8
cudaMalloc(&d_fire8_squeeze_weight,sizeof(fire8_squeeze_weight));
//printf("fire8_squeeze_weight:%d\n",sizeof(fire8_squeeze_weight)/sizeof(float));
cudaMalloc(&d_fire8_squeeze_bias,sizeof(fire8_squeeze_bias));
//printf("fire8_squeeze_bias:%d\n",sizeof(fire8_squeeze_bias)/sizeof(float));
cudaMalloc(&d_fire8_expand1x1_weight,sizeof(fire8_expand1x1_weight));
//printf("fire8_expand1x1_weight:%d\n",sizeof(fire8_expand1x1_weight)/sizeof(float));
cudaMalloc(&d_fire8_expand1x1_bias,sizeof(fire8_expand1x1_bias));
//printf("fire8_expand1x1_bias:%d\n",sizeof(fire8_expand1x1_bias)/sizeof(float));
cudaMalloc(&d_fire8_expand3x3_weight,sizeof(fire8_expand3x3_weight));
//printf("fire8_expand3x3_weight:%d\n",sizeof(fire8_expand3x3_weight)/sizeof(float));
cudaMalloc(&d_fire8_expand3x3_bias,sizeof(fire8_expand3x3_bias));
//printf("fire8_expand3x3_bias:%d\n",sizeof(fire8_expand3x3_bias)/sizeof(float));
//classifier
cudaMalloc(&d_classifier_conv_weight,sizeof(classifier_conv_weight));
//printf("%d\n",sizeof(classifier_conv_weight)/sizeof(float));
cudaMalloc(&d_classifier_conv_bias,sizeof(classifier_conv_bias));
//printf("%d\n",sizeof(classifier_conv_bias)/sizeof(float));
cudaMalloc(&d_result_classifier_conv,sizeof(float) * (1 * 1000 * 13 * 13));
cudaMalloc(&d_result_classifier,sizeof(float) * 1000);
printf("squeezenet starting\n");
printf("conv1\n");
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_sample,sample,3*224*224*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_conv1_weight,conv1_weight,sizeof(conv1_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_conv1_bias,conv1_bias,sizeof(conv1_bias),cudaMemcpyHostToDevice);
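//conv1: 64 filters of 3 x 3, stride 2, no padding (224 -> 111)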
conv3x3<<<1,64>>>(3,224,0,2,0,111,d_sample,d_conv1_weight,d_conv1_bias,d_result_conv);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
maxpool<<<1,64>>>(111,55,d_result_conv,d_result_pool1);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire1_squeeze_weight,fire1_squeeze_weight,sizeof(fire1_squeeze_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire1_squeeze_bias,fire1_squeeze_bias,sizeof(fire1_squeeze_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,16>>>(64,55,16,d_result_pool1,d_fire1_squeeze_weight,d_fire1_squeeze_bias,d_result_block1_squeeze);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire1_expand1x1_weight,fire1_expand1x1_weight,sizeof(fire1_expand1x1_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire1_expand1x1_bias,fire1_expand1x1_bias,sizeof(fire1_expand1x1_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,64>>>(16,55,64,d_result_block1_squeeze,d_fire1_expand1x1_weight,d_fire1_expand1x1_bias,d_result_block1_expand);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire1_expand3x3_weight,fire1_expand3x3_weight,sizeof(fire1_expand3x3_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire1_expand3x3_bias,fire1_expand3x3_bias,sizeof(fire1_expand3x3_bias),cudaMemcpyHostToDevice);
conv3x3<<<1,64>>>(16,55,1,1,64,55,d_result_block1_squeeze,d_fire1_expand3x3_weight,d_fire1_expand3x3_bias,d_result_block1_expand);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire2_squeeze_weight,fire2_squeeze_weight,sizeof(fire2_squeeze_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire2_squeeze_bias,fire2_squeeze_bias,sizeof(fire2_squeeze_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,16>>>(128,55,16,d_result_block1_expand,d_fire2_squeeze_weight,d_fire2_squeeze_bias,d_result_block1_squeeze);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
///
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire2_expand1x1_weight,fire2_expand1x1_weight,sizeof(fire2_expand1x1_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire2_expand1x1_bias,fire2_expand1x1_bias,sizeof(fire2_expand1x1_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,64>>>(16,55,64,d_result_block1_squeeze,d_fire2_expand1x1_weight,d_fire2_expand1x1_bias,d_result_block1_expand);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire2_expand3x3_weight,fire2_expand3x3_weight,sizeof(fire2_expand3x3_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire2_expand3x3_bias,fire2_expand3x3_bias,sizeof(fire2_expand3x3_bias),cudaMemcpyHostToDevice);
conv3x3<<<1,64>>>(16,55,1,1,64,55,d_result_block1_squeeze,d_fire2_expand3x3_weight,d_fire2_expand3x3_bias,d_result_block1_expand);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
maxpool<<<1,128>>>(55,27,d_result_block1_expand,d_result_pool2);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
//Block2
//fire3
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire3_squeeze_weight,fire3_squeeze_weight,sizeof(fire3_squeeze_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire3_squeeze_bias,fire3_squeeze_bias,sizeof(fire3_squeeze_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,32>>>(128,27,32,d_result_pool2,d_fire3_squeeze_weight,d_fire3_squeeze_bias,d_result_block2_squeeze);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire3_expand1x1_weight,fire3_expand1x1_weight,sizeof(fire3_expand1x1_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire3_expand1x1_bias,fire3_expand1x1_bias,sizeof(fire3_expand1x1_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,128>>>(32,27,128,d_result_block2_squeeze,d_fire3_expand1x1_weight,d_fire3_expand1x1_bias,d_result_block2_expand);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire3_expand3x3_weight,fire3_expand3x3_weight,sizeof(fire3_expand3x3_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire3_expand3x3_bias,fire3_expand3x3_bias,sizeof(fire3_expand3x3_bias),cudaMemcpyHostToDevice);
conv3x3<<<1,128>>>(32,27,1,1,128,27,d_result_block2_squeeze,d_fire3_expand3x3_weight,d_fire3_expand3x3_bias,d_result_block2_expand);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
//fire4
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire4_squeeze_weight,fire4_squeeze_weight,sizeof(fire4_squeeze_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire4_squeeze_bias,fire4_squeeze_bias,sizeof(fire4_squeeze_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,32>>>(256,27,32,d_result_block2_expand,d_fire4_squeeze_weight,d_fire4_squeeze_bias,d_result_block2_squeeze);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire4_expand1x1_weight,fire4_expand1x1_weight,sizeof(fire4_expand1x1_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire4_expand1x1_bias,fire4_expand1x1_bias,sizeof(fire4_expand1x1_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,128>>>(32,27,128,d_result_block2_squeeze,d_fire4_expand1x1_weight,d_fire4_expand1x1_bias,d_result_block2_expand);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire4_expand3x3_weight,fire4_expand3x3_weight,sizeof(fire4_expand3x3_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire4_expand3x3_bias,fire4_expand3x3_bias,sizeof(fire4_expand3x3_bias),cudaMemcpyHostToDevice);
conv3x3<<<1,128>>>(32,27,1,1,128,27,d_result_block2_squeeze,d_fire4_expand3x3_weight,d_fire4_expand3x3_bias,d_result_block2_expand);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
maxpool<<<1,256>>>(27,13,d_result_block2_expand,d_result_pool3);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
//block3
//fire5_squeeze
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire5_squeeze_weight,fire5_squeeze_weight,sizeof(fire5_squeeze_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire5_squeeze_bias,fire5_squeeze_bias,sizeof(fire5_squeeze_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,48>>>(256,13,48,d_result_pool3,d_fire5_squeeze_weight,d_fire5_squeeze_bias,d_result_block3_squeeze1);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire5_expand1x1_weight,fire5_expand1x1_weight,sizeof(fire5_expand1x1_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire5_expand1x1_bias,fire5_expand1x1_bias,sizeof(fire5_expand1x1_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,192>>>(48,13,192,d_result_block3_squeeze1,d_fire5_expand1x1_weight,d_fire5_expand1x1_bias,d_result_block3_expand1);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire5_expand3x3_weight,fire5_expand3x3_weight,sizeof(fire5_expand3x3_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire5_expand3x3_bias,fire5_expand3x3_bias,sizeof(fire5_expand3x3_bias),cudaMemcpyHostToDevice);
conv3x3<<<1,192>>>(48,13,1,1,192,13,d_result_block3_squeeze1,d_fire5_expand3x3_weight,d_fire5_expand3x3_bias,d_result_block3_expand1);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
//fire6
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire6_squeeze_weight,fire6_squeeze_weight,sizeof(fire6_squeeze_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire6_squeeze_bias,fire6_squeeze_bias,sizeof(fire6_squeeze_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,48>>>(384,13,48,d_result_block3_expand1,d_fire6_squeeze_weight,d_fire6_squeeze_bias,d_result_block3_squeeze1);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire6_expand1x1_weight,fire6_expand1x1_weight,sizeof(fire6_expand1x1_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire6_expand1x1_bias,fire6_expand1x1_bias,sizeof(fire6_expand1x1_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,192>>>(48,13,192,d_result_block3_squeeze1,d_fire6_expand1x1_weight,d_fire6_expand1x1_bias,d_result_block3_expand1);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire6_expand3x3_weight,fire6_expand3x3_weight,sizeof(fire6_expand3x3_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire6_expand3x3_bias,fire6_expand3x3_bias,sizeof(fire6_expand3x3_bias),cudaMemcpyHostToDevice);
conv3x3<<<1,192>>>(48,13,1,1,192,13,d_result_block3_squeeze1,d_fire6_expand3x3_weight,d_fire6_expand3x3_bias,d_result_block3_expand1);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
//fire7
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire7_squeeze_weight,fire7_squeeze_weight,sizeof(fire7_squeeze_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire7_squeeze_bias,fire7_squeeze_bias,sizeof(fire7_squeeze_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,64>>>(384,13,64,d_result_block3_expand1,d_fire7_squeeze_weight,d_fire7_squeeze_bias,d_result_block3_squeeze2);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire7_expand1x1_weight,fire7_expand1x1_weight,sizeof(fire7_expand1x1_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire7_expand1x1_bias,fire7_expand1x1_bias,sizeof(fire7_expand1x1_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,256>>>(64,13,256,d_result_block3_squeeze2,d_fire7_expand1x1_weight,d_fire7_expand1x1_bias,d_result_block3_expand2);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire7_expand3x3_weight,fire7_expand3x3_weight,sizeof(fire7_expand3x3_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire7_expand3x3_bias,fire7_expand3x3_bias,sizeof(fire7_expand3x3_bias),cudaMemcpyHostToDevice);
conv3x3<<<1,256>>>(64,13,1,1,256,13,d_result_block3_squeeze2,d_fire7_expand3x3_weight,d_fire7_expand3x3_bias,d_result_block3_expand2);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
//fire8
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire8_squeeze_weight,fire8_squeeze_weight,sizeof(fire8_squeeze_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire8_squeeze_bias,fire8_squeeze_bias,sizeof(fire8_squeeze_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,64>>>(512,13,64,d_result_block3_expand2,d_fire8_squeeze_weight,d_fire8_squeeze_bias,d_result_block3_squeeze2);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire8_expand1x1_weight,fire8_expand1x1_weight,sizeof(fire8_expand1x1_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire8_expand1x1_bias,fire8_expand1x1_bias,sizeof(fire8_expand1x1_bias),cudaMemcpyHostToDevice);
conv1x1<<<1,256>>>(64,13,256,d_result_block3_squeeze2,d_fire8_expand1x1_weight,d_fire8_expand1x1_bias,d_result_block3_expand2);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_fire8_expand3x3_weight,fire8_expand3x3_weight,sizeof(fire8_expand3x3_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_fire8_expand3x3_bias,fire8_expand3x3_bias,sizeof(fire8_expand3x3_bias),cudaMemcpyHostToDevice);
conv3x3<<<1,256>>>(64,13,1,1,256,13,d_result_block3_squeeze2,d_fire8_expand3x3_weight,d_fire8_expand3x3_bias,d_result_block3_expand2);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
//Classifier
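//the avgpool kernel defined above is not launched; the 1000 x 13 x 13 map is copied to the host and averaged there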
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(d_classifier_conv_weight,classifier_conv_weight,sizeof(classifier_conv_weight),cudaMemcpyHostToDevice);
cudaMemcpy(d_classifier_conv_bias,classifier_conv_bias,sizeof(classifier_conv_bias),cudaMemcpyHostToDevice);
conv1x1<<<4,256>>>(512,13,1000,d_result_block3_expand2,d_classifier_conv_weight,d_classifier_conv_bias,d_result_classifier_conv);
float * result = (float*)malloc(sizeof(float) * (1 * 1000 * 13 * 13));
cudaMemcpy(result,d_result_classifier_conv,sizeof(float) * (1 * 1000 * 13 * 13),cudaMemcpyDeviceToHost);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
total_time += time;
cudaEventDestroy( start );
cudaEventDestroy( stop );
//host-side 13 x 13 average pool over each of the 1000 class maps
for(int i=0;i<1000;++i){
float tmp =0;
for(int j=0;j<169;++j){
tmp = tmp + result[i * 169 + j];
}
h_result_classifier[i] = tmp/169;
}
free(result); //the buffer is no longer needed once the averaged scores are stored
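//select the index of the largest averaged class score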
float tmp = 0.0f;
unsigned int class_index = 0;
for(int j = 0; j < 1000; j++)
{
if(h_result_classifier[j] > tmp)
{
tmp = h_result_classifier[j];
class_index = j;
}
}
getLabel(class_index);
printf("\r\npredicted label: %s\r\n", class_label);
cleanup();
printf("Total Kernel execution time: %f\n",total_time);
printf("done\n");
return 0;
}
void getLabel(unsigned int class_index)
{
unsigned int i;
FILE *fp;
fp = fopen("synset_words.txt", "r");
if(fp == NULL)
{
printf("could not open synset_words.txt\n");
class_label[0] = '\0';
return;
}
for(i = 0; i < class_index + 1; i++)
{
fgets(class_label, sizeof(class_label), fp);
}
fclose(fp);
}
void cleanup(){
cudaFree(d_sample);
cudaFree(d_conv1_weight);
cudaFree(d_conv1_bias);
cudaFree(d_result_conv);
cudaFree(d_result_pool1);
cudaFree(d_result_block1_squeeze);
cudaFree(d_result_block1_expand);
cudaFree(d_fire1_squeeze_weight);
cudaFree(d_fire1_squeeze_bias);
cudaFree(d_fire1_expand1x1_weight);
cudaFree(d_fire1_expand1x1_bias);
cudaFree(d_fire1_expand3x3_weight);
cudaFree(d_fire1_expand3x3_bias);
cudaFree(d_fire2_squeeze_weight);
cudaFree(d_fire2_squeeze_bias);
cudaFree(d_fire2_expand1x1_weight);
cudaFree(d_fire2_expand1x1_bias);
cudaFree(d_fire2_expand3x3_weight);
cudaFree(d_fire2_expand3x3_bias);
cudaFree(d_result_block2_squeeze);
cudaFree(d_result_block2_expand);
cudaFree(d_result_pool2);
cudaFree(d_fire3_squeeze_weight);
cudaFree(d_fire3_squeeze_bias);
cudaFree(d_fire3_expand1x1_weight);
cudaFree(d_fire3_expand1x1_bias);
cudaFree(d_fire3_expand3x3_weight);
cudaFree(d_fire3_expand3x3_bias);
cudaFree(d_fire4_squeeze_weight);
cudaFree(d_fire4_squeeze_bias);
cudaFree(d_fire4_expand1x1_weight);
cudaFree(d_fire4_expand1x1_bias);
cudaFree(d_fire4_expand3x3_weight);
cudaFree(d_fire4_expand3x3_bias);
cudaFree(d_result_pool3);
cudaFree(d_result_block3_squeeze1);
cudaFree(d_result_block3_expand1);
cudaFree(d_result_block3_squeeze2);
cudaFree(d_fire5_squeeze_weight);
cudaFree(d_fire5_squeeze_bias);
cudaFree(d_fire5_expand1x1_weight);
cudaFree(d_fire5_expand1x1_bias);
cudaFree(d_fire5_expand3x3_weight);
cudaFree(d_fire5_expand3x3_bias);
cudaFree(d_fire6_squeeze_weight);
cudaFree(d_fire6_squeeze_bias);
cudaFree(d_fire6_expand1x1_weight);
cudaFree(d_fire6_expand1x1_bias);
cudaFree(d_fire6_expand3x3_weight);
cudaFree(d_fire6_expand3x3_bias);
cudaFree(d_fire7_squeeze_weight);
cudaFree(d_fire7_squeeze_bias);
cudaFree(d_fire7_expand1x1_weight);
cudaFree(d_fire7_expand1x1_bias);
cudaFree(d_fire7_expand3x3_weight);
cudaFree(d_fire7_expand3x3_bias);
cudaFree(d_fire8_squeeze_weight);
cudaFree(d_fire8_squeeze_bias);
cudaFree(d_fire8_expand1x1_weight);
cudaFree(d_fire8_expand1x1_bias);
cudaFree(d_fire8_expand3x3_weight);
cudaFree(d_fire8_expand3x3_bias);
cudaFree(d_result_block3_expand2);
cudaFree(d_result_classifier_conv);
cudaFree(d_classifier_conv_weight);
cudaFree(d_classifier_conv_bias);
cudaFree(d_result_classifier);
free(h_result_classifier);
}
|
eb3bb15579a7a1229821176bc37fd4517fd25e00.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
struct compressed_stream_s {
CompressedStreamInfo info;
gpu_inflate_input_s ctl;
};
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8) gpuParseCompressedStripeData(
CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr)
{
__shared__ compressed_stream_s strm_g[4];
compressed_stream_s* const s = &strm_g[threadIdx.x / 32];
int strm_id = blockIdx.x * 4 + (threadIdx.x / 32);
int lane_id = threadIdx.x % 32;
if (strm_id < num_streams && lane_id == 0) { s->info = strm_info[strm_id]; }
__syncthreads();
if (strm_id < num_streams) {
// Walk through the compressed blocks
const uint8_t* cur = s->info.compressed_data;
const uint8_t* end = cur + s->info.compressed_data_size;
uint8_t* uncompressed = s->info.uncompressed_data;
size_t max_uncompressed_size = 0;
uint32_t num_compressed_blocks = 0;
uint32_t num_uncompressed_blocks = 0;
while (cur + 3 < end) {
uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0);
uint32_t is_uncompressed = block_len & 1;
uint32_t uncompressed_size;
gpu_inflate_input_s* init_ctl = nullptr;
block_len >>= 1;
cur += 3;
if (block_len > block_size || cur + block_len > end) {
// Fatal
num_compressed_blocks = 0;
max_uncompressed_size = 0;
break;
}
// TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual
// uncompressed size and avoid waste due to block size alignment For now, rely on the max
// compression ratio to limit waste for the most extreme cases (small single-block streams)
uncompressed_size = (is_uncompressed) ? block_len
: (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr
: block_size;
if (is_uncompressed) {
if (uncompressed_size <= 32) {
// For short blocks, copy the uncompressed data to output
if (uncompressed &&
max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size &&
lane_id < uncompressed_size) {
uncompressed[max_uncompressed_size + lane_id] = cur[lane_id];
}
} else {
init_ctl = s->info.copyctl;
init_ctl = (init_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks)
? &init_ctl[num_uncompressed_blocks]
: nullptr;
num_uncompressed_blocks++;
}
} else {
init_ctl = s->info.decctl;
init_ctl = (init_ctl && num_compressed_blocks < s->info.num_compressed_blocks)
? &init_ctl[num_compressed_blocks]
: nullptr;
num_compressed_blocks++;
}
if (!lane_id && init_ctl) {
s->ctl.srcDevice = const_cast<uint8_t*>(cur);
s->ctl.srcSize = block_len;
s->ctl.dstDevice = uncompressed + max_uncompressed_size;
s->ctl.dstSize = uncompressed_size;
}
__syncwarp();
if (init_ctl && lane_id == 0) *init_ctl = s->ctl;
cur += block_len;
max_uncompressed_size += uncompressed_size;
}
__syncwarp();
if (!lane_id) {
s->info.num_compressed_blocks = num_compressed_blocks;
s->info.num_uncompressed_blocks = num_uncompressed_blocks;
s->info.max_uncompressed_size = max_uncompressed_size;
}
}
__syncthreads();
if (strm_id < num_streams && lane_id == 0) strm_info[strm_id] = s->info;
}
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuPostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams)
{
__shared__ compressed_stream_s strm_g[4];
compressed_stream_s* const s = &strm_g[threadIdx.x / 32];
int strm_id = blockIdx.x * 4 + (threadIdx.x / 32);
int lane_id = threadIdx.x % 32;
if (strm_id < num_streams && lane_id == 0) s->info = strm_info[strm_id];
__syncthreads();
if (strm_id < num_streams &&
s->info.num_compressed_blocks + s->info.num_uncompressed_blocks > 0 &&
s->info.max_uncompressed_size > 0) {
// Walk through the compressed blocks
const uint8_t* cur = s->info.compressed_data;
const uint8_t* end = cur + s->info.compressed_data_size;
const gpu_inflate_input_s* dec_in = s->info.decctl;
const gpu_inflate_status_s* dec_out = s->info.decstatus;
uint8_t* uncompressed_actual = s->info.uncompressed_data;
uint8_t* uncompressed_estimated = uncompressed_actual;
uint32_t num_compressed_blocks = 0;
uint32_t max_compressed_blocks = s->info.num_compressed_blocks;
while (cur + 3 < end) {
uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0);
uint32_t is_uncompressed = block_len & 1;
uint32_t uncompressed_size_est, uncompressed_size_actual;
block_len >>= 1;
cur += 3;
if (cur + block_len > end) { break; }
if (is_uncompressed) {
uncompressed_size_est = block_len;
uncompressed_size_actual = block_len;
} else {
if (num_compressed_blocks > max_compressed_blocks) { break; }
if (shuffle((lane_id == 0) ? dec_out[num_compressed_blocks].status : 0) != 0) {
// Decompression failed, not much point in doing anything else
break;
}
uncompressed_size_est =
shuffle((lane_id == 0) ? *(const uint32_t*)&dec_in[num_compressed_blocks].dstSize : 0);
uncompressed_size_actual = shuffle(
(lane_id == 0) ? *(const uint32_t*)&dec_out[num_compressed_blocks].bytes_written : 0);
}
// In practice, this should never happen with a well-behaved writer, as we would expect the
// uncompressed size to always be equal to the compression block size except for the last
// block
if (uncompressed_actual < uncompressed_estimated) {
// warp-level memmove
for (int i = lane_id; i < (int)uncompressed_size_actual; i += 32) {
uncompressed_actual[i] = uncompressed_estimated[i];
}
}
cur += block_len;
num_compressed_blocks += 1 - is_uncompressed;
uncompressed_estimated += uncompressed_size_est;
uncompressed_actual += uncompressed_size_actual;
}
// Update info with actual uncompressed size
if (!lane_id) {
size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data;
// Set uncompressed size to zero if there were any errors
strm_info[strm_id].max_uncompressed_size =
(num_compressed_blocks == s->info.num_compressed_blocks) ? total_uncompressed_size : 0;
}
}
}
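// Note on the reassembly above: the parse kernel reserves output space per block using an
// estimated size (block_size, or block_len scaled by the maximum compression ratio), while
// the decompressor reports the actual bytes_written. Whenever a block decompressed to fewer
// bytes than estimated, its data sits further right in the buffer than it should, so the
// warp copies it down from uncompressed_estimated (where it was written) to
// uncompressed_actual (its compacted position), keeping the uncompressed stream contiguous.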
/**
* @brief Shared mem state for gpuParseRowGroupIndex
*/
struct rowindex_state_s {
ColumnDesc chunk;
uint32_t rowgroup_start;
uint32_t rowgroup_end;
int is_compressed;
uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2
CompressedStreamInfo strm_info[2];
RowGroup rowgroups[128];
uint32_t compressed_offset[128][2];
};
enum row_entry_state_e {
NOT_FOUND = 0,
GET_LENGTH,
SKIP_VARINT,
SKIP_FIXEDLEN,
STORE_INDEX0,
STORE_INDEX1,
STORE_INDEX2,
};
/**
* @brief Decode a single row group index entry
*
* @param[in,out] s row group index state
* @param[in] start start position in byte stream
* @param[in] end end of byte stream
* @return bytes consumed
*/
static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s* s,
const uint8_t* start,
const uint8_t* end)
{
constexpr uint32_t pb_rowindexentry_id = static_cast<uint32_t>(PB_TYPE_FIXEDLEN) + 8;
const uint8_t* cur = start;
row_entry_state_e state = NOT_FOUND;
uint32_t length = 0, strm_idx_id = s->chunk.skip_count >> 8, idx_id = 1, ci_id = CI_PRESENT,
pos_end = 0;
while (cur < end) {
uint32_t v = 0;
for (uint32_t l = 0; l <= 28; l += 7) {
uint32_t c = (cur < end) ? *cur++ : 0;
v |= (c & 0x7f) << l;
if (c <= 0x7f) break;
}
switch (state) {
case NOT_FOUND:
if (v == pb_rowindexentry_id) {
state = GET_LENGTH;
} else {
v &= 7;
if (v == PB_TYPE_FIXED64)
cur += 8;
else if (v == PB_TYPE_FIXED32)
cur += 4;
else if (v == PB_TYPE_VARINT)
state = SKIP_VARINT;
else if (v == PB_TYPE_FIXEDLEN)
state = SKIP_FIXEDLEN;
}
break;
case SKIP_VARINT: state = NOT_FOUND; break;
case SKIP_FIXEDLEN:
cur += v;
state = NOT_FOUND;
break;
case GET_LENGTH:
if (length == 0) {
length = (uint32_t)(cur + v - start);
state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry
// entry)
} else {
pos_end = min((uint32_t)(cur + v - start), length);
state = STORE_INDEX0;
}
break;
case STORE_INDEX0:
ci_id = (idx_id == (strm_idx_id & 0xff)) ? CI_DATA
: (idx_id == ((strm_idx_id >> 8) & 0xff)) ? CI_DATA2
: CI_PRESENT;
idx_id++;
if (s->is_compressed) {
if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = v;
if (cur >= start + pos_end) return length;
state = STORE_INDEX1;
break;
} else {
if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = 0;
// Fall through to STORE_INDEX1 for uncompressed (always block0)
}
case STORE_INDEX1:
if (ci_id < CI_PRESENT) s->row_index_entry[1][ci_id] = v;
if (cur >= start + pos_end) return length;
state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY &&
s->chunk.encoding_kind != DICTIONARY_V2 &&
(s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY ||
s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR ||
s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT ||
s->chunk.type_kind == DOUBLE))
? STORE_INDEX0
: STORE_INDEX2;
break;
case STORE_INDEX2:
if (ci_id < CI_PRESENT) {
// Boolean columns have an extra byte to indicate the position of the bit within the byte
s->row_index_entry[2][ci_id] = (s->chunk.type_kind == BOOLEAN) ? (v << 3) + *cur : v;
}
if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN) cur++;
if (cur >= start + pos_end) return length;
state = STORE_INDEX0;
break;
}
}
return (uint32_t)(end - start);
}
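// Varint note for the loop at the top of ProtobufParseRowIndexEntry: each byte contributes
// its low 7 bits, least-significant group first, and a set high bit means "more bytes
// follow". Worked example for the two-byte sequence {0xAC, 0x02}:
//   v  = (0xAC & 0x7f) << 0 = 0x2c
//   v |= (0x02 & 0x7f) << 7 = 0x100  ->  v = 0x12c = 300
// The second byte has its high bit clear, so decoding stops there.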
/**
* @brief Decode row group index entries
*
* @param[in,out] s row group index state
* @param[in] num_rowgroups Number of index entries to read
*/
static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s* s, int num_rowgroups)
{
const uint8_t* index_data = s->chunk.streams[CI_INDEX];
int index_data_len = s->chunk.strm_len[CI_INDEX];
for (int i = 0; i < num_rowgroups; i++) {
s->row_index_entry[0][0] = 0;
s->row_index_entry[0][1] = 0;
s->row_index_entry[1][0] = 0;
s->row_index_entry[1][1] = 0;
s->row_index_entry[2][0] = 0;
s->row_index_entry[2][1] = 0;
if (index_data_len > 0) {
int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len);
index_data += len;
index_data_len = max(index_data_len - len, 0);
for (int j = 0; j < 2; j++) {
s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j];
s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j];
s->compressed_offset[i][j] = s->row_index_entry[0][j];
}
}
}
s->chunk.streams[CI_INDEX] = index_data;
s->chunk.strm_len[CI_INDEX] = index_data_len;
}
/**
* @brief Translate block+offset compressed position into an uncompressed offset
*
* @param[in,out] s row group index state
* @param[in] ci_id index to convert (CI_DATA or CI_DATA2)
* @param[in] num_rowgroups Number of index entries
* @param[in] t thread id
*/
static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s* s,
int ci_id,
int num_rowgroups,
int t)
{
int32_t strm_len = s->chunk.strm_len[ci_id];
if (strm_len > 0) {
int32_t compressed_offset = (t < num_rowgroups) ? s->compressed_offset[t][ci_id] : 0;
if (compressed_offset > 0) {
const uint8_t* start = s->strm_info[ci_id].compressed_data;
const uint8_t* cur = start;
const uint8_t* end = cur + s->strm_info[ci_id].compressed_data_size;
gpu_inflate_status_s* decstatus = s->strm_info[ci_id].decstatus;
uint32_t uncomp_offset = 0;
for (;;) {
uint32_t block_len, is_uncompressed;
if (cur + 3 > end || cur + 3 >= start + compressed_offset) { break; }
block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16);
cur += 3;
is_uncompressed = block_len & 1;
block_len >>= 1;
cur += block_len;
if (cur > end) { break; }
if (is_uncompressed) {
uncomp_offset += block_len;
} else {
uncomp_offset += decstatus->bytes_written;
decstatus++;
}
}
s->rowgroups[t].strm_offset[ci_id] += uncomp_offset;
}
}
}
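// Note: the ORC row index stores a position as (offset of the compressed block, offset
// inside the decompressed block). The loop above walks block headers up to that compressed
// offset and sums each block's decompressed size (block_len for stored blocks,
// decstatus->bytes_written for inflated ones), so adding the result to strm_offset turns the
// pair into an absolute offset in the fully decompressed stream.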
/**
* @brief Decode index streams
*
* @param[out] row_groups RowGroup device array [rowgroup][column]
* @param[in] strm_info List of compressed streams (or NULL if uncompressed)
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
* @param[in] rowidx_stride Row index stride
* @param[in] use_base_stride Whether to use base stride obtained from meta or use the computed
* value
*/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuParseRowGroupIndex(RowGroup* row_groups,
CompressedStreamInfo* strm_info,
ColumnDesc* chunks,
uint32_t num_columns,
uint32_t num_stripes,
uint32_t num_rowgroups,
uint32_t rowidx_stride,
bool use_base_stride)
{
__shared__ __align__(16) rowindex_state_s state_g;
rowindex_state_s* const s = &state_g;
uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x;
int t = threadIdx.x;
if (t == 0) {
s->chunk = chunks[chunk_id];
if (strm_info) {
if (s->chunk.strm_len[0] > 0) s->strm_info[0] = strm_info[s->chunk.strm_id[0]];
if (s->chunk.strm_len[1] > 0) s->strm_info[1] = strm_info[s->chunk.strm_id[1]];
}
uint32_t rowgroups_in_chunk = s->chunk.num_rowgroups;
s->rowgroup_start = s->chunk.rowgroup_id;
s->rowgroup_end = s->rowgroup_start + rowgroups_in_chunk;
s->is_compressed = (strm_info != NULL);
}
__syncthreads();
while (s->rowgroup_start < s->rowgroup_end) {
int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128);
int rowgroup_size4, t4, t32;
s->rowgroups[t].chunk_id = chunk_id;
if (t == 0) { gpuReadRowGroupIndexEntries(s, num_rowgroups); }
__syncthreads();
if (s->is_compressed) {
// Convert the block + blk_offset pair into a raw offset into the decompressed stream
if (s->chunk.strm_len[CI_DATA] > 0) {
gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t);
}
if (s->chunk.strm_len[CI_DATA2] > 0) {
gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t);
}
__syncthreads();
}
rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t);
t4 = t & 3;
t32 = t >> 2;
for (int i = t32; i < num_rowgroups; i += 32) {
auto const num_rows =
(use_base_stride) ? rowidx_stride
: row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows;
auto const start_row =
(use_base_stride)
? rowidx_stride
: row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row;
for (int j = t4; j < rowgroup_size4; j += 4) {
((uint32_t*)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] =
((volatile uint32_t*)&s->rowgroups[i])[j];
}
row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows = num_rows;
// Updating in case of struct
row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_child_rows = num_rows;
row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row = start_row;
}
__syncthreads();
if (t == 0) { s->rowgroup_start += num_rowgroups; }
__syncthreads();
}
}
void __host__ ParseCompressedStripeData(CompressedStreamInfo* strm_info,
int32_t num_streams,
uint32_t compression_block_size,
uint32_t log2maxcr,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block
hipLaunchKernelGGL(( gpuParseCompressedStripeData), dim3(dim_grid), dim3(dim_block), 0, stream.value(),
strm_info, num_streams, compression_block_size, log2maxcr);
}
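// Launch-geometry note: one warp services one stream and a 128-thread block therefore
// carries four streams, hence dim_grid.x = ceil(num_streams / 4). For example,
// num_streams = 10 gives (10 + 3) >> 2 = 3 blocks, i.e. 12 warp slots, of which the last two
// fail the strm_id < num_streams check inside the kernel and do nothing.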
void __host__ PostDecompressionReassemble(CompressedStreamInfo* strm_info,
int32_t num_streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block
hipLaunchKernelGGL(( gpuPostDecompressionReassemble), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_info,
num_streams);
}
/**
* @brief Launches kernel for constructing rowgroup from index streams
*
* @param[out] row_groups RowGroup device array [rowgroup][column]
* @param[in] strm_info List of compressed streams (or NULL if uncompressed)
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
* @param[in] rowidx_stride Row index stride
* @param[in] use_base_stride Whether to use base stride obtained from meta or the computed value
* @param[in] stream CUDA stream used for device memory operations and kernel launches
*/
void __host__ ParseRowGroupIndex(RowGroup* row_groups,
CompressedStreamInfo* strm_info,
ColumnDesc* chunks,
uint32_t num_columns,
uint32_t num_stripes,
uint32_t num_rowgroups,
uint32_t rowidx_stride,
bool use_base_stride,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid(num_columns, num_stripes); // 1 column chunk per block
hipLaunchKernelGGL(( gpuParseRowGroupIndex), dim3(dim_grid), dim3(dim_block), 0, stream.value(), row_groups,
strm_info,
chunks,
num_columns,
num_stripes,
num_rowgroups,
rowidx_stride,
use_base_stride);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
eb3bb15579a7a1229821176bc37fd4517fd25e00.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
struct compressed_stream_s {
CompressedStreamInfo info;
gpu_inflate_input_s ctl;
};
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8) gpuParseCompressedStripeData(
CompressedStreamInfo* strm_info, int32_t num_streams, uint32_t block_size, uint32_t log2maxcr)
{
__shared__ compressed_stream_s strm_g[4];
compressed_stream_s* const s = &strm_g[threadIdx.x / 32];
int strm_id = blockIdx.x * 4 + (threadIdx.x / 32);
int lane_id = threadIdx.x % 32;
if (strm_id < num_streams && lane_id == 0) { s->info = strm_info[strm_id]; }
__syncthreads();
if (strm_id < num_streams) {
// Walk through the compressed blocks
const uint8_t* cur = s->info.compressed_data;
const uint8_t* end = cur + s->info.compressed_data_size;
uint8_t* uncompressed = s->info.uncompressed_data;
size_t max_uncompressed_size = 0;
uint32_t num_compressed_blocks = 0;
uint32_t num_uncompressed_blocks = 0;
while (cur + 3 < end) {
uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0);
uint32_t is_uncompressed = block_len & 1;
uint32_t uncompressed_size;
gpu_inflate_input_s* init_ctl = nullptr;
block_len >>= 1;
cur += 3;
if (block_len > block_size || cur + block_len > end) {
// Fatal
num_compressed_blocks = 0;
max_uncompressed_size = 0;
break;
}
      // TBD: For some codecs like snappy, it wouldn't be too difficult to get the actual
      // uncompressed size and avoid waste due to block size alignment. For now, rely on the
      // max compression ratio to limit waste for the most extreme cases (small single-block
      // streams).
uncompressed_size = (is_uncompressed) ? block_len
: (block_len < (block_size >> log2maxcr)) ? block_len << log2maxcr
: block_size;
if (is_uncompressed) {
if (uncompressed_size <= 32) {
// For short blocks, copy the uncompressed data to output
if (uncompressed &&
max_uncompressed_size + uncompressed_size <= s->info.max_uncompressed_size &&
lane_id < uncompressed_size) {
uncompressed[max_uncompressed_size + lane_id] = cur[lane_id];
}
} else {
init_ctl = s->info.copyctl;
init_ctl = (init_ctl && num_uncompressed_blocks < s->info.num_uncompressed_blocks)
? &init_ctl[num_uncompressed_blocks]
: nullptr;
num_uncompressed_blocks++;
}
} else {
init_ctl = s->info.decctl;
init_ctl = (init_ctl && num_compressed_blocks < s->info.num_compressed_blocks)
? &init_ctl[num_compressed_blocks]
: nullptr;
num_compressed_blocks++;
}
if (!lane_id && init_ctl) {
s->ctl.srcDevice = const_cast<uint8_t*>(cur);
s->ctl.srcSize = block_len;
s->ctl.dstDevice = uncompressed + max_uncompressed_size;
s->ctl.dstSize = uncompressed_size;
}
__syncwarp();
if (init_ctl && lane_id == 0) *init_ctl = s->ctl;
cur += block_len;
max_uncompressed_size += uncompressed_size;
}
__syncwarp();
if (!lane_id) {
s->info.num_compressed_blocks = num_compressed_blocks;
s->info.num_uncompressed_blocks = num_uncompressed_blocks;
s->info.max_uncompressed_size = max_uncompressed_size;
}
}
__syncthreads();
if (strm_id < num_streams && lane_id == 0) strm_info[strm_id] = s->info;
}
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuPostDecompressionReassemble(CompressedStreamInfo* strm_info, int32_t num_streams)
{
__shared__ compressed_stream_s strm_g[4];
compressed_stream_s* const s = &strm_g[threadIdx.x / 32];
int strm_id = blockIdx.x * 4 + (threadIdx.x / 32);
int lane_id = threadIdx.x % 32;
if (strm_id < num_streams && lane_id == 0) s->info = strm_info[strm_id];
__syncthreads();
if (strm_id < num_streams &&
s->info.num_compressed_blocks + s->info.num_uncompressed_blocks > 0 &&
s->info.max_uncompressed_size > 0) {
// Walk through the compressed blocks
const uint8_t* cur = s->info.compressed_data;
const uint8_t* end = cur + s->info.compressed_data_size;
const gpu_inflate_input_s* dec_in = s->info.decctl;
const gpu_inflate_status_s* dec_out = s->info.decstatus;
uint8_t* uncompressed_actual = s->info.uncompressed_data;
uint8_t* uncompressed_estimated = uncompressed_actual;
uint32_t num_compressed_blocks = 0;
uint32_t max_compressed_blocks = s->info.num_compressed_blocks;
while (cur + 3 < end) {
uint32_t block_len = shuffle((lane_id == 0) ? cur[0] | (cur[1] << 8) | (cur[2] << 16) : 0);
uint32_t is_uncompressed = block_len & 1;
uint32_t uncompressed_size_est, uncompressed_size_actual;
block_len >>= 1;
cur += 3;
if (cur + block_len > end) { break; }
if (is_uncompressed) {
uncompressed_size_est = block_len;
uncompressed_size_actual = block_len;
} else {
if (num_compressed_blocks > max_compressed_blocks) { break; }
if (shuffle((lane_id == 0) ? dec_out[num_compressed_blocks].status : 0) != 0) {
// Decompression failed, not much point in doing anything else
break;
}
uncompressed_size_est =
shuffle((lane_id == 0) ? *(const uint32_t*)&dec_in[num_compressed_blocks].dstSize : 0);
uncompressed_size_actual = shuffle(
(lane_id == 0) ? *(const uint32_t*)&dec_out[num_compressed_blocks].bytes_written : 0);
}
// In practice, this should never happen with a well-behaved writer, as we would expect the
// uncompressed size to always be equal to the compression block size except for the last
// block
if (uncompressed_actual < uncompressed_estimated) {
// warp-level memmove
for (int i = lane_id; i < (int)uncompressed_size_actual; i += 32) {
uncompressed_actual[i] = uncompressed_estimated[i];
}
}
cur += block_len;
num_compressed_blocks += 1 - is_uncompressed;
uncompressed_estimated += uncompressed_size_est;
uncompressed_actual += uncompressed_size_actual;
}
// Update info with actual uncompressed size
if (!lane_id) {
size_t total_uncompressed_size = uncompressed_actual - s->info.uncompressed_data;
// Set uncompressed size to zero if there were any errors
strm_info[strm_id].max_uncompressed_size =
(num_compressed_blocks == s->info.num_compressed_blocks) ? total_uncompressed_size : 0;
}
}
}
/**
* @brief Shared mem state for gpuParseRowGroupIndex
*/
struct rowindex_state_s {
ColumnDesc chunk;
uint32_t rowgroup_start;
uint32_t rowgroup_end;
int is_compressed;
uint32_t row_index_entry[3][CI_PRESENT]; // NOTE: Assumes CI_PRESENT follows CI_DATA and CI_DATA2
CompressedStreamInfo strm_info[2];
RowGroup rowgroups[128];
uint32_t compressed_offset[128][2];
};
enum row_entry_state_e {
NOT_FOUND = 0,
GET_LENGTH,
SKIP_VARINT,
SKIP_FIXEDLEN,
STORE_INDEX0,
STORE_INDEX1,
STORE_INDEX2,
};
/**
* @brief Decode a single row group index entry
*
* @param[in,out] s row group index state
* @param[in] start start position in byte stream
* @param[in] end end of byte stream
* @return bytes consumed
*/
static uint32_t __device__ ProtobufParseRowIndexEntry(rowindex_state_s* s,
const uint8_t* start,
const uint8_t* end)
{
constexpr uint32_t pb_rowindexentry_id = static_cast<uint32_t>(PB_TYPE_FIXEDLEN) + 8;
const uint8_t* cur = start;
row_entry_state_e state = NOT_FOUND;
uint32_t length = 0, strm_idx_id = s->chunk.skip_count >> 8, idx_id = 1, ci_id = CI_PRESENT,
pos_end = 0;
while (cur < end) {
uint32_t v = 0;
for (uint32_t l = 0; l <= 28; l += 7) {
uint32_t c = (cur < end) ? *cur++ : 0;
v |= (c & 0x7f) << l;
if (c <= 0x7f) break;
}
switch (state) {
case NOT_FOUND:
if (v == pb_rowindexentry_id) {
state = GET_LENGTH;
} else {
v &= 7;
if (v == PB_TYPE_FIXED64)
cur += 8;
else if (v == PB_TYPE_FIXED32)
cur += 4;
else if (v == PB_TYPE_VARINT)
state = SKIP_VARINT;
else if (v == PB_TYPE_FIXEDLEN)
state = SKIP_FIXEDLEN;
}
break;
case SKIP_VARINT: state = NOT_FOUND; break;
case SKIP_FIXEDLEN:
cur += v;
state = NOT_FOUND;
break;
case GET_LENGTH:
if (length == 0) {
length = (uint32_t)(cur + v - start);
state = NOT_FOUND; // Scan for positions (same field id & low-level type as RowIndexEntry
// entry)
} else {
pos_end = min((uint32_t)(cur + v - start), length);
state = STORE_INDEX0;
}
break;
case STORE_INDEX0:
ci_id = (idx_id == (strm_idx_id & 0xff)) ? CI_DATA
: (idx_id == ((strm_idx_id >> 8) & 0xff)) ? CI_DATA2
: CI_PRESENT;
idx_id++;
if (s->is_compressed) {
if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = v;
if (cur >= start + pos_end) return length;
state = STORE_INDEX1;
break;
} else {
if (ci_id < CI_PRESENT) s->row_index_entry[0][ci_id] = 0;
// Fall through to STORE_INDEX1 for uncompressed (always block0)
}
case STORE_INDEX1:
if (ci_id < CI_PRESENT) s->row_index_entry[1][ci_id] = v;
if (cur >= start + pos_end) return length;
state = (ci_id == CI_DATA && s->chunk.encoding_kind != DICTIONARY &&
s->chunk.encoding_kind != DICTIONARY_V2 &&
(s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY ||
s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR ||
s->chunk.type_kind == DECIMAL || s->chunk.type_kind == FLOAT ||
s->chunk.type_kind == DOUBLE))
? STORE_INDEX0
: STORE_INDEX2;
break;
case STORE_INDEX2:
if (ci_id < CI_PRESENT) {
// Boolean columns have an extra byte to indicate the position of the bit within the byte
s->row_index_entry[2][ci_id] = (s->chunk.type_kind == BOOLEAN) ? (v << 3) + *cur : v;
}
if (ci_id == CI_PRESENT || s->chunk.type_kind == BOOLEAN) cur++;
if (cur >= start + pos_end) return length;
state = STORE_INDEX0;
break;
}
}
return (uint32_t)(end - start);
}
/**
* @brief Decode row group index entries
*
* @param[in,out] s row group index state
* @param[in] num_rowgroups Number of index entries to read
*/
static __device__ void gpuReadRowGroupIndexEntries(rowindex_state_s* s, int num_rowgroups)
{
const uint8_t* index_data = s->chunk.streams[CI_INDEX];
int index_data_len = s->chunk.strm_len[CI_INDEX];
for (int i = 0; i < num_rowgroups; i++) {
s->row_index_entry[0][0] = 0;
s->row_index_entry[0][1] = 0;
s->row_index_entry[1][0] = 0;
s->row_index_entry[1][1] = 0;
s->row_index_entry[2][0] = 0;
s->row_index_entry[2][1] = 0;
if (index_data_len > 0) {
int len = ProtobufParseRowIndexEntry(s, index_data, index_data + index_data_len);
index_data += len;
index_data_len = max(index_data_len - len, 0);
for (int j = 0; j < 2; j++) {
s->rowgroups[i].strm_offset[j] = s->row_index_entry[1][j];
s->rowgroups[i].run_pos[j] = s->row_index_entry[2][j];
s->compressed_offset[i][j] = s->row_index_entry[0][j];
}
}
}
s->chunk.streams[CI_INDEX] = index_data;
s->chunk.strm_len[CI_INDEX] = index_data_len;
}
/**
* @brief Translate block+offset compressed position into an uncompressed offset
*
* @param[in,out] s row group index state
* @param[in] ci_id index to convert (CI_DATA or CI_DATA2)
* @param[in] num_rowgroups Number of index entries
* @param[in] t thread id
*/
static __device__ void gpuMapRowIndexToUncompressed(rowindex_state_s* s,
int ci_id,
int num_rowgroups,
int t)
{
int32_t strm_len = s->chunk.strm_len[ci_id];
if (strm_len > 0) {
int32_t compressed_offset = (t < num_rowgroups) ? s->compressed_offset[t][ci_id] : 0;
if (compressed_offset > 0) {
const uint8_t* start = s->strm_info[ci_id].compressed_data;
const uint8_t* cur = start;
const uint8_t* end = cur + s->strm_info[ci_id].compressed_data_size;
gpu_inflate_status_s* decstatus = s->strm_info[ci_id].decstatus;
uint32_t uncomp_offset = 0;
for (;;) {
uint32_t block_len, is_uncompressed;
if (cur + 3 > end || cur + 3 >= start + compressed_offset) { break; }
block_len = cur[0] | (cur[1] << 8) | (cur[2] << 16);
cur += 3;
is_uncompressed = block_len & 1;
block_len >>= 1;
cur += block_len;
if (cur > end) { break; }
if (is_uncompressed) {
uncomp_offset += block_len;
} else {
uncomp_offset += decstatus->bytes_written;
decstatus++;
}
}
s->rowgroups[t].strm_offset[ci_id] += uncomp_offset;
}
}
}
/**
* @brief Decode index streams
*
* @param[out] row_groups RowGroup device array [rowgroup][column]
* @param[in] strm_info List of compressed streams (or NULL if uncompressed)
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
* @param[in] rowidx_stride Row index stride
* @param[in] use_base_stride Whether to use base stride obtained from meta or use the computed
* value
*/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128, 8)
gpuParseRowGroupIndex(RowGroup* row_groups,
CompressedStreamInfo* strm_info,
ColumnDesc* chunks,
uint32_t num_columns,
uint32_t num_stripes,
uint32_t num_rowgroups,
uint32_t rowidx_stride,
bool use_base_stride)
{
__shared__ __align__(16) rowindex_state_s state_g;
rowindex_state_s* const s = &state_g;
uint32_t chunk_id = blockIdx.y * num_columns + blockIdx.x;
int t = threadIdx.x;
if (t == 0) {
s->chunk = chunks[chunk_id];
if (strm_info) {
if (s->chunk.strm_len[0] > 0) s->strm_info[0] = strm_info[s->chunk.strm_id[0]];
if (s->chunk.strm_len[1] > 0) s->strm_info[1] = strm_info[s->chunk.strm_id[1]];
}
uint32_t rowgroups_in_chunk = s->chunk.num_rowgroups;
s->rowgroup_start = s->chunk.rowgroup_id;
s->rowgroup_end = s->rowgroup_start + rowgroups_in_chunk;
s->is_compressed = (strm_info != NULL);
}
__syncthreads();
while (s->rowgroup_start < s->rowgroup_end) {
int num_rowgroups = min(s->rowgroup_end - s->rowgroup_start, 128);
int rowgroup_size4, t4, t32;
s->rowgroups[t].chunk_id = chunk_id;
if (t == 0) { gpuReadRowGroupIndexEntries(s, num_rowgroups); }
__syncthreads();
if (s->is_compressed) {
// Convert the block + blk_offset pair into a raw offset into the decompressed stream
if (s->chunk.strm_len[CI_DATA] > 0) {
gpuMapRowIndexToUncompressed(s, CI_DATA, num_rowgroups, t);
}
if (s->chunk.strm_len[CI_DATA2] > 0) {
gpuMapRowIndexToUncompressed(s, CI_DATA2, num_rowgroups, t);
}
__syncthreads();
}
rowgroup_size4 = sizeof(RowGroup) / sizeof(uint32_t);
t4 = t & 3;
t32 = t >> 2;
for (int i = t32; i < num_rowgroups; i += 32) {
auto const num_rows =
(use_base_stride) ? rowidx_stride
: row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows;
auto const start_row =
(use_base_stride)
? rowidx_stride
: row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row;
for (int j = t4; j < rowgroup_size4; j += 4) {
((uint32_t*)&row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x])[j] =
((volatile uint32_t*)&s->rowgroups[i])[j];
}
row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_rows = num_rows;
// Updating in case of struct
row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].num_child_rows = num_rows;
row_groups[(s->rowgroup_start + i) * num_columns + blockIdx.x].start_row = start_row;
}
__syncthreads();
if (t == 0) { s->rowgroup_start += num_rowgroups; }
__syncthreads();
}
}
void __host__ ParseCompressedStripeData(CompressedStreamInfo* strm_info,
int32_t num_streams,
uint32_t compression_block_size,
uint32_t log2maxcr,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block
gpuParseCompressedStripeData<<<dim_grid, dim_block, 0, stream.value()>>>(
strm_info, num_streams, compression_block_size, log2maxcr);
}
void __host__ PostDecompressionReassemble(CompressedStreamInfo* strm_info,
int32_t num_streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_streams + 3) >> 2, 1); // 1 stream per warp, 4 warps per block
gpuPostDecompressionReassemble<<<dim_grid, dim_block, 0, stream.value()>>>(strm_info,
num_streams);
}
/**
* @brief Launches kernel for constructing rowgroup from index streams
*
* @param[out] row_groups RowGroup device array [rowgroup][column]
* @param[in] strm_info List of compressed streams (or NULL if uncompressed)
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] num_rowgroups Number of row groups
* @param[in] rowidx_stride Row index stride
* @param[in] use_base_stride Whether to use base stride obtained from meta or the computed value
* @param[in] stream CUDA stream used for device memory operations and kernel launches
*/
void __host__ ParseRowGroupIndex(RowGroup* row_groups,
CompressedStreamInfo* strm_info,
ColumnDesc* chunks,
uint32_t num_columns,
uint32_t num_stripes,
uint32_t num_rowgroups,
uint32_t rowidx_stride,
bool use_base_stride,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid(num_columns, num_stripes); // 1 column chunk per block
gpuParseRowGroupIndex<<<dim_grid, dim_block, 0, stream.value()>>>(row_groups,
strm_info,
chunks,
num_columns,
num_stripes,
num_rowgroups,
rowidx_stride,
use_base_stride);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
2ae300d942e1392f36515afad9460769f998e3ec.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/nll_loss_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/nll_loss.h"
namespace phi {
template <typename T, typename Context>
void NllLossRawKernel(const Context& dev_ctx,
const DenseTensor& input,
const DenseTensor& label,
const paddle::optional<DenseTensor>& weight,
int64_t ignore_index,
const std::string& reduction,
DenseTensor* out,
DenseTensor* total_weight) {
auto* x = &input;
auto x_data = x->data<T>();
auto out_data = dev_ctx.template Alloc<T>(out);
auto total_weight_data = dev_ctx.template Alloc<T>(total_weight);
auto label_data = label.data<int64_t>();
auto weight_data = weight.get_ptr() ? weight.get_ptr()->data<T>() : nullptr;
#ifdef PADDLE_WITH_HIP
hipMemset(total_weight_data, 0, sizeof(T));
#else
hipMemset(total_weight_data, 0, sizeof(T));
#endif
auto x_dims = x->dims();
auto batch_size = x_dims[0];
auto n_classes = x_dims[1];
int64_t size_average = (int64_t)(reduction == "mean");
if (x_dims.size() == 2) {
int blocks = NumBlocks(batch_size);
int threads = kNumCUDAThreads;
if (reduction == "none") {
      hipLaunchKernelGGL((GPUNLLLossForward1D_no_reduce<T>),
                         dim3(blocks),
                         dim3(threads),
                         0,
                         dev_ctx.stream(),
                         out_data,
                         x_data,
                         label_data,
                         weight_data,
                         batch_size,
                         n_classes,
                         ignore_index);
} else {
hipLaunchKernelGGL(( GPUNLLLossForward1D_with_reduce<T>), dim3(1), dim3(NTHREADS), 0, dev_ctx.stream(),
out_data,
total_weight_data,
x_data,
label_data,
weight_data,
batch_size,
n_classes,
size_average,
ignore_index);
}
} else if (x_dims.size() == 4) {
const auto in_dim2 = x_dims[2];
const auto in_dim3 = x_dims[3];
const auto map_size = in_dim2 * in_dim3;
const auto out_numel = batch_size * in_dim2 * in_dim3;
int blocks = NumBlocks(out_numel);
int threads = kNumCUDAThreads;
if (reduction == "none") {
      hipLaunchKernelGGL((GPUNLLLossForward2D_no_reduce<T>),
                         dim3(blocks),
                         dim3(threads),
                         0,
                         dev_ctx.stream(),
                         out_data,
                         x_data,
                         label_data,
                         weight_data,
                         batch_size,
                         n_classes,
                         in_dim2,
                         in_dim3,
                         ignore_index);
} else {
int blocks_per_sample = NumBlocks(map_size) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
      hipLaunchKernelGGL((GPUNLLLossForward2D_with_reduce<T>),
                         dim3(total_blocks),
                         dim3(threads),
                         0,
                         dev_ctx.stream(),
                         out_data,
                         total_weight_data,
                         x_data,
                         label_data,
                         weight_data,
                         batch_size,
                         n_classes,
                         map_size,
                         blocks_per_sample,
                         ignore_index);
if (size_average) {
hipLaunchKernelGGL(( GPUNLLLossForward2D_size_average<T>), dim3(1), dim3(1), 0, dev_ctx.stream(),
out_data, total_weight_data);
}
}
}
}
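// Dispatch note: 2-D inputs ([batch, classes]) cover one element per sample for the "none"
// reduction and use a single block for the reduced path, while 4-D inputs
// ([batch, classes, H, W]) treat every (sample, pixel) pair as an output element. For the
// reduced 4-D path, blocks_per_sample is derived from the per-sample map size (H * W) and
// clamped to at least 1, giving a grid of blocks_per_sample * batch_size blocks; the
// trailing single-thread size_average kernel then presumably normalizes the accumulated
// loss by the accumulated weight when reduction == "mean".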
} // namespace phi
PD_REGISTER_KERNEL(
nll_loss, GPU, ALL_LAYOUT, phi::NllLossRawKernel, float, double) {}
|
2ae300d942e1392f36515afad9460769f998e3ec.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/nll_loss_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/nll_loss.h"
namespace phi {
template <typename T, typename Context>
void NllLossRawKernel(const Context& dev_ctx,
const DenseTensor& input,
const DenseTensor& label,
const paddle::optional<DenseTensor>& weight,
int64_t ignore_index,
const std::string& reduction,
DenseTensor* out,
DenseTensor* total_weight) {
auto* x = &input;
auto x_data = x->data<T>();
auto out_data = dev_ctx.template Alloc<T>(out);
auto total_weight_data = dev_ctx.template Alloc<T>(total_weight);
auto label_data = label.data<int64_t>();
auto weight_data = weight.get_ptr() ? weight.get_ptr()->data<T>() : nullptr;
#ifdef PADDLE_WITH_HIP
hipMemset(total_weight_data, 0, sizeof(T));
#else
cudaMemset(total_weight_data, 0, sizeof(T));
#endif
auto x_dims = x->dims();
auto batch_size = x_dims[0];
auto n_classes = x_dims[1];
int64_t size_average = (int64_t)(reduction == "mean");
if (x_dims.size() == 2) {
int blocks = NumBlocks(batch_size);
int threads = kNumCUDAThreads;
if (reduction == "none") {
GPUNLLLossForward1D_no_reduce<
T><<<blocks, threads, 0, dev_ctx.stream()>>>(out_data,
x_data,
label_data,
weight_data,
batch_size,
n_classes,
ignore_index);
} else {
GPUNLLLossForward1D_with_reduce<T><<<1, NTHREADS, 0, dev_ctx.stream()>>>(
out_data,
total_weight_data,
x_data,
label_data,
weight_data,
batch_size,
n_classes,
size_average,
ignore_index);
}
} else if (x_dims.size() == 4) {
const auto in_dim2 = x_dims[2];
const auto in_dim3 = x_dims[3];
const auto map_size = in_dim2 * in_dim3;
const auto out_numel = batch_size * in_dim2 * in_dim3;
int blocks = NumBlocks(out_numel);
int threads = kNumCUDAThreads;
if (reduction == "none") {
GPUNLLLossForward2D_no_reduce<
T><<<blocks, threads, 0, dev_ctx.stream()>>>(out_data,
x_data,
label_data,
weight_data,
batch_size,
n_classes,
in_dim2,
in_dim3,
ignore_index);
} else {
int blocks_per_sample = NumBlocks(map_size) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
GPUNLLLossForward2D_with_reduce<
T><<<total_blocks, threads, 0, dev_ctx.stream()>>>(out_data,
total_weight_data,
x_data,
label_data,
weight_data,
batch_size,
n_classes,
map_size,
blocks_per_sample,
ignore_index);
if (size_average) {
GPUNLLLossForward2D_size_average<T><<<1, 1, 0, dev_ctx.stream()>>>(
out_data, total_weight_data);
}
}
}
}
} // namespace phi
PD_REGISTER_KERNEL(
nll_loss, GPU, ALL_LAYOUT, phi::NllLossRawKernel, float, double) {}
|
af1e0b1a482fa4b8a14680826678e8d304c13a48.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
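// Descriptive note: each block of average_snips accumulates one template/cluster
// (blockIdx.x). Threads are laid out as (tidx, tidy) = (PC rank, nearby-channel slot);
// every spike whose id matches the block's template adds its projection from uproj into the
// matching (rank, channel) entry of WU, with iC mapping the spike's nearby-channel slot to
// an absolute channel. The cmax argument is unused here.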
__global__ void average_snips(const double *Params, const int *iC, const int *call, const int *id, const float *uproj, const float *cmax, float *WU){
int my_chan, this_chan, tidx, tidy, bid, ind, Nspikes, NrankPC, NchanNear, Nchan;
float xsum = 0.0f;
Nspikes = (int) Params[0];
NrankPC = (int) Params[1];
Nchan = (int) Params[7];
NchanNear = (int) Params[6];
tidx = threadIdx.x;
tidy = threadIdx.y;
bid = blockIdx.x;
for(ind=0; ind<Nspikes;ind++)
if (id[ind]==bid){
my_chan = call[ind];
this_chan = iC[tidy + NchanNear * my_chan];
xsum = uproj[tidx + NrankPC*tidy + NrankPC*NchanNear * ind];
WU[tidx + NrankPC*this_chan + NrankPC*Nchan * bid] += xsum;
}
}
|
af1e0b1a482fa4b8a14680826678e8d304c13a48.cu
|
#include "includes.h"
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void average_snips(const double *Params, const int *iC, const int *call, const int *id, const float *uproj, const float *cmax, float *WU){
int my_chan, this_chan, tidx, tidy, bid, ind, Nspikes, NrankPC, NchanNear, Nchan;
float xsum = 0.0f;
Nspikes = (int) Params[0];
NrankPC = (int) Params[1];
Nchan = (int) Params[7];
NchanNear = (int) Params[6];
tidx = threadIdx.x;
tidy = threadIdx.y;
bid = blockIdx.x;
for(ind=0; ind<Nspikes;ind++)
if (id[ind]==bid){
my_chan = call[ind];
this_chan = iC[tidy + NchanNear * my_chan];
xsum = uproj[tidx + NrankPC*tidy + NrankPC*NchanNear * ind];
WU[tidx + NrankPC*this_chan + NrankPC*Nchan * bid] += xsum;
}
}
|
6b087bc7fa3aea71f749b18fdbd430f2696bac21.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void modcpy(void *destination, void *source, size_t destination_size, size_t source_size){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int pos;
int ds = destination_size/sizeof(int4), ss = source_size/sizeof(int4);
for(int i = idx; i < ds; i += gridDim.x * blockDim.x){
pos = i % ss;
reinterpret_cast<int4*>(destination)[i] = reinterpret_cast<int4*>(source)[pos];
}
}
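// modcpy tiles `source` into `destination` in 16-byte (int4) chunks via a grid-stride loop,
// so destination chunk i receives source chunk i % ss; e.g. destination_size = 64 and
// source_size = 32 give ds = 4, ss = 2 and the copy order 0, 1, 0, 1. Because both sizes are
// divided by sizeof(int4), any tail bytes that are not a multiple of 16 are ignored. Usage
// sketch below (kept out of compilation on purpose; it assumes the HIP runtime is available
// through "includes.h", and every name in it is illustrative only):
#if 0
static void tile_pattern_example()
{
  const size_t dst_bytes = 1024, src_bytes = 256;  // both multiples of sizeof(int4)
  void *d_dst = nullptr, *d_src = nullptr;
  hipMalloc(&d_dst, dst_bytes);
  hipMalloc(&d_src, src_bytes);
  // ... fill d_src with the 256-byte pattern to repeat ...
  const int threads = 128;
  const int blocks  = (int)((dst_bytes / sizeof(int4) + threads - 1) / threads);
  hipLaunchKernelGGL(modcpy, dim3(blocks), dim3(threads), 0, 0,
                     d_dst, d_src, dst_bytes, src_bytes);
  hipDeviceSynchronize();
  hipFree(d_src);
  hipFree(d_dst);
}
#endif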
|
6b087bc7fa3aea71f749b18fdbd430f2696bac21.cu
|
#include "includes.h"
__global__ void modcpy(void *destination, void *source, size_t destination_size, size_t source_size){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int pos;
int ds = destination_size/sizeof(int4), ss = source_size/sizeof(int4);
for(int i = idx; i < ds; i += gridDim.x * blockDim.x){
pos = i % ss;
reinterpret_cast<int4*>(destination)[i] = reinterpret_cast<int4*>(source)[pos];
}
}
|
55d8d3bb9773b3807bba2a155206a1424d09b7b9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "Network.cuh"
#include <stdio.h>
#include <assert.h>
#include "FERNIntegrator.cuh"
Network::Network()
{
}
void Network::loadNetwork(const char *filename)
{
// Unused variables
char isotopeLabel[10];
unsigned short A;
fern_real massExcess;
fern_real pf;
fern_real Y;
FILE *file = fopen(filename, "r");
// Exit if the file doesn't exist or can't be read
if (!file)
{
fprintf(stderr, "Could not read file '%s'\n", filename);
exit(1);
}
// Read 4 lines at a time
for (int n = 0; n < species; n++)
{
int status;
// Line #1
#ifdef FERN_SINGLE
status = fscanf(file, "%s %hu %hhu %hhu %f %f\n",
isotopeLabel, &A, &Z[n], &N[n], &Y, &massExcess);
#else
status = fscanf(file, "%s %hu %hhu %hhu %lf %lf\n",
isotopeLabel, &A, &Z[n], &N[n], &Y, &massExcess);
#endif
if (status == EOF)
break;
// Line #2...4
for (int i = 0; i < 8 * 3; i++)
{
#ifdef FERN_SINGLE
status = fscanf(file, "%f", &pf);
#else
status = fscanf(file, "%lf", &pf);
#endif
}
}
}
void Network::loadReactions(const char *filename)
{
static const bool displayInput = false;
// Unused variables
char reactionLabel[100];
int RGclass;
int RGmemberIndex;
int reaclibClass;
int isEC;
int isReverseR;
int ProductIndex[4];
// Allocate the host-only memory to be used by parseFlux()
int *numProducts = new int [reactions];
// Each element of these dynamic arrays are pointers to static arrays of size 4.
vec_4i *reactantZ = new vec_4i [reactions]; // [reactions]
vec_4i *reactantN = new vec_4i [reactions]; // [reactions]
vec_4i *productZ = new vec_4i [reactions]; // [reactions]
vec_4i *productN = new vec_4i [reactions]; // [reactions]
FILE *file = fopen(filename, "r");
// Exit if the file doesn't exist or can't be read
if (!file)
{
fprintf(stderr, "File Input Error: No readable file named %s\n", filename);
exit(1);
}
// Read eight lines at a time
for (int n = 0; n < reactions; n++)
{
int status;
// Line #1
#ifdef FERN_SINGLE
status = fscanf(file, "%s %d %d %d %hhu %d %d %d %f %f",
reactionLabel, &RGclass, &RGmemberIndex, &reaclibClass,
&numReactingSpecies[n], &numProducts[n], &isEC, &isReverseR,
&statFac[n], &Q[n]);
#else
status = fscanf(file, "%s %d %d %d %hhu %d %d %d %lf %lf",
reactionLabel, &RGclass, &RGmemberIndex, &reaclibClass,
&numReactingSpecies[n], &numProducts[n], &isEC, &isReverseR,
&statFac[n], &Q[n]);
#endif
if (status == EOF)
break;
if (displayInput)
{
printf("Reaction Index = %d\n", n);
printf("isReverseR = %d reaclibIndex = %d\n",
isReverseR, reaclibClass);
printf("%s %d %d %d %d %d %d %d %f %f\n",
reactionLabel, RGclass, RGmemberIndex, reaclibClass,
numReactingSpecies[n], numProducts[n], isEC,
isReverseR, statFac[n], Q[n]);
}
// Line #2
if (displayInput)
printf("P: { ");
for (int i = 0; i < 7; i++)
{
#ifdef FERN_SINGLE
status = fscanf(file, "%f", &P[i][n]);
#else
status = fscanf(file, "%lf", &P[i][n]);
#endif
if (displayInput)
printf("%f, ", P[i][n]);
}
if (displayInput)
printf("}\n");
// Line #3
for (int mm = 0; mm < numReactingSpecies[n]; mm++)
{
status = fscanf(file, "%d", &reactantZ[n][mm]);
if (displayInput)
printf("\tReactant[%d]: Z=%d\n", mm, reactantZ[n][mm]);
}
// Line #4
for (int mm = 0; mm < numReactingSpecies[n]; mm++)
{
status = fscanf(file, "%d", &reactantN[n][mm]);
if (displayInput)
printf("\tReactant[%d]: N=%d\n", mm, reactantN[n][mm]);
}
// Line #5
for (int mm = 0; mm < numProducts[n]; mm++)
{
status = fscanf(file, "%d", &productZ[n][mm]);
if (displayInput)
printf("\tProduct[%d]: Z=%d\n", mm, productZ[n][mm]);
}
// Line #6
for (int mm = 0; mm < numProducts[n]; mm++)
{
status = fscanf(file, "%d", &productN[n][mm]);
if (displayInput)
printf("\tProduct[%d]: N=%d\n", mm, productN[n][mm]);
}
// Line #7
for (int mm = 0; mm < numReactingSpecies[n]; mm++)
{
status = fscanf(file, "%hu", &reactant[mm][n]);
if (displayInput)
printf("\treactant[%d]: N=%d\n", mm, reactant[mm][n]);
}
// Line #8
for (int mm = 0; mm < numProducts[n]; mm++)
{
status = fscanf(file, "%d", &ProductIndex[mm]);
if (displayInput)
printf("\tProductIndex[%d]: N=%d\n", mm, ProductIndex[mm]);
}
if (displayInput)
printf("\n");
}
fclose(file);
// We're not done yet.
// Finally parse the flux
parseFlux(numProducts, reactantZ, reactantN, productZ, productN);
// Cleanup dynamic memory
delete [] numProducts;
delete [] reactantZ;
delete [] reactantN;
delete [] productZ;
delete [] productN;
}
void Network::parseFlux(int *numProducts, vec_4i *reactantZ, vec_4i *reactantN,
vec_4i *productZ, vec_4i *productN)
{
const static bool showParsing = false;
// These tempInt blocks will become MapFPlus and MapFMinus eventually.
size_t tempIntSize = species * reactions / 2;
unsigned short *tempInt1 = new unsigned short [tempIntSize];
unsigned short *tempInt2 = new unsigned short [tempIntSize];
// Access elements by reacMask[speciesIndex + species * reactionIndex].
int *reacMask = new int [species * reactions]; // [species][reactions]
int *numFluxPlus = new int [species];
int *numFluxMinus = new int [species];
// Start of Guidry's original parseF() code
if (showParsing)
printf("Use parseF() to find F+ and F- flux components for each species:\n");
int incrementPlus = 0;
int incrementMinus = 0;
totalFplus = 0;
totalFminus = 0;
// Loop over all isotopes in the network
for (int i = 0; i < species; i++)
{
int total = 0;
int numFplus = 0;
int numFminus = 0;
// Loop over all possible reactions for this isotope, finding those that
// change its population up (contributing to F+) or down (contributing
// to F-).
for (int j = 0; j < reactions; j++)
{
int totalL = 0;
int totalR = 0;
// Loop over reactants for this reaction
for (int k = 0; k < numReactingSpecies[j]; k++)
{
if (Z[i] == reactantZ[j][k] && N[i] == reactantN[j][k])
totalL++;
}
// Loop over products for this reaction
for (int k = 0; k < numProducts[j]; k++)
{
if (Z[i] == productZ[j][k] && N[i] == productN[j][k])
totalR++;
}
total = totalL - totalR;
if (total > 0) // Contributes to F- for this isotope
{
numFminus++;
reacMask[i + species * j] = -total;
tempInt2[incrementMinus + numFminus - 1] = j;
// if (showParsing)
// printf("%s reacIndex=%d %s nReac=%d nProd=%d totL=%d totR=%d tot=%d F-\n",
// isoLabel[i], j, reacLabel[j], NumReactingSpecies[j], NumProducts[j], totalL,
// totalR, total);
}
else if (total < 0) // Contributes to F+ for this isotope
{
numFplus++;
reacMask[i + species * j] = -total;
tempInt1[incrementPlus + numFplus - 1] = j;
// if (showParsing)
// printf("%s reacIndex=%d %s nReac=%d nProd=%d totL=%d totR=%d tot=%d F+\n",
// isoLabel[i], j, reacLabel[j], NumReactingSpecies[j], NumProducts[j], totalL,
// totalR, total);
}
else // Does not contribute to flux for this isotope
{
reacMask[i + species * j] = 0;
}
}
// Keep track of the total number of F+ and F- terms in the network for all isotopes
totalFplus += numFplus;
totalFminus += numFminus;
numFluxPlus[i] = numFplus;
numFluxMinus[i] = numFminus;
incrementPlus += numFplus;
incrementMinus += numFminus;
// if (showParsing == 1)
// printf("%d %s numF+ = %d numF- = %d\n", i, isoLabel[i], numFplus, numFminus);
}
// Display some cases
printf("\n");
printf("PART OF FLUX-ISOTOPE COMPONENT ARRAY (-n --> F-; +n --> F+ for given isotope):\n");
printf("\n");
printf("FLUX SPARSENESS: Non-zero F+ = %d; Non-zero F- = %d, out of %d x %d = %d possibilities.\n",
totalFplus, totalFminus, reactions, species, reactions * species);
/*******************************************/
// Create 1D arrays to hold non-zero F+ and F- for all reactions for all isotopes,
// the arrays holding the species factors FplusFac and FminusFac,
// and also arrays to hold their sums for each isotope. Note that parseF() must
// be run first because it determines totalFplus and totalFminus.
FplusFac = new fern_real [totalFplus];
FminusFac = new fern_real [totalFminus];
// Create 1D arrays that will hold the index of the isotope for the F+ or F- term
MapFplus = new unsigned short [totalFplus];
MapFminus = new unsigned short [totalFminus];
// Create 1D arrays that will be used to map finite F+ and F- to the Flux array.
int *FplusIsotopeCut = new int [species];
int *FminusIsotopeCut = new int [species];
int *FplusIsotopeIndex = new int [totalFplus];
int *FminusIsotopeIndex = new int [totalFminus];
FplusIsotopeCut[0] = numFluxPlus[0];
FminusIsotopeCut[0] = numFluxMinus[0];
for (int i = 1; i < species; i++)
{
FplusIsotopeCut[i] = numFluxPlus[i] + FplusIsotopeCut[i - 1];
FminusIsotopeCut[i] = numFluxMinus[i] + FminusIsotopeCut[i - 1];
}
int currentIso = 0;
for (int i = 0; i < totalFplus; i++)
{
FplusIsotopeIndex[i] = currentIso;
if (i == (FplusIsotopeCut[currentIso] - 1)) currentIso ++;
}
currentIso = 0;
for (int i = 0; i < totalFminus; i++)
{
FminusIsotopeIndex[i] = currentIso;
if (i == (FminusIsotopeCut[currentIso] - 1)) currentIso ++;
}
// Diagnostic output
// if (showFparsing == 1)
// {
// printf("\n\n");
// printf("MAX F+ and F- INDEX FOR EACH ISOTOPE:\n");
// for (int i = 0; i < species; i++)
// {
// printf("\n");
// printf("Isotope index = %d %s Max index F+ = %d Max index F- = %d\n",
// i, isoLabel[i], FplusIsotopeCut[i] - 1, FminusIsotopeCut[i] - 1);
// }
// }
for (int i = 0; i < totalFplus; i++)
{
MapFplus[i] = tempInt1[i];
}
for (int i = 0; i < totalFminus; i++)
{
MapFminus[i] = tempInt2[i];
}
// Populate the FplusMin and FplusMax arrays
unsigned short *FplusMin = new unsigned short [species];
unsigned short *FminusMin = new unsigned short [species];
FplusMin[0] = 0;
FplusMax[0] = numFluxPlus[0] - 1;
for (int i = 1; i < species; i++)
{
FplusMin[i] = FplusMax[i - 1] + 1;
FplusMax[i] = FplusMin[i] + numFluxPlus[i] - 1 ;
}
// Populate the FminusMin and FminusMax arrays
FminusMin[0] = 0;
FminusMax[0] = numFluxMinus[0] - 1;
for (int i = 1; i < species; i++)
{
FminusMin[i] = FminusMax[i - 1] + 1;
FminusMax[i] = FminusMin[i] + numFluxMinus[i] - 1 ;
}
// Populate the FplusFac and FminusFac arrays that hold the factors counting the
// number of occurences of the species in the reaction. Note that this can only
// be done after parseF() has been run to give reacMask[i][j].
int tempCountPlus = 0;
int tempCountMinus = 0;
for (int i = 0; i < species; i++)
{
for (int j = 0; j < reactions; j++)
{
if (reacMask[i + species * j] > 0)
{
FplusFac[tempCountPlus] = (fern_real)reacMask[i + species * j];
tempCountPlus++;
}
else if (reacMask[i + species * j] < 0)
{
FminusFac[tempCountMinus] = -(fern_real) reacMask[i + species * j];
tempCountMinus++;
}
}
}
// Clean up dynamic memory
delete [] reacMask;
delete [] FplusIsotopeCut;
delete [] FminusIsotopeCut;
delete [] FplusIsotopeIndex;
delete [] FminusIsotopeIndex;
delete [] tempInt1;
delete [] tempInt2;
delete [] numFluxPlus;
delete [] numFluxMinus;
delete [] FplusMin;
delete [] FminusMin;
}
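// Summary of parseFlux: for every (species i, reaction j) pair it counts how many times the
// isotope appears as a reactant minus how many times it appears as a product. A positive
// difference means the reaction depletes the isotope (an F- contribution), a negative one
// means it populates it (F+). reacMask records the signed multiplicity, tempInt1/tempInt2
// collect the contributing reaction indices that become MapFplus and MapFminus, and
// FplusFac/FminusFac keep the multiplicities applied when F+ and F- are accumulated per
// isotope, with the per-isotope ranges delimited by FplusMax and FminusMax.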
void Network::allocate()
{
// Allocate the network data
Z = new unsigned char[species];
N = new unsigned char[species];
FplusMax = new unsigned short [species];
FminusMax = new unsigned short [species];
// Allocate the reaction data
for (int i = 0; i < 7; i++)
P[i] = new fern_real[reactions];
numReactingSpecies = new unsigned char[reactions];
statFac = new fern_real[reactions];
Q = new fern_real[reactions];
for (int i = 0; i < 3; i++)
reactant[i] = new unsigned short[reactions];
}
void Network::cudaAllocate()
{
// Allocate network data
hipMalloc(&Z, sizeof(unsigned char) * species);
hipMalloc(&N, sizeof(unsigned char) * species);
hipMalloc(&FplusFac, sizeof(fern_real) * totalFplus);
hipMalloc(&FminusFac, sizeof(fern_real) * totalFminus);
hipMalloc(&MapFplus, sizeof(unsigned short) * totalFplus);
hipMalloc(&MapFminus, sizeof(unsigned short) * totalFminus);
hipMalloc(&FplusMax, sizeof(unsigned short) * species);
hipMalloc(&FminusMax, sizeof(unsigned short) * species);
// Allocate reaction data
for (int i = 0; i < 7; i++)
{
hipMalloc(&P[i], sizeof(fern_real) * reactions);
}
hipMalloc(&numReactingSpecies, sizeof(unsigned char) * reactions);
hipMalloc(&statFac, sizeof(fern_real) * reactions);
hipMalloc(&Q, sizeof(fern_real) * reactions);
for (int i = 0; i < 3; i++)
hipMalloc(&reactant[i], sizeof(unsigned short) * reactions);
}
void Network::setSizes(const Network &source)
{
species = source.species;
reactions = source.reactions;
totalFplus = source.totalFplus;
totalFminus = source.totalFminus;
}
void Network::cudaCopy(const Network &source, hipMemcpyKind kind)
{
// Copy scalars
massTol = source.massTol;
fluxFrac = source.fluxFrac;
// Copy network vectors
hipMemcpy(Z, source.Z, sizeof(unsigned char) * species, kind);
hipMemcpy(N, source.N, sizeof(unsigned char) * species, kind);
FERNIntegrator::checkCudaErrors();
hipMemcpy(FplusFac, source.FplusFac, sizeof(fern_real) * totalFplus, kind);
FERNIntegrator::checkCudaErrors();
hipMemcpy(FminusFac, source.FminusFac, sizeof(fern_real) * totalFminus, kind);
FERNIntegrator::checkCudaErrors();
hipMemcpy(MapFplus, source.MapFplus, sizeof(unsigned short) * totalFplus, kind);
hipMemcpy(MapFminus, source.MapFminus, sizeof(unsigned short) * totalFminus, kind);
hipMemcpy(FplusMax, source.FplusMax, sizeof(unsigned short) * species, kind);
hipMemcpy(FminusMax, source.FminusMax, sizeof(unsigned short) * species, kind);
// Copy reaction vectors
for (int i = 0; i < 7; i++)
{
hipMemcpy(P[i], source.P[i], sizeof(fern_real) * reactions, kind);
}
hipMemcpy(numReactingSpecies, source.numReactingSpecies,
sizeof(unsigned char) * reactions, kind);
hipMemcpy(statFac, source.statFac, sizeof(fern_real) * reactions, kind);
hipMemcpy(Q, source.Q, sizeof(fern_real) * reactions, kind);
for (int i = 0; i < 3; i++)
{
hipMemcpy(reactant[i], source.reactant[i],
sizeof(unsigned short) * reactions, kind);
}
}
void Network::print()
{
// Network data
printf("species: %d\n", species);
printf("Z: { ");
for (int i = 0; i < species; i++)
printf("%4d ", Z[i]);
printf("}\n");
printf("N: { ");
for (int i = 0; i < species; i++)
printf("%4d ", N[i]);
printf("}\n");
// Reaction data
printf("\n");
printf("reactions: %d\n", reactions);
for (int n = 0; n < 7; n++)
{
printf("P[%d]: { ", n);
for (int i = 0; i < reactions; i++)
printf("%e ", P[n][i]);;
printf("\n");
}
printf("numReactingSpecies: { ");
for (int i = 0; i < reactions; i++)
printf("%4d ", numReactingSpecies[i]);
printf("}\n");
printf("statFac: { ");
for (int i = 0; i < reactions; i++)
printf("%e ", statFac[i]);
printf("}\n");
printf("Q: { ");
for (int i = 0; i < reactions; i++)
printf("%e ", Q[i]);
printf("}\n");
for (int n = 0; n < 3; n++)
{
printf("reactant[%d]: { ", n);
for (int i = 0; i < reactions; i++)
printf("%4d ", reactant[n][i]);
printf("}\n");
}
printf("totalFplus: %d\n", totalFplus);
printf("totalFminus: %d\n", totalFminus);
printf("FplusFac: { ");
for (int i = 0; i < totalFplus; i++)
printf("%e ", FplusFac[i]);
printf("}\n");
printf("FminusFac: { ");
for (int i = 0; i < totalFminus; i++)
printf("%e ", FminusFac[i]);
printf("}\n");
printf("MapFplus: { ");
for (int i = 0; i < totalFplus; i++)
printf("%4u ", MapFplus[i]);
printf("}\n");
printf("MapFminus: { ");
for (int i = 0; i < totalFminus; i++)
printf("%4u ", MapFminus[i]);
printf("}\n");
printf("FplusMax: { ");
for (int i = 0; i < species; i++)
printf("%4u ", FplusMax[i]);
printf("}\n");
printf("FminusMax: { ");
for (int i = 0; i < species; i++)
printf("%4u ", FminusMax[i]);
printf("}\n");
}
|
55d8d3bb9773b3807bba2a155206a1424d09b7b9.cu
|
#include "Network.cuh"
#include <stdio.h>
#include <assert.h>
#include "FERNIntegrator.cuh"
Network::Network()
{
}
void Network::loadNetwork(const char *filename)
{
// Unused variables
char isotopeLabel[10];
unsigned short A;
fern_real massExcess;
fern_real pf;
fern_real Y;
FILE *file = fopen(filename, "r");
// Exit if the file doesn't exist or can't be read
if (!file)
{
fprintf(stderr, "Could not read file '%s'\n", filename);
exit(1);
}
// Read 4 lines at a time
for (int n = 0; n < species; n++)
{
int status;
// Line #1
#ifdef FERN_SINGLE
status = fscanf(file, "%s %hu %hhu %hhu %f %f\n",
isotopeLabel, &A, &Z[n], &N[n], &Y, &massExcess);
#else
status = fscanf(file, "%s %hu %hhu %hhu %lf %lf\n",
isotopeLabel, &A, &Z[n], &N[n], &Y, &massExcess);
#endif
if (status == EOF)
break;
// Line #2...4
for (int i = 0; i < 8 * 3; i++)
{
#ifdef FERN_SINGLE
status = fscanf(file, "%f", &pf);
#else
status = fscanf(file, "%lf", &pf);
#endif
}
}
}
void Network::loadReactions(const char *filename)
{
static const bool displayInput = false;
// Unused variables
char reactionLabel[100];
int RGclass;
int RGmemberIndex;
int reaclibClass;
int isEC;
int isReverseR;
int ProductIndex[4];
// Allocate the host-only memory to be used by parseFlux()
int *numProducts = new int [reactions];
// Each element of these dynamic arrays are pointers to static arrays of size 4.
vec_4i *reactantZ = new vec_4i [reactions]; // [reactions]
vec_4i *reactantN = new vec_4i [reactions]; // [reactions]
vec_4i *productZ = new vec_4i [reactions]; // [reactions]
vec_4i *productN = new vec_4i [reactions]; // [reactions]
FILE *file = fopen(filename, "r");
// Exit if the file doesn't exist or can't be read
if (!file)
{
fprintf(stderr, "File Input Error: No readable file named %s\n", filename);
exit(1);
}
// Read eight lines at a time
for (int n = 0; n < reactions; n++)
{
int status;
// Line #1
#ifdef FERN_SINGLE
status = fscanf(file, "%s %d %d %d %hhu %d %d %d %f %f",
reactionLabel, &RGclass, &RGmemberIndex, &reaclibClass,
&numReactingSpecies[n], &numProducts[n], &isEC, &isReverseR,
&statFac[n], &Q[n]);
#else
status = fscanf(file, "%s %d %d %d %hhu %d %d %d %lf %lf",
reactionLabel, &RGclass, &RGmemberIndex, &reaclibClass,
&numReactingSpecies[n], &numProducts[n], &isEC, &isReverseR,
&statFac[n], &Q[n]);
#endif
if (status == EOF)
break;
if (displayInput)
{
printf("Reaction Index = %d\n", n);
printf("isReverseR = %d reaclibIndex = %d\n",
isReverseR, reaclibClass);
printf("%s %d %d %d %d %d %d %d %f %f\n",
reactionLabel, RGclass, RGmemberIndex, reaclibClass,
numReactingSpecies[n], numProducts[n], isEC,
isReverseR, statFac[n], Q[n]);
}
// Line #2
if (displayInput)
printf("P: { ");
for (int i = 0; i < 7; i++)
{
#ifdef FERN_SINGLE
status = fscanf(file, "%f", &P[i][n]);
#else
status = fscanf(file, "%lf", &P[i][n]);
#endif
if (displayInput)
printf("%f, ", P[i][n]);
}
if (displayInput)
printf("}\n");
// Line #3
for (int mm = 0; mm < numReactingSpecies[n]; mm++)
{
status = fscanf(file, "%d", &reactantZ[n][mm]);
if (displayInput)
printf("\tReactant[%d]: Z=%d\n", mm, reactantZ[n][mm]);
}
// Line #4
for (int mm = 0; mm < numReactingSpecies[n]; mm++)
{
status = fscanf(file, "%d", &reactantN[n][mm]);
if (displayInput)
printf("\tReactant[%d]: N=%d\n", mm, reactantN[n][mm]);
}
// Line #5
for (int mm = 0; mm < numProducts[n]; mm++)
{
status = fscanf(file, "%d", &productZ[n][mm]);
if (displayInput)
printf("\tProduct[%d]: Z=%d\n", mm, productZ[n][mm]);
}
// Line #6
for (int mm = 0; mm < numProducts[n]; mm++)
{
status = fscanf(file, "%d", &productN[n][mm]);
if (displayInput)
printf("\tProduct[%d]: N=%d\n", mm, productN[n][mm]);
}
// Line #7
for (int mm = 0; mm < numReactingSpecies[n]; mm++)
{
status = fscanf(file, "%hu", &reactant[mm][n]);
if (displayInput)
printf("\treactant[%d]: N=%d\n", mm, reactant[mm][n]);
}
// Line #8
for (int mm = 0; mm < numProducts[n]; mm++)
{
status = fscanf(file, "%d", &ProductIndex[mm]);
if (displayInput)
printf("\tProductIndex[%d]: N=%d\n", mm, ProductIndex[mm]);
}
if (displayInput)
printf("\n");
}
fclose(file);
// We're not done yet.
// Finally parse the flux
parseFlux(numProducts, reactantZ, reactantN, productZ, productN);
// Cleanup dynamic memory
delete [] numProducts;
delete [] reactantZ;
delete [] reactantN;
delete [] productZ;
delete [] productN;
}
void Network::parseFlux(int *numProducts, vec_4i *reactantZ, vec_4i *reactantN,
vec_4i *productZ, vec_4i *productN)
{
const static bool showParsing = false;
// These tempInt blocks will become MapFPlus and MapFMinus eventually.
size_t tempIntSize = species * reactions / 2;
unsigned short *tempInt1 = new unsigned short [tempIntSize];
unsigned short *tempInt2 = new unsigned short [tempIntSize];
// Access elements by reacMask[speciesIndex + species * reactionIndex].
int *reacMask = new int [species * reactions]; // [species][reactions]
int *numFluxPlus = new int [species];
int *numFluxMinus = new int [species];
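// Sign convention used below: reacMask[i + species * j] holds the net number of
// species-i nuclei produced by reaction j (products minus reactants), so positive
// entries later feed FplusFac and negative entries feed FminusFac.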
// Start of Guidry's original parseF() code
if (showParsing)
printf("Use parseF() to find F+ and F- flux components for each species:\n");
int incrementPlus = 0;
int incrementMinus = 0;
totalFplus = 0;
totalFminus = 0;
// Loop over all isotopes in the network
for (int i = 0; i < species; i++)
{
int total = 0;
int numFplus = 0;
int numFminus = 0;
// Loop over all possible reactions for this isotope, finding those that
// change its population up (contributing to F+) or down (contributing
// to F-).
for (int j = 0; j < reactions; j++)
{
int totalL = 0;
int totalR = 0;
// Loop over reactants for this reaction
for (int k = 0; k < numReactingSpecies[j]; k++)
{
if (Z[i] == reactantZ[j][k] && N[i] == reactantN[j][k])
totalL++;
}
// Loop over products for this reaction
for (int k = 0; k < numProducts[j]; k++)
{
if (Z[i] == productZ[j][k] && N[i] == productN[j][k])
totalR++;
}
total = totalL - totalR;
if (total > 0) // Contributes to F- for this isotope
{
numFminus++;
reacMask[i + species * j] = -total;
tempInt2[incrementMinus + numFminus - 1] = j;
// if (showParsing)
// printf("%s reacIndex=%d %s nReac=%d nProd=%d totL=%d totR=%d tot=%d F-\n",
// isoLabel[i], j, reacLabel[j], NumReactingSpecies[j], NumProducts[j], totalL,
// totalR, total);
}
else if (total < 0) // Contributes to F+ for this isotope
{
numFplus++;
reacMask[i + species * j] = -total;
tempInt1[incrementPlus + numFplus - 1] = j;
// if (showParsing)
// printf("%s reacIndex=%d %s nReac=%d nProd=%d totL=%d totR=%d tot=%d F+\n",
// isoLabel[i], j, reacLabel[j], NumReactingSpecies[j], NumProducts[j], totalL,
// totalR, total);
}
else // Does not contribute to flux for this isotope
{
reacMask[i + species * j] = 0;
}
}
// Keep track of the total number of F+ and F- terms in the network for all isotopes
totalFplus += numFplus;
totalFminus += numFminus;
numFluxPlus[i] = numFplus;
numFluxMinus[i] = numFminus;
incrementPlus += numFplus;
incrementMinus += numFminus;
// if (showParsing == 1)
// printf("%d %s numF+ = %d numF- = %d\n", i, isoLabel[i], numFplus, numFminus);
}
// Display some cases
printf("\n");
printf("PART OF FLUX-ISOTOPE COMPONENT ARRAY (-n --> F-; +n --> F+ for given isotope):\n");
printf("\n");
printf("FLUX SPARSENESS: Non-zero F+ = %d; Non-zero F- = %d, out of %d x %d = %d possibilities.\n",
totalFplus, totalFminus, reactions, species, reactions * species);
/*******************************************/
// Create 1D arrays to hold non-zero F+ and F- for all reactions for all isotopes,
// the arrays holding the species factors FplusFac and FminusFac,
// and also arrays to hold their sums for each isotope. Note that parseF() must
// be run first because it determines totalFplus and totalFminus.
FplusFac = new fern_real [totalFplus];
FminusFac = new fern_real [totalFminus];
// Create 1D arrays that will hold the index of the isotope for the F+ or F- term
MapFplus = new unsigned short [totalFplus];
MapFminus = new unsigned short [totalFminus];
// Create 1D arrays that will be used to map finite F+ and F- to the Flux array.
int *FplusIsotopeCut = new int [species];
int *FminusIsotopeCut = new int [species];
int *FplusIsotopeIndex = new int [totalFplus];
int *FminusIsotopeIndex = new int [totalFminus];
FplusIsotopeCut[0] = numFluxPlus[0];
FminusIsotopeCut[0] = numFluxMinus[0];
for (int i = 1; i < species; i++)
{
FplusIsotopeCut[i] = numFluxPlus[i] + FplusIsotopeCut[i - 1];
FminusIsotopeCut[i] = numFluxMinus[i] + FminusIsotopeCut[i - 1];
}
int currentIso = 0;
for (int i = 0; i < totalFplus; i++)
{
FplusIsotopeIndex[i] = currentIso;
if (i == (FplusIsotopeCut[currentIso] - 1)) currentIso ++;
}
currentIso = 0;
for (int i = 0; i < totalFminus; i++)
{
FminusIsotopeIndex[i] = currentIso;
if (i == (FminusIsotopeCut[currentIso] - 1)) currentIso ++;
}
// Diagnostic output
// if (showFparsing == 1)
// {
// printf("\n\n");
// printf("MAX F+ and F- INDEX FOR EACH ISOTOPE:\n");
// for (int i = 0; i < species; i++)
// {
// printf("\n");
// printf("Isotope index = %d %s Max index F+ = %d Max index F- = %d\n",
// i, isoLabel[i], FplusIsotopeCut[i] - 1, FminusIsotopeCut[i] - 1);
// }
// }
for (int i = 0; i < totalFplus; i++)
{
MapFplus[i] = tempInt1[i];
}
for (int i = 0; i < totalFminus; i++)
{
MapFminus[i] = tempInt2[i];
}
// Populate the FplusMin and FplusMax arrays
unsigned short *FplusMin = new unsigned short [species];
unsigned short *FminusMin = new unsigned short [species];
FplusMin[0] = 0;
FplusMax[0] = numFluxPlus[0] - 1;
for (int i = 1; i < species; i++)
{
FplusMin[i] = FplusMax[i - 1] + 1;
FplusMax[i] = FplusMin[i] + numFluxPlus[i] - 1 ;
}
// Populate the FminusMin and FminusMax arrays
FminusMin[0] = 0;
FminusMax[0] = numFluxMinus[0] - 1;
for (int i = 1; i < species; i++)
{
FminusMin[i] = FminusMax[i - 1] + 1;
FminusMax[i] = FminusMin[i] + numFluxMinus[i] - 1 ;
}
// Populate the FplusFac and FminusFac arrays that hold the factors counting the
// number of occurences of the species in the reaction. Note that this can only
// be done after parseF() has been run to give reacMask[i][j].
int tempCountPlus = 0;
int tempCountMinus = 0;
for (int i = 0; i < species; i++)
{
for (int j = 0; j < reactions; j++)
{
if (reacMask[i + species * j] > 0)
{
FplusFac[tempCountPlus] = (fern_real)reacMask[i + species * j];
tempCountPlus++;
}
else if (reacMask[i + species * j] < 0)
{
FminusFac[tempCountMinus] = -(fern_real) reacMask[i + species * j];
tempCountMinus++;
}
}
}
// Clean up dynamic memory
delete [] reacMask;
delete [] FplusIsotopeCut;
delete [] FminusIsotopeCut;
delete [] FplusIsotopeIndex;
delete [] FminusIsotopeIndex;
delete [] tempInt1;
delete [] tempInt2;
delete [] numFluxPlus;
delete [] numFluxMinus;
delete [] FplusMin;
delete [] FminusMin;
}
void Network::allocate()
{
// Allocate the network data
Z = new unsigned char[species];
N = new unsigned char[species];
FplusMax = new unsigned short [species];
FminusMax = new unsigned short [species];
// Allocate the reaction data
for (int i = 0; i < 7; i++)
P[i] = new fern_real[reactions];
numReactingSpecies = new unsigned char[reactions];
statFac = new fern_real[reactions];
Q = new fern_real[reactions];
for (int i = 0; i < 3; i++)
reactant[i] = new unsigned short[reactions];
}
void Network::cudaAllocate()
{
// Allocate network data
cudaMalloc(&Z, sizeof(unsigned char) * species);
cudaMalloc(&N, sizeof(unsigned char) * species);
cudaMalloc(&FplusFac, sizeof(fern_real) * totalFplus);
cudaMalloc(&FminusFac, sizeof(fern_real) * totalFminus);
cudaMalloc(&MapFplus, sizeof(unsigned short) * totalFplus);
cudaMalloc(&MapFminus, sizeof(unsigned short) * totalFminus);
cudaMalloc(&FplusMax, sizeof(unsigned short) * species);
cudaMalloc(&FminusMax, sizeof(unsigned short) * species);
// Allocate reaction data
for (int i = 0; i < 7; i++)
{
cudaMalloc(&P[i], sizeof(fern_real) * reactions);
}
cudaMalloc(&numReactingSpecies, sizeof(unsigned char) * reactions);
cudaMalloc(&statFac, sizeof(fern_real) * reactions);
cudaMalloc(&Q, sizeof(fern_real) * reactions);
for (int i = 0; i < 3; i++)
cudaMalloc(&reactant[i], sizeof(unsigned short) * reactions);
}
void Network::setSizes(const Network &source)
{
species = source.species;
reactions = source.reactions;
totalFplus = source.totalFplus;
totalFminus = source.totalFminus;
}
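// Note: the same member pointers hold host or device storage depending on whether
// allocate() or cudaAllocate() was called, so cudaCopy() below serves both the
// host-to-device and device-to-host directions via its cudaMemcpyKind argument.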
void Network::cudaCopy(const Network &source, cudaMemcpyKind kind)
{
// Copy scalars
massTol = source.massTol;
fluxFrac = source.fluxFrac;
// Copy network vectors
cudaMemcpy(Z, source.Z, sizeof(unsigned char) * species, kind);
cudaMemcpy(N, source.N, sizeof(unsigned char) * species, kind);
FERNIntegrator::checkCudaErrors();
cudaMemcpy(FplusFac, source.FplusFac, sizeof(fern_real) * totalFplus, kind);
FERNIntegrator::checkCudaErrors();
cudaMemcpy(FminusFac, source.FminusFac, sizeof(fern_real) * totalFminus, kind);
FERNIntegrator::checkCudaErrors();
cudaMemcpy(MapFplus, source.MapFplus, sizeof(unsigned short) * totalFplus, kind);
cudaMemcpy(MapFminus, source.MapFminus, sizeof(unsigned short) * totalFminus, kind);
cudaMemcpy(FplusMax, source.FplusMax, sizeof(unsigned short) * species, kind);
cudaMemcpy(FminusMax, source.FminusMax, sizeof(unsigned short) * species, kind);
// Copy reaction vectors
for (int i = 0; i < 7; i++)
{
cudaMemcpy(P[i], source.P[i], sizeof(fern_real) * reactions, kind);
}
cudaMemcpy(numReactingSpecies, source.numReactingSpecies,
sizeof(unsigned char) * reactions, kind);
cudaMemcpy(statFac, source.statFac, sizeof(fern_real) * reactions, kind);
cudaMemcpy(Q, source.Q, sizeof(fern_real) * reactions, kind);
for (int i = 0; i < 3; i++)
{
cudaMemcpy(reactant[i], source.reactant[i],
sizeof(unsigned short) * reactions, kind);
}
}
void Network::print()
{
// Network data
printf("species: %d\n", species);
printf("Z: { ");
for (int i = 0; i < species; i++)
printf("%4d ", Z[i]);
printf("}\n");
printf("N: { ");
for (int i = 0; i < species; i++)
printf("%4d ", N[i]);
printf("}\n");
// Reaction data
printf("\n");
printf("reactions: %d\n", reactions);
for (int n = 0; n < 7; n++)
{
printf("P[%d]: { ", n);
for (int i = 0; i < reactions; i++)
printf("%e ", P[n][i]);;
printf("\n");
}
printf("numReactingSpecies: { ");
for (int i = 0; i < reactions; i++)
printf("%4d ", numReactingSpecies[i]);
printf("}\n");
printf("statFac: { ");
for (int i = 0; i < reactions; i++)
printf("%e ", statFac[i]);
printf("}\n");
printf("Q: { ");
for (int i = 0; i < reactions; i++)
printf("%e ", Q[i]);
printf("}\n");
for (int n = 0; n < 3; n++)
{
printf("reactant[%d]: { ", n);
for (int i = 0; i < reactions; i++)
printf("%4d ", reactant[n][i]);
printf("}\n");
}
printf("totalFplus: %d\n", totalFplus);
printf("totalFminus: %d\n", totalFminus);
printf("FplusFac: { ");
for (int i = 0; i < totalFplus; i++)
printf("%e ", FplusFac[i]);
printf("}\n");
printf("FminusFac: { ");
for (int i = 0; i < totalFminus; i++)
printf("%e ", FminusFac[i]);
printf("}\n");
printf("MapFplus: { ");
for (int i = 0; i < totalFplus; i++)
printf("%4u ", MapFplus[i]);
printf("}\n");
printf("MapFminus: { ");
for (int i = 0; i < totalFminus; i++)
printf("%4u ", MapFminus[i]);
printf("}\n");
printf("FplusMax: { ");
for (int i = 0; i < species; i++)
printf("%4u ", FplusMax[i]);
printf("}\n");
printf("FminusMax: { ");
for (int i = 0; i < species; i++)
printf("%4u ", FminusMax[i]);
printf("}\n");
}
|
81ed07f0e038b2bee7db0b38e55160a42736251d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <npp.h>
#include "common.h"
extern
void split_3d_overdim2_float(
const float* __restrict__ g_i,
float* __restrict__ g_o,
unsigned int dim0,
unsigned int dim1,
unsigned int dim2,
unsigned int new_dim2);
extern
void split_3d_overdim2_integer(
const int* __restrict__ g_i,
int* __restrict__ g_o,
unsigned int dim0,
unsigned int dim1,
unsigned int dim2,
unsigned int new_dim2);
// COPYRIGHT TO CHARLESQ34 @ GitHub : PointNet++
// input: k (1), distance matrix dist (b,m,n)
// output: idx (b,m,n), dist_out (b,m,n)
// only the top k results within n are useful
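// Launch convention (see top_k below): one block per batch element, with blockDim.x
// threads striding over the m query rows, so each thread selection-sorts the first
// k entries of every row it owns.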
__global__ void kernel_selection_sort_gpu(int b, int n, int m, int k, const float * __restrict__ dist, int * __restrict__ outi, float * __restrict__ out) {
int batch_index = blockIdx.x;
dist+=m*n*batch_index;
outi+=m*n*batch_index;
out+=m*n*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
// copy from dist to dist_out
for (int j=index;j<m;j+=stride) {
for (int s=0;s<n;++s) {
out[j*n+s] = dist[j*n+s];
outi[j*n+s] = s;
}
}
float *p_dist;
for (int j=index;j<m;j+=stride) {
p_dist = out+j*n;
// selection sort for the first k elements
for (int s=0;s<k;++s) {
int min=s;
// find the min
for (int t=s+1;t<n;++t) {
if (p_dist[t]<p_dist[min]) {
min = t;
}
}
// swap the min-th and s-th elements
if (min!=s) {
float tmp = p_dist[min];
p_dist[min] = p_dist[s];
p_dist[s] = tmp;
int tmpi = outi[j*n+min];
outi[j*n+min] = outi[j*n+s];
outi[j*n+s] = tmpi;
}
}
}
}
void top_k(
const float* __restrict__ distance_matrix, // (b,m,n)
int * __restrict__ output_indices, // (b,m,k)
float* output_values, // (b,m,k)
int b,
int n,
int m,
int k){
unsigned int blockSize = 256;
float* tmpVal;
int* tmpIndices;
CHECK(hipMalloc((float**)&tmpVal , (b*m*n)*sizeof(float)));
CHECK(hipMalloc((int**)&tmpIndices, (b*m*n)*sizeof(int)));
//hipDeviceSynchronize();
hipLaunchKernelGGL(( kernel_selection_sort_gpu), dim3(b),dim3(blockSize), 0, 0, b,n,m,k,distance_matrix,tmpIndices,tmpVal);
split_3d_overdim2_float(tmpVal, output_values,b,m,n,k); //split BxMxN into BxMxK (float)
split_3d_overdim2_integer(tmpIndices, output_indices,b,m,n,k); //split BxMxN into BxMxK (integer)
//hipDeviceSynchronize();
CHECK(hipFree(tmpVal));
CHECK(hipFree(tmpIndices));
}
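// A minimal host-side usage sketch (illustrative only; the buffer names below are
// assumptions, not part of the original source):
//   float *d_dist; int *d_idx; float *d_val;
//   CHECK(hipMalloc((void**)&d_dist, b*m*n*sizeof(float)));  // pairwise distances, (b,m,n)
//   CHECK(hipMalloc((void**)&d_idx, b*m*k*sizeof(int)));     // top-k indices, (b,m,k)
//   CHECK(hipMalloc((void**)&d_val, b*m*k*sizeof(float)));   // top-k distances, (b,m,k)
//   top_k(d_dist, d_idx, d_val, b, n, m, k);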
|
81ed07f0e038b2bee7db0b38e55160a42736251d.cu
|
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <npp.h>
#include "common.h"
extern
void split_3d_overdim2_float(
const float* __restrict__ g_i,
float* __restrict__ g_o,
unsigned int dim0,
unsigned int dim1,
unsigned int dim2,
unsigned int new_dim2);
extern
void split_3d_overdim2_integer(
const int* __restrict__ g_i,
int* __restrict__ g_o,
unsigned int dim0,
unsigned int dim1,
unsigned int dim2,
unsigned int new_dim2);
// COPYRIGHT TO CHARLESQ34 @ GitHub : PointNet++
// input: k (1), distance matrix dist (b,m,n)
// output: idx (b,m,n), dist_out (b,m,n)
// only the top k results within n are useful
__global__ void kernel_selection_sort_gpu(int b, int n, int m, int k, const float * __restrict__ dist, int * __restrict__ outi, float * __restrict__ out) {
int batch_index = blockIdx.x;
dist+=m*n*batch_index;
outi+=m*n*batch_index;
out+=m*n*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
// copy from dist to dist_out
for (int j=index;j<m;j+=stride) {
for (int s=0;s<n;++s) {
out[j*n+s] = dist[j*n+s];
outi[j*n+s] = s;
}
}
float *p_dist;
for (int j=index;j<m;j+=stride) {
p_dist = out+j*n;
// selection sort for the first k elements
for (int s=0;s<k;++s) {
int min=s;
// find the min
for (int t=s+1;t<n;++t) {
if (p_dist[t]<p_dist[min]) {
min = t;
}
}
// swap the min-th and s-th elements
if (min!=s) {
float tmp = p_dist[min];
p_dist[min] = p_dist[s];
p_dist[s] = tmp;
int tmpi = outi[j*n+min];
outi[j*n+min] = outi[j*n+s];
outi[j*n+s] = tmpi;
}
}
}
}
void top_k(
const float* __restrict__ distance_matrix, // (b,m,n)
int * __restrict__ output_indices, // (b,m,k)
float* output_values, // (b,m,k)
int b,
int n,
int m,
int k){
unsigned int blockSize = 256;
float* tmpVal;
int* tmpIndices;
CHECK(cudaMalloc((float**)&tmpVal , (b*m*n)*sizeof(float)));
CHECK(cudaMalloc((int**)&tmpIndices, (b*m*n)*sizeof(int)));
//cudaDeviceSynchronize();
kernel_selection_sort_gpu<<<b,blockSize>>>(b,n,m,k,distance_matrix,tmpIndices,tmpVal);
split_3d_overdim2_float(tmpVal, output_values,b,m,n,k); //split BxMxN into BxMxK (float)
split_3d_overdim2_integer(tmpIndices, output_indices,b,m,n,k); //split BxMxN into BxMxK (integer)
//cudaDeviceSynchronize();
CHECK(cudaFree(tmpVal));
CHECK(cudaFree(tmpIndices));
}
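// A minimal host-side usage sketch (illustrative only; the buffer names below are
// assumptions, not part of the original source):
//   float *d_dist; int *d_idx; float *d_val;
//   CHECK(cudaMalloc((void**)&d_dist, b*m*n*sizeof(float)));  // pairwise distances, (b,m,n)
//   CHECK(cudaMalloc((void**)&d_idx, b*m*k*sizeof(int)));     // top-k indices, (b,m,k)
//   CHECK(cudaMalloc((void**)&d_val, b*m*k*sizeof(float)));   // top-k distances, (b,m,k)
//   top_k(d_dist, d_idx, d_val, b, n, m, k);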
|
f63d6b1a0da835fe8259853dacca6bd56cba3dcf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// SLEAnimate.cpp
// SLE
//
// Created by Henry Jackson on 22/04/2015.
// Copyright (c) 2015 n/a. All rights reserved.
//
#include "SLEAnimate.cuh"
#include <iostream>
#define TILE_WIDTH 32
/*--- CUDA Error checking ---*/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
////////////////////////////////////////////////////////////////////////////////
//// SLEAnimate private member function definitions ////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void SLEAnimate::initialiseLeft(){
drawColours(leftPlot, pxOriginal, pxOriginalRows, pxOriginalCols);
drawLines(leftPlot, horizontal, horizontalRows, horizontalCols, hzColour);
drawLinesTranspose(leftPlot, vertical, verticalRows, verticalCols, vtColour);
leftPlot.drawAxis();
}
void SLEAnimate::drawLines(class plot& plot,
cpx* matrix,
int matrixRows,
int matrixCols,
Mat& colours){
double offset = -stabilser.real();
for (int i=0; i<matrixRows; ++i) {
vector<cpx> line;
for (int j=0; j<matrixCols-1; ++j) {
line = vector<cpx>{matrix[i*matrixCols + j] + offset, matrix[i*matrixCols + j + 1] + offset};
Vec3b col = colours.at<Vec3b>(i,j);
Scalar colScalar = Scalar( (double)col[0], (double)col[1], (double)col[2] );
plot.drawLine(line, colScalar);
}
}
// Draws the little blue thing at the bottom of the screen
vector<cpx> offsetLine;
offsetLine.push_back(offset - cpx(0, gridSpacing/2));
offsetLine.push_back(offset + cpx(0, gridSpacing/2));
plot.drawLine(offsetLine, Scalar(255,0,0));
}
void SLEAnimate::drawLinesTranspose(class plot& plot,
cpx* matrix,
int matrixRows,
int matrixCols,
Mat& colours){
double offset = -stabilser.real();
Mat tmpColours = colours.t();
for (int j=0; j<matrixCols; ++j) {
vector<cpx> line;
for (int i=0; i<matrixRows-1; ++i) {
line = vector<cpx>{matrix[i*matrixCols + j] + offset, matrix[(i+1)*matrixCols + j] + offset};
Vec3b col = colours.at<Vec3b>(i,j);
Scalar colScalar = Scalar( (double)col[0], (double)col[1], (double)col[2] );
plot.drawLine(line, colScalar);
}
}
// Draws the little blue thing at the bottom of the screen
vector<cpx> offsetLine;
offsetLine.push_back(offset - cpx(0, gridSpacing/2));
offsetLine.push_back(offset + cpx(0, gridSpacing/2));
plot.drawLine(offsetLine, Scalar(255,0,0));
}
void SLEAnimate::drawColours(class plot& plot,
cpx* points,
int pointsRows,
int pointsCols){
Mat colours = generateColours(points, pointsRows, pointsCols, false);
for (int i = 0; i < pointsRows; ++i) {
for (int j = 0; j < pointsCols; ++j){
plot.colour(i, j, colours.at<Vec3b>(i,j) );
}
}
}
cpx* SLEAnimate::generateHorizontal(){
double width = 2*(leftPlot.maxX() - leftPlot.minX());
double height = 2*leftPlot.maxY();
horizontalRows = (int)(height/gridSpacing);
horizontalCols = (int)(width/gridRes);
cpx* result = new cpx [horizontalRows*horizontalCols];
cpx heightIncrement(0, gridSpacing);
cpx widthIncrement(gridRes, 0);
for (int i=0; i<horizontalRows; ++i) {
for (int j=0; j<horizontalCols; ++j) {
result[horizontalCols*i + j] = heightIncrement*(double)(i+1)
+ widthIncrement*(double)j - width/2;
}
}
return result;
}
cpx* SLEAnimate::generateVertical() {
double width = 2*(leftPlot.maxX() - leftPlot.minX());
double height = 2*leftPlot.maxY();
verticalRows = (int)(height/gridRes) + 1;
verticalCols = (int)(width/gridSpacing) + 2;
cpx* result;
result = new cpx [verticalRows*verticalCols];
cpx heightIncrement(0, gridRes);
cpx widthIncrement(gridSpacing, 0);
for (int i=0; i<verticalRows; ++i) {
for (int j=0; j<verticalCols; ++j) {
result[i*verticalCols +j] = heightIncrement*(double)i
+ widthIncrement*(double)j - width/2;
}
}
return result;
}
/*
cv::Mat_<cpx> SLEAnimate::generatePixelPos() {
int rows = rightPlot.rows();
int cols = rightPlot.cols();
cv::Mat_<cpx> result( rows, cols );
for (int i=0; i<rows; ++i) {
for (int j=0; j<cols; ++j) {
result(i,j) = rightPlot.CVTocpx(Point(i,j));
}
}
return result;
}
*/
Vec3b SLEAnimate::cpxToColour(cpx z, bool shader) {
Vec3b result;
double x = z.real();
double y = z.imag();
// The height of the point, relative to the maximum
double yProp = y/lineHeight;
if (yProp < 0) yProp = 0;
int yCV = int((darkRows-1)*(1-yProp));
// Similar for x, but x can be negative
double xProp = (x/lineWidth + 1)/2;
if (xProp > 1) xProp = 1;
int xCV = int((darkCols-1)*xProp);
if (shader) {
result = dark.at<Vec3b>(yCV, xCV);
} else {
result = light.at<Vec3b>(yCV, xCV);
}
return result;
}
Mat SLEAnimate::generateColours(cpx* points,
int pointsRows,
int pointsCols,
bool shader) {
Mat result(pointsRows, pointsCols, CV_8UC3, Scalar(255,255,255));
for (int i = 0; i < pointsRows; ++i){
for (int j = 0; j < pointsCols; ++j) {
result.at<Vec3b>(i,j) = cpxToColour( points[i*pointsCols +j], shader);
}
}
return result;
}
void SLEAnimate::updateMatrixForward(int start, int end,
cpx* inMat,
cpx* outMat,
cpx* outMatHost,
int rows,
int cols){
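// One thread per matrix element: the grid holds ceil(cols/TILE_WIDTH) x
// ceil(rows/TILE_WIDTH) blocks of TILE_WIDTH x TILE_WIDTH threads.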
dim3 dimGrid((cols - 1) / TILE_WIDTH + 1, (rows - 1) / TILE_WIDTH + 1, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
hipLaunchKernelGGL(( updateMatrixForwardGPU), dim3(dimGrid), dim3(dimBlock), 0, 0, start, end, inMat, outMat, dtDevice, shiftsDevice, rows, cols);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipMemcpy(outMatHost, outMat, rows*cols*sizeof(cpx), hipMemcpyDeviceToHost));
}
void SLEAnimate::updateValueForward(int start, int end,
cpx inValue,
cpx& outValue){
outValue = slitMapInverse(start, inValue);
for (int i = start + 1; i < end; ++i) {
outValue = slitMapInverse(i, outValue);
}
}
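// slitMapInverse (below) and slitMap (further down) are the incremental vertical-slit
// Loewner maps: slitMap(i, .) opens a slit of half-plane capacity dt[i] at driving
// position shifts[i], and slitMapInverse(i, .) removes it again.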
cpx SLEAnimate::slitMapInverse(int index, cpx inValue){
//cout << shifts[index] << " " << dt[index] << endl;
cpx outValue = sqrt( -(inValue - shifts[index])*(inValue - shifts[index]) - 4*dt[index]) * cpx(0,1);
return outValue;
}
void SLEAnimate::updateMatrixReverse(int start, int end,
double offset,
cpx* inMat,
cpx* outMat,
cpx* outMatHost,
int rows,
int cols)
{
dim3 dimGrid( (cols-1)/TILE_WIDTH + 1, (rows-1)/TILE_WIDTH + 1, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
hipLaunchKernelGGL(( updateMatrixReverseGPU), dim3(dimGrid), dim3(dimBlock), 0, 0, start, end, offset, inMat, outMat, dtDevice, shiftsDevice, rows, cols);
gpuErrchk(hipPeekAtLastError());
hipMemcpy(outMatHost, outMat, rows*cols*sizeof(cpx), hipMemcpyDeviceToHost);
}
void SLEAnimate::updateValueReverse(int start, int end,
double offset,
cpx inValue,
cpx& outValue){
outValue = slitMap(end-1, inValue - offset);
for (int i = end-2; i >= start; --i) {
outValue = slitMap(i, outValue);
}
}
cpx SLEAnimate::slitMap(int index, cpx inValue){
return sqrt(4*dt[index] - inValue*inValue)*cpx(0,1) + shifts[index];
}
void SLEAnimate::updateMatrixForward(SlitMap& h,
cpx* inMat,
cpx* outMat,
int rows,
int cols){
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
outMat[i*cols + j] = h.inverse(inMat[i*cols + j]);
}
}
}
void SLEAnimate::updateMatrixReverse(SlitMap& h,
double offset,
cpx* inMat,
cpx* outMat,
int rows,
int cols){
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
outMat[i*cols + j] = h(inMat[i*cols + j] - offset);
}
}
}
void SLEAnimate::plot() {
rightPlot.clear();
drawColours(rightPlot, pxNow, pxOriginalRows, pxOriginalCols);
drawLines(rightPlot, horizontal, horizontalRows, horizontalCols, hzColour);
drawLinesTranspose(rightPlot, vertical, verticalRows, verticalCols, vtColour);
if (currentTime > 0) {
leftPlot.drawLine(g.forwardLine( *(--frameTimes.lower_bound(currentTime)),
currentTime),
Scalar(255,0,0));
}
rightPlot.drawAxis();
}
////////////////////////////////////////////////////////////////////////////////
//// SLEAnimate member function definitions ////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
SLEAnimate::SLEAnimate(double gridRes,
double gridSpacing,
SLE& g,
class plot& left,
class plot& right)
:gridRes(gridRes), gridSpacing(gridSpacing),
g(g), leftPlot(left), rightPlot(right) {
/*--- Initialise CUDA data ---*/
numMaps = g.numMaps();
int numBits = numMaps*sizeof(double);
dt = g.times();
hipMalloc((void**)&dtDevice, numBits);
hipMemcpy(dtDevice, dt, numBits, hipMemcpyHostToDevice);
shifts = g.shifts();
hipMalloc((void**)&shiftsDevice, numBits);
hipMemcpy(shiftsDevice, shifts, numBits, hipMemcpyHostToDevice);
// Import the colour matrices
dark = imread("D:\\sleOutput\\col\\dark.png", CV_LOAD_IMAGE_COLOR);
//dark = imread("/Users/Henry/tmp/colours/dark.png", CV_LOAD_IMAGE_COLOR);
light = imread("D:\\sleOutput\\col\\light.png", CV_LOAD_IMAGE_COLOR);
//light = imread("/Users/Henry/tmp/colours/light.png", CV_LOAD_IMAGE_COLOR);
darkRows = dark.rows;
darkCols = dark.cols;
lightRows = light.rows;
lightCols = light.cols;
// Initialise the line matrices
lineHeight = 2*leftPlot.maxY();
lineWidth = 2*leftPlot.maxX();
horizontal = generateHorizontal();
hzColour = generateColours(horizontal, horizontalRows, horizontalCols, true);
vertical = generateVertical();
vtColour = generateColours(vertical, verticalRows, verticalCols, true);
// Copy line matrices to device
hipMalloc((void**)&horizontalDevice, horizontalRows*horizontalCols*sizeof(cpx));
hipMemcpy(horizontalDevice, horizontal,
horizontalRows*horizontalCols*sizeof(cpx), hipMemcpyHostToDevice);
hipMalloc((void**)&verticalDevice, verticalRows*verticalCols*sizeof(cpx));
hipMemcpy(verticalDevice, vertical,
verticalRows*verticalCols*sizeof(cpx), hipMemcpyHostToDevice);
// Initialise pixel position matrix
pxOriginal = leftPlot.points();
pxOriginalRows = leftPlot.pointsRows();
pxOriginalCols = leftPlot.pointsCols();
pxNow = leftPlot.points();
// Copy pixel matrices to device
hipMalloc((void**)&pxOriginalDevice, pxOriginalRows*pxOriginalCols*sizeof(cpx));
hipMemcpy(pxOriginalDevice, pxOriginal,
pxOriginalRows*pxOriginalCols*sizeof(cpx), hipMemcpyHostToDevice);
hipMalloc((void**)&pxNowDevice, pxOriginalRows*pxOriginalCols*sizeof(cpx));
hipMemcpy(pxNowDevice, pxNow,
pxOriginalRows*pxOriginalCols*sizeof(cpx), hipMemcpyHostToDevice);
// Initialise the stabilisation point to somewhere far away
// on the imaginary axis
stabilser = cpx(0, 1000);
stabiliserReverse = cpx(0, 1000);
// Initialise the pixel matrix
//pixelPos = generatePixelPos();
// Initialise the times
times = g.getOrderedTimes();
frameTimes = g.orderedFrameTimes();
currentTime = 0;
initialiseLeft();
plot();
}
bool SLEAnimate::nextFrame() {
auto nextTimePtr = frameTimes.upper_bound(currentTime);
if (nextTimePtr != frameTimes.end() ) {
double nextTime = *nextTimePtr;
vector<SlitMap> gridMaps;
vector<SlitMap> pixelMaps;
auto start = times.begin();
auto lower = times.lower_bound(currentTime);
auto upper = times.lower_bound(nextTime);
int gridStart = (int)distance(start, lower);
int end = (int)distance(start, upper);
updateMatrixForward(gridStart, end,
horizontalDevice,
horizontalDevice,
horizontal,
horizontalRows,
horizontalCols);
updateMatrixForward(gridStart, end,
verticalDevice,
verticalDevice,
vertical,
verticalRows,
verticalCols);
SlitMap h = g.slitMap(0);
for (auto it = times.lower_bound(currentTime); *it < nextTime; ++it) {
h = g.slitMap(*it);
gridMaps.push_back(h);
stabilser = h.inverse(stabilser);
stabiliserReverse = h(stabiliserReverse);
//cout << "h dt " << h.getDt() << " h dx " << h.getOffset() << endl;
//cout << "a dt " << dt[i] << " a dx " << shifts[i] << endl;
}
cout << "Vector length: " << gridMaps.size() << endl;
for (auto it = times.begin(); *it < nextTime; ++it) {
h = g.slitMap(*it);
pixelMaps.push_back(h);
}
//updateMatrixForward(gridMaps, horizontal, horizontal, horizontalRows, horizontalCols);
//updateMatrixForward(gridMaps, vertical, vertical, horizontalRows, horizontalCols);
double offset = -stabilser.real();
//updateMatrixReverse(pixelMaps, offset, pxOriginal, pxNow, pxOriginalRows, pxOriginalCols);
updateMatrixReverse(0, end, offset, pxOriginalDevice, pxNowDevice, pxNow, pxOriginalRows, pxOriginalCols);
currentTime = nextTime;
plot();
return true;
}
else {
return false;
}
}
void SLEAnimate::show() {
leftPlot.show();
rightPlot.show();
}
void SLEAnimate::output(int frame) {
// Set up filenames
std::string strLeft = "D:\\sleOutput\\left\\";
std::string strRight = "D:\\sleOutput\\right\\";
//std::string strLeft = "/Users/henry/tmp/left/";
//std::string strRight = "/Users/henry/tmp/right/";
std::string ltName;
std::string rtName;
std::stringstream ss;
ss << std::setfill('0') << std::setw(4);
ss << frame;
ltName = strLeft + ss.str() + ".png";
rtName = strRight + ss.str() + ".png";
ss.str(std::string());
ss.clear();
leftPlot.output(ltName.c_str());
rightPlot.output(rtName.c_str());
}
SLEAnimate::~SLEAnimate(){
delete[] horizontal;
delete[] vertical;
delete [] pxOriginal;
delete[] pxNow;
}
|
f63d6b1a0da835fe8259853dacca6bd56cba3dcf.cu
|
//
// SLEAnimate.cpp
// SLE
//
// Created by Henry Jackson on 22/04/2015.
// Copyright (c) 2015 n/a. All rights reserved.
//
#include "SLEAnimate.cuh"
#include <iostream>
#define TILE_WIDTH 32
/*--- CUDA Error checking ---*/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
////////////////////////////////////////////////////////////////////////////////
//// SLEAnimate private member function definitions ////////////////////////////
////////////////////////////////////////////////////////////////////////////////
void SLEAnimate::initialiseLeft(){
drawColours(leftPlot, pxOriginal, pxOriginalRows, pxOriginalCols);
drawLines(leftPlot, horizontal, horizontalRows, horizontalCols, hzColour);
drawLinesTranspose(leftPlot, vertical, verticalRows, verticalCols, vtColour);
leftPlot.drawAxis();
}
void SLEAnimate::drawLines(class plot& plot,
cpx* matrix,
int matrixRows,
int matrixCols,
Mat& colours){
double offset = -stabilser.real();
for (int i=0; i<matrixRows; ++i) {
vector<cpx> line;
for (int j=0; j<matrixCols-1; ++j) {
line = vector<cpx>{matrix[i*matrixCols + j] + offset, matrix[i*matrixCols + j + 1] + offset};
Vec3b col = colours.at<Vec3b>(i,j);
Scalar colScalar = Scalar( (double)col[0], (double)col[1], (double)col[2] );
plot.drawLine(line, colScalar);
}
}
// Draws the little blue thing at the bottom of the screen
vector<cpx> offsetLine;
offsetLine.push_back(offset - cpx(0, gridSpacing/2));
offsetLine.push_back(offset + cpx(0, gridSpacing/2));
plot.drawLine(offsetLine, Scalar(255,0,0));
}
void SLEAnimate::drawLinesTranspose(class plot& plot,
cpx* matrix,
int matrixRows,
int matrixCols,
Mat& colours){
double offset = -stabilser.real();
Mat tmpColours = colours.t();
for (int j=0; j<matrixCols; ++j) {
vector<cpx> line;
for (int i=0; i<matrixRows-1; ++i) {
line = vector<cpx>{matrix[i*matrixCols + j] + offset, matrix[(i+1)*matrixCols + j] + offset};
Vec3b col = colours.at<Vec3b>(i,j);
Scalar colScalar = Scalar( (double)col[0], (double)col[1], (double)col[2] );
plot.drawLine(line, colScalar);
}
}
// Draws the little blue thing at the bottom of the screen
vector<cpx> offsetLine;
offsetLine.push_back(offset - cpx(0, gridSpacing/2));
offsetLine.push_back(offset + cpx(0, gridSpacing/2));
plot.drawLine(offsetLine, Scalar(255,0,0));
}
void SLEAnimate::drawColours(class plot& plot,
cpx* points,
int pointsRows,
int pointsCols){
Mat colours = generateColours(points, pointsRows, pointsCols, false);
for (int i = 0; i < pointsRows; ++i) {
for (int j = 0; j < pointsCols; ++j){
plot.colour(i, j, colours.at<Vec3b>(i,j) );
}
}
}
cpx* SLEAnimate::generateHorizontal(){
double width = 2*(leftPlot.maxX() - leftPlot.minX());
double height = 2*leftPlot.maxY();
horizontalRows = (int)(height/gridSpacing);
horizontalCols = (int)(width/gridRes);
cpx* result = new cpx [horizontalRows*horizontalCols];
cpx heightIncrement(0, gridSpacing);
cpx widthIncrement(gridRes, 0);
for (int i=0; i<horizontalRows; ++i) {
for (int j=0; j<horizontalCols; ++j) {
result[horizontalCols*i + j] = heightIncrement*(double)(i+1)
+ widthIncrement*(double)j - width/2;
}
}
return result;
}
cpx* SLEAnimate::generateVertical() {
double width = 2*(leftPlot.maxX() - leftPlot.minX());
double height = 2*leftPlot.maxY();
verticalRows = (int)(height/gridRes) + 1;
verticalCols = (int)(width/gridSpacing) + 2;
cpx* result;
result = new cpx [verticalRows*verticalCols];
cpx heightIncrement(0, gridRes);
cpx widthIncrement(gridSpacing, 0);
for (int i=0; i<verticalRows; ++i) {
for (int j=0; j<verticalCols; ++j) {
result[i*verticalCols +j] = heightIncrement*(double)i
+ widthIncrement*(double)j - width/2;
}
}
return result;
}
/*
cv::Mat_<cpx> SLEAnimate::generatePixelPos() {
int rows = rightPlot.rows();
int cols = rightPlot.cols();
cv::Mat_<cpx> result( rows, cols );
for (int i=0; i<rows; ++i) {
for (int j=0; j<cols; ++j) {
result(i,j) = rightPlot.CVTocpx(Point(i,j));
}
}
return result;
}
*/
Vec3b SLEAnimate::cpxToColour(cpx z, bool shader) {
Vec3b result;
double x = z.real();
double y = z.imag();
// The height of the point, relative to the maximum
double yProp = y/lineHeight;
if (yProp < 0) yProp = 0;
int yCV = int((darkRows-1)*(1-yProp));
// Similar for x, but x can be negative
double xProp = (x/lineWidth + 1)/2;
if (xProp > 1) xProp = 1;
int xCV = int((darkCols-1)*xProp);
if (shader) {
result = dark.at<Vec3b>(yCV, xCV);
} else {
result = light.at<Vec3b>(yCV, xCV);
}
return result;
}
Mat SLEAnimate::generateColours(cpx* points,
int pointsRows,
int pointsCols,
bool shader) {
Mat result(pointsRows, pointsCols, CV_8UC3, Scalar(255,255,255));
for (int i = 0; i < pointsRows; ++i){
for (int j = 0; j < pointsCols; ++j) {
result.at<Vec3b>(i,j) = cpxToColour( points[i*pointsCols +j], shader);
}
}
return result;
}
void SLEAnimate::updateMatrixForward(int start, int end,
cpx* inMat,
cpx* outMat,
cpx* outMatHost,
int rows,
int cols){
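// One thread per matrix element: the grid holds ceil(cols/TILE_WIDTH) x
// ceil(rows/TILE_WIDTH) blocks of TILE_WIDTH x TILE_WIDTH threads.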
dim3 dimGrid((cols - 1) / TILE_WIDTH + 1, (rows - 1) / TILE_WIDTH + 1, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
updateMatrixForwardGPU<<<dimGrid, dimBlock>>>(start, end, inMat, outMat, dtDevice, shiftsDevice, rows, cols);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaMemcpy(outMatHost, outMat, rows*cols*sizeof(cpx), cudaMemcpyDeviceToHost));
}
void SLEAnimate::updateValueForward(int start, int end,
cpx inValue,
cpx& outValue){
outValue = slitMapInverse(start, inValue);
for (int i = start + 1; i < end; ++i) {
outValue = slitMapInverse(i, outValue);
}
}
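// slitMapInverse (below) and slitMap (further down) are the incremental vertical-slit
// Loewner maps: slitMap(i, .) opens a slit of half-plane capacity dt[i] at driving
// position shifts[i], and slitMapInverse(i, .) removes it again.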
cpx SLEAnimate::slitMapInverse(int index, cpx inValue){
//cout << shifts[index] << " " << dt[index] << endl;
cpx outValue = sqrt( -(inValue - shifts[index])*(inValue - shifts[index]) - 4*dt[index]) * cpx(0,1);
return outValue;
}
void SLEAnimate::updateMatrixReverse(int start, int end,
double offset,
cpx* inMat,
cpx* outMat,
cpx* outMatHost,
int rows,
int cols)
{
dim3 dimGrid( (cols-1)/TILE_WIDTH + 1, (rows-1)/TILE_WIDTH + 1, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
updateMatrixReverseGPU<<<dimGrid, dimBlock>>>(start, end, offset, inMat, outMat, dtDevice, shiftsDevice, rows, cols);
gpuErrchk(cudaPeekAtLastError());
cudaMemcpy(outMatHost, outMat, rows*cols*sizeof(cpx), cudaMemcpyDeviceToHost);
}
void SLEAnimate::updateValueReverse(int start, int end,
double offset,
cpx inValue,
cpx& outValue){
outValue = slitMap(end-1, inValue - offset);
for (int i = end-2; i >= start; --i) {
outValue = slitMap(i, outValue);
}
}
cpx SLEAnimate::slitMap(int index, cpx inValue){
return sqrt(4*dt[index] - inValue*inValue)*cpx(0,1) + shifts[index];
}
void SLEAnimate::updateMatrixForward(SlitMap& h,
cpx* inMat,
cpx* outMat,
int rows,
int cols){
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
outMat[i*cols + j] = h.inverse(inMat[i*cols + j]);
}
}
}
void SLEAnimate::updateMatrixReverse(SlitMap& h,
double offset,
cpx* inMat,
cpx* outMat,
int rows,
int cols){
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
outMat[i*cols + j] = h(inMat[i*cols + j] - offset);
}
}
}
void SLEAnimate::plot() {
rightPlot.clear();
drawColours(rightPlot, pxNow, pxOriginalRows, pxOriginalCols);
drawLines(rightPlot, horizontal, horizontalRows, horizontalCols, hzColour);
drawLinesTranspose(rightPlot, vertical, verticalRows, verticalCols, vtColour);
if (currentTime > 0) {
leftPlot.drawLine(g.forwardLine( *(--frameTimes.lower_bound(currentTime)),
currentTime),
Scalar(255,0,0));
}
rightPlot.drawAxis();
}
////////////////////////////////////////////////////////////////////////////////
//// SLEAnimate member function definitions ////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
SLEAnimate::SLEAnimate(double gridRes,
double gridSpacing,
SLE& g,
class plot& left,
class plot& right)
:gridRes(gridRes), gridSpacing(gridSpacing),
g(g), leftPlot(left), rightPlot(right) {
/*--- Initialise CUDA data ---*/
numMaps = g.numMaps();
int numBits = numMaps*sizeof(double);
dt = g.times();
cudaMalloc((void**)&dtDevice, numBits);
cudaMemcpy(dtDevice, dt, numBits, cudaMemcpyHostToDevice);
shifts = g.shifts();
cudaMalloc((void**)&shiftsDevice, numBits);
cudaMemcpy(shiftsDevice, shifts, numBits, cudaMemcpyHostToDevice);
// Import the colour matrices
dark = imread("D:\\sleOutput\\col\\dark.png", CV_LOAD_IMAGE_COLOR);
//dark = imread("/Users/Henry/tmp/colours/dark.png", CV_LOAD_IMAGE_COLOR);
light = imread("D:\\sleOutput\\col\\light.png", CV_LOAD_IMAGE_COLOR);
//light = imread("/Users/Henry/tmp/colours/light.png", CV_LOAD_IMAGE_COLOR);
darkRows = dark.rows;
darkCols = dark.cols;
lightRows = light.rows;
lightCols = light.cols;
// Initialise the line matrices
lineHeight = 2*leftPlot.maxY();
lineWidth = 2*leftPlot.maxX();
horizontal = generateHorizontal();
hzColour = generateColours(horizontal, horizontalRows, horizontalCols, true);
vertical = generateVertical();
vtColour = generateColours(vertical, verticalRows, verticalCols, true);
// Copy line matrices to device
cudaMalloc((void**)&horizontalDevice, horizontalRows*horizontalCols*sizeof(cpx));
cudaMemcpy(horizontalDevice, horizontal,
horizontalRows*horizontalCols*sizeof(cpx), cudaMemcpyHostToDevice);
cudaMalloc((void**)&verticalDevice, verticalRows*verticalCols*sizeof(cpx));
cudaMemcpy(verticalDevice, vertical,
verticalRows*verticalCols*sizeof(cpx), cudaMemcpyHostToDevice);
// Initialise pixel position matrix
pxOriginal = leftPlot.points();
pxOriginalRows = leftPlot.pointsRows();
pxOriginalCols = leftPlot.pointsCols();
pxNow = leftPlot.points();
// Copy pixel matrices to device
cudaMalloc((void**)&pxOriginalDevice, pxOriginalRows*pxOriginalCols*sizeof(cpx));
cudaMemcpy(pxOriginalDevice, pxOriginal,
pxOriginalRows*pxOriginalCols*sizeof(cpx), cudaMemcpyHostToDevice);
cudaMalloc((void**)&pxNowDevice, pxOriginalRows*pxOriginalCols*sizeof(cpx));
cudaMemcpy(pxNowDevice, pxNow,
pxOriginalRows*pxOriginalCols*sizeof(cpx), cudaMemcpyHostToDevice);
// Initialise the stabilisation point to somewhere far away
// on the imaginary axis
stabilser = cpx(0, 1000);
stabiliserReverse = cpx(0, 1000);
// Initialise the pixel matrix
//pixelPos = generatePixelPos();
// Initialise the times
times = g.getOrderedTimes();
frameTimes = g.orderedFrameTimes();
currentTime = 0;
initialiseLeft();
plot();
}
bool SLEAnimate::nextFrame() {
auto nextTimePtr = frameTimes.upper_bound(currentTime);
if (nextTimePtr != frameTimes.end() ) {
double nextTime = *nextTimePtr;
vector<SlitMap> gridMaps;
vector<SlitMap> pixelMaps;
auto start = times.begin();
auto lower = times.lower_bound(currentTime);
auto upper = times.lower_bound(nextTime);
int gridStart = (int)distance(start, lower);
int end = (int)distance(start, upper);
updateMatrixForward(gridStart, end,
horizontalDevice,
horizontalDevice,
horizontal,
horizontalRows,
horizontalCols);
updateMatrixForward(gridStart, end,
verticalDevice,
verticalDevice,
vertical,
verticalRows,
verticalCols);
SlitMap h = g.slitMap(0);
for (auto it = times.lower_bound(currentTime); *it < nextTime; ++it) {
h = g.slitMap(*it);
gridMaps.push_back(h);
stabilser = h.inverse(stabilser);
stabiliserReverse = h(stabiliserReverse);
//cout << "h dt " << h.getDt() << " h dx " << h.getOffset() << endl;
//cout << "a dt " << dt[i] << " a dx " << shifts[i] << endl;
}
cout << "Vector length: " << gridMaps.size() << endl;
for (auto it = times.begin(); *it < nextTime; ++it) {
h = g.slitMap(*it);
pixelMaps.push_back(h);
}
//updateMatrixForward(gridMaps, horizontal, horizontal, horizontalRows, horizontalCols);
//updateMatrixForward(gridMaps, vertical, vertical, horizontalRows, horizontalCols);
double offset = -stabilser.real();
//updateMatrixReverse(pixelMaps, offset, pxOriginal, pxNow, pxOriginalRows, pxOriginalCols);
updateMatrixReverse(0, end, offset, pxOriginalDevice, pxNowDevice, pxNow, pxOriginalRows, pxOriginalCols);
currentTime = nextTime;
plot();
return true;
}
else {
return false;
}
}
void SLEAnimate::show() {
leftPlot.show();
rightPlot.show();
}
void SLEAnimate::output(int frame) {
// Set up filenames
std::string strLeft = "D:\\sleOutput\\left\\";
std::string strRight = "D:\\sleOutput\\right\\";
//std::string strLeft = "/Users/henry/tmp/left/";
//std::string strRight = "/Users/henry/tmp/right/";
std::string ltName;
std::string rtName;
std::stringstream ss;
ss << std::setfill('0') << std::setw(4);
ss << frame;
ltName = strLeft + ss.str() + ".png";
rtName = strRight + ss.str() + ".png";
ss.str(std::string());
ss.clear();
leftPlot.output(ltName.c_str());
rightPlot.output(rtName.c_str());
}
SLEAnimate::~SLEAnimate(){
delete[] horizontal;
delete[] vertical;
delete [] pxOriginal;
delete[] pxNow;
}
|
f18a127986a568a4f840bc04cefaaa304e5f7924.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__device__ void mul(double a, double b, double *res)
{
*res = _FPC_CHECK_(a * b, 6, ".._cuda/src/compute_copy.cu");
// NaN
*res = _FPC_CHECK_((*res)-(*res) / (*res)-(*res), 8, ".._cuda/src/compute_copy.cu");
}
__global__ void compute(double *x, double *y, int size)
{
double d = 0.0; // accumulate the dot product
for (int i=0; i < size; ++i)
{
double tmp;
mul(x[i], y[i], &tmp);
d += _FPC_CHECK_(tmp, 18, ".._cuda/src/compute_copy.cu");
}
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) {
printf("dot: %f\n", d);
}
}
|
f18a127986a568a4f840bc04cefaaa304e5f7924.cu
|
#include <stdio.h>
__device__ void mul(double a, double b, double *res)
{
*res = _FPC_CHECK_(a * b, 6, ".._cuda/src/compute_copy.cu");
// NaN
*res = _FPC_CHECK_((*res)-(*res) / (*res)-(*res), 8, ".._cuda/src/compute_copy.cu");
}
__global__ void compute(double *x, double *y, int size)
{
double d = 0.0; // accumulate the dot product
for (int i=0; i < size; ++i)
{
double tmp;
mul(x[i], y[i], &tmp);
d += _FPC_CHECK_(tmp, 18, ".._cuda/src/compute_copy.cu");
}
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0) {
printf("dot: %f\n", d);
}
}
|
256b5db954adc1a32b21fad21088df763be2f75f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <conio.h>
#include <Windows.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
for(int j = 0; j < 1000000; j++)
c[i] = a[i] * b[i];
}
double get_cpu_time(){
FILETIME a,b,c,d;
if (GetProcessTimes(GetCurrentProcess(),&a,&b,&c,&d) != 0){
// Returns total user time.
// Can be tweaked to include kernel times as well.
return
(double)(d.dwLowDateTime |
((unsigned long long)d.dwHighDateTime << 32)) * 0.0000001;
}else{
// Handle error
return 0;
}
}
int main()
{
int arraySize = 1024;
int* a = new int[arraySize];
int* b = new int[arraySize];
int* c = new int[arraySize];
for(auto i = 0; i < arraySize; i++)
{
a[i] = i;
b[i] = i;
c[i] = 0;
}
getch();
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
double cpu0 = get_cpu_time();
for(int i = 0; i < arraySize; i++)
{
for(int j = 0; j < 1000000; j++)
c[i] = a[i] * b[i];
}
double cpu1 = get_cpu_time();
printf("CPU time: %3.1f ms\n", (cpu1 - cpu0) * 1000);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
delete[] a;
delete[] b;
delete[] c;
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipEvent_t start, stop;
hipEventCreate(&start, 0);
hipEventCreate(&stop, 0);
hipEventRecord(start, 0);
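// The kernel runs as a single block of `size` threads, so arraySize must not exceed
// the device's maximum threads per block (1024 on current GPUs).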
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("GPU time: %3.1f ms\n" , elapsedTime);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
|
256b5db954adc1a32b21fad21088df763be2f75f.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <conio.h>
#include <Windows.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
for(int j = 0; j < 1000000; j++)
c[i] = a[i] * b[i];
}
double get_cpu_time(){
FILETIME a,b,c,d;
if (GetProcessTimes(GetCurrentProcess(),&a,&b,&c,&d) != 0){
// Returns total user time.
// Can be tweaked to include kernel times as well.
return
(double)(d.dwLowDateTime |
((unsigned long long)d.dwHighDateTime << 32)) * 0.0000001;
}else{
// Handle error
return 0;
}
}
int main()
{
int arraySize = 1024;
int* a = new int[arraySize];
int* b = new int[arraySize];
int* c = new int[arraySize];
for(auto i = 0; i < arraySize; i++)
{
a[i] = i;
b[i] = i;
c[i] = 0;
}
getch();
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
double cpu0 = get_cpu_time();
for(int i = 0; i < arraySize; i++)
{
for(int j = 0; j < 1000000; j++)
c[i] = a[i] * b[i];
}
double cpu1 = get_cpu_time();
printf("CPU time: %3.1f ms\n", (cpu1 - cpu0) * 1000);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
delete[] a;
delete[] b;
delete[] c;
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
cudaEvent_t start, stop;
cudaEventCreate(&start, 0);
cudaEventCreate(&stop, 0);
cudaEventRecord(start, 0);
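// The kernel runs as a single block of `size` threads, so arraySize must not exceed
// the device's maximum threads per block (1024 on current GPUs).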
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("GPU time: %3.1f ms\n" , elapsedTime);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
83e3ded7fe42fb843057a066392242b2c471f9a2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add(int *a, int *b, int *c) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
|
83e3ded7fe42fb843057a066392242b2c471f9a2.cu
|
#include "includes.h"
__global__ void add(int *a, int *b, int *c) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
|
2987bfa7480adc3cba0c3488f926d4eb9129951f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
__global__ void vecAddKernel(float* A, float* B, float* C, int n) {
// Calculate global thread index based on the block and thread indices ----
int i = threadIdx.x + (blockDim.x * blockIdx.x) ;
// Use global index to determine which elements to read, add, and write ---
if ( i < n) C[i] = A[i] + B[i] ;
}
|
2987bfa7480adc3cba0c3488f926d4eb9129951f.cu
|
/******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
__global__ void vecAddKernel(float* A, float* B, float* C, int n) {
// Calculate global thread index based on the block and thread indices ----
int i = threadIdx.x + (blockDim.x * blockIdx.x) ;
// Use global index to determine which elements to read, add, and write ---
if ( i < n) C[i] = A[i] + B[i] ;
}
|
d925c43b847840a1d61dcc921c1964b77e7bb92b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/cuda_runtime.h"
#include "backend/kernel_compiler/gpu/cuda_impl/float_status_impl.cuh"
template <typename T>
__global__ void IsNan(const size_t size, const T* input, bool* out) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (isnan(input[pos])) {
out[pos] = true;
} else {
out[pos] = false;
}
}
return;
}
template <>
__global__ void IsNan(const size_t size, const half* input, bool* out) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (__hisnan(input[pos])) {
out[pos] = true;
} else {
out[pos] = false;
}
}
return;
}
template <typename T>
__global__ void IsInf(const size_t size, const T* input, bool* out) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (isinf(input[pos]) != 0) {
out[pos] = true;
} else {
out[pos] = false;
}
}
return;
}
template <>
__global__ void IsInf(const size_t size, const half* input, bool* out) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (__hisinf(input[pos]) != 0) {
out[pos] = true;
} else {
out[pos] = false;
}
}
return;
}
template <typename T>
__global__ void IsFinite(const size_t size, const T* input, bool* out) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (isinf(input[pos]) == 0 && !isnan(input[pos])) {
out[pos] = true;
} else {
out[pos] = false;
}
}
return;
}
template <>
__global__ void IsFinite(const size_t size, const half* input, bool* out) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (__hisinf(input[pos]) == 0 && !__hisnan(input[pos])) {
out[pos] = true;
} else {
out[pos] = false;
}
}
return;
}
template <typename T>
__global__ void FloatStatus(const size_t size, const T* input, T* out) {
out[0] = 0;
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (isinf(input[pos]) != 0 || isnan(input[pos])) {
out[0] = 1;
}
}
return;
}
template <>
__global__ void FloatStatus(const size_t size, const half* input, half* out) {
out[0] = 0;
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (__hisinf(input[pos]) != 0 || __hisnan(input[pos])) {
out[0] = 1;
}
}
return;
}
template <typename T>
void CalFloatStatus(const size_t size, const T* input, T* output, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( FloatStatus), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, input, output);
return;
}
template <typename T>
void CalIsNan(const size_t size, const T* input, bool* output, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( IsNan), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, input, output);
return;
}
template <typename T>
void CalIsInf(const size_t size, const T* input, bool* output, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( IsInf), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, input, output);
return;
}
template <typename T>
void CalIsFinite(const size_t size, const T* input, bool* output, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( IsFinite), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, input, output);
return;
}
template void CalFloatStatus<float>(const size_t size, const float* input, float* output, hipStream_t cuda_stream);
template void CalFloatStatus<half>(const size_t size, const half* input, half* output, hipStream_t cuda_stream);
template void CalIsInf<float>(const size_t size, const float* input, bool* output, hipStream_t cuda_stream);
template void CalIsInf<half>(const size_t size, const half* input, bool* output, hipStream_t cuda_stream);
template void CalIsNan<float>(const size_t size, const float* input, bool* output, hipStream_t cuda_stream);
template void CalIsNan<half>(const size_t size, const half* input, bool* output, hipStream_t cuda_stream);
template void CalIsFinite<float>(const size_t size, const float* input, bool* output, hipStream_t cuda_stream);
template void CalIsFinite<half>(const size_t size, const half* input, bool* output, hipStream_t cuda_stream);
|
d925c43b847840a1d61dcc921c1964b77e7bb92b.cu
|
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/cuda_runtime.h"
#include "backend/kernel_compiler/gpu/cuda_impl/float_status_impl.cuh"
template <typename T>
__global__ void IsNan(const size_t size, const T* input, bool* out) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (isnan(input[pos])) {
out[pos] = true;
} else {
out[pos] = false;
}
}
return;
}
template <>
__global__ void IsNan(const size_t size, const half* input, bool* out) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (__hisnan(input[pos])) {
out[pos] = true;
} else {
out[pos] = false;
}
}
return;
}
template <typename T>
__global__ void IsInf(const size_t size, const T* input, bool* out) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (isinf(input[pos]) != 0) {
out[pos] = true;
} else {
out[pos] = false;
}
}
return;
}
template <>
__global__ void IsInf(const size_t size, const half* input, bool* out) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (__hisinf(input[pos]) != 0) {
out[pos] = true;
} else {
out[pos] = false;
}
}
return;
}
template <typename T>
__global__ void IsFinite(const size_t size, const T* input, bool* out) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (isinf(input[pos]) == 0 && !isnan(input[pos])) {
out[pos] = true;
} else {
out[pos] = false;
}
}
return;
}
template <>
__global__ void IsFinite(const size_t size, const half* input, bool* out) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (__hisinf(input[pos]) == 0 && !__hisnan(input[pos])) {
out[pos] = true;
} else {
out[pos] = false;
}
}
return;
}
template <typename T>
__global__ void FloatStatus(const size_t size, const T* input, T* out) {
out[0] = 0;
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (isinf(input[pos]) != 0 || isnan(input[pos])) {
out[0] = 1;
}
}
return;
}
template <>
__global__ void FloatStatus(const size_t size, const half* input, half* out) {
out[0] = 0;
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
if (__hisinf(input[pos]) != 0 || __hisnan(input[pos])) {
out[0] = 1;
}
}
return;
}
template <typename T>
void CalFloatStatus(const size_t size, const T* input, T* output, cudaStream_t cuda_stream) {
FloatStatus<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input, output);
return;
}
template <typename T>
void CalIsNan(const size_t size, const T* input, bool* output, cudaStream_t cuda_stream) {
IsNan<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input, output);
return;
}
template <typename T>
void CalIsInf(const size_t size, const T* input, bool* output, cudaStream_t cuda_stream) {
IsInf<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input, output);
return;
}
template <typename T>
void CalIsFinite(const size_t size, const T* input, bool* output, cudaStream_t cuda_stream) {
IsFinite<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input, output);
return;
}
template void CalFloatStatus<float>(const size_t size, const float* input, float* output, cudaStream_t cuda_stream);
template void CalFloatStatus<half>(const size_t size, const half* input, half* output, cudaStream_t cuda_stream);
template void CalIsInf<float>(const size_t size, const float* input, bool* output, cudaStream_t cuda_stream);
template void CalIsInf<half>(const size_t size, const half* input, bool* output, cudaStream_t cuda_stream);
template void CalIsNan<float>(const size_t size, const float* input, bool* output, cudaStream_t cuda_stream);
template void CalIsNan<half>(const size_t size, const half* input, bool* output, cudaStream_t cuda_stream);
template void CalIsFinite<float>(const size_t size, const float* input, bool* output, cudaStream_t cuda_stream);
template void CalIsFinite<half>(const size_t size, const half* input, bool* output, cudaStream_t cuda_stream);
|
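A minimal host-side sketch of driving the exported wrappers above (the function and buffer names here are illustrative; GET_BLOCKS and GET_THREADS are supplied by the headers the file already includes):
#include <cuda_runtime.h>
// Hypothetical usage of CalIsNan: d_in and d_flags are caller-allocated device buffers of length n.
void mark_nans(const float* d_in, bool* d_flags, size_t n, cudaStream_t stream) {
    CalIsNan<float>(n, d_in, d_flags, stream);   // writes true where the input element is NaN
    cudaStreamSynchronize(stream);
}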
cb7e13b6b42e6c24c97bbf6282e2270c53d96806.hip
|
// !!! This is a file automatically generated by hipify!!!
#define MATRIXSIZE 1024
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
template <int BLOCK_SIZE> __global__ void MatrixMulCUDASample(float* A,
float* B, float* C, int WIDTH) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = WIDTH * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + WIDTH - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * WIDTH;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + WIDTH * ty + tx];
Bs[ty][tx] = B[b + WIDTH * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = WIDTH * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + WIDTH * ty + tx] = Csub;
}
void ConstantInit(float* data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char** argv,
int block_size, const dim3& dimsA,
const dim3& dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = reinterpret_cast<float*>(malloc(mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = reinterpret_cast<float*>(malloc(mem_size_B));
hipStream_t stream;
// Initialize host memory
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float* d_A, * d_B, * d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float* h_C = reinterpret_cast<float*>(malloc(mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&d_A), mem_size_A));
checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&d_B), mem_size_B));
checkCudaErrors(hipMalloc(reinterpret_cast<void**>(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
// copy host memory to device
checkCudaErrors(hipMemcpyAsync(d_A, h_A, mem_size_A, hipMemcpyHostToDevice, stream));
checkCudaErrors(hipMemcpyAsync(d_B, h_B, mem_size_B, hipMemcpyHostToDevice, stream));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel////////////
// !!! ------------------------------------------------ !!!
switch(block_size){
case 8:
MatrixMulCUDASample<8> << < grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
break;
case 16:
MatrixMulCUDASample<16> << < grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
break;
case 32:
MatrixMulCUDASample<32> << < grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
break;
}
printf("done\n");
checkCudaErrors(hipStreamSynchronize(stream));
// Record the start event
checkCudaErrors(hipEventRecord(start, stream));
// Execute the kernel
int nIter = 300;
// !!! ------------------------------------------------ !!!
switch(block_size){
case 8:
for (int j = 0; j < nIter; j++) {
MatrixMulCUDASample<8> << <grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
}
break;
case 16:
for (int j = 0; j < nIter; j++) {
MatrixMulCUDASample<16> << <grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
}
break;
case 32:
for (int j = 0; j < nIter; j++) {
MatrixMulCUDASample<32> << <grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
}
break;
}
// Record the stop event
checkCudaErrors(hipEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(hipMemcpyAsync(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost, stream));
checkCudaErrors(hipStreamSynchronize(stream));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
printf("\nNOTE: The CUDA Samples are not meant for performance"\
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
}
else {
return EXIT_FAILURE;
}
}
int main(int argc, char** argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char**)argv, "help") ||
checkCmdLineFlag(argc, (const char**)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices" \
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char**)argv);
int block_size = std::stoi(argv[1]);
printf("Rozmiar bloku: %d\n", block_size);
dim3 dimsA(MATRIXSIZE, MATRIXSIZE, 1);
dim3 dimsB(MATRIXSIZE, MATRIXSIZE, 1);
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
cb7e13b6b42e6c24c97bbf6282e2270c53d96806.cu
|
#define MATRIXSIZE 1024
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
template <int BLOCK_SIZE> __global__ void MatrixMulCUDASample(float* A,
float* B, float* C, int WIDTH) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = WIDTH * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + WIDTH - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * WIDTH;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + WIDTH * ty + tx];
Bs[ty][tx] = B[b + WIDTH * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = WIDTH * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + WIDTH * ty + tx] = Csub;
}
void ConstantInit(float* data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char** argv,
int block_size, const dim3& dimsA,
const dim3& dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = reinterpret_cast<float*>(malloc(mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = reinterpret_cast<float*>(malloc(mem_size_B));
cudaStream_t stream;
// Initialize host memory
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float* d_A, * d_B, * d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float* h_C = reinterpret_cast<float*>(malloc(mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&d_A), mem_size_A));
checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&d_B), mem_size_B));
checkCudaErrors(cudaMalloc(reinterpret_cast<void**>(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
// copy host memory to device
checkCudaErrors(cudaMemcpyAsync(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice, stream));
checkCudaErrors(cudaMemcpyAsync(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice, stream));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel////////////
// !!! ------------------------------------------------ !!!
switch(block_size){
case 8:
MatrixMulCUDASample<8> << < grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
break;
case 16:
MatrixMulCUDASample<16> << < grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
break;
case 32:
MatrixMulCUDASample<32> << < grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
break;
}
printf("done\n");
checkCudaErrors(cudaStreamSynchronize(stream));
// Record the start event
checkCudaErrors(cudaEventRecord(start, stream));
// Execute the kernel
int nIter = 300;
// !!! ------------------------------------------------ !!!
switch(block_size){
case 8:
for (int j = 0; j < nIter; j++) {
MatrixMulCUDASample<8> << <grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
}
break;
case 16:
for (int j = 0; j < nIter; j++) {
MatrixMulCUDASample<16> << <grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
}
break;
case 32:
for (int j = 0; j < nIter; j++) {
MatrixMulCUDASample<32> << <grid, threads, 0, stream >> > (d_A, d_B, d_C, dimsA.x);
}
break;
}
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(cudaMemcpyAsync(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost, stream));
checkCudaErrors(cudaStreamSynchronize(stream));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
printf("\nNOTE: The CUDA Samples are not meant for performance"\
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
}
else {
return EXIT_FAILURE;
}
}
int main(int argc, char** argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char**)argv, "help") ||
checkCmdLineFlag(argc, (const char**)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices" \
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char**)argv);
int block_size = std::stoi(argv[1]);
printf("Rozmiar bloku: %d\n", block_size);
dim3 dimsA(MATRIXSIZE, MATRIXSIZE, 1);
dim3 dimsB(MATRIXSIZE, MATRIXSIZE, 1);
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
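With the fixed MATRIXSIZE of 1024 used above, the operation count reported by the timing code works out as follows (a worked example, not output from a run):
flopsPerMatrixMul = 2 * 1024 * 1024 * 1024 = 2,147,483,648 ≈ 2.15 GFLOP per multiplication,
so a kernel that averaged, say, 10 ms per iteration would be reported at roughly 2.147e9 / 0.010 s ≈ 215 GFlop/s.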
0b54544e6038f984edee11f07ec3365f85d2691d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@precisions normal z -> s d c
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_z
__global__ void magma_zgemv_kernel3(int m, const magmaDoubleComplex * __restrict__ V, int ldv,
magmaDoubleComplex *c, magmaDoubleComplex *dwork,
magmaDoubleComplex *tau);
__global__ void magma_ztrmv_kernel(const magmaDoubleComplex *T, int ldt, magmaDoubleComplex *v);
__global__ void magma_ztrmv_kernel2(const magmaDoubleComplex *T, int ldt,
magmaDoubleComplex *v, magmaDoubleComplex *y, magmaDoubleComplex *tau);
//==============================================================================
__global__
void magma_zlarfgx_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx,
magmaDoubleComplex *dtau, double *dxnorm,
magmaDoubleComplex *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaDoubleComplex scale;
__shared__ double xnorm;
magmaDoubleComplex dxi;
if ( j < n-1 )
dxi = dx[j];
if ( i == 0 ) {
xnorm = *dxnorm;
if ( xnorm == 0 || n == 1) {
*dtau = MAGMA_Z_ZERO;
*dA = *dx0;
}
else {
#if (defined(PRECISION_s) || defined(PRECISION_d))
double alpha = *dx0;
// no need to compute the norm as it is passed as input
double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = (beta - alpha) / beta;
//*dx0 = 1.;
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
magmaDoubleComplex alpha = *dx0;
double alphar = MAGMA_Z_REAL(alpha), alphai = MAGMA_Z_IMAG(alpha);
// no need to compute the norm as it is passed as input
double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta);
//*dx0 = MAGMA_Z_MAKE( 1., 0.);
*dA = MAGMA_Z_MAKE(beta, 0.);
}
alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha));
scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha);
#endif
}
}
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_Z_MUL(dxi, scale);
if (j<it){
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_Z_MAKE(0., 0.);
}
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_zlarfgx_gpu(magma_int_t n, magmaDoubleComplex *dx0, magmaDoubleComplex *dx,
magmaDoubleComplex *dtau, double *dxnorm,
magmaDoubleComplex *dA, magma_int_t it)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_zlarfgx_gpu_kernel), dim3(blocks), dim3(threads), 0, magma_stream , n, dx0, dx, dtau, dxnorm, dA, it);
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_zlarfgtx_gpu(magma_int_t n, magmaDoubleComplex *dx0, magmaDoubleComplex *dx,
magmaDoubleComplex *dtau, double *dxnorm,
magmaDoubleComplex *dA, magma_int_t i,
magmaDoubleComplex *V, magma_int_t ldv, magmaDoubleComplex *T, magma_int_t ldt,
magmaDoubleComplex *work)
{
/* Generate the elementary reflector H(i) */
magma_zlarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, i);
if (i==0) {
magmaDoubleComplex tt = MAGMA_Z_ONE;
magmablas_zlacpy(MagmaUpperLower, 1, 1, dtau, 1, T+i+i*ldt, 1);
magma_zsetmatrix(1,1, &tt,1, dx0,1);
}
else {
/* Compute the i-th column of T */
hipLaunchKernelGGL(( magma_zgemv_kernel3), dim3(i), dim3(BLOCK_SIZE), 0, magma_stream , n, V, ldv, dx0, work, dtau);
hipLaunchKernelGGL(( magma_ztrmv_kernel2), dim3(i), dim3(i), 0, magma_stream , T, ldt, work, T+i*ldt, dtau);
}
}
//==============================================================================
|
0b54544e6038f984edee11f07ec3365f85d2691d.cu
|
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@precisions normal z -> s d c
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_z
__global__ void magma_zgemv_kernel3(int m, const magmaDoubleComplex * __restrict__ V, int ldv,
magmaDoubleComplex *c, magmaDoubleComplex *dwork,
magmaDoubleComplex *tau);
__global__ void magma_ztrmv_kernel(const magmaDoubleComplex *T, int ldt, magmaDoubleComplex *v);
__global__ void magma_ztrmv_kernel2(const magmaDoubleComplex *T, int ldt,
magmaDoubleComplex *v, magmaDoubleComplex *y, magmaDoubleComplex *tau);
//==============================================================================
__global__
void magma_zlarfgx_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx,
magmaDoubleComplex *dtau, double *dxnorm,
magmaDoubleComplex *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaDoubleComplex scale;
__shared__ double xnorm;
magmaDoubleComplex dxi;
if ( j < n-1 )
dxi = dx[j];
if ( i == 0 ) {
xnorm = *dxnorm;
if ( xnorm == 0 || n == 1) {
*dtau = MAGMA_Z_ZERO;
*dA = *dx0;
}
else {
#if (defined(PRECISION_s) || defined(PRECISION_d))
double alpha = *dx0;
// no need to compute the norm as it is passed as input
double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = (beta - alpha) / beta;
//*dx0 = 1.;
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
magmaDoubleComplex alpha = *dx0;
double alphar = MAGMA_Z_REAL(alpha), alphai = MAGMA_Z_IMAG(alpha);
// no need to compute the norm as it is passed as input
double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta);
//*dx0 = MAGMA_Z_MAKE( 1., 0.);
*dA = MAGMA_Z_MAKE(beta, 0.);
}
alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha));
scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha);
#endif
}
}
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_Z_MUL(dxi, scale);
if (j<it){
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_Z_MAKE(0., 0.);
}
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_zlarfgx_gpu(magma_int_t n, magmaDoubleComplex *dx0, magmaDoubleComplex *dx,
magmaDoubleComplex *dtau, double *dxnorm,
magmaDoubleComplex *dA, magma_int_t it)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
magma_zlarfgx_gpu_kernel<<< blocks, threads, 0, magma_stream >>>( n, dx0, dx, dtau, dxnorm, dA, it);
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_zlarfgtx_gpu(magma_int_t n, magmaDoubleComplex *dx0, magmaDoubleComplex *dx,
magmaDoubleComplex *dtau, double *dxnorm,
magmaDoubleComplex *dA, magma_int_t i,
magmaDoubleComplex *V, magma_int_t ldv, magmaDoubleComplex *T, magma_int_t ldt,
magmaDoubleComplex *work)
{
/* Generate the elementary reflector H(i) */
magma_zlarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, i);
if (i==0) {
magmaDoubleComplex tt = MAGMA_Z_ONE;
magmablas_zlacpy(MagmaUpperLower, 1, 1, dtau, 1, T+i+i*ldt, 1);
magma_zsetmatrix(1,1, &tt,1, dx0,1);
}
else {
/* Compute the i-th column of T */
magma_zgemv_kernel3<<< i, BLOCK_SIZE, 0, magma_stream >>>(n, V, ldv, dx0, work, dtau);
magma_ztrmv_kernel2<<< i, i, 0, magma_stream >>>( T, ldt, work, T+i*ldt, dtau);
}
}
//==============================================================================
|
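In the real-precision branch of magma_zlarfgx_gpu_kernel above, the stored quantities correspond to the standard Householder relations; a sketch in LaTeX, using the names from the code (alpha = *dx0, x = dx, beta written to dA, scale = 1/(alpha - beta)):
\beta = -\operatorname{sign}(\alpha)\,\lVert (\alpha, x) \rVert, \qquad
\tau = \frac{\beta - \alpha}{\beta}, \qquad
v = \begin{pmatrix} 1 \\ x/(\alpha - \beta) \end{pmatrix}, \qquad
H = I - \tau\, v\, v^{T},
\qquad\text{so that}\qquad
H \begin{pmatrix} \alpha \\ x \end{pmatrix} = \begin{pmatrix} \beta \\ 0 \end{pmatrix}.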
kFloor.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kFloor(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = floor(mat[i]);
}
|
kFloor.cu
|
#include "includes.h"
__global__ void kFloor(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = floor(mat[i]);
}
|
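kFloor uses a grid-stride loop, so a fixed-size grid covers arrays of any length; a minimal launcher sketch (block and grid constants are illustrative, not from the original repo):
#include <cuda_runtime.h>
// Hypothetical launcher: each thread in kFloor strides by gridDim.x * blockDim.x,
// so the fixed 128 x 256 launch below handles any len.
void apply_floor(float* d_mat, float* d_target, unsigned int len) {
    kFloor<<<128, 256>>>(d_mat, d_target, len);
    cudaDeviceSynchronize();
}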
10759a877994e329273a1ee7d857c713794a20cd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions mixed zc -> ds
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to clat2z and zlaset.
*/
__global__
void clag2z_kernel(
int m, int n,
const magmaFloatComplex *SA, int ldsa,
magmaDoubleComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ));
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ));
}
}
}
}
/***************************************************************************//**
Purpose
-------
CLAG2Z converts a single-complex matrix, SA,
to a double-complex matrix, A.
Note that while it is possible to overflow while converting
from double to single, it is not possible to overflow when
converting from single to double.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
SA COMPLEX array, dimension (LDSA,N)
On entry, the M-by-N coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
A COMPLEX_16 array, dimension (LDA,N)
On exit, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lag2
*******************************************************************************/
extern "C" void
magmablas_clag2z(
magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr SA, magma_int_t ldsa,
magmaDoubleComplex_ptr A, magma_int_t lda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( ldsa < max(1,m) )
*info = -4;
else if ( lda < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
hipLaunchKernelGGL(( clag2z_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, SA, ldsa, A, lda );
}
|
10759a877994e329273a1ee7d857c713794a20cd.cu
|
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions mixed zc -> ds
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to clat2z and zlaset.
*/
__global__
void clag2z_kernel(
int m, int n,
const magmaFloatComplex *SA, int ldsa,
magmaDoubleComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ));
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ));
}
}
}
}
/***************************************************************************//**
Purpose
-------
CLAG2Z converts a single-complex matrix, SA,
to a double-complex matrix, A.
Note that while it is possible to overflow while converting
from double to single, it is not possible to overflow when
converting from single to double.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
SA COMPLEX array, dimension (LDSA,N)
On entry, the M-by-N coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
A COMPLEX_16 array, dimension (LDA,N)
On exit, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lag2
*******************************************************************************/
extern "C" void
magmablas_clag2z(
magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr SA, magma_int_t ldsa,
magmaDoubleComplex_ptr A, magma_int_t lda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( ldsa < max(1,m) )
*info = -4;
else if ( lda < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
clag2z_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, SA, ldsa, A, lda );
}
|
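As a worked example of the launch geometry in magmablas_clag2z above (numbers chosen for illustration): with BLK_X = 64 and BLK_Y = 32, an m x n = 1000 x 100 matrix gives
grid.x = ceil(1000 / 64) = 16,   grid.y = ceil(100 / 32) = 4,
so 64 blocks of 64 threads each; a thread with row index ind in block column iby updates A[ind + (iby + j)*lda] for j = 0..31, and the last block column (iby = 96) takes the partial-block path because iby + BLK_Y > n.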
b55d9750a05fccbda6819c013eb46056cba30aa4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/math/math_cuda_utils.h"
#include "paddle/fluid/operators/softmax_impl.cuh"
#include "paddle/fluid/operators/softmax_op.h"
#include "paddle/fluid/platform/cuda_device_function.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/platform/miopen_helper.h"
#else
#include "paddle/fluid/platform/cudnn_helper.h"
#endif
namespace paddle {
namespace platform {
struct CUDAPlace;
struct float16;
} // namespace platform
} // namespace paddle
namespace paddle {
namespace operators {
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using DataLayout = platform::DataLayout;
using Tensor = framework::Tensor;
// Vectorization trait 4 * sizeof(T)
template <typename T>
class VecT4 {};
template <>
class VecT4<double> {
public:
using Type = long4;
};
template <>
class VecT4<float> {
public:
using Type = int4;
};
template <>
class VecT4<platform::float16> {
public:
using Type = int2;
};
// Vectorization trait 2 * sizeof(T)
template <typename T>
class VecT2 {};
template <>
class VecT2<double> {
public:
using Type = int4;
};
template <>
class VecT2<float> {
public:
using Type = int2;
};
template <>
class VecT2<platform::float16> {
public:
using Type = int;
};
int static inline log2_ceil(int value) {
int log2_value = 0;
while ((1 << log2_value) < value) ++log2_value;
return log2_value;
}
/*
Core function of computing softmax forward for axis=-1.
The computation includes
- Compute maximum of batch: maxvalue_{i} = max_j src_{i,j}
- Compute sum of exp batch: s_{i} = sum_{j}{ exp(src_{i,j} - maxvalue_{i}) }
- Compute softmax: exp(src_{i,j} - maxvalue_{i}) / s_{i}
One warp (32 threads) computes 1 or 2 batches (kBatchSize).
For the max (sum) reduction, each thread first reduces its own elements, then
warp shuffle intrinsics combine the partial max (sum) across the warp.
*/
template <typename T, typename VecT, typename AccT, int Log2Elements,
bool LogMode = false>
__global__ void WarpSoftmaxForward(T* softmax, const T* src,
const int batch_size, const int stride,
const int element_count) {
constexpr int kDimCeil = 1 << Log2Elements;
constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
constexpr int kVSize = sizeof(VecT) / sizeof(T);
constexpr int kIterations = kDimCeil / kWarpSize;
constexpr int kIterationsV =
(kIterations >= kVSize) ? (kIterations / kVSize) : 1;
constexpr int kBatchSize = (kDimCeil <= 32) ? 2 : 1;
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
// max index to read
int idx_max_v[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; i++) {
int idx_max = ((i + first_batch) < batch_size) ? element_count : 0;
idx_max_v[i] = idx_max / kVSize;
}
// read data from global memory
AccT srcdata[kBatchSize][kIterationsV][kVSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// read data
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
int src_idx = threadIdx.x + it * kWarpSize;
if (kVSize == 1) {
if (src_idx < idx_max_v[i]) {
srcdata[i][it][0] =
static_cast<AccT>(src[(first_batch + i) * stride + src_idx]);
} else {
srcdata[i][it][0] = -std::numeric_limits<AccT>::infinity();
}
} else {
const VecT* src_v =
reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]);
if (src_idx < idx_max_v[i]) {
VecT srctmp = src_v[src_idx];
const T* srcinptr = reinterpret_cast<const T*>(&srctmp);
#pragma unroll
for (int s = 0; s < kVSize; s++) {
srcdata[i][it][s] = static_cast<AccT>(srcinptr[s]);
}
} else {
#pragma unroll
for (int s = 0; s < kVSize; s++) {
srcdata[i][it][s] = -std::numeric_limits<AccT>::infinity();
}
}
}
}
}
// compute max value
AccT max_value[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// it = 0
AccT valmax = srcdata[i][0][0];
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
valmax = (valmax > srcdata[i][0][s]) ? valmax : srcdata[i][0][s];
}
max_value[i] = valmax;
// it = 1, 2, ...
#pragma unroll
for (int it = 1; it < kIterationsV; ++it) {
AccT valmax = srcdata[i][it][0];
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
valmax = (valmax > srcdata[i][it][s]) ? valmax : srcdata[i][it][s];
}
max_value[i] = (max_value[i] > valmax) ? max_value[i] : valmax;
}
}
WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value);
// compute sum
AccT sum[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// it = 0
if (LogMode) {
sum[i] = ::exp(srcdata[i][0][0] - max_value[i]);
} else {
srcdata[i][0][0] = ::exp(srcdata[i][0][0] - max_value[i]);
sum[i] = srcdata[i][0][0];
}
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
if (LogMode) {
sum[i] += ::exp(srcdata[i][0][s] - max_value[i]);
} else {
srcdata[i][0][s] = ::exp(srcdata[i][0][s] - max_value[i]);
sum[i] += srcdata[i][0][s];
}
}
// it = 1, 2, ...
#pragma unroll
for (int it = 1; it < kIterationsV; ++it) {
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
sum[i] += ::exp(srcdata[i][it][s] - max_value[i]);
} else {
srcdata[i][it][s] = ::exp(srcdata[i][it][s] - max_value[i]);
sum[i] += srcdata[i][it][s];
}
}
}
}
WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum);
// write result to global memory
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
if (LogMode) {
sum[i] = ::log(sum[i]);
}
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
int idx = threadIdx.x + it * kWarpSize;
if (kVSize == 1) {
if (idx < idx_max_v[i]) {
if (LogMode) {
softmax[(first_batch + i) * stride + idx] =
srcdata[i][it][0] - max_value[i] - sum[i];
} else {
softmax[(first_batch + i) * stride + idx] =
srcdata[i][it][0] / sum[i];
}
} else {
break;
}
} else {
VecT* softmax_v =
reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]);
VecT tmpdata;
T* tmpptr = reinterpret_cast<T*>(&tmpdata);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
tmpptr[s] = srcdata[i][it][s] - max_value[i] - sum[i];
} else {
tmpptr[s] = srcdata[i][it][s] / sum[i];
}
}
if (idx < idx_max_v[i]) {
softmax_v[idx] = tmpdata;
} else {
break;
}
}
}
}
}
/*
Core function of computing softmax backward for axis=-1.
The computation includes
- Compute weighted sum of batch: s_{i} = sum_{j}{ src_{i,j} * grad_{i,j} }
- Compute src_{i,j} * ( grad_{i,j} - s_{i} )
One warp (32 threads) computes 1 or 2 batches (kBatchSize).
For the sum reduction, each thread first reduces its own elements, then warp
shuffle intrinsics combine the partial sums across the warp.
*/
template <typename T, typename VecT, typename AccT, int Log2Elements,
bool LogMode = false>
__global__ void WarpSoftmaxBackward(T* dst, const T* grad, const T* src,
int batch_size, int stride,
int element_count) {
constexpr int kVSize = sizeof(VecT) / sizeof(T);
constexpr int kDimCeil = 1 << Log2Elements;
constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
constexpr int kIterations = kDimCeil / kWarpSize;
constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1;
constexpr int kIterationsV =
(kIterations >= kVSize) ? (kIterations / kVSize) : 1;
int element_count_v = element_count / kVSize;
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
int local_batches = batch_size - first_batch;
if (local_batches > kBatchSize) {
local_batches = kBatchSize;
}
// read data from global memory
VecT src_reg[kBatchSize][kIterationsV];
VecT grad_reg[kBatchSize][kIterationsV];
for (int i = 0; i < kBatchSize; ++i) {
const VecT* src_v =
reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]);
const VecT* grad_v =
reinterpret_cast<const VecT*>(&grad[(first_batch + i) * stride]);
// max index to read
int idx_max = (i < local_batches) ? element_count : 0;
int idx_max_v = idx_max / kVSize;
// read data
for (int it = 0; it < kIterationsV; ++it) {
int src_idx = threadIdx.x + it * kWarpSize;
if (src_idx < idx_max_v) {
src_reg[i][it] = src_v[src_idx];
grad_reg[i][it] = grad_v[src_idx];
} else {
#pragma unroll
for (int s = 0; s < kVSize; s++) {
reinterpret_cast<T*>(&src_reg[i][it])[s] = 0.0;
reinterpret_cast<T*>(&grad_reg[i][it])[s] = 0.0;
}
}
}
}
// compute sum
AccT sum[kBatchSize]{0.0};
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
T* gradptr = reinterpret_cast<T*>(&grad_reg[i][it]);
T* srcptr = reinterpret_cast<T*>(&src_reg[i][it]);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
sum[i] += static_cast<AccT>(gradptr[s]);
} else {
sum[i] += static_cast<AccT>(gradptr[s] * srcptr[s]);
}
}
}
}
WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum);
// write result
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
if (i >= local_batches) break;
VecT* dst_v = reinterpret_cast<VecT*>(&dst[(first_batch + i) * stride]);
// max index to write
int idx_max = (i < local_batches) ? element_count : 0;
int idx_max_v = idx_max / kVSize;
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
VecT tmpdata;
T* tmpptr = reinterpret_cast<T*>(&tmpdata);
T* gradptr = reinterpret_cast<T*>(&grad_reg[i][it]);
T* srcptr = reinterpret_cast<T*>(&src_reg[i][it]);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
tmpptr[s] = static_cast<AccT>(gradptr[s]) -
::exp(static_cast<AccT>(srcptr[s])) * sum[i];
} else {
tmpptr[s] = static_cast<AccT>(srcptr[s]) *
(static_cast<AccT>(gradptr[s]) - sum[i]);
}
}
int idx = threadIdx.x + it * kWarpSize;
if (idx < idx_max_v) {
dst_v[idx] = tmpdata;
}
}
}
}
#define SOFTMAX_WARP_FORWARD_CASE(Log2Elements, AccT) \
case Log2Elements: \
hipLaunchKernelGGL(( WarpSoftmaxForward< \
T, VecT, AccT, Log2Elements, \
LogMode>), dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(), \
dst, src, batch_size, stride, element_count); \
break;
/*
Wrapper of softmax forward with template instantiation on size of input.
*/
template <typename T, typename VecT, bool LogMode>
void SwitchWarpSoftmaxForward(const int blocks, const dim3 threads,
const framework::ExecutionContext& ctx, T* dst,
const T* src, const int batch_size,
const int stride, const int element_count,
int Log2Elements) {
using AccT = typename details::MPTypeTrait<T>::Type;
switch (Log2Elements) {
SOFTMAX_WARP_FORWARD_CASE(0, AccT);
SOFTMAX_WARP_FORWARD_CASE(1, AccT);
SOFTMAX_WARP_FORWARD_CASE(2, AccT);
SOFTMAX_WARP_FORWARD_CASE(3, AccT);
SOFTMAX_WARP_FORWARD_CASE(4, AccT);
SOFTMAX_WARP_FORWARD_CASE(5, AccT);
SOFTMAX_WARP_FORWARD_CASE(6, AccT);
SOFTMAX_WARP_FORWARD_CASE(7, AccT);
SOFTMAX_WARP_FORWARD_CASE(8, AccT);
SOFTMAX_WARP_FORWARD_CASE(9, AccT);
default:
break;
}
}
#define SOFTMAX_WARP_BACKWARD_CASE(Log2Elements, AccT) \
case Log2Elements: \
hipLaunchKernelGGL(( WarpSoftmaxBackward< \
T, VecT, AccT, Log2Elements, \
LogMode>), dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(), \
dst, grad, src, batch_size, stride, element_count); \
break;
/*
Wrapper of softmax backward with template instantiation on size of input.
*/
template <typename T, typename VecT, bool LogMode>
void SwitchWarpSoftmaxBackward(const int blocks, const dim3 threads,
const framework::ExecutionContext& ctx, T* dst,
const T* grad, const T* src,
const int batch_size, const int stride,
const int element_count, int Log2Elements) {
using AccT = typename details::MPTypeTrait<T>::Type;
switch (Log2Elements) {
SOFTMAX_WARP_BACKWARD_CASE(0, AccT);
SOFTMAX_WARP_BACKWARD_CASE(1, AccT);
SOFTMAX_WARP_BACKWARD_CASE(2, AccT);
SOFTMAX_WARP_BACKWARD_CASE(3, AccT);
SOFTMAX_WARP_BACKWARD_CASE(4, AccT);
SOFTMAX_WARP_BACKWARD_CASE(5, AccT);
SOFTMAX_WARP_BACKWARD_CASE(6, AccT);
SOFTMAX_WARP_BACKWARD_CASE(7, AccT);
SOFTMAX_WARP_BACKWARD_CASE(8, AccT);
SOFTMAX_WARP_BACKWARD_CASE(9, AccT);
default:
break;
}
}
#undef SOFTMAX_WARP_FORWARD_CASE
#undef SOFTMAX_WARP_BACKWARD_CASE
template <typename T, bool LogMode = false>
class SoftmaxCUDNNKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* out = ctx.Output<Tensor>("Out");
out->mutable_data<T>(ctx.GetPlace());
auto* out_data = out->data<T>();
auto dims = x->dims();
const int rank = dims.size();
const int axis = CanonicalAxis(ctx.Attr<int>("axis"), rank);
const int dim = dims[axis];
const int N = SizeToAxis(axis, dims);
const int D = SizeOutAxis(axis, dims);
constexpr int max_dim = 320;
constexpr int warps_per_block = 4;
if (D == 1 && dim <= max_dim && sizeof(T) <= 4) {
const int kDimLog2 = static_cast<int>(log2_ceil(dim));
const int kDimCeil = 1 << kDimLog2;
int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
int batches_per_warp = (kDimCeil <= 32) ? 2 : 1;
// use 128 threads per block to maximize gpu utilization
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / kWarpSize);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (N + batches_per_block - 1) / batches_per_block;
dim3 threads(kWarpSize, warps_per_block, 1);
// vectorization read/write
using T4 = typename VecT4<T>::Type;
using T2 = typename VecT2<T>::Type;
if (dim % 4 == 0) {
SwitchWarpSoftmaxForward<T, T4, LogMode>(blocks, threads, ctx, out_data,
x->data<T>(), N, dim, dim,
kDimLog2);
} else if (dim % 2 == 0) {
SwitchWarpSoftmaxForward<T, T2, LogMode>(blocks, threads, ctx, out_data,
x->data<T>(), N, dim, dim,
kDimLog2);
} else {
SwitchWarpSoftmaxForward<T, T, LogMode>(blocks, threads, ctx, out_data,
x->data<T>(), N, dim, dim,
kDimLog2);
}
} else {
ScopedTensorDescriptor desc;
std::vector<int> tensor_dims = {N, dim, D, 1};
DataLayout layout = DataLayout::kNCHW;
#ifdef PADDLE_WITH_HIP
miopenTensorDescriptor_t desc_ = desc.descriptor<T>(layout, tensor_dims);
#else
cudnnTensorDescriptor_t desc_ = desc.descriptor<T>(layout, tensor_dims);
#endif
auto& dev_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
#ifdef PADDLE_WITH_HIP
auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE
: MIOPEN_SOFTMAX_MODE_CHANNEL;
if (LogMode) {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxForward_V2(
handle, platform::CudnnDataType<T>::kOne(), desc_, x->data<T>(),
platform::CudnnDataType<T>::kZero(), desc_, out_data,
MIOPEN_SOFTMAX_LOG, mode));
} else {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxForward_V2(
handle, platform::CudnnDataType<T>::kOne(), desc_, x->data<T>(),
platform::CudnnDataType<T>::kZero(), desc_, out_data,
MIOPEN_SOFTMAX_ACCURATE, mode));
}
#else
auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
: CUDNN_SOFTMAX_MODE_CHANNEL;
if (LogMode) {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxForward(
handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType<T>::kOne(),
desc_, x->data<T>(), platform::CudnnDataType<T>::kZero(), desc_,
out_data));
} else {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxForward(
handle, CUDNN_SOFTMAX_ACCURATE, mode,
platform::CudnnDataType<T>::kOne(), desc_, x->data<T>(),
platform::CudnnDataType<T>::kZero(), desc_, out_data));
}
#endif
}
}
};
template <typename T, bool LogMode = false>
class SoftmaxGradCUDNNKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* out = ctx.Input<Tensor>("Out");
auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
dx->mutable_data<T>(ctx.GetPlace());
auto* dx_data = dx->data<T>();
auto dims = out->dims();
const int rank = dims.size();
const int axis = CanonicalAxis(ctx.Attr<int>("axis"), rank);
const int dim = dims[axis];
const int N = SizeToAxis(axis, dims);
const int D = SizeOutAxis(axis, dims);
constexpr int max_dim = 320;
constexpr int warps_per_block = 4;
if (D == 1 && dim <= max_dim && sizeof(T) <= 4) {
const int kDimLog2 = log2_ceil(dim);
const int kDimCeil = 1 << kDimLog2;
int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
int batches_per_warp = (kDimCeil <= 128) ? 2 : 1;
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / kWarpSize);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (N + batches_per_block - 1) / batches_per_block;
dim3 threads(kWarpSize, warps_per_block, 1);
// vectorization read/write
using T4 = typename VecT4<T>::Type;
using T2 = typename VecT2<T>::Type;
if (dim % 4 == 0) {
SwitchWarpSoftmaxBackward<T, T4, LogMode>(
blocks, threads, ctx, dx_data, dout->data<T>(), out->data<T>(), N,
dim, dim, kDimLog2);
} else if (dim % 2 == 0) {
SwitchWarpSoftmaxBackward<T, T2, LogMode>(
blocks, threads, ctx, dx_data, dout->data<T>(), out->data<T>(), N,
dim, dim, kDimLog2);
} else {
SwitchWarpSoftmaxBackward<T, T, LogMode>(
blocks, threads, ctx, dx_data, dout->data<T>(), out->data<T>(), N,
dim, dim, kDimLog2);
}
} else {
ScopedTensorDescriptor desc;
std::vector<int> tensor_dims = {N, dim, D, 1};
DataLayout layout = DataLayout::kNCHW;
#ifdef PADDLE_WITH_HIP
miopenTensorDescriptor_t desc_ = desc.descriptor<T>(layout, tensor_dims);
#else
cudnnTensorDescriptor_t desc_ = desc.descriptor<T>(layout, tensor_dims);
#endif
auto& dev_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
#ifdef PADDLE_WITH_HIP
auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE
: MIOPEN_SOFTMAX_MODE_CHANNEL;
if (LogMode) {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxBackward_V2(
handle, platform::CudnnDataType<T>::kOne(), desc_, out->data<T>(),
desc_, dout->data<T>(), platform::CudnnDataType<T>::kZero(), desc_,
dx_data, MIOPEN_SOFTMAX_LOG, mode));
} else {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxBackward_V2(
handle, platform::CudnnDataType<T>::kOne(), desc_, out->data<T>(),
desc_, dout->data<T>(), platform::CudnnDataType<T>::kZero(), desc_,
dx_data, MIOPEN_SOFTMAX_ACCURATE, mode));
}
#else
auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
: CUDNN_SOFTMAX_MODE_CHANNEL;
if (LogMode) {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxBackward(
handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType<T>::kOne(),
desc_, out->data<T>(), desc_, dout->data<T>(),
platform::CudnnDataType<T>::kZero(), desc_, dx_data));
} else {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxBackward(
handle, CUDNN_SOFTMAX_ACCURATE, mode,
platform::CudnnDataType<T>::kOne(), desc_, out->data<T>(), desc_,
dout->data<T>(), platform::CudnnDataType<T>::kZero(), desc_,
dx_data));
}
#endif
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
#ifdef PADDLE_WITH_HIP
// MIOPEN does not support double
REGISTER_OP_KERNEL(softmax, CUDNN, plat::CUDAPlace,
ops::SoftmaxCUDNNKernel<float>,
ops::SoftmaxCUDNNKernel<plat::float16>);
REGISTER_OP_KERNEL(softmax_grad, CUDNN, plat::CUDAPlace,
ops::SoftmaxGradCUDNNKernel<float>,
ops::SoftmaxGradCUDNNKernel<plat::float16>);
#else
REGISTER_OP_KERNEL(softmax, CUDNN, plat::CUDAPlace,
ops::SoftmaxCUDNNKernel<float>,
ops::SoftmaxCUDNNKernel<double>,
ops::SoftmaxCUDNNKernel<plat::float16>);
REGISTER_OP_KERNEL(softmax_grad, CUDNN, plat::CUDAPlace,
ops::SoftmaxGradCUDNNKernel<float>,
ops::SoftmaxGradCUDNNKernel<double>,
ops::SoftmaxGradCUDNNKernel<plat::float16>);
#endif
|
b55d9750a05fccbda6819c013eb46056cba30aa4.cu
|
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/math/math_cuda_utils.h"
#include "paddle/fluid/operators/softmax_impl.cuh"
#include "paddle/fluid/operators/softmax_op.h"
#include "paddle/fluid/platform/cuda_device_function.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/platform/miopen_helper.h"
#else
#include "paddle/fluid/platform/cudnn_helper.h"
#endif
namespace paddle {
namespace platform {
struct CUDAPlace;
struct float16;
} // namespace platform
} // namespace paddle
namespace paddle {
namespace operators {
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using DataLayout = platform::DataLayout;
using Tensor = framework::Tensor;
// Vectorization trait 4 * sizeof(T)
template <typename T>
class VecT4 {};
template <>
class VecT4<double> {
public:
using Type = long4;
};
template <>
class VecT4<float> {
public:
using Type = int4;
};
template <>
class VecT4<platform::float16> {
public:
using Type = int2;
};
// Vectorization trait 2 * sizeof(T)
template <typename T>
class VecT2 {};
template <>
class VecT2<double> {
public:
using Type = int4;
};
template <>
class VecT2<float> {
public:
using Type = int2;
};
template <>
class VecT2<platform::float16> {
public:
using Type = int;
};
int static inline log2_ceil(int value) {
int log2_value = 0;
while ((1 << log2_value) < value) ++log2_value;
return log2_value;
}
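// [EDITORIAL NOTE] Illustration, not part of the original source: for dim = 100,
// log2_ceil(100) == 7, so the warp kernels below use kDimCeil = 1 << 7 = 128,
// kWarpSize = 32 and kIterations = 128 / 32 = 4.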
/*
Core function of computing softmax forward for axis=-1.
The computation includes
- Compute maximum of batch: maxvalue_{i} = max_j src_{i,j}
    - Compute sum of exp batch: s_{i} = sum_{j} exp(src_{i,j} - maxvalue_{i})
    - Compute: exp(src_{i,j} - maxvalue_{i}) / s_{i}
  One warp (32 threads) computes 1 or 2 batches (kBatchSize).
  For the max (sum) reduction, each thread first reduces the elements it holds,
  then the warp-wide max (sum) is obtained with shuffle instructions.
*/
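// [EDITORIAL NOTE] Worked illustration, not part of the original source: for a
// row x = (1, 2, 3), maxvalue = 3 and s = e^{-2} + e^{-1} + e^{0} approx. 1.5032,
// so softmax(x) approx. (0.0900, 0.2447, 0.6652); subtracting the maximum first
// keeps the exponentials bounded without changing the result.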
template <typename T, typename VecT, typename AccT, int Log2Elements,
bool LogMode = false>
__global__ void WarpSoftmaxForward(T* softmax, const T* src,
const int batch_size, const int stride,
const int element_count) {
constexpr int kDimCeil = 1 << Log2Elements;
constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
constexpr int kVSize = sizeof(VecT) / sizeof(T);
constexpr int kIterations = kDimCeil / kWarpSize;
constexpr int kIterationsV =
(kIterations >= kVSize) ? (kIterations / kVSize) : 1;
constexpr int kBatchSize = (kDimCeil <= 32) ? 2 : 1;
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
// max index to read
int idx_max_v[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; i++) {
int idx_max = ((i + first_batch) < batch_size) ? element_count : 0;
idx_max_v[i] = idx_max / kVSize;
}
// read data from global memory
AccT srcdata[kBatchSize][kIterationsV][kVSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// read data
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
int src_idx = threadIdx.x + it * kWarpSize;
if (kVSize == 1) {
if (src_idx < idx_max_v[i]) {
srcdata[i][it][0] =
static_cast<AccT>(src[(first_batch + i) * stride + src_idx]);
} else {
srcdata[i][it][0] = -std::numeric_limits<AccT>::infinity();
}
} else {
const VecT* src_v =
reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]);
if (src_idx < idx_max_v[i]) {
VecT srctmp = src_v[src_idx];
const T* srcinptr = reinterpret_cast<const T*>(&srctmp);
#pragma unroll
for (int s = 0; s < kVSize; s++) {
srcdata[i][it][s] = static_cast<AccT>(srcinptr[s]);
}
} else {
#pragma unroll
for (int s = 0; s < kVSize; s++) {
srcdata[i][it][s] = -std::numeric_limits<AccT>::infinity();
}
}
}
}
}
// compute max value
AccT max_value[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// it = 0
AccT valmax = srcdata[i][0][0];
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
valmax = (valmax > srcdata[i][0][s]) ? valmax : srcdata[i][0][s];
}
max_value[i] = valmax;
// it = 1, 2, ...
#pragma unroll
for (int it = 1; it < kIterationsV; ++it) {
AccT valmax = srcdata[i][it][0];
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
valmax = (valmax > srcdata[i][it][s]) ? valmax : srcdata[i][it][s];
}
max_value[i] = (max_value[i] > valmax) ? max_value[i] : valmax;
}
}
WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value);
// compute sum
AccT sum[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// it = 0
if (LogMode) {
sum[i] = std::exp(srcdata[i][0][0] - max_value[i]);
} else {
srcdata[i][0][0] = std::exp(srcdata[i][0][0] - max_value[i]);
sum[i] = srcdata[i][0][0];
}
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
if (LogMode) {
sum[i] += std::exp(srcdata[i][0][s] - max_value[i]);
} else {
srcdata[i][0][s] = std::exp(srcdata[i][0][s] - max_value[i]);
sum[i] += srcdata[i][0][s];
}
}
// it = 1, 2, ...
#pragma unroll
for (int it = 1; it < kIterationsV; ++it) {
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
sum[i] += std::exp(srcdata[i][it][s] - max_value[i]);
} else {
srcdata[i][it][s] = std::exp(srcdata[i][it][s] - max_value[i]);
sum[i] += srcdata[i][it][s];
}
}
}
}
WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum);
// write result to global memory
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
if (LogMode) {
sum[i] = std::log(sum[i]);
}
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
int idx = threadIdx.x + it * kWarpSize;
if (kVSize == 1) {
if (idx < idx_max_v[i]) {
if (LogMode) {
softmax[(first_batch + i) * stride + idx] =
srcdata[i][it][0] - max_value[i] - sum[i];
} else {
softmax[(first_batch + i) * stride + idx] =
srcdata[i][it][0] / sum[i];
}
} else {
break;
}
} else {
VecT* softmax_v =
reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]);
VecT tmpdata;
T* tmpptr = reinterpret_cast<T*>(&tmpdata);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
tmpptr[s] = srcdata[i][it][s] - max_value[i] - sum[i];
} else {
tmpptr[s] = srcdata[i][it][s] / sum[i];
}
}
if (idx < idx_max_v[i]) {
softmax_v[idx] = tmpdata;
} else {
break;
}
}
}
}
}
/*
Core function of computing softmax backward for axis=-1.
The computation includes
    - Compute sum of the batch: s_{i} = sum_{j} src_{i,j} * grad_{i,j}
    - Compute: src_{i,j} * (grad_{i,j} - s_{i})
  One warp (32 threads) computes 1 or 2 batches (kBatchSize).
  For the sum reduction, each thread first reduces the elements it holds, then
  the warp-wide sum is obtained with shuffle instructions.
*/
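// [EDITORIAL NOTE] For reference, an illustration that is not part of the
// original source: with y = softmax(x) and upstream gradient dy, the branches
// below compute dx_j = y_j * (dy_j - sum_k dy_k * y_k); in log mode, where the
// forward output is y = log_softmax(x), they compute dx_j = dy_j - exp(y_j) * sum_k dy_k.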
template <typename T, typename VecT, typename AccT, int Log2Elements,
bool LogMode = false>
__global__ void WarpSoftmaxBackward(T* dst, const T* grad, const T* src,
int batch_size, int stride,
int element_count) {
constexpr int kVSize = sizeof(VecT) / sizeof(T);
constexpr int kDimCeil = 1 << Log2Elements;
constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
constexpr int kIterations = kDimCeil / kWarpSize;
constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1;
constexpr int kIterationsV =
(kIterations >= kVSize) ? (kIterations / kVSize) : 1;
int element_count_v = element_count / kVSize;
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
int local_batches = batch_size - first_batch;
if (local_batches > kBatchSize) {
local_batches = kBatchSize;
}
// read data from global memory
VecT src_reg[kBatchSize][kIterationsV];
VecT grad_reg[kBatchSize][kIterationsV];
for (int i = 0; i < kBatchSize; ++i) {
const VecT* src_v =
reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]);
const VecT* grad_v =
reinterpret_cast<const VecT*>(&grad[(first_batch + i) * stride]);
// max index to read
int idx_max = (i < local_batches) ? element_count : 0;
int idx_max_v = idx_max / kVSize;
// read data
for (int it = 0; it < kIterationsV; ++it) {
int src_idx = threadIdx.x + it * kWarpSize;
if (src_idx < idx_max_v) {
src_reg[i][it] = src_v[src_idx];
grad_reg[i][it] = grad_v[src_idx];
} else {
#pragma unroll
for (int s = 0; s < kVSize; s++) {
reinterpret_cast<T*>(&src_reg[i][it])[s] = 0.0;
reinterpret_cast<T*>(&grad_reg[i][it])[s] = 0.0;
}
}
}
}
// compute sum
AccT sum[kBatchSize]{0.0};
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
T* gradptr = reinterpret_cast<T*>(&grad_reg[i][it]);
T* srcptr = reinterpret_cast<T*>(&src_reg[i][it]);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
sum[i] += static_cast<AccT>(gradptr[s]);
} else {
sum[i] += static_cast<AccT>(gradptr[s] * srcptr[s]);
}
}
}
}
WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum);
// write result
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
if (i >= local_batches) break;
VecT* dst_v = reinterpret_cast<VecT*>(&dst[(first_batch + i) * stride]);
// max index to write
int idx_max = (i < local_batches) ? element_count : 0;
int idx_max_v = idx_max / kVSize;
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
VecT tmpdata;
T* tmpptr = reinterpret_cast<T*>(&tmpdata);
T* gradptr = reinterpret_cast<T*>(&grad_reg[i][it]);
T* srcptr = reinterpret_cast<T*>(&src_reg[i][it]);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
tmpptr[s] = static_cast<AccT>(gradptr[s]) -
std::exp(static_cast<AccT>(srcptr[s])) * sum[i];
} else {
tmpptr[s] = static_cast<AccT>(srcptr[s]) *
(static_cast<AccT>(gradptr[s]) - sum[i]);
}
}
int idx = threadIdx.x + it * kWarpSize;
if (idx < idx_max_v) {
dst_v[idx] = tmpdata;
}
}
}
}
#define SOFTMAX_WARP_FORWARD_CASE(Log2Elements, AccT) \
case Log2Elements: \
WarpSoftmaxForward< \
T, VecT, AccT, Log2Elements, \
LogMode><<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>( \
dst, src, batch_size, stride, element_count); \
break;
/*
  Wrapper of softmax forward with template instantiation on the size of the input.
*/
template <typename T, typename VecT, bool LogMode>
void SwitchWarpSoftmaxForward(const int blocks, const dim3 threads,
const framework::ExecutionContext& ctx, T* dst,
const T* src, const int batch_size,
const int stride, const int element_count,
int Log2Elements) {
using AccT = typename details::MPTypeTrait<T>::Type;
switch (Log2Elements) {
SOFTMAX_WARP_FORWARD_CASE(0, AccT);
SOFTMAX_WARP_FORWARD_CASE(1, AccT);
SOFTMAX_WARP_FORWARD_CASE(2, AccT);
SOFTMAX_WARP_FORWARD_CASE(3, AccT);
SOFTMAX_WARP_FORWARD_CASE(4, AccT);
SOFTMAX_WARP_FORWARD_CASE(5, AccT);
SOFTMAX_WARP_FORWARD_CASE(6, AccT);
SOFTMAX_WARP_FORWARD_CASE(7, AccT);
SOFTMAX_WARP_FORWARD_CASE(8, AccT);
SOFTMAX_WARP_FORWARD_CASE(9, AccT);
default:
break;
}
}
#define SOFTMAX_WARP_BACKWARD_CASE(Log2Elements, AccT) \
case Log2Elements: \
WarpSoftmaxBackward< \
T, VecT, AccT, Log2Elements, \
LogMode><<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>( \
dst, grad, src, batch_size, stride, element_count); \
break;
/*
Wrapper of softmax backward with template instantiation on size of input.
*/
template <typename T, typename VecT, bool LogMode>
void SwitchWarpSoftmaxBackward(const int blocks, const dim3 threads,
const framework::ExecutionContext& ctx, T* dst,
const T* grad, const T* src,
const int batch_size, const int stride,
const int element_count, int Log2Elements) {
using AccT = typename details::MPTypeTrait<T>::Type;
switch (Log2Elements) {
SOFTMAX_WARP_BACKWARD_CASE(0, AccT);
SOFTMAX_WARP_BACKWARD_CASE(1, AccT);
SOFTMAX_WARP_BACKWARD_CASE(2, AccT);
SOFTMAX_WARP_BACKWARD_CASE(3, AccT);
SOFTMAX_WARP_BACKWARD_CASE(4, AccT);
SOFTMAX_WARP_BACKWARD_CASE(5, AccT);
SOFTMAX_WARP_BACKWARD_CASE(6, AccT);
SOFTMAX_WARP_BACKWARD_CASE(7, AccT);
SOFTMAX_WARP_BACKWARD_CASE(8, AccT);
SOFTMAX_WARP_BACKWARD_CASE(9, AccT);
default:
break;
}
}
#undef SOFTMAX_WARP_FORWARD_CASE
#undef SOFTMAX_WARP_BACKWARD_CASE
template <typename T, bool LogMode = false>
class SoftmaxCUDNNKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* out = ctx.Output<Tensor>("Out");
out->mutable_data<T>(ctx.GetPlace());
auto* out_data = out->data<T>();
auto dims = x->dims();
const int rank = dims.size();
const int axis = CanonicalAxis(ctx.Attr<int>("axis"), rank);
const int dim = dims[axis];
const int N = SizeToAxis(axis, dims);
const int D = SizeOutAxis(axis, dims);
constexpr int max_dim = 320;
constexpr int warps_per_block = 4;
if (D == 1 && dim <= max_dim && sizeof(T) <= 4) {
const int kDimLog2 = static_cast<int>(log2_ceil(dim));
const int kDimCeil = 1 << kDimLog2;
int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
int batches_per_warp = (kDimCeil <= 32) ? 2 : 1;
      // use 128 threads per block to maximize gpu utilization
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / kWarpSize);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (N + batches_per_block - 1) / batches_per_block;
dim3 threads(kWarpSize, warps_per_block, 1);
// vectorization read/write
using T4 = typename VecT4<T>::Type;
using T2 = typename VecT2<T>::Type;
if (dim % 4 == 0) {
SwitchWarpSoftmaxForward<T, T4, LogMode>(blocks, threads, ctx, out_data,
x->data<T>(), N, dim, dim,
kDimLog2);
} else if (dim % 2 == 0) {
SwitchWarpSoftmaxForward<T, T2, LogMode>(blocks, threads, ctx, out_data,
x->data<T>(), N, dim, dim,
kDimLog2);
} else {
SwitchWarpSoftmaxForward<T, T, LogMode>(blocks, threads, ctx, out_data,
x->data<T>(), N, dim, dim,
kDimLog2);
}
} else {
ScopedTensorDescriptor desc;
std::vector<int> tensor_dims = {N, dim, D, 1};
DataLayout layout = DataLayout::kNCHW;
#ifdef PADDLE_WITH_HIP
miopenTensorDescriptor_t desc_ = desc.descriptor<T>(layout, tensor_dims);
#else
cudnnTensorDescriptor_t desc_ = desc.descriptor<T>(layout, tensor_dims);
#endif
auto& dev_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
#ifdef PADDLE_WITH_HIP
auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE
: MIOPEN_SOFTMAX_MODE_CHANNEL;
if (LogMode) {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxForward_V2(
handle, platform::CudnnDataType<T>::kOne(), desc_, x->data<T>(),
platform::CudnnDataType<T>::kZero(), desc_, out_data,
MIOPEN_SOFTMAX_LOG, mode));
} else {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxForward_V2(
handle, platform::CudnnDataType<T>::kOne(), desc_, x->data<T>(),
platform::CudnnDataType<T>::kZero(), desc_, out_data,
MIOPEN_SOFTMAX_ACCURATE, mode));
}
#else
auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
: CUDNN_SOFTMAX_MODE_CHANNEL;
if (LogMode) {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxForward(
handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType<T>::kOne(),
desc_, x->data<T>(), platform::CudnnDataType<T>::kZero(), desc_,
out_data));
} else {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxForward(
handle, CUDNN_SOFTMAX_ACCURATE, mode,
platform::CudnnDataType<T>::kOne(), desc_, x->data<T>(),
platform::CudnnDataType<T>::kZero(), desc_, out_data));
}
#endif
}
}
};
template <typename T, bool LogMode = false>
class SoftmaxGradCUDNNKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* out = ctx.Input<Tensor>("Out");
auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
dx->mutable_data<T>(ctx.GetPlace());
auto* dx_data = dx->data<T>();
auto dims = out->dims();
const int rank = dims.size();
const int axis = CanonicalAxis(ctx.Attr<int>("axis"), rank);
const int dim = dims[axis];
const int N = SizeToAxis(axis, dims);
const int D = SizeOutAxis(axis, dims);
constexpr int max_dim = 320;
constexpr int warps_per_block = 4;
if (D == 1 && dim <= max_dim && sizeof(T) <= 4) {
const int kDimLog2 = log2_ceil(dim);
const int kDimCeil = 1 << kDimLog2;
int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
int batches_per_warp = (kDimCeil <= 128) ? 2 : 1;
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / kWarpSize);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (N + batches_per_block - 1) / batches_per_block;
dim3 threads(kWarpSize, warps_per_block, 1);
// vectorization read/write
using T4 = typename VecT4<T>::Type;
using T2 = typename VecT2<T>::Type;
if (dim % 4 == 0) {
SwitchWarpSoftmaxBackward<T, T4, LogMode>(
blocks, threads, ctx, dx_data, dout->data<T>(), out->data<T>(), N,
dim, dim, kDimLog2);
} else if (dim % 2 == 0) {
SwitchWarpSoftmaxBackward<T, T2, LogMode>(
blocks, threads, ctx, dx_data, dout->data<T>(), out->data<T>(), N,
dim, dim, kDimLog2);
} else {
SwitchWarpSoftmaxBackward<T, T, LogMode>(
blocks, threads, ctx, dx_data, dout->data<T>(), out->data<T>(), N,
dim, dim, kDimLog2);
}
} else {
ScopedTensorDescriptor desc;
std::vector<int> tensor_dims = {N, dim, D, 1};
DataLayout layout = DataLayout::kNCHW;
#ifdef PADDLE_WITH_HIP
miopenTensorDescriptor_t desc_ = desc.descriptor<T>(layout, tensor_dims);
#else
cudnnTensorDescriptor_t desc_ = desc.descriptor<T>(layout, tensor_dims);
#endif
auto& dev_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
#ifdef PADDLE_WITH_HIP
auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE
: MIOPEN_SOFTMAX_MODE_CHANNEL;
if (LogMode) {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxBackward_V2(
handle, platform::CudnnDataType<T>::kOne(), desc_, out->data<T>(),
desc_, dout->data<T>(), platform::CudnnDataType<T>::kZero(), desc_,
dx_data, MIOPEN_SOFTMAX_LOG, mode));
} else {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSoftmaxBackward_V2(
handle, platform::CudnnDataType<T>::kOne(), desc_, out->data<T>(),
desc_, dout->data<T>(), platform::CudnnDataType<T>::kZero(), desc_,
dx_data, MIOPEN_SOFTMAX_ACCURATE, mode));
}
#else
auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
: CUDNN_SOFTMAX_MODE_CHANNEL;
if (LogMode) {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxBackward(
handle, CUDNN_SOFTMAX_LOG, mode, platform::CudnnDataType<T>::kOne(),
desc_, out->data<T>(), desc_, dout->data<T>(),
platform::CudnnDataType<T>::kZero(), desc_, dx_data));
} else {
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxBackward(
handle, CUDNN_SOFTMAX_ACCURATE, mode,
platform::CudnnDataType<T>::kOne(), desc_, out->data<T>(), desc_,
dout->data<T>(), platform::CudnnDataType<T>::kZero(), desc_,
dx_data));
}
#endif
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
#ifdef PADDLE_WITH_HIP
// MIOPEN does not support double
REGISTER_OP_KERNEL(softmax, CUDNN, plat::CUDAPlace,
ops::SoftmaxCUDNNKernel<float>,
ops::SoftmaxCUDNNKernel<plat::float16>);
REGISTER_OP_KERNEL(softmax_grad, CUDNN, plat::CUDAPlace,
ops::SoftmaxGradCUDNNKernel<float>,
ops::SoftmaxGradCUDNNKernel<plat::float16>);
#else
REGISTER_OP_KERNEL(softmax, CUDNN, plat::CUDAPlace,
ops::SoftmaxCUDNNKernel<float>,
ops::SoftmaxCUDNNKernel<double>,
ops::SoftmaxCUDNNKernel<plat::float16>);
REGISTER_OP_KERNEL(softmax_grad, CUDNN, plat::CUDAPlace,
ops::SoftmaxGradCUDNNKernel<float>,
ops::SoftmaxGradCUDNNKernel<double>,
ops::SoftmaxGradCUDNNKernel<plat::float16>);
#endif
|
c199b295ab0ead05f269d183a873e9d80b571ed9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
//#include <math.h>
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
#if defined(__HIPCC__) && (TORCH_HIP_VERSION >= 7000)
#include <cusolverDn.h>
#endif
#include <rocblas.h>
#include <hipfft.h>
#include "Utilities.cuh"
#define DEBUG
/*******************/
/* iDivUp FUNCTION */
/*******************/
extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
void gpuAssert(hipError_t code, char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) { exit(code); }
}
}
extern "C" void gpuErrchk(hipError_t ans) { gpuAssert((ans), __FILE__, __LINE__); }
/**************************/
/* CUSOLVE ERROR CHECKING */
/**************************/
#if __CUDA_ARCH__ >= 700
static const char *_cusolverGetErrorEnum(cusolverStatus_t error)
{
switch (error)
{
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
inline void __cusolveSafeCall(cusolverStatus_t err, const char *file, const int line)
{
if (CUSOLVER_STATUS_SUCCESS != err) {
    fprintf(stderr, "CUSOLVE error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err, \
_cusolverGetErrorEnum(err)); \
hipDeviceReset(); assert(0); \
}
}
extern "C" void cusolveSafeCall(cusolverStatus_t err) { __cusolveSafeCall(err, __FILE__, __LINE__); }
#endif
/*************************/
/* CUBLAS ERROR CHECKING */
/*************************/
static const char *_cublasGetErrorEnum(hipblasStatus_t error)
{
switch (error)
{
case HIPBLAS_STATUS_SUCCESS:
return "HIPBLAS_STATUS_SUCCESS";
case HIPBLAS_STATUS_NOT_INITIALIZED:
return "HIPBLAS_STATUS_NOT_INITIALIZED";
case HIPBLAS_STATUS_ALLOC_FAILED:
return "HIPBLAS_STATUS_ALLOC_FAILED";
case HIPBLAS_STATUS_INVALID_VALUE:
return "HIPBLAS_STATUS_INVALID_VALUE";
case HIPBLAS_STATUS_ARCH_MISMATCH:
return "HIPBLAS_STATUS_ARCH_MISMATCH";
case HIPBLAS_STATUS_MAPPING_ERROR:
return "HIPBLAS_STATUS_MAPPING_ERROR";
case HIPBLAS_STATUS_EXECUTION_FAILED:
return "HIPBLAS_STATUS_EXECUTION_FAILED";
case HIPBLAS_STATUS_INTERNAL_ERROR:
return "HIPBLAS_STATUS_INTERNAL_ERROR";
case HIPBLAS_STATUS_NOT_SUPPORTED:
return "HIPBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
inline void __cublasSafeCall(hipblasStatus_t err, const char *file, const int line)
{
if (HIPBLAS_STATUS_SUCCESS != err) {
    fprintf(stderr, "CUBLAS error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err, \
_cublasGetErrorEnum(err)); \
hipDeviceReset(); assert(0); \
}
}
extern "C" void cublasSafeCall(hipblasStatus_t err) { __cublasSafeCall(err, __FILE__, __LINE__); }
/************************/
/* CUFFT ERROR CHECKING */
/************************/
static const char *_cufftGetErrorEnum(hipfftResult error)
{
switch (error)
{
case HIPFFT_SUCCESS:
return "HIPFFT_SUCCESS";
case HIPFFT_INVALID_PLAN:
return "HIPFFT_INVALID_PLAN";
case HIPFFT_ALLOC_FAILED:
return "HIPFFT_ALLOC_FAILED";
case HIPFFT_INVALID_TYPE:
return "HIPFFT_INVALID_TYPE";
case HIPFFT_INVALID_VALUE:
return "HIPFFT_INVALID_VALUE";
case HIPFFT_INTERNAL_ERROR:
return "HIPFFT_INTERNAL_ERROR";
case HIPFFT_EXEC_FAILED:
return "HIPFFT_EXEC_FAILED";
case HIPFFT_SETUP_FAILED:
return "HIPFFT_SETUP_FAILED";
case HIPFFT_INVALID_SIZE:
return "HIPFFT_INVALID_SIZE";
case HIPFFT_UNALIGNED_DATA:
return "HIPFFT_UNALIGNED_DATA";
}
return "<unknown>";
}
// --- CUFFTSAFECALL
inline void __cufftSafeCall(hipfftResult err, const char *file, const int line)
{
if (HIPFFT_SUCCESS != err) {
fprintf(stderr, "CUFFT error in file '%s', line %d\n \nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err, _cufftGetErrorEnum(err));
hipDeviceReset(); assert(0);
}
}
extern "C" void cufftSafeCall(hipfftResult err) { __cufftSafeCall(err, __FILE__, __LINE__); }
/***************************/
/* CUSPARSE ERROR CHECKING */
/***************************/
static const char *_cusparseGetErrorEnum(hipsparseStatus_t error)
{
switch (error)
{
case HIPSPARSE_STATUS_SUCCESS:
return "HIPSPARSE_STATUS_SUCCESS";
case HIPSPARSE_STATUS_NOT_INITIALIZED:
return "HIPSPARSE_STATUS_NOT_INITIALIZED";
case HIPSPARSE_STATUS_ALLOC_FAILED:
return "HIPSPARSE_STATUS_ALLOC_FAILED";
case HIPSPARSE_STATUS_INVALID_VALUE:
return "HIPSPARSE_STATUS_INVALID_VALUE";
case HIPSPARSE_STATUS_ARCH_MISMATCH:
return "HIPSPARSE_STATUS_ARCH_MISMATCH";
case HIPSPARSE_STATUS_MAPPING_ERROR:
return "HIPSPARSE_STATUS_MAPPING_ERROR";
case HIPSPARSE_STATUS_EXECUTION_FAILED:
return "HIPSPARSE_STATUS_EXECUTION_FAILED";
case HIPSPARSE_STATUS_INTERNAL_ERROR:
return "HIPSPARSE_STATUS_INTERNAL_ERROR";
case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case HIPSPARSE_STATUS_ZERO_PIVOT:
return "HIPSPARSE_STATUS_ZERO_PIVOT";
}
return "<unknown>";
}
inline void __cusparseSafeCall(hipsparseStatus_t err, const char *file, const int line)
{
if (HIPSPARSE_STATUS_SUCCESS != err) {
    fprintf(stderr, "CUSPARSE error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err, \
_cusparseGetErrorEnum(err)); \
hipDeviceReset(); assert(0); \
}
}
extern "C" void cusparseSafeCall(hipsparseStatus_t err) { __cusparseSafeCall(err, __FILE__, __LINE__); }
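/****************************************/
/* [EDITORIAL] SAFE-CALL USAGE (SKETCH) */
/****************************************/
// A minimal usage sketch added for illustration only; it is not part of the
// original source. The function name safeCallUsageSketch is an editorial
// placeholder; the sketch assumes a device has already been selected and uses
// only APIs already included above (HIP runtime and hipFFT).
static inline void safeCallUsageSketch(const int N)
{
    double *d_buf = NULL;
    gpuErrchk(hipMalloc((void**)&d_buf, N * sizeof(double)));   // --- Runtime calls go through gpuErrchk
    hipfftHandle plan;
    cufftSafeCall(hipfftPlan1d(&plan, N, HIPFFT_Z2Z, 1));       // --- Library calls go through the *SafeCall wrappers
    cufftSafeCall(hipfftDestroy(plan));
    gpuErrchk(hipFree(d_buf));
}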
/************************/
/* REVERSE ARRAY KERNEL */
/************************/
#define BLOCKSIZE_REVERSE 256
// --- Credit to http://www.drdobbs.com/parallel/cuda-supercomputing-for-the-masses-part/208801731?pgno=2
template <class T>
__global__ void reverseArrayKernel(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a)
{
// --- Credit to the simpleTemplates CUDA sample
SharedMemory<T> smem;
T* s_data = smem.getPointer();
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int id = threadIdx.x;
const int offset = blockDim.x * (blockIdx.x + 1);
// --- Load one element per thread from device memory and store it *in reversed order* into shared memory
if (tid < N) s_data[BLOCKSIZE_REVERSE - (id + 1)] = a * d_in[tid];
// --- Block until all threads in the block have written their data to shared memory
__syncthreads();
// --- Write the data from shared memory in forward order
if ((N - offset + id) >= 0) d_out[N - offset + id] = s_data[threadIdx.x];
}
/************************/
/* REVERSE ARRAY KERNEL */
/************************/
template <class T>
void reverseArray(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a) {
reverseArrayKernel << <iDivUp(N, BLOCKSIZE_REVERSE), BLOCKSIZE_REVERSE, BLOCKSIZE_REVERSE * sizeof(T) >> >(d_in, d_out, N, a);
#ifdef DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
template void reverseArray<float>(const float * __restrict__, float * __restrict__, const int, const float);
template void reverseArray<double>(const double * __restrict__, double * __restrict__, const int, const double);
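/********************************************/
/* [EDITORIAL] REVERSE ARRAY USAGE (SKETCH) */
/********************************************/
// Added for illustration only; not part of the original source. reverseArray
// reverses d_in into d_out while scaling by a. The function name
// reverseArrayUsageSketch is an editorial placeholder; the sketch assumes float
// data and a device already selected by the caller.
static inline void reverseArrayUsageSketch(const int N)
{
    float *d_in = NULL, *d_out = NULL;
    gpuErrchk(hipMalloc((void**)&d_in,  N * sizeof(float)));
    gpuErrchk(hipMalloc((void**)&d_out, N * sizeof(float)));
    reverseArray<float>(d_in, d_out, N, 1.f);   // --- Scale factor a = 1.f copies values unchanged
    gpuErrchk(hipFree(d_in));
    gpuErrchk(hipFree(d_out));
}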
/********************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION KERNEL */
/********************************************************/
#define BLOCKSIZE_CART2POL 256
template <class T>
__global__ void Cartesian2PolarKernel(const T * __restrict__ d_x, const T * __restrict__ d_y, T * __restrict__ d_rho, T * __restrict__ d_theta,
const int N, const T a) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
d_rho[tid] = a * hypot(d_x[tid], d_y[tid]);
d_theta[tid] = atan2(d_y[tid], d_x[tid]);
}
}
/*******************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - GPU */
/*******************************************************/
//template <class T>
//thrust::pair<T *,T *> Cartesian2Polar(const T * __restrict__ d_x, const T * __restrict__ d_y, const int N, const T a) {
//
// T *d_rho; gpuErrchk(hipMalloc((void**)&d_rho, N * sizeof(T)));
// T *d_theta; gpuErrchk(hipMalloc((void**)&d_theta, N * sizeof(T)));
//
// Cartesian2PolarKernel<<<iDivUp(N, BLOCKSIZE_CART2POL), BLOCKSIZE_CART2POL>>>(d_x, d_y, d_rho, d_theta, N, a);
//#ifdef DEBUG
// gpuErrchk(hipPeekAtLastError());
// gpuErrchk(hipDeviceSynchronize());
//#endif
//
// return thrust::make_pair(d_rho, d_theta);
//}
//
//template thrust::pair<float *, float *> Cartesian2Polar<float> (const float *, const float *, const int, const float);
//template thrust::pair<double *, double *> Cartesian2Polar<double> (const double *, const double *, const int, const double);
/*******************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - CPU */
/*******************************************************/
//template <class T>
//thrust::pair<T *,T *> h_Cartesian2Polar(const T * __restrict__ h_x, const T * __restrict__ h_y, const int N, const T a) {
//
// T *h_rho = (T *)malloc(N * sizeof(T));
// T *h_theta = (T *)malloc(N * sizeof(T));
//
// for (int i = 0; i < N; i++) {
// h_rho[i] = a * hypot(h_x[i], h_y[i]);
// h_theta[i] = atan2(h_y[i], h_x[i]);
// }
//
// return thrust::make_pair(h_rho, h_theta);
//}
//
//template thrust::pair<float *, float *> h_Cartesian2Polar<float> (const float *, const float *, const int, const float);
//template thrust::pair<double *, double *> h_Cartesian2Polar<double> (const double *, const double *, const int, const double);
/*******************************/
/* COMPUTE L2 NORM OF A VECTOR */
/*******************************/
template<class T>
T h_l2_norm(T *v1, T *v2, const int N) {
T norm = (T)0;
for (int i = 0; i < N; ++i)
{
T d = v1[i] - v2[i];
norm = norm + d * d;
}
return sqrt(norm);
}
template float h_l2_norm<float> (float *, float *, const int);
template double h_l2_norm<double>(double *, double *, const int);
/*******************************/
/* LINEAR COMBINATION FUNCTION */
/*******************************/
void linearCombination(const float * __restrict__ d_coeff, const float * __restrict__ d_basis_functions_real, float * __restrict__ d_linear_combination,
const int N_basis_functions, const int N_sampling_points, const hipblasHandle_t handle) {
float alpha = 1.f;
float beta = 0.f;
cublasSafeCall(hipblasSgemv(handle, HIPBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points,
d_coeff, 1, &beta, d_linear_combination, 1));
}
void linearCombination(const double * __restrict__ d_coeff, const double * __restrict__ d_basis_functions_real, double * __restrict__ d_linear_combination,
const int N_basis_functions, const int N_sampling_points, const hipblasHandle_t handle) {
double alpha = 1.;
double beta = 0.;
cublasSafeCall(hipblasDgemv(handle, HIPBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points,
d_coeff, 1, &beta, d_linear_combination, 1));
}
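// --- [EDITORIAL NOTE] Illustration only, not part of the original source: with
// HIPBLAS_OP_N, alpha = 1 and beta = 0, the gemv above computes
// d_linear_combination = d_basis_functions_real * d_coeff, i.e. each of the
// N_sampling_points outputs is sum_k coeff[k] * basis_k evaluated at that
// sample, with one basis function per column of the (column-major) matrix.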
/******************************/
/* ADD A CONSTANT TO A VECTOR */
/******************************/
#define BLOCKSIZE_VECTORADDCONSTANT 256
template<class T>
__global__ void vectorAddConstantKernel(T * __restrict__ d_in, const T scalar, const int N) {
const int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N) d_in[tid] += scalar;
}
template<class T>
void vectorAddConstant(T * __restrict__ d_in, const T scalar, const int N) {
vectorAddConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORADDCONSTANT), BLOCKSIZE_VECTORADDCONSTANT >> >(d_in, scalar, N);
}
template void vectorAddConstant<float>(float * __restrict__, const float, const int);
template void vectorAddConstant<double>(double * __restrict__, const double, const int);
/*****************************************/
/* MULTIPLY A VECTOR BY A CONSTANT - GPU */
/*****************************************/
#define BLOCKSIZE_VECTORMULCONSTANT 256
template<class T>
__global__ void vectorMulConstantKernel(T * __restrict__ d_in, const T scalar, const int N) {
const int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N) d_in[tid] *= scalar;
}
template<class T>
void vectorMulConstant(T * __restrict__ d_in, const T scalar, const int N) {
vectorMulConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORMULCONSTANT), BLOCKSIZE_VECTORMULCONSTANT >> >(d_in, scalar, N);
}
template void vectorMulConstant<float>(float * __restrict__, const float, const int);
template void vectorMulConstant<double>(double * __restrict__, const double, const int);
/*****************************************/
/* MULTIPLY A VECTOR BY A CONSTANT - CPU */
/*****************************************/
template<class T>
void h_vectorMulConstant(T * __restrict__ h_in, const T scalar, const int N) {
for (int i = 0; i < N; i++) h_in[i] *= scalar;
}
template void h_vectorMulConstant<float>(float * __restrict__, const float, const int);
template void h_vectorMulConstant<double>(double * __restrict__, const double, const int);
/*****************************************************/
/* FUSED MULTIPLY ADD OPERATIONS FOR HOST AND DEVICE */
/*****************************************************/
template<class T>
__host__ __device__ T fma2(T x, T y, T z) { return x * y + z; }
template float fma2<float >(float, float, float);
template double fma2<double>(double, double, double);
/*******************/
/* MODULO FUNCTION */
/*******************/
__device__ int modulo(int val, int _mod)
{
int P;
if (val > 0) { (!(_mod & (_mod - 1)) ? P = val&(_mod - 1) : P = val % (_mod)); return P; }
else
{
(!(_mod & (_mod - 1)) ? P = (-val)&(_mod - 1) : P = (-val) % (_mod));
if (P > 0) return _mod - P;
else return 0;
}
}
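// --- [EDITORIAL NOTE] Worked example, not part of the original source: the test
// !(_mod & (_mod - 1)) detects a power-of-two modulus and replaces the division
// by a bit mask. E.g. modulo(-3, 8): P = 3 & 7 = 3, and the result is 8 - 3 = 5,
// i.e. a non-negative remainder is always returned.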
/***************************************/
/* ATOMIC ADDITION FUNCTION ON DOUBLES */
/***************************************/
#if defined(__HIPCC__) && (TORCH_HIP_VERSION < 8000)
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
register unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
/*********************************/
/* ATOMIC MIN FUNCTION ON FLOATS */
/*********************************/
__device__ float atomicMin(float* address, float val)
{
int* address_as_i = (int*)address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
|
c199b295ab0ead05f269d183a873e9d80b571ed9.cu
|
#include <stdio.h>
#include <assert.h>
//#include <math.h>
#include "cuda_runtime.h"
#include <cuda.h>
#if defined(__CUDACC__) && (CUDA_VERSION >= 7000)
#include <cusolverDn.h>
#endif
#include <cublas_v2.h>
#include <cufft.h>
#include "Utilities.cuh"
#define DEBUG
/*******************/
/* iDivUp FUNCTION */
/*******************/
extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
void gpuAssert(cudaError_t code, char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) { exit(code); }
}
}
extern "C" void gpuErrchk(cudaError_t ans) { gpuAssert((ans), __FILE__, __LINE__); }
/**************************/
/* CUSOLVE ERROR CHECKING */
/**************************/
#if __CUDA_ARCH__ >= 700
static const char *_cusolverGetErrorEnum(cusolverStatus_t error)
{
switch (error)
{
case CUSOLVER_STATUS_SUCCESS:
return "CUSOLVER_SUCCESS";
case CUSOLVER_STATUS_NOT_INITIALIZED:
return "CUSOLVER_STATUS_NOT_INITIALIZED";
case CUSOLVER_STATUS_ALLOC_FAILED:
return "CUSOLVER_STATUS_ALLOC_FAILED";
case CUSOLVER_STATUS_INVALID_VALUE:
return "CUSOLVER_STATUS_INVALID_VALUE";
case CUSOLVER_STATUS_ARCH_MISMATCH:
return "CUSOLVER_STATUS_ARCH_MISMATCH";
case CUSOLVER_STATUS_EXECUTION_FAILED:
return "CUSOLVER_STATUS_EXECUTION_FAILED";
case CUSOLVER_STATUS_INTERNAL_ERROR:
return "CUSOLVER_STATUS_INTERNAL_ERROR";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
}
return "<unknown>";
}
inline void __cusolveSafeCall(cusolverStatus_t err, const char *file, const int line)
{
if (CUSOLVER_STATUS_SUCCESS != err) {
    fprintf(stderr, "CUSOLVE error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err, \
_cusolverGetErrorEnum(err)); \
cudaDeviceReset(); assert(0); \
}
}
extern "C" void cusolveSafeCall(cusolverStatus_t err) { __cusolveSafeCall(err, __FILE__, __LINE__); }
#endif
/*************************/
/* CUBLAS ERROR CHECKING */
/*************************/
static const char *_cublasGetErrorEnum(cublasStatus_t error)
{
switch (error)
{
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "<unknown>";
}
inline void __cublasSafeCall(cublasStatus_t err, const char *file, const int line)
{
if (CUBLAS_STATUS_SUCCESS != err) {
    fprintf(stderr, "CUBLAS error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err, \
_cublasGetErrorEnum(err)); \
cudaDeviceReset(); assert(0); \
}
}
extern "C" void cublasSafeCall(cublasStatus_t err) { __cublasSafeCall(err, __FILE__, __LINE__); }
/************************/
/* CUFFT ERROR CHECKING */
/************************/
static const char *_cufftGetErrorEnum(cufftResult error)
{
switch (error)
{
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
}
return "<unknown>";
}
// --- CUFFTSAFECALL
inline void __cufftSafeCall(cufftResult err, const char *file, const int line)
{
if (CUFFT_SUCCESS != err) {
fprintf(stderr, "CUFFT error in file '%s', line %d\n \nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err, _cufftGetErrorEnum(err));
cudaDeviceReset(); assert(0);
}
}
extern "C" void cufftSafeCall(cufftResult err) { __cufftSafeCall(err, __FILE__, __LINE__); }
/***************************/
/* CUSPARSE ERROR CHECKING */
/***************************/
static const char *_cusparseGetErrorEnum(cusparseStatus_t error)
{
switch (error)
{
case CUSPARSE_STATUS_SUCCESS:
return "CUSPARSE_STATUS_SUCCESS";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "CUSPARSE_STATUS_NOT_INITIALIZED";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "CUSPARSE_STATUS_ALLOC_FAILED";
case CUSPARSE_STATUS_INVALID_VALUE:
return "CUSPARSE_STATUS_INVALID_VALUE";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "CUSPARSE_STATUS_ARCH_MISMATCH";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "CUSPARSE_STATUS_MAPPING_ERROR";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "CUSPARSE_STATUS_EXECUTION_FAILED";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "CUSPARSE_STATUS_INTERNAL_ERROR";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
case CUSPARSE_STATUS_ZERO_PIVOT:
return "CUSPARSE_STATUS_ZERO_PIVOT";
}
return "<unknown>";
}
inline void __cusparseSafeCall(cusparseStatus_t err, const char *file, const int line)
{
if (CUSPARSE_STATUS_SUCCESS != err) {
    fprintf(stderr, "CUSPARSE error in file '%s', line %d\nerror %d: %s\nterminating!\n", __FILE__, __LINE__, err, \
_cusparseGetErrorEnum(err)); \
cudaDeviceReset(); assert(0); \
}
}
extern "C" void cusparseSafeCall(cusparseStatus_t err) { __cusparseSafeCall(err, __FILE__, __LINE__); }
/************************/
/* REVERSE ARRAY KERNEL */
/************************/
#define BLOCKSIZE_REVERSE 256
// --- Credit to http://www.drdobbs.com/parallel/cuda-supercomputing-for-the-masses-part/208801731?pgno=2
template <class T>
__global__ void reverseArrayKernel(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a)
{
// --- Credit to the simpleTemplates CUDA sample
SharedMemory<T> smem;
T* s_data = smem.getPointer();
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int id = threadIdx.x;
const int offset = blockDim.x * (blockIdx.x + 1);
// --- Load one element per thread from device memory and store it *in reversed order* into shared memory
if (tid < N) s_data[BLOCKSIZE_REVERSE - (id + 1)] = a * d_in[tid];
// --- Block until all threads in the block have written their data to shared memory
__syncthreads();
// --- Write the data from shared memory in forward order
if ((N - offset + id) >= 0) d_out[N - offset + id] = s_data[threadIdx.x];
}
/************************/
/* REVERSE ARRAY KERNEL */
/************************/
template <class T>
void reverseArray(const T * __restrict__ d_in, T * __restrict__ d_out, const int N, const T a) {
reverseArrayKernel << <iDivUp(N, BLOCKSIZE_REVERSE), BLOCKSIZE_REVERSE, BLOCKSIZE_REVERSE * sizeof(T) >> >(d_in, d_out, N, a);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
template void reverseArray<float>(const float * __restrict__, float * __restrict__, const int, const float);
template void reverseArray<double>(const double * __restrict__, double * __restrict__, const int, const double);
/********************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION KERNEL */
/********************************************************/
#define BLOCKSIZE_CART2POL 256
template <class T>
__global__ void Cartesian2PolarKernel(const T * __restrict__ d_x, const T * __restrict__ d_y, T * __restrict__ d_rho, T * __restrict__ d_theta,
const int N, const T a) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
d_rho[tid] = a * hypot(d_x[tid], d_y[tid]);
d_theta[tid] = atan2(d_y[tid], d_x[tid]);
}
}
/*******************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - GPU */
/*******************************************************/
//template <class T>
//thrust::pair<T *,T *> Cartesian2Polar(const T * __restrict__ d_x, const T * __restrict__ d_y, const int N, const T a) {
//
// T *d_rho; gpuErrchk(cudaMalloc((void**)&d_rho, N * sizeof(T)));
// T *d_theta; gpuErrchk(cudaMalloc((void**)&d_theta, N * sizeof(T)));
//
// Cartesian2PolarKernel<<<iDivUp(N, BLOCKSIZE_CART2POL), BLOCKSIZE_CART2POL>>>(d_x, d_y, d_rho, d_theta, N, a);
//#ifdef DEBUG
// gpuErrchk(cudaPeekAtLastError());
// gpuErrchk(cudaDeviceSynchronize());
//#endif
//
// return thrust::make_pair(d_rho, d_theta);
//}
//
//template thrust::pair<float *, float *> Cartesian2Polar<float> (const float *, const float *, const int, const float);
//template thrust::pair<double *, double *> Cartesian2Polar<double> (const double *, const double *, const int, const double);
/*******************************************************/
/* CARTESIAN TO POLAR COORDINATES TRANSFORMATION - CPU */
/*******************************************************/
//template <class T>
//thrust::pair<T *,T *> h_Cartesian2Polar(const T * __restrict__ h_x, const T * __restrict__ h_y, const int N, const T a) {
//
// T *h_rho = (T *)malloc(N * sizeof(T));
// T *h_theta = (T *)malloc(N * sizeof(T));
//
// for (int i = 0; i < N; i++) {
// h_rho[i] = a * hypot(h_x[i], h_y[i]);
// h_theta[i] = atan2(h_y[i], h_x[i]);
// }
//
// return thrust::make_pair(h_rho, h_theta);
//}
//
//template thrust::pair<float *, float *> h_Cartesian2Polar<float> (const float *, const float *, const int, const float);
//template thrust::pair<double *, double *> h_Cartesian2Polar<double> (const double *, const double *, const int, const double);
/*******************************/
/* COMPUTE L2 NORM OF A VECTOR */
/*******************************/
template<class T>
T h_l2_norm(T *v1, T *v2, const int N) {
T norm = (T)0;
for (int i = 0; i < N; ++i)
{
T d = v1[i] - v2[i];
norm = norm + d * d;
}
return sqrt(norm);
}
template float h_l2_norm<float> (float *, float *, const int);
template double h_l2_norm<double>(double *, double *, const int);
/*******************************/
/* LINEAR COMBINATION FUNCTION */
/*******************************/
void linearCombination(const float * __restrict__ d_coeff, const float * __restrict__ d_basis_functions_real, float * __restrict__ d_linear_combination,
const int N_basis_functions, const int N_sampling_points, const cublasHandle_t handle) {
float alpha = 1.f;
float beta = 0.f;
cublasSafeCall(cublasSgemv(handle, CUBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points,
d_coeff, 1, &beta, d_linear_combination, 1));
}
void linearCombination(const double * __restrict__ d_coeff, const double * __restrict__ d_basis_functions_real, double * __restrict__ d_linear_combination,
const int N_basis_functions, const int N_sampling_points, const cublasHandle_t handle) {
double alpha = 1.;
double beta = 0.;
cublasSafeCall(cublasDgemv(handle, CUBLAS_OP_N, N_sampling_points, N_basis_functions, &alpha, d_basis_functions_real, N_sampling_points,
d_coeff, 1, &beta, d_linear_combination, 1));
}
/******************************/
/* ADD A CONSTANT TO A VECTOR */
/******************************/
#define BLOCKSIZE_VECTORADDCONSTANT 256
template<class T>
__global__ void vectorAddConstantKernel(T * __restrict__ d_in, const T scalar, const int N) {
const int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N) d_in[tid] += scalar;
}
template<class T>
void vectorAddConstant(T * __restrict__ d_in, const T scalar, const int N) {
vectorAddConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORADDCONSTANT), BLOCKSIZE_VECTORADDCONSTANT >> >(d_in, scalar, N);
}
template void vectorAddConstant<float>(float * __restrict__, const float, const int);
template void vectorAddConstant<double>(double * __restrict__, const double, const int);
/*****************************************/
/* MULTIPLY A VECTOR BY A CONSTANT - GPU */
/*****************************************/
#define BLOCKSIZE_VECTORMULCONSTANT 256
template<class T>
__global__ void vectorMulConstantKernel(T * __restrict__ d_in, const T scalar, const int N) {
const int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N) d_in[tid] *= scalar;
}
template<class T>
void vectorMulConstant(T * __restrict__ d_in, const T scalar, const int N) {
vectorMulConstantKernel << <iDivUp(N, BLOCKSIZE_VECTORMULCONSTANT), BLOCKSIZE_VECTORMULCONSTANT >> >(d_in, scalar, N);
}
template void vectorMulConstant<float>(float * __restrict__, const float, const int);
template void vectorMulConstant<double>(double * __restrict__, const double, const int);
/*****************************************/
/* MULTIPLY A VECTOR BY A CONSTANT - CPU */
/*****************************************/
template<class T>
void h_vectorMulConstant(T * __restrict__ h_in, const T scalar, const int N) {
for (int i = 0; i < N; i++) h_in[i] *= scalar;
}
template void h_vectorMulConstant<float>(float * __restrict__, const float, const int);
template void h_vectorMulConstant<double>(double * __restrict__, const double, const int);
/*****************************************************/
/* FUSED MULTIPLY ADD OPERATIONS FOR HOST AND DEVICE */
/*****************************************************/
template<class T>
__host__ __device__ T fma2(T x, T y, T z) { return x * y + z; }
template float fma2<float >(float, float, float);
template double fma2<double>(double, double, double);
/*******************/
/* MODULO FUNCTION */
/*******************/
__device__ int modulo(int val, int _mod)
{
int P;
if (val > 0) { (!(_mod & (_mod - 1)) ? P = val&(_mod - 1) : P = val % (_mod)); return P; }
else
{
(!(_mod & (_mod - 1)) ? P = (-val)&(_mod - 1) : P = (-val) % (_mod));
if (P > 0) return _mod - P;
else return 0;
}
}
/***************************************/
/* ATOMIC ADDITION FUNCTION ON DOUBLES */
/***************************************/
#if defined(__CUDACC__) && (CUDA_VERSION < 8000)
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
register unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
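// --- [EDITORIAL NOTE] Illustration only, not part of the original source: the
// loop above is the usual compare-and-swap pattern; it reinterprets the double
// as a 64-bit integer, adds val to the previously read value, and retries
// atomicCAS until no other thread has modified the address in between. The
// atomicMin below applies the same pattern with fminf on the float bits.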
/*********************************/
/* ATOMIC MIN FUNCTION ON FLOATS */
/*********************************/
__device__ float atomicMin(float* address, float val)
{
int* address_as_i = (int*)address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
|
4217d1fc303b410b782aa78022cd1b5ce033956e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <UniformGridKernel.cuh>
#include <double3.h>
#include <AtomicDoubleAdd.cuh>
#include <stdio.h>
/****************************************************************************************************************************/
/****************************************************************************************************************************/
extern "C"
{
/****************************************************************************************************************************/
/****************************************************************************************************************************/
__global__ void createCell_Kernel(double xC, double yC, double zC, double l, double w, double d,
float sizeCell, int nbCellsX, int nbCellsY, int nbCellsZ, double3* m_dPos, int* nbParticles)
{
uint indexX = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
uint indexY = __umul24(blockIdx.y,blockDim.y) + threadIdx.y;
uint indexZ = __umul24(blockIdx.z,blockDim.z) + threadIdx.z;
if(indexX<nbCellsX && indexY<nbCellsY && indexZ<nbCellsZ)
{
uint indexCell = indexX + indexY*nbCellsX + indexZ*nbCellsX*nbCellsY;
nbParticles[indexCell] = 0;
m_dPos[indexCell].x = xC-(l/2)+(sizeCell/2)+indexX*sizeCell;
m_dPos[indexCell].y = yC-(w/2)+(sizeCell/2)+indexY*sizeCell;
m_dPos[indexCell].z = zC-(d/2)+(sizeCell/2)+indexZ*sizeCell;
}
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
__global__ void reinitCells_Kernel(int* nbParticles, int nbCellsX, int nbCellsY, int nbCellsZ)
{
uint indexX = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
uint indexY = __umul24(blockIdx.y,blockDim.y) + threadIdx.y;
uint indexZ = __umul24(blockIdx.z,blockDim.z) + threadIdx.z;
if(indexX<nbCellsX && indexY<nbCellsY && indexZ<nbCellsZ)
{
uint indexCell = indexX + indexY*nbCellsX + indexZ*nbCellsX*nbCellsY;
nbParticles[indexCell] = 0;
}
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
__global__ void storeParticles_Kernel(double3* positions, uint nbBodies, int* nbParticles, int* indexParticles, float sizeCell,
float3 Min, int nbCellsX, int nbCellsY, int nbCellsZ)
{
uint indexP = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if(indexP < nbBodies){
int i = floor((positions[indexP].x-Min.x)/sizeCell);
int j = floor((positions[indexP].y-Min.y)/sizeCell);
int k = floor((positions[indexP].z-Min.z)/sizeCell);
uint indexC = i + j*nbCellsX + k*nbCellsX*nbCellsY;
uint nb = atomicAdd(&nbParticles[indexC],1);
atomicExch(&indexParticles[indexC*MAXPARTICLES+nb],indexP);
}
}
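// --- [EDITORIAL NOTE] Illustration only, not part of the original source: a
// particle at position p is binned into cell (i,j,k) with
// i = floor((p.x - Min.x)/sizeCell) (likewise for j and k), flattened to
// indexC = i + j*nbCellsX + k*nbCellsX*nbCellsY; atomicAdd reserves a unique
// slot in that cell so concurrent threads can append their indices without races.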
/****************************************************************************************************************************/
/****************************************************************************************************************************/
__global__ void searchNeighbooring_Kernel(double3* positions, double* radius, int* indexParticles, int* nbParticles,
int nbCellsX, int nbCellsY, int nbCellsZ, float sizeCell, float3 Min, double scale,
partVoisine voisines, uint nbBodies)
{
uint indexP = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if(indexP < nbBodies)
{
voisines.nbVoisines[indexP] = 0;
int I = floor((positions[indexP].x-Min.x)/sizeCell);
int J = floor((positions[indexP].y-Min.y)/sizeCell);
int K = floor((positions[indexP].z-Min.z)/sizeCell);
int nbC = ceil(scale);
if(I>=0 && J>=0 && K>=0 && I < nbCellsX && J < nbCellsY && K < nbCellsZ){
for(int i=I-nbC; i<=I+nbC; i++){
for(int j=J-nbC; j<=J+nbC; j++){
for(int k=K-nbC; k<=K+nbC; k++){
int indexC = i + j*nbCellsX + k*nbCellsX*nbCellsY;
if(i>=0 && j>=0 && k>=0 && i < nbCellsX && j < nbCellsY && k < nbCellsZ){
if(nbParticles[indexC]>0){
for(uint n=0;n<nbParticles[indexC];n++){
uint indexP2 = indexParticles[indexC*MAXPARTICLES+n];
double d = length(positions[indexP]-positions[indexP2]);
if(d<=radius[indexP]){
voisines.nbVoisines[indexP] += 1;
voisines.listeVoisine[(indexP*200)+voisines.nbVoisines[indexP]-1] = indexP2;
}
}
}
}
}
}
}
}
}
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
__global__ void evaluateIso_Kernel(double4* pos_MC, uint nbCellsX_MC, uint nbCellsY_MC, uint nbCellsZ_MC, double scale,
float3 Min, float sizeCell, int nbCellsX, int nbCellsY, int nbCellsZ, int* nbParticles, int* indexParticles,
double3* pos, double* radius, uint nbBodies)
{
uint indexX = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
uint indexY = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
uint indexZ = __mul24(blockIdx.z,blockDim.z) + threadIdx.z;
if(indexX<nbCellsX_MC && indexY<nbCellsY_MC && indexZ<nbCellsZ_MC)
{
double iso = 0;
uint indexC_MC = indexX + indexY*nbCellsX_MC + indexZ*nbCellsX_MC*nbCellsY_MC;
int I = floor((pos_MC[indexC_MC].x-Min.x)/sizeCell);
int J = floor((pos_MC[indexC_MC].y-Min.y)/sizeCell);
int K = floor((pos_MC[indexC_MC].z-Min.z)/sizeCell);
double3 pos1 = make_double3(pos_MC[indexC_MC].x,pos_MC[indexC_MC].y,pos_MC[indexC_MC].z);
int nbC = floor(1/scale);
for(int i=I-nbC;i<=I+nbC;i++){
for(int j=J-nbC;j<=J+nbC;j++){
for(int k=K-nbC;k<=K+nbC;k++){
if(i>=0 && j>=0 && k>=0 && i < nbCellsX && j < nbCellsY && k < nbCellsZ){
int indexC = i + j*nbCellsX + k*nbCellsX*nbCellsY;
if(nbParticles[indexC]>0){
for(uint n=0;n<nbParticles[indexC];n++){
uint indexP2 = indexParticles[indexC*MAXPARTICLES+n];
if(indexP2<nbBodies){
double3 pos2 = pos[indexP2];
double r = radius[indexP2]/scale;
double d = length(pos1 - pos2);
if(d<=r)
iso += (1/powf(r,6))*powf(r*r-d*d,3);
}
}
}
}
}
}
}
pos_MC[indexC_MC].w = iso;
}
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
__global__ void computeNormales_Vertexs_Kernel(double3* posV, double3* normales, float scale, uint nbV,
float3 Min, float sizeCell, int nbCellsX, int nbCellsY, int nbCellsZ,
int* nbParticles, int* indexParticles,
double3* pos, double* radius, double* mass, double* density, uint nbBodies)
{
uint index = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if(index<nbV)
{
int I = floor((posV[index].x-Min.x)/sizeCell);
int J = floor((posV[index].y-Min.y)/sizeCell);
int K = floor((posV[index].z-Min.z)/sizeCell);
normales[index] = make_double3(0,0,0);
double3 pos1 = posV[index];
int nbC = 2;//floor(1/scale);
for(int i=I-nbC;i<=I+nbC;i++){
for(int j=J-nbC;j<=J+nbC;j++){
for(int k=K-nbC;k<=K+nbC;k++){
if(i>=0 && j>=0 && k>=0 && i < nbCellsX && j < nbCellsY && k < nbCellsZ){
int indexC = i + j*nbCellsX + k*nbCellsX*nbCellsY;
if(nbParticles[indexC]>0){
for(uint n=0;n<nbParticles[indexC];n++){
uint indexP2 = indexParticles[indexC*MAXPARTICLES+n];
if(indexP2<nbBodies){
double3 pos2 = pos[indexP2];
double r = radius[indexP2]/scale;
for(int nbP = 0; nbP<6; nbP++){
double3 P1P2 = pos1 - pos2;
double d = length(P1P2);
if(d<=r){
// normal evaluation
double b = 32*M_PI*powf(radius[indexP2],9);
double mk = -945/b;
normales[index] = normales[index] + P1P2*
(mass[indexP2]/density[indexP2])*pow((radius[indexP2]*
radius[indexP2])-(d*d),2)*mk;
}
}
}
}
}
}
}
}
}
double lN = length(normales[index]);
if(lN<=0)
printf("Normale 0\n");
else
normales[index] = -normales[index]/lN;
}
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
|
4217d1fc303b410b782aa78022cd1b5ce033956e.cu
|
#include <UniformGridKernel.cuh>
#include <double3.h>
#include <AtomicDoubleAdd.cuh>
#include <stdio.h>
/****************************************************************************************************************************/
/****************************************************************************************************************************/
extern "C"
{
/****************************************************************************************************************************/
/****************************************************************************************************************************/
__global__ void createCell_Kernel(double xC, double yC, double zC, double l, double w, double d,
float sizeCell, int nbCellsX, int nbCellsY, int nbCellsZ, double3* m_dPos, int* nbParticles)
{
uint indexX = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
uint indexY = __umul24(blockIdx.y,blockDim.y) + threadIdx.y;
uint indexZ = __umul24(blockIdx.z,blockDim.z) + threadIdx.z;
if(indexX<nbCellsX && indexY<nbCellsY && indexZ<nbCellsZ)
{
uint indexCell = indexX + indexY*nbCellsX + indexZ*nbCellsX*nbCellsY;
nbParticles[indexCell] = 0;
m_dPos[indexCell].x = xC-(l/2)+(sizeCell/2)+indexX*sizeCell;
m_dPos[indexCell].y = yC-(w/2)+(sizeCell/2)+indexY*sizeCell;
m_dPos[indexCell].z = zC-(d/2)+(sizeCell/2)+indexZ*sizeCell;
}
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
__global__ void reinitCells_Kernel(int* nbParticles, int nbCellsX, int nbCellsY, int nbCellsZ)
{
uint indexX = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
uint indexY = __umul24(blockIdx.y,blockDim.y) + threadIdx.y;
uint indexZ = __umul24(blockIdx.z,blockDim.z) + threadIdx.z;
if(indexX<nbCellsX && indexY<nbCellsY && indexZ<nbCellsZ)
{
uint indexCell = indexX + indexY*nbCellsX + indexZ*nbCellsX*nbCellsY;
nbParticles[indexCell] = 0;
}
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
__global__ void storeParticles_Kernel(double3* positions, uint nbBodies, int* nbParticles, int* indexParticles, float sizeCell,
float3 Min, int nbCellsX, int nbCellsY, int nbCellsZ)
{
uint indexP = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if(indexP < nbBodies){
int i = floor((positions[indexP].x-Min.x)/sizeCell);
int j = floor((positions[indexP].y-Min.y)/sizeCell);
int k = floor((positions[indexP].z-Min.z)/sizeCell);
uint indexC = i + j*nbCellsX + k*nbCellsX*nbCellsY;
uint nb = atomicAdd(&nbParticles[indexC],1);
atomicExch(&indexParticles[indexC*MAXPARTICLES+nb],indexP);
}
}
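// Illustrative host-side launch sketch (hypothetical helper, not part of the original source):
// the cell kernels above are indexed in 3D over the grid cells, while the particle kernel is
// indexed in 1D over the bodies. The block sizes below are assumptions, not project values.
void exampleBuildGrid(double3* d_pos, int* d_nbParticles, int* d_indexParticles,
uint nbBodies, float sizeCell, float3 Min,
int nbCellsX, int nbCellsY, int nbCellsZ)
{
dim3 blockCells(8, 8, 8);
dim3 gridCells((nbCellsX + 7) / 8, (nbCellsY + 7) / 8, (nbCellsZ + 7) / 8);
reinitCells_Kernel<<<gridCells, blockCells>>>(d_nbParticles, nbCellsX, nbCellsY, nbCellsZ);
uint blockParts = 256;
uint gridParts = (nbBodies + blockParts - 1) / blockParts;
storeParticles_Kernel<<<gridParts, blockParts>>>(d_pos, nbBodies, d_nbParticles, d_indexParticles,
sizeCell, Min, nbCellsX, nbCellsY, nbCellsZ);
}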
/****************************************************************************************************************************/
/****************************************************************************************************************************/
__global__ void searchNeighbooring_Kernel(double3* positions, double* radius, int* indexParticles, int* nbParticles,
int nbCellsX, int nbCellsY, int nbCellsZ, float sizeCell, float3 Min, double scale,
partVoisine voisines, uint nbBodies)
{
uint indexP = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if(indexP < nbBodies)
{
voisines.nbVoisines[indexP] = 0;
int I = floor((positions[indexP].x-Min.x)/sizeCell);
int J = floor((positions[indexP].y-Min.y)/sizeCell);
int K = floor((positions[indexP].z-Min.z)/sizeCell);
int nbC = ceil(scale);
if(I>=0 && J>=0 && K>=0 && I < nbCellsX && J < nbCellsY && K < nbCellsZ){
for(int i=I-nbC; i<=I+nbC; i++){
for(int j=J-nbC; j<=J+nbC; j++){
for(int k=K-nbC; k<=K+nbC; k++){
int indexC = i + j*nbCellsX + k*nbCellsX*nbCellsY;
if(i>=0 && j>=0 && k>=0 && i < nbCellsX && j < nbCellsY && k < nbCellsZ){
if(nbParticles[indexC]>0){
for(uint n=0;n<nbParticles[indexC];n++){
uint indexP2 = indexParticles[indexC*MAXPARTICLES+n];
double d = length(positions[indexP]-positions[indexP2]);
if(d<=radius[indexP]){
voisines.nbVoisines[indexP] += 1;
voisines.listeVoisine[(indexP*200)+voisines.nbVoisines[indexP]-1] = indexP2;
}
}
}
}
}
}
}
}
}
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
__global__ void evaluateIso_Kernel(double4* pos_MC, uint nbCellsX_MC, uint nbCellsY_MC, uint nbCellsZ_MC, double scale,
float3 Min, float sizeCell, int nbCellsX, int nbCellsY, int nbCellsZ, int* nbParticles, int* indexParticles,
double3* pos, double* radius, uint nbBodies)
{
uint indexX = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
uint indexY = __mul24(blockIdx.y,blockDim.y) + threadIdx.y;
uint indexZ = __mul24(blockIdx.z,blockDim.z) + threadIdx.z;
if(indexX<nbCellsX_MC && indexY<nbCellsY_MC && indexZ<nbCellsZ_MC)
{
double iso = 0;
uint indexC_MC = indexX + indexY*nbCellsX_MC + indexZ*nbCellsX_MC*nbCellsY_MC;
int I = floor((pos_MC[indexC_MC].x-Min.x)/sizeCell);
int J = floor((pos_MC[indexC_MC].y-Min.y)/sizeCell);
int K = floor((pos_MC[indexC_MC].z-Min.z)/sizeCell);
double3 pos1 = make_double3(pos_MC[indexC_MC].x,pos_MC[indexC_MC].y,pos_MC[indexC_MC].z);
int nbC = floor(1/scale);
for(int i=I-nbC;i<=I+nbC;i++){
for(int j=J-nbC;j<=J+nbC;j++){
for(int k=K-nbC;k<=K+nbC;k++){
if(i>=0 && j>=0 && k>=0 && i < nbCellsX && j < nbCellsY && k < nbCellsZ){
int indexC = i + j*nbCellsX + k*nbCellsX*nbCellsY;
if(nbParticles[indexC]>0){
for(uint n=0;n<nbParticles[indexC];n++){
uint indexP2 = indexParticles[indexC*MAXPARTICLES+n];
if(indexP2<nbBodies){
double3 pos2 = pos[indexP2];
double r = radius[indexP2]/scale;
double d = length(pos1 - pos2);
if(d<=r)
iso += (1/powf(r,6))*powf(r*r-d*d,3);
}
}
}
}
}
}
}
pos_MC[indexC_MC].w = iso;
}
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
__global__ void computeNormales_Vertexs_Kernel(double3* posV, double3* normales, float scale, uint nbV,
float3 Min, float sizeCell, int nbCellsX, int nbCellsY, int nbCellsZ,
int* nbParticles, int* indexParticles,
double3* pos, double* radius, double* mass, double* density, uint nbBodies)
{
uint index = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if(index<nbV)
{
int I = floor((posV[index].x-Min.x)/sizeCell);
int J = floor((posV[index].y-Min.y)/sizeCell);
int K = floor((posV[index].z-Min.z)/sizeCell);
normales[index] = make_double3(0,0,0);
double3 pos1 = posV[index];
int nbC = 2;//floor(1/scale);
for(int i=I-nbC;i<=I+nbC;i++){
for(int j=J-nbC;j<=J+nbC;j++){
for(int k=K-nbC;k<=K+nbC;k++){
if(i>=0 && j>=0 && k>=0 && i < nbCellsX && j < nbCellsY && k < nbCellsZ){
int indexC = i + j*nbCellsX + k*nbCellsX*nbCellsY;
if(nbParticles[indexC]>0){
for(uint n=0;n<nbParticles[indexC];n++){
uint indexP2 = indexParticles[indexC*MAXPARTICLES+n];
if(indexP2<nbBodies){
double3 pos2 = pos[indexP2];
double r = radius[indexP2]/scale;
for(int nbP = 0; nbP<6; nbP++){
double3 P1P2 = pos1 - pos2;
double d = length(P1P2);
if(d<=r){
// normal evaluation
double b = 32*M_PI*powf(radius[indexP2],9);
double mk = -945/b;
normales[index] = normales[index] + P1P2*
(mass[indexP2]/density[indexP2])*pow((radius[indexP2]*
radius[indexP2])-(d*d),2)*mk;
}
}
}
}
}
}
}
}
}
double lN = length(normales[index]);
if(lN<=0)
printf("Normale à 0\n");
else
normales[index] = -normales[index]/lN;
}
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
}
/****************************************************************************************************************************/
/****************************************************************************************************************************/
|
d4476e4c5369b5c1389ddddbb9271ff8fb3790cc.hip
|
// !!! This is a file automatically generated by hipify!!!
#pragma clang diagnostic push
#pragma ide diagnostic ignored "hicpp-signed-bitwise"
//#define __JETBRAINS_IDE__
// IDE indexing
#ifdef __JETBRAINS_IDE__
#define __host__
#define __device__
#define __constant__
#define __global__
#define __HIPCC__
#include <hip/device_functions.h>
#include <__clang_cuda_builtin_vars.h>
#include <__clang_cuda_intrinsics.h>
#include <__clang_cuda_math_forward_declares.h>
#include <__clang_cuda_complex_builtins.h>
#include <__clang_cuda_cmath.h>
#endif
#ifdef __INTELLISENSE__
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#define __HIPCC__ //fixes function definition in ide
//void __syncthreads();
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <device_atomic_functions.h>
#endif
#include <chrono>
#include <cstdint>
#include <thread>
#include <vector>
#include <mutex>
#include <atomic>
#include <iostream>
#include <iomanip>
#include "generator.h"
#define WRITE_BUFFER_SIZE 2048
#define MAX_NUMBER_LEN (21 + 1)
#define WRITE_BUFFER_USED (writeBufCur - writeBuffer)
#define RANDOM_MULTIPLIER_LONG 0x5DEECE66DULL
#define Random uint64_t
#define RANDOM_MULTIPLIER RANDOM_MULTIPLIER_LONG
#define RANDOM_ADDEND 0xBULL
#define RANDOM_MASK (1ULL << 48) - 1
// Random::next(bits)
__host__ __device__ inline uint32_t random_next(Random *random, int32_t bits) {
*random = (*random * RANDOM_MULTIPLIER + RANDOM_ADDEND) & RANDOM_MASK;
return (uint32_t)(*random >> (48 - bits));
}
// Random::nextInt(bound)
__host__ __device__ inline uint32_t random_next_int(Random *random, uint32_t bound) {
int32_t r = random_next(random, 31);
int32_t m = bound - 1;
if ((bound & m) == 0) {
// Could probably use __mul64hi here
r = (uint32_t)((bound * (uint64_t)r) >> 31);
} else {
r %= bound;
}
return r;
}
#define CHECK_GPU_ERR(code) gpuAssert((code), __FILE__, __LINE__)
inline void gpuAssert(hipError_t code, const char* file, int32_t line) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", hipGetErrorString(code), code, file, line);
exit(code);
}
}
// advance
#define advance_rng(rand, multiplier, addend) ((rand) = ((rand) * (multiplier) + (addend)) & RANDOM_MASK)
#define advance_16(rand) advance_rng(rand, 0x6DC260740241LL, 0xD0352014D90LL)
#define advance_m1(rand) advance_rng(rand, 0xDFE05BCB1365LL, 0x615C0E462AA9LL)
#define advance_m3759(rand) advance_rng(rand, 0x63A9985BE4ADLL, 0xA9AA8DA9BC9BLL)
#define WATERFALL_X 16
//#define WATERFALL_Y 76
#define WATERFALL_Z 10
#define TREE_X (WATERFALL_X - 5)
#define TREE_Z (WATERFALL_Z - 8)
#define TREE_HEIGHT 5
#define OTHER_TREE_COUNT 1
__device__ inline int32_t getTreeHeight(int32_t x, int32_t z) {
if (x == TREE_X && z == TREE_Z)
return TREE_HEIGHT;
if (x == WATERFALL_X - 3 && z == WATERFALL_Z + 3)
return 5;
return 0;
}
#define MODULUS (1LL << 48)
#define X_TRANSLATE 0
#define L00 7847617LL
#define L01 (-18218081LL)
#define LI00 (24667315.0 / 16)
#define LI01 (18218081.0 / 16)
#define LI10 (-4824621.0 / 16)
#define LI11 (7847617.0 / 16)
#define CONST_MIN(a, b) ((a) < (b) ? (a) : (b))
#define CONST_MIN4(a, b, c, d) CONST_MIN(CONST_MIN(a, b), CONST_MIN(c, d))
#define CONST_MAX(a, b) ((a) > (b) ? (a) : (b))
#define CONST_MAX4(a, b, c, d) CONST_MAX(CONST_MAX(a, b), CONST_MAX(c, d))
#define CONST_FLOOR(x) ((x) < (int64_t) (x) ? (int64_t) (x) - 1 : (int64_t) (x))
#define CONST_CEIL(x) ((x) == (int64_t) (x) ? (int64_t) (x) : CONST_FLOOR((x) + 1))
// for a parallelogram ABCD https://media.discordapp.net/attachments/668607204009574411/671018577561649163/unknown.png
#define B_X LI00
#define B_Z LI10
#define C_X (LI00 + LI01)
#define C_Z (LI10 + LI11)
#define D_X LI01
#define D_Z LI11
#define LOWER_X CONST_MIN4(0, B_X, C_X, D_X)
#define LOWER_Z CONST_MIN4(0, B_Z, C_Z, D_Z)
#define UPPER_X CONST_MAX4(0, B_X, C_X, D_X)
#define UPPER_Z CONST_MAX4(0, B_Z, C_Z, D_Z)
#define ORIG_SIZE_X (UPPER_X - LOWER_X + 1)
#define SIZE_X CONST_CEIL(ORIG_SIZE_X - D_X)
#define SIZE_Z CONST_CEIL(UPPER_Z - LOWER_Z + 1)
#define TOTAL_WORK_SIZE (SIZE_X * SIZE_Z)
#define MAX_TREE_ATTEMPTS 12
#define MAX_TREE_SEARCH_BACK (3 * MAX_TREE_ATTEMPTS - 3 + 16 * OTHER_TREE_COUNT)
__constant__ uint64_t search_back_multipliers[MAX_TREE_SEARCH_BACK + 1];
__constant__ uint64_t search_back_addends[MAX_TREE_SEARCH_BACK + 1];
int32_t search_back_count;
#define WORK_UNIT_SIZE (1LL << 25)
#define BLOCK_SIZE 256
__global__ void doPreWork(uint64_t offset, Random* starts, int* num_starts) {
// lattice tree position
uint64_t global_id = blockIdx.x * blockDim.x + threadIdx.x;
int64_t lattice_x = (int64_t) ((offset + global_id) % SIZE_X) + LOWER_X;
int64_t lattice_z = (int64_t) ((offset + global_id) / SIZE_X) + LOWER_Z;
lattice_z += (B_X * lattice_z < B_Z * lattice_x) * SIZE_Z;
if (D_X * lattice_z > D_Z * lattice_x) {
lattice_x += B_X;
lattice_z += B_Z;
}
lattice_x += (int64_t) (TREE_X * LI00 + TREE_Z * LI01);
lattice_z += (int64_t) (TREE_X * LI10 + TREE_Z * LI11);
auto rand = (Random)((lattice_x * L00 + lattice_z * L01 + X_TRANSLATE) % MODULUS);
advance_m1(rand);
Random tree_start = rand;
advance_m1(tree_start);
bool res = random_next(&rand, 4) == TREE_X;
res &= random_next(&rand, 4) == TREE_Z;
res &= random_next_int(&rand, 3) == (uint64_t) (TREE_HEIGHT - 4);
if (res) {
int index = atomicAdd(num_starts, 1);
starts[index] = tree_start;
}
}
__global__ void doWork(const int32_t* num_starts, const Random* tree_starts, int32_t* num_seeds, uint64_t* seeds, int32_t gpu_search_back_count) {
for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < *num_starts; i += blockDim.x * gridDim.x) {
Random tree_start = tree_starts[i];
for (int32_t treeBackCalls = 0; treeBackCalls <= gpu_search_back_count; treeBackCalls++) {
Random start = (tree_start * search_back_multipliers[treeBackCalls] + search_back_addends[treeBackCalls]) & RANDOM_MASK;
Random rand = start;
bool this_res = true;
if (random_next_int(&rand, 10) == 0)
continue;
int32_t generated_tree[16];
memset(generated_tree, 0x00, sizeof(generated_tree));
int32_t treesMatched = 0;
for (int32_t treeAttempt = 0; treeAttempt <= MAX_TREE_ATTEMPTS; treeAttempt++) {
int32_t treeX = random_next(&rand, 4);
int32_t treeZ = random_next(&rand, 4);
int32_t wantedTreeHeight = getTreeHeight(treeX, treeZ);
int32_t treeHeight = random_next_int(&rand, 3) + 4;
int32_t& boolpack = generated_tree[treeX];
const int32_t mask = 1 << (treeZ % 16);
if (treeHeight == wantedTreeHeight && !(boolpack & mask)) {
treesMatched++;
boolpack |= mask;
advance_16(rand);
}
}
this_res &= treesMatched >= OTHER_TREE_COUNT + 1;
if (this_res) {
Random start_chunk_rand = start;
advance_m3759(start_chunk_rand);
int32_t index = atomicAdd(num_seeds, 1);
seeds[index] = start_chunk_rand;
}
advance_m1(start);
}
}
}
struct GPU_Node {
int32_t* num_seeds;
uint64_t* seeds;
int32_t* num_tree_starts;
Random* tree_starts;
};
void setup_gpu_node(GPU_Node* node, int32_t gpu) {
CHECK_GPU_ERR(hipSetDevice(gpu));
CHECK_GPU_ERR(hipMallocManaged(&node->num_seeds, sizeof(*node->num_seeds)));
CHECK_GPU_ERR(hipMallocManaged(&node->seeds, (sizeof(Random)*WORK_UNIT_SIZE)));
CHECK_GPU_ERR(hipMallocManaged(&node->num_tree_starts, sizeof(*node->num_tree_starts)));
CHECK_GPU_ERR(hipMallocManaged(&node->tree_starts, (sizeof(Random)*WORK_UNIT_SIZE)));
}
#ifndef GPU_COUNT
#define GPU_COUNT 1
#endif
void calculate_search_backs() {
bool allow_search_back[MAX_TREE_SEARCH_BACK + 1];
memset(allow_search_back, false, sizeof(allow_search_back));
for (int32_t i = 0; i <= MAX_TREE_ATTEMPTS - OTHER_TREE_COUNT - 1; i++) {
allow_search_back[i * 3] = true;
}
for (int32_t tree = 0; tree < OTHER_TREE_COUNT; tree++) {
for (int32_t i = 0; i <= MAX_TREE_SEARCH_BACK - 19; i++) {
if (allow_search_back[i])
allow_search_back[i + 19] = true;
}
}
search_back_count = 0;
uint64_t multiplier = 1;
uint64_t addend = 0;
uint64_t multipliers[MAX_TREE_SEARCH_BACK + 1];
uint64_t addends[MAX_TREE_SEARCH_BACK + 1];
for (int32_t i = 0; i <= MAX_TREE_SEARCH_BACK; i++) {
if (allow_search_back[i]) {
int32_t index = search_back_count++;
multipliers[index] = multiplier;
addends[index] = addend;
}
multiplier = (multiplier * 0xDFE05BCB1365LL) & RANDOM_MASK;
addend = (0xDFE05BCB1365LL * addend + 0x615C0E462AA9LL) & RANDOM_MASK;
}
for (int32_t gpu = 0; gpu < GPU_COUNT; gpu++) {
CHECK_GPU_ERR(hipSetDevice(gpu));
CHECK_GPU_ERR(hipMemcpyToSymbol(search_back_multipliers, &multipliers, search_back_count * sizeof(*multipliers)));
CHECK_GPU_ERR(hipMemcpyToSymbol(search_back_addends, &addends, search_back_count * sizeof(*addends)));
}
}
#ifndef OFFSET
#define OFFSET 0
#endif
int main(int argc, char *argv[]) {
random_math::JavaRand::init();
generator::ChunkGenerator::init();
auto *nodes = (GPU_Node*)malloc(sizeof(GPU_Node) * GPU_COUNT);
std::cout << "Searching " << TOTAL_WORK_SIZE << " total seeds...\n";
calculate_search_backs();
FILE* out_file = fopen("chunk_seeds.txt", "w");
for (int32_t i = 0; i < GPU_COUNT; i++) {
setup_gpu_node(&nodes[i], i);
}
std::vector<std::thread> threads(std::thread::hardware_concurrency() - 4);
std::mutex fileMutex;
std::atomic<uint64_t> count(0);
auto lastIteration = std::chrono::system_clock::now();
auto startTime = std::chrono::system_clock::now();
long long* tempStorage = nullptr;
uint64_t arraySize = 0;
std::cout << "Using " << threads.size() << " threads for cpu work\n";
for (uint64_t offset = OFFSET; offset < TOTAL_WORK_SIZE;) {
for (int32_t gpu_index = 0; gpu_index < GPU_COUNT; gpu_index++) {
CHECK_GPU_ERR(hipSetDevice(gpu_index));
*nodes[gpu_index].num_tree_starts = 0;
hipLaunchKernelGGL(( doPreWork), dim3(WORK_UNIT_SIZE / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, offset, nodes[gpu_index].tree_starts, nodes[gpu_index].num_tree_starts);
offset += WORK_UNIT_SIZE;
}
for (int32_t gpu_index = 0; gpu_index < GPU_COUNT; gpu_index++) {
CHECK_GPU_ERR(hipSetDevice(gpu_index));
CHECK_GPU_ERR(hipDeviceSynchronize());
}
for (int32_t gpu_index = 0; gpu_index < GPU_COUNT; gpu_index++) {
CHECK_GPU_ERR(hipSetDevice(gpu_index));
*nodes[gpu_index].num_seeds = 0;
hipLaunchKernelGGL(( doWork), dim3(WORK_UNIT_SIZE / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, nodes[gpu_index].num_tree_starts, nodes[gpu_index].tree_starts, nodes[gpu_index].num_seeds, nodes[gpu_index].seeds, search_back_count);
}
static auto threadFunc = [&](size_t start, size_t end) {
int32_t myCount = 0;
char writeBuffer[2048];
char* writeBufCur = writeBuffer;
for (int32_t j = start; j < end; ++j) {
if (WRITE_BUFFER_USED + MAX_NUMBER_LEN >= WRITE_BUFFER_SIZE) {
*writeBufCur++ = 0;
{
std::lock_guard<std::mutex> lock(fileMutex);
fprintf(out_file, "%s", writeBuffer);
fflush(out_file);
}
writeBufCur = writeBuffer;
}
if (generator::ChunkGenerator::populate(tempStorage[j], X_TRANSLATE + 16)) {
myCount++;
writeBufCur += snprintf(writeBufCur, MAX_NUMBER_LEN, "%lld\n", tempStorage[j]);
}
}
// Finish up - write remainder and update atomic
{
std::lock_guard<std::mutex> lock(fileMutex);
fprintf(out_file, "%s", writeBuffer);
fflush(out_file);
}
count += myCount;
};
int32_t chunkSize = arraySize / threads.size();
for(size_t i = 0; i < threads.size(); i++)
threads[i] = std::thread(threadFunc, i * chunkSize, (i == (threads.size() - 1)) ? arraySize : ((i + 1) * chunkSize));
for(std::thread& x : threads)
x.join();
fflush(out_file);
free(tempStorage);
tempStorage = (long long*)malloc(sizeof(long long));
arraySize = 0;
for (int32_t gpu_index = 0; gpu_index < GPU_COUNT; gpu_index++) {
CHECK_GPU_ERR(hipSetDevice(gpu_index));
CHECK_GPU_ERR(hipDeviceSynchronize());
tempStorage = (long long*) realloc(tempStorage, (*nodes[gpu_index].num_seeds + arraySize) * sizeof(long long));
for (int32_t i = 0, e = *nodes[gpu_index].num_seeds; i < e; i++) {
tempStorage[arraySize+i]=nodes[gpu_index].seeds[i];
}
arraySize += *nodes[gpu_index].num_seeds;
}
auto iterFinish = std::chrono::system_clock::now();
std::chrono::duration<double> iterationTime = iterFinish - lastIteration;
std::chrono::duration<double> elapsedTime = iterFinish - startTime;
lastIteration = iterFinish;
uint64_t numSearched = offset + WORK_UNIT_SIZE * GPU_COUNT - OFFSET;
double speed = numSearched / elapsedTime.count() / 1000000;
double progress = (double)numSearched / (double)TOTAL_WORK_SIZE * 100.0;
double estimatedTime = (double)(TOTAL_WORK_SIZE - numSearched) / speed / 1000000;
uint64_t curCount = count;
char suffix = 's';
if (estimatedTime >= 3600) {
suffix = 'h';
estimatedTime /= 3600.0;
} else if (estimatedTime >= 60) {
suffix = 'm';
estimatedTime /= 60.0;
}
if (progress >= 100.0) {
estimatedTime = 0.0;
suffix = 's';
}
std::cout << "Searched: " << std::setw(13) << numSearched << " seeds. Found: " << std::setw(13) << count.load() << " matches. Uptime: " <<
std::fixed << std::setprecision(1) << elapsedTime.count() << "s. Speed: " << std::fixed <<
std::setprecision(2) << speed << "m seeds/s. Completion: " << std::setprecision(2) << progress <<
"%. ETA: " << std::fixed << std::setprecision(2) << estimatedTime << suffix << ".\n";
}
// Last batch to do
for (int32_t j = 0; j < arraySize; ++j) {
if (generator::ChunkGenerator::populate(tempStorage[j], X_TRANSLATE + 16)) {
fprintf(out_file, "%lld\n", tempStorage[j]);
count++;
}
}
fflush(out_file);
free(tempStorage);
fclose(out_file);
}
|
d4476e4c5369b5c1389ddddbb9271ff8fb3790cc.cu
|
#pragma clang diagnostic push
#pragma ide diagnostic ignored "hicpp-signed-bitwise"
//#define __JETBRAINS_IDE__
// IDE indexing
#ifdef __JETBRAINS_IDE__
#define __host__
#define __device__
#define __constant__
#define __global__
#define __CUDACC__
#include <device_functions.h>
#include <__clang_cuda_builtin_vars.h>
#include <__clang_cuda_intrinsics.h>
#include <__clang_cuda_math_forward_declares.h>
#include <__clang_cuda_complex_builtins.h>
#include <__clang_cuda_cmath.h>
#endif
#ifdef __INTELLISENSE__
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#define __CUDACC__ //fixes function definition in ide
//void __syncthreads();
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <device_atomic_functions.h>
#endif
#include <chrono>
#include <cstdint>
#include <thread>
#include <vector>
#include <mutex>
#include <atomic>
#include <iostream>
#include <iomanip>
#include "generator.h"
#define WRITE_BUFFER_SIZE 2048
#define MAX_NUMBER_LEN (21 + 1)
#define WRITE_BUFFER_USED (writeBufCur - writeBuffer)
#define RANDOM_MULTIPLIER_LONG 0x5DEECE66DULL
#define Random uint64_t
#define RANDOM_MULTIPLIER RANDOM_MULTIPLIER_LONG
#define RANDOM_ADDEND 0xBULL
#define RANDOM_MASK (1ULL << 48) - 1
// Random::next(bits)
__host__ __device__ inline uint32_t random_next(Random *random, int32_t bits) {
*random = (*random * RANDOM_MULTIPLIER + RANDOM_ADDEND) & RANDOM_MASK;
return (uint32_t)(*random >> (48 - bits));
}
// Random::nextInt(bound)
__host__ __device__ inline uint32_t random_next_int(Random *random, uint32_t bound) {
int32_t r = random_next(random, 31);
int32_t m = bound - 1;
if ((bound & m) == 0) {
// Could probably use __mul64hi here
r = (uint32_t)((bound * (uint64_t)r) >> 31);
} else {
r %= bound;
}
return r;
}
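// Illustrative sketch (hypothetical helper, not part of the original source): random_next and
// random_next_int are __host__ __device__, so the same 48-bit LCG can be stepped on the CPU,
// e.g. to spot-check a candidate internal state found by the GPU.
inline uint32_t host_roll_tree_x(Random state) {
Random rand = state & RANDOM_MASK; // keep only the low 48 bits of state
return random_next(&rand, 4); // same 4-bit draw the kernels compare against the tree X coordinate
}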
#define CHECK_GPU_ERR(code) gpuAssert((code), __FILE__, __LINE__)
inline void gpuAssert(cudaError_t code, const char* file, int32_t line) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", cudaGetErrorString(code), code, file, line);
exit(code);
}
}
// advance
#define advance_rng(rand, multiplier, addend) ((rand) = ((rand) * (multiplier) + (addend)) & RANDOM_MASK)
#define advance_16(rand) advance_rng(rand, 0x6DC260740241LL, 0xD0352014D90LL)
#define advance_m1(rand) advance_rng(rand, 0xDFE05BCB1365LL, 0x615C0E462AA9LL)
#define advance_m3759(rand) advance_rng(rand, 0x63A9985BE4ADLL, 0xA9AA8DA9BC9BLL)
#define WATERFALL_X 16
//#define WATERFALL_Y 76
#define WATERFALL_Z 10
#define TREE_X (WATERFALL_X - 5)
#define TREE_Z (WATERFALL_Z - 8)
#define TREE_HEIGHT 5
#define OTHER_TREE_COUNT 1
__device__ inline int32_t getTreeHeight(int32_t x, int32_t z) {
if (x == TREE_X && z == TREE_Z)
return TREE_HEIGHT;
if (x == WATERFALL_X - 3 && z == WATERFALL_Z + 3)
return 5;
return 0;
}
#define MODULUS (1LL << 48)
#define X_TRANSLATE 0
#define L00 7847617LL
#define L01 (-18218081LL)
#define LI00 (24667315.0 / 16)
#define LI01 (18218081.0 / 16)
#define LI10 (-4824621.0 / 16)
#define LI11 (7847617.0 / 16)
#define CONST_MIN(a, b) ((a) < (b) ? (a) : (b))
#define CONST_MIN4(a, b, c, d) CONST_MIN(CONST_MIN(a, b), CONST_MIN(c, d))
#define CONST_MAX(a, b) ((a) > (b) ? (a) : (b))
#define CONST_MAX4(a, b, c, d) CONST_MAX(CONST_MAX(a, b), CONST_MAX(c, d))
#define CONST_FLOOR(x) ((x) < (int64_t) (x) ? (int64_t) (x) - 1 : (int64_t) (x))
#define CONST_CEIL(x) ((x) == (int64_t) (x) ? (int64_t) (x) : CONST_FLOOR((x) + 1))
// for a parallelogram ABCD https://media.discordapp.net/attachments/668607204009574411/671018577561649163/unknown.png
#define B_X LI00
#define B_Z LI10
#define C_X (LI00 + LI01)
#define C_Z (LI10 + LI11)
#define D_X LI01
#define D_Z LI11
#define LOWER_X CONST_MIN4(0, B_X, C_X, D_X)
#define LOWER_Z CONST_MIN4(0, B_Z, C_Z, D_Z)
#define UPPER_X CONST_MAX4(0, B_X, C_X, D_X)
#define UPPER_Z CONST_MAX4(0, B_Z, C_Z, D_Z)
#define ORIG_SIZE_X (UPPER_X - LOWER_X + 1)
#define SIZE_X CONST_CEIL(ORIG_SIZE_X - D_X)
#define SIZE_Z CONST_CEIL(UPPER_Z - LOWER_Z + 1)
#define TOTAL_WORK_SIZE (SIZE_X * SIZE_Z)
#define MAX_TREE_ATTEMPTS 12
#define MAX_TREE_SEARCH_BACK (3 * MAX_TREE_ATTEMPTS - 3 + 16 * OTHER_TREE_COUNT)
__constant__ uint64_t search_back_multipliers[MAX_TREE_SEARCH_BACK + 1];
__constant__ uint64_t search_back_addends[MAX_TREE_SEARCH_BACK + 1];
int32_t search_back_count;
#define WORK_UNIT_SIZE (1LL << 25)
#define BLOCK_SIZE 256
__global__ void doPreWork(uint64_t offset, Random* starts, int* num_starts) {
// lattice tree position
uint64_t global_id = blockIdx.x * blockDim.x + threadIdx.x;
int64_t lattice_x = (int64_t) ((offset + global_id) % SIZE_X) + LOWER_X;
int64_t lattice_z = (int64_t) ((offset + global_id) / SIZE_X) + LOWER_Z;
lattice_z += (B_X * lattice_z < B_Z * lattice_x) * SIZE_Z;
if (D_X * lattice_z > D_Z * lattice_x) {
lattice_x += B_X;
lattice_z += B_Z;
}
lattice_x += (int64_t) (TREE_X * LI00 + TREE_Z * LI01);
lattice_z += (int64_t) (TREE_X * LI10 + TREE_Z * LI11);
auto rand = (Random)((lattice_x * L00 + lattice_z * L01 + X_TRANSLATE) % MODULUS);
advance_m1(rand);
Random tree_start = rand;
advance_m1(tree_start);
bool res = random_next(&rand, 4) == TREE_X;
res &= random_next(&rand, 4) == TREE_Z;
res &= random_next_int(&rand, 3) == (uint64_t) (TREE_HEIGHT - 4);
if (res) {
int index = atomicAdd(num_starts, 1);
starts[index] = tree_start;
}
}
__global__ void doWork(const int32_t* num_starts, const Random* tree_starts, int32_t* num_seeds, uint64_t* seeds, int32_t gpu_search_back_count) {
for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < *num_starts; i += blockDim.x * gridDim.x) {
Random tree_start = tree_starts[i];
for (int32_t treeBackCalls = 0; treeBackCalls <= gpu_search_back_count; treeBackCalls++) {
Random start = (tree_start * search_back_multipliers[treeBackCalls] + search_back_addends[treeBackCalls]) & RANDOM_MASK;
Random rand = start;
bool this_res = true;
if (random_next_int(&rand, 10) == 0)
continue;
int32_t generated_tree[16];
memset(generated_tree, 0x00, sizeof(generated_tree));
int32_t treesMatched = 0;
for (int32_t treeAttempt = 0; treeAttempt <= MAX_TREE_ATTEMPTS; treeAttempt++) {
int32_t treeX = random_next(&rand, 4);
int32_t treeZ = random_next(&rand, 4);
int32_t wantedTreeHeight = getTreeHeight(treeX, treeZ);
int32_t treeHeight = random_next_int(&rand, 3) + 4;
int32_t& boolpack = generated_tree[treeX];
const int32_t mask = 1 << (treeZ % 16);
if (treeHeight == wantedTreeHeight && !(boolpack & mask)) {
treesMatched++;
boolpack |= mask;
advance_16(rand);
}
}
this_res &= treesMatched >= OTHER_TREE_COUNT + 1;
if (this_res) {
Random start_chunk_rand = start;
advance_m3759(start_chunk_rand);
int32_t index = atomicAdd(num_seeds, 1);
seeds[index] = start_chunk_rand;
}
advance_m1(start);
}
}
}
struct GPU_Node {
int32_t* num_seeds;
uint64_t* seeds;
int32_t* num_tree_starts;
Random* tree_starts;
};
void setup_gpu_node(GPU_Node* node, int32_t gpu) {
CHECK_GPU_ERR(cudaSetDevice(gpu));
CHECK_GPU_ERR(cudaMallocManaged(&node->num_seeds, sizeof(*node->num_seeds)));
CHECK_GPU_ERR(cudaMallocManaged(&node->seeds, (sizeof(Random)*WORK_UNIT_SIZE)));
CHECK_GPU_ERR(cudaMallocManaged(&node->num_tree_starts, sizeof(*node->num_tree_starts)));
CHECK_GPU_ERR(cudaMallocManaged(&node->tree_starts, (sizeof(Random)*WORK_UNIT_SIZE)));
}
#ifndef GPU_COUNT
#define GPU_COUNT 1
#endif
void calculate_search_backs() {
bool allow_search_back[MAX_TREE_SEARCH_BACK + 1];
memset(allow_search_back, false, sizeof(allow_search_back));
for (int32_t i = 0; i <= MAX_TREE_ATTEMPTS - OTHER_TREE_COUNT - 1; i++) {
allow_search_back[i * 3] = true;
}
for (int32_t tree = 0; tree < OTHER_TREE_COUNT; tree++) {
for (int32_t i = 0; i <= MAX_TREE_SEARCH_BACK - 19; i++) {
if (allow_search_back[i])
allow_search_back[i + 19] = true;
}
}
search_back_count = 0;
uint64_t multiplier = 1;
uint64_t addend = 0;
uint64_t multipliers[MAX_TREE_SEARCH_BACK + 1];
uint64_t addends[MAX_TREE_SEARCH_BACK + 1];
for (int32_t i = 0; i <= MAX_TREE_SEARCH_BACK; i++) {
if (allow_search_back[i]) {
int32_t index = search_back_count++;
multipliers[index] = multiplier;
addends[index] = addend;
}
multiplier = (multiplier * 0xDFE05BCB1365LL) & RANDOM_MASK;
addend = (0xDFE05BCB1365LL * addend + 0x615C0E462AA9LL) & RANDOM_MASK;
}
for (int32_t gpu = 0; gpu < GPU_COUNT; gpu++) {
CHECK_GPU_ERR(cudaSetDevice(gpu));
CHECK_GPU_ERR(cudaMemcpyToSymbol(search_back_multipliers, &multipliers, search_back_count * sizeof(*multipliers)));
CHECK_GPU_ERR(cudaMemcpyToSymbol(search_back_addends, &addends, search_back_count * sizeof(*addends)));
}
}
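// Note (illustrative): one LCG step is the affine map s -> (a*s + c) mod 2^48, and affine maps
// compose into affine maps, so "k steps backwards" collapses into a single (multiplier, addend)
// pair. calculate_search_backs() builds those pairs by repeatedly composing the one-step-back
// map (0xDFE05BCB1365, 0x615C0E462AA9), which is why doWork can reach any allowed search-back
// offset with a single multiply-add on the tree start state.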
#ifndef OFFSET
#define OFFSET 0
#endif
int main(int argc, char *argv[]) {
random_math::JavaRand::init();
generator::ChunkGenerator::init();
auto *nodes = (GPU_Node*)malloc(sizeof(GPU_Node) * GPU_COUNT);
std::cout << "Searching " << TOTAL_WORK_SIZE << " total seeds...\n";
calculate_search_backs();
FILE* out_file = fopen("chunk_seeds.txt", "w");
for (int32_t i = 0; i < GPU_COUNT; i++) {
setup_gpu_node(&nodes[i], i);
}
std::vector<std::thread> threads(std::thread::hardware_concurrency() - 4);
std::mutex fileMutex;
std::atomic<uint64_t> count(0);
auto lastIteration = std::chrono::system_clock::now();
auto startTime = std::chrono::system_clock::now();
long long* tempStorage = nullptr;
uint64_t arraySize = 0;
std::cout << "Using " << threads.size() << " threads for cpu work\n";
for (uint64_t offset = OFFSET; offset < TOTAL_WORK_SIZE;) {
for (int32_t gpu_index = 0; gpu_index < GPU_COUNT; gpu_index++) {
CHECK_GPU_ERR(cudaSetDevice(gpu_index));
*nodes[gpu_index].num_tree_starts = 0;
doPreWork<<<WORK_UNIT_SIZE / BLOCK_SIZE, BLOCK_SIZE>>>(offset, nodes[gpu_index].tree_starts, nodes[gpu_index].num_tree_starts);
offset += WORK_UNIT_SIZE;
}
for (int32_t gpu_index = 0; gpu_index < GPU_COUNT; gpu_index++) {
CHECK_GPU_ERR(cudaSetDevice(gpu_index));
CHECK_GPU_ERR(cudaDeviceSynchronize());
}
for (int32_t gpu_index = 0; gpu_index < GPU_COUNT; gpu_index++) {
CHECK_GPU_ERR(cudaSetDevice(gpu_index));
*nodes[gpu_index].num_seeds = 0;
doWork<<<WORK_UNIT_SIZE / BLOCK_SIZE, BLOCK_SIZE>>>(nodes[gpu_index].num_tree_starts, nodes[gpu_index].tree_starts, nodes[gpu_index].num_seeds, nodes[gpu_index].seeds, search_back_count);
}
static auto threadFunc = [&](size_t start, size_t end) {
int32_t myCount = 0;
char writeBuffer[2048];
char* writeBufCur = writeBuffer;
for (int32_t j = start; j < end; ++j) {
if (WRITE_BUFFER_USED + MAX_NUMBER_LEN >= WRITE_BUFFER_SIZE) {
*writeBufCur++ = 0;
{
std::lock_guard<std::mutex> lock(fileMutex);
fprintf(out_file, "%s", writeBuffer);
fflush(out_file);
}
writeBufCur = writeBuffer;
}
if (generator::ChunkGenerator::populate(tempStorage[j], X_TRANSLATE + 16)) {
myCount++;
writeBufCur += snprintf(writeBufCur, MAX_NUMBER_LEN, "%lld\n", tempStorage[j]);
}
}
// Finish up - write remainder and update atomic
{
std::lock_guard<std::mutex> lock(fileMutex);
fprintf(out_file, "%s", writeBuffer);
fflush(out_file);
}
count += myCount;
};
int32_t chunkSize = arraySize / threads.size();
for(size_t i = 0; i < threads.size(); i++)
threads[i] = std::thread(threadFunc, i * chunkSize, (i == (threads.size() - 1)) ? arraySize : ((i + 1) * chunkSize));
for(std::thread& x : threads)
x.join();
fflush(out_file);
free(tempStorage);
tempStorage = (long long*)malloc(sizeof(long long));
arraySize = 0;
for (int32_t gpu_index = 0; gpu_index < GPU_COUNT; gpu_index++) {
CHECK_GPU_ERR(cudaSetDevice(gpu_index));
CHECK_GPU_ERR(cudaDeviceSynchronize());
tempStorage = (long long*) realloc(tempStorage, (*nodes[gpu_index].num_seeds + arraySize) * sizeof(long long));
for (int32_t i = 0, e = *nodes[gpu_index].num_seeds; i < e; i++) {
tempStorage[arraySize+i]=nodes[gpu_index].seeds[i];
}
arraySize += *nodes[gpu_index].num_seeds;
}
auto iterFinish = std::chrono::system_clock::now();
std::chrono::duration<double> iterationTime = iterFinish - lastIteration;
std::chrono::duration<double> elapsedTime = iterFinish - startTime;
lastIteration = iterFinish;
uint64_t numSearched = offset + WORK_UNIT_SIZE * GPU_COUNT - OFFSET;
double speed = numSearched / elapsedTime.count() / 1000000;
double progress = (double)numSearched / (double)TOTAL_WORK_SIZE * 100.0;
double estimatedTime = (double)(TOTAL_WORK_SIZE - numSearched) / speed / 1000000;
uint64_t curCount = count;
char suffix = 's';
if (estimatedTime >= 3600) {
suffix = 'h';
estimatedTime /= 3600.0;
} else if (estimatedTime >= 60) {
suffix = 'm';
estimatedTime /= 60.0;
}
if (progress >= 100.0) {
estimatedTime = 0.0;
suffix = 's';
}
std::cout << "Searched: " << std::setw(13) << numSearched << " seeds. Found: " << std::setw(13) << count.load() << " matches. Uptime: " <<
std::fixed << std::setprecision(1) << elapsedTime.count() << "s. Speed: " << std::fixed <<
std::setprecision(2) << speed << "m seeds/s. Completion: " << std::setprecision(2) << progress <<
"%. ETA: " << std::fixed << std::setprecision(2) << estimatedTime << suffix << ".\n";
}
// Last batch to do
for (int32_t j = 0; j < arraySize; ++j) {
if (generator::ChunkGenerator::populate(tempStorage[j], X_TRANSLATE + 16)) {
fprintf(out_file, "%lld\n", tempStorage[j]);
count++;
}
}
fflush(out_file);
free(tempStorage);
fclose(out_file);
}
|
bd2ba8f61927141d2368e366e5fb76e4733ae724.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <bits/stdc++.h>
#include <unistd.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include<time.h>
using namespace std;
#define ni 24 // number of neurons in input layer
#define nh 20 // number of neurons in hidden layer
#define no 4 // number of neurons in output layer
#define width 30 // width of the game boundary
#define height 20 // height of the game boundary
#define max_snake_length 100 // maximum length of the snake
#define population_size 4096
#define natural_selection_rate 0.4
#define mutation_rate 0.01
#define generations 10000
#define negative_reward -150
#define positive_reward 500
#define max_total_steps 1000
// randomly flip the sign of the neural network parameters so roughly half become negative
__global__ void initialise_nn(float *nns, unsigned int *random_int){
int id = blockIdx.x * blockDim.x + threadIdx.x;
nns[id] = (random_int[id] % 2) ? nns[id] : -nns[id];
}
// set the input for the neural network; the input size is 24, i.e. it looks for the wall,
// its body and the fruit in all 8 directions
__device__ void set_input(float input[], int x, int y, int fruitx, int fruity,
int tailx[], int taily[], int ntail){
for(int i=0;i<ni;i++)
input[i] = 0;
// check up direction
// check food
if(fruitx == x && fruity < y)
input[0] = 1;
// check body
for(int i=0;i<ntail;i++){
if(tailx[i] == x && taily[i] < y){
input[1] = 1;
break;
}
}
// check wall distance
if(y != 0)
input[2] = 1 / (float)y;
// check down direction
// check food
if(fruitx == x && fruity > y)
input[3] = 1;
// check body
for(int i=0;i<ntail;i++){
if(tailx[i] == x && taily[i] > y){
input[4] = 1;
break;
}
}
// check wall distance
if(height-y != 0)
input[5] = 1 / (float)(height-y);
// check right direction
// check food
if(fruity == y && fruitx > x)
input[6] = 1;
// check body
for(int i=0;i<ntail;i++){
if(taily[i] == y && tailx[i] > x){
input[7] = 1;
break;
}
}
// check wall distance
if(width-x != 0)
input[8] = 1 / (float)(width-x); // float division, matching the other wall-distance inputs
// check left direction
// check food
if(fruity == y && fruitx < x)
input[9] = 1;
// check body
for(int i=0;i<ntail;i++){
if(taily[i] == y && tailx[i] < x){
input[10] = 1;
break;
}
}
// check wall distance
if(x != 0)
input[11] = 1 / (float)x;
//check north-east direction
int tempx = x, tempy = y;
bool found_food = false, found_body = false;
// check food and body
while(tempx < width && tempy > 0){
tempx++;
tempy--;
if(!found_food && tempx == fruitx && tempy == fruity){
input[12] = 1;
found_food = true;
}
if(!found_body){
for(int i=0;i<ntail;i++){
if(tempx == tailx[i] && tempy == taily[i]){
input[13] = 1;
found_body = true;
break;
}
}
}
if(found_body && found_food)
break;
}
// check wall distance
int min_value = min(width-x,y);
float distance = sqrt(pow(min_value,2)*2);
if(distance != 0)
input[14] = 1 / distance;
//check north-west direction
tempx = x, tempy = y;
found_food = false, found_body = false;
// check food and body
while(tempx > 0 && tempy > 0){
tempx--;
tempy--;
if(!found_food && tempx == fruitx && tempy == fruity){
input[15] = 1;
found_food = true;
}
if(!found_body){
for(int i=0;i<ntail;i++){
if(tempx == tailx[i] && tempy == taily[i]){
input[16] = 1;
found_body = true;
break;
}
}
}
if(found_body && found_food)
break;
}
// check wall distance
min_value = min(x,y);
distance = sqrt(pow((min_value),2)*2);
if(distance != 0)
input[17] = 1 / distance;
//check south-west direction
tempx = x, tempy = y;
found_food = false, found_body = false;
// check food and body
while(tempx > 0 && tempy < height){
tempx--;
tempy++;
if(!found_food && tempx == fruitx && tempy == fruity){
input[18] = 1;
found_food = true;
}
if(!found_body){
for(int i=0;i<ntail;i++){
if(tempx == tailx[i] && tempy == taily[i]){
input[19] = 1;
found_body = true;
break;
}
}
}
if(found_body && found_food)
break;
}
// check wall distance
min_value = min(x,height-y);
distance = sqrt(pow((min_value),2)*2);
if(distance != 0)
input[20] = 1 / distance;
//check south-east direction
tempx = x, tempy = y;
found_food = false, found_body = false;
// check food and body
while(tempx < width && tempy < height){
tempx++;
tempy++;
if(!found_food && tempx == fruitx && tempy == fruity){
input[21] = 1;
found_food = true;
}
if(!found_body){
for(int i=0;i<ntail;i++){
if(tempx == tailx[i] && tempy == taily[i]){
input[22] = 1;
found_body = true;
break;
}
}
}
if(found_body && found_food)
break;
}
// check wall distance
min_value = min(width-x,height-y);
distance = sqrt(pow((min_value),2)*2);
if(distance != 0)
input[23] = 1 / distance;
}
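// Summary of the 24 inputs filled above (three values per direction: food seen, body seen,
// 1/distance-to-wall): [0..2] up, [3..5] down, [6..8] right, [9..11] left,
// [12..14] north-east, [15..17] north-west, [18..20] south-west, [21..23] south-east.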
// function to calculate value of neuron in a layer during forward function
__device__ float forward(float input[], float weight[], float bias[], int len_i, int len_o, int index){
float output = 0;
for(int i=0;i<len_i;i++){
output += weight[i*len_o+index] * input[i];
}
output += bias[index];
// sigmoid function
output = 1.0 / (1.0 + exp(-output));
return output;
}
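// Illustrative host-side reference (hypothetical, not part of the original source): the weight
// matrix is laid out row-major as [len_i][len_o], so output neuron `index` reads column `index`.
inline float forward_host(const float* input, const float* weight, const float* bias,
int len_i, int len_o, int index){
float output = bias[index];
for(int i = 0; i < len_i; i++)
output += weight[i*len_o + index] * input[i];
return 1.0f / (1.0f + expf(-output)); // same sigmoid as the device version above
}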
// play the game with each block corresponding to one neural network and each thread corresponding to a parameter of neural network
__global__ void play_game(float *nns, float *fitness, unsigned int *random_int_fruitx, unsigned int *random_int_fruity,
int parameter_size){
int snake_id = blockIdx.x;
int parameter_id = threadIdx.x;
// neural network of a particular id
extern __shared__ float nn[];
nn[parameter_id] = nns[snake_id*parameter_size+parameter_id];
__syncthreads();
// weights and biases of the neural network
float *w1 = &nn[0];
float *b1 = &nn[ni*nh];
float *w2 = &nn[ni*nh+nh];
float *b2 = &nn[ni*nh+nh+nh*no];
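// Layout of the shared nn[] buffer (parameter_size floats per block):
// w1: ni*nh = 480, b1: nh = 20, w2: nh*no = 80, b2: no = 4 -> 584 floats in total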
/* setup the game */
// STOP: 0, LEFT: 1, RIGHT: 2, UP: 3, DOWN: 4
// next direction to take
__shared__ int dir;
dir = 0;
// next direction to take if the first value is not possible
__shared__ int dir_next;
dir_next = 0;
// last direction taken
__shared__ int last_dir;
last_dir = 0;
// position of head
__shared__ int x;
x = width/2;
__shared__ int y;
y = height/2;
// position of fruit
__shared__ int fruitx;
__shared__ int fruity;
__shared__ int fruit_index;
fruit_index = snake_id * max_snake_length;
fruitx = random_int_fruitx[fruit_index] % width;
fruity = random_int_fruity[fruit_index] % height;
fruit_index++;
//snake length
__shared__ int ntail;
ntail = 3;
// array to store snake body
__shared__ int tailx[max_snake_length];
__shared__ int taily[max_snake_length];
// local variables
int total_steps = 200;
float total_reward = 0;
float reward = 0;
int steps = 0;
// array to store input, hidden and output layer
__shared__ float input[ni];
__shared__ float hidden_output[nh];
__shared__ float output[no];
// flag used to exit all the threads in a block
__shared__ int break_flag;
break_flag = 0;
// play until the snake dies
while(true){
// set the input for the game
set_input(input,x,y,fruitx,fruity,tailx,taily,ntail);
// forward function for the first layer
if(parameter_id < nh){
hidden_output[parameter_id] = forward(input,w1,b1,ni,nh,parameter_id);
}
__syncthreads();
// forward function for the second layer and thus get the output layer
if(parameter_id < no){
output[parameter_id] = forward(hidden_output,w2,b2,nh,no,parameter_id);
}
__syncthreads();
// thread id = 0 executes the logic of the game
if(parameter_id == 0){
// find the two best directions to be taken
float max_value = output[0];
float max_index = 0;
for(int i=1;i<no;i++){
if(output[i] > max_value){
max_value = output[i];
max_index = i;
}
}
dir = max_index + 1;
float max_value1 = INT16_MIN;
float max_index1 = -1;
for(int i=0;i<no;i++){
if(i != max_index && output[i] > max_value1){
max_value1 = output[i];
max_index1 = i;
}
}
dir_next = max_index1 + 1;
// update the snake body
int prevx = tailx[0];
int prevy = taily[0];
int prev2x, prev2y;
tailx[0] = x;
taily[0] = y;
for(int i=1;i<ntail;i++)
{
prev2x = tailx[i];
prev2y = taily[i];
tailx[i] = prevx;
taily[i] = prevy;
prevx = prev2x;
prevy = prev2y;
}
// move snake in the next direction
switch(dir)
{
case 1:
if(last_dir != 2)
x--;
else{
if(dir_next == 2)
x++;
else if(dir_next == 3)
y--;
else if(dir_next == 4)
y++;
}
break;
case 2:
if(last_dir != 1)
x++;
else{
if(dir_next == 1)
x--;
else if(dir_next == 3)
y--;
else if(dir_next == 4)
y++;
}
break;
case 3:
if(last_dir != 4)
y--;
else{
if(dir_next == 1)
x--;
else if(dir_next == 2)
x++;
else if(dir_next == 4)
y++;
}
break;
case 4:
if(last_dir != 3)
y++;
else{
if(dir_next == 1)
x--;
else if(dir_next == 2)
x++;
else if(dir_next == 3)
y--;
}
break;
}
last_dir = dir;
// snake hits the wall
if(x >= width || x < 0 || y >= height || y < 0)
{
reward = negative_reward;
break_flag = 1;
}
// snake hits its body
for(int i =0; i<ntail;i++)
{
if(tailx[i]==x && taily[i]==y)
{
reward = negative_reward;
break_flag = 1;
}
}
// snake eats the fruit
if(x==fruitx && y==fruity)
{
fruitx = random_int_fruitx[fruit_index] % width;
fruity = random_int_fruity[fruit_index] % height;
fruit_index++;
ntail++;
reward = positive_reward;
}
total_reward += reward;
steps += 1;
if(reward < 0){
break_flag = 1;
}
// update total steps the snake can take (before the per-step reward is reset)
if(reward > 0)
total_steps = (total_steps+100 > max_total_steps) ? max_total_steps : total_steps + 100;
reward = 0;
if(steps > total_steps){
break_flag = 1;
}
}
__syncthreads();
// exit while loop for all the threads in the block if the snake dies
if(break_flag)
break;
}
__syncthreads();
// update the fitness score for the game
if(parameter_id == 0){
fitness[snake_id] = total_reward + steps;
}
}
// update the device array to store top neural networks which will be used for crossover
__global__ void select_top(float *nns, float *nns_new, int *indices){
int id1 = blockIdx.x * blockDim.x + threadIdx.x;
int id2 = indices[blockIdx.x] * blockDim.x + threadIdx.x;
nns_new[id1] = nns[id2];
}
// initialise the device array for indices
__global__ void intialise_indices(int *indices){
int id = blockIdx.x * blockDim.x + threadIdx.x;
indices[id] = id;
}
// crossover the top neural networks to generate new neural networks for the generation population
__global__ void crossover(float *nns, float *fitness, unsigned int *random_int1, unsigned int *random_int2, int top){
int snake_id = blockIdx.x;
int parameter_id = threadIdx.x;
// select parents using Roulette Wheel Selection method
int fitness_sum = 0;
for(int i=0;i<population_size;i++)
fitness_sum += fitness[i];
// select parent 1
int parent1 = 0;
if(fitness_sum != 0){
int rand_num = random_int1[snake_id] % fitness_sum;
int sum = 0;
for(int i=0;i<population_size;i++){
sum += fitness[i];
if(sum > rand_num){
parent1 = i;
break;
}
}
}
// select parent 2
int parent2 = 0;
if(fitness_sum != 0){
int rand_num = random_int2[snake_id + blockDim.x] % fitness_sum;
int sum = 0;
for(int i=0;i<population_size;i++){
sum += fitness[i];
if(sum > rand_num){
parent2 = i;
break;
}
}
}
// child index
int child = top + snake_id;
// choose index of the parameter randomly
int id = blockIdx.x * blockDim.x + threadIdx.x;
int rand_num = random_int2[id];
// perform crossover to generate new neural network
if(rand_num%2 == 0){
nns[child * blockDim.x + parameter_id] = nns[parent1 * blockDim.x + parameter_id];
}
else{
nns[child * blockDim.x + parameter_id] = nns[parent2 * blockDim.x + parameter_id];
}
}
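// Worked example of the roulette wheel selection used above (illustrative): with fitness scores
// {10, 30, 60} the running sums are {10, 40, 100}; a random draw of 37 in [0,100) passes 10 but
// not 40, so the second snake is picked — fitter snakes own a larger slice of the wheel.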
// mutate the neural network parameters based on mutation rate
__global__ void mutate(float *nns, float *random_float1, float *random_float2){
int id = blockIdx.x * blockDim.x + threadIdx.x;
// mutate only if random value is less than mutation rate
if(random_float1[id] < mutation_rate){
nns[id] += random_float2[id] / 5;
if(nns[id] > 1)
nns[id] = 1;
if(nns[id] < -1)
nns[id] = -1;
}
}
int main(){
srand(time(NULL));
ofstream fout;
// file to store best neural network parameters
fout.open("output.txt");
ofstream ftime;
// file to store every generation time
ftime.open("generation_time.txt");
// write model parameters into the file
fout<<"n_input\t\t"<<ni<<endl;
fout<<"n_hidden\t"<<nh<<endl;
fout<<"n_output\t"<<no<<endl;
fout<<"height\t\t"<<height<<endl;
fout<<"width\t\t"<<width<<endl;
// number of parameters of neural network
int parameter_size = ni*nh + nh + nh*no + no;
cout<<"Parameter size: "<<parameter_size<<endl;
// neural networks for device
float *dnns, *dnns_new;
// allocate memory for neural networks in device
hipMalloc((void **)&dnns,population_size*parameter_size*sizeof(float));
hipMalloc((void **)&dnns_new,population_size*parameter_size*sizeof(float));
hiprandGenerator_t prng;
// create pseudo random number generator
hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_MT19937);
hiprandSetPseudoRandomGeneratorSeed(prng, 41ULL);
// initialise neural networks with uniform distribution
hiprandGenerateUniform(prng, dnns, population_size*parameter_size);
// create random number generator for integer values
unsigned int *random_int;
hipMalloc((void**) &random_int,population_size*parameter_size*sizeof(int));
hiprandGenerate(prng,random_int,population_size*parameter_size);
// initialise the neural networks so they contain negative values as well
hipLaunchKernelGGL(( initialise_nn), dim3(population_size),dim3(parameter_size), 0, 0, dnns,random_int);
// device variable to store fitness score and their indices
float *dfitness;
int *dindices;
// fitness score on host
float *fitness = (float *) malloc(population_size*sizeof(float));
// fitness score and indices on device
hipMalloc((void**) &dfitness,population_size*sizeof(float));
hipMalloc((void**) &dindices,population_size*sizeof(int));
// thrust device pointer to fitness score and indices array
thrust::device_ptr<float> fitness_ptr(dfitness);
thrust::device_ptr<int> indices_ptr(dindices);
// random number generator used for generating indices of fruit
unsigned int *random_int_fruitx;
hipMalloc((void**) &random_int_fruitx,population_size*max_snake_length*sizeof(int));
unsigned int *random_int_fruity;
hipMalloc((void**) &random_int_fruity,population_size*max_snake_length*sizeof(int));
// random number generator used during crossover
unsigned int *random_int_crossover1;
hipMalloc((void**) &random_int_crossover1,2*population_size*sizeof(int));
unsigned int *random_int_crossover2;
hipMalloc((void**) &random_int_crossover2,population_size*parameter_size*sizeof(int));
// random number generator used during mutation
float *random_float_mutate1;
hipMalloc((void**) &random_float_mutate1,population_size*parameter_size*sizeof(float));
float *random_float_mutate2;
hipMalloc((void**) &random_float_mutate2,population_size*parameter_size*sizeof(float));
// local variables
float max_reward = 0;
float avg_reward = 0;
int max_index = 0;
float global_max_reward = 0;
int global_max_generation = 0;
float max_avg_reward = 0;
// array to store parameters of the best neural network
float *best_snake = (float *)malloc(parameter_size*sizeof(float));
// loop for number of generations
for(int k=0;k<generations;k++){
clock_t tStart = clock();
// initialise indices array corresponding to fitness array
int num_threads = (population_size > 1024) ? 1024 : population_size;
int num_blocks = population_size/1024 + 1;
hipLaunchKernelGGL(( intialise_indices), dim3(num_blocks),dim3(num_threads), 0, 0, dindices);
// create random number generator for integer values of fruit
hiprandGenerate(prng,random_int_fruitx,population_size*max_snake_length);
hiprandGenerate(prng,random_int_fruity,population_size*max_snake_length);
// play the games on GPU
hipLaunchKernelGGL(( play_game), dim3(population_size),dim3(parameter_size),parameter_size*sizeof(float), 0, dnns,dfitness,random_int_fruitx,random_int_fruity,parameter_size);
// copy device fitness score to host
hipMemcpy(fitness,dfitness,population_size*sizeof(float),hipMemcpyDeviceToHost);
// find the index with maximum fitness score and also calculate average fitness score
avg_reward = 0;
max_reward = fitness[0];
max_index = 0;
for(int i=1;i<population_size;i++){
if(fitness[i] > max_reward){
max_reward = fitness[i];
max_index = i;
}
avg_reward += fitness[i];
}
avg_reward /= population_size;
double generation_time = (double)(clock() - tStart)/CLOCKS_PER_SEC;
ftime<<generation_time<<endl;
printf("generation: %d\tAverage fitness: %f\tMax reward: %f\tTime: %f\n",k+1,avg_reward,max_reward,generation_time);
// find the maximum fitness score among all the generations
if(max_reward >= global_max_reward){
global_max_reward = max_reward;
global_max_generation = k+1;
}
// copy parameters of neural network with maximum average fitness score among all the generations
if(avg_reward >= max_avg_reward){
max_avg_reward = avg_reward;
hipMemcpy(best_snake,dnns+max_index*parameter_size,parameter_size*sizeof(float),hipMemcpyDeviceToHost);
}
// number of neural networks passed on to next generation from current generation
int top = population_size * natural_selection_rate;
// sort the device fitness score array in descending order along with the indices array
thrust::sort_by_key(fitness_ptr,fitness_ptr+population_size,indices_ptr,thrust::greater<float>());
// update device neural network array with top neural network parameters
hipLaunchKernelGGL(( select_top), dim3(top),dim3(parameter_size), 0, 0, dnns,dnns_new,dindices);
float *temp = dnns_new;
dnns_new = dnns;
dnns = temp;
// create random number generator for integer values used during crossover
hiprandGenerate(prng,random_int_crossover1,2*population_size);
hiprandGenerate(prng,random_int_crossover2,population_size*parameter_size);
// crossover the top neural networks to generate the remaining neural networks in the population
hipLaunchKernelGGL(( crossover), dim3(population_size-top),dim3(parameter_size), 0, 0, dnns,dfitness,random_int_crossover1,random_int_crossover2,top);
// create random number generator for float values used during mutation
hiprandGenerateUniform(prng,random_float_mutate1,population_size*parameter_size);
hiprandGenerateNormal(prng,random_float_mutate2,population_size*parameter_size,0.0,1.0);
// mutate all neural network parameters in accordance with the mutation rate
hipLaunchKernelGGL(( mutate), dim3(population_size),dim3(parameter_size), 0, 0, dnns,random_float_mutate1,random_float_mutate2);
}
// write parameters of the best neural network into file
fout<<"Best neural network parameters: \n";
for(int i=0;i<parameter_size;i++)
fout<<best_snake[i]<<" ";
fout<<endl;
printf("Generation: %d\tGlobal max reward: %f\n",global_max_generation,global_max_reward);
fout.close();
ftime.close();
hipFree(dnns);
hipFree(dnns_new);
hipFree(random_int);
hipFree(dfitness);
hipFree(dindices);
hipFree(random_int_fruitx);
hipFree(random_int_fruity);
hipFree(random_int_crossover1);
hipFree(random_int_crossover2);
hipFree(random_float_mutate1);
hipFree(random_float_mutate2);
free(fitness);
free(best_snake);
return 0;
}
|
bd2ba8f61927141d2368e366e5fb76e4733ae724.cu
|
#include <bits/stdc++.h>
#include <unistd.h>
#include <curand.h>
#include <curand_kernel.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include<time.h>
using namespace std;
#define ni 24 // number of neurons in input layer
#define nh 20 // number of neurons in hidden layer
#define no 4 // number of neurons in output layer
#define width 30 // width of the game boundary
#define height 20 // height of the game boundary
#define max_snake_length 100 // maximum length of the snake
#define population_size 4096
#define natural_selection_rate 0.4
#define mutation_rate 0.01
#define generations 10000
#define negative_reward -150
#define positive_reward 500
#define max_total_steps 1000
// randomly initialise neural network parameters to negative values
__global__ void initialise_nn(float *nns, unsigned int *random_int){
int id = blockIdx.x * blockDim.x + threadIdx.x;
nns[id] = (random_int[id] % 2) ? nns[id] : -nns[id];
}
// set the input for the neural network; the input size is 24, i.e. it looks for the wall,
// its body and the fruit in all 8 directions
__device__ void set_input(float input[], int x, int y, int fruitx, int fruity,
int tailx[], int taily[], int ntail){
for(int i=0;i<ni;i++)
input[i] = 0;
// check up direction
// check food
if(fruitx == x && fruity < y)
input[0] = 1;
// check body
for(int i=0;i<ntail;i++){
if(tailx[i] == x && taily[i] < y){
input[1] = 1;
break;
}
}
// check wall distance
if(y != 0)
input[2] = 1 / (float)y;
// check down direction
// check food
if(fruitx == x && fruity > y)
input[3] = 1;
// check body
for(int i=0;i<ntail;i++){
if(tailx[i] == x && taily[i] > y){
input[4] = 1;
break;
}
}
// check wall distance
if(height-y != 0)
input[5] = 1 / (float)(height-y);
// check right direction
// check food
if(fruity == y && fruitx > x)
input[6] = 1;
// check body
for(int i=0;i<ntail;i++){
if(taily[i] == y && tailx[i] > x){
input[7] = 1;
break;
}
}
// check wall distance
if(width-x != 0)
input[8] = 1 / (float)(width-x);
// check left direction
// check food
if(fruity == y && fruitx < x)
input[9] = 1;
// check body
for(int i=0;i<ntail;i++){
if(taily[i] == y && tailx[i] < x){
input[10] = 1;
break;
}
}
// check wall distance
if(x != 0)
input[11] = 1 / (float)x;
//check north-east direction
int tempx = x, tempy = y;
bool found_food = false, found_body = false;
// check food and body
while(tempx < width && tempy > 0){
tempx++;
tempy--;
if(!found_food && tempx == fruitx && tempy == fruity){
input[12] = 1;
found_food = true;
}
if(!found_body){
for(int i=0;i<ntail;i++){
if(tempx == tailx[i] && tempy == taily[i]){
input[13] = 1;
found_body = true;
break;
}
}
}
if(found_body && found_food)
break;
}
// check wall distance
int min_value = min(width-x,y);
float distance = sqrt(pow(min_value,2)*2);
if(distance != 0)
input[14] = 1 / distance;
//check north-west direction
tempx = x, tempy = y;
found_food = false, found_body = false;
// check food and body
while(tempx > 0 && tempy > 0){
tempx--;
tempy--;
if(!found_food && tempx == fruitx && tempy == fruity){
input[15] = 1;
found_food = true;
}
if(!found_body){
for(int i=0;i<ntail;i++){
if(tempx == tailx[i] && tempy == taily[i]){
input[16] = 1;
found_body = true;
break;
}
}
}
if(found_body && found_food)
break;
}
// check wall distance
min_value = min(x,y);
distance = sqrt(pow((min_value),2)*2);
if(distance != 0)
input[17] = 1 / distance;
//check south-west direction
tempx = x, tempy = y;
found_food = false, found_body = false;
// check food and body
while(tempx > 0 && tempy < height){
tempx--;
tempy++;
if(!found_food && tempx == fruitx && tempy == fruity){
input[18] = 1;
found_food = true;
}
if(!found_body){
for(int i=0;i<ntail;i++){
if(tempx == tailx[i] && tempy == taily[i]){
input[19] = 1;
found_body = true;
break;
}
}
}
if(found_body && found_food)
break;
}
// check wall distance
min_value = min(x,height-y);
distance = sqrt(pow((min_value),2)*2);
if(distance != 0)
input[20] = 1 / distance;
//check south-east direction
tempx = x, tempy = y;
found_food = false, found_body = false;
// check food and body
while(tempx < width && tempy < height){
tempx++;
tempy++;
if(!found_food && tempx == fruitx && tempy == fruity){
input[21] = 1;
found_food = true;
}
if(!found_body){
for(int i=0;i<ntail;i++){
if(tempx == tailx[i] && tempy == taily[i]){
input[22] = 1;
found_body = true;
break;
}
}
}
if(found_body && found_food)
break;
}
// check wall distance
min_value = min(width-x,height-y);
distance = sqrt(pow((min_value),2)*2);
if(distance != 0)
input[23] = 1 / distance;
}
// function to calculate value of neuron in a layer during forward function
__device__ float forward(float input[], float weight[], float bias[], int len_i, int len_o, int index){
float output = 0;
for(int i=0;i<len_i;i++){
output += weight[i*len_o+index] * input[i];
}
output += bias[index];
// sigmoid function
output = 1.0 / (1.0 + exp(-output));
return output;
}
// play the game with each block corresponding to one neural network and each thread corresponding to a parameter of neural network
__global__ void play_game(float *nns, float *fitness, unsigned int *random_int_fruitx, unsigned int *random_int_fruity,
int parameter_size){
int snake_id = blockIdx.x;
int parameter_id = threadIdx.x;
// neural network of a particular id
extern __shared__ float nn[];
nn[parameter_id] = nns[snake_id*parameter_size+parameter_id];
__syncthreads();
// weights and biases of the neural network
float *w1 = &nn[0];
float *b1 = &nn[ni*nh];
float *w2 = &nn[ni*nh+nh];
float *b2 = &nn[ni*nh+nh+nh*no];
/* setup the game */
// STOP: 0, LEFT: 1, RIGHT: 2, UP: 3, DOWN: 4
// next direction to take
__shared__ int dir;
dir = 0;
// next direction to take if the first value is not possible
__shared__ int dir_next;
dir_next = 0;
// last direction taken
__shared__ int last_dir;
last_dir = 0;
// position of head
__shared__ int x;
x = width/2;
__shared__ int y;
y = height/2;
// position of fruit
__shared__ int fruitx;
__shared__ int fruity;
__shared__ int fruit_index;
fruit_index = snake_id * max_snake_length;
fruitx = random_int_fruitx[fruit_index] % width;
fruity = random_int_fruity[fruit_index] % height;
fruit_index++;
//snake length
__shared__ int ntail;
ntail = 3;
// array to store snake body
__shared__ int tailx[max_snake_length];
__shared__ int taily[max_snake_length];
// local variables
int total_steps = 200;
float total_reward = 0;
float reward = 0;
int steps = 0;
// array to store input, hidden and output layer
__shared__ float input[ni];
__shared__ float hidden_output[nh];
__shared__ float output[no];
// flag used to exit all the threads in a block
__shared__ int break_flag;
break_flag = 0;
// play until the snake dies
while(true){
// set the input for the game
set_input(input,x,y,fruitx,fruity,tailx,taily,ntail);
// forward function for the first layer
if(parameter_id < nh){
hidden_output[parameter_id] = forward(input,w1,b1,ni,nh,parameter_id);
}
__syncthreads();
// forward function for the second layer and thus get the output layer
if(parameter_id < no){
output[parameter_id] = forward(hidden_output,w2,b2,nh,no,parameter_id);
}
__syncthreads();
// thread id = 0 executes the logic of the game
if(parameter_id == 0){
// find the two best directions to be taken
float max_value = output[0];
float max_index = 0;
for(int i=1;i<no;i++){
if(output[i] > max_value){
max_value = output[i];
max_index = i;
}
}
dir = max_index + 1;
float max_value1 = INT16_MIN;
float max_index1 = -1;
for(int i=0;i<no;i++){
if(i != max_index && output[i] > max_value1){
max_value1 = output[i];
max_index1 = i;
}
}
dir_next = max_index1 + 1;
// update the snake body
int prevx = tailx[0];
int prevy = taily[0];
int prev2x, prev2y;
tailx[0] = x;
taily[0] = y;
for(int i=1;i<ntail;i++)
{
prev2x = tailx[i];
prev2y = taily[i];
tailx[i] = prevx;
taily[i] = prevy;
prevx = prev2x;
prevy = prev2y;
}
// move snake in the next direction
switch(dir)
{
case 1:
if(last_dir != 2)
x--;
else{
if(dir_next == 2)
x++;
else if(dir_next == 3)
y--;
else if(dir_next == 4)
y++;
}
break;
case 2:
if(last_dir != 1)
x++;
else{
if(dir_next == 1)
x--;
else if(dir_next == 3)
y--;
else if(dir_next == 4)
y++;
}
break;
case 3:
if(last_dir != 4)
y--;
else{
if(dir_next == 1)
x--;
else if(dir_next == 2)
x++;
else if(dir_next == 4)
y++;
}
break;
case 4:
if(last_dir != 3)
y++;
else{
if(dir_next == 1)
x--;
else if(dir_next == 2)
x++;
else if(dir_next == 3)
y--;
}
break;
}
last_dir = dir;
// snake hits the wall
if(x >= width || x < 0 || y >= height || y < 0)
{
reward = negative_reward;
break_flag = 1;
}
// snake hits its body
for(int i =0; i<ntail;i++)
{
if(tailx[i]==x && taily[i]==y)
{
reward = negative_reward;
break_flag = 1;
}
}
// snake eats the fruit
if(x==fruitx && y==fruity)
{
fruitx = random_int_fruitx[fruit_index] % width;
fruity = random_int_fruity[fruit_index] % height;
fruit_index++;
ntail++;
reward = positive_reward;
}
total_reward += reward;
steps += 1;
if(reward == -1){
break_flag = 1;
}
reward = 0;
// update total steps the snake can take
if(reward > 0)
total_steps = (total_steps+100 > max_total_steps) ? max_total_steps : total_steps + 100;
if(steps > total_steps){
break_flag = 1;
}
}
__syncthreads();
// exit while loop for all the threads in the block if the snake dies
if(break_flag)
break;
}
__syncthreads();
// update the fitness score for the game
if(parameter_id == 0){
fitness[snake_id] = total_reward + steps;
}
}
// update the device array to store top neural networks which will be used for crossover
__global__ void select_top(float *nns, float *nns_new, int *indices){
int id1 = blockIdx.x * blockDim.x + threadIdx.x;
int id2 = indices[blockIdx.x] * blockDim.x + threadIdx.x;
nns_new[id1] = nns[id2];
}
// initialise the device array for indices
__global__ void intialise_indices(int *indices){
int id = blockIdx.x * blockDim.x + threadIdx.x;
indices[id] = id;
}
// crossover the top neural networks to generate the remaining neural networks in the population
__global__ void crossover(float *nns, float *fitness, unsigned int *random_int1, unsigned int *random_int2, int top){
int snake_id = blockIdx.x;
int parameter_id = threadIdx.x;
// select parents using Roulette Wheel Selection method
int fitness_sum = 0;
for(int i=0;i<population_size;i++)
fitness_sum += fitness[i];
// select parent 1
int parent1 = 0;
if(fitness_sum != 0){
int rand_num = random_int1[snake_id] % fitness_sum;
int sum = 0;
for(int i=0;i<population_size;i++){
sum += fitness[i];
if(sum > rand_num){
parent1 = i;
break;
}
}
}
// select parent 2
int parent2 = 0;
if(fitness_sum != 0){
int rand_num = random_int2[snake_id + blockDim.x] % fitness_sum;
int sum = 0;
for(int i=0;i<population_size;i++){
sum += fitness[i];
if(sum > rand_num){
parent2 = i;
break;
}
}
}
// child index
int child = top + snake_id;
// choose index of the parameter randomly
int id = blockIdx.x * blockDim.x + threadIdx.x;
int rand_num = random_int2[id];
// perform crossover to generate new neural network
if(rand_num%2 == 0){
nns[child * blockDim.x + parameter_id] = nns[parent1 * blockDim.x + parameter_id];
}
else{
nns[child * blockDim.x + parameter_id] = nns[parent2 * blockDim.x + parameter_id];
}
}
// mutate the neural network parameters based on mutation rate
__global__ void mutate(float *nns, float *random_float1, float *random_float2){
int id = blockIdx.x * blockDim.x + threadIdx.x;
// mutate only if random value is less than mutation rate
if(random_float1[id] < mutation_rate){
nns[id] += random_float2[id] / 5;
if(nns[id] > 1)
nns[id] = 1;
if(nns[id] < -1)
nns[id] = -1;
}
}
int main(){
srand(time(NULL));
ofstream fout;
// file to store best neural network parameters
fout.open("output.txt");
ofstream ftime;
// file to store every generation time
ftime.open("generation_time.txt");
// write model parameters into the file
fout<<"n_input\t\t"<<ni<<endl;
fout<<"n_hidden\t"<<nh<<endl;
fout<<"n_output\t"<<no<<endl;
fout<<"height\t\t"<<height<<endl;
fout<<"width\t\t"<<width<<endl;
// number of parameters of neural network
int parameter_size = ni*nh + nh + nh*no + no;
cout<<"Parameter size: "<<parameter_size<<endl;
// neural networks for device
float *dnns, *dnns_new;
// allocate memory for neural networks in device
cudaMalloc((void **)&dnns,population_size*parameter_size*sizeof(float));
cudaMalloc((void **)&dnns_new,population_size*parameter_size*sizeof(float));
curandGenerator_t prng;
// create pseudo random number generator
curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_MT19937);
curandSetPseudoRandomGeneratorSeed(prng, 41ULL);
// initialise neural networks with uniform distribution
curandGenerateUniform(prng, dnns, population_size*parameter_size);
// create random number generator for integer values
unsigned int *random_int;
cudaMalloc((void**) &random_int,population_size*parameter_size*sizeof(int));
curandGenerate(prng,random_int,population_size*parameter_size);
// initialise the neural networks to have negative values also
initialise_nn<<<population_size,parameter_size>>>(dnns,random_int);
// device variable to store fitness score and their indices
float *dfitness;
int *dindices;
// fitness score on host
float *fitness = (float *) malloc(population_size*sizeof(float));
// fitness score and indices on device
cudaMalloc((void**) &dfitness,population_size*sizeof(float));
cudaMalloc((void**) &dindices,population_size*sizeof(int));
// thrust device pointer to fitness score and indices array
thrust::device_ptr<float> fitness_ptr(dfitness);
thrust::device_ptr<int> indices_ptr(dindices);
// random number generator used for generating indices of fruit
unsigned int *random_int_fruitx;
cudaMalloc((void**) &random_int_fruitx,population_size*max_snake_length*sizeof(int));
unsigned int *random_int_fruity;
cudaMalloc((void**) &random_int_fruity,population_size*max_snake_length*sizeof(int));
// random number generator used during crossover
unsigned int *random_int_crossover1;
cudaMalloc((void**) &random_int_crossover1,2*population_size*sizeof(int));
unsigned int *random_int_crossover2;
cudaMalloc((void**) &random_int_crossover2,population_size*parameter_size*sizeof(int));
// random number generator used during mutation
float *random_float_mutate1;
cudaMalloc((void**) &random_float_mutate1,population_size*parameter_size*sizeof(float));
float *random_float_mutate2;
cudaMalloc((void**) &random_float_mutate2,population_size*parameter_size*sizeof(float));
// local variables
float max_reward = 0;
float avg_reward = 0;
int max_index = 0;
float global_max_reward = 0;
int global_max_generation = 0;
float max_avg_reward = 0;
// array to store parameters of the best neural network
float *best_snake = (float *)malloc(parameter_size*sizeof(float));
// loop for number of generations
for(int k=0;k<generations;k++){
clock_t tStart = clock();
// initialise indices array corresponding to fitness array
int num_threads = (population_size > 1024) ? 1024 : population_size;
int num_blocks = population_size/1024 + 1;
intialise_indices<<<num_blocks,num_threads>>>(dindices);
// create random number generator for integer values of fruit
curandGenerate(prng,random_int_fruitx,population_size*max_snake_length);
curandGenerate(prng,random_int_fruity,population_size*max_snake_length);
// play the games on GPU
play_game<<<population_size,parameter_size,parameter_size*sizeof(float)>>>(dnns,dfitness,random_int_fruitx,random_int_fruity,parameter_size);
// copy device fitness score to host
cudaMemcpy(fitness,dfitness,population_size*sizeof(float),cudaMemcpyDeviceToHost);
// find the index with maximum fitness score and also calculate average fitness score
avg_reward = 0;
max_reward = fitness[0];
max_index = 0;
for(int i=1;i<population_size;i++){
if(fitness[i] > max_reward){
max_reward = fitness[i];
max_index = i;
}
avg_reward += fitness[i];
}
avg_reward /= population_size;
double generation_time = (double)(clock() - tStart)/CLOCKS_PER_SEC;
ftime<<generation_time<<endl;
printf("generation: %d\tAverage fitness: %f\tMax reward: %f\tTime: %f\n",k+1,avg_reward,max_reward,generation_time);
// find the maximum fitness score among all the generations
if(max_reward >= global_max_reward){
global_max_reward = max_reward;
global_max_generation = k+1;
}
// copy parameters of neural network with maximum average fitness score among all the generations
if(avg_reward >= max_avg_reward){
max_avg_reward = avg_reward;
cudaMemcpy(best_snake,dnns+max_index*parameter_size,parameter_size*sizeof(float),cudaMemcpyDeviceToHost);
}
// number of neural networks passed on to next generation from current generation
int top = population_size * natural_selection_rate;
// sort the device fitness score array in descending order along with the indices array
thrust::sort_by_key(fitness_ptr,fitness_ptr+population_size,indices_ptr,thrust::greater<float>());
// update device neural network array with top neural network parameters
select_top<<<top,parameter_size>>>(dnns,dnns_new,dindices);
float *temp = dnns_new;
dnns_new = dnns;
dnns = temp;
// create random number generator for integer values used during crossover
curandGenerate(prng,random_int_crossover1,2*population_size);
curandGenerate(prng,random_int_crossover2,population_size*parameter_size);
// crossover the top neural networks to generate the remaining neural networks in the population
crossover<<<population_size-top,parameter_size>>>(dnns,dfitness,random_int_crossover1,random_int_crossover2,top);
// create random number generator for float values used during mutation
curandGenerateUniform(prng,random_float_mutate1,population_size*parameter_size);
curandGenerateNormal(prng,random_float_mutate2,population_size*parameter_size,0.0,1.0);
// mutate all neural network parameters in accordance with the mutation rate
mutate<<<population_size,parameter_size>>>(dnns,random_float_mutate1,random_float_mutate2);
}
// write parameters of the best neural network into file
fout<<"Best neural network parameters: \n";
for(int i=0;i<parameter_size;i++)
fout<<best_snake[i]<<" ";
fout<<endl;
printf("Generation: %d\tGlobal max reward: %f\n",global_max_generation,global_max_reward);
fout.close();
ftime.close();
cudaFree(dnns);
cudaFree(dnns_new);
cudaFree(random_int);
cudaFree(dfitness);
cudaFree(dindices);
cudaFree(random_int_fruitx);
cudaFree(random_int_fruity);
cudaFree(random_int_crossover1);
cudaFree(random_int_crossover2);
cudaFree(random_float_mutate1);
cudaFree(random_float_mutate2);
free(fitness);
free(best_snake);
return 0;
}
|
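The crossover kernel in the snake-game files above picks parents with roulette-wheel selection, i.e. index i is drawn with probability fitness[i] / sum(fitness). Below is a minimal host-side C++ sketch of that selection step for illustration only; the name select_parent_roulette and the use of std::mt19937 are assumptions of this sketch, not code from the files above.
// Standalone illustration (not from the files above): roulette-wheel selection,
// mirroring the cumulative-sum loop inside the crossover kernel.
#include <cstdio>
#include <numeric>
#include <random>
#include <vector>
static int select_parent_roulette(const std::vector<float> &fitness, std::mt19937 &rng) {
    float fitness_sum = std::accumulate(fitness.begin(), fitness.end(), 0.0f);
    if (fitness_sum <= 0.0f)
        return 0;  // degenerate case: fall back to index 0, as the kernel does
    std::uniform_real_distribution<float> pick(0.0f, fitness_sum);
    float r = pick(rng);
    float running = 0.0f;
    for (std::size_t i = 0; i < fitness.size(); ++i) {
        running += fitness[i];
        if (running > r)
            return (int)i;  // first index whose cumulative fitness exceeds the draw
    }
    return (int)fitness.size() - 1;  // guard against round-off at the upper end
}
int main() {
    std::vector<float> fitness = {10.0f, 30.0f, 60.0f};  // individual 2 should win ~60% of draws
    std::mt19937 rng(42);
    int counts[3] = {0, 0, 0};
    for (int i = 0; i < 10000; ++i)
        counts[select_parent_roulette(fitness, rng)]++;
    std::printf("selection counts: %d %d %d\n", counts[0], counts[1], counts[2]);
    return 0;
}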
eb3b3c953454fddbf109294bd3d702d22330457a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "cuda_math_util.h"
#include "io_iterator.h"
#include "weight_clipper_cuda.h"
#include <hipcub/hipcub.hpp>
namespace RPU {
template <typename T> struct StdFunctor {
StdFunctor(T size, T *sum) : size_(size), sum_(sum){};
__device__ __forceinline__ T operator()(const T &a) const {
T m = *sum_ / size_;
return T((a - m) * (a - m));
}
T size_;
T *sum_;
};
template <typename T> __global__ void kernelAClipC(T *values, int size, T *a, T c) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
T abs_a = fabs(*a / c);
if (tid < size) {
values[tid] = MIN(MAX(values[tid], -abs_a), abs_a);
}
}
template <typename T> __global__ void kernelAClipSqrt(T *values, int size, T *a, T sigma) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
T abs_a = sqrtf(fabs(*a) / (size - 1)) * sigma;
if (tid < size) {
values[tid] = MIN(MAX(values[tid], -abs_a), abs_a);
}
}
// ctor
template <typename T>
WeightClipperCuda<T>::WeightClipperCuda(CudaContext *context, int x_size, int d_size)
: context_(context), x_size_(x_size), d_size_(d_size), size_(x_size * d_size) {
T *tmp = nullptr;
StdFunctor<T> std_functor((T)x_size_, tmp);
RPU::hipcub::TransformInputIterator<T, StdFunctor<T>, T *> std_input(tmp, std_functor);
RPU::hipcub::DeviceReduce::Sum(
nullptr, temp_storage_bytes_, std_input, tmp, size_, context_->getStream());
dev_temp_storage_ = RPU::make_unique<CudaArray<char>>(context, temp_storage_bytes_);
}
template <typename T>
void WeightClipperCuda<T>::apply(T *weights, const WeightClipParameter &wclpar) {
// does a weight remap to the scales.
int nthreads = context_->getNThreads();
int nblocks = context_->getNBlocks(size_, nthreads);
auto s = context_->getStream();
// this is to remap the weights
switch (wclpar.type) {
case WeightClipType::None: {
break;
}
case WeightClipType::AverageChannelMax: {
if (!row_amaximizer_) {
row_amaximizer_ = RPU::make_unique<Maximizer<T>>(context_, x_size_, true);
dev_sum_value_ = RPU::make_unique<CudaArray<T>>(context_, 1);
}
row_amaximizer_->compute(weights, d_size_, true);
RPU::hipcub::DeviceReduce::Sum(
dev_temp_storage_->getData(), temp_storage_bytes_, row_amaximizer_->getMaxValues(),
dev_sum_value_->getData(), d_size_, s);
hipLaunchKernelGGL(( kernelAClipC<T>)
, dim3(nblocks), dim3(nthreads), 0, s, weights, size_, dev_sum_value_->getData(), (T)d_size_);
break;
}
case WeightClipType::LayerGaussian: {
if (!dev_sum_value_) {
dev_sum_value_ = RPU::make_unique<CudaArray<T>>(context_, 1);
}
if (!dev_std_value_) {
dev_std_value_ = RPU::make_unique<CudaArray<T>>(context_, 1);
}
StdFunctor<T> std_functor((T)size_, dev_sum_value_->getData());
RPU::hipcub::TransformInputIterator<T, StdFunctor<T>, T *> std_input(weights, std_functor);
// mean (sum)
RPU::hipcub::DeviceReduce::Sum(
dev_temp_storage_->getData(), temp_storage_bytes_, weights, dev_sum_value_->getData(),
size_, s);
// std
RPU::hipcub::DeviceReduce::Sum(
dev_temp_storage_->getData(), temp_storage_bytes_, std_input, dev_std_value_->getData(),
size_, s);
hipLaunchKernelGGL(( kernelAClipSqrt<T>)
, dim3(nblocks), dim3(nthreads), 0, s, weights, size_, dev_std_value_->getData(), wclpar.sigma);
break;
}
case WeightClipType::FixedValue: {
if (wclpar.fixed_value >= 0) {
RPU::math::aclip(context_, weights, size_, (T)wclpar.fixed_value);
}
break;
}
default:
RPU_FATAL("Clipping type not implemented.");
} // switch
}
template class WeightClipperCuda<float>;
#ifdef RPU_USE_DOUBLE
template class WeightClipperCuda<double>;
#endif
#undef RPU_WM_KERNEL_LOOP
} // namespace RPU
|
eb3b3c953454fddbf109294bd3d702d22330457a.cu
|
/**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "cuda_math_util.h"
#include "io_iterator.h"
#include "weight_clipper_cuda.h"
#include <cub/cub.cuh>
namespace RPU {
template <typename T> struct StdFunctor {
StdFunctor(T size, T *sum) : size_(size), sum_(sum){};
__device__ __forceinline__ T operator()(const T &a) const {
T m = *sum_ / size_;
return T((a - m) * (a - m));
}
T size_;
T *sum_;
};
template <typename T> __global__ void kernelAClipC(T *values, int size, T *a, T c) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
T abs_a = fabs(*a / c);
if (tid < size) {
values[tid] = MIN(MAX(values[tid], -abs_a), abs_a);
}
}
template <typename T> __global__ void kernelAClipSqrt(T *values, int size, T *a, T sigma) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
T abs_a = sqrtf(fabs(*a) / (size - 1)) * sigma;
if (tid < size) {
values[tid] = MIN(MAX(values[tid], -abs_a), abs_a);
}
}
// ctor
template <typename T>
WeightClipperCuda<T>::WeightClipperCuda(CudaContext *context, int x_size, int d_size)
: context_(context), x_size_(x_size), d_size_(d_size), size_(x_size * d_size) {
T *tmp = nullptr;
StdFunctor<T> std_functor((T)x_size_, tmp);
RPU::cub::TransformInputIterator<T, StdFunctor<T>, T *> std_input(tmp, std_functor);
RPU::cub::DeviceReduce::Sum(
nullptr, temp_storage_bytes_, std_input, tmp, size_, context_->getStream());
dev_temp_storage_ = RPU::make_unique<CudaArray<char>>(context, temp_storage_bytes_);
}
template <typename T>
void WeightClipperCuda<T>::apply(T *weights, const WeightClipParameter &wclpar) {
// does a weight remap to the scales.
int nthreads = context_->getNThreads();
int nblocks = context_->getNBlocks(size_, nthreads);
auto s = context_->getStream();
// this is to remap the weights
switch (wclpar.type) {
case WeightClipType::None: {
break;
}
case WeightClipType::AverageChannelMax: {
if (!row_amaximizer_) {
row_amaximizer_ = RPU::make_unique<Maximizer<T>>(context_, x_size_, true);
dev_sum_value_ = RPU::make_unique<CudaArray<T>>(context_, 1);
}
row_amaximizer_->compute(weights, d_size_, true);
RPU::cub::DeviceReduce::Sum(
dev_temp_storage_->getData(), temp_storage_bytes_, row_amaximizer_->getMaxValues(),
dev_sum_value_->getData(), d_size_, s);
kernelAClipC<T>
<<<nblocks, nthreads, 0, s>>>(weights, size_, dev_sum_value_->getData(), (T)d_size_);
break;
}
case WeightClipType::LayerGaussian: {
if (!dev_sum_value_) {
dev_sum_value_ = RPU::make_unique<CudaArray<T>>(context_, 1);
}
if (!dev_std_value_) {
dev_std_value_ = RPU::make_unique<CudaArray<T>>(context_, 1);
}
StdFunctor<T> std_functor((T)size_, dev_sum_value_->getData());
RPU::cub::TransformInputIterator<T, StdFunctor<T>, T *> std_input(weights, std_functor);
// mean (sum)
RPU::cub::DeviceReduce::Sum(
dev_temp_storage_->getData(), temp_storage_bytes_, weights, dev_sum_value_->getData(),
size_, s);
// std
RPU::cub::DeviceReduce::Sum(
dev_temp_storage_->getData(), temp_storage_bytes_, std_input, dev_std_value_->getData(),
size_, s);
kernelAClipSqrt<T>
<<<nblocks, nthreads, 0, s>>>(weights, size_, dev_std_value_->getData(), wclpar.sigma);
break;
}
case WeightClipType::FixedValue: {
if (wclpar.fixed_value >= 0) {
RPU::math::aclip(context_, weights, size_, (T)wclpar.fixed_value);
}
break;
}
default:
RPU_FATAL("Clipping type not implemented.");
} // switch
}
template class WeightClipperCuda<float>;
#ifdef RPU_USE_DOUBLE
template class WeightClipperCuda<double>;
#endif
#undef RPU_WM_KERNEL_LOOP
} // namespace RPU
|
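In the weight clipper above, the LayerGaussian branch clips each weight to sigma times the sample standard deviation of the whole weight matrix: one DeviceReduce::Sum produces the plain sum (hence the mean), the second sums the squared deviations supplied by StdFunctor, and kernelAClipSqrt takes the square root and clamps. The small host-side sketch below reproduces that formula; the name clip_layer_gaussian is illustrative and not part of the library above.
// Standalone illustration (not from the files above) of the LayerGaussian bound.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>
static void clip_layer_gaussian(std::vector<float> &w, float sigma) {
    const float n = (float)w.size();
    float sum = 0.0f;
    for (float v : w) sum += v;  // first reduction: plain sum -> mean
    const float mean = sum / n;
    float sq_dev = 0.0f;
    for (float v : w) sq_dev += (v - mean) * (v - mean);  // second reduction: StdFunctor terms
    const float bound = std::sqrt(sq_dev / (n - 1.0f)) * sigma;  // same formula as kernelAClipSqrt
    for (float &v : w) v = std::min(std::max(v, -bound), bound);
}
int main() {
    std::vector<float> w = {0.1f, -0.2f, 0.05f, 1.5f, -1.4f, 0.0f};
    clip_layer_gaussian(w, 2.0f);  // clip at two standard deviations
    for (float v : w) std::printf("%f ", v);
    std::printf("\n");
    return 0;
}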
5221189d072692414503e91c570d8daca76969e4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define NBIN 1000000
#define NUM_BLOCK 13
#define NUM_THREAD 192
int tid;
float pi = 0;
__global__ void cal_pi( float *sum, int nbin, float step, int nthreads, int nblocks ) {
int i;
float x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for( i = idx; i < nbin; i += nthreads * nblocks ) {
x = ( i - 0.5 ) * step;
sum[idx] += 4.0 / ( 1.0 + x * x );
}
}
int main( void ) {
dim3 dimGrid( NUM_BLOCK, 1, 1 );
dim3 dimBlock( NUM_THREAD, 1, 1 );
float *sumHost, *sumDev;
float step = 1.0 / NBIN;
size_t size = NUM_BLOCK * NUM_THREAD * sizeof( float );
sumHost = (float *) malloc( size );
hipMalloc( (void **) &sumDev, size );
hipMemset( sumDev, 0, size );
hipLaunchKernelGGL(( cal_pi) , dim3(dimGrid), dim3(dimBlock) , 0, 0, sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK );
hipMemcpy( sumHost, sumDev, size, hipMemcpyDeviceToHost );
for( tid = 0; tid < NUM_THREAD * NUM_BLOCK; tid++ )
pi += sumHost[tid];
pi *= step;
printf( "PI = %f\n", pi );
free( sumHost );
hipFree( sumDev );
return 0;
}
|
5221189d072692414503e91c570d8daca76969e4.cu
|
#include <stdio.h>
#include <cuda.h>
#define NBIN 1000000
#define NUM_BLOCK 13
#define NUM_THREAD 192
int tid;
float pi = 0;
__global__ void cal_pi( float *sum, int nbin, float step, int nthreads, int nblocks ) {
int i;
float x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for( i = idx; i < nbin; i += nthreads * nblocks ) {
x = ( i - 0.5 ) * step;
sum[idx] += 4.0 / ( 1.0 + x * x );
}
}
int main( void ) {
dim3 dimGrid( NUM_BLOCK, 1, 1 );
dim3 dimBlock( NUM_THREAD, 1, 1 );
float *sumHost, *sumDev;
float step = 1.0 / NBIN;
size_t size = NUM_BLOCK * NUM_THREAD * sizeof( float );
sumHost = (float *) malloc( size );
cudaMalloc( (void **) &sumDev, size );
cudaMemset( sumDev, 0, size );
cal_pi <<< dimGrid, dimBlock >>> ( sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK );
cudaMemcpy( sumHost, sumDev, size, cudaMemcpyDeviceToHost );
for( tid = 0; tid < NUM_THREAD * NUM_BLOCK; tid++ )
pi += sumHost[tid];
pi *= step;
printf( "PI = %f\n", pi );
free( sumHost );
cudaFree( sumDev );
return 0;
}
|
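The cal_pi kernel above parallelises a rectangle-rule approximation of the integral of 4/(1+x^2) over [0,1], which equals pi; each thread accumulates a strided subset of bins and the host sums the per-thread partial sums. The serial host-side sketch below evaluates the same sample points and is illustrative only, not part of the files above.
// Standalone illustration (not from the files above): serial form of the quadrature.
#include <cstdio>
int main() {
    const int nbin = 1000000;         // same bin count as NBIN in the files above
    const float step = 1.0f / nbin;
    float sum = 0.0f;                 // float accumulation, matching the kernel's arithmetic
    for (int i = 0; i < nbin; ++i) {
        float x = (i - 0.5f) * step;  // same sample points as the kernel
        sum += 4.0f / (1.0f + x * x);
    }
    std::printf("PI = %f\n", sum * step);
    return 0;
}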
7c1aec552a69110db3e8f5fb31d319c4c451a7a6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* PageRankLightSpMV.cu
*
* Created on: May 29, 2015
* Author: Yongchao Liu
* Affiliation: School of Computational Science & Engineering, Georgia Institute of Technology
* Email: [email protected]
*/
#include "PageRank.h"
#include "LightSpMVCore.h"
/*formula Y = AX*/
template<typename IntType, typename ValueType, typename CSRGraphType,
typename VecType>
class lightSpMVCSRKernel {
public:
lightSpMVCSRKernel() {
/*allocate space*/
_cudaRowCounters.resize(1);
/*specify the texture object parameters*/
_texVectorX = 0;
memset(&_texDesc, 0, sizeof(_texDesc));
_texDesc.addressMode[0] = hipAddressModeClamp;
_texDesc.addressMode[1] = hipAddressModeClamp;
_texDesc.filterMode = hipFilterModePoint;
_texDesc.readMode = hipReadModeElementType;
/*clear*/
memset(&_resDesc, 0, sizeof(_resDesc));
/*get GPU information*/
int device;
hipGetDevice(&device);
CudaCheckError();
/*get the device property*/
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, device);
_numThreadsPerBlock = prop.maxThreadsPerBlock;
_numThreadBlocks = prop.multiProcessorCount
* (prop.maxThreadsPerMultiProcessor / _numThreadsPerBlock);
//cerr << _numThreadsPerBlock << " " << _numThreadBlocks << endl;
}
inline void convert(const CSRGraphType& graph, const ValueType dummy) {
/*do nothing*/
}
inline void spmv(const CSRGraphType& graph, const VecType& x, VecType& y,
const float alpha, const float beta) {
/*initialize the row counter*/
_cudaRowCounters[0] = 0;
/*texture object*/
_resDesc.resType = hipResourceTypeLinear;
_resDesc.res.linear.desc = hipCreateChannelDesc(32, 0, 0, 0,
hipChannelFormatKindFloat);
_resDesc.res.linear.devPtr = (void*) thrust::raw_pointer_cast(x.data());
_resDesc.res.linear.sizeInBytes = x.size() * sizeof(float);
hipCreateTextureObject(&_texVectorX, &_resDesc, &_texDesc, NULL);
CudaCheckError();
int meanElementsPerRow = rint(
(double) graph.num_entries / graph.num_rows);
/*invoke the kernel*/
if (meanElementsPerRow <= 2) {
hipLaunchKernelGGL(( lightspmv::csr32DynamicWarp<float, 2, MAX_NUM_THREADS_PER_BLOCK / 2>),
dim3( _numThreadBlocks), dim3(_numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
} else if (meanElementsPerRow <= 4) {
hipLaunchKernelGGL(( lightspmv::csr32DynamicWarp<float, 4, MAX_NUM_THREADS_PER_BLOCK / 4>),
dim3( _numThreadBlocks), dim3(_numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
} else if (meanElementsPerRow <= 64) {
hipLaunchKernelGGL(( lightspmv::csr32DynamicWarp<float, 8, MAX_NUM_THREADS_PER_BLOCK / 8>),
dim3( _numThreadBlocks), dim3(_numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
} else {
hipLaunchKernelGGL(( lightspmv::csr32DynamicWarp<float, 32,
MAX_NUM_THREADS_PER_BLOCK / 32>), dim3(_numThreadBlocks),
dim3( _numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
}
/*synchronize*/
hipDeviceSynchronize();
}
inline void spmv(const CSRGraphType& graph, const VecType& x, VecType& y,
const double alpha, const double beta) {
/*initialize the row counter*/
_cudaRowCounters[0] = 0;
_resDesc.resType = hipResourceTypeLinear;
_resDesc.res.linear.desc = hipCreateChannelDesc(32, 32, 0, 0,
hipChannelFormatKindSigned);
_resDesc.res.linear.devPtr = (void*) thrust::raw_pointer_cast(x.data());
_resDesc.res.linear.sizeInBytes = x.size() * sizeof(double);
hipCreateTextureObject(&_texVectorX, &_resDesc, &_texDesc, NULL);
CudaCheckError();
/*invoke the kernel*/
int meanElementsPerRow = rint(
(double) graph.num_entries / graph.num_rows);
/*invoke the kernel*/
if (meanElementsPerRow <= 2) {
hipLaunchKernelGGL(( lightspmv::csr64DynamicWarp<double, 2, MAX_NUM_THREADS_PER_BLOCK / 2>),
dim3( _numThreadBlocks), dim3(_numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
} else if (meanElementsPerRow <= 4) {
hipLaunchKernelGGL(( lightspmv::csr64DynamicWarp<double, 4, MAX_NUM_THREADS_PER_BLOCK / 4>),
dim3( _numThreadBlocks), dim3(_numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
} else if (meanElementsPerRow <= 64) {
hipLaunchKernelGGL(( lightspmv::csr64DynamicWarp<double, 8, MAX_NUM_THREADS_PER_BLOCK / 8>),
dim3( _numThreadBlocks), dim3(_numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
} else {
hipLaunchKernelGGL(( lightspmv::csr64DynamicWarp<double, 32,
MAX_NUM_THREADS_PER_BLOCK / 32>), dim3(_numThreadBlocks),
dim3( _numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
}
/*synchronize*/
hipDeviceSynchronize();
}
private:
cusp::array1d<uint32_t, cusp::device_memory> _cudaRowCounters;
/*for texture object*/
hipTextureDesc _texDesc;
hipResourceDesc _resDesc;
hipTextureObject_t _texVectorX;
int _numThreadsPerBlock;
int _numThreadBlocks;
};
template<typename IntType, typename ValueType, typename CSRGraphType,
typename VecType>
class lightSpMVCSRKernelBLAS {
public:
lightSpMVCSRKernelBLAS() {
/*allocate space*/
_cudaRowCounters.resize(1);
/*specify the texture object parameters*/
_texVectorX = 0;
memset(&_texDesc, 0, sizeof(_texDesc));
_texDesc.addressMode[0] = hipAddressModeClamp;
_texDesc.addressMode[1] = hipAddressModeClamp;
_texDesc.filterMode = hipFilterModePoint;
_texDesc.readMode = hipReadModeElementType;
/*clear*/
memset(&_resDesc, 0, sizeof(_resDesc));
/*get GPU information*/
int device;
hipGetDevice(&device);
CudaCheckError();
/*get the device property*/
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, device);
_numThreadsPerBlock = prop.maxThreadsPerBlock;
_numThreadBlocks = prop.multiProcessorCount
* (prop.maxThreadsPerMultiProcessor / _numThreadsPerBlock);
//cerr << _numThreadsPerBlock << " " << _numThreadBlocks << endl;
}
inline void convert(const CSRGraphType& graph, const ValueType dummy) {
/*do nothing*/
}
inline void spmv(const CSRGraphType& graph, const VecType& x, VecType& y,
const float alpha, const float beta) {
/*initialize the row counter*/
_cudaRowCounters[0] = 0;
/*texture object*/
_resDesc.resType = hipResourceTypeLinear;
_resDesc.res.linear.desc = hipCreateChannelDesc(32, 0, 0, 0,
hipChannelFormatKindFloat);
_resDesc.res.linear.devPtr = (void*) thrust::raw_pointer_cast(x.data());
_resDesc.res.linear.sizeInBytes = x.size() * sizeof(float);
hipCreateTextureObject(&_texVectorX, &_resDesc, &_texDesc, NULL);
CudaCheckError();
int meanElementsPerRow = rint(
(double) graph.num_entries / graph.num_rows);
/*invoke the kernel*/
if (meanElementsPerRow <= 2) {
hipLaunchKernelGGL(( lightspmv::csr32DynamicWarpBLAS<float, 2,
MAX_NUM_THREADS_PER_BLOCK / 2>), dim3(_numThreadBlocks),
dim3( _numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()), alpha, beta);
} else if (meanElementsPerRow <= 4) {
hipLaunchKernelGGL(( lightspmv::csr32DynamicWarpBLAS<float, 4,
MAX_NUM_THREADS_PER_BLOCK / 4>), dim3(_numThreadBlocks),
dim3( _numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()), alpha, beta);
} else if (meanElementsPerRow <= 64) {
hipLaunchKernelGGL(( lightspmv::csr32DynamicWarpBLAS<float, 8,
MAX_NUM_THREADS_PER_BLOCK / 8>), dim3(_numThreadBlocks),
dim3( _numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()), alpha, beta);
} else {
hipLaunchKernelGGL(( lightspmv::csr32DynamicWarpBLAS<float, 32,
MAX_NUM_THREADS_PER_BLOCK / 32>), dim3(_numThreadBlocks),
dim3( _numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()), alpha, beta);
}
/*synchronize*/
hipDeviceSynchronize();
}
inline void spmv(const CSRGraphType& graph, const VecType& x, VecType& y,
const double alpha, const double beta) {
/*initialize the row counter*/
_cudaRowCounters[0] = 0;
_resDesc.resType = hipResourceTypeLinear;
_resDesc.res.linear.desc = hipCreateChannelDesc(32, 32, 0, 0,
hipChannelFormatKindSigned);
_resDesc.res.linear.devPtr = (void*) thrust::raw_pointer_cast(x.data());
_resDesc.res.linear.sizeInBytes = x.size() * sizeof(double);
hipCreateTextureObject(&_texVectorX, &_resDesc, &_texDesc, NULL);
CudaCheckError();
/*invoke the kernel*/
int meanElementsPerRow = rint(
(double) graph.num_entries / graph.num_rows);
/*invoke the kernel*/
if (meanElementsPerRow <= 2) {
hipLaunchKernelGGL(( lightspmv::csr64DynamicWarpBLAS<double, 2,
MAX_NUM_THREADS_PER_BLOCK / 2>), dim3(_numThreadBlocks),
dim3( _numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()),
thrust::raw_pointer_cast(y.data()), alpha, beta);
} else if (meanElementsPerRow <= 4) {
hipLaunchKernelGGL(( lightspmv::csr64DynamicWarpBLAS<double, 4,
MAX_NUM_THREADS_PER_BLOCK / 4>), dim3(_numThreadBlocks),
dim3( _numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()),
thrust::raw_pointer_cast(y.data()), alpha, beta);
} else if (meanElementsPerRow <= 64) {
hipLaunchKernelGGL(( lightspmv::csr64DynamicWarpBLAS<double, 8,
MAX_NUM_THREADS_PER_BLOCK / 8>), dim3(_numThreadBlocks),
dim3( _numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()),
thrust::raw_pointer_cast(y.data()), alpha, beta);
} else {
hipLaunchKernelGGL(( lightspmv::csr64DynamicWarpBLAS<double, 32,
MAX_NUM_THREADS_PER_BLOCK / 32>), dim3(_numThreadBlocks),
dim3( _numThreadsPerBlock), 0, 0,
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()),
thrust::raw_pointer_cast(y.data()), alpha, beta);
}
/*synchronize*/
hipDeviceSynchronize();
}
private:
cusp::array1d<uint32_t, cusp::device_memory> _cudaRowCounters;
/*for texture object*/
hipTextureDesc _texDesc;
hipResourceDesc _resDesc;
hipTextureObject_t _texVectorX;
int _numThreadsPerBlock;
int _numThreadBlocks;
};
struct lightSpmvOptions {
lightSpmvOptions() {
/*default settings*/
_gpuId = 0;
_kernel = 0;
_nrepeats = 10;
_doublePrecision = false;
}
/*variables*/
int _gpuId;
int _kernel;
int _nrepeats;
string _graphFileName; /*adjacency matrix of the graph stored as a sparse matrix*/
bool _doublePrecision; /*use double-precision floating point*/
vector<int> _gpus;
void printUsage() {
cerr << "PageRankCuda lightspmv [options]" << endl << "Options: "
<< endl << "\t-i <str> (sparse matrix file name)" << endl
<< "\t-k <int> (which sparse matrix-vector multiplication kernel to use, default="
<< _kernel << ")" << endl << "\t 0: formula Y = Ax" << endl
<< "\t 1: formula Y = aAx + bY" << endl
<< "\t-d <int> (use double-precision floating point, default="
<< _doublePrecision << ")" << endl
<< "\t-r <int> (number of repeated runs, default=" << _nrepeats << ")" << endl
<< "\t-g <int> (GPU index to use, default=" << _gpuId << ")"
<< endl << endl;
}
bool parseArgs(int argc, char* argv[]) {
int c;
/*GPU information*/
int count;
hipDeviceProp_t prop;
hipGetDeviceCount(&count);
CudaCheckError();
#if defined(HAVE_SM_35)
cerr << "Require GPUs with compute capability >= 3.5" << endl;
#else
cerr << "Require GPUs with compute capability >= 3.0" << endl;
#endif
/*check the compute capability of GPUs*/
for (int i = 0; i < count; ++i) {
hipGetDeviceProperties(&prop, i);
#if defined(HAVE_SM_35)
if ((prop.major * 10 + prop.minor) >= 35) {
#else
if ((prop.major * 10 + prop.minor) >= 30) {
#endif
cerr << "GPU " << _gpus.size() << ": " << prop.name
<< " (capability " << prop.major << "." << prop.minor
<< ")" << endl;
/*save the GPU*/
_gpus.push_back(i);
}
}
if (_gpus.size() == 0) {
cerr << "No qualified CUDA-enabled GPU is available" << endl;
return false;
}
cerr << "Number of qualified GPUs: " << _gpus.size() << endl;
/*parse parameters*/
while ((c = getopt(argc, argv, "i:k:d:r:g:")) != -1) {
switch (c) {
case 'i':
_graphFileName = optarg;
break;
case 'k':
_kernel = atoi(optarg) ? 1 : 0;
break;
case 'g':
_gpuId = atoi(optarg);
break;
case 'r':
_nrepeats = atoi(optarg);
if(_nrepeats < 1){
_nrepeats = 1;
}
break;
case 'd':
_doublePrecision = atoi(optarg) == 0 ? false : true;
break;
default:
cerr << "Unknown command: " << optarg << endl;
return false;
}
}
/*check the file name*/
if (_graphFileName.length() == 0) {
cerr << "Graph must be given" << endl;
return false;
}
/*check GPU ID*/
if (_gpuId >= (int) _gpus.size()) {
_gpuId = _gpus.size() - 1;
}
if (_gpuId < 0) {
_gpuId = 0;
}
return true;
}
};
int main_lightspmv_pagerank(int argc, char* argv[]) {
lightSpmvOptions options;
/*parse parameters*/
if (options.parseArgs(argc, argv) == false) {
options.printUsage();
return -1;
}
/*set the GPU device*/
hipSetDevice(options._gpus[options._gpuId]);
CudaCheckError();
/*perform SpMV*/
bool ret = false;
switch (options._kernel) {
case 0:
if (options._doublePrecision) {
/*using double precision*/
ret = pageRank<uint32_t, double, cusp::device_memory,
lightSpMVCSRKernel<uint32_t, double,
cusp::csr_matrix<uint32_t, double,
cusp::device_memory>,
cusp::array1d<double, cusp::device_memory> > >(
options._graphFileName.c_str(), options._nrepeats);
} else {
/*using single precision*/
ret = pageRank<uint32_t, float, cusp::device_memory,
lightSpMVCSRKernel<uint32_t, float,
cusp::csr_matrix<uint32_t, float,
cusp::device_memory>,
cusp::array1d<float, cusp::device_memory> > >(
options._graphFileName.c_str(), options._nrepeats);
}
break;
case 1:
if (options._doublePrecision) {
/*using double precision*/
ret = pageRank<uint32_t, double, cusp::device_memory,
lightSpMVCSRKernelBLAS<uint32_t, double,
cusp::csr_matrix<uint32_t, double,
cusp::device_memory>,
cusp::array1d<double, cusp::device_memory> > >(
options._graphFileName.c_str(), options._nrepeats);
} else {
/*using single precision*/
ret = pageRank<uint32_t, float, cusp::device_memory,
lightSpMVCSRKernelBLAS<uint32_t, float,
cusp::csr_matrix<uint32_t, float,
cusp::device_memory>,
cusp::array1d<float, cusp::device_memory> > >(
options._graphFileName.c_str(), options._nrepeats);
}
break;
}
return ret ? 0 : -1;
}
|
7c1aec552a69110db3e8f5fb31d319c4c451a7a6.cu
|
/*
* PageRankLightSpMV.cu
*
* Created on: May 29, 2015
* Author: Yongchao Liu
* Affiliation: School of Computational Science & Engineering, Georgia Institute of Technology
* Email: [email protected]
*/
#include "PageRank.h"
#include "LightSpMVCore.h"
/*formula Y = AX*/
template<typename IntType, typename ValueType, typename CSRGraphType,
typename VecType>
class lightSpMVCSRKernel {
public:
lightSpMVCSRKernel() {
/*allocate space*/
_cudaRowCounters.resize(1);
/*specify the texture object parameters*/
_texVectorX = 0;
memset(&_texDesc, 0, sizeof(_texDesc));
_texDesc.addressMode[0] = cudaAddressModeClamp;
_texDesc.addressMode[1] = cudaAddressModeClamp;
_texDesc.filterMode = cudaFilterModePoint;
_texDesc.readMode = cudaReadModeElementType;
/*clear*/
memset(&_resDesc, 0, sizeof(_resDesc));
/*get GPU information*/
int device;
cudaGetDevice(&device);
CudaCheckError();
/*get the device property*/
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, device);
_numThreadsPerBlock = prop.maxThreadsPerBlock;
_numThreadBlocks = prop.multiProcessorCount
* (prop.maxThreadsPerMultiProcessor / _numThreadsPerBlock);
//cerr << _numThreadsPerBlock << " " << _numThreadBlocks << endl;
}
inline void convert(const CSRGraphType& graph, const ValueType dummy) {
/*do nothing*/
}
inline void spmv(const CSRGraphType& graph, const VecType& x, VecType& y,
const float alpha, const float beta) {
/*initialize the row counter*/
_cudaRowCounters[0] = 0;
/*texture object*/
_resDesc.resType = cudaResourceTypeLinear;
_resDesc.res.linear.desc = cudaCreateChannelDesc(32, 0, 0, 0,
cudaChannelFormatKindFloat);
_resDesc.res.linear.devPtr = (void*) thrust::raw_pointer_cast(x.data());
_resDesc.res.linear.sizeInBytes = x.size() * sizeof(float);
cudaCreateTextureObject(&_texVectorX, &_resDesc, &_texDesc, NULL);
CudaCheckError();
int meanElementsPerRow = rint(
(double) graph.num_entries / graph.num_rows);
/*invoke the kernel*/
if (meanElementsPerRow <= 2) {
lightspmv::csr32DynamicWarp<float, 2, MAX_NUM_THREADS_PER_BLOCK / 2><<<
_numThreadBlocks, _numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
} else if (meanElementsPerRow <= 4) {
lightspmv::csr32DynamicWarp<float, 4, MAX_NUM_THREADS_PER_BLOCK / 4><<<
_numThreadBlocks, _numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
} else if (meanElementsPerRow <= 64) {
lightspmv::csr32DynamicWarp<float, 8, MAX_NUM_THREADS_PER_BLOCK / 8><<<
_numThreadBlocks, _numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
} else {
lightspmv::csr32DynamicWarp<float, 32,
MAX_NUM_THREADS_PER_BLOCK / 32><<<_numThreadBlocks,
_numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
}
/*synchronize*/
cudaDeviceSynchronize();
}
inline void spmv(const CSRGraphType& graph, const VecType& x, VecType& y,
const double alpha, const double beta) {
/*initialize the row counter*/
_cudaRowCounters[0] = 0;
_resDesc.resType = cudaResourceTypeLinear;
_resDesc.res.linear.desc = cudaCreateChannelDesc(32, 32, 0, 0,
cudaChannelFormatKindSigned);
_resDesc.res.linear.devPtr = (void*) thrust::raw_pointer_cast(x.data());
_resDesc.res.linear.sizeInBytes = x.size() * sizeof(double);
cudaCreateTextureObject(&_texVectorX, &_resDesc, &_texDesc, NULL);
CudaCheckError();
/*invoke the kernel*/
int meanElementsPerRow = rint(
(double) graph.num_entries / graph.num_rows);
/*invoke the kernel*/
if (meanElementsPerRow <= 2) {
lightspmv::csr64DynamicWarp<double, 2, MAX_NUM_THREADS_PER_BLOCK / 2><<<
_numThreadBlocks, _numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
} else if (meanElementsPerRow <= 4) {
lightspmv::csr64DynamicWarp<double, 4, MAX_NUM_THREADS_PER_BLOCK / 4><<<
_numThreadBlocks, _numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
} else if (meanElementsPerRow <= 64) {
lightspmv::csr64DynamicWarp<double, 8, MAX_NUM_THREADS_PER_BLOCK / 8><<<
_numThreadBlocks, _numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
} else {
lightspmv::csr64DynamicWarp<double, 32,
MAX_NUM_THREADS_PER_BLOCK / 32><<<_numThreadBlocks,
_numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()));
}
/*synchronize*/
cudaDeviceSynchronize();
}
private:
cusp::array1d<uint32_t, cusp::device_memory> _cudaRowCounters;
/*for texture object*/
cudaTextureDesc _texDesc;
cudaResourceDesc _resDesc;
cudaTextureObject_t _texVectorX;
int _numThreadsPerBlock;
int _numThreadBlocks;
};
template<typename IntType, typename ValueType, typename CSRGraphType,
typename VecType>
class lightSpMVCSRKernelBLAS {
public:
lightSpMVCSRKernelBLAS() {
/*allocate space*/
_cudaRowCounters.resize(1);
/*specify the texture object parameters*/
_texVectorX = 0;
memset(&_texDesc, 0, sizeof(_texDesc));
_texDesc.addressMode[0] = cudaAddressModeClamp;
_texDesc.addressMode[1] = cudaAddressModeClamp;
_texDesc.filterMode = cudaFilterModePoint;
_texDesc.readMode = cudaReadModeElementType;
/*clear*/
memset(&_resDesc, 0, sizeof(_resDesc));
/*get GPU information*/
int device;
cudaGetDevice(&device);
CudaCheckError();
/*get the device property*/
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, device);
_numThreadsPerBlock = prop.maxThreadsPerBlock;
_numThreadBlocks = prop.multiProcessorCount
* (prop.maxThreadsPerMultiProcessor / _numThreadsPerBlock);
//cerr << _numThreadsPerBlock << " " << _numThreadBlocks << endl;
}
inline void convert(const CSRGraphType& graph, const ValueType dummy) {
/*do nothing*/
}
inline void spmv(const CSRGraphType& graph, const VecType& x, VecType& y,
const float alpha, const float beta) {
/*initialize the row counter*/
_cudaRowCounters[0] = 0;
/*texture object*/
_resDesc.resType = cudaResourceTypeLinear;
_resDesc.res.linear.desc = cudaCreateChannelDesc(32, 0, 0, 0,
cudaChannelFormatKindFloat);
_resDesc.res.linear.devPtr = (void*) thrust::raw_pointer_cast(x.data());
_resDesc.res.linear.sizeInBytes = x.size() * sizeof(float);
cudaCreateTextureObject(&_texVectorX, &_resDesc, &_texDesc, NULL);
CudaCheckError();
int meanElementsPerRow = rint(
(double) graph.num_entries / graph.num_rows);
/*invoke the kernel*/
if (meanElementsPerRow <= 2) {
lightspmv::csr32DynamicWarpBLAS<float, 2,
MAX_NUM_THREADS_PER_BLOCK / 2><<<_numThreadBlocks,
_numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()), alpha, beta);
} else if (meanElementsPerRow <= 4) {
lightspmv::csr32DynamicWarpBLAS<float, 4,
MAX_NUM_THREADS_PER_BLOCK / 4><<<_numThreadBlocks,
_numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()), alpha, beta);
} else if (meanElementsPerRow <= 64) {
lightspmv::csr32DynamicWarpBLAS<float, 8,
MAX_NUM_THREADS_PER_BLOCK / 8><<<_numThreadBlocks,
_numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()), alpha, beta);
} else {
lightspmv::csr32DynamicWarpBLAS<float, 32,
MAX_NUM_THREADS_PER_BLOCK / 32><<<_numThreadBlocks,
_numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()), alpha, beta);
}
/*synchronize*/
		cudaDeviceSynchronize();
		/*release the texture object created for this SpMV call to avoid leaking it on repeated calls*/
		cudaDestroyTextureObject(_texVectorX);
}
inline void spmv(const CSRGraphType& graph, const VecType& x, VecType& y,
const double alpha, const double beta) {
/*initialize the row counter*/
_cudaRowCounters[0] = 0;
_resDesc.resType = cudaResourceTypeLinear;
_resDesc.res.linear.desc = cudaCreateChannelDesc(32, 32, 0, 0,
cudaChannelFormatKindSigned);
_resDesc.res.linear.devPtr = (void*) thrust::raw_pointer_cast(x.data());
_resDesc.res.linear.sizeInBytes = x.size() * sizeof(double);
cudaCreateTextureObject(&_texVectorX, &_resDesc, &_texDesc, NULL);
CudaCheckError();
/*invoke the kernel*/
int meanElementsPerRow = rint(
(double) graph.num_entries / graph.num_rows);
/*invoke the kernel*/
if (meanElementsPerRow <= 2) {
lightspmv::csr64DynamicWarpBLAS<double, 2,
MAX_NUM_THREADS_PER_BLOCK / 2><<<_numThreadBlocks,
_numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()),
thrust::raw_pointer_cast(y.data()), alpha, beta);
} else if (meanElementsPerRow <= 4) {
lightspmv::csr64DynamicWarpBLAS<double, 4,
MAX_NUM_THREADS_PER_BLOCK / 4><<<_numThreadBlocks,
_numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()),
thrust::raw_pointer_cast(y.data()), alpha, beta);
} else if (meanElementsPerRow <= 64) {
lightspmv::csr64DynamicWarpBLAS<double, 8,
MAX_NUM_THREADS_PER_BLOCK / 8><<<_numThreadBlocks,
_numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()),
thrust::raw_pointer_cast(y.data()), alpha, beta);
} else {
lightspmv::csr64DynamicWarpBLAS<double, 32,
MAX_NUM_THREADS_PER_BLOCK / 32><<<_numThreadBlocks,
_numThreadsPerBlock>>>(
thrust::raw_pointer_cast(_cudaRowCounters.data()),
graph.num_rows, graph.num_cols,
thrust::raw_pointer_cast(graph.row_offsets.data()),
thrust::raw_pointer_cast(graph.column_indices.data()),
thrust::raw_pointer_cast(graph.values.data()), _texVectorX,
thrust::raw_pointer_cast(y.data()),
thrust::raw_pointer_cast(y.data()), alpha, beta);
}
/*synchronize*/
		cudaDeviceSynchronize();
		/*release the texture object created for this SpMV call to avoid leaking it on repeated calls*/
		cudaDestroyTextureObject(_texVectorX);
}
private:
cusp::array1d<uint32_t, cusp::device_memory> _cudaRowCounters;
/*for texture object*/
cudaTextureDesc _texDesc;
cudaResourceDesc _resDesc;
cudaTextureObject_t _texVectorX;
int _numThreadsPerBlock;
int _numThreadBlocks;
};
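/* Illustrative sketch (not part of the original LightSpMV sources): both kernel
 * classes above pick the number of threads that cooperate on one CSR row from
 * the mean number of nonzeros per row. Factored out, the dispatch rule used by
 * spmv() is: */
static inline int lightSpmvThreadsPerRowSketch(int meanElementsPerRow) {
	if (meanElementsPerRow <= 2) {
		return 2;
	} else if (meanElementsPerRow <= 4) {
		return 4;
	} else if (meanElementsPerRow <= 64) {
		return 8;
	}
	return 32; /* a full warp per row for denser matrices */
}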
struct lightSpmvOptions {
lightSpmvOptions() {
/*default settings*/
_gpuId = 0;
_kernel = 0;
_nrepeats = 10;
_doublePrecision = false;
}
/*variables*/
int _gpuId;
int _kernel;
int _nrepeats;
string _graphFileName; /*adjacency matrix of the graph stored in sparse matrix*/
bool _doublePrecision; /*use single precision*/
vector<int> _gpus;
void printUsage() {
cerr << "PageRankCuda lightspmv [options]" << endl << "Options: "
<< endl << "\t-i <str> (sparse matrix file name)" << endl
<< "\t-k <int> (which sparse matrix-vector multiplication kernel to use, default="
<< _kernel << ")" << endl << "\t 0: formula Y = Ax" << endl
<< "\t 1: formula Y = aAx + bY" << endl
<< "\t-d <int> (use double-precision floating point, default="
<< _doublePrecision << ")" << endl
<< "\t-r <int> (number of repeated runs, default=" << _nrepeats << ")" << endl
<< "\t-g <int> (GPU index to use, default=" << _gpuId << ")"
<< endl << endl;
}
bool parseArgs(int argc, char* argv[]) {
int c;
/*GPU information*/
int count;
cudaDeviceProp prop;
cudaGetDeviceCount(&count);
CudaCheckError();
#if defined(HAVE_SM_35)
cerr << "Require GPUs with compute capability >= 3.5" << endl;
#else
cerr << "Require GPUs with compute capability >= 3.0" << endl;
#endif
/*check the compute capability of GPUs*/
for (int i = 0; i < count; ++i) {
cudaGetDeviceProperties(&prop, i);
#if defined(HAVE_SM_35)
if ((prop.major * 10 + prop.minor) >= 35) {
#else
if ((prop.major * 10 + prop.minor) >= 30) {
#endif
cerr << "GPU " << _gpus.size() << ": " << prop.name
<< " (capability " << prop.major << "." << prop.minor
<< ")" << endl;
/*save the GPU*/
_gpus.push_back(i);
}
}
if (_gpus.size() == 0) {
cerr << "No qualified CUDA-enabled GPU is available" << endl;
return false;
}
cerr << "Number of qualified GPUs: " << _gpus.size() << endl;
/*parse parameters*/
		while ((c = getopt(argc, argv, "i:k:r:d:g:")) != -1) {
switch (c) {
case 'i':
_graphFileName = optarg;
break;
case 'k':
_kernel = atoi(optarg) ? 1 : 0;
break;
case 'g':
_gpuId = atoi(optarg);
break;
case 'r':
_nrepeats = atoi(optarg);
if(_nrepeats < 1){
_nrepeats = 1;
}
break;
case 'd':
_doublePrecision = atoi(optarg) == 0 ? false : true;
break;
default:
cerr << "Unknown command: " << optarg << endl;
return false;
}
}
/*check the file name*/
if (_graphFileName.length() == 0) {
cerr << "Graph must be given" << endl;
return false;
}
/*check GPU ID*/
if (_gpuId >= (int) _gpus.size()) {
_gpuId = _gpus.size() - 1;
}
if (_gpuId < 0) {
_gpuId = 0;
}
return true;
}
};
int main_lightspmv_pagerank(int argc, char* argv[]) {
lightSpmvOptions options;
/*parse parameters*/
if (options.parseArgs(argc, argv) == false) {
options.printUsage();
return -1;
}
/*set the GPU device*/
cudaSetDevice(options._gpus[options._gpuId]);
CudaCheckError();
/*perform SpMV*/
bool ret = false;
switch (options._kernel) {
case 0:
if (options._doublePrecision) {
/*using double precision*/
ret = pageRank<uint32_t, double, cusp::device_memory,
lightSpMVCSRKernel<uint32_t, double,
cusp::csr_matrix<uint32_t, double,
cusp::device_memory>,
cusp::array1d<double, cusp::device_memory> > >(
options._graphFileName.c_str(), options._nrepeats);
} else {
/*using single precision*/
ret = pageRank<uint32_t, float, cusp::device_memory,
lightSpMVCSRKernel<uint32_t, float,
cusp::csr_matrix<uint32_t, float,
cusp::device_memory>,
cusp::array1d<float, cusp::device_memory> > >(
options._graphFileName.c_str(), options._nrepeats);
}
break;
case 1:
if (options._doublePrecision) {
/*using double precision*/
ret = pageRank<uint32_t, double, cusp::device_memory,
lightSpMVCSRKernelBLAS<uint32_t, double,
cusp::csr_matrix<uint32_t, double,
cusp::device_memory>,
cusp::array1d<double, cusp::device_memory> > >(
options._graphFileName.c_str(), options._nrepeats);
} else {
/*using single precision*/
ret = pageRank<uint32_t, float, cusp::device_memory,
lightSpMVCSRKernelBLAS<uint32_t, float,
cusp::csr_matrix<uint32_t, float,
cusp::device_memory>,
cusp::array1d<float, cusp::device_memory> > >(
options._graphFileName.c_str(), options._nrepeats);
}
break;
}
return ret ? 0 : -1;
}
|
45bc6e57d4c25709edd2ce2a357a385a140a5a6b.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <getopt.h>
#include "utils.h"
#define MAX_ITERS 10
#define MAX_SKIP 10
#define THREADS 1024
#define BLOCKS 4
#define MAX_MSG_SIZE 32 * 1024 * 1024
#define UNROLL 2
__global__ void bw(volatile double *data_d, volatile double *remote_d,
volatile unsigned int *counter_d, int len, int pe, int iter, int skip,
double *bw_result) {
int u, i, j, tid, slice;
unsigned int counter;
long long int start = 0, stop = 0;
double time = 0;
int threads = gridDim.x * blockDim.x;
tid = blockIdx.x * blockDim.x + threadIdx.x;
slice = UNROLL * threads;
for (i = 0; i < (iter + skip); i++) {
if (i == skip) {
nvshmem_quiet();
start = clock64();
}
for (j = 0; j < len - slice; j += slice) {
for (u = 0; u < UNROLL; ++u) {
int idx = j + u * threads + tid;
*(remote_d + idx) = *(data_d + idx);
}
__syncthreads();
}
for (u = 0; u < UNROLL; ++u) {
int idx = j + u * threads + tid;
if (idx < len) *(remote_d + idx) = *(data_d + idx);
}
// synchronizing across blocks
__syncthreads();
if (!threadIdx.x) {
__threadfence();
counter = atomicInc((unsigned int *)counter_d, UINT_MAX);
if (counter == (gridDim.x * (i + 1) - 1)) {
*(counter_d + 1) += 1;
}
while (*(counter_d + 1) != i + 1)
;
}
__syncthreads();
}
// synchronizing across blocks
__syncthreads();
if (!threadIdx.x) {
__threadfence();
counter = atomicInc((unsigned int *)counter_d, UINT_MAX);
if (counter == (gridDim.x * (i + 1) - 1)) {
nvshmem_quiet();
*(counter_d + 1) += 1;
}
while (*(counter_d + 1) != i + 1)
;
}
__syncthreads();
stop = clock64();
time = (stop - start);
if (!threadIdx.x && !blockIdx.x) {
*bw_result = ((float)iter * (float)len * sizeof(double) * clockrate) / ((time / 1000) * 1024 * 1024 * 1024);
}
}
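/* Host-side restatement of the bandwidth formula used in bw() above (sketch,
 * assuming clockrate is the GPU clock in kHz, which is how the kernel uses it):
 * GB/s = bytes moved / elapsed seconds, where seconds = cycles / (kHz * 1000). */
static double bw_gbps_sketch(double cycles, int iters, int len_doubles, double clock_khz) {
    double bytes = (double) iters * (double) len_doubles * sizeof(double);
    double seconds = cycles / (clock_khz * 1000.0);
    return bytes / (seconds * 1024.0 * 1024.0 * 1024.0);
}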
int main(int argc, char *argv[]) {
int mype, npes;
double *data_d = NULL, *remote_d;
unsigned int *counter_d;
int max_blocks = BLOCKS, max_threads = THREADS;
int iter = MAX_ITERS;
int skip = MAX_SKIP;
int max_msg_size = MAX_MSG_SIZE;
int array_size, i;
void **h_tables;
uint64_t *h_size_arr;
double *h_bw;
init_wrapper(&argc, &argv);
mype = nvshmem_my_pe();
npes = nvshmem_n_pes();
if (npes != 2) {
fprintf(stderr, "This test requires exactly two processes \n");
goto finalize;
}
while (1) {
int c;
c = getopt(argc, argv, "c:t:h");
if (c == -1) break;
switch (c) {
case 'c':
max_blocks = strtol(optarg, NULL, 0);
break;
case 't':
max_threads = strtol(optarg, NULL, 0);
break;
default:
case 'h':
printf("-c [CTAs] -t [THREADS] \n");
goto finalize;
}
}
array_size = floor(log2((float)max_msg_size)) + 1;
alloc_tables(&h_tables, 2, array_size);
h_size_arr = (uint64_t *)h_tables[0];
h_bw = (double *)h_tables[1];
data_d = (double *)nvshmem_malloc(max_msg_size);
CUDA_CHECK(hipMemset(data_d, 0, max_msg_size));
remote_d = (double *)nvshmem_ptr((void *)data_d, !mype);
if (remote_d == NULL) {
fprintf(stderr, "peer memory not accessible for LD/ST \n");
goto finalize;
}
CUDA_CHECK(hipMalloc((void **)&counter_d, sizeof(unsigned int) * 2));
CUDA_CHECK(hipMemset(counter_d, 0, sizeof(unsigned int) * 2));
CUDA_CHECK(hipDeviceSynchronize());
if (mype == 0) {
printf("Size(Bytes) \t\t BWGB/sec\n");
fflush(stdout);
}
int size;
i = 0;
if (mype == 0) {
for (size = 1024; size <= MAX_MSG_SIZE; size *= 2) {
int blocks = max_blocks, threads = max_threads;
h_size_arr[i] = size;
CUDA_CHECK(hipMemset(counter_d, 0, sizeof(unsigned int) * 2));
hipLaunchKernelGGL(( bw), dim3(blocks), dim3(threads), 0, 0, data_d, remote_d, counter_d, size / sizeof(double), mype, iter,
skip, &h_bw[i]);
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
nvshmem_barrier_all();
i++;
}
} else {
for (size = 1024; size <= MAX_MSG_SIZE; size *= 2) {
nvshmem_barrier_all();
}
}
if (mype == 0) {
print_table("shmem_st_bw", "None", "size (Bytes)", "BW", "GB/sec", '+', h_size_arr, h_bw, i);
}
finalize:
if (data_d) nvshmem_free(data_d);
free_tables(h_tables, 2);
finalize_wrapper();
return 0;
}
|
45bc6e57d4c25709edd2ce2a357a385a140a5a6b.cu
|
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <getopt.h>
#include "utils.h"
#define MAX_ITERS 10
#define MAX_SKIP 10
#define THREADS 1024
#define BLOCKS 4
#define MAX_MSG_SIZE 32 * 1024 * 1024
#define UNROLL 2
__global__ void bw(volatile double *data_d, volatile double *remote_d,
volatile unsigned int *counter_d, int len, int pe, int iter, int skip,
double *bw_result) {
int u, i, j, tid, slice;
unsigned int counter;
long long int start = 0, stop = 0;
double time = 0;
int threads = gridDim.x * blockDim.x;
tid = blockIdx.x * blockDim.x + threadIdx.x;
slice = UNROLL * threads;
for (i = 0; i < (iter + skip); i++) {
if (i == skip) {
nvshmem_quiet();
start = clock64();
}
for (j = 0; j < len - slice; j += slice) {
for (u = 0; u < UNROLL; ++u) {
int idx = j + u * threads + tid;
*(remote_d + idx) = *(data_d + idx);
}
__syncthreads();
}
for (u = 0; u < UNROLL; ++u) {
int idx = j + u * threads + tid;
if (idx < len) *(remote_d + idx) = *(data_d + idx);
}
// synchronizing across blocks
__syncthreads();
if (!threadIdx.x) {
__threadfence();
counter = atomicInc((unsigned int *)counter_d, UINT_MAX);
if (counter == (gridDim.x * (i + 1) - 1)) {
*(counter_d + 1) += 1;
}
while (*(counter_d + 1) != i + 1)
;
}
__syncthreads();
}
// synchronizing across blocks
__syncthreads();
if (!threadIdx.x) {
__threadfence();
counter = atomicInc((unsigned int *)counter_d, UINT_MAX);
if (counter == (gridDim.x * (i + 1) - 1)) {
nvshmem_quiet();
*(counter_d + 1) += 1;
}
while (*(counter_d + 1) != i + 1)
;
}
__syncthreads();
stop = clock64();
time = (stop - start);
if (!threadIdx.x && !blockIdx.x) {
*bw_result = ((float)iter * (float)len * sizeof(double) * clockrate) / ((time / 1000) * 1024 * 1024 * 1024);
}
}
int main(int argc, char *argv[]) {
int mype, npes;
double *data_d = NULL, *remote_d;
unsigned int *counter_d;
int max_blocks = BLOCKS, max_threads = THREADS;
int iter = MAX_ITERS;
int skip = MAX_SKIP;
int max_msg_size = MAX_MSG_SIZE;
int array_size, i;
void **h_tables;
uint64_t *h_size_arr;
double *h_bw;
init_wrapper(&argc, &argv);
mype = nvshmem_my_pe();
npes = nvshmem_n_pes();
if (npes != 2) {
fprintf(stderr, "This test requires exactly two processes \n");
goto finalize;
}
while (1) {
int c;
c = getopt(argc, argv, "c:t:h");
if (c == -1) break;
switch (c) {
case 'c':
max_blocks = strtol(optarg, NULL, 0);
break;
case 't':
max_threads = strtol(optarg, NULL, 0);
break;
default:
case 'h':
printf("-c [CTAs] -t [THREADS] \n");
goto finalize;
}
}
array_size = floor(log2((float)max_msg_size)) + 1;
alloc_tables(&h_tables, 2, array_size);
h_size_arr = (uint64_t *)h_tables[0];
h_bw = (double *)h_tables[1];
data_d = (double *)nvshmem_malloc(max_msg_size);
CUDA_CHECK(cudaMemset(data_d, 0, max_msg_size));
remote_d = (double *)nvshmem_ptr((void *)data_d, !mype);
if (remote_d == NULL) {
fprintf(stderr, "peer memory not accessible for LD/ST \n");
goto finalize;
}
CUDA_CHECK(cudaMalloc((void **)&counter_d, sizeof(unsigned int) * 2));
CUDA_CHECK(cudaMemset(counter_d, 0, sizeof(unsigned int) * 2));
CUDA_CHECK(cudaDeviceSynchronize());
if (mype == 0) {
printf("Size(Bytes) \t\t BWGB/sec\n");
fflush(stdout);
}
int size;
i = 0;
if (mype == 0) {
for (size = 1024; size <= MAX_MSG_SIZE; size *= 2) {
int blocks = max_blocks, threads = max_threads;
h_size_arr[i] = size;
CUDA_CHECK(cudaMemset(counter_d, 0, sizeof(unsigned int) * 2));
bw<<<blocks, threads>>>(data_d, remote_d, counter_d, size / sizeof(double), mype, iter,
skip, &h_bw[i]);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
nvshmem_barrier_all();
i++;
}
} else {
for (size = 1024; size <= MAX_MSG_SIZE; size *= 2) {
nvshmem_barrier_all();
}
}
if (mype == 0) {
print_table("shmem_st_bw", "None", "size (Bytes)", "BW", "GB/sec", '+', h_size_arr, h_bw, i);
}
finalize:
if (data_d) nvshmem_free(data_d);
free_tables(h_tables, 2);
finalize_wrapper();
return 0;
}
|
800a3a55e9eeaf6ca91ca0a28ebb556773e4251c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "mttkrp_gpu.h"
#include <mpi.h>
#include <vector>
#define mpi_barrier() MPI_Barrier(MPI_COMM_WORLD);
inline hipError_t checkCuda(hipError_t result, int s){
if (result != hipSuccess) {
		fprintf(stderr, "CUDA Runtime Error at line %d : %s\n", s, hipGetErrorString(result));
assert(result == hipSuccess);
}
return result;
}
void cuda_timer_start(hipEvent_t start){
checkCuda(hipEventRecord(start), __LINE__);
}
void cuda_timer_stop(hipEvent_t start, hipEvent_t stop, float &mili){
checkCuda(hipEventRecord(stop), __LINE__);
hipEventSynchronize(stop);
checkCuda(hipEventElapsedTime(&mili, start, stop), __LINE__);
hipDeviceSynchronize();
}
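/* Usage sketch (illustrative only, not called anywhere in this file): the two
 * timer helpers above are meant to bracket a kernel launch or memory transfer
 * and return the elapsed time in milliseconds. */
static void cuda_timer_usage_sketch() {
    hipEvent_t start, stop;
    float mili = 0;
    checkCuda(hipEventCreate(&start), __LINE__);
    checkCuda(hipEventCreate(&stop), __LINE__);
    cuda_timer_start(start);
    /* ... launch a kernel or issue a hipMemcpy here ... */
    cuda_timer_stop(start, stop, mili);
    checkCuda(hipEventDestroy(start), __LINE__);
    checkCuda(hipEventDestroy(stop), __LINE__);
}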
// CUDA kernel call to do COO MTTKRP
__global__ void mttkrp_COO_kernel(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE nnz,
DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int x = gId >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
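/* Launch sketch (illustrative, not part of the original driver code): the COO
 * kernel above assigns one warp per nonzero (x = gId >> 5), so the launch has
 * to cover nnz * 32 threads in total. */
static void mttkrp_COO_launch_sketch(DTYPE *dVals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2,
    ITYPE nnz, DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, ITYPE mode, ITYPE R) {
    int threads = 128;
    int blocks = (int) (((long long) nnz * 32 + threads - 1) / threads);
    hipLaunchKernelGGL(( mttkrp_COO_kernel), dim3(blocks), dim3(threads), 0, 0, dVals, dInds0, dInds1, dInds2, nnz,
        dU0, dU1, dU2, mode, R);
    checkCuda(hipDeviceSynchronize(), __LINE__);
}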
// CUDA kernel call to do COO MTTKRP using loop
__global__ void mttkrp_COO_kernel_loop(DTYPE * const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE * const dInds2, const ITYPE nnz,
DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
//like PARTI
size_t num_loops_nnz = 1 * 32;
size_t const nnz_per_loop = gridDim.x * blockDim.x;
if(nnz > nnz_per_loop) {
num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5;
}
unsigned int x;
for(size_t nl=0; nl<num_loops_nnz; ++nl) {
x = (gId + nl * nnz_per_loop) >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
__syncthreads();
}
}
// CUDA kernel call to do COO MTTKRP 4D
__global__ void mttkrp_COO_kernel_4D(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE *dInds3,
ITYPE nnz, DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int x = gId >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
ITYPE idx3 = dInds3[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
// CUDA kernel call to do COO MTTKRP 4D using loop
__global__ void mttkrp_COO_kernel_4D_loop(DTYPE *const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE *const dInds2, ITYPE * const dInds3,
ITYPE nnz, DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, DTYPE * const dU3, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
//like PARTI
size_t num_loops_nnz = 1 * 32;
size_t const nnz_per_loop = gridDim.x * blockDim.x;
if(nnz > nnz_per_loop) {
num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5;
}
unsigned int x;
for(size_t nl=0; nl<num_loops_nnz; ++nl)
{
x = (gId + nl * nnz_per_loop) >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
ITYPE idx3 = dInds3[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
__syncthreads();
}
}
//no atomics because all 1 in HYB - COO
__global__ void mttkrp_HYB_COO_kernel(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE nnz,
DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int x = gId >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r];
dU0[idx0 * R + r] += tmp_val;
}
}
}
// CUDA kernel call to do COO MTTKRP using loop
__global__ void mttkrp_HYB_COO_kernel_loop(DTYPE * const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE * const dInds2, const ITYPE nnz,
DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
//like PARTI
size_t num_loops_nnz = 1 * 32;
size_t const nnz_per_loop = gridDim.x * blockDim.x;
if(nnz > nnz_per_loop) {
num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5;
}
unsigned int x;
for(size_t nl=0; nl<num_loops_nnz; ++nl) {
x = (gId + nl * nnz_per_loop) >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r];
dU0[idx0 * R + r] += tmp_val;
}
}
__syncthreads();
}
}
//no atomics because all 1 in HYB - COO
__global__ void mttkrp_HYB_COO_kernel_4D(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE *dInds3,
ITYPE nnz, DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int x = gId >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
ITYPE idx3 = dInds3[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r];
dU0[idx0 * R + r] += tmp_val;
}
}
}
// CUDA kernel call to do COO MTTKRP 4D using loop
__global__ void mttkrp_HYB_COO_kernel_4D_loop(DTYPE *const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE *const dInds2, ITYPE * const dInds3,
ITYPE nnz, DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, DTYPE * const dU3, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
//like PARTI
size_t num_loops_nnz = 1 * 32;
size_t const nnz_per_loop = gridDim.x * blockDim.x;
if(nnz > nnz_per_loop) {
num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5;
}
unsigned int x;
for(size_t nl=0; nl<num_loops_nnz; ++nl)
{
x = (gId + nl * nnz_per_loop) >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
ITYPE idx3 = dInds3[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r];
dU0[idx0 * R + r] += tmp_val;
}
}
__syncthreads();
}
}
__global__ void mttkrp_CSL_kernel(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = slc;//dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc];
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
tmp_val = 0;
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
unsigned int idx1 = dInds1[fbr];
unsigned int idx2 = dInds2[fbr];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r];
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
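/* Indexing sketch (illustrative): with warpPerSlice = 1 << logOfWPC warps
 * cooperating on each slice, the CSL/HCSR kernels decode
 *   slc    = globalThreadId / (32 * warpPerSlice)
 *   workId = (threadIdx.x % (32 * warpPerSlice)) / 32
 * so a launch must supply nSlices * warpPerSlice * 32 threads in total. */
static inline int hcsr_grid_size_sketch(ITYPE nSlices, int warpPerSlice, int blockDim) {
    long long totalThreads = (long long) nSlices * warpPerSlice * 32;
    return (int) ((totalThreads + blockDim - 1) / blockDim);
}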
__global__ void mttkrp_CSL_kernel_bin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc];
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
tmp_val = 0;
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
unsigned int idx1 = dInds1[fbr];
unsigned int idx2 = dInds2[fbr];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r];
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
// CSL kernel with loop like ParTI
__global__ void mttkrp_CSL_kernel_bin_loop(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE tmp_val;
//like PARTI
size_t num_loops_nnz = 1 * 32;
size_t const nnz_per_loop = gridDim.x * blockDim.x;
if(nSlices > nnz_per_loop) {
num_loops_nnz = ((nSlices + nnz_per_loop - 1) / nnz_per_loop) << 5;
}
for(size_t nl=0; nl<num_loops_nnz; ++nl) {
slc = (gId + nl * nnz_per_loop) >> 5;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc];
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
tmp_val = 0;
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
unsigned int idx1 = dInds1[fbr];
unsigned int idx2 = dInds2[fbr];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r];
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
__syncthreads();
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_CSL_kernel_hvyBin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
unsigned int laneId = threadIdx.x & 31;
unsigned int workId = threadIdx.x >> 5;
unsigned int slc = blockIdx.x >> logOfTPS;
unsigned int localBId = blockIdx.x & (TbPerSlc -1);
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
tmp_val = 0;
for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1]; fbr+=warpPerSlice){
unsigned int idx1 = dInds1[fbr];
unsigned int idx2 = dInds2[fbr];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r];
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
// HCSR MTTKRP : 16 WARP = 1 TB per slice
__global__ void mttkrp_HCSR_kernel_16WARP(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = tId >> 5; //(tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = blockIdx.x ;//gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE tmp = 0;
DTYPE tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
tmp_val = 0;
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx2 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU2[idx2 * R + r];
}
}
// unsigned int idx1 = dInds1[fbrPtr1[fbr]];
unsigned int idx1 = fbrIdx1[fbr];
for(unsigned int r=laneId; r<R; r+=32) {
tmp += tmp_val * dU1[idx1 * R + r] ; // C matrix
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
// CUDA kernel call to do HCSR MTTKRP for the first bin 1 WARP per slice
__global__ void mttkrp_HCSR_kernel_COO(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int slc = gId >> 5; // 5: minimum 1 WARP (2^5)
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
for (int fbr = fb_st; fbr < fb_end; fbr++){
tmp_val = 0;
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx2 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU2[idx2 * R + r];
}
}
unsigned int idx1 = fbrIdx1[fbr];
for(unsigned int r=laneId; r<R; r+=32) {
dU0[idx0 * R + r] += tmp_val * dU1[idx1 * R + r] ;
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_HCSR_kernel_smllBin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
// unsigned int slcPerTb = 16/warpPerSlice;
// unsigned int shSlc = slc & slcPerTb;
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
tmp_val = 0;
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx2 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU2[idx2 * R + r];
}
}
unsigned int idx1 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
for(unsigned int r=laneId; r<R; r+=32) {
tmp += tmp_val * dU1[idx1 * R + r] ;
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_HCSR_kernel_smllBin_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE outbuffer = 0, tmp_val = 0, outbuffer1 = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){
unsigned int idx1 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
outbuffer1 = 0;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx2 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx3 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU3[idx3 * R + r];
}
for(unsigned int r=laneId; r<R; r+=32)
outbuffer1 += tmp_val * dU2[idx2 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32)
outbuffer += outbuffer1 * dU1[idx1 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], outbuffer);
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_HCSR_kernel_hvyBin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
unsigned int laneId = threadIdx.x & 31;
unsigned int workId = threadIdx.x >> 5;
unsigned int slc = blockIdx.x >> logOfTPS;
unsigned int localBId = blockIdx.x & (TbPerSlc -1);
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1] ; fbr+=warpPerSlice){
tmp_val = 0;
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx2 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU2[idx2 * R + r];
}
}
unsigned int idx1 = fbrIdx1[fbr];//dInds1[fbrPtr1[fbr]];
for(unsigned int r=laneId; r<R; r+=32) {
tmp += tmp_val * dU1[idx1 * R + r] ;
// // atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_HCSR_kernel_hvyBin_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
unsigned int laneId = threadIdx.x & 31;
unsigned int workId = threadIdx.x >> 5;
unsigned int slc = blockIdx.x >> logOfTPS;
unsigned int localBId = blockIdx.x & (TbPerSlc -1);
    DTYPE outbuffer = 0, tmp_val = 0, outbuffer1 = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){
unsigned int idx1 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
outbuffer1 = 0;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx2 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx3 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU3[idx3 * R + r];
}
for(unsigned int r=laneId; r<R; r+=32)
outbuffer1 += tmp_val * dU2[idx2 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32)
outbuffer += outbuffer1 * dU1[idx1 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], outbuffer);
}
}
}
// CUDA fbr atomic sing slcLikeFbr
__global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbr = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
DTYPE tmp = 0, tmp_val;
if(fbr < nFibers - 1){
tmp_val = 0;
bool diffFiber = false;
unsigned int idx0;
for (int fr = 0; fr < fbrPerWarp && (fbr+fr) < (nFibers - 1); ++fr){
diffFiber = false;
unsigned int idx1 = fbrIdx1[fbr+fr];// dInds1[fbrPtr1[fbr]];
idx0 = fbrLikeSlcInds[fbr+fr];//slc;
tmp_val = 0;
for(unsigned int x = fbrPtr1[fbr+fr] + workId; x < fbrPtr1[fbr+fr+1]; x+=warpPerSlice) {
unsigned int idx2 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU2[idx2 * R + r]; //2MR
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp += tmp_val * dU1[idx1 * R + r] ; //2PR
}
if(fbrLikeSlcInds[fbr+fr] != fbrLikeSlcInds[fbr+fr+1]) {
diffFiber = true;
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp); //2PR
}
tmp = 0;
}
}
if(!diffFiber) {
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
// CUDA fbr atomic sing slcLikeFbr
__global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0,
DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbrS = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
DTYPE tmp = 0, tmp_val, tmp2= 0;
if(fbrS < nFibers - 1){
tmp_val = 0;
bool diffFiber = false;
unsigned int idx0;
for (int fr = 0; fr < fbrPerWarp && (fbrS+fr) < (nFibers - 1); ++fr){
diffFiber = false;
unsigned int idx1 = fbrIdx1[fbrS+fr];// dInds1[fbrPtr1[fbr]];
idx0 = fbrLikeSlcInds[fbrS+fr];//slc;
tmp = 0;
for (int fbr = fbrPtr1[fbrS+fr] + workId; fbr < fbrPtr1[fbrS+fr+1]; fbr+=warpPerSlice){
ITYPE idx2 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; x++) {
unsigned int idx3 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU3[idx3 * R + r]; //2MR
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp += tmp_val * dU2[idx2 * R + r] ;
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp2 += tmp * dU1[idx1 * R + r] ;
}
if(fbrLikeSlcInds[fbrS+fr] != fbrLikeSlcInds[fbrS+fr+1]) {
diffFiber = true;
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR
}
tmp2 = 0;
}
}
if(!diffFiber) {
for(unsigned int r=laneId; r<R; r+=32)
atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_smllBin_fbr_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
ITYPE slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc;
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
tmp_val = 0;
unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx1 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp); //2PR
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_smllBin_fbr_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE outbuffer = 0, tmp_val = 0, tmp = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc;
for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){
unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
tmp = 0;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx0 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx1 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU1[idx1 * R + r];
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] * dU3[idx3 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
unsigned int laneId = threadIdx.x & 31;
unsigned int workId = threadIdx.x >> 5;
unsigned int slc = blockIdx.x >> logOfTPS;
unsigned int localBId = blockIdx.x & (TbPerSlc -1);
    DTYPE outbuffer = 0, tmp_val = 0, tmp = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){
unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
tmp = 0;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx0 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx1 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU1[idx1 * R + r];
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] * dU3[idx3 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_smllBin_fbrS_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE tmp = 0, tmp_val, tmp2 = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx3 = dfbrIdx0[mappedSlc] ;//slc;
for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){
unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
tmp = 0;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx1 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx2 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU2[idx2 * R + r];
}
for(unsigned int r=laneId; r<R; r+=32)
tmp += tmp_val * dU1[idx1 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp2 = tmp * dU3[idx3 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_hvyBin_fbrS_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
unsigned int laneId = threadIdx.x & 31;
unsigned int workId = threadIdx.x >> 5;
unsigned int slc = blockIdx.x >> logOfTPS;
unsigned int localBId = blockIdx.x & (TbPerSlc -1);
DTYPE tmp = 0, tmp_val, tmp2 = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx3 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){
unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
tmp = 0;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx1 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx2 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU2[idx2 * R + r];
}
for(unsigned int r=laneId; r<R; r+=32)
tmp += tmp_val * dU1[idx1 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp2 = tmp * dU3[idx3 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR
}
}
}
}
// CUDA fbr atomic sing slcLikeFbr
__global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
DTYPE tmp = 0, tmp_val;
if(fbr < nFibers - 1){
tmp_val = 0;
unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
unsigned int idx2 = fbrLikeSlcInds[fbr];//slc;
for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) {
unsigned int idx1 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp); //2PR
}
}
}
// CUDA fbr atomic sing slcLikeFbr
__global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0,
DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
DTYPE tmp = 0, tmp_val, tmp2 = 0;
if(fbrS < nFibers - 1){
tmp = 0;
unsigned int idx2 = fbrLikeSlcInds[fbrS];//slc;
unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
unsigned int idx0 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx1 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR
// if(laneId == 0)
// printf("from GPU: (%d %d %d %d) - %f %f %f %f \n", idx0, idx1, idx2, idx3, dU0[idx0 * R] , dU1[idx1 * R], dU2[idx2 * R], dU3[idx3 * R]);
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] * dU3[idx3 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
// CUDA fbr atomic sing slcLikeFbr
__global__ void mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0,
DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
DTYPE tmp = 0, tmp_val, tmp2 = 0;
if(fbrS < nFibers - 1){
tmp = 0;
unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
unsigned int idx3 = fbrLikeSlcInds[fbrS];//slc;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
unsigned int idx1 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx2 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU2[idx2 * R + r] ; //2MR
}
for(unsigned int r=laneId; r<R; r+=32)
tmp += tmp_val * dU1[idx1 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp2 = tmp * dU3[idx3 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR
}
}
}
// CUDA fbr atomic sing slcLikeFbr
__global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_loop(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
//like PARTI
//hardcoded for 1 warp per nnz
size_t num_loops_fbr = 1 * 32;
size_t const fbr_per_loop = gridDim.x * blockDim.x;
if(nFibers > fbr_per_loop) {
num_loops_fbr = ((nFibers + fbr_per_loop - 1) / fbr_per_loop) << 5;
}
DTYPE tmp = 0, tmp_val;
unsigned int fbr;
for(size_t nl=0; nl<num_loops_fbr; ++nl) {
fbr = (gId + nl * fbr_per_loop) >> 5;
if(fbr < nFibers - 1){
tmp_val = 0;
unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
unsigned int idx2 = fbrLikeSlcInds[fbr];//slc;
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; x++) {
unsigned int idx1 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp); //2PR
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
ITYPE laneId = threadIdx.x & 31;
ITYPE workId = threadIdx.x >> 5;
ITYPE slc = blockIdx.x >> logOfTPS;
ITYPE localBId = blockIdx.x & (TbPerSlc -1);
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1]; fbr+=warpPerSlice){
tmp_val = 0;
unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx1 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU1[idx1 * R + r];
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_smllBin_all_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
ITYPE slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
// ITYPE slcPerTb = 16/warpPerSlice;
// ITYPE shSlc = slc & slcPerTb;
DTYPE tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc;
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
// for(unsigned int r=laneId; r<R; r+=32)
// tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx0 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp_val); //2MR
// atomicAdd(&dU0[idx0 * R + r], (tmp_val * vals[x]) );
}
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_hvyBin_all_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
ITYPE laneId = threadIdx.x & 31;
ITYPE workId = threadIdx.x >> 5;
ITYPE slc = blockIdx.x >> logOfTPS;
ITYPE localBId = blockIdx.x & (TbPerSlc -1);
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1]; fbr+=warpPerSlice){
tmp_val = 0;
unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ;
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx0 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
// atomicAdd(&dU0[idx0 * R + r], (tmp_val * vals[x]) );
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_smllBin_all_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE outbuffer = 0, tmp_val = 0, tmp = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc;
for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){
unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx3 = fbrIdx2[fbr];
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx0 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
}
}
// MI-HCSR MTTKRP kernel: heavy bins, all-atomic output updates (4-D)
__global__ void mttkrp_MIHCSR_kernel_hvyBin_all_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
unsigned int laneId = threadIdx.x & 31;
unsigned int workId = threadIdx.x >> 5;
unsigned int slc = blockIdx.x >> logOfTPS;
unsigned int localBId = blockIdx.x & (TbPerSlc -1);
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){
unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx3 = fbrIdx2[fbr];
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx0 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
}
}
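/* The fbrLvlPar kernels below parallelize over fibers instead of slices:
   fbrLikeSlcInds[fbr] maps each fiber back to the row of its owning slice, so
   no slice pointer walk is needed and each warp group (warpPerSlice warps)
   handles the nonzeros of a single fiber, still using atomicAdd on dU0. */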
// MI-HCSR MTTKRP kernel: fiber-level parallelism using fbrLikeSlcInds, all-atomic output updates (3-D)
__global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
DTYPE tmp = 0, tmp_val;
if(fbr < nFibers - 1){
tmp_val = 0;
unsigned int idx1 = fbrLikeSlcInds[fbr];//slc;
unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
// if(laneId == 0 && idx1 == 0)
// printf("GPU %d %d %f %f\n", idx1, idx2, dU1[idx1 * R], dU2[idx2 * R] );
for(unsigned int r=laneId; r<R; r+=32)
tmp = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR
for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) {
unsigned int idx0 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val = vals[x] * tmp;///dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //2MR
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
}
// MI-HCSR MTTKRP kernel: fiber-level parallelism using fbrLikeSlcInds, all-atomic output updates (4-D)
__global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0,
DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
DTYPE tmp = 0, tmp_val = 0;
if(fbrS < nFibers - 1){
tmp = 0;
unsigned int idx1 = fbrLikeSlcInds[fbrS];//slc;
unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx3 = fbrIdx2[fbr];
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx0 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
}
// MI-HCSR MTTKRP kernel: fiber-level parallelism with a ParTI-style grid-stride loop, all-atomic output updates (3-D)
__global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_loop(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE warpId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //
ITYPE blockId = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) //blockIdx.x ;//
//ParTI-style grid-stride loop: one warp per fiber
//(assumes warpPerSlice == 1)
size_t num_loops_fbr = 1 * 32;
size_t const fbr_per_loop = gridDim.x * blockDim.x;
if(nFibers > fbr_per_loop) {
num_loops_fbr = ((nFibers + fbr_per_loop - 1) / fbr_per_loop) << 5;
}
DTYPE tmp = 0, tmp_val;
unsigned int fbr;
for(size_t nl=0; nl<num_loops_fbr; ++nl) {
fbr = (gId + nl * fbr_per_loop) >> 5;
if(fbr < nFibers - 1){
tmp_val = 0;
unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
unsigned int idx1 = fbrLikeSlcInds[fbr];//slc;
for(unsigned int r=laneId; r<R; r+=32)
tmp = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR
for(unsigned int x = fbrPtr1[fbr] + warpId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) {
unsigned int idx0 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val = vals[x] * tmp;///dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //2MR
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
}
}
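/* Host-side drivers. Each driver allocates device buffers, copies the tensor
   and the dense factor matrices, configures the launch, runs the kernel(s)
   between cuda_timer_start/cuda_timer_stop, and copies the output factor back.
   The grid sizes used throughout are plain ceiling divisions, e.g. for one
   warp per nonzero:
       grid.x = (32 * nnz + BLOCKSIZE - 1) / BLOCKSIZE;
   (illustrative form of the expressions used below, not extra code). */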
int MTTKRP_COO_GPU(const Tensor &X, Matrix *U, const Options Opt){
//allocate and memcpy GPU memory
//Tensor
ITYPE mode = Opt.mode;
ITYPE R = Opt.R;
ITYPE *dInds0, *dInds1, *dInds2, *dInds3;
DTYPE *dVals;
ITYPE mode0 = X.modeOrder[0];
ITYPE mode1 = X.modeOrder[1];
ITYPE mode2 = X.modeOrder[2];
checkCuda(hipMalloc((void**) &dVals, X.totNnz * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dInds0, X.totNnz * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dInds1, X.totNnz * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dInds2, X.totNnz * sizeof(ITYPE)), 0);
checkCuda(hipMemcpy(dVals, &(X.vals[0]), X.totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds0, &(X.inds[mode0][0]), X.totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds1, &(X.inds[mode1][0]), X.totNnz * sizeof(ITYPE) ,hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds2, &(X.inds[mode2][0]), X.totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
// //Matrices
DTYPE *dU0, *dU1, *dU2, *dU3;
checkCuda(hipMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0);
hipMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(hipMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
if(X.ndims == 4){
ITYPE mode3 = X.modeOrder[3];
checkCuda(hipMalloc((void**) &dInds3, X.totNnz * sizeof(ITYPE)), 0);
checkCuda(hipMemcpy(dInds3, &(X.inds[mode3][0]), X.totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
}
// BLOCK and GRID
int BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float mili = 0;
bool useLoop = true;
// ParTI-style loop: fixed grid, loop inside the kernel
if(useLoop)
grid.x = 32768;
else
grid.x = (32 * X.totNnz + BLOCKSIZE - 1) / BLOCKSIZE;
// CUDA call
cuda_timer_start(start);
if(!useLoop){
if(X.ndims == 3)
hipLaunchKernelGGL(( mttkrp_COO_kernel), dim3(grid), dim3(block), 0, 0, dVals, dInds0, dInds1, dInds2, X.totNnz, dU0, dU1, dU2, mode, R);
else if(X.ndims == 4)
hipLaunchKernelGGL(( mttkrp_COO_kernel_4D), dim3(grid), dim3(block), 0, 0, dVals, dInds0, dInds1, dInds2, dInds3, X.totNnz, dU0, dU1, dU2, dU3, mode, R);
}
// /* loop like ParTI */
else{
if(X.ndims == 3)
hipLaunchKernelGGL(( mttkrp_COO_kernel_loop), dim3(grid), dim3(block), 0, 0, dVals, dInds0, dInds1, dInds2, X.totNnz, dU0, dU1, dU2, mode, R );
else if(X.ndims == 4)
hipLaunchKernelGGL(( mttkrp_COO_kernel_4D_loop), dim3(grid), dim3(block), 0, 0, dVals, dInds0, dInds1, dInds2, dInds3, X.totNnz, dU0, dU1, dU2, dU3, mode, R);
}
cuda_timer_stop(start, stop, mili);
if(useLoop) cout << "Loop on. ";
cout << "COO GPU - time " << mili << "ms"<< endl;
// check correctness
checkCuda(hipMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
// print_output(U, 0);
hipFree(dVals);
hipFree(dU0); hipFree(dU1); hipFree(dU2); hipFree(dU3);
hipFree(dInds0); hipFree(dInds1); hipFree(dInds2); hipFree(dInds3);
return 0;
}
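/* Hedged usage sketch (assumes the Tensor/Matrix/Options objects are populated
   by the host application as elsewhere in this project; the loader calls are
   not shown here):

       Tensor X;            // sparse tensor, inds/vals filled in
       Matrix U[3];         // dense factors, U[m].nCols == Opt.R
       Options Opt;         // Opt.mode and Opt.R set by the caller
       MTTKRP_COO_GPU(X, U, Opt);   // result is copied back into U[X.modeOrder[0]]
*/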
int MTTKRP_HCSR_GPU(Tensor &X, Matrix *U, const Options &Opt){
//allocate and memcpy GPU memory
cout << "FIX fiber idx" << endl;
//Tensor
ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin;
DTYPE *dVals;
int logOfWarpPerSlice = log2(Opt.warpPerSlice);
int TbPerSlc = 1;
int logOfTPS = log2(TbPerSlc);
ITYPE mode0 = X.modeOrder[0];
ITYPE mode1 = X.modeOrder[1];
ITYPE mode2 = X.modeOrder[2];
// dummy bin mapper, for compatibility with the binned kernels when bins are not used
X.slcMapperBin.push_back(std::vector<ITYPE>());
for (int s = 0; s < X.fbrIdx[0].size(); ++s)
X.slcMapperBin[0].push_back(s);
checkCuda(hipMalloc((void**) &dVals, X.totNnz * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dSlcMapperBin, X.slcMapperBin[0].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx0, X.fbrIdx[0].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr0, X.fbrPtr[0].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr1, X.fbrPtr[1].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx1, X.fbrIdx[1].size() * sizeof(ITYPE)), 0);
checkCuda(hipMemcpy(dVals, &(X.vals[0]), X.totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dSlcMapperBin, &(X.slcMapperBin[0][0]), X.slcMapperBin[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr0, &(X.fbrPtr[0][0]), X.fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx0, &(X.fbrIdx[0][0]), X.fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr1, &(X.fbrPtr[1][0]), X.fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx1, &(X.fbrIdx[1][0]), X.fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
// //Matrices
DTYPE *dU0, *dU1, *dU2, *dU3;
checkCuda(hipMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0);
hipMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(hipMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
if(X.ndims == 3){
checkCuda(hipMalloc((void**) &dInds2, X.totNnz * sizeof(ITYPE)), 0);
checkCuda(hipMemcpy(dInds2, &(X.inds[mode2][0]), X.totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
if(X.ndims == 4){
ITYPE mode3 = X.modeOrder[3];
checkCuda(hipMalloc((void**) &dFbrIdx2, X.fbrIdx[2].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dFbrPtr2, X.fbrPtr[2].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dInds3, X.totNnz * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMemcpy(dFbrPtr2, &(X.fbrPtr[2][0]), X.fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dFbrIdx2, &(X.fbrIdx[2][0]), X.fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds3, &(X.inds[mode3][0]), X.totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
}
// BLOCK and GRID
int BLOCKSIZE = 512;
if(Opt.warpPerSlice * 32 > BLOCKSIZE){
cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
exit(0);
}
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
grid.x = (Opt.warpPerSlice * 32 * X.dims[mode0] + BLOCKSIZE - 1) / BLOCKSIZE;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float mili = 0;
checkCuda(hipEventRecord(start), __LINE__);
// mttkrp_HCSR_kernel_COO<<<grid, block, 32 * sizeof(DTYPE)>>>(dVals, dfbrIdx0, dSlcMapperBin, dInds2, dfbrPtr0, dfbrPtr1, dfbrIdx1,
// X.fbrIdx[0].size(), dU0, dU1, dU2,Opt.mode, Opt.R, Opt.warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
if(X.ndims == 3)
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin), dim3(grid), dim3(block), 32 * sizeof(DTYPE), 0, dVals, dfbrIdx0, dSlcMapperBin, dInds2, dfbrPtr0, dfbrPtr1, dfbrIdx1,
X.fbrIdx[0].size(), dU0, dU1, dU2,Opt.mode, Opt.R, Opt.warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
else
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin_4D), dim3(grid), dim3(block), 32 * sizeof(DTYPE), 0, dVals, dfbrIdx0, dSlcMapperBin, dInds3, dfbrPtr0, dfbrPtr1, dfbrIdx1,
dFbrPtr2, dFbrIdx2, X.fbrIdx[0].size(), dU0, dU1, dU2, dU3, Opt.mode, Opt.R, Opt.warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
checkCuda(hipEventRecord(stop), __LINE__);
hipEventSynchronize(stop);
checkCuda(hipEventElapsedTime(&mili, start, stop), __LINE__);
hipDeviceSynchronize();
cout << "HCSR GPU - time " << mili << "ms"<< endl;
// check correctness
checkCuda(hipMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
hipFree(dVals);
hipFree(dU0); hipFree(dU1); hipFree(dU2); hipFree(dU3);
hipFree(dSlcMapperBin); hipFree(dInds2); hipFree(dInds3);
hipFree(dfbrIdx0); hipFree(dfbrIdx1); hipFree(dFbrIdx2);
hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2);
return 0;
}
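/* Tiled COO driver: the tiles' nonzeros are concatenated into single device
   arrays, dLoc tracks each tile's running offset, and one COO kernel is
   launched and timed per tile. */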
int MTTKRP_TILED_COO_GPU(TiledTensor *TiledX, Matrix *U, const Options Opt){
//allocate and memcpy GPU memory
//Tensor
ITYPE mode = Opt.mode;
ITYPE R = Opt.R;
ITYPE *dInds0, *dInds1, *dInds2;
ITYPE dLoc = 0, totNnz = 0;
DTYPE *dVals;
// All tiles share the same mode order
ITYPE mode0 = TiledX[0].modeOrder[0];
ITYPE mode1 = TiledX[0].modeOrder[1];
ITYPE mode2 = TiledX[0].modeOrder[2];
for (int tile = 0; tile < Opt.nTile; ++tile)
totNnz += TiledX[tile].totNnz;
checkCuda(hipMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dInds0, totNnz * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dInds1, totNnz * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0);
for (int tile = 0; tile < Opt.nTile; ++tile){
if(tile > 0)
dLoc += TiledX[tile-1].totNnz;
checkCuda(hipMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds0 + dLoc, &(TiledX[tile].inds[mode0][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds1 + dLoc, &(TiledX[tile].inds[mode1][0]), TiledX[tile].totNnz * sizeof(ITYPE) ,hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[mode2][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
// //Matrices
DTYPE *dU0, *dU1, *dU2;
checkCuda(hipMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0);
hipMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(hipMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
// BLOCK and GRID
int BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float mili = 0, GPUTime = 0;
// CUDA call
dLoc = 0;
for (int tile = 0; tile < Opt.nTile; ++tile){
if(tile > 0)
dLoc += TiledX[tile-1].totNnz;
cout << "Tile " << tile << " launched.. "<<endl;
grid.x = (32 * TiledX[tile].totNnz + BLOCKSIZE - 1) / BLOCKSIZE;
checkCuda(hipEventRecord(start), __LINE__);
hipLaunchKernelGGL(( mttkrp_COO_kernel), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dInds0 + dLoc, dInds1 + dLoc, dInds2 + dLoc, TiledX[tile].totNnz, dU0, dU1, dU2,
mode, R);
checkCuda(hipEventRecord(stop), __LINE__);
hipEventSynchronize(stop);
checkCuda(hipEventElapsedTime(&mili, start, stop), __LINE__);
hipDeviceSynchronize();
cout << "Tile: " << tile << " - time " << mili << "ms"<< endl;
GPUTime += mili;
}
cout << "COO GPU - time " << GPUTime << "ms"<< endl;
// check correctness
checkCuda(hipMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
hipFree(dVals);
hipFree(dU0); hipFree(dU1); hipFree(dU2);
hipFree(dInds0); hipFree(dInds1); hipFree(dInds2);
return 0;
}
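/* B-CSF driver: per-tile CSF arrays (slice/fiber pointers, indices, values)
   are concatenated on the device with running offsets (dLoc, dSlcLoc,
   dSlcIdxLoc, dFbrLoc, ...). Slices are grouped into bins by work size; small
   bins get warpPerSlc[bin] warps per slice, heavy bins get multiple thread
   blocks per slice, and each bin is launched on its own CUDA stream. */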
int MTTKRP_B_HCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){
/* Kernel choice:
   false: B-CSF (IPDPS work); true: parallelism at the fiber level, using the
   mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar kernels */
bool slcAtomicFbrLvlPar = false;
/* Allocate and memcpy GPU memory */
//Tensor
ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin, *dFbrLikeSlcInds;
DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
// All tiles share the same mode order
ITYPE mode0 = TiledX[0].modeOrder[0];
ITYPE mode1 = TiledX[0].modeOrder[1];
ITYPE mode2 = TiledX[0].modeOrder[2];
ITYPE mode3 =((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ;
for (int tile = 0; tile < Opt.nTile; ++tile){
totNnz += TiledX[tile].totNnz;
totSlcPtr += TiledX[tile].fbrPtr[0].size() ;
totSlcIdx += TiledX[tile].fbrIdx[0].size() ;
totFbrPtr += TiledX[tile].fbrPtr[1].size() ;
totFbrIdx += TiledX[tile].fbrIdx[1].size() ;
totFbrPtr2 += ((TiledX[tile].ndims == 4) ? TiledX[tile].fbrPtr[2].size() : 0) ;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float memcpyTime = 0;
cuda_timer_start(start);
checkCuda(hipMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 3)
checkCuda(hipMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 4){
checkCuda(hipMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0);
}
/* cuda memcopy for tiled parts*/
for (int tile = 0; tile < Opt.nTile; ++tile){
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); // all tile same
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[tile].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
checkCuda(hipMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[tile].fbrPtr[0][0]), TiledX[tile].fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[tile].fbrIdx[0][0]), TiledX[tile].fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[tile].fbrPtr[1][0]), TiledX[tile].fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[tile].fbrIdx[1][0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(slcAtomicFbrLvlPar)
checkCuda(hipMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[tile].fbrLikeSlcInds[0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(TiledX[tile].ndims == 3)
checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[2]][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(TiledX[tile].ndims == 4){
checkCuda(hipMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[tile].fbrPtr[2][0]), TiledX[tile].fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[tile].fbrIdx[2][0]), TiledX[tile].fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds3 + dLoc, &(TiledX[tile].inds[mode3][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
dBinLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin){
if(bin > 0)
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
checkCuda(hipMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[tile].slcMapperBin[bin][0]), TiledX[tile].slcMapperBin[bin].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
}
cuda_timer_stop(start, stop, memcpyTime);
cout << "Memcopy time " << memcpyTime << endl;
// //Matrices
DTYPE *dU0, *dU1, *dU2, *dU3;
checkCuda(hipMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0);
hipMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(hipMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
if(TiledX[0].ndims == 4){
checkCuda(hipMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
}
// BLOCK and GRID
int BLOCKSIZE = 512;
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
if(Opt.warpPerSlice * 32 > BLOCKSIZE){
cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
exit(0);
}
hipStream_t streams[Opt.nBin];
float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
int smallBinEndsAt = 5;
/* Warps per slice and thread blocks per slice, per bin */
int *warpPerSlc = new int[Opt.nBin];
int *logOfWarpPerSlc = new int[Opt.nBin];
int *TbPerSlc = new int[Opt.nBin];
int *logOfTbPerSlc = new int[Opt.nBin];
for (int bin = 0; bin < Opt.nBin ; ++bin){
TbPerSlc[bin] = 1;
warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1);
if(warpPerSlc[bin] > 16)
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]);
TbPerSlc[bin] = 1;
logOfTbPerSlc[bin] = 0;
if (bin >= smallBinEndsAt){
TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // first heavy bin gets 1 << 1 TBs, doubling per bin (capped at 32 below)
if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32;
logOfTbPerSlc[bin] = log2(TbPerSlc[bin]);
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = 4;
}
}
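/* Resulting schedule with smallBinEndsAt = 5 (illustrative):
     bins 0..4 : 1, 2, 4, 8, 16 warps per slice, 1 TB per slice
     bins 5..  : 16 warps per slice and 2, 4, 8, ... TBs per slice (capped at 32) */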
// TBD: change warpPerSlc to warpPerSlc[bin] and all
int slcPerTb = 1;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0;
for (int bin = 0; bin < Opt.nBin; ++bin)
hipStreamCreate(&streams[bin]);
/*MTTKRP on Opt.mode*/
int MTTKRPmode = mode0;//Opt.mode;
for (int tile = 0; tile < Opt.nTile; ++tile){
dBinLoc = 0;
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
BLOCKSIZE = (( slcAtomicFbrLvlPar == true) ? Opt.TBsize : 512) ;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int smallBinEndsAt = 5;
int slcPerTb = 0;
// int warpPerFbr = BLOCKSIZE/32;//1;//Opt.warpPerSlice;//4;//;
// int logOfWarpPerFbr = log2(warpPerFbr);
// int bin = 0;
// int fbrPerWarp = 1;//BLOCKSIZE/32; // dont overflow TB
// int logOfFbrPerWarp = log2(fbrPerWarp);
int warpPerFbr = Opt.warpPerSlice;
int logOfWarpPerFbr = log2(warpPerFbr);
int fbrPerWarp = Opt.fiberPerWarp; // don't overflow the thread block
int logOfFbrPerWarp = log2(fbrPerWarp);
grid.x = ( warpPerFbr * 32 * ((TiledX[tile].nFibers+fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE;
double t0 = seconds();
cuda_timer_start(start);
if(slcAtomicFbrLvlPar){
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers,
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
else
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].nFibers,
dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
}
else{
for (int bin = 0; bin < Opt.nBin ; ++bin){
if(bin < smallBinEndsAt){
ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin];
dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0);
grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin_4D), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
// Processing heavy bin.. multiple TB per slice
else{
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_hvyBin), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_hvyBin_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
}
}
cuda_timer_stop(start, stop, mili);
CPUtimer += seconds() - t0;
GPUTime += mili;
if(Opt.verbose){
cout << "Tile: " << tile << " - time: " << mili << "ms";
cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: "
<< TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " ";
cout << endl;
}
}
allModeGPUTime += GPUTime;
cout << "B-CSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl;
for (int bin = 0; bin < Opt.nBin; ++bin)
hipStreamDestroy(streams[bin]);
// check correctness
checkCuda(hipMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
hipFree(dVals);
hipFree(dU0); hipFree(dU1); hipFree(dU2); hipFree(dU3);
hipFree(dSlcMapperBin); hipFree(dInds2); hipFree(dInds3);
hipFree(dfbrIdx0); hipFree(dfbrIdx1); hipFree(dFbrIdx2);
hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2);
hipFree(dFbrLikeSlcInds);
return 0;
}
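/* ANYMODE driver: all factor matrices are packed into one device buffer dU
   (per-mode sizes in szDU, per-launch offsets in dULoc), and the kernel family
   is chosen by comparing the requested MTTKRP mode with the tile's mode order:
   first mode -> the B-CSF kernels (mttkrp_HCSR_kernel_*), second mode of a 4-D
   tensor -> the fbrS_atomic kernels, second-to-last mode -> the fbr_atomic
   kernels, last mode -> the all_atomic kernels. */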
int MTTKRP_B_HCSR_GPU_ANYMODE(TiledTensor *TiledX, Matrix *U, const Options &Opt, int mode){
/* Allocate and memcpy GPU memory */
//Tensor
ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin, *dFbrLikeSlcInds;
DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
// Fixed natural mode numbering (0..ndims-1) for the packed factor-matrix buffer
ITYPE mode0 = 0;//TiledX[0].modeOrder[0];
ITYPE mode1 = 1;//TiledX[0].modeOrder[1];
ITYPE mode2 = 2;//TiledX[0].modeOrder[2];
ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ;
for (int tile = 0; tile < Opt.nTile; ++tile){
totNnz += TiledX[tile].totNnz;
totSlcPtr += TiledX[tile].fbrPtr[0].size() ;
totSlcIdx += TiledX[tile].fbrIdx[0].size() ;
totFbrPtr += TiledX[tile].fbrPtr[1].size() ;
totFbrIdx += TiledX[tile].fbrIdx[1].size() ;
totFbrPtr2 += ((TiledX[tile].ndims == 4) ? TiledX[tile].fbrPtr[2].size() : 0) ;
}
double t0 = seconds();
checkCuda(hipMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 3)
checkCuda(hipMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 4){
checkCuda(hipMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0);
}
/* cuda memcopy for tiled parts*/
for (int tile = 0; tile < Opt.nTile; ++tile){
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); // all tile same
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[tile].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
checkCuda(hipMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[tile].fbrPtr[0][0]), TiledX[tile].fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[tile].fbrIdx[0][0]), TiledX[tile].fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[tile].fbrPtr[1][0]), TiledX[tile].fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[tile].fbrIdx[1][0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(TiledX[tile].ndims == 3)
checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[2]][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(TiledX[tile].ndims == 4){
checkCuda(hipMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[tile].fbrPtr[2][0]), TiledX[tile].fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[tile].fbrIdx[2][0]), TiledX[tile].fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds3 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[3]][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
dBinLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin){
if(bin > 0)
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
checkCuda(hipMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[tile].slcMapperBin[bin][0]), TiledX[tile].slcMapperBin[bin].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
}
t0 = seconds();
unsigned int *dULoc = new unsigned int[TiledX[0].ndims];
unsigned int *szDU = new unsigned int[TiledX[0].ndims];
// //Matrices
DTYPE *dU;// *dU0, *dU1, *dU2, *dU3;
ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols
: (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols );
checkCuda(hipMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0);
for (int m = 0; m < TiledX[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
ITYPE mtxLoc = 0;
for (int m = 0; m < mode; ++m)
mtxLoc += szDU[m];
checkCuda(hipMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
if(TiledX[0].ndims == 4)
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
hipMemset(dU + mtxLoc, 0, U[mode].nRows * U[mode].nCols * sizeof(DTYPE));
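/* Packed factor-matrix layout in dU (3-D case shown; the 4-D case appends U[3]):
     dU + 0                    -> U[0]
     dU + szDU[0]              -> U[1]
     dU + szDU[0] + szDU[1]    -> U[2]
   mtxLoc is the offset of the output mode's block, zeroed just above. */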
// BLOCK and GRID
int BLOCKSIZE = 512;
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
if(Opt.warpPerSlice * 32 > BLOCKSIZE){
cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
exit(0);
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipStream_t streams[Opt.nBin];
float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
int smallBinEndsAt = 5;
/* Warps per slice and thread blocks per slice, per bin */
int *warpPerSlc = new int[Opt.nBin];
int *logOfWarpPerSlc = new int[Opt.nBin];
int *TbPerSlc = new int[Opt.nBin];
int *logOfTbPerSlc = new int[Opt.nBin];
for (int bin = 0; bin < Opt.nBin ; ++bin){
TbPerSlc[bin] = 1;
warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1);
if(warpPerSlc[bin] > 16)
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]);
TbPerSlc[bin] = 1;
logOfTbPerSlc[bin] = 0;
if (bin >= smallBinEndsAt){
TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // first heavy bin gets 1 << 1 TBs, doubling per bin (capped at 32 below)
if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32;
logOfTbPerSlc[bin] = log2(TbPerSlc[bin]);
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = 4;
}
}
// TBD: change warpPerSlc to warpPerSlc[bin] and all
int slcPerTb = 1;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0;
for (int bin = 0; bin < Opt.nBin; ++bin)
hipStreamCreate(&streams[bin]);
/*MTTKRP on Opt.mode*/
int MTTKRPmode = mode;//Opt.mode;
for (int tile = 0; tile < Opt.nTile; ++tile){
/* matrix order according to mode order*/
for (int mm = 0; mm < TiledX[0].ndims; ++mm){
int curMode = TiledX[tile].modeOrder[mm];
dULoc[mm] = 0;
for (int q = 0; q < curMode; ++q)
dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0
}
dBinLoc = 0;
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
// BLOCKSIZE = (( slcAtomicFbrLvlPar == true) ? Opt.TBsize : 512) ;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int smallBinEndsAt = 5;
int slcPerTb = 0;
// int warpPerFbr = BLOCKSIZE/32;//1;//Opt.warpPerSlice;//4;//;
// int logOfWarpPerFbr = log2(warpPerFbr);
// int bin = 0;
// int fbrPerWarp = 1;//BLOCKSIZE/32; // dont overflow TB
// int logOfFbrPerWarp = log2(fbrPerWarp);
int warpPerFbr = Opt.warpPerSlice;
int logOfWarpPerFbr = log2(warpPerFbr);
int fbrPerWarp = Opt.fiberPerWarp; // don't overflow the thread block
int logOfFbrPerWarp = log2(fbrPerWarp);
double t0 = seconds();
cuda_timer_start(start);
if(mode == TiledX[0].modeOrder[0]){
for (int bin = 0; bin < Opt.nBin ; ++bin){
if(bin < smallBinEndsAt){
ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin];
dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0);
grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin_4D), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
// Processing heavy bin.. multiple TB per slice
else{
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_hvyBin), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_hvyBin_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
}
}
else if(TiledX[0].ndims == 4 && TiledX[0].modeOrder[1] == MTTKRPmode && TiledX[0].totNnz){
for (int bin = 0; bin < Opt.nBin ; ++bin){
if(bin < smallBinEndsAt){
ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin];
dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0);
grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_smllBin_fbrS_atomic_4D), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
// Processing heavy bin.. multiple TB per slice
else{
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_hvyBin_fbrS_atomic_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
}
}
else if(mode == TiledX[0].modeOrder[TiledX[0].ndims-2]){
for (int bin = 0; bin < Opt.nBin ; ++bin){
if(bin < smallBinEndsAt){
ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin];
dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0);
grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_smllBin_fbr_atomic), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_smllBin_fbr_atomic_4D), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
// Processing heavy bin.. multiple TB per slice
else{
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
}
}
else if(mode == TiledX[0].modeOrder[TiledX[0].ndims-1]){
for (int bin = 0; bin < Opt.nBin ; ++bin){
if(bin < smallBinEndsAt){
ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin];
dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0);
grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_smllBin_all_atomic) , dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_smllBin_all_atomic_4D), dim3(grid), dim3(block), shSize , streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
// Processing heavy bin.. multiple TB per slice
else{
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_hvyBin_all_atomic), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_hvyBin_all_atomic_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
}
}
cuda_timer_stop(start, stop, mili);
CPUtimer += seconds() - t0;
GPUTime += mili;
// if(Opt.verbose)
{
cout << "Tile: " << tile << " - time: " << mili << "ms";
if(TiledX[0].ndims == 3){
cout << " nSlc: " << TiledX[tile].fbrIdx[0].size() << ", nFibers: "
<< TiledX[tile].fbrPtr[1].size() <<", nnz: " << TiledX[tile].totNnz;
cout << endl;
}
else if(TiledX[0].ndims == 4){
cout << " nSlc: " << TiledX[tile].fbrIdx[0].size() << ", nSFibers: "
<< TiledX[tile].fbrPtr[1].size() << ", nFibers: "
<< TiledX[tile].fbrPtr[2].size() <<", nnz: " << TiledX[tile].totNnz;
cout << endl;
}
}
}
allModeGPUTime += GPUTime;
cout << "ONE-B-CSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl;
for (int bin = 0; bin < Opt.nBin; ++bin)
hipStreamDestroy(streams[bin]);
// check correctness
checkCuda(hipMemcpy(&U[mode].vals[0], dU + mtxLoc, U[mode].nRows * U[mode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
hipFree(dVals);
hipFree(dU); //hipFree(dU1); hipFree(dU2); hipFree(dU3);
hipFree(dSlcMapperBin); hipFree(dInds2); hipFree(dInds3);
hipFree(dfbrIdx0); hipFree(dfbrIdx1); hipFree(dFbrIdx2);
hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2);
return 0;
}
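/* HYB driver: the tensor is split into a COO part (scattered nonzeros), a CSL
   part (slice pointers/indices plus nonzero-level indices for the other two
   modes) and an HCSR part. Each part gets its own device buffers and kernels,
   the binned CSL/HCSR launches go to separate streams, and the combined
   execution is timed between HYBstart and HYBstop. */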
int MTTKRP_HYB_GPU(const HYBTensor &HybX, Matrix *U, const Options &Opt){
//allocate and memcpy GPU memory
//Tensor
ITYPE *dCOOInds0, *dCOOInds1, *dCOOInds2, *dCOOInds3;
ITYPE *dCSLSlcPtr, *dCSLSlcInds, *dCSLInds1, *dCSLInds2, *dCSLSlcMapperBin;
ITYPE *dfbrPtr0, *dfbrIdx0, *dInds2, *dInds3, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin;
DTYPE *dVals, *dCOOVals, *dCSLVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dCSLBinLoc = 0, dFbrLoc2 =0;
int warpPerSlice = Opt.warpPerSlice;
int logOfWarpPerSlice = log2(Opt.warpPerSlice);
int TbPerSlc = 1;
int logOfTPS = log2(TbPerSlc);
// Mode order of the HYB tensor
ITYPE mode0 = HybX.modeOrder[0];
ITYPE mode1 = HybX.modeOrder[1];
ITYPE mode2 = HybX.modeOrder[2];
ITYPE mode3 =((HybX.ndims == 4) ? HybX.modeOrder[3] : 0) ;
// ****** mem op HYB COO *******
if(HybX.COOnnz > 0){
checkCuda(hipMalloc((void**) &dCOOVals, HybX.COOnnz * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dCOOInds0, HybX.COOnnz * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dCOOInds1, HybX.COOnnz * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dCOOInds2, HybX.COOnnz * sizeof(ITYPE)), 0);
checkCuda(hipMemcpy(dCOOVals, &(HybX.COOvals[0]), HybX.COOnnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dCOOInds0, &(HybX.COOinds[mode0][0]), HybX.COOnnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dCOOInds1, &(HybX.COOinds[mode1][0]), HybX.COOnnz * sizeof(ITYPE) ,hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dCOOInds2, &(HybX.COOinds[mode2][0]), HybX.COOnnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(HybX.ndims == 4){
checkCuda(hipMalloc((void**) &dCOOInds3, HybX.COOnnz * sizeof(ITYPE)), 0);
checkCuda(hipMemcpy(dCOOInds3, &(HybX.COOinds[mode3][0]), HybX.COOnnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
}
// ****** mem op HYB CSL *******
if(HybX.CSLnnz > 0){
checkCuda(hipMalloc((void**) &dCSLVals, HybX.CSLnnz * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dCSLSlcPtr, HybX.CSLslicePtr.size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dCSLSlcInds, HybX.CSLsliceIdx.size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dCSLInds1, HybX.CSLnnz * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dCSLInds2, HybX.CSLnnz * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dCSLSlcMapperBin, HybX.CSLslicePtr.size() * sizeof(ITYPE)), 0);
checkCuda(hipMemcpy(dCSLVals, &(HybX.CSLvals[0]), HybX.CSLnnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dCSLSlcPtr + dSlcLoc, &(HybX.CSLslicePtr[0]), HybX.CSLslicePtr.size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dCSLSlcInds + dSlcIdxLoc, &(HybX.CSLsliceIdx[0]), HybX.CSLsliceIdx.size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dCSLInds1, &(HybX.CSLinds[mode1][0]), HybX.CSLnnz * sizeof(ITYPE) ,hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dCSLInds2, &(HybX.CSLinds[mode2][0]), HybX.CSLnnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
dCSLBinLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin){
if(bin > 0)
dCSLBinLoc += HybX.CSLslcMapperBin[bin-1].size();
if(HybX.CSLslcMapperBin[bin].size() > 0)
checkCuda(hipMemcpy(dCSLSlcMapperBin + dSlcIdxLoc + dCSLBinLoc, &(HybX.CSLslcMapperBin[bin][0]), HybX.CSLslcMapperBin[bin].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
}
// ****** mem op HYB HCSR *******
if(HybX.HCSRnnz > 0){
checkCuda(hipMalloc((void**) &dVals, HybX.HCSRnnz * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr0, HybX.fbrPtr[0].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx0, HybX.fbrIdx[0].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dSlcMapperBin, HybX.fbrPtr[0].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr1, HybX.fbrPtr[1].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx1, HybX.fbrPtr[1].size() * sizeof(ITYPE)), 0);
checkCuda(hipMemcpy(dVals, &(HybX.vals[0]), HybX.HCSRnnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr0, &(HybX.fbrPtr[0][0]), HybX.fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx0, &(HybX.fbrIdx[0][0]), HybX.fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr1, &(HybX.fbrPtr[1][0]), HybX.fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx1, &(HybX.fbrIdx[1][0]), HybX.fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(HybX.ndims == 3){
checkCuda(hipMalloc((void**) &dInds2, HybX.HCSRnnz * sizeof(ITYPE)), 0);
checkCuda(hipMemcpy(dInds2, &(HybX.inds[mode2][0]), HybX.HCSRnnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
if(HybX.ndims == 4){
checkCuda(hipMalloc((void**) &dFbrIdx2, HybX.fbrIdx[2].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dFbrPtr2, HybX.fbrPtr[2].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dInds3, HybX.HCSRnnz * sizeof(ITYPE)), 0);
checkCuda(hipMemcpy(dFbrPtr2, &(HybX.fbrPtr[2][0]), HybX.fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dFbrIdx2, &(HybX.fbrIdx[2][0]), HybX.fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds3, &(HybX.inds[mode3][0]), HybX.HCSRnnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
dBinLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin){
if(bin > 0)
dBinLoc += HybX.slcMapperBin[bin-1].size();
if(HybX.slcMapperBin[bin].size() > 0)
checkCuda(hipMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(HybX.slcMapperBin[bin][0]), HybX.slcMapperBin[bin].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
}
// //Matrices
DTYPE *dU0, *dU1, *dU2, *dU3;
checkCuda(hipMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0);
hipMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(hipMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
if(HybX.ndims == 4){
checkCuda(hipMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0);
checkCuda(hipMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
}
// BLOCK and GRID
int BLOCKSIZE = 512;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
if(Opt.warpPerSlice * 32 > BLOCKSIZE){
cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
exit(0);
}
hipEvent_t start, stop, HYBstart, HYBstop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventCreate(&HYBstart);
hipEventCreate(&HYBstop);
hipStream_t streams[2 * Opt.nBin + 1];
for (int bin = 0; bin < 2 * Opt.nBin + 1; ++bin)
hipStreamCreate(&streams[bin]);
float mili = 0, HYBmili =0, GPUTime = 0, CPUtimer = 0, HYBTime = 0;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0;
bool useLoop = false;
if(useLoop)
grid.x = 32768*2;
// mili = 0;
dCSLBinLoc = 0; dBinLoc = 0;
int smallBinEndsAt = 5;
int slcPerTb = 0;
cuda_timer_start(HYBstart);
// ******* CUDA COO *******
// if(HybX.COOnnz > 0){
// BLOCKSIZE = 128;
// block.x = BLOCKSIZE;
// // /* Like PARTI loop */ =
// if(!useLoop)
// grid.x = (32 * HybX.COOnnz + BLOCKSIZE - 1) / BLOCKSIZE;
// if(Opt.verbose)
// cuda_timer_start(start);
// if(!useLoop){
// if(HybX.ndims == 3)
// hipLaunchKernelGGL(( mttkrp_HYB_COO_kernel), dim3(grid), dim3(block), 0, 0, dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2, HybX.COOnnz, dU0, dU1, dU2, Opt.mode, Opt.R);
// else if (HybX.ndims == 4)
// hipLaunchKernelGGL(( mttkrp_HYB_COO_kernel_4D), dim3(grid), dim3(block), 0, 0, dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2,dCOOInds3, HybX.COOnnz, dU0, dU1, dU2, dU3, Opt.mode, Opt.R);
// }
// else{
// if(HybX.ndims == 3)
// mttkrp_HYB_COO_kernel_loop<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2, HybX.COOnnz, dU0, dU1, dU2, Opt.mode, Opt.R);
// else if (HybX.ndims == 4)
// mttkrp_HYB_COO_kernel_4D_loop<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2,dCOOInds3, HybX.COOnnz, dU0, dU1, dU2, dU3, Opt.mode, Opt.R);
// }
// if(Opt.verbose){
// cuda_timer_stop(start, stop, mili);
// HYBTime += mili;
// cout << "HYB-COO GPU " << mili << "ms"<< endl;
// }
// }
// ******* CUDA CSL *******
// if(HybX.CSLnnz > 0 || HybX.HCSRnnz > 0)
{
if(HybX.COOnnz > 0){
BLOCKSIZE = 128;
block.x = 128;
grid.x = (32 * HybX.COOnnz + BLOCKSIZE - 1) / BLOCKSIZE;
if(HybX.ndims == 3)
hipLaunchKernelGGL(( mttkrp_HYB_COO_kernel), dim3(grid), dim3(block), 0, 0, dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2, HybX.COOnnz, dU0, dU1, dU2, Opt.mode, Opt.R);
else if (HybX.ndims == 4)
hipLaunchKernelGGL(( mttkrp_HYB_COO_kernel_4D), dim3(grid), dim3(block), 0, 0, dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2,dCOOInds3, HybX.COOnnz, dU0, dU1, dU2, dU3, Opt.mode, Opt.R);
}
BLOCKSIZE = 512;
block.x = BLOCKSIZE;
for (int bin = 0; bin < Opt.nBin ; ++bin){
dBinLoc += ((bin > 0) ? HybX.slcMapperBin[bin-1].size() : 0);
dCSLBinLoc += ((bin > 0) ? HybX.CSLslcMapperBin[bin-1].size() : 0);
if( HybX.slcMapperBin[bin].size() == 0 && HybX.CSLslcMapperBin[bin].size() == 0)
continue;
// Processing small bins, merged into one launch: 1 warp per slice
if(bin < smallBinEndsAt){
warpPerSlice = 1;
logOfWarpPerSlice = 0;//log2(warpPerSlice);
slcPerTb = 16 / warpPerSlice;
/* CSL small bin */
if(HybX.CSLnnz > 0){
grid.x = ( warpPerSlice * 32 * HybX.CSLslcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
hipLaunchKernelGGL(( mttkrp_CSL_kernel_bin), dim3(grid), dim3(block), 0, streams[1], dCSLVals, dCSLSlcInds, dCSLSlcMapperBin + dCSLBinLoc,
dCSLInds2, dCSLSlcPtr, dCSLInds1, HybX.CSLslcMapperBin[bin].size(),
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice);
}
/* HCSR small bin */
if(HybX.HCSRnnz > 0){
grid.x = ( warpPerSlice * 32 * HybX.slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(HybX.ndims == 3)
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin), dim3(grid), dim3(block), 0, streams[2], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, HybX.slcMapperBin[bin].size(),
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
else if(HybX.ndims == 4)
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_smllBin_4D), dim3(grid), dim3(block), 0, streams[2], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, HybX.slcMapperBin[bin].size(),
dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
}
}
// Processing heavy bin.. multiple TB per slice
else{
TbPerSlc = 1 << (bin - smallBinEndsAt + 1); // first heavy bin gets 1 << 1 (= 2) TBs per slice, not 1 << 5
if(TbPerSlc > 32) TbPerSlc = 32;
logOfTPS = log2(TbPerSlc);
warpPerSlice = 16;
logOfWarpPerSlice = 4;
/* CSL big bin */
if(HybX.CSLnnz > 0){
grid.x = (TbPerSlc * warpPerSlice * 32 * HybX.CSLslcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
hipLaunchKernelGGL(( mttkrp_CSL_kernel_hvyBin), dim3(grid), dim3(block), 0, streams[bin+1], dCSLVals + dLoc, dCSLSlcInds + dSlcIdxLoc, dCSLSlcMapperBin + dSlcIdxLoc + dCSLBinLoc,
dCSLInds2 + dLoc, dCSLSlcPtr + dSlcLoc, dCSLInds1, HybX.CSLslcMapperBin[bin].size(),
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
}
/* HCSR big bin */
if(HybX.HCSRnnz > 0){
grid.x = (TbPerSlc * warpPerSlice * 32 * HybX.slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(HybX.ndims == 3)
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_hvyBin), dim3(grid), dim3(block), 0, streams[bin+2], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, HybX.slcMapperBin[bin].size(),
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
else if(HybX.ndims == 4)
hipLaunchKernelGGL(( mttkrp_HCSR_kernel_hvyBin_4D), dim3(grid), dim3(block), 0, streams[bin + 2], dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, HybX.slcMapperBin[bin].size(),
dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
}
}
}
// if(Opt.verbose){
// cuda_timer_stop(start, stop, mili);
// HYBTime += mili;
// cout << "CSL+HCSR GPU-time: " << mili << "ms"<< endl;
// }
}
cuda_timer_stop(HYBstart, HYBstop, HYBmili);
if(Opt.verbose)
cout << "verbose on. HYB GPU: " << HYBmili << endl;
else
cout << "HYB GPU: " << HYBmili << endl;
for (int bin = 0; bin < 2 * Opt.nBin + 1; ++bin)
hipStreamDestroy(streams[bin]);
// check correctness
checkCuda(hipMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
hipFree(dVals); hipFree(dCOOVals); hipFree(dCSLVals);
hipFree(dU0); hipFree(dU1); hipFree(dU2);
hipFree(dfbrIdx0); hipFree(dInds2); hipFree(dInds3);
hipFree(dfbrIdx1); hipFree(dFbrIdx2);
hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2);
hipFree(dCSLInds1); hipFree(dCSLInds2); hipFree(dCSLSlcPtr); hipFree(dCSLSlcInds);
hipFree(dCOOInds0); hipFree(dCOOInds1); hipFree(dCOOInds2);
return 0;
}
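/*
 * MTTKRP_ONE_HCSR_GPU: runs MTTKRP for every mode of the tensor using a single
 * CSF (HCSR) representation. Depending on where the current MTTKRP mode sits in
 * the fixed mode order, the output row is owned by a slice, a fiber (or fiberS
 * for 4-D tensors), or an individual nonzero, so the per-mode branches below
 * pick slice-, fiber-, or nnz-level atomics accordingly.
 */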
int MTTKRP_ONE_HCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){
bool performMTTKRPMode = true, performMTTKRPnMode = true, performMTTKRPnnMode = true;
/* Allocate and memcpy GPU memory */
//Tensor
ITYPE *dInds2 = NULL, *dInds3 = NULL, *dfbrPtr0 = NULL, *dfbrIdx0 = NULL, *dfbrPtr1 = NULL, *dfbrIdx1 = NULL, *dFbrPtr2 = NULL, *dFbrIdx2 = NULL, *dSlcMapperBin = NULL, *dFbrLikeSlcInds = NULL; // NULL-init so the unconditional hipFree calls are safe no-ops for buffers never allocated (e.g. the 4-D-only ones on 3-D tensors)
DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0, dFbrLikeSlcIndsLoc = 0;
ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
// // All tile same mode
ITYPE mode0 = 0;//TiledX[0].modeOrder[0];
ITYPE mode1 = 1;//TiledX[0].modeOrder[1];
ITYPE mode2 = 2;//TiledX[0].modeOrder[2];
ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ;
ITYPE R = Opt.R;
for (int tile = 0; tile < Opt.nTile; ++tile){
totNnz += TiledX[tile].totNnz;
totSlcPtr += TiledX[tile].fbrPtr[0].size() ;
totSlcIdx += TiledX[tile].fbrIdx[0].size() ;
totFbrPtr += TiledX[tile].fbrPtr[1].size() ;
totFbrIdx += TiledX[tile].fbrIdx[1].size() ;
totFbrPtr2 += ((TiledX[tile].ndims == 4) ? TiledX[tile].fbrPtr[2].size() : 0) ;
}
double t0 = seconds();
checkCuda(hipMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 3)
checkCuda(hipMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 4){
checkCuda(hipMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0);
}
/* cuda memcopy for tiled parts*/
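/* The per-tile CSF arrays are packed back to back in the device buffers; the
   d*Loc counters accumulate each tile's starting offset as the loop advances. */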
for (int tile = 0; tile < Opt.nTile; ++tile){
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); // all tile same
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[tile].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
checkCuda(hipMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[tile].fbrPtr[0][0]), TiledX[tile].fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[tile].fbrIdx[0][0]), TiledX[tile].fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[tile].fbrPtr[1][0]), TiledX[tile].fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[tile].fbrIdx[1][0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[tile].fbrLikeSlcInds[0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(TiledX[tile].ndims == 3)
checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[2]][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(TiledX[tile].ndims == 4){
checkCuda(hipMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[tile].fbrPtr[2][0]), TiledX[tile].fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[tile].fbrIdx[2][0]), TiledX[tile].fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds3 + dLoc, &(TiledX[tile].inds[TiledX[0].modeOrder[3]][0]), TiledX[tile].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
dBinLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin){
if(bin > 0)
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
checkCuda(hipMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[tile].slcMapperBin[bin][0]), TiledX[tile].slcMapperBin[bin].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
}
float tnsMemcpyTime = seconds() - t0;
t0 = seconds();
unsigned int *dULoc = new unsigned int[TiledX[0].ndims];
unsigned int *szDU = new unsigned int[TiledX[0].ndims];
// //Matrices
DTYPE *dU;// *dU0, *dU1, *dU2, *dU3;
ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols
: (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols );
checkCuda(hipMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0);
for (int m = 0; m < TiledX[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
hipMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
float mtxMemcpyTime = seconds() - t0;
// cout << "tns and mtx memcopy time: " << tnsMemcpyTime <<", " << mtxMemcpyTime<< endl;
if(TiledX[0].ndims == 4)
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
// BLOCK and GRID
int BLOCKSIZE = 512;
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
// if(Opt.warpPerSlice * 32 > BLOCKSIZE){
// cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
// exit(0);
// }
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipStream_t streams[Opt.nBin];
float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
int smallBinEndsAt = 5;
/* Warp per slice and threadblock per slice */
int *warpPerSlc = new int[Opt.nBin];
int *logOfWarpPerSlc = new int[Opt.nBin];
int *TbPerSlc = new int[Opt.nBin];
int *logOfTbPerSlc = new int[Opt.nBin];
for (int bin = 0; bin < Opt.nBin ; ++bin){
TbPerSlc[bin] = 1;
warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1);
if(warpPerSlc[bin] > 16)
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]);
TbPerSlc[bin] = 1;
logOfTbPerSlc[bin] = 0;
if (bin >= smallBinEndsAt){
TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // first heavy bin gets 1 << 1 (= 2) TBs per slice, not 1 << 5
if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32;
logOfTbPerSlc[bin] = log2(TbPerSlc[bin]);
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = 4;
}
}
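/* Per-bin configuration: bin b uses min(2^b, 16) warps per slice; bins at or
   beyond smallBinEndsAt get 2^(b - smallBinEndsAt + 1) thread blocks per slice
   (capped at 32) with the warp count pinned at 16. Note that the fiber-level
   kernels launched below size their grids from Opt.TBsize / Opt.warpPerSlice /
   Opt.fiberPerWarp rather than from these arrays. */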
// TBD: change warpPerSlc to warpPerSlc[bin] and all
int slcPerTb = 1;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin)
hipStreamCreate(&streams[bin]);
/*MTTKRP on Opt.mode*/
unsigned int dU0Loc, dU1Loc, dU2Loc , dU3Loc;
/* matrix order according to mode order*/
for (int m = 0; m < TiledX[0].ndims; ++m){
int curMode = TiledX[0].modeOrder[m];
dULoc[m] = 0;
for (int q = 0; q < curMode; ++q){
dULoc[m] += szDU[q % TiledX[0].ndims]; //1 2 3 0
}
}
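/* dULoc[m] is the element offset, inside the packed dU buffer (U0|U1|U2[|U3]),
   of the factor matrix that corresponds to the m-th mode of the CSF mode order. */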
for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode){
if(MTTKRPmode > 0){
mili = 0; GPUTime = 0; CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0;
// The previous mode's MTTKRP overwrote its factor matrix; re-initialize it on the host to known values so the next mode's result can still be checked for correctness.
int mode = MTTKRPmode - 1;
for(long r = 0; r < U[mode].nRows; ++r){
for(long c = 0; c < U[mode].nCols; ++c) // or u[mode].nCols
U[mode].vals[r * U[mode].nCols + c] = mode + .5;// 0.1 * drand48(); //1 ;//(r * R + c + 1); //
}
if(MTTKRPmode == 1){
checkCuda(hipMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
hipMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 2){
checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
hipMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 3){
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
hipMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE));
}
}
if(performMTTKRPMode && TiledX[0].modeOrder[0] == MTTKRPmode){
// if(Opt.verbose)
cout << "Slc atomics - " ;
for (int tile = 0; tile < Opt.nTile; ++tile){
dBinLoc = 0;
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int smallBinEndsAt = 5;
int slcPerTb = 0;
int warpPerFbr =Opt.warpPerSlice;//4;//; BLOCKSIZE/32;//1;//
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
bool useLoop = false;
int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB
int logOfFbrPerWarp = log2(fbrPerWarp );
// int fbrPerWarp = 1;//BLOCKSIZE/32; // dont overflow TB
// int logOfFbrPerWarp = log2(fbrPerWarp );
if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){
cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!"
<< endl << "hint: increase -b!" << endl;
exit(0);
}
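/* Each group of fbrPerWarp fibers is served by warpPerFbr warps, so the launch
   needs warpPerFbr * 32 * ceil(nFibers / fbrPerWarp) threads in total; grid.x
   below rounds that up to whole thread blocks. */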
/* Like PARTI loop */
if(useLoop)
grid.x = Opt.gridSize;// 32768*16;
else
grid.x = ( warpPerFbr * 32 * ((TiledX[tile].nFibers+fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE;
double t0 = seconds();
cuda_timer_start(start);
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers,
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
else if(TiledX[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[tile].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
cuda_timer_stop(start, stop, mili);
CPUtimer += seconds() - t0;
GPUTime += mili;
if(Opt.verbose){
cout << "Tile: " << tile << " - time: " << mili << "ms";
cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: "
<< TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " ";
cout << endl;
}
}
allModeGPUTime += GPUTime;
cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl;
}
/*processing fbrS level for 4D tensor*/
else if(TiledX[0].ndims == 4 && performMTTKRPnMode && TiledX[0].modeOrder[1] == MTTKRPmode){
// if(Opt.verbose)
cout << "FbrS atomics - " ;
mili = 0, GPUTime = 0, CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0;
for (int tile = 0; tile < Opt.nTile; ++tile){
dBinLoc = 0;
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
// cout <<"might wanna change binning style and Block size, logWPC, COO like parallelism, allow mode sort" << endl;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int smallBinEndsAt = 5;
int slcPerTb = 0;
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
grid.x = ( warpPerFbr * 32 * TiledX[tile].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
double t0 = seconds();
cuda_timer_start(start);
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[tile].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
cuda_timer_stop(start, stop, mili);
CPUtimer += seconds() - t0;
GPUTime += mili;
if(Opt.verbose){
cout << "Tile: " << tile << " - time: " << mili << "ms";
cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: "
<< TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " ";
cout << endl;
}
}
allModeGPUTime += GPUTime;
cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl;
}
else if(performMTTKRPnMode && TiledX[0].modeOrder[TiledX[0].ndims-2] == MTTKRPmode){
// if(Opt.verbose)
cout << "Fbr atomics - " ;
mili = 0, GPUTime = 0, CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0;
for (int tile = 0; tile < Opt.nTile; ++tile){
dBinLoc = 0;
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
// cout <<"might wanna change binning style and Block size, logWPC, COO like parallelism, allow mode sort" << endl;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int smallBinEndsAt = 5;
int slcPerTb = 0;
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
bool useLoop = false;
/* Like PARTI loop */
if(useLoop)
grid.x = Opt.gridSize;// 32768*16;
else
grid.x = ( warpPerFbr * 32 * TiledX[tile].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
double t0 = seconds();
cuda_timer_start(start);
if(useLoop)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_loop), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers,
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else{
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers,
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[tile].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
cuda_timer_stop(start, stop, mili);
CPUtimer += seconds() - t0;
GPUTime += mili;
if(Opt.verbose){
cout << "Tile: " << tile << " - time: " << mili << "ms";
cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: "
<< TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " ";
cout << endl;
}
}
allModeGPUTime += GPUTime;
cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl;
}
else if(performMTTKRPnnMode && TiledX[0].modeOrder[TiledX[0].ndims-1] == MTTKRPmode){
// if(Opt.verbose)
cout << "Nnz atomics - " ;
mili = 0, GPUTime = 0, CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0;
for (int tile = 0; tile < Opt.nTile; ++tile){
dBinLoc = 0;
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
bool useLoop = false;
int smallBinEndsAt = 5;
int slcPerTb = 0;
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
/* Like PARTI loop */
if(useLoop)
grid.x = Opt.gridSize;// 32768;
else
grid.x = ( warpPerFbr * 32 * TiledX[tile].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
int dloc = 0;
double t0 = seconds();
cuda_timer_start(start);
if(useLoop)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_loop), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers,
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else{
if (TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers,
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[tile].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
cuda_timer_stop(start, stop, mili);
CPUtimer += seconds() - t0;
GPUTime += mili;
if(Opt.verbose){
cout << "Tile: " << tile << " - time: " << mili << "ms";
cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: "
<< TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " ";
cout << endl;
}
}
allModeGPUTime += GPUTime;
cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl;
}
}
cout << "Total GPU time: " << allModeGPUTime << ", nnz:" << TiledX[0].totNnz
<< ", nFibers:" << TiledX[0].fbrPtr[1].size() << ", nSlc:" << TiledX[0].fbrIdx[0].size()
<< endl;
for (int bin = 0; bin < Opt.nBin; ++bin)
hipStreamDestroy(streams[bin]);
/* Copying output matrix from GPU to CPU for correctness check */
int MTTKRPmode = TiledX[0].ndims - 1;
ITYPE loc = ((TiledX[0].ndims == 3) ? szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]);
checkCuda(hipMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
// check correctness
// if(Opt.impType == 14){
// MTTKRPmode = 3;
// checkCuda(hipMemcpy(&U[MTTKRPmode].vals[0] , dU + szDU[0] +szDU[1] + szDU[2], U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
// }
// else
// checkCuda(hipMemcpy(&U[mode0].vals[0], dU, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
hipFree(dVals);
hipFree(dU); //hipFree(dU1); hipFree(dU2); hipFree(dU3);
hipFree(dfbrIdx0); hipFree(dInds2); hipFree(dInds3);
hipFree(dfbrIdx1); hipFree(dFbrIdx2);
hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2);
hipFree(dFbrLikeSlcInds);
return 0;
}
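/*
 * MTTKRP_MIHCSR_GPU: MTTKRP over all modes using multiple CSF partitions
 * (TiledX[m]), each with its own mode order. For a given MTTKRP mode every
 * partition is processed with the cheapest synchronization its mode order
 * allows: slice atomics when the output mode is the partition's first mode,
 * fiberS/fiber atomics when it is an intermediate mode, and per-nonzero
 * atomics when it is the partition's last mode.
 */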
int MTTKRP_MIHCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){
ITYPE *dInds2 = NULL, *dInds3 = NULL, *dfbrPtr0 = NULL, *dfbrIdx0 = NULL, *dfbrPtr1 = NULL, *dfbrIdx1 = NULL, *dFbrPtr2 = NULL, *dFbrIdx2 = NULL, *dFbrLikeSlcInds = NULL; // NULL-init so the unconditional hipFree calls are safe no-ops for buffers never allocated (e.g. the 4-D-only ones on 3-D tensors)
DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float memcpyTime = 0;
// All m same mode
ITYPE mode0 = 0;//TiledX[0].modeOrder[0];
ITYPE mode1 = 1;//TiledX[0].modeOrder[1];
ITYPE mode2 = 2;//TiledX[0].modeOrder[2];
ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ;
for (int m = 0; m < TiledX[0].ndims; ++m){
if (TiledX[m].totNnz == 0) continue;
totNnz += TiledX[m].totNnz;
totSlcPtr += TiledX[m].fbrPtr[0].size() ;
totSlcIdx += TiledX[m].fbrIdx[0].size() ;
totFbrPtr += TiledX[m].fbrPtr[1].size() ;
totFbrIdx += TiledX[m].fbrIdx[1].size() ;
totFbrPtr2 += ((TiledX[m].ndims == 4) ? TiledX[m].fbrPtr[2].size() : 0) ;
}
//allocate and memcpy GPU memory
//Tensor
cuda_timer_start(start);
checkCuda(hipMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0);
// checkCuda(hipMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 3)
checkCuda(hipMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 4){
checkCuda(hipMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0);
}
/* cuda memcopy for tiled parts*/
for (int m = 0; m < TiledX[0].ndims; ++m){
if(m > 0) {
if (TiledX[m-1].totNnz > 0) {
dLoc += TiledX[m-1].totNnz;
dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); // all m same
dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size();
dFbrLoc += TiledX[m - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[m].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size() : 0) ;
}
}
if (TiledX[m].totNnz == 0) continue;
checkCuda(hipMemcpy(dVals + dLoc, &(TiledX[m].vals[0]), TiledX[m].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[m].fbrPtr[0][0]), TiledX[m].fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[m].fbrIdx[0][0]), TiledX[m].fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[m].fbrPtr[1][0]), TiledX[m].fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[m].fbrIdx[1][0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[m].fbrLikeSlcInds[0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(TiledX[m].ndims == 3){
if(m == 0)
// checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[m].inds[mode2][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
else if(m == 1)
checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
else if(m == 2)
checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
if(TiledX[m].ndims == 4){
checkCuda(hipMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[m].fbrPtr[2][0]), TiledX[m].fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[m].fbrIdx[2][0]), TiledX[m].fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds3 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[3]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
}
cuda_timer_stop(start, stop, memcpyTime);
cout << "Memcopy time " << memcpyTime << endl;
// //Matrices
unsigned int *dULoc = new unsigned int[TiledX[0].ndims];
unsigned int *szDU = new unsigned int[TiledX[0].ndims];
// //Matrices
DTYPE *dU;// *dU0, *dU1, *dU2, *dU3;
ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols
: (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols );
checkCuda(hipMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0);
for (int m = 0; m < TiledX[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
hipMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
if(TiledX[0].ndims == 4)
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
// BLOCK and GRID
int BLOCKSIZE = 512;
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
// if(Opt.warpPerSlice * 32 > BLOCKSIZE){
// cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
// exit(0);
// }
hipStream_t streams[Opt.nBin];
float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
int smallBinEndsAt = 5;
/* Warp per slice and threadblock per slice */
int *warpPerSlc = new int[Opt.nBin];
int *logOfWarpPerSlc = new int[Opt.nBin];
int *TbPerSlc = new int[Opt.nBin];
int *logOfTbPerSlc = new int[Opt.nBin];
for (int bin = 0; bin < Opt.nBin ; ++bin){
TbPerSlc[bin] = 1;
warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1);
if(warpPerSlc[bin] > 16)
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]);
TbPerSlc[bin] = 1;
logOfTbPerSlc[bin] = 0;
if (bin >= smallBinEndsAt){
TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // first heavy bin gets 1 << 1 (= 2) TBs per slice, not 1 << 5
if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32;
logOfTbPerSlc[bin] = log2(TbPerSlc[bin]);
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = 4;
}
}
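/* As in MTTKRP_ONE_HCSR_GPU, the per-bin warp/TB tables above are kept for
   reference only; the fiber-level-parallel kernels below take their launch
   geometry from Opt.TBsize, Opt.warpPerSlice and Opt.fiberPerWarp. */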
// TBD: change warpPerSlc to warpPerSlc[bin] and all
int slcPerTb = 1;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0;
for (int bin = 0; bin < Opt.nBin; ++bin)
hipStreamCreate(&streams[bin]);
for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode){
if(MTTKRPmode > 0){
mili = 0; GPUTime = 0; CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0;
// The previous mode's MTTKRP overwrote its factor matrix; re-initialize it on the host to known values so the next mode's result can still be checked for correctness.
int mode = MTTKRPmode - 1;
for(long r = 0; r < U[mode].nRows; ++r){
for(long c = 0; c < U[mode].nCols; ++c) // or u[mode].nCols
U[mode].vals[r * U[mode].nCols + c] = mode + .5;// 0.1 * drand48(); //1 ;//(r * R + c + 1); //
}
if(MTTKRPmode == 1){
checkCuda(hipMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
hipMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 2){
checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
hipMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 3){
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
hipMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE));
}
}
for (int m = 0; m < TiledX[0].ndims; ++m){
/* matrix order according to mode order*/
for (int mm = 0; mm < TiledX[0].ndims; ++mm){
int curMode = TiledX[m].modeOrder[mm];
dULoc[mm] = 0;
for (int q = 0; q < curMode; ++q)
dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0
}
dBinLoc = 0;
if(m > 0) {
if (TiledX[m-1].totNnz > 0) {
dLoc += TiledX[m-1].totNnz;
dSlcLoc += TiledX[m - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size();
dFbrLoc += TiledX[m - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size(): 0) ;
}
}
BLOCKSIZE = 512;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
if (TiledX[m].totNnz == 0) continue;
cuda_timer_start(start);
if(TiledX[m].modeOrder[0] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "Slc atomics - " ;
// BLOCKSIZE = 128;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;
int logOfWarpPerFbr = log2(warpPerFbr);
int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB
int logOfFbrPerWarp = log2(fbrPerWarp );
if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){
cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!"
<< endl << "hint: increase -b!" << endl;
exit(0);
}
grid.x = ( warpPerFbr * 32 * ((TiledX[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
else if(TiledX[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
}
else if(TiledX[0].ndims == 4 && TiledX[m].modeOrder[1] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "FbrS atomics - ";
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//1;//BLOCKSIZE/32;//1;////4;//;
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(TiledX[m].modeOrder[TiledX[0].ndims-2] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "Fbr atomics - ";
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(TiledX[m].modeOrder[TiledX[0].ndims-1] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "nnz atomics - " ;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if (TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
cuda_timer_stop(start, stop, mili);
GPUTime += mili;
if(Opt.verbose)
{
cout << "Tile: " << m << " - time: " << mili << " ms";
cout <<" nnz: " << TiledX[m].totNnz << " nFibers: "
<< TiledX[m].fbrPtr[1].size() << " nSlc " << TiledX[m].fbrIdx[0].size() << " ";
cout << " modeOrder: " << TiledX[m].modeOrder[0] <<" " << TiledX[m].modeOrder[1] <<" "
<< TiledX[m].modeOrder[2];
cout << endl;
}
}
if(Opt.verbose)
cout << "MI-HCSR-GPU-mode "<< MTTKRPmode <<" : " << GPUTime << "," << endl;
allModeGPUTime += GPUTime;
}
int totalMIslics = 0, totalMISfibers = 0, totalMIfibers = 0, totalMInnz = 0;
for (int m = 0; m < TiledX[0].ndims; ++m){
if(TiledX[m].totNnz){
if(TiledX[m].ndims == 3){
totalMIslics += TiledX[m].fbrIdx[0].size();
totalMIfibers += TiledX[m].fbrPtr[1].size();
totalMInnz += TiledX[m].totNnz;
}
if(TiledX[m].ndims == 4){
totalMIslics += TiledX[m].fbrIdx[0].size();
totalMISfibers += TiledX[m].fbrPtr[1].size();
totalMIfibers += TiledX[m].fbrPtr[2].size();
totalMInnz += TiledX[m].totNnz;
}
}
}
cout << "Total GPU time: " << allModeGPUTime;
// if(Opt.verbose)
if(TiledX[0].ndims == 3)
cout << " nSlc:" << totalMIslics
<< ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz
<< endl;
else if(TiledX[0].ndims == 4)
cout << " nSlc:" << totalMIslics << ", nSFibers:" << totalMISfibers
<< ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz
<< endl;
for (int bin = 0; bin < Opt.nBin; ++bin)
hipStreamDestroy(streams[bin]);
/* Copying output matrix from GPU to CPU for correctness check */
int MTTKRPmode = TiledX[0].ndims - 1;
ITYPE loc = ((TiledX[0].ndims == 3) ? szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]);
checkCuda(hipMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
hipFree(dVals);
hipFree(dU); //hipFree(dU1); hipFree(dU2); hipFree(dU3);
hipFree(dfbrIdx0); hipFree(dInds2); hipFree(dInds3);
hipFree(dfbrIdx0); hipFree(dfbrIdx1); hipFree(dFbrIdx2);
hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2);
hipFree(dFbrLikeSlcInds);
return 0;
}
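/*
 * init_GPU: one-time device setup for the CPD driver. Allocates the CSF arrays
 * (3-D tensors only: dInds2, dfbrPtr1, dfbrIdx1, dFbrLikeSlcInds) and the packed
 * factor-matrix buffer dU, copies the host data over, and hands the device
 * pointers back through the double-pointer arguments so that
 * MTTKRP_MIHCSR_GPU_oneMode_forCPD can reuse them across CPD iterations.
 */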
int init_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt, ITYPE **dInds2, ITYPE **dfbrPtr1, ITYPE **dfbrIdx1, ITYPE **dFbrLikeSlcInds, DTYPE **dVals, DTYPE **dU){
ITYPE mode0 = 0;//TiledX[0].modeOrder[0];
ITYPE mode1 = 1;//TiledX[0].modeOrder[1];
ITYPE mode2 = 2;//TiledX[0].modeOrder[2];
ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ;
// if(iter == 0 && cpdMode == 0)
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
for (int m = 0; m < TiledX[0].ndims; ++m){
if (TiledX[m].totNnz == 0) continue;
totNnz += TiledX[m].totNnz;
totFbrPtr += TiledX[m].fbrPtr[1].size() ;
totFbrIdx += TiledX[m].fbrIdx[1].size() ;
totFbrPtr2 += ((TiledX[m].ndims == 4) ? TiledX[m].fbrPtr[2].size() : 0) ;
}
/*allocate and memcpy GPU memory*/
checkCuda(hipMalloc((void**) dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 3)
checkCuda(hipMalloc((void**) dInds2, totNnz * sizeof(ITYPE)), 0);
for (int m = 0; m < TiledX[0].ndims; ++m){
if(m > 0) {
if (TiledX[m-1].totNnz > 0) {
dLoc += TiledX[m-1].totNnz;
dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); // all m same
dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size();
dFbrLoc += TiledX[m - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size();
}
}
if (TiledX[m].totNnz == 0) continue;
checkCuda(hipMemcpy(*dVals + dLoc, &(TiledX[m].vals[0]), TiledX[m].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(*dfbrPtr1 + dFbrLoc, &(TiledX[m].fbrPtr[1][0]), TiledX[m].fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(*dfbrIdx1 + dFbrIdxLoc, &(TiledX[m].fbrIdx[1][0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(*dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[m].fbrLikeSlcInds[0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(*dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
// //Matrices
unsigned int *szDU = new unsigned int[TiledX[0].ndims];
ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols
: (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols );
for (int m = 0; m < TiledX[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
checkCuda(hipMalloc((void**) dU, mtxSize * sizeof(DTYPE)), 0);
// hipMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(hipMemcpy(*dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(*dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(*dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
// MTTKRP_MIHCSR_GPU_oneMode_forCPD(TiledX, U, Opt, 0, 0,
// dInds2, dfbrPtr1, dfbrIdx1, dFbrLikeSlcInds, dVals, dU);
	return 0;
}
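/*
 * MTTKRP_MIHCSR_GPU_oneMode_forCPD: single-mode MTTKRP used inside the CPD
 * loop. It zeroes the slab of dU that belongs to cpdMode, launches the same
 * slice/fiber/nnz-atomic kernels as MTTKRP_MIHCSR_GPU for that one mode, and
 * copies the updated factor matrix back to the host. The shared device buffers
 * are freed only on the last CPD iteration of the last mode.
 */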
int MTTKRP_MIHCSR_GPU_oneMode_forCPD(TiledTensor *TiledX, Matrix *U, const Options &Opt, int cpdMode, int iter,
ITYPE *dInds2, ITYPE *dfbrPtr1, ITYPE *dfbrIdx1, ITYPE *dFbrLikeSlcInds, DTYPE *dVals, DTYPE *dU){
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float mili = 0;
ITYPE *dInds3 = NULL, *dfbrPtr0 = NULL, *dfbrIdx0 = NULL, *dFbrPtr2 = NULL, *dFbrIdx2 = NULL, *dSlcMapperBin = NULL; // NULL-init: this function never allocates these, so the unconditional hipFree at the end becomes a safe no-op
// DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
unsigned int *dULoc = new unsigned int[TiledX[0].ndims];
unsigned int *szDU = new unsigned int[TiledX[0].ndims];
for (int m = 0; m < TiledX[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
ITYPE loc = 0;
for (int m = 0; m < cpdMode; ++m)
loc += szDU[m];
hipMemset(dU+loc, 0, U[cpdMode].nRows * U[cpdMode].nCols * sizeof(DTYPE));
// BLOCK and GRID
int BLOCKSIZE = 512;
float GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0;
int MTTKRPmode = cpdMode;
// for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode)
{
for (int m = 0; m < TiledX[0].ndims; ++m){
/* matrix order according to mode order*/
for (int mm = 0; mm < TiledX[0].ndims; ++mm){
int curMode = TiledX[m].modeOrder[mm];
dULoc[mm] = 0;
for (int q = 0; q < curMode; ++q)
dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0
}
dBinLoc = 0;
if(m > 0) {
if (TiledX[m-1].totNnz > 0) {
dLoc += TiledX[m-1].totNnz;
dSlcLoc += TiledX[m - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size();
dFbrLoc += TiledX[m - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size(): 0) ;
}
}
BLOCKSIZE = 512;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
if (TiledX[m].totNnz == 0) continue;
cuda_timer_start(start);
if(TiledX[m].modeOrder[0] == MTTKRPmode && TiledX[m].totNnz){
// if(Opt.verbose)
// cout << "Slc atomics - " ;
// BLOCKSIZE = 128;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;
int logOfWarpPerFbr = log2(warpPerFbr);
int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB
int logOfFbrPerWarp = log2(fbrPerWarp );
if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){
cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!"
<< endl << "hint: increase -b!" << endl;
exit(0);
}
grid.x = ( warpPerFbr * 32 * ((TiledX[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
else if(TiledX[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
}
else if(TiledX[m].modeOrder[TiledX[0].ndims-2] == MTTKRPmode && TiledX[m].totNnz){
// if(Opt.verbose)
// cout << "Fbr atomics - ";
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(TiledX[m].modeOrder[TiledX[0].ndims-1] == MTTKRPmode && TiledX[m].totNnz){
// if(Opt.verbose)
// cout << "nnz atomics - " ;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if (TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
cuda_timer_stop(start, stop, mili);
GPUTime += mili;
if(Opt.verbose)
{
cout << "Tile: " << m << " - time: " << mili << " ms";
cout <<" nnz: " << TiledX[m].totNnz << " nFibers: "
<< TiledX[m].fbrPtr[1].size() << " nSlc " << TiledX[m].fbrIdx[0].size() << " ";
cout << " modeOrder: " << TiledX[m].modeOrder[0] <<" " << TiledX[m].modeOrder[1] <<" "
<< TiledX[m].modeOrder[2];
cout << endl;
}
}
// cout << "MI-HCSR-GPU-mode "<< MTTKRPmode <<" : " << GPUTime << "," << endl;
allModeGPUTime += GPUTime;
}
// ITYPE loc = 0;
// for (int m = 0; m < cpdMode; ++m)
// loc += szDU[m];
// ITYPE loc = szDU[0];
/* Copying output matrix from GPU to CPU for correctness check */
checkCuda(hipMemcpy(&U[cpdMode].vals[0], dU + loc, U[cpdMode].nRows * U[cpdMode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
if(iter == Opt.cpdIters - 1 && cpdMode == TiledX[0].ndims - 1)
{
cout << "Freeing variable " << endl;
hipFree(dVals);
hipFree(dU); //hipFree(dU1); hipFree(dU2); hipFree(dU3);
hipFree(dfbrIdx0); hipFree(dInds2); hipFree(dInds3);
hipFree(dfbrIdx1); hipFree(dFbrIdx2);
hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2);
hipFree(dFbrLikeSlcInds);
}
return 0;
}
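/*
 * MTTKRP_MIHCSR_multiGPU: MPI variant of the MI-HCSR MTTKRP. Each rank copies
 * only its share of every CSF partition (sized by nnzInRank / fbrInRank /
 * slcInRank, with start offsets taken from the mpiEnd* arrays) to its local
 * GPU and runs the same per-mode kernels on that subset of the data.
 */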
int MTTKRP_MIHCSR_multiGPU(TiledTensor *MMCSF, Matrix *U, const Options &Opt, const MPI_param &MPIparam){
ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin, *dFbrLikeSlcInds;
DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
ITYPE dPsLoc, dPfLoc, dpNloc; // slice, fiber, and nnz locations based on the partition inside MMCSF
// All m same mode
ITYPE mode0 = 0;//MMCSF[0].modeOrder[0];
ITYPE mode1 = 1;//MMCSF[0].modeOrder[1];
ITYPE mode2 = 2;//MMCSF[0].modeOrder[2];
ITYPE mode3 = 3;//((MMCSF[0].ndims == 4) ? MMCSF[0].modeOrder[3] : 0) ;
int rank = MPIparam.mpi_rank;
int nP = MPIparam.n_proc;
int *nnzInRank = new int [nP];
int *fbrInRank = new int [nP];
int *slcInRank = new int [nP];
for (int m = 0; m < MMCSF[0].ndims; ++m){
if (MMCSF[m].totNnz == 0) continue;
totNnz += MMCSF[m].nnzInRank[rank];
totSlcPtr += MMCSF[m].slcInRank[rank];
totSlcIdx += MMCSF[m].slcInRank[rank];
totFbrPtr += MMCSF[m].fbrInRank[rank];
totFbrIdx += MMCSF[m].fbrInRank[rank] ;
totFbrPtr2 += ((MMCSF[m].ndims == 4) ? MMCSF[m].fbrPtr[2].size() : 0) ;
}
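/* totNnz / totSlcPtr / totFbrPtr now count only this rank's share, so the
   device buffers below are sized per rank rather than for the full tensor. */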
//allocate and memcpy GPU memory
//Tensor
checkCuda(hipMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0);
if(MMCSF[0].ndims == 3)
checkCuda(hipMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0);
if(MMCSF[0].ndims == 4){
checkCuda(hipMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0);
}
cout << "point 1: starting MPI memcopy, ndims = " << MMCSF[0].ndims << endl;
/* cuda memcopy for tiled parts*/
for (int m = 0; m < MMCSF[0].ndims; ++m){
if(m > 0) {
if (MMCSF[m-1].totNnz > 0) {
dLoc += MMCSF[m-1].nnzInRank[rank];
dSlcLoc += MMCSF[m-1].slcInRank[rank]; // all m same
dSlcIdxLoc += MMCSF[m-1].slcInRank[rank];
dFbrLoc += MMCSF[m-1].fbrInRank[rank];
dFbrIdxLoc += MMCSF[m-1].fbrInRank[rank];
// dFbrLoc2 += ((MMCSF[m].ndims == 4) ? MMCSF[m - 1].fbrPtr[2].size() : 0) ;
}
}
if (MMCSF[m].totNnz == 0) continue; // not necessary I guess...
int stNnz = 0, stFbr = 0, stSlc= 0;
if(rank > 0){
stNnz = MMCSF[m].mpiEndNnz[rank - 1];
stFbr = MMCSF[m].mpiEndFbr[rank - 1];
stSlc = MMCSF[m].mpiEndSlc[rank - 1];
}
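/* stNnz / stSlc / stFbr are this rank's starting offsets within partition m:
   rank 0 starts at 0, and every other rank starts where the previous rank's
   mpiEnd* boundary left off. */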
checkCuda(hipMemcpy(dVals + dLoc, &(MMCSF[m].vals[0 + stNnz]), MMCSF[m].nnzInRank[rank] * sizeof(DTYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr0 + dSlcLoc, &(MMCSF[m].fbrPtr[0][0 + stSlc]), MMCSF[m].slcInRank[rank] * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx0 + dSlcIdxLoc, &(MMCSF[m].fbrIdx[0][0+stSlc]), MMCSF[m].slcInRank[rank] * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr1 + dFbrLoc, &(MMCSF[m].fbrPtr[1][0+stFbr]), MMCSF[m].fbrInRank[rank] * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx1 + dFbrIdxLoc, &(MMCSF[m].fbrIdx[1][0+stFbr]), MMCSF[m].fbrInRank[rank] * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(MMCSF[m].fbrLikeSlcInds[0+stFbr]),MMCSF[m].fbrInRank[rank] * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(MMCSF[m].ndims == 3)
checkCuda(hipMemcpy(dInds2 + dLoc, &(MMCSF[m].inds[MMCSF[m].modeOrder[2]][0+stNnz]), MMCSF[m].nnzInRank[rank] * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(MMCSF[m].ndims == 4){
checkCuda(hipMemcpy(dFbrPtr2 + dFbrLoc2, &(MMCSF[m].fbrPtr[2][0]), MMCSF[m].fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dFbrIdx2 + dFbrLoc2, &(MMCSF[m].fbrIdx[2][0]), MMCSF[m].fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds3 + dLoc, &(MMCSF[m].inds[MMCSF[m].modeOrder[3]][0]), MMCSF[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
}
// //Matrices
unsigned int *dULoc = new unsigned int[MMCSF[0].ndims];
unsigned int *szDU = new unsigned int[MMCSF[0].ndims];
// //Matrices
DTYPE *dU;// *dU0, *dU1, *dU2, *dU3;
ITYPE mtxSize = ((MMCSF[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols
: (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols );
checkCuda(hipMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0);
for (int m = 0; m < MMCSF[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
hipMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
if(MMCSF[0].ndims == 4)
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
// BLOCK and GRID
int BLOCKSIZE = 512;
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
if(Opt.warpPerSlice * 32 > BLOCKSIZE){
cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
exit(0);
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipStream_t streams[Opt.nBin];
float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
int smallBinEndsAt = 5;
/* Warp per slice and threadblock per slice */
int *warpPerSlc = new int[Opt.nBin];
int *logOfWarpPerSlc = new int[Opt.nBin];
int *TbPerSlc = new int[Opt.nBin];
int *logOfTbPerSlc = new int[Opt.nBin];
for (int bin = 0; bin < Opt.nBin ; ++bin){
TbPerSlc[bin] = 1;
warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1);
if(warpPerSlc[bin] > 16)
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]);
TbPerSlc[bin] = 1;
logOfTbPerSlc[bin] = 0;
if (bin >= smallBinEndsAt){
TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // the first big bin uses 1 << 1 thread blocks per slice, not 1 << 5
if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32;
logOfTbPerSlc[bin] = log2(TbPerSlc[bin]);
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = 4;
}
}
// TBD: change warpPerSlc to warpPerSlc[bin] and all
int slcPerTb = 1;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0;
for (int bin = 0; bin < Opt.nBin; ++bin)
hipStreamCreate(&streams[bin]);
cout <<"point 2 starting MPI" <<MMCSF[0].ndims << endl;
for (int MTTKRPmode = 0; MTTKRPmode < MMCSF[0].ndims; ++MTTKRPmode){
if(MTTKRPmode > 0){
mili = 0; GPUTime = 0; CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0;
// The previous mode's MTTKRP overwrote its factor matrix (it was the output); re-initialize it to a fixed value so it can be reused as an input.
int mode = MTTKRPmode - 1;
for(long r = 0; r < U[mode].nRows; ++r){
for(long c = 0; c < U[mode].nCols; ++c) // or u[mode].nCols
U[mode].vals[r * U[mode].nCols + c] = mode + .5;// 0.1 * drand48(); //1 ;//(r * R + c + 1); //
}
if(MTTKRPmode == 1){
checkCuda(hipMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
hipMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 2){
checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
hipMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 3){
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
hipMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE));
}
}
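// One kernel launch per MM-CSF partition: the kernel variant (slice-, fiberS-, fiber-, or nnz-level atomics) is chosen by where MTTKRPmode sits in this partition's mode order.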
for (int m = 0; m < MMCSF[0].ndims; ++m){
/* matrix order according to mode order*/
for (int mm = 0; mm < MMCSF[0].ndims; ++mm){
int curMode = MMCSF[m].modeOrder[mm];
dULoc[mm] = 0;
for (int q = 0; q < curMode; ++q)
dULoc[mm] += szDU[q % MMCSF[0].ndims]; //1 2 3 0
}
dBinLoc = 0;
if(m > 0) {
if (MMCSF[m-1].totNnz > 0) {
dLoc += MMCSF[m-1].nnzInRank[rank];
dSlcLoc += MMCSF[m-1].slcInRank[rank]; // all m same
dSlcIdxLoc += MMCSF[m-1].slcInRank[rank];
dFbrLoc += MMCSF[m-1].fbrInRank[rank];
dFbrIdxLoc += MMCSF[m-1].fbrInRank[rank];
// dFbrLoc2 += ((MMCSF[m].ndims == 4) ? MMCSF[m - 1].fbrPtr[2].size() : 0) ;
}
}
BLOCKSIZE = 512;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
if (MMCSF[m].totNnz == 0) continue;
cuda_timer_start(start);
if(MMCSF[m].modeOrder[0] == MTTKRPmode && MMCSF[m].totNnz){
if(Opt.verbose)
cout << "Slc atomics - " ;
BLOCKSIZE = 128;
// BLOCKSIZE = 128;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;
int logOfWarpPerFbr = log2(warpPerFbr);
int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // don't overflow TB
int logOfFbrPerWarp = log2(fbrPerWarp );
if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){
cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!"
<< endl << "hint: increase -b!" << endl;
exit(0);
}
grid.x = ( warpPerFbr * 32 * ((MMCSF[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE;
if(MMCSF[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0 , dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, MMCSF[m].nFibers,
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
else if(MMCSF[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0 , dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
MMCSF[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
}
else if(MMCSF[0].ndims == 4 && MMCSF[m].modeOrder[1] == MTTKRPmode && MMCSF[m].totNnz){
if(Opt.verbose)
cout << "FbrS atomics - ";
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//1;//BLOCKSIZE/32;//1;////4;//;
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * MMCSF[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0 , dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
MMCSF[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(MMCSF[m].modeOrder[MMCSF[0].ndims-2] == MTTKRPmode && MMCSF[m].totNnz){
if(Opt.verbose)
cout << "Fbr atomics - ";
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * MMCSF[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if(MMCSF[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0 , dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, MMCSF[m].nFibers,
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (MMCSF[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0 , dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
MMCSF[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(MMCSF[m].modeOrder[MMCSF[0].ndims-1] == MTTKRPmode && MMCSF[m].totNnz){
if(Opt.verbose)
cout << "nnz atomics - " ;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * MMCSF[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if (MMCSF[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0 , dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, MMCSF[m].nFibers,
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (MMCSF[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0 , dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
MMCSF[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
cuda_timer_stop(start, stop, mili);
GPUTime += mili;
if(Opt.verbose){
cout << "Tile: " << m << " - time: " << mili << " ms";
cout <<" nnz: " << MMCSF[m].totNnz << " nFibers: "
<< MMCSF[m].fbrPtr[1].size() << " nSlc " << MMCSF[m].fbrIdx[0].size() << " ";
cout << " modeOrder: " << MMCSF[m].modeOrder[0] <<" " << MMCSF[m].modeOrder[1] <<" "
<< MMCSF[m].modeOrder[2];
cout << endl;
}
}
cout << "MI-HCSR-GPU-mode "<< MTTKRPmode <<" : " << GPUTime << "," << endl;
allModeGPUTime += GPUTime;
}
int totalMIslics = 0, totalMIfibers = 0, totalMInnz = 0;
for (int m = 0; m < MMCSF[0].ndims; ++m){
if(MMCSF[m].totNnz){
totalMIslics += MMCSF[m].fbrIdx[0].size();
totalMIfibers += MMCSF[m].fbrPtr[1].size();
totalMInnz += MMCSF[m].totNnz;
}
}
cout << "Total GPU time: " << allModeGPUTime << ", nnz:" << totalMInnz
<< ", nFibers:" << totalMIfibers << ", nSlc:" << totalMIslics
<< endl;
for (int bin = 0; bin < Opt.nBin; ++bin)
hipStreamDestroy(streams[bin]);
/* Copying output matrix from GPU to CPU*/
int MTTKRPmode = MMCSF[0].ndims - 1;
ITYPE loc = ((MMCSF[0].ndims == 3) ? szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]);
DTYPE *tmpDU = new DTYPE[ U[MTTKRPmode].nRows * U[MTTKRPmode].nCols];
checkCuda(hipMemcpy(tmpDU, dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
cout <<"tmpDu: "<< tmpDU[0] << endl;
// checkCuda(hipMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
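// Each rank holds a partial result of the last-mode MTTKRP computed on its share of the tensor; sum the partials across ranks into U. Note: MPI_FLOAT assumes DTYPE is float.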
MPI_Barrier(MPI_COMM_WORLD);
MPI_Allreduce( &(tmpDU[0]), &U[MTTKRPmode].vals[0], szDU[MTTKRPmode] , MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
/*Free variables*/
hipFree(dVals);
hipFree(dU); //hipFree(dU1); hipFree(dU2); hipFree(dU3);
hipFree(dfbrIdx0); hipFree(dInds2); hipFree(dInds3);
hipFree(dfbrIdx0); hipFree(dfbrIdx1); hipFree(dFbrIdx2);
hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2);
hipFree(dFbrLikeSlcInds);
return 0;
}
/* Scales with the number of partitions: an MM-CSF with 2 partitions launches kernels on 2 nodes in parallel.
Not scalable to more nodes than partitions. */
int MTTKRP_MIHCSR_multiGPU_parMM(TiledTensor *TiledX, Matrix *U, const Options &Opt, const MPI_param &MPIparam){
ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin, *dFbrLikeSlcInds;
DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
// ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
// All m same mode
ITYPE mode0 = 0;//TiledX[0].modeOrder[0];
ITYPE mode1 = 1;//TiledX[0].modeOrder[1];
ITYPE mode2 = 2;//TiledX[0].modeOrder[2];
ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ;
//allocate and memcpy GPU memory
//Tensor
vector<int> activeTile;
for (int m = 0; m < TiledX[0].ndims; ++m){
if(TiledX[m].totNnz)
activeTile.push_back(m);
}
if ( MPIparam.mpi_rank >= (int)activeTile.size() ) {
cout << "Not using node " << MPIparam.mpi_rank << endl;
return 0;
}
if(MPIparam.n_proc < activeTile.size()){
cout << "Number of partitions is higher than the number of nodes. Hint: allocate more nodes." << endl;
}
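// Each rank processes exactly one non-empty partition (tile); ranks beyond the number of active tiles already returned above.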
int m = activeTile[MPIparam.mpi_rank];
if (TiledX[m].totNnz == 0) return 0; // not necessary I guess...
checkCuda(hipMalloc((void**) &dVals, TiledX[m].totNnz * sizeof(DTYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr0, TiledX[m].fbrPtr[0].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx0, TiledX[m].fbrIdx[0].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dSlcMapperBin, TiledX[m].fbrPtr[0].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrPtr1, TiledX[m].fbrPtr[1].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dfbrIdx1, TiledX[m].fbrIdx[1].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dFbrLikeSlcInds, TiledX[m].fbrIdx[1].size() * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 3)
checkCuda(hipMalloc((void**) &dInds2, TiledX[m].totNnz * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 4){
checkCuda(hipMalloc((void**) &dFbrIdx2, TiledX[m].fbrPtr[2].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dFbrPtr2, TiledX[m].fbrIdx[2].size() * sizeof(ITYPE)), 0);
checkCuda(hipMalloc((void**) &dInds3, TiledX[m].totNnz * sizeof(ITYPE)), 0);
}
/* CUDA memcpy for tiled parts */
// for (int m = 0; m < TiledX[0].ndims; ++m)
{
// if(m > 0) {
// if (TiledX[m-1].totNnz > 0) {
// dLoc += TiledX[m-1].totNnz;
// dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); // all m same
// dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size();
// dFbrLoc += TiledX[m - 1].fbrPtr[1].size();
// dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size();
// dFbrLoc2 += ((TiledX[m].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size() : 0) ;
// }
// }
checkCuda(hipMemcpy(dVals + dLoc, &(TiledX[m].vals[0]), TiledX[m].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[m].fbrPtr[0][0]), TiledX[m].fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[m].fbrIdx[0][0]), TiledX[m].fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[m].fbrPtr[1][0]), TiledX[m].fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[m].fbrIdx[1][0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[m].fbrLikeSlcInds[0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(TiledX[m].ndims == 3)
checkCuda(hipMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
if(TiledX[m].ndims == 4){
checkCuda(hipMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[m].fbrPtr[2][0]), TiledX[m].fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[m].fbrIdx[2][0]), TiledX[m].fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dInds3 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[3]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
dBinLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin){
if(bin > 0)
dBinLoc += TiledX[m].slcMapperBin[bin-1].size();
checkCuda(hipMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[m].slcMapperBin[bin][0]), TiledX[m].slcMapperBin[bin].size() * sizeof(ITYPE),hipMemcpyHostToDevice), 0);
}
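// The slice-to-bin mapping is copied bin by bin, packed contiguously starting at offset dSlcIdxLoc.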
}
// //Matrices
unsigned int *dULoc = new unsigned int[TiledX[0].ndims];
unsigned int *szDU = new unsigned int[TiledX[0].ndims];
// //Matrices
DTYPE *dU;// *dU0, *dU1, *dU2, *dU3;
ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols
: (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols );
checkCuda(hipMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0);
for (int m = 0; m < TiledX[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
hipMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
if(TiledX[0].ndims == 4)
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
// BLOCK and GRID
int BLOCKSIZE = 512;
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
if(Opt.warpPerSlice * 32 > BLOCKSIZE){
cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
exit(0);
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipStream_t streams[Opt.nBin];
float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
int smallBinEndsAt = 5;
/* Warp per slice and threadblock per slice */
int *warpPerSlc = new int[Opt.nBin];
int *logOfWarpPerSlc = new int[Opt.nBin];
int *TbPerSlc = new int[Opt.nBin];
int *logOfTbPerSlc = new int[Opt.nBin];
for (int bin = 0; bin < Opt.nBin ; ++bin){
TbPerSlc[bin] = 1;
warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1);
if(warpPerSlc[bin] > 16)
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]);
TbPerSlc[bin] = 1;
logOfTbPerSlc[bin] = 0;
if (bin >= smallBinEndsAt){
TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // the first big bin uses 1 << 1 thread blocks per slice, not 1 << 5
if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32;
logOfTbPerSlc[bin] = log2(TbPerSlc[bin]);
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = 4;
}
}
// TBD: change warpPerSlc to warpPerSlc[bin] and all
int slcPerTb = 1;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0;
for (int bin = 0; bin < Opt.nBin; ++bin)
hipStreamCreate(&streams[bin]);
for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode){
if(MTTKRPmode > 0){
mili = 0; GPUTime = 0; CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0;
// The previous mode's MTTKRP overwrote its factor matrix (it was the output); re-initialize it to a fixed value so it can be reused as an input.
int mode = MTTKRPmode - 1;
for(long r = 0; r < U[mode].nRows; ++r){
for(long c = 0; c < U[mode].nCols; ++c) // or u[mode].nCols
U[mode].vals[r * U[mode].nCols + c] = mode + .5;// 0.1 * drand48(); //1 ;//(r * R + c + 1); //
}
if(MTTKRPmode == 1){
checkCuda(hipMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
hipMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 2){
checkCuda(hipMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
hipMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 3){
checkCuda(hipMemcpy(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice), 0);
hipMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE));
}
}
// for (int m = 0; m < TiledX[0].ndims; ++m)
{
/* matrix order according to mode order*/
for (int mm = 0; mm < TiledX[0].ndims; ++mm){
int curMode = TiledX[m].modeOrder[mm];
dULoc[mm] = 0;
for (int q = 0; q < curMode; ++q)
dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0
}
dBinLoc = 0;
// if(m > 0) {
// if (TiledX[m-1].totNnz > 0) {
// dLoc += TiledX[m-1].totNnz;
// dSlcLoc += TiledX[m - 1].fbrPtr[0].size();
// dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size();
// dFbrLoc += TiledX[m - 1].fbrPtr[1].size();
// dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size();
// dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size(): 0) ;
// }
// }
BLOCKSIZE = 512;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
if (TiledX[m].totNnz == 0) continue;
cuda_timer_start(start);
if(TiledX[m].modeOrder[0] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "Slc atomics - " ;
BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = 1;//BLOCKSIZE/32;//1;//Opt.warpPerSlice;//4;//;
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
int fbrPerWarp = BLOCKSIZE/32; // don't overflow TB
int logOfFbrPerWarp = log2(fbrPerWarp );
grid.x = ( warpPerFbr * 32 * ((TiledX[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
else if(TiledX[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
}
else if(TiledX[0].ndims == 4 && TiledX[m].modeOrder[1] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "FbrS atomics - ";
BLOCKSIZE = 128;//Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = 1;//BLOCKSIZE/32;//1;//Opt.warpPerSlice;//4;//;
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(TiledX[m].modeOrder[TiledX[0].ndims-2] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "Fbr atomics - ";
BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = 1;//Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(TiledX[m].modeOrder[TiledX[0].ndims-1] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "nnz atomics - " ;
BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = 1;//Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if (TiledX[0].ndims == 3)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, streams[bin], dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
cuda_timer_stop(start, stop, mili);
GPUTime += mili;
if(Opt.verbose)
{
cout << "Tile: " << m << " - time: " << mili << " ms";
cout <<" nnz: " << TiledX[m].totNnz << " nFibers: "
<< TiledX[m].fbrPtr[1].size() << " nSlc " << TiledX[m].fbrIdx[0].size() << " ";
cout << " modeOrder: " << TiledX[m].modeOrder[0] <<" " << TiledX[m].modeOrder[1] <<" "
<< TiledX[m].modeOrder[2];
cout << endl;
}
}
cout << "MI-HCSR-GPU-mode "<< MTTKRPmode <<" : " << GPUTime << "," << endl;
allModeGPUTime += GPUTime;
}
int totalMIslics = 0, totalMIfibers = 0, totalMInnz = 0;
for (int m = 0; m < TiledX[0].ndims; ++m){
if(TiledX[m].totNnz){
totalMIslics += TiledX[m].fbrIdx[0].size();
totalMIfibers += TiledX[m].fbrPtr[1].size();
totalMInnz += TiledX[m].totNnz;
}
}
cout << "Total GPU time: " << allModeGPUTime << ", nnz:" << totalMInnz
<< ", nFibers:" << totalMIfibers << ", nSlc:" << totalMIslics
<< endl;
for (int bin = 0; bin < Opt.nBin; ++bin)
hipStreamDestroy(streams[bin]);
/* Copying output matrix from GPU to CPU*/
int MTTKRPmode = TiledX[0].ndims - 1;
ITYPE loc = ((TiledX[0].ndims == 3) ? szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]);
DTYPE *tmpDU = new DTYPE[ U[MTTKRPmode].nRows * U[MTTKRPmode].nCols];
checkCuda(hipMemcpy(tmpDU, dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
// checkCuda(hipMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), 0);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Allreduce( &(tmpDU[0]), &U[MTTKRPmode].vals[0], szDU[MTTKRPmode] , MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
/*Free variables*/
hipFree(dVals);
hipFree(dU); //hipFree(dU1); hipFree(dU2); hipFree(dU3);
hipFree(dfbrIdx0); hipFree(dInds2); hipFree(dInds3);
hipFree(dfbrIdx0); hipFree(dfbrIdx1); hipFree(dFbrIdx2);
hipFree(dfbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrPtr2);
hipFree(dFbrLikeSlcInds);
return 0;
}
|
800a3a55e9eeaf6ca91ca0a28ebb556773e4251c.cu
|
#include <iostream>
#include "mttkrp_gpu.h"
#include <mpi.h>
#include <vector>
#define mpi_barrier() MPI_Barrier(MPI_COMM_WORLD);
inline cudaError_t checkCuda(cudaError_t result, int s){
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error in line : %s - %d\n", cudaGetErrorString(result), s);
assert(result == cudaSuccess);
}
return result;
}
void cuda_timer_start(cudaEvent_t start){
checkCuda(cudaEventRecord(start), __LINE__);
}
void cuda_timer_stop(cudaEvent_t start, cudaEvent_t stop, float &mili){
checkCuda(cudaEventRecord(stop), __LINE__);
cudaEventSynchronize(stop);
checkCuda(cudaEventElapsedTime(&mili, start, stop), __LINE__);
cudaDeviceSynchronize();
}
// CUDA kernel call to do COO MTTKRP
__global__ void mttkrp_COO_kernel(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE nnz,
DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int x = gId >> 5;
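// one warp per nonzero: x indexes the nonzero, the 32 lanes of the warp stride over the R rank columns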
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
// CUDA kernel call to do COO MTTKRP using loop
__global__ void mttkrp_COO_kernel_loop(DTYPE * const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE * const dInds2, const ITYPE nnz,
DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
//like PARTI
size_t num_loops_nnz = 1 * 32;
size_t const nnz_per_loop = gridDim.x * blockDim.x;
if(nnz > nnz_per_loop) {
num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5;
}
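// grid-stride style loop: each iteration assigns one warp to one nonzero until all nnz entries are covered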
unsigned int x;
for(size_t nl=0; nl<num_loops_nnz; ++nl) {
x = (gId + nl * nnz_per_loop) >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
__syncthreads();
}
}
// CUDA kernel call to do COO MTTKRP 4D
__global__ void mttkrp_COO_kernel_4D(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE *dInds3,
ITYPE nnz, DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int x = gId >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
ITYPE idx3 = dInds3[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
// CUDA kernel call to do COO MTTKRP 4D using loop
__global__ void mttkrp_COO_kernel_4D_loop(DTYPE *const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE *const dInds2, ITYPE * const dInds3,
ITYPE nnz, DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, DTYPE * const dU3, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
//like PARTI
size_t num_loops_nnz = 1 * 32;
size_t const nnz_per_loop = gridDim.x * blockDim.x;
if(nnz > nnz_per_loop) {
num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5;
}
unsigned int x;
for(size_t nl=0; nl<num_loops_nnz; ++nl)
{
x = (gId + nl * nnz_per_loop) >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
ITYPE idx3 = dInds3[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
__syncthreads();
}
}
// no atomics needed because the counts are all 1 in the HYB COO part
__global__ void mttkrp_HYB_COO_kernel(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE nnz,
DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int x = gId >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r];
dU0[idx0 * R + r] += tmp_val;
}
}
}
// CUDA kernel call to do COO MTTKRP using loop
__global__ void mttkrp_HYB_COO_kernel_loop(DTYPE * const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE * const dInds2, const ITYPE nnz,
DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
//like PARTI
size_t num_loops_nnz = 1 * 32;
size_t const nnz_per_loop = gridDim.x * blockDim.x;
if(nnz > nnz_per_loop) {
num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5;
}
unsigned int x;
for(size_t nl=0; nl<num_loops_nnz; ++nl) {
x = (gId + nl * nnz_per_loop) >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r];
dU0[idx0 * R + r] += tmp_val;
}
}
__syncthreads();
}
}
// no atomics needed because the counts are all 1 in the HYB COO part
__global__ void mttkrp_HYB_COO_kernel_4D(DTYPE *vals, ITYPE *dInds0, ITYPE *dInds1, ITYPE *dInds2, ITYPE *dInds3,
ITYPE nnz, DTYPE *dU0, DTYPE *dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int x = gId >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
ITYPE idx3 = dInds3[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r];
dU0[idx0 * R + r] += tmp_val;
}
}
}
// CUDA kernel call to do COO MTTKRP 4D using loop
__global__ void mttkrp_HYB_COO_kernel_4D_loop(DTYPE *const vals, ITYPE * const dInds0, ITYPE * const dInds1, ITYPE *const dInds2, ITYPE * const dInds3,
ITYPE nnz, DTYPE *dU0, DTYPE * const dU1, DTYPE * const dU2, DTYPE * const dU3, ITYPE mode, ITYPE R){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
//like PARTI
size_t num_loops_nnz = 1 * 32;
size_t const nnz_per_loop = gridDim.x * blockDim.x;
if(nnz > nnz_per_loop) {
num_loops_nnz = ((nnz + nnz_per_loop - 1) / nnz_per_loop) << 5;
}
unsigned int x;
for(size_t nl=0; nl<num_loops_nnz; ++nl)
{
x = (gId + nl * nnz_per_loop) >> 5;
if(x < nnz){
DTYPE tmp_val = 0;
ITYPE idx0 = dInds0[x];
ITYPE idx1 = dInds1[x];
ITYPE idx2 = dInds2[x];
ITYPE idx3 = dInds3[x];
for(ITYPE r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] * dU3[idx3 * R + r];
dU0[idx0 * R + r] += tmp_val;
}
}
__syncthreads();
}
}
__global__ void mttkrp_CSL_kernel(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = slc;//dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc];
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
tmp_val = 0;
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
unsigned int idx1 = dInds1[fbr];
unsigned int idx2 = dInds2[fbr];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r];
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
__global__ void mttkrp_CSL_kernel_bin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc];
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
tmp_val = 0;
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
unsigned int idx1 = dInds1[fbr];
unsigned int idx2 = dInds2[fbr];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r];
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
// CSL kernel with loop like ParTI
__global__ void mttkrp_CSL_kernel_bin_loop(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE tmp_val;
//like PARTI
size_t num_loops_nnz = 1 * 32;
size_t const nnz_per_loop = gridDim.x * blockDim.x;
if(nSlices > nnz_per_loop) {
num_loops_nnz = ((nSlices + nnz_per_loop - 1) / nnz_per_loop) << 5;
}
for(size_t nl=0; nl<num_loops_nnz; ++nl) {
slc = (gId + nl * nnz_per_loop) >> 5;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc];
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
tmp_val = 0;
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
unsigned int idx1 = dInds1[fbr];
unsigned int idx2 = dInds2[fbr];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r];
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
__syncthreads();
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_CSL_kernel_hvyBin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *dInds1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
unsigned int laneId = threadIdx.x & 31;
unsigned int workId = threadIdx.x >> 5;
unsigned int slc = blockIdx.x >> logOfTPS;
unsigned int localBId = blockIdx.x & (TbPerSlc -1);
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
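// a heavy slice is split across TbPerSlc thread blocks; this block handles fibers [fb_st, fb_end) of the slice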
tmp_val = 0;
for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1]; fbr+=warpPerSlice){
unsigned int idx1 = dInds1[fbr];
unsigned int idx2 = dInds2[fbr];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[fbr] * dU2[idx2 * R + r] * dU1[idx1 * R + r];
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
// HCSR MTTKRP : 16 WARP = 1 TB per slice
__global__ void mttkrp_HCSR_kernel_16WARP(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = tId >> 5; //(tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = blockIdx.x ;//gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE tmp = 0;
DTYPE tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
tmp_val = 0;
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx2 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU2[idx2 * R + r];
}
}
// unsigned int idx1 = dInds1[fbrPtr1[fbr]];
unsigned int idx1 = fbrIdx1[fbr];
for(unsigned int r=laneId; r<R; r+=32) {
tmp += tmp_val * dU1[idx1 * R + r] ; // C matrix
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
// CUDA kernel call to do HCSR MTTKRP for the first bin 1 WARP per slice
__global__ void mttkrp_HCSR_kernel_COO(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int slc = gId >> 5; // 5: minimum 1 WARP (2^5)
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
for (int fbr = fb_st; fbr < fb_end; fbr++){
tmp_val = 0;
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx2 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU2[idx2 * R + r];
}
}
unsigned int idx1 = fbrIdx1[fbr];
for(unsigned int r=laneId; r<R; r+=32) {
dU0[idx0 * R + r] += tmp_val * dU1[idx1 * R + r] ;
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_HCSR_kernel_smllBin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
// unsigned int slcPerTb = 16/warpPerSlice;
// unsigned int shSlc = slc & slcPerTb;
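// warpPerSlice warps cooperate on one slice: each warp reduces a strided subset of the slice's fibers, and its partial sum is atomically added to the slice's output row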
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
tmp_val = 0;
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx2 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU2[idx2 * R + r];
}
}
unsigned int idx1 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
for(unsigned int r=laneId; r<R; r+=32) {
tmp += tmp_val * dU1[idx1 * R + r] ;
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_HCSR_kernel_smllBin_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE outbuffer = 0, tmp_val = 0, outbuffer1 = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){
unsigned int idx1 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
outbuffer1 = 0;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx2 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx3 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU3[idx3 * R + r];
}
for(unsigned int r=laneId; r<R; r+=32)
outbuffer1 += tmp_val * dU2[idx2 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32)
outbuffer += outbuffer1 * dU1[idx1 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], outbuffer);
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_HCSR_kernel_hvyBin(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
unsigned int laneId = threadIdx.x & 31;
unsigned int workId = threadIdx.x >> 5;
unsigned int slc = blockIdx.x >> logOfTPS;
unsigned int localBId = blockIdx.x & (TbPerSlc -1);
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1] ; fbr+=warpPerSlice){
tmp_val = 0;
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx2 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU2[idx2 * R + r];
}
}
unsigned int idx1 = fbrIdx1[fbr];//dInds1[fbrPtr1[fbr]];
for(unsigned int r=laneId; r<R; r+=32) {
tmp += tmp_val * dU1[idx1 * R + r] ;
// // atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_HCSR_kernel_hvyBin_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
unsigned int laneId = threadIdx.x & 31;
unsigned int workId = threadIdx.x >> 5;
unsigned int slc = blockIdx.x >> logOfTPS;
unsigned int localBId = blockIdx.x & (TbPerSlc -1);
DTYPE outbuffer = 0, tmp_val = 0, outbuffer1 = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx0 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){
unsigned int idx1 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
outbuffer1 = 0;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx2 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx3 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU3[idx3 * R + r];
}
for(unsigned int r=laneId; r<R; r+=32)
outbuffer1 += tmp_val * dU2[idx2 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32)
outbuffer += outbuffer1 * dU1[idx1 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], outbuffer);
}
}
}
// CUDA fbr atomic using slcLikeFbr
__global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbr = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
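// fiber-level parallelism: each warp group owns fbrPerWarp consecutive fibers and flushes its running sum to the owning slice row with atomicAdd at slice boundaries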
DTYPE tmp = 0, tmp_val;
if(fbr < nFibers - 1){
tmp_val = 0;
bool diffFiber = false;
unsigned int idx0;
for (int fr = 0; fr < fbrPerWarp && (fbr+fr) < (nFibers - 1); ++fr){
diffFiber = false;
unsigned int idx1 = fbrIdx1[fbr+fr];// dInds1[fbrPtr1[fbr]];
idx0 = fbrLikeSlcInds[fbr+fr];//slc;
tmp_val = 0;
for(unsigned int x = fbrPtr1[fbr+fr] + workId; x < fbrPtr1[fbr+fr+1]; x+=warpPerSlice) {
unsigned int idx2 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU2[idx2 * R + r]; //2MR
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp += tmp_val * dU1[idx1 * R + r] ; //2PR
}
if(fbrLikeSlcInds[fbr+fr] != fbrLikeSlcInds[fbr+fr+1]) {
diffFiber = true;
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp); //2PR
}
tmp = 0;
}
}
if(!diffFiber) {
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
// CUDA fbr atomic using slcLikeFbr
__global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0,
DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbrS = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
DTYPE tmp = 0, tmp_val, tmp2= 0;
if(fbrS < nFibers - 1){
tmp_val = 0;
bool diffFiber = false;
unsigned int idx0;
for (int fr = 0; fr < fbrPerWarp && (fbrS+fr) < (nFibers - 1); ++fr){
diffFiber = false;
unsigned int idx1 = fbrIdx1[fbrS+fr];// dInds1[fbrPtr1[fbr]];
idx0 = fbrLikeSlcInds[fbrS+fr];//slc;
tmp = 0;
for (int fbr = fbrPtr1[fbrS+fr] + workId; fbr < fbrPtr1[fbrS+fr+1]; fbr+=warpPerSlice){
ITYPE idx2 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; x++) {
unsigned int idx3 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU3[idx3 * R + r]; //2MR
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp += tmp_val * dU2[idx2 * R + r] ;
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp2 += tmp * dU1[idx1 * R + r] ;
}
if(fbrLikeSlcInds[fbrS+fr] != fbrLikeSlcInds[fbrS+fr+1]) {
diffFiber = true;
for(unsigned int r=laneId; r<R; r+=32) {
atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR
}
tmp2 = 0;
}
}
if(!diffFiber) {
for(unsigned int r=laneId; r<R; r+=32)
atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_smllBin_fbr_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
ITYPE slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc;
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
tmp_val = 0;
unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx1 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp); //2PR
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_smllBin_fbr_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE outbuffer = 0, tmp_val = 0, tmp = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc;
for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){
unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
tmp = 0;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx0 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx1 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU1[idx1 * R + r];
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] * dU3[idx3 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
unsigned int laneId = threadIdx.x & 31;
unsigned int workId = threadIdx.x >> 5;
unsigned int slc = blockIdx.x >> logOfTPS;
unsigned int localBId = blockIdx.x & (TbPerSlc -1);
DTYPE outbuffer = 0, tmp_val = 0, tmp = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){
unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
tmp = 0;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx0 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx1 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU1[idx1 * R + r];
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] * dU3[idx3 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_smllBin_fbrS_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE tmp = 0, tmp_val, tmp2 = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx3 = dfbrIdx0[mappedSlc] ;//slc;
for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){
unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
tmp = 0;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx1 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx2 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU2[idx2 * R + r];
}
for(unsigned int r=laneId; r<R; r+=32)
tmp += tmp_val * dU1[idx1 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp2 = tmp * dU3[idx3 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR
}
}
}
}
// CUDA kernel call to do HCSR MTTKRP
__global__ void mttkrp_MIHCSR_kernel_hvyBin_fbrS_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
unsigned int laneId = threadIdx.x & 31;
unsigned int workId = threadIdx.x >> 5;
unsigned int slc = blockIdx.x >> logOfTPS;
unsigned int localBId = blockIdx.x & (TbPerSlc -1);
DTYPE tmp = 0, tmp_val, tmp2 = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx3 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){
unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
tmp = 0;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx1 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx2 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU2[idx2 * R + r];
}
for(unsigned int r=laneId; r<R; r+=32)
tmp += tmp_val * dU1[idx1 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp2 = tmp * dU3[idx3 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR
}
}
}
}
// CUDA fbr atomic using slcLikeFbr
__global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
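// output mode is the fiber-index mode: each warp group reduces one fiber against dU1, scales by the slice row of dU2, and atomically adds into row fbrIdx1[fbr] of dU0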
DTYPE tmp = 0, tmp_val;
if(fbr < nFibers - 1){
tmp_val = 0;
unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
unsigned int idx2 = fbrLikeSlcInds[fbr];//slc;
for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) {
unsigned int idx1 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp); //2PR
}
}
}
// CUDA fbr atomic using slcLikeFbr
__global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0,
DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
DTYPE tmp = 0, tmp_val, tmp2 = 0;
if(fbrS < nFibers - 1){
tmp = 0;
unsigned int idx2 = fbrLikeSlcInds[fbrS];//slc;
unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
unsigned int idx0 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx1 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR
// if(laneId == 0)
// printf("from GPU: (%d %d %d %d) - %f %f %f %f \n", idx0, idx1, idx2, idx3, dU0[idx0 * R] , dU1[idx1 * R], dU2[idx2 * R], dU3[idx3 * R]);
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] * dU3[idx3 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
// CUDA kernel: MI-HCSR MTTKRP with fiber-level parallelism, 4D, atomic updates at the slice-level fiber (fbrS) output
__global__ void mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0,
DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
DTYPE tmp = 0, tmp_val, tmp2 = 0;
if(fbrS < nFibers - 1){
tmp = 0;
unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
unsigned int idx3 = fbrLikeSlcInds[fbrS];//slc;
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
unsigned int idx1 = fbrIdx2[fbr];
tmp_val = 0;
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx2 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val += vals[x] * dU2[idx2 * R + r] ; //2MR
}
for(unsigned int r=laneId; r<R; r+=32)
tmp += tmp_val * dU1[idx1 * R + r] ;
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp2 = tmp * dU3[idx3 * R + r];
atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR
}
}
}
// CUDA kernel: MI-HCSR MTTKRP with fiber-level parallelism and a ParTI-style grid-stride loop, atomic updates at the fiber output
__global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_loop(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
//ParTI-style grid-stride loop
//hard-coded for one warp per fiber
size_t num_loops_fbr = 1 * 32;
size_t const fbr_per_loop = gridDim.x * blockDim.x;
if(nFibers > fbr_per_loop) {
num_loops_fbr = ((nFibers + fbr_per_loop - 1) / fbr_per_loop) << 5;
}
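/* One warp (32 consecutive threads) maps to one fiber per pass (fbr = gId >> 5 below), and each
   pass advances a warp by fbr_per_loop/32 fibers. num_loops_fbr = ceil(nFibers/fbr_per_loop) * 32
   is therefore a safe (slightly generous) bound on the number of passes; surplus passes simply
   fail the fbr < nFibers - 1 check. */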
DTYPE tmp = 0, tmp_val;
unsigned int fbr;
for(size_t nl=0; nl<num_loops_fbr; ++nl) {
fbr = (gId + nl * fbr_per_loop) >> 5;
if(fbr < nFibers - 1){
tmp_val = 0;
unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
unsigned int idx2 = fbrLikeSlcInds[fbr];//slc;
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; x++) {
unsigned int idx1 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp); //2PR
}
}
}
}
// CUDA kernel: MI-HCSR MTTKRP, heavy bin, atomic updates at the fiber output
__global__ void mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
ITYPE laneId = threadIdx.x & 31;
ITYPE workId = threadIdx.x >> 5;
ITYPE slc = blockIdx.x >> logOfTPS;
ITYPE localBId = blockIdx.x & (TbPerSlc -1);
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx2 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1]; fbr+=warpPerSlice){
tmp_val = 0;
unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx1 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val += vals[x] * dU1[idx1 * R + r];
}
}
for(unsigned int r=laneId; r<R; r+=32) {
tmp = tmp_val * dU2[idx2 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
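/* Heavy-bin work partitioning used by the hvyBin_* kernels (including the one above): a slice is
   assigned TbPerSlc = 2^logOfTPS thread blocks; localBId = blockIdx.x & (TbPerSlc - 1) selects this
   block's share of the slice's fibers via [fb_st, fb_end), and within that range warps stride by
   warpPerSlice. */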
// CUDA kernel: MI-HCSR MTTKRP, small bin, atomic updates at every nonzero (all-atomic)
__global__ void mttkrp_MIHCSR_kernel_smllBin_all_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
ITYPE slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
// ITYPE slcPerTb = 16/warpPerSlice;
// ITYPE shSlc = slc & slcPerTb;
DTYPE tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc;
int fb_st = fbrPtr0[mappedSlc];
int fb_end = fbrPtr0[mappedSlc+1];
for (int fbr = fb_st + workId; fbr < fb_end; fbr+=warpPerSlice){
unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
// for(unsigned int r=laneId; r<R; r+=32)
// tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx0 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp_val); //2MR
// atomicAdd(&dU0[idx0 * R + r], (tmp_val * vals[x]) );
}
}
}
}
}
// CUDA kernel: MI-HCSR MTTKRP, heavy bin, all-atomic
__global__ void mttkrp_MIHCSR_kernel_hvyBin_all_atomic(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds2, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
ITYPE laneId = threadIdx.x & 31;
ITYPE workId = threadIdx.x >> 5;
ITYPE slc = blockIdx.x >> logOfTPS;
ITYPE localBId = blockIdx.x & (TbPerSlc -1);
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbr = fb_st + workId; fbr < fb_end && fbr < fbrPtr0[mappedSlc+1]; fbr+=warpPerSlice){
tmp_val = 0;
unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ;
for(unsigned int x = fbrPtr1[fbr]; x < fbrPtr1[fbr+1]; ++x) {
unsigned int idx0 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
// atomicAdd(&dU0[idx0 * R + r], (tmp_val * vals[x]) );
tmp_val = vals[x] * dU1[idx1 * R + r] * dU2[idx2 * R + r] ;
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
}
}
// CUDA kernel: MI-HCSR MTTKRP, small bin, 4D, all-atomic
__global__ void mttkrp_MIHCSR_kernel_smllBin_all_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int LogOfTPS){
unsigned int tId = threadIdx.x;
unsigned int laneId = tId & 31;
unsigned int gId = (blockIdx.x * blockDim.x + tId);
unsigned int workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5;
unsigned int slc = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5)
DTYPE outbuffer = 0, tmp_val = 0, tmp = 0;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc;
for (int fbrS = fbrPtr0[mappedSlc]; fbrS < fbrPtr0[mappedSlc+1]; fbrS++){
unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx3 = fbrIdx2[fbr];
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx0 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
}
}
// CUDA kernel: MI-HCSR MTTKRP, heavy bin, 4D, all-atomic
__global__ void mttkrp_MIHCSR_kernel_hvyBin_all_atomic_4D(DTYPE * vals, ITYPE *dfbrIdx0, ITYPE *dSlcMapperBin, ITYPE *dInds3, ITYPE *fbrPtr0,
ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, unsigned int nSlices, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2, DTYPE *dU3,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int TbPerSlc, int logOfTPS){
unsigned int laneId = threadIdx.x & 31;
unsigned int workId = threadIdx.x >> 5;
unsigned int slc = blockIdx.x >> logOfTPS;
unsigned int localBId = blockIdx.x & (TbPerSlc -1);
DTYPE tmp = 0, tmp_val;
if(slc < nSlices){
unsigned int mappedSlc = dSlcMapperBin[slc];
unsigned int idx1 = dfbrIdx0[mappedSlc] ;//slc;
unsigned int nFbr = fbrPtr0[mappedSlc+1] - fbrPtr0[mappedSlc];
unsigned int fbrPerTb = (nFbr + TbPerSlc - 1 ) >> logOfTPS;
unsigned int fb_st = fbrPtr0[mappedSlc] + localBId * fbrPerTb ;
unsigned int fb_end = fbrPtr0[mappedSlc] + (localBId + 1) * fbrPerTb ;
for (int fbrS = fb_st; fbrS < fb_end && fbrS < fbrPtr0[mappedSlc+1] ; fbrS++){
unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx3 = fbrIdx2[fbr];
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx0 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
}
}
// CUDA kernel: MI-HCSR MTTKRP with fiber-level parallelism, all-atomic (uses fbrLikeSlcInds)
__global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
DTYPE tmp = 0, tmp_val;
if(fbr < nFibers - 1){
tmp_val = 0;
unsigned int idx1 = fbrLikeSlcInds[fbr];//slc;
unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
// if(laneId == 0 && idx1 == 0)
// printf("GPU %d %d %f %f\n", idx1, idx2, dU1[idx1 * R], dU2[idx2 * R] );
for(unsigned int r=laneId; r<R; r+=32)
tmp = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR
for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) {
unsigned int idx0 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val = vals[x] * tmp;///dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //2MR
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
}
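/* In the all_atomic fbrLvlPar variant above, the per-fiber factor product dU1[idx1]*dU2[idx2] is
   computed once and then scattered with atomicAdd into the dU0 rows addressed by the nonzeros; the
   fbr_atomic variant earlier instead gathers dU1 contributions per nonzero and writes the fiber
   result once. Which variant applies depends on where the MTTKRP output mode sits in the tile's
   mode order. */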
// CUDA kernel: MI-HCSR MTTKRP with fiber-level parallelism, 4D, all-atomic (uses fbrLikeSlcInds)
__global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds3,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE *fbrPtr2, ITYPE *fbrIdx2, ITYPE nFibers, DTYPE *dU0,
DTYPE * dU1, DTYPE *dU2, DTYPE *dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;//
ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;//
DTYPE tmp = 0, tmp_val = 0;
if(fbrS < nFibers - 1){
tmp = 0;
unsigned int idx1 = fbrLikeSlcInds[fbrS];//slc;
unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]];
for(unsigned int r=laneId; r<R; r+=32)
tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR
for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){
ITYPE idx3 = fbrIdx2[fbr];
for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) {
unsigned int idx0 = dInds3[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR
atomicAdd(&dU0[idx0 * R + r], tmp);
}
}
}
}
}
// CUDA kernel: MI-HCSR MTTKRP with fiber-level parallelism and a ParTI-style grid-stride loop, all-atomic
__global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_loop(DTYPE * vals, ITYPE *fbrLikeSlcInds, ITYPE *dInds2,
ITYPE *fbrPtr0, ITYPE *fbrPtr1, ITYPE *fbrIdx1, ITYPE nFibers, DTYPE *dU0, DTYPE * dU1, DTYPE *dU2,
ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC){
ITYPE tId = threadIdx.x;
ITYPE laneId = tId & 31;
ITYPE bdim = blockDim.x;
ITYPE gId = (blockIdx.x * bdim + tId);
ITYPE warpId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //
ITYPE blockId = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) //blockIdx.x ;//
//ParTI-style grid-stride loop
//hard-coded for one warp per fiber
size_t num_loops_fbr = 1 * 32;
size_t const fbr_per_loop = gridDim.x * blockDim.x;
if(nFibers > fbr_per_loop) {
num_loops_fbr = ((nFibers + fbr_per_loop - 1) / fbr_per_loop) << 5;
}
DTYPE tmp = 0, tmp_val;
unsigned int fbr;
for(size_t nl=0; nl<num_loops_fbr; ++nl) {
fbr = (gId + nl * fbr_per_loop) >> 5;
if(fbr < nFibers - 1){
tmp_val = 0;
unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]];
unsigned int idx1 = fbrLikeSlcInds[fbr];//slc;
for(unsigned int r=laneId; r<R; r+=32)
tmp = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR
for(unsigned int x = fbrPtr1[fbr] + warpId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) {
unsigned int idx0 = dInds2[x];
for(unsigned int r=laneId; r<R; r+=32) {
tmp_val = vals[x] * tmp;///dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //2MR
atomicAdd(&dU0[idx0 * R + r], tmp_val);
}
}
}
}
}
int MTTKRP_COO_GPU(const Tensor &X, Matrix *U, const Options Opt){
//allocate and memcpy GPU memory
//Tensor
ITYPE mode = Opt.mode;
ITYPE R = Opt.R;
ITYPE *dInds0, *dInds1, *dInds2, *dInds3 = NULL; // dInds3 is only allocated for 4D tensors but is always passed to cudaFree below
DTYPE *dVals;
ITYPE mode0 = X.modeOrder[0];
ITYPE mode1 = X.modeOrder[1];
ITYPE mode2 = X.modeOrder[2];
checkCuda(cudaMalloc((void**) &dVals, X.totNnz * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds0, X.totNnz * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds1, X.totNnz * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds2, X.totNnz * sizeof(ITYPE)), 0);
checkCuda(cudaMemcpy(dVals, &(X.vals[0]), X.totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds0, &(X.inds[mode0][0]), X.totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds1, &(X.inds[mode1][0]), X.totNnz * sizeof(ITYPE) ,cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds2, &(X.inds[mode2][0]), X.totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
// Factor matrices
DTYPE *dU0, *dU1, *dU2, *dU3 = NULL; // dU3 is only allocated for 4D tensors
checkCuda(cudaMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0);
cudaMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(cudaMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
if(X.ndims == 4){
ITYPE mode3 = X.modeOrder[3];
checkCuda(cudaMalloc((void**) &dInds3, X.totNnz * sizeof(ITYPE)), 0);
checkCuda(cudaMemcpy(dInds3, &(X.inds[mode3][0]), X.totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
}
// BLOCK and GRID
int BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float mili = 0;
bool useLoop = true;
// Like the ParTI loop: fixed grid size, each thread strides over the nonzeros
if(useLoop)
grid.x = 32768;
else
grid.x = (32 * X.totNnz + BLOCKSIZE - 1) / BLOCKSIZE;
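/* Direct launch: one warp per nonzero (32 * totNnz threads), each warp iterating over the R
   columns. Loop mode instead reuses a fixed 32768-block grid and lets each thread stride over
   the nonzeros, ParTI-style. */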
// CUDA call
cuda_timer_start(start);
if(!useLoop){
if(X.ndims == 3)
mttkrp_COO_kernel<<<grid, block>>>(dVals, dInds0, dInds1, dInds2, X.totNnz, dU0, dU1, dU2, mode, R);
else if(X.ndims == 4)
mttkrp_COO_kernel_4D<<<grid, block>>>(dVals, dInds0, dInds1, dInds2, dInds3, X.totNnz, dU0, dU1, dU2, dU3, mode, R);
}
// /* loop like ParTI */
else{
if(X.ndims == 3)
mttkrp_COO_kernel_loop<<<grid, block>>>(dVals, dInds0, dInds1, dInds2, X.totNnz, dU0, dU1, dU2, mode, R );
else if(X.ndims == 4)
mttkrp_COO_kernel_4D_loop<<<grid, block>>>(dVals, dInds0, dInds1, dInds2, dInds3, X.totNnz, dU0, dU1, dU2, dU3, mode, R);
}
cuda_timer_stop(start, stop, mili);
if(useLoop) cout << "Loop on. ";
cout << "COO GPU - time " << mili << "ms" << endl;
// check correctness
checkCuda(cudaMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
// print_output(U, 0);
cudaFree(dVals);
cudaFree(dU0); cudaFree(dU1); cudaFree(dU2); cudaFree(dU3);
cudaFree(dInds0); cudaFree(dInds1); cudaFree(dInds2); cudaFree(dInds3);
return 0;
}
int MTTKRP_HCSR_GPU(Tensor &X, Matrix *U, const Options &Opt){
//allocate and memcpy GPU memory
cout << "FIX fiber idx" << endl;
//Tensor
ITYPE *dInds2 = NULL, *dInds3 = NULL, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2 = NULL, *dFbrIdx2 = NULL, *dSlcMapperBin; // conditionally-allocated pointers start as NULL so the unconditional cudaFree calls below are safe
DTYPE *dVals;
int logOfWarpPerSlice = log2(Opt.warpPerSlice);
int TbPerSlc = 1;
int logOfTPS = log2(TbPerSlc);
ITYPE mode0 = X.modeOrder[0];
ITYPE mode1 = X.modeOrder[1];
ITYPE mode2 = X.modeOrder[2];
// Dummy bin mapper (identity over all slices) to stay compatible with the binned kernels when bins are not used
X.slcMapperBin.push_back(std::vector<ITYPE>());
for (int s = 0; s < X.fbrIdx[0].size(); ++s)
X.slcMapperBin[0].push_back(s);
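/* With a single bin holding the identity mapping, the binned smllBin kernel launched below can be
   reused unchanged on the plain (untiled, unbinned) HCSR tensor. */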
checkCuda(cudaMalloc((void**) &dVals, X.totNnz * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dSlcMapperBin, X.slcMapperBin[0].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx0, X.fbrIdx[0].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr0, X.fbrPtr[0].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr1, X.fbrPtr[1].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx1, X.fbrIdx[1].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMemcpy(dVals, &(X.vals[0]), X.totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dSlcMapperBin, &(X.slcMapperBin[0][0]), X.slcMapperBin[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr0, &(X.fbrPtr[0][0]), X.fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx0, &(X.fbrIdx[0][0]), X.fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr1, &(X.fbrPtr[1][0]), X.fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx1, &(X.fbrIdx[1][0]), X.fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
// Factor matrices
DTYPE *dU0, *dU1, *dU2, *dU3 = NULL; // dU3 is only allocated for 4D tensors
checkCuda(cudaMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0);
cudaMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(cudaMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
if(X.ndims == 3){
checkCuda(cudaMalloc((void**) &dInds2, X.totNnz * sizeof(ITYPE)), 0);
checkCuda(cudaMemcpy(dInds2, &(X.inds[mode2][0]), X.totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
if(X.ndims == 4){
ITYPE mode3 = X.modeOrder[3];
checkCuda(cudaMalloc((void**) &dFbrIdx2, X.fbrIdx[2].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dFbrPtr2, X.fbrPtr[2].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds3, X.totNnz * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMemcpy(dFbrPtr2, &(X.fbrPtr[2][0]), X.fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dFbrIdx2, &(X.fbrIdx[2][0]), X.fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds3, &(X.inds[mode3][0]), X.totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
}
// BLOCK and GRID
int BLOCKSIZE = 512;
if(Opt.warpPerSlice * 32 > BLOCKSIZE){
cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
exit(0);
}
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
grid.x = (Opt.warpPerSlice * 32 * X.dims[mode0] + BLOCKSIZE - 1) / BLOCKSIZE;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float mili = 0;
checkCuda(cudaEventRecord(start), __LINE__);
// mttkrp_HCSR_kernel_COO<<<grid, block, 32 * sizeof(DTYPE)>>>(dVals, dfbrIdx0, dSlcMapperBin, dInds2, dfbrPtr0, dfbrPtr1, dfbrIdx1,
// X.fbrIdx[0].size(), dU0, dU1, dU2,Opt.mode, Opt.R, Opt.warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
if(X.ndims == 3)
mttkrp_HCSR_kernel_smllBin<<<grid, block, 32 * sizeof(DTYPE)>>>(dVals, dfbrIdx0, dSlcMapperBin, dInds2, dfbrPtr0, dfbrPtr1, dfbrIdx1,
X.fbrIdx[0].size(), dU0, dU1, dU2,Opt.mode, Opt.R, Opt.warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
else
mttkrp_HCSR_kernel_smllBin_4D<<<grid, block, 32 * sizeof(DTYPE)>>>(dVals, dfbrIdx0, dSlcMapperBin, dInds3, dfbrPtr0, dfbrPtr1, dfbrIdx1,
dFbrPtr2, dFbrIdx2, X.fbrIdx[0].size(), dU0, dU1, dU2, dU3, Opt.mode, Opt.R, Opt.warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
checkCuda(cudaEventRecord(stop), __LINE__);
cudaEventSynchronize(stop);
checkCuda(cudaEventElapsedTime(&mili, start, stop), __LINE__);
cudaDeviceSynchronize();
cout << "HCSR GPU - time " << mili << "ms"<< endl;
// check correctness
checkCuda(cudaMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
cudaFree(dVals);
cudaFree(dU0); cudaFree(dU1); cudaFree(dU2); cudaFree(dU3);
cudaFree(dInds2); cudaFree(dInds3);
cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2);
cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2);
return 0;
}
int MTTKRP_TILED_COO_GPU(TiledTensor *TiledX, Matrix *U, const Options Opt){
//allocate and memcpy GPU memory
//Tensor
ITYPE mode = Opt.mode;
ITYPE R = Opt.R;
ITYPE *dInds0, *dInds1, *dInds2;
ITYPE dLoc = 0, totNnz = 0;
DTYPE *dVals;
// All tiles share the same mode order
ITYPE mode0 = TiledX[0].modeOrder[0];
ITYPE mode1 = TiledX[0].modeOrder[1];
ITYPE mode2 = TiledX[0].modeOrder[2];
for (int tile = 0; tile < Opt.nTile; ++tile)
totNnz += TiledX[tile].totNnz;
checkCuda(cudaMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds0, totNnz * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds1, totNnz * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0);
for (int tile = 0; tile < Opt.nTile; ++tile){
if(tile > 0)
dLoc += TiledX[tile-1].totNnz;
checkCuda(cudaMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds0 + dLoc, &(TiledX[tile].inds[mode0][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds1 + dLoc, &(TiledX[tile].inds[mode1][0]), TiledX[tile].totNnz * sizeof(ITYPE) ,cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[mode2][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
// Factor matrices
DTYPE *dU0, *dU1, *dU2;
checkCuda(cudaMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0);
cudaMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(cudaMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
// BLOCK and GRID
int BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float mili = 0, GPUTime = 0;
// CUDA call
dLoc = 0;
for (int tile = 0; tile < Opt.nTile; ++tile){
if(tile > 0)
dLoc += TiledX[tile-1].totNnz;
cout << "Tile " << tile << " launched.. "<<endl;
grid.x = (32 * TiledX[tile].totNnz + BLOCKSIZE - 1) / BLOCKSIZE;
checkCuda(cudaEventRecord(start), __LINE__);
mttkrp_COO_kernel<<<grid, block>>>(dVals + dLoc, dInds0 + dLoc, dInds1 + dLoc, dInds2 + dLoc, TiledX[tile].totNnz, dU0, dU1, dU2,
mode, R);
checkCuda(cudaEventRecord(stop), __LINE__);
cudaEventSynchronize(stop);
checkCuda(cudaEventElapsedTime(&mili, start, stop), __LINE__);
cudaDeviceSynchronize();
cout << "Tile: " << tile << " - time " << mili << "ms"<< endl;
GPUTime += mili;
}
cout << "COO GPU - time " << GPUTime << "ms"<< endl;
// check correctness
checkCuda(cudaMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
cudaFree(dVals);
cudaFree(dU0); cudaFree(dU1); cudaFree(dU2);
cudaFree(dInds0); cudaFree(dInds1); cudaFree(dInds2);
return 0;
}
int MTTKRP_B_HCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){
/* Choosing the kernel type:
false: B-CSF (IPDPS work, slice-level parallelism); true: parallelism at the fiber level, calling the slc_atomic_fbrLvlPar kernels */
bool slcAtomicFbrLvlPar = false;
/* Allocate and memcpy GPU memory */
//Tensor
ITYPE *dInds2 = NULL, *dInds3 = NULL, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2 = NULL, *dFbrIdx2 = NULL, *dSlcMapperBin, *dFbrLikeSlcInds; // conditionally-allocated pointers start as NULL so the unconditional cudaFree calls below are safe
DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
// All tiles share the same mode order
ITYPE mode0 = TiledX[0].modeOrder[0];
ITYPE mode1 = TiledX[0].modeOrder[1];
ITYPE mode2 = TiledX[0].modeOrder[2];
ITYPE mode3 =((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ;
for (int tile = 0; tile < Opt.nTile; ++tile){
totNnz += TiledX[tile].totNnz;
totSlcPtr += TiledX[tile].fbrPtr[0].size() ;
totSlcIdx += TiledX[tile].fbrIdx[0].size() ;
totFbrPtr += TiledX[tile].fbrPtr[1].size() ;
totFbrIdx += TiledX[tile].fbrIdx[1].size() ;
totFbrPtr2 += ((TiledX[tile].ndims == 4) ? TiledX[tile].fbrPtr[2].size() : 0) ;
}
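/* All tiles are packed back-to-back into single device buffers; the per-tile offsets
   (dLoc, dSlcLoc, dSlcIdxLoc, dFbrLoc, dFbrIdxLoc, dFbrLoc2) accumulated in the copy and launch
   loops below index into these concatenated arrays. */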
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float memcpyTime = 0;
cuda_timer_start(start);
checkCuda(cudaMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 3)
checkCuda(cudaMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 4){
checkCuda(cudaMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0);
}
/* cuda memcopy for tiled parts*/
for (int tile = 0; tile < Opt.nTile; ++tile){
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); // all tile same
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[tile].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
checkCuda(cudaMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[tile].fbrPtr[0][0]), TiledX[tile].fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[tile].fbrIdx[0][0]), TiledX[tile].fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[tile].fbrPtr[1][0]), TiledX[tile].fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[tile].fbrIdx[1][0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(slcAtomicFbrLvlPar)
checkCuda(cudaMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[tile].fbrLikeSlcInds[0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(TiledX[tile].ndims == 3)
checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[2]][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(TiledX[tile].ndims == 4){
checkCuda(cudaMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[tile].fbrPtr[2][0]), TiledX[tile].fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[tile].fbrIdx[2][0]), TiledX[tile].fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds3 + dLoc, &(TiledX[tile].inds[mode3][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
dBinLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin){
if(bin > 0)
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
checkCuda(cudaMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[tile].slcMapperBin[bin][0]), TiledX[tile].slcMapperBin[bin].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
}
cuda_timer_stop(start, stop, memcpyTime);
cout << "Memcopy time " << memcpyTime << endl;
// Factor matrices
DTYPE *dU0, *dU1, *dU2, *dU3 = NULL; // dU3 is only allocated for 4D tensors
checkCuda(cudaMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0);
cudaMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(cudaMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
if(TiledX[0].ndims == 4){
checkCuda(cudaMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
}
// BLOCK and GRID
int BLOCKSIZE = 512;
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
if(Opt.warpPerSlice * 32 > BLOCKSIZE){
cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
exit(0);
}
cudaStream_t streams[Opt.nBin];
float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
int smallBinEndsAt = 5;
/* Warp per slice and threadblock per size */
int *warpPerSlc = new int[Opt.nBin];
int *logOfWarpPerSlc = new int[Opt.nBin];
int *TbPerSlc = new int[Opt.nBin];
int *logOfTbPerSlc = new int[Opt.nBin];
for (int bin = 0; bin < Opt.nBin ; ++bin){
TbPerSlc[bin] = 1;
warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1);
if(warpPerSlc[bin] > 16)
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]);
TbPerSlc[bin] = 1;
logOfTbPerSlc[bin] = 0;
if (bin >= smallBinEndsAt){
TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // first heavy bin starts at 1 << 1 = 2 TBs per slice, not 1 << 5
if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32;
logOfTbPerSlc[bin] = log2(TbPerSlc[bin]);
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = 4;
}
}
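/* Resulting schedule (inferred from the loop above): small bins b < smallBinEndsAt use one thread
   block and min(2^b, 16) warps per slice; heavy bins b >= smallBinEndsAt use 16 warps and
   min(2^(b - smallBinEndsAt + 1), 32) thread blocks per slice. */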
// TODO: switch the remaining scalar warpPerSlc/TbPerSlc uses to the per-bin warpPerSlc[bin]/TbPerSlc[bin] values
int slcPerTb = 1;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc = 0; dFbrIdxLoc = 0; dFbrLoc2 = 0;
for (int bin = 0; bin < Opt.nBin; ++bin)
cudaStreamCreate(&streams[bin]);
/*MTTKRP on Opt.mode*/
int MTTKRPmode = mode0;//Opt.mode;
for (int tile = 0; tile < Opt.nTile; ++tile){
dBinLoc = 0;
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
BLOCKSIZE = (( slcAtomicFbrLvlPar == true) ? Opt.TBsize : 512) ;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int smallBinEndsAt = 5;
int slcPerTb = 0;
// Alternative hard-coded settings kept for reference:
// int warpPerFbr = BLOCKSIZE/32; int logOfWarpPerFbr = log2(warpPerFbr);
// int fbrPerWarp = 1; int logOfFbrPerWarp = log2(fbrPerWarp);
int warpPerFbr = Opt.warpPerSlice;
int logOfWarpPerFbr = log2(warpPerFbr);
int fbrPerWarp = Opt.fiberPerWarp; // don't overflow the thread block
int logOfFbrPerWarp = log2(fbrPerWarp);
grid.x = ( warpPerFbr * 32 * ((TiledX[tile].nFibers+fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE;
double t0 = seconds();
cuda_timer_start(start);
if(slcAtomicFbrLvlPar){
if(TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers,
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
else
mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].nFibers,
dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
}
else{
for (int bin = 0; bin < Opt.nBin ; ++bin){
if(bin < smallBinEndsAt){
ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin];
dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0);
grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_HCSR_kernel_smllBin<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
mttkrp_HCSR_kernel_smllBin_4D<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
// Processing heavy bin.. multiple TB per slice
else{
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_HCSR_kernel_hvyBin<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
mttkrp_HCSR_kernel_hvyBin_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
}
}
cuda_timer_stop(start, stop, mili);
CPUtimer += seconds() - t0;
GPUTime += mili;
if(Opt.verbose){
cout << "Tile: " << tile << " - time: " << mili << "ms";
cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: "
<< TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " ";
cout << endl;
}
}
allModeGPUTime += GPUTime;
cout << "B-CSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl;
for (int bin = 0; bin < Opt.nBin; ++bin)
cudaStreamDestroy(streams[bin]);
// check correctness
checkCuda(cudaMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
cudaFree(dVals);
cudaFree(dU0); cudaFree(dU1); cudaFree(dU2); cudaFree(dU3);
cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3);
cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2);
cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2);
cudaFree(dFbrLikeSlcInds);
return 0;
}
int MTTKRP_B_HCSR_GPU_ANYMODE(TiledTensor *TiledX, Matrix *U, const Options &Opt, int mode){
/* Allocate and memcpy GPU memory */
//Tensor
ITYPE *dInds2 = NULL, *dInds3 = NULL, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2 = NULL, *dFbrIdx2 = NULL, *dSlcMapperBin, *dFbrLikeSlcInds = NULL; // dFbrLikeSlcInds is never allocated in this function but is freed below, so it must start as NULL
DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
// All tiles share the same mode order
ITYPE mode0 = 0;//TiledX[0].modeOrder[0];
ITYPE mode1 = 1;//TiledX[0].modeOrder[1];
ITYPE mode2 = 2;//TiledX[0].modeOrder[2];
ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ;
for (int tile = 0; tile < Opt.nTile; ++tile){
totNnz += TiledX[tile].totNnz;
totSlcPtr += TiledX[tile].fbrPtr[0].size() ;
totSlcIdx += TiledX[tile].fbrIdx[0].size() ;
totFbrPtr += TiledX[tile].fbrPtr[1].size() ;
totFbrIdx += TiledX[tile].fbrIdx[1].size() ;
totFbrPtr2 += ((TiledX[tile].ndims == 4) ? TiledX[tile].fbrPtr[2].size() : 0) ;
}
double t0 = seconds();
checkCuda(cudaMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 3)
checkCuda(cudaMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 4){
checkCuda(cudaMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0);
}
/* cuda memcopy for tiled parts*/
for (int tile = 0; tile < Opt.nTile; ++tile){
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); // all tile same
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[tile].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
checkCuda(cudaMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[tile].fbrPtr[0][0]), TiledX[tile].fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[tile].fbrIdx[0][0]), TiledX[tile].fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[tile].fbrPtr[1][0]), TiledX[tile].fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[tile].fbrIdx[1][0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(TiledX[tile].ndims == 3)
checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[2]][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(TiledX[tile].ndims == 4){
checkCuda(cudaMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[tile].fbrPtr[2][0]), TiledX[tile].fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[tile].fbrIdx[2][0]), TiledX[tile].fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds3 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[3]][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
dBinLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin){
if(bin > 0)
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
checkCuda(cudaMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[tile].slcMapperBin[bin][0]), TiledX[tile].slcMapperBin[bin].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
}
t0 = seconds();
unsigned int *dULoc = new unsigned int[TiledX[0].ndims];
unsigned int *szDU = new unsigned int[TiledX[0].ndims];
// Factor matrices
DTYPE *dU;// *dU0, *dU1, *dU2, *dU3;
ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols
: (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols );
checkCuda(cudaMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0);
for (int m = 0; m < TiledX[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
ITYPE mtxLoc = 0;
for (int m = 0; m < mode; ++m)
mtxLoc += szDU[m];
checkCuda(cudaMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
if(TiledX[0].ndims == 4)
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
cudaMemset(dU + mtxLoc, 0, U[mode].nRows * U[mode].nCols * sizeof(DTYPE)); // zero the output factor's region (nCols is the rank R for every mode)
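/* Factor matrices are packed into one device buffer dU in natural mode order (0,1,2[,3]); szDU[m]
   is the size of factor m and mtxLoc the offset of the output factor, whose region is zeroed above.
   dULoc[] (filled per tile below) translates a tile's mode order into offsets within dU. */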
// BLOCK and GRID
int BLOCKSIZE = 512;
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
if(Opt.warpPerSlice * 32 > BLOCKSIZE){
cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
exit(0);
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaStream_t streams[Opt.nBin];
float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
int smallBinEndsAt = 5;
/* Warp per slice and threadblock per size */
int *warpPerSlc = new int[Opt.nBin];
int *logOfWarpPerSlc = new int[Opt.nBin];
int *TbPerSlc = new int[Opt.nBin];
int *logOfTbPerSlc = new int[Opt.nBin];
for (int bin = 0; bin < Opt.nBin ; ++bin){
TbPerSlc[bin] = 1;
warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1);
if(warpPerSlc[bin] > 16)
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]);
TbPerSlc[bin] = 1;
logOfTbPerSlc[bin] = 0;
if (bin >= smallBinEndsAt){
TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // first heavy bin starts at 1 << 1 = 2 TBs per slice, not 1 << 5
if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32;
logOfTbPerSlc[bin] = log2(TbPerSlc[bin]);
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = 4;
}
}
// TODO: switch the remaining scalar warpPerSlc/TbPerSlc uses to the per-bin warpPerSlc[bin]/TbPerSlc[bin] values
int slcPerTb = 1;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc = 0; dFbrIdxLoc = 0; dFbrLoc2 = 0;
for (int bin = 0; bin < Opt.nBin; ++bin)
cudaStreamCreate(&streams[bin]);
/*MTTKRP on Opt.mode*/
int MTTKRPmode = mode;//Opt.mode;
for (int tile = 0; tile < Opt.nTile; ++tile){
/* matrix order according to mode order*/
for (int mm = 0; mm < TiledX[0].ndims; ++mm){
int curMode = TiledX[tile].modeOrder[mm];
dULoc[mm] = 0;
for (int q = 0; q < curMode; ++q)
dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0
}
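/* dULoc[mm] is the offset of the factor matrix for the tile's mm-th mode (modeOrder[mm]) inside the
   packed dU buffer, so the kernels below can be handed the factors in the order the compressed tree
   expects regardless of which mode is being computed. */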
dBinLoc = 0;
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
// BLOCKSIZE = (( slcAtomicFbrLvlPar == true) ? Opt.TBsize : 512) ;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int smallBinEndsAt = 5;
int slcPerTb = 0;
// Alternative hard-coded settings kept for reference:
// int warpPerFbr = BLOCKSIZE/32; int logOfWarpPerFbr = log2(warpPerFbr);
// int fbrPerWarp = 1; int logOfFbrPerWarp = log2(fbrPerWarp);
int warpPerFbr = Opt.warpPerSlice;
int logOfWarpPerFbr = log2(warpPerFbr);
int fbrPerWarp = Opt.fiberPerWarp; // don't overflow the thread block
int logOfFbrPerWarp = log2(fbrPerWarp);
double t0 = seconds();
cuda_timer_start(start);
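/* Kernel selection (inferred from the branches below): if the MTTKRP output mode is the tile's
   slice mode (modeOrder[0]), the standard B-CSF HCSR kernels are used; if it is the fiber mode
   (modeOrder[ndims-2]), the fbr_atomic MI-HCSR variants are used; if it is the innermost mode
   (modeOrder[ndims-1]), the all_atomic variants are used; for 4D tensors an extra fbrS_atomic case
   covers modeOrder[1]. */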
if(mode == TiledX[0].modeOrder[0]){
for (int bin = 0; bin < Opt.nBin ; ++bin){
if(bin < smallBinEndsAt){
ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin];
dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0);
grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_HCSR_kernel_smllBin<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
mttkrp_HCSR_kernel_smllBin_4D<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
// Processing heavy bin.. multiple TB per slice
else{
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_HCSR_kernel_hvyBin<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
mttkrp_HCSR_kernel_hvyBin_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
}
}
else if(TiledX[0].ndims == 4 && TiledX[0].modeOrder[1] == MTTKRPmode && TiledX[0].totNnz){
for (int bin = 0; bin < Opt.nBin ; ++bin){
if(bin < smallBinEndsAt){
ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin];
dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0);
grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
mttkrp_MIHCSR_kernel_smllBin_fbrS_atomic_4D<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
// Processing heavy bin.. multiple TB per slice
else{
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
mttkrp_MIHCSR_kernel_hvyBin_fbrS_atomic_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
}
}
else if(mode == TiledX[0].modeOrder[TiledX[0].ndims-2]){
for (int bin = 0; bin < Opt.nBin ; ++bin){
if(bin < smallBinEndsAt){
ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin];
dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0);
grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_smllBin_fbr_atomic<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
mttkrp_MIHCSR_kernel_smllBin_fbr_atomic_4D<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
// Processing heavy bin.. multiple TB per slice
else{
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
mttkrp_MIHCSR_kernel_hvyBin_fbr_atomic_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
}
}
else if(mode == TiledX[0].modeOrder[TiledX[0].ndims-1]){
for (int bin = 0; bin < Opt.nBin ; ++bin){
if(bin < smallBinEndsAt){
ITYPE shSize = 0;//slcPerTb * 32 * sizeof(DTYPE); slcPerTb = 16 / warpPerSlc[bin];
dBinLoc += ((bin > 0) ? TiledX[tile].slcMapperBin[bin-1].size() : 0);
grid.x = ( TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_smllBin_all_atomic <<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
mttkrp_MIHCSR_kernel_smllBin_all_atomic_4D<<<grid, block, shSize , streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
// Processing heavy bin.. multiple TB per slice
else{
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
grid.x = (TbPerSlc[bin] * warpPerSlc[bin] * 32 * TiledX[tile].slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_hvyBin_all_atomic<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
else
mttkrp_MIHCSR_kernel_hvyBin_all_atomic_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[tile].slcMapperBin[bin].size(),
dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerSlc[bin], logOfWarpPerSlc[bin], TbPerSlc[bin], logOfTbPerSlc[bin]);
}
}
}
cuda_timer_stop(start, stop, mili);
CPUtimer += seconds() - t0;
GPUTime += mili;
// if(Opt.verbose)
{
cout << "Tile: " << tile << " - time: " << mili << "ms";
if(TiledX[0].ndims == 3){
cout << " nSlc: " << TiledX[tile].fbrIdx[0].size() << ", nFibers: "
<< TiledX[tile].fbrPtr[1].size() <<", nnz: " << TiledX[tile].totNnz;
cout << endl;
}
else if(TiledX[0].ndims == 4){
cout << " nSlc: " << TiledX[tile].fbrIdx[0].size() << ", nSFibers: "
<< TiledX[tile].fbrPtr[1].size() << ", nFibers: "
<< TiledX[tile].fbrPtr[2].size() <<", nnz: " << TiledX[tile].totNnz;
cout << endl;
}
}
}
allModeGPUTime += GPUTime;
cout << "ONE-B-CSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl;
for (int bin = 0; bin < Opt.nBin; ++bin)
cudaStreamDestroy(streams[bin]);
// check correctness
checkCuda(cudaMemcpy(&U[mode].vals[0], dU + mtxLoc, U[mode].nRows * U[mode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
cudaFree(dVals);
cudaFree(dU); //cudaFree(dU1); cudaFree(dU2); cudaFree(dU3);
cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3);
cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2);
cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2);
cudaFree(dFbrLikeSlcInds);
return 0;
}
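/* Hypothetical usage sketch (not part of the original sources): running the any-mode B-CSF MTTKRP
   once per mode, as a benchmarking driver or a CP-ALS-style sweep might. The helper name is an
   assumption; it simply calls the function defined above and assumes TiledX/U/Opt were prepared the
   same way as for a single-mode run. */
static inline void mttkrp_allModes_sketch(TiledTensor *TiledX, Matrix *U, const Options &Opt)
{
    // One MTTKRP per mode; each call writes its result back into U[m] on the host.
    for (int m = 0; m < TiledX[0].ndims; ++m)
        MTTKRP_B_HCSR_GPU_ANYMODE(TiledX, U, Opt, m);
}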
int MTTKRP_HYB_GPU(const HYBTensor &HybX, Matrix *U, const Options &Opt){
//allocate and memcpy GPU memory
//Tensor
ITYPE *dCOOInds0, *dCOOInds1, *dCOOInds2, *dCOOInds3;
ITYPE *dCSLSlcPtr, *dCSLSlcInds, *dCSLInds1, *dCSLInds2, *dCSLSlcMapperBin;
ITYPE *dfbrPtr0, *dfbrIdx0, *dInds2, *dInds3, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin;
DTYPE *dVals, *dCOOVals, *dCSLVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dCSLBinLoc = 0, dFbrLoc2 =0;
int warpPerSlice = Opt.warpPerSlice;
int logOfWarpPerSlice = log2(Opt.warpPerSlice);
int TbPerSlc = 1;
int logOfTPS = log2(TbPerSlc);
// All tiles share the same mode order
ITYPE mode0 = HybX.modeOrder[0];
ITYPE mode1 = HybX.modeOrder[1];
ITYPE mode2 = HybX.modeOrder[2];
ITYPE mode3 =((HybX.ndims == 4) ? HybX.modeOrder[3] : 0) ;
// ****** mem op HYB COO *******
if(HybX.COOnnz > 0){
checkCuda(cudaMalloc((void**) &dCOOVals, HybX.COOnnz * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dCOOInds0, HybX.COOnnz * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dCOOInds1, HybX.COOnnz * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dCOOInds2, HybX.COOnnz * sizeof(ITYPE)), 0);
checkCuda(cudaMemcpy(dCOOVals, &(HybX.COOvals[0]), HybX.COOnnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dCOOInds0, &(HybX.COOinds[mode0][0]), HybX.COOnnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dCOOInds1, &(HybX.COOinds[mode1][0]), HybX.COOnnz * sizeof(ITYPE) ,cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dCOOInds2, &(HybX.COOinds[mode2][0]), HybX.COOnnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(HybX.ndims == 4){
checkCuda(cudaMalloc((void**) &dCOOInds3, HybX.COOnnz * sizeof(ITYPE)), 0);
checkCuda(cudaMemcpy(dCOOInds3, &(HybX.COOinds[mode3][0]), HybX.COOnnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
}
// ****** mem op HYB CSL *******
if(HybX.CSLnnz > 0){
checkCuda(cudaMalloc((void**) &dCSLVals, HybX.CSLnnz * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dCSLSlcPtr, HybX.CSLslicePtr.size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dCSLSlcInds, HybX.CSLsliceIdx.size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dCSLInds1, HybX.CSLnnz * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dCSLInds2, HybX.CSLnnz * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dCSLSlcMapperBin, HybX.CSLslicePtr.size() * sizeof(ITYPE)), 0);
checkCuda(cudaMemcpy(dCSLVals, &(HybX.CSLvals[0]), HybX.CSLnnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dCSLSlcPtr + dSlcLoc, &(HybX.CSLslicePtr[0]), HybX.CSLslicePtr.size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dCSLSlcInds + dSlcIdxLoc, &(HybX.CSLsliceIdx[0]), HybX.CSLsliceIdx.size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dCSLInds1, &(HybX.CSLinds[mode1][0]), HybX.CSLnnz * sizeof(ITYPE) ,cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dCSLInds2, &(HybX.CSLinds[mode2][0]), HybX.CSLnnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
dCSLBinLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin){
if(bin > 0)
dCSLBinLoc += HybX.CSLslcMapperBin[bin-1].size();
if(HybX.CSLslcMapperBin[bin].size() > 0)
checkCuda(cudaMemcpy(dCSLSlcMapperBin + dSlcIdxLoc + dCSLBinLoc, &(HybX.CSLslcMapperBin[bin][0]), HybX.CSLslcMapperBin[bin].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
}
// ****** mem op HYB HCSR *******
if(HybX.HCSRnnz > 0){
checkCuda(cudaMalloc((void**) &dVals, HybX.HCSRnnz * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr0, HybX.fbrPtr[0].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx0, HybX.fbrIdx[0].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dSlcMapperBin, HybX.fbrPtr[0].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr1, HybX.fbrPtr[1].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx1, HybX.fbrPtr[1].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMemcpy(dVals, &(HybX.vals[0]), HybX.HCSRnnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr0, &(HybX.fbrPtr[0][0]), HybX.fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx0, &(HybX.fbrIdx[0][0]), HybX.fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr1, &(HybX.fbrPtr[1][0]), HybX.fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx1, &(HybX.fbrIdx[1][0]), HybX.fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(HybX.ndims == 3){
checkCuda(cudaMalloc((void**) &dInds2, HybX.HCSRnnz * sizeof(ITYPE)), 0);
checkCuda(cudaMemcpy(dInds2, &(HybX.inds[mode2][0]), HybX.HCSRnnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
if(HybX.ndims == 4){
checkCuda(cudaMalloc((void**) &dFbrIdx2, HybX.fbrIdx[2].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dFbrPtr2, HybX.fbrPtr[2].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds3, HybX.HCSRnnz * sizeof(ITYPE)), 0);
checkCuda(cudaMemcpy(dFbrPtr2, &(HybX.fbrPtr[2][0]), HybX.fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dFbrIdx2, &(HybX.fbrIdx[2][0]), HybX.fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds3, &(HybX.inds[mode3][0]), HybX.HCSRnnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
dBinLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin){
if(bin > 0)
dBinLoc += HybX.slcMapperBin[bin-1].size();
if(HybX.slcMapperBin[bin].size() > 0)
checkCuda(cudaMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(HybX.slcMapperBin[bin][0]), HybX.slcMapperBin[bin].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
}
// //Matrices
DTYPE *dU0, *dU1, *dU2, *dU3;
checkCuda(cudaMalloc((void**) &dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dU1, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dU2, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)), 0);
cudaMemset(dU0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(cudaMemcpy(dU1, &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dU2, &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
if(HybX.ndims == 4){
checkCuda(cudaMalloc((void**) &dU3, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)), 0);
checkCuda(cudaMemcpy(dU3, &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
}
// BLOCK and GRID
int BLOCKSIZE = 512;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
if(Opt.warpPerSlice * 32 > BLOCKSIZE){
cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
exit(0);
}
cudaEvent_t start, stop, HYBstart, HYBstop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&HYBstart);
cudaEventCreate(&HYBstop);
cudaStream_t streams[2 * Opt.nBin + 1];
for (int bin = 0; bin < 2 * Opt.nBin + 1; ++bin)
cudaStreamCreate(&streams[bin]);
float mili = 0, HYBmili =0, GPUTime = 0, CPUtimer = 0, HYBTime = 0;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0;
bool useLoop = false;
if(useLoop)
grid.x = 32768*2;
// mili = 0;
dCSLBinLoc = 0; dBinLoc = 0;
int smallBinEndsAt = 5;
int slcPerTb = 0;
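/* Launch order: COO kernel on the default stream, then for each slice-bin the
CSL and HCSR kernels on their own streams. Small bins (bin < smallBinEndsAt)
use one warp per slice; heavy bins use 16 warps and 2..32 thread blocks per
slice. */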
cuda_timer_start(HYBstart);
// ******* CUDA COO *******
// if(HybX.COOnnz > 0){
// BLOCKSIZE = 128;
// block.x = BLOCKSIZE;
// // /* Like PARTI loop */ =
// if(!useLoop)
// grid.x = (32 * HybX.COOnnz + BLOCKSIZE - 1) / BLOCKSIZE;
// if(Opt.verbose)
// cuda_timer_start(start);
// if(!useLoop){
// if(HybX.ndims == 3)
// mttkrp_HYB_COO_kernel<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2, HybX.COOnnz, dU0, dU1, dU2, Opt.mode, Opt.R);
// else if (HybX.ndims == 4)
// mttkrp_HYB_COO_kernel_4D<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2,dCOOInds3, HybX.COOnnz, dU0, dU1, dU2, dU3, Opt.mode, Opt.R);
// }
// else{
// if(HybX.ndims == 3)
// mttkrp_HYB_COO_kernel_loop<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2, HybX.COOnnz, dU0, dU1, dU2, Opt.mode, Opt.R);
// else if (HybX.ndims == 4)
// mttkrp_HYB_COO_kernel_4D_loop<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2,dCOOInds3, HybX.COOnnz, dU0, dU1, dU2, dU3, Opt.mode, Opt.R);
// }
// if(Opt.verbose){
// cuda_timer_stop(start, stop, mili);
// HYBTime += mili;
// cout << "HYB-COO GPU " << mili << "ms"<< endl;
// }
// }
// ******* CUDA CSL *******
// if(HybX.CSLnnz > 0 || HybX.HCSRnnz > 0)
{
if(HybX.COOnnz > 0){
BLOCKSIZE = 128;
block.x = 128;
grid.x = (32 * HybX.COOnnz + BLOCKSIZE - 1) / BLOCKSIZE;
if(HybX.ndims == 3)
mttkrp_HYB_COO_kernel<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2, HybX.COOnnz, dU0, dU1, dU2, Opt.mode, Opt.R);
else if (HybX.ndims == 4)
mttkrp_HYB_COO_kernel_4D<<<grid, block, 0, 0>>>(dCOOVals, dCOOInds0, dCOOInds1, dCOOInds2,dCOOInds3, HybX.COOnnz, dU0, dU1, dU2, dU3, Opt.mode, Opt.R);
}
BLOCKSIZE = 512;
block.x = BLOCKSIZE;
for (int bin = 0; bin < Opt.nBin ; ++bin){
dBinLoc += ((bin > 0) ? HybX.slcMapperBin[bin-1].size() : 0);
dCSLBinLoc += ((bin > 0) ? HybX.CSLslcMapperBin[bin-1].size() : 0);
if( HybX.slcMapperBin[bin].size() == 0 && HybX.CSLslcMapperBin[bin].size() == 0)
continue;
// Processing small bin.. merged to one. 1 WARP slice
if(bin < smallBinEndsAt){
warpPerSlice = 1;
logOfWarpPerSlice = 0;//log2(warpPerSlice);
slcPerTb = 16 / warpPerSlice;
/* CSL small bin */
if(HybX.CSLnnz > 0){
grid.x = ( warpPerSlice * 32 * HybX.CSLslcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
mttkrp_CSL_kernel_bin<<<grid, block, 0, streams[1]>>>(dCSLVals, dCSLSlcInds, dCSLSlcMapperBin + dCSLBinLoc,
dCSLInds2, dCSLSlcPtr, dCSLInds1, HybX.CSLslcMapperBin[bin].size(),
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice);
}
/* HCSR small bin */
if(HybX.HCSRnnz > 0){
grid.x = ( warpPerSlice * 32 * HybX.slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(HybX.ndims == 3)
mttkrp_HCSR_kernel_smllBin<<<grid, block, 0, streams[2]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, HybX.slcMapperBin[bin].size(),
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
else if(HybX.ndims == 4)
mttkrp_HCSR_kernel_smllBin_4D<<<grid, block, 0, streams[2]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, HybX.slcMapperBin[bin].size(),
dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
}
}
// Processing heavy bin.. multiple TB per slice
else{
TbPerSlc = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5
if(TbPerSlc > 32) TbPerSlc = 32;
logOfTPS = log2(TbPerSlc);
warpPerSlice = 16;
logOfWarpPerSlice = 4;
/* CSL big bin */
if(HybX.CSLnnz > 0){
grid.x = (TbPerSlc * warpPerSlice * 32 * HybX.CSLslcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
mttkrp_CSL_kernel_hvyBin<<<grid, block, 0, streams[bin+1]>>>(dCSLVals + dLoc, dCSLSlcInds + dSlcIdxLoc, dCSLSlcMapperBin + dSlcIdxLoc + dCSLBinLoc,
dCSLInds2 + dLoc, dCSLSlcPtr + dSlcLoc, dCSLInds1, HybX.CSLslcMapperBin[bin].size(),
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
}
/* HCSR big bin */
if(HybX.HCSRnnz > 0){
grid.x = (TbPerSlc * warpPerSlice * 32 * HybX.slcMapperBin[bin].size() + BLOCKSIZE - 1) / BLOCKSIZE;
if(HybX.ndims == 3)
mttkrp_HCSR_kernel_hvyBin<<<grid, block, 0, streams[bin+2]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, HybX.slcMapperBin[bin].size(),
dU0, dU1, dU2, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
else if(HybX.ndims == 4)
mttkrp_HCSR_kernel_hvyBin_4D<<<grid, block, 0, streams[bin + 2]>>>(dVals + dLoc, dfbrIdx0 + dSlcIdxLoc, dSlcMapperBin + dSlcIdxLoc + dBinLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, HybX.slcMapperBin[bin].size(),
dU0, dU1, dU2, dU3, Opt.mode, Opt.R, warpPerSlice, logOfWarpPerSlice, TbPerSlc, logOfTPS);
}
}
}
// if(Opt.verbose){
// cuda_timer_stop(start, stop, mili);
// HYBTime += mili;
// cout << "CSL+HCSR GPU-time: " << mili << "ms"<< endl;
// }
}
cuda_timer_stop(HYBstart, HYBstop, HYBmili);
if(Opt.verbose)
cout << "verbose on. HYB GPU: " << HYBmili << endl;
else
cout << "HYB GPU: " << HYBmili << endl;
for (int bin = 0; bin < 2 * Opt.nBin + 1; ++bin)
cudaStreamDestroy(streams[bin]);
// check correctness
checkCuda(cudaMemcpy(&U[mode0].vals[0], dU0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
cudaFree(dVals); cudaFree(dCOOVals); cudaFree(dCSLVals);
cudaFree(dU0); cudaFree(dU1); cudaFree(dU2);
cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3);
cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2);
cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2);
cudaFree(dCSLInds1); cudaFree(dCSLInds2); cudaFree(dCSLSlcPtr); cudaFree(dCSLSlcInds);
cudaFree(dCOOInds0); cudaFree(dCOOInds1); cudaFree(dCOOInds2);
return 0;
}
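/* MTTKRP with a single B-CSF representation reused for all modes. For each
MTTKRP mode the kernel variant is chosen by where that mode sits in the CSF
mode order: slice-level atomics for the root mode, fbrS-level atomics for the
second level of a 4-D tensor, fiber-level atomics for the next-to-leaf level,
and nnz-level atomics for the leaf mode. */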
int MTTKRP_ONE_HCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){
bool performMTTKRPMode = true, performMTTKRPnMode = true, performMTTKRPnnMode = true;
/* Allocate and memcpy GPU memory */
//Tensor
ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin, *dFbrLikeSlcInds;
DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0, dFbrLikeSlcIndsLoc = 0;
ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
// All tiles share the same mode order
ITYPE mode0 = 0;//TiledX[0].modeOrder[0];
ITYPE mode1 = 1;//TiledX[0].modeOrder[1];
ITYPE mode2 = 2;//TiledX[0].modeOrder[2];
ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ;
ITYPE R = Opt.R;
for (int tile = 0; tile < Opt.nTile; ++tile){
totNnz += TiledX[tile].totNnz;
totSlcPtr += TiledX[tile].fbrPtr[0].size() ;
totSlcIdx += TiledX[tile].fbrIdx[0].size() ;
totFbrPtr += TiledX[tile].fbrPtr[1].size() ;
totFbrIdx += TiledX[tile].fbrIdx[1].size() ;
totFbrPtr2 += ((TiledX[tile].ndims == 4) ? TiledX[tile].fbrPtr[2].size() : 0) ;
}
double t0 = seconds();
checkCuda(cudaMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 3)
checkCuda(cudaMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 4){
checkCuda(cudaMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0);
}
/* cuda memcopy for tiled parts*/
for (int tile = 0; tile < Opt.nTile; ++tile){
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size(); // all tile same
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[tile].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
checkCuda(cudaMemcpy(dVals + dLoc, &(TiledX[tile].vals[0]), TiledX[tile].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[tile].fbrPtr[0][0]), TiledX[tile].fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[tile].fbrIdx[0][0]), TiledX[tile].fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[tile].fbrPtr[1][0]), TiledX[tile].fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[tile].fbrIdx[1][0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[tile].fbrLikeSlcInds[0]), TiledX[tile].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(TiledX[tile].ndims == 3)
checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[tile].inds[TiledX[tile].modeOrder[2]][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(TiledX[tile].ndims == 4){
checkCuda(cudaMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[tile].fbrPtr[2][0]), TiledX[tile].fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[tile].fbrIdx[2][0]), TiledX[tile].fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds3 + dLoc, &(TiledX[tile].inds[TiledX[0].modeOrder[3]][0]), TiledX[tile].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
dBinLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin){
if(bin > 0)
dBinLoc += TiledX[tile].slcMapperBin[bin-1].size();
checkCuda(cudaMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[tile].slcMapperBin[bin][0]), TiledX[tile].slcMapperBin[bin].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
}
float tnsMemcpyTime = seconds() - t0;
t0 = seconds();
unsigned int *dULoc = new unsigned int[TiledX[0].ndims];
unsigned int *szDU = new unsigned int[TiledX[0].ndims];
// //Matrices
DTYPE *dU;// *dU0, *dU1, *dU2, *dU3;
ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols
: (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols );
checkCuda(cudaMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0);
for (int m = 0; m < TiledX[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
cudaMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
float mtxMemcpyTime = seconds() - t0;
// cout << "tns and mtx memcopy time: " << tnsMemcpyTime <<", " << mtxMemcpyTime<< endl;
if(TiledX[0].ndims == 4)
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
// BLOCK and GRID
int BLOCKSIZE = 512;
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
// if(Opt.warpPerSlice * 32 > BLOCKSIZE){
// cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
// exit(0);
// }
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaStream_t streams[Opt.nBin];
float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
int smallBinEndsAt = 5;
/* Warp per slice and threadblock per size */
int *warpPerSlc = new int[Opt.nBin];
int *logOfWarpPerSlc = new int[Opt.nBin];
int *TbPerSlc = new int[Opt.nBin];
int *logOfTbPerSlc = new int[Opt.nBin];
for (int bin = 0; bin < Opt.nBin ; ++bin){
TbPerSlc[bin] = 1;
warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1);
if(warpPerSlc[bin] > 16)
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]);
TbPerSlc[bin] = 1;
logOfTbPerSlc[bin] = 0;
if (bin >= smallBinEndsAt){
TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5
if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32;
logOfTbPerSlc[bin] = log2(TbPerSlc[bin]);
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = 4;
}
}
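/* Per-bin warp / thread-block configuration: small bins get one thread block
per slice with 1, 2, 4, ..., 16 warps; bins at or above smallBinEndsAt get
16 warps and 2, 4, ..., 32 thread blocks per slice. */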
// TBD: change warpPerSlc to warpPerSlc[bin] and all
int slcPerTb = 1;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin)
cudaStreamCreate(&streams[bin]);
/*MTTKRP on Opt.mode*/
unsigned int dU0Loc, dU1Loc, dU2Loc , dU3Loc;
/* matrix order according to mode order*/
for (int m = 0; m < TiledX[0].ndims; ++m){
int curMode = TiledX[0].modeOrder[m];
dULoc[m] = 0;
for (int q = 0; q < curMode; ++q){
dULoc[m] += szDU[q % TiledX[0].ndims]; //1 2 3 0
}
}
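// Run MTTKRP for every mode with the same CSF. For modes after the first, the
// previously computed factor is re-copied to the device and the new output
// factor's slot in dU is zeroed before launching the kernels.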
for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode){
if(MTTKRPmode > 0){
mili = 0; GPUTime = 0; CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0;
// The MTTKRP on the previous mode overwrote that factor on the device; re-initialize
// it on the host to a fixed value (mode + 0.5) so the remaining modes and the
// correctness check use deterministic input.
int mode = MTTKRPmode - 1;
for(long r = 0; r < U[mode].nRows; ++r){
for(long c = 0; c < U[mode].nCols; ++c)
U[mode].vals[r * U[mode].nCols + c] = mode + .5; // alternatives: 0.1 * drand48(); 1; (r * R + c + 1);
}
if(MTTKRPmode == 1){
checkCuda(cudaMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
cudaMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 2){
checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
cudaMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 3){
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
cudaMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE));
}
}
if(performMTTKRPMode && TiledX[0].modeOrder[0] == MTTKRPmode){
// if(Opt.verbose)
cout << "Slc atomics - " ;
for (int tile = 0; tile < Opt.nTile; ++tile){
dBinLoc = 0;
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int smallBinEndsAt = 5;
int slcPerTb = 0;
int warpPerFbr =Opt.warpPerSlice;//4;//; BLOCKSIZE/32;//1;//
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
bool useLoop = false;
int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB
int logOfFbrPerWarp = log2(fbrPerWarp );
// int fbrPerWarp = 1;//BLOCKSIZE/32; // dont overflow TB
// int logOfFbrPerWarp = log2(fbrPerWarp );
if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){
cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!"
<< endl << "hint: increase -b!" << endl;
exit(0);
}
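// When the PARTI-style loop is not used, grid.x launches warpPerFbr warps
// (32 threads each) for every group of fbrPerWarp consecutive fibers,
// rounded up to whole thread blocks.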
/* Like PARTI loop */
if(useLoop)
grid.x = Opt.gridSize;// 32768*16;
else
grid.x = ( warpPerFbr * 32 * ((TiledX[tile].nFibers+fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE;
double t0 = seconds();
cuda_timer_start(start);
if(TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers,
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
else if(TiledX[0].ndims == 4)
mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[tile].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
cuda_timer_stop(start, stop, mili);
CPUtimer += seconds() - t0;
GPUTime += mili;
if(Opt.verbose){
cout << "Tile: " << tile << " - time: " << mili << "ms";
cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: "
<< TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " ";
cout << endl;
}
}
allModeGPUTime += GPUTime;
cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl;
}
/*processing fbrS level for 4D tensor*/
else if(TiledX[0].ndims == 4 && performMTTKRPnMode && TiledX[0].modeOrder[1] == MTTKRPmode){
// if(Opt.verbose)
cout << "FbrS atomics - " ;
mili = 0, GPUTime = 0, CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0;
for (int tile = 0; tile < Opt.nTile; ++tile){
dBinLoc = 0;
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
// cout <<"might wanna change binning style and Block size, logWPC, COO like parallelism, allow mode sort" << endl;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int smallBinEndsAt = 5;
int slcPerTb = 0;
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
grid.x = ( warpPerFbr * 32 * TiledX[tile].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
double t0 = seconds();
cuda_timer_start(start);
mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[tile].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
cuda_timer_stop(start, stop, mili);
CPUtimer += seconds() - t0;
GPUTime += mili;
if(Opt.verbose){
cout << "Tile: " << tile << " - time: " << mili << "ms";
cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: "
<< TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " ";
cout << endl;
}
}
allModeGPUTime += GPUTime;
cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl;
}
else if(performMTTKRPnMode && TiledX[0].modeOrder[TiledX[0].ndims-2] == MTTKRPmode){
// if(Opt.verbose)
cout << "Fbr atomics - " ;
mili = 0, GPUTime = 0, CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0;
for (int tile = 0; tile < Opt.nTile; ++tile){
dBinLoc = 0;
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
// cout <<"might wanna change binning style and Block size, logWPC, COO like parallelism, allow mode sort" << endl;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int smallBinEndsAt = 5;
int slcPerTb = 0;
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
bool useLoop = false;
// /* Like PARTI loop */ =
if(useLoop)
grid.x = Opt.gridSize;// 32768*16;
else
grid.x = ( warpPerFbr * 32 * TiledX[tile].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
double t0 = seconds();
cuda_timer_start(start);
if(useLoop)
mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_loop<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers,
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else{
if(TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers,
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[tile].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
cuda_timer_stop(start, stop, mili);
CPUtimer += seconds() - t0;
GPUTime += mili;
if(Opt.verbose){
cout << "Tile: " << tile << " - time: " << mili << "ms";
cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: "
<< TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " ";
cout << endl;
}
}
allModeGPUTime += GPUTime;
cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl;
}
else if(performMTTKRPnnMode && TiledX[0].modeOrder[TiledX[0].ndims-1] == MTTKRPmode){
// if(Opt.verbose)
cout << "Nnz atomics - " ;
mili = 0, GPUTime = 0, CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0, dFbrLikeSlcIndsLoc = 0;
for (int tile = 0; tile < Opt.nTile; ++tile){
dBinLoc = 0;
if(tile > 0) {
dLoc += TiledX[tile-1].totNnz;
dSlcLoc += TiledX[tile - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[tile - 1].fbrIdx[0].size();
dFbrLoc += TiledX[tile - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[tile - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[tile - 1].fbrPtr[2].size() : 0) ;
}
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
bool useLoop = false;
int smallBinEndsAt = 5;
int slcPerTb = 0;
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
// /* Like PARTI loop */ =
if(useLoop)
grid.x = Opt.gridSize;// 32768;
else
grid.x = ( warpPerFbr * 32 * TiledX[tile].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
int dloc = 0;
double t0 = seconds();
cuda_timer_start(start);
if(useLoop)
mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_loop<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers,
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else{
if (TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, TiledX[tile].nFibers,
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[tile].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
cuda_timer_stop(start, stop, mili);
CPUtimer += seconds() - t0;
GPUTime += mili;
if(Opt.verbose){
cout << "Tile: " << tile << " - time: " << mili << "ms";
cout <<" nnz: " << TiledX[tile].totNnz << " nFibers: "
<< TiledX[tile].fbrPtr[1].size() << " nSlc " << TiledX[tile].fbrIdx[0].size() << " ";
cout << endl;
}
}
allModeGPUTime += GPUTime;
cout << "singleCSF-GPU-mode " << MTTKRPmode <<" :" << GPUTime << "," << endl;
}
}
cout << "Total GPU time: " << allModeGPUTime << ", nnz:" << TiledX[0].totNnz
<< ", nFibers:" << TiledX[0].fbrPtr[1].size() << ", nSlc:" << TiledX[0].fbrIdx[0].size()
<< endl;
for (int bin = 0; bin < Opt.nBin; ++bin)
cudaStreamDestroy(streams[bin]);
/* Copying output matrix from GPU to CPU for correctness check */
int MTTKRPmode = TiledX[0].ndims - 1;
ITYPE loc = ((TiledX[0].ndims == 3) ? szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]);
checkCuda(cudaMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
// check correctness
// if(Opt.impType == 14){
// MTTKRPmode = 3;
// checkCuda(cudaMemcpy(&U[MTTKRPmode].vals[0] , dU + szDU[0] +szDU[1] + szDU[2], U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
// }
// else
// checkCuda(cudaMemcpy(&U[mode0].vals[0], dU, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
cudaFree(dVals);
cudaFree(dU); //cudaFree(dU1); cudaFree(dU2); cudaFree(dU3);
cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3);
cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2);
cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2);
cudaFree(dFbrLikeSlcInds);
return 0;
}
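/* MTTKRP with MM-CSF (mixed-mode CSF): TiledX[m] holds one CSF representation
per mode, each with its own modeOrder. For every MTTKRP mode, each non-empty
CSF copy picks the parallelization / atomics level (slice, fbrS, fiber, or
nnz) according to where that mode appears in its modeOrder, and the per-copy
times are accumulated into one per-mode GPU time. */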
int MTTKRP_MIHCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){
ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dFbrLikeSlcInds;
DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float memcpyTime = 0;
// All m same mode
ITYPE mode0 = 0;//TiledX[0].modeOrder[0];
ITYPE mode1 = 1;//TiledX[0].modeOrder[1];
ITYPE mode2 = 2;//TiledX[0].modeOrder[2];
ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ;
for (int m = 0; m < TiledX[0].ndims; ++m){
if (TiledX[m].totNnz == 0) continue;
totNnz += TiledX[m].totNnz;
totSlcPtr += TiledX[m].fbrPtr[0].size() ;
totSlcIdx += TiledX[m].fbrIdx[0].size() ;
totFbrPtr += TiledX[m].fbrPtr[1].size() ;
totFbrIdx += TiledX[m].fbrIdx[1].size() ;
totFbrPtr2 += ((TiledX[m].ndims == 4) ? TiledX[m].fbrPtr[2].size() : 0) ;
}
//allocate and memcpy GPU memory
//Tensor
cuda_timer_start(start);
checkCuda(cudaMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0);
// checkCuda(cudaMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 3)
checkCuda(cudaMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 4){
checkCuda(cudaMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0);
}
/* cuda memcopy for tiled parts*/
for (int m = 0; m < TiledX[0].ndims; ++m){
if(m > 0) {
if (TiledX[m-1].totNnz > 0) {
dLoc += TiledX[m-1].totNnz;
dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); // all m same
dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size();
dFbrLoc += TiledX[m - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[m].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size() : 0) ;
}
}
if (TiledX[m].totNnz == 0) continue;
checkCuda(cudaMemcpy(dVals + dLoc, &(TiledX[m].vals[0]), TiledX[m].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[m].fbrPtr[0][0]), TiledX[m].fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[m].fbrIdx[0][0]), TiledX[m].fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[m].fbrPtr[1][0]), TiledX[m].fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[m].fbrIdx[1][0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[m].fbrLikeSlcInds[0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(TiledX[m].ndims == 3){
if(m == 0)
// checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[m].inds[mode2][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
else if(m == 1)
checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
else if(m == 2)
checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
if(TiledX[m].ndims == 4){
checkCuda(cudaMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[m].fbrPtr[2][0]), TiledX[m].fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[m].fbrIdx[2][0]), TiledX[m].fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds3 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[3]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
}
cuda_timer_stop(start, stop, memcpyTime);
cout << "Memcopy time " << memcpyTime << endl;
// //Matrices
unsigned int *dULoc = new unsigned int[TiledX[0].ndims];
unsigned int *szDU = new unsigned int[TiledX[0].ndims];
// //Matrices
DTYPE *dU;// *dU0, *dU1, *dU2, *dU3;
ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols
: (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols );
checkCuda(cudaMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0);
for (int m = 0; m < TiledX[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
cudaMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
if(TiledX[0].ndims == 4)
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
// BLOCK and GRID
int BLOCKSIZE = 512;
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
// if(Opt.warpPerSlice * 32 > BLOCKSIZE){
// cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
// exit(0);
// }
cudaStream_t streams[Opt.nBin];
float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
int smallBinEndsAt = 5;
/* Warp per slice and threadblock per slice */
int *warpPerSlc = new int[Opt.nBin];
int *logOfWarpPerSlc = new int[Opt.nBin];
int *TbPerSlc = new int[Opt.nBin];
int *logOfTbPerSlc = new int[Opt.nBin];
for (int bin = 0; bin < Opt.nBin ; ++bin){
TbPerSlc[bin] = 1;
warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1);
if(warpPerSlc[bin] > 16)
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]);
TbPerSlc[bin] = 1;
logOfTbPerSlc[bin] = 0;
if (bin >= smallBinEndsAt){
TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5
if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32;
logOfTbPerSlc[bin] = log2(TbPerSlc[bin]);
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = 4;
}
}
// TBD: change warpPerSlc to warpPerSlc[bin] and all
int slcPerTb = 1;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0;
for (int bin = 0; bin < Opt.nBin; ++bin)
cudaStreamCreate(&streams[bin]);
for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode){
if(MTTKRPmode > 0){
mili = 0; GPUTime = 0; CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0;
// The MTTKRP on the previous mode overwrote that factor on the device; re-initialize
// it on the host to a fixed value (mode + 0.5) so the remaining modes and the
// correctness check use deterministic input.
int mode = MTTKRPmode - 1;
for(long r = 0; r < U[mode].nRows; ++r){
for(long c = 0; c < U[mode].nCols; ++c)
U[mode].vals[r * U[mode].nCols + c] = mode + .5; // alternatives: 0.1 * drand48(); 1; (r * R + c + 1);
}
if(MTTKRPmode == 1){
checkCuda(cudaMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
cudaMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 2){
checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
cudaMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 3){
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
cudaMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE));
}
}
for (int m = 0; m < TiledX[0].ndims; ++m){
/* matrix order according to mode order*/
for (int mm = 0; mm < TiledX[0].ndims; ++mm){
int curMode = TiledX[m].modeOrder[mm];
dULoc[mm] = 0;
for (int q = 0; q < curMode; ++q)
dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0
}
dBinLoc = 0;
if(m > 0) {
if (TiledX[m-1].totNnz > 0) {
dLoc += TiledX[m-1].totNnz;
dSlcLoc += TiledX[m - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size();
dFbrLoc += TiledX[m - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size(): 0) ;
}
}
BLOCKSIZE = 512;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
if (TiledX[m].totNnz == 0) continue;
cuda_timer_start(start);
if(TiledX[m].modeOrder[0] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "Slc atomics - " ;
// BLOCKSIZE = 128;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;
int logOfWarpPerFbr = log2(warpPerFbr);
int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB
int logOfFbrPerWarp = log2(fbrPerWarp );
if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){
cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!"
<< endl << "hint: increase -b!" << endl;
exit(0);
}
grid.x = ( warpPerFbr * 32 * ((TiledX[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
else if(TiledX[0].ndims == 4)
mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
}
else if(TiledX[0].ndims == 4 && TiledX[m].modeOrder[1] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "FbrS atomics - ";
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//1;//BLOCKSIZE/32;//1;////4;//;
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(TiledX[m].modeOrder[TiledX[0].ndims-2] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "Fbr atomics - ";
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(TiledX[m].modeOrder[TiledX[0].ndims-1] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "nnz atomics - " ;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if (TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
cuda_timer_stop(start, stop, mili);
GPUTime += mili;
if(Opt.verbose)
{
cout << "Tile: " << m << " - time: " << mili << " ms";
cout <<" nnz: " << TiledX[m].totNnz << " nFibers: "
<< TiledX[m].fbrPtr[1].size() << " nSlc " << TiledX[m].fbrIdx[0].size() << " ";
cout << " modeOrder: " << TiledX[m].modeOrder[0] <<" " << TiledX[m].modeOrder[1] <<" "
<< TiledX[m].modeOrder[2];
cout << endl;
}
}
if(Opt.verbose)
cout << "MI-HCSR-GPU-mode "<< MTTKRPmode <<" : " << GPUTime << "," << endl;
allModeGPUTime += GPUTime;
}
int totalMIslics = 0, totalMISfibers = 0, totalMIfibers = 0, totalMInnz = 0;
for (int m = 0; m < TiledX[0].ndims; ++m){
if(TiledX[m].totNnz){
if(TiledX[m].ndims == 3){
totalMIslics += TiledX[m].fbrIdx[0].size();
totalMIfibers += TiledX[m].fbrPtr[1].size();
totalMInnz += TiledX[m].totNnz;
}
if(TiledX[m].ndims == 4){
totalMIslics += TiledX[m].fbrIdx[0].size();
totalMISfibers += TiledX[m].fbrPtr[1].size();
totalMIfibers += TiledX[m].fbrPtr[2].size();
totalMInnz += TiledX[m].totNnz;
}
}
}
cout << "Total GPU time: " << allModeGPUTime;
// if(Opt.verbose)
if(TiledX[0].ndims == 3)
cout << " nSlc:" << totalMIslics
<< ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz
<< endl;
else if(TiledX[0].ndims == 4)
cout << " nSlc:" << totalMIslics << ", nSFibers:" << totalMISfibers
<< ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz
<< endl;
for (int bin = 0; bin < Opt.nBin; ++bin)
cudaStreamDestroy(streams[bin]);
/* Copying output matrix from GPU to CPU for correctness check */
int MTTKRPmode = TiledX[0].ndims - 1;
ITYPE loc = ((TiledX[0].ndims == 3) ? szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]);
checkCuda(cudaMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
cudaFree(dVals);
cudaFree(dU); //cudaFree(dU1); cudaFree(dU2); cudaFree(dU3);
cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3);
cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2);
cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2);
cudaFree(dFbrLikeSlcInds);
return 0;
}
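/* init_GPU: one-time device setup for the CPD path. Allocates and copies the
MM-CSF tensor (3-D case, dInds2 path, as written) and the packed factor
matrices to the GPU so MTTKRP_MIHCSR_GPU_oneMode_forCPD can be called
repeatedly without re-transferring the tensor. */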
int init_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt, ITYPE **dInds2, ITYPE **dfbrPtr1, ITYPE **dfbrIdx1, ITYPE **dFbrLikeSlcInds, DTYPE **dVals, DTYPE **dU){
ITYPE mode0 = 0;//TiledX[0].modeOrder[0];
ITYPE mode1 = 1;//TiledX[0].modeOrder[1];
ITYPE mode2 = 2;//TiledX[0].modeOrder[2];
ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ;
// if(iter == 0 && cpdMode == 0)
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
for (int m = 0; m < TiledX[0].ndims; ++m){
if (TiledX[m].totNnz == 0) continue;
totNnz += TiledX[m].totNnz;
totFbrPtr += TiledX[m].fbrPtr[1].size() ;
totFbrIdx += TiledX[m].fbrIdx[1].size() ;
totFbrPtr2 += ((TiledX[m].ndims == 4) ? TiledX[m].fbrPtr[2].size() : 0) ;
}
/*allocate and memcpy GPU memory*/
checkCuda(cudaMalloc((void**) dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 3)
checkCuda(cudaMalloc((void**) dInds2, totNnz * sizeof(ITYPE)), 0);
for (int m = 0; m < TiledX[0].ndims; ++m){
if(m > 0) {
if (TiledX[m-1].totNnz > 0) {
dLoc += TiledX[m-1].totNnz;
dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); // all m same
dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size();
dFbrLoc += TiledX[m - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size();
}
}
if (TiledX[m].totNnz == 0) continue;
checkCuda(cudaMemcpy(*dVals + dLoc, &(TiledX[m].vals[0]), TiledX[m].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(*dfbrPtr1 + dFbrLoc, &(TiledX[m].fbrPtr[1][0]), TiledX[m].fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(*dfbrIdx1 + dFbrIdxLoc, &(TiledX[m].fbrIdx[1][0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(*dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[m].fbrLikeSlcInds[0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(*dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
// //Matrices
unsigned int *szDU = new unsigned int[TiledX[0].ndims];
ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols
: (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols );
for (int m = 0; m < TiledX[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
checkCuda(cudaMalloc((void**) dU, mtxSize * sizeof(DTYPE)), 0);
// cudaMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(cudaMemcpy(*dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(*dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(*dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
// MTTKRP_MIHCSR_GPU_oneMode_forCPD(TiledX, U, Opt, 0, 0,
// dInds2, dfbrPtr1, dfbrIdx1, dFbrLikeSlcInds, dVals, dU);
return 0;
}
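/* MTTKRP for a single mode (cpdMode) on the MM-CSF tensor and packed factors
already resident on the GPU (see init_GPU). The output factor's slot in dU
is zeroed, the per-copy kernels are launched as in MTTKRP_MIHCSR_GPU, and
only that factor is copied back. Device buffers are freed after the last
mode of the last CPD iteration. Illustrative sketch of a CPD-ALS driver
(hypothetical caller, factor-update steps omitted):

ITYPE *dInds2, *dfbrPtr1, *dfbrIdx1, *dFbrLikeSlcInds; DTYPE *dVals, *dU;
init_GPU(TiledX, U, Opt, &dInds2, &dfbrPtr1, &dfbrIdx1, &dFbrLikeSlcInds, &dVals, &dU);
for (int iter = 0; iter < Opt.cpdIters; ++iter)
for (int mode = 0; mode < TiledX[0].ndims; ++mode)
MTTKRP_MIHCSR_GPU_oneMode_forCPD(TiledX, U, Opt, mode, iter,
dInds2, dfbrPtr1, dfbrIdx1, dFbrLikeSlcInds, dVals, dU);
*/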
int MTTKRP_MIHCSR_GPU_oneMode_forCPD(TiledTensor *TiledX, Matrix *U, const Options &Opt, int cpdMode, int iter,
ITYPE *dInds2, ITYPE *dfbrPtr1, ITYPE *dfbrIdx1, ITYPE *dFbrLikeSlcInds, DTYPE *dVals, DTYPE *dU){
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float mili;
ITYPE *dInds3, *dfbrPtr0, *dfbrIdx0, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin;
// DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
unsigned int *dULoc = new unsigned int[TiledX[0].ndims];
unsigned int *szDU = new unsigned int[TiledX[0].ndims];
for (int m = 0; m < TiledX[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
ITYPE loc = 0;
for (int m = 0; m < cpdMode; ++m)
loc += szDU[m];
cudaMemset(dU+loc, 0, U[cpdMode].nRows * U[cpdMode].nCols * sizeof(DTYPE));
// BLOCK and GRID
int BLOCKSIZE = 512;
float GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0;
int MTTKRPmode = cpdMode;
// for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode)
{
for (int m = 0; m < TiledX[0].ndims; ++m){
/* matrix order according to mode order*/
for (int mm = 0; mm < TiledX[0].ndims; ++mm){
int curMode = TiledX[m].modeOrder[mm];
dULoc[mm] = 0;
for (int q = 0; q < curMode; ++q)
dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0
}
dBinLoc = 0;
if(m > 0) {
if (TiledX[m-1].totNnz > 0) {
dLoc += TiledX[m-1].totNnz;
dSlcLoc += TiledX[m - 1].fbrPtr[0].size();
dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size();
dFbrLoc += TiledX[m - 1].fbrPtr[1].size();
dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size();
dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size(): 0) ;
}
}
BLOCKSIZE = 512;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
if (TiledX[m].totNnz == 0) continue;
cuda_timer_start(start);
if(TiledX[m].modeOrder[0] == MTTKRPmode && TiledX[m].totNnz){
// if(Opt.verbose)
// cout << "Slc atomics - " ;
// BLOCKSIZE = 128;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;
int logOfWarpPerFbr = log2(warpPerFbr);
int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB
int logOfFbrPerWarp = log2(fbrPerWarp );
if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){
cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!"
<< endl << "hint: increase -b!" << endl;
exit(0);
}
grid.x = ( warpPerFbr * 32 * ((TiledX[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
else if(TiledX[0].ndims == 4)
mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
}
else if(TiledX[m].modeOrder[TiledX[0].ndims-2] == MTTKRPmode && TiledX[m].totNnz){
// if(Opt.verbose)
// cout << "Fbr atomics - ";
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(TiledX[m].modeOrder[TiledX[0].ndims-1] == MTTKRPmode && TiledX[m].totNnz){
// if(Opt.verbose)
// cout << "nnz atomics - " ;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if (TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
cuda_timer_stop(start, stop, mili);
GPUTime += mili;
if(Opt.verbose)
{
cout << "Tile: " << m << " - time: " << mili << " ms";
cout <<" nnz: " << TiledX[m].totNnz << " nFibers: "
<< TiledX[m].fbrPtr[1].size() << " nSlc " << TiledX[m].fbrIdx[0].size() << " ";
cout << " modeOrder: " << TiledX[m].modeOrder[0] <<" " << TiledX[m].modeOrder[1] <<" "
<< TiledX[m].modeOrder[2];
cout << endl;
}
}
// cout << "MI-HCSR-GPU-mode "<< MTTKRPmode <<" : " << GPUTime << "," << endl;
allModeGPUTime += GPUTime;
}
// ITYPE loc = 0;
// for (int m = 0; m < cpdMode; ++m)
// loc += szDU[m];
// ITYPE loc = szDU[0];
/* Copying output matrix from GPU to CPU for correctness check */
checkCuda(cudaMemcpy(&U[cpdMode].vals[0], dU + loc, U[cpdMode].nRows * U[cpdMode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
if(iter == Opt.cpdIters - 1 && cpdMode == TiledX[0].ndims - 1)
{
cout << "Freeing variable " << endl;
cudaFree(dVals);
cudaFree(dU); //cudaFree(dU1); cudaFree(dU2); cudaFree(dU3);
cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3);
cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2);
cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2);
cudaFree(dFbrLikeSlcInds);
}
return 0;
}
int MTTKRP_MIHCSR_multiGPU(TiledTensor *MMCSF, Matrix *U, const Options &Opt, const MPI_param &MPIparam){
ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin, *dFbrLikeSlcInds;
DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
ITYPE dPsLoc, dPfLoc, dpNloc; // slice, fiber, and nnz locations based on the partition inside MM-CSF
// All m same mode
ITYPE mode0 = 0;//MMCSF[0].modeOrder[0];
ITYPE mode1 = 1;//MMCSF[0].modeOrder[1];
ITYPE mode2 = 2;//MMCSF[0].modeOrder[2];
ITYPE mode3 = 3;//((MMCSF[0].ndims == 4) ? MMCSF[0].modeOrder[3] : 0) ;
int rank = MPIparam.mpi_rank;
int nP = MPIparam.n_proc;
int *nnzInRank = new int [nP];
int *fbrInRank = new int [nP];
int *slcInRank = new int [nP];
for (int m = 0; m < MMCSF[0].ndims; ++m){
if (MMCSF[m].totNnz == 0) continue;
totNnz += MMCSF[m].nnzInRank[rank];
totSlcPtr += MMCSF[m].slcInRank[rank];
totSlcIdx += MMCSF[m].slcInRank[rank];
totFbrPtr += MMCSF[m].fbrInRank[rank];
totFbrIdx += MMCSF[m].fbrInRank[rank] ;
totFbrPtr2 += ((MMCSF[m].ndims == 4) ? MMCSF[m].fbrPtr[2].size() : 0) ;
}
//allocate and memcpy GPU memory
//Tensor
checkCuda(cudaMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr0, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx0, totSlcIdx * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dSlcMapperBin, totSlcPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx1, totFbrIdx * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), 0);
if(MMCSF[0].ndims == 3)
checkCuda(cudaMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), 0);
if(MMCSF[0].ndims == 4){
checkCuda(cudaMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), 0);
}
cout <<"point 1 starting MPI" <<MMCSF[0].ndims << endl;
/* cuda memcopy for tiled parts*/
for (int m = 0; m < MMCSF[0].ndims; ++m){
if(m > 0) {
if (MMCSF[m-1].totNnz > 0) {
dLoc += MMCSF[m-1].nnzInRank[rank];
dSlcLoc += MMCSF[m-1].slcInRank[rank]; // all m same
dSlcIdxLoc += MMCSF[m-1].slcInRank[rank];
dFbrLoc += MMCSF[m-1].fbrInRank[rank];
dFbrIdxLoc += MMCSF[m-1].fbrInRank[rank];
// dFbrLoc2 += ((MMCSF[m].ndims == 4) ? MMCSF[m - 1].fbrPtr[2].size() : 0) ;
}
}
if (MMCSF[m].totNnz == 0) continue; // not necessary I guess...
int stNnz = 0, stFbr = 0, stSlc= 0;
if(rank > 0){
stNnz = MMCSF[m].mpiEndNnz[rank - 1];
stFbr = MMCSF[m].mpiEndFbr[rank - 1];
stSlc = MMCSF[m].mpiEndSlc[rank - 1];
}
checkCuda(cudaMemcpy(dVals + dLoc, &(MMCSF[m].vals[0 + stNnz]), MMCSF[m].nnzInRank[rank] * sizeof(DTYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr0 + dSlcLoc, &(MMCSF[m].fbrPtr[0][0 + stSlc]), MMCSF[m].slcInRank[rank] * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx0 + dSlcIdxLoc, &(MMCSF[m].fbrIdx[0][0+stSlc]), MMCSF[m].slcInRank[rank] * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr1 + dFbrLoc, &(MMCSF[m].fbrPtr[1][0+stFbr]), MMCSF[m].fbrInRank[rank] * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx1 + dFbrIdxLoc, &(MMCSF[m].fbrIdx[1][0+stFbr]), MMCSF[m].fbrInRank[rank] * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(MMCSF[m].fbrLikeSlcInds[0+stFbr]),MMCSF[m].fbrInRank[rank] * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(MMCSF[m].ndims == 3)
checkCuda(cudaMemcpy(dInds2 + dLoc, &(MMCSF[m].inds[MMCSF[m].modeOrder[2]][0+stNnz]), MMCSF[m].nnzInRank[rank] * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(MMCSF[m].ndims == 4){
checkCuda(cudaMemcpy(dFbrPtr2 + dFbrLoc2, &(MMCSF[m].fbrPtr[2][0]), MMCSF[m].fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dFbrIdx2 + dFbrLoc2, &(MMCSF[m].fbrIdx[2][0]), MMCSF[m].fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds3 + dLoc, &(MMCSF[m].inds[MMCSF[m].modeOrder[3]][0]), MMCSF[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
}
// //Matrices
unsigned int *dULoc = new unsigned int[MMCSF[0].ndims];
unsigned int *szDU = new unsigned int[MMCSF[0].ndims];
// //Matrices
DTYPE *dU;// *dU0, *dU1, *dU2, *dU3;
ITYPE mtxSize = ((MMCSF[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols
: (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols );
checkCuda(cudaMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0);
for (int m = 0; m < MMCSF[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
cudaMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
if(MMCSF[0].ndims == 4)
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
// BLOCK and GRID
int BLOCKSIZE = 512;
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
if(Opt.warpPerSlice * 32 > BLOCKSIZE){
cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
exit(0);
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaStream_t streams[Opt.nBin];
float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
int smallBinEndsAt = 5;
/* Warp per slice and threadblock per slice */
int *warpPerSlc = new int[Opt.nBin];
int *logOfWarpPerSlc = new int[Opt.nBin];
int *TbPerSlc = new int[Opt.nBin];
int *logOfTbPerSlc = new int[Opt.nBin];
for (int bin = 0; bin < Opt.nBin ; ++bin){
TbPerSlc[bin] = 1;
warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1);
if(warpPerSlc[bin] > 16)
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]);
TbPerSlc[bin] = 1;
logOfTbPerSlc[bin] = 0;
if (bin >= smallBinEndsAt){
TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5
if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32;
logOfTbPerSlc[bin] = log2(TbPerSlc[bin]);
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = 4;
}
}
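/* Resulting per-bin schedule (for smallBinEndsAt = 5), worked out from the loop above:
   bins 0-4 : warpPerSlc = 1, 2, 4, 8, 16 with TbPerSlc = 1;
   bins 5+  : warpPerSlc = 16 with TbPerSlc = 2, 4, 8, ... capped at 32.
   Note: these arrays appear unused by the fiber-level kernel launches below, which take their
   warp and fiber counts from Opt instead. */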
// TBD: change warpPerSlc to warpPerSlc[bin] and all
int slcPerTb = 1;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0;
for (int bin = 0; bin < Opt.nBin; ++bin)
cudaStreamCreate(&streams[bin]);
cout <<"point 2 starting MPI" <<MMCSF[0].ndims << endl;
for (int MTTKRPmode = 0; MTTKRPmode < MMCSF[0].ndims; ++MTTKRPmode){
if(MTTKRPmode > 0){
mili = 0; GPUTime = 0; CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0;
// MTTKRP on mode mode 0 changed DU0. To pass correctness for now initializing to 2 again.
int mode = MTTKRPmode - 1;
for(long r = 0; r < U[mode].nRows; ++r){
for(long c = 0; c < U[mode].nCols; ++c) // or u[mode].nCols
U[mode].vals[r * U[mode].nCols + c] = mode + .5;// 0.1 * drand48(); //1 ;//(r * R + c + 1); //
}
if(MTTKRPmode == 1){
checkCuda(cudaMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
cudaMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 2){
checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
cudaMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 3){
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
cudaMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE));
}
}
for (int m = 0; m < MMCSF[0].ndims; ++m){
/* matrix order according to mode order*/
for (int mm = 0; mm < MMCSF[0].ndims; ++mm){
int curMode = MMCSF[m].modeOrder[mm];
dULoc[mm] = 0;
for (int q = 0; q < curMode; ++q)
dULoc[mm] += szDU[q % MMCSF[0].ndims]; //1 2 3 0
}
dBinLoc = 0;
if(m > 0) {
if (MMCSF[m-1].totNnz > 0) {
dLoc += MMCSF[m-1].nnzInRank[rank];
dSlcLoc += MMCSF[m-1].slcInRank[rank]; // all m same
dSlcIdxLoc += MMCSF[m-1].slcInRank[rank];
dFbrLoc += MMCSF[m-1].fbrInRank[rank];
dFbrIdxLoc += MMCSF[m-1].fbrInRank[rank];
// dFbrLoc2 += ((MMCSF[m].ndims == 4) ? MMCSF[m - 1].fbrPtr[2].size() : 0) ;
}
}
BLOCKSIZE = 512;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
if (MMCSF[m].totNnz == 0) continue;
cuda_timer_start(start);
if(MMCSF[m].modeOrder[0] == MTTKRPmode && MMCSF[m].totNnz){
if(Opt.verbose)
cout << "Slc atomics - " ;
BLOCKSIZE = 128;
// BLOCKSIZE = 128;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;
int logOfWarpPerFbr = log2(warpPerFbr);
int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // don't overflow the thread block
int logOfFbrPerWarp = log2(fbrPerWarp );
if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){
cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!"
<< endl << "hint: increase -b!" << endl;
exit(0);
}
grid.x = ( warpPerFbr * 32 * ((MMCSF[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE;
if(MMCSF[0].ndims == 3)
mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar<<<grid, block, 0, 0 >>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, MMCSF[m].nFibers,
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
else if(MMCSF[0].ndims == 4)
mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D<<<grid, block, 0, 0 >>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
MMCSF[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
}
else if(MMCSF[0].ndims == 4 && MMCSF[m].modeOrder[1] == MTTKRPmode && MMCSF[m].totNnz){
if(Opt.verbose)
cout << "FbrS atomics - ";
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//1;//BLOCKSIZE/32;//1;////4;//;
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * MMCSF[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D<<<grid, block, 0, 0 >>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
MMCSF[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(MMCSF[m].modeOrder[MMCSF[0].ndims-2] == MTTKRPmode && MMCSF[m].totNnz){
if(Opt.verbose)
cout << "Fbr atomics - ";
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * MMCSF[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if(MMCSF[0].ndims == 3)
mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar<<<grid, block, 0, 0 >>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, MMCSF[m].nFibers,
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (MMCSF[0].ndims == 4)
mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D<<<grid, block, 0, 0 >>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
MMCSF[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(MMCSF[m].modeOrder[MMCSF[0].ndims-1] == MTTKRPmode && MMCSF[m].totNnz){
if(Opt.verbose)
cout << "nnz atomics - " ;
BLOCKSIZE = Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
if(warpPerFbr > (BLOCKSIZE/32)){
cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << endl;
exit(0);
}
int logOfWarpPerFbr = log2(warpPerFbr);
grid.x = ( warpPerFbr * 32 * MMCSF[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if (MMCSF[0].ndims == 3)
mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar<<<grid, block, 0, 0 >>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, MMCSF[m].nFibers,
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (MMCSF[0].ndims == 4)
mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D<<<grid, block, 0, 0 >>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
MMCSF[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
cuda_timer_stop(start, stop, mili);
GPUTime += mili;
if(Opt.verbose){
cout << "Tile: " << m << " - time: " << mili << " ms";
cout <<" nnz: " << MMCSF[m].totNnz << " nFibers: "
<< MMCSF[m].fbrPtr[1].size() << " nSlc " << MMCSF[m].fbrIdx[0].size() << " ";
cout << " modeOrder: " << MMCSF[m].modeOrder[0] <<" " << MMCSF[m].modeOrder[1] <<" "
<< MMCSF[m].modeOrder[2];
cout << endl;
}
}
cout << "MI-HCSR-GPU-mode "<< MTTKRPmode <<" : " << GPUTime << "," << endl;
allModeGPUTime += GPUTime;
}
int totalMIslics = 0, totalMIfibers = 0, totalMInnz = 0;
for (int m = 0; m < MMCSF[0].ndims; ++m){
if(MMCSF[m].totNnz){
totalMIslics += MMCSF[m].fbrIdx[0].size();
totalMIfibers += MMCSF[m].fbrPtr[1].size();
totalMInnz += MMCSF[m].totNnz;
}
}
cout << "Total GPU time: " << allModeGPUTime << ", nnz:" << totalMInnz
<< ", nFibers:" << totalMIfibers << ", nSlc:" << totalMIslics
<< endl;
for (int bin = 0; bin < Opt.nBin; ++bin)
cudaStreamDestroy(streams[bin]);
/* Copying output matrix from GPU to CPU*/
int MTTKRPmode = MMCSF[0].ndims - 1;
ITYPE loc = ((MMCSF[0].ndims == 3) ? szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]);
DTYPE *tmpDU = new DTYPE[ U[MTTKRPmode].nRows * U[MTTKRPmode].nCols];
checkCuda(cudaMemcpy(tmpDU, dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
cout <<"tmpDu: "<< tmpDU[0] << endl;
// checkCuda(cudaMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Allreduce( &(tmpDU[0]), &U[MTTKRPmode].vals[0], szDU[MTTKRPmode] , MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
/*Free variables*/
cudaFree(dVals);
cudaFree(dU); //cudaFree(dU1); cudaFree(dU2); cudaFree(dU3);
cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3);
cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2);
cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2);
cudaFree(dFbrLikeSlcInds);
return 0;
}
/* Scales with the number of partitions: an MM-CSF with 2 partitions launches kernels on 2 nodes in parallel.
Not scalable to more nodes. */
int MTTKRP_MIHCSR_multiGPU_parMM(TiledTensor *TiledX, Matrix *U, const Options &Opt, const MPI_param &MPIparam){
ITYPE *dInds2, *dInds3, *dfbrPtr0, *dfbrIdx0, *dfbrPtr1, *dfbrIdx1, *dFbrPtr2, *dFbrIdx2, *dSlcMapperBin, *dFbrLikeSlcInds;
DTYPE *dVals;
ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dBinLoc = 0, dFbrLoc2 =0;
// ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0;
// All m same mode
ITYPE mode0 = 0;//TiledX[0].modeOrder[0];
ITYPE mode1 = 1;//TiledX[0].modeOrder[1];
ITYPE mode2 = 2;//TiledX[0].modeOrder[2];
ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ;
//allocate and memcpy GPU memory
//Tensor
vector<int> activeTile;
for (int m = 0; m < TiledX[0].ndims; ++m){
if(TiledX[m].totNnz)
activeTile.push_back(m);
}
if ( MPIparam.mpi_rank >= (int) activeTile.size() ) {
cout << "Not using node " << MPIparam.mpi_rank << endl;
return 0;
}
if(MPIparam.n_proc < activeTile.size()){
cout << "Number of partition is higher than number of nodes. Hint: Allocate more nodes.";
}
int m = activeTile[MPIparam.mpi_rank];
if (TiledX[m].totNnz == 0) return 0; // not necessary I guess...
checkCuda(cudaMalloc((void**) &dVals, TiledX[m].totNnz * sizeof(DTYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr0, TiledX[m].fbrPtr[0].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx0, TiledX[m].fbrIdx[0].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dSlcMapperBin, TiledX[m].fbrPtr[0].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrPtr1, TiledX[m].fbrPtr[1].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dfbrIdx1, TiledX[m].fbrIdx[1].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dFbrLikeSlcInds, TiledX[m].fbrIdx[1].size() * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 3)
checkCuda(cudaMalloc((void**) &dInds2, TiledX[m].totNnz * sizeof(ITYPE)), 0);
if(TiledX[0].ndims == 4){
checkCuda(cudaMalloc((void**) &dFbrIdx2, TiledX[m].fbrPtr[2].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dFbrPtr2, TiledX[m].fbrIdx[2].size() * sizeof(ITYPE)), 0);
checkCuda(cudaMalloc((void**) &dInds3, TiledX[m].totNnz * sizeof(ITYPE)), 0);
}
/* cuda memcopy for tiled parts*/
// for (int m = 0; m < TiledX[0].ndims; ++m)
{
// if(m > 0) {
// if (TiledX[m-1].totNnz > 0) {
// dLoc += TiledX[m-1].totNnz;
// dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); // all m same
// dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size();
// dFbrLoc += TiledX[m - 1].fbrPtr[1].size();
// dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size();
// dFbrLoc2 += ((TiledX[m].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size() : 0) ;
// }
// }
checkCuda(cudaMemcpy(dVals + dLoc, &(TiledX[m].vals[0]), TiledX[m].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr0 + dSlcLoc, &(TiledX[m].fbrPtr[0][0]), TiledX[m].fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx0 + dSlcIdxLoc, &(TiledX[m].fbrIdx[0][0]), TiledX[m].fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrPtr1 + dFbrLoc, &(TiledX[m].fbrPtr[1][0]), TiledX[m].fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dfbrIdx1 + dFbrIdxLoc, &(TiledX[m].fbrIdx[1][0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[m].fbrLikeSlcInds[0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(TiledX[m].ndims == 3)
checkCuda(cudaMemcpy(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
if(TiledX[m].ndims == 4){
checkCuda(cudaMemcpy(dFbrPtr2 + dFbrLoc2, &(TiledX[m].fbrPtr[2][0]), TiledX[m].fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dFbrIdx2 + dFbrLoc2, &(TiledX[m].fbrIdx[2][0]), TiledX[m].fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dInds3 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[3]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
dBinLoc = 0;
for (int bin = 0; bin < Opt.nBin; ++bin){
if(bin > 0)
dBinLoc += TiledX[m].slcMapperBin[bin-1].size();
checkCuda(cudaMemcpy(dSlcMapperBin + dSlcIdxLoc + dBinLoc, &(TiledX[m].slcMapperBin[bin][0]), TiledX[m].slcMapperBin[bin].size() * sizeof(ITYPE),cudaMemcpyHostToDevice), 0);
}
}
// //Matrices
unsigned int *dULoc = new unsigned int[TiledX[0].ndims];
unsigned int *szDU = new unsigned int[TiledX[0].ndims];
// //Matrices
DTYPE *dU;// *dU0, *dU1, *dU2, *dU3;
ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols
: (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols );
checkCuda(cudaMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), 0);
for (int m = 0; m < TiledX[0].ndims; ++m)
szDU[m] = U[m].nRows * U[m].nCols;
cudaMemset(dU+0, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE));
checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
if(TiledX[0].ndims == 4)
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
// BLOCK and GRID
int BLOCKSIZE = 512;
unsigned int rowInATB = BLOCKSIZE / (Opt.warpPerSlice*32);
if(Opt.warpPerSlice * 32 > BLOCKSIZE){
cout << "BLOCKSIZE is smaller than work per slice! Increase BLOCKSIZE." << endl;
exit(0);
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaStream_t streams[Opt.nBin];
float mili = 0, GPUTime = 0, CPUtimer = 0, allModeGPUTime = 0;
int smallBinEndsAt = 5;
/* Warp per slice and threadblock per slice */
int *warpPerSlc = new int[Opt.nBin];
int *logOfWarpPerSlc = new int[Opt.nBin];
int *TbPerSlc = new int[Opt.nBin];
int *logOfTbPerSlc = new int[Opt.nBin];
for (int bin = 0; bin < Opt.nBin ; ++bin){
TbPerSlc[bin] = 1;
warpPerSlc[bin] = ((bin > 0) ? 2 << (bin - 1) : 1);
if(warpPerSlc[bin] > 16)
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = log2(warpPerSlc[bin]);
TbPerSlc[bin] = 1;
logOfTbPerSlc[bin] = 0;
if (bin >= smallBinEndsAt){
TbPerSlc[bin] = 1 << (bin - smallBinEndsAt + 1); // 1st big bin starts with 1 TB 1 << 1 not 1 << 5
if(TbPerSlc[bin] > 32) TbPerSlc[bin] = 32;
logOfTbPerSlc[bin] = log2(TbPerSlc[bin]);
warpPerSlc[bin] = 16;
logOfWarpPerSlc[bin] = 4;
}
}
// TBD: change warpPerSlc to warpPerSlc[bin] and all
int slcPerTb = 1;
dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0;
for (int bin = 0; bin < Opt.nBin; ++bin)
cudaStreamCreate(&streams[bin]);
for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode){
if(MTTKRPmode > 0){
mili = 0; GPUTime = 0; CPUtimer = 0;
dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0;
// MTTKRP on mode mode 0 changed DU0. To pass correctness for now initializing to 2 again.
int mode = MTTKRPmode - 1;
for(long r = 0; r < U[mode].nRows; ++r){
for(long c = 0; c < U[mode].nCols; ++c) // or u[mode].nCols
U[mode].vals[r * U[mode].nCols + c] = mode + .5;// 0.1 * drand48(); //1 ;//(r * R + c + 1); //
}
if(MTTKRPmode == 1){
checkCuda(cudaMemcpy(dU + 0, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
cudaMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 2){
checkCuda(cudaMemcpy(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
cudaMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE));
}
else if(MTTKRPmode == 3){
checkCuda(cudaMemcpy(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice), 0);
cudaMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE));
}
}
// for (int m = 0; m < TiledX[0].ndims; ++m)
{
/* matrix order according to mode order*/
for (int mm = 0; mm < TiledX[0].ndims; ++mm){
int curMode = TiledX[m].modeOrder[mm];
dULoc[mm] = 0;
for (int q = 0; q < curMode; ++q)
dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0
}
dBinLoc = 0;
// if(m > 0) {
// if (TiledX[m-1].totNnz > 0) {
// dLoc += TiledX[m-1].totNnz;
// dSlcLoc += TiledX[m - 1].fbrPtr[0].size();
// dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size();
// dFbrLoc += TiledX[m - 1].fbrPtr[1].size();
// dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size();
// dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size(): 0) ;
// }
// }
BLOCKSIZE = 512;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
if (TiledX[m].totNnz == 0) continue;
cuda_timer_start(start);
if(TiledX[m].modeOrder[0] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "Slc atomics - " ;
BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = 1;//BLOCKSIZE/32;//1;//Opt.warpPerSlice;//4;//;
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
int fbrPerWarp = BLOCKSIZE/32; // don't overflow the thread block
int logOfFbrPerWarp = log2(fbrPerWarp );
grid.x = ( warpPerFbr * 32 * ((TiledX[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
else if(TiledX[0].ndims == 4)
mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp);
}
else if(TiledX[0].ndims == 4 && TiledX[m].modeOrder[1] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "FbrS atomics - ";
BLOCKSIZE = 128;//Opt.TBsize;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = 1;//BLOCKSIZE/32;//1;//Opt.warpPerSlice;//4;//;
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(TiledX[m].modeOrder[TiledX[0].ndims-2] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "Fbr atomics - ";
BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = 1;//Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if(TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
else if(TiledX[m].modeOrder[TiledX[0].ndims-1] == MTTKRPmode && TiledX[m].totNnz){
if(Opt.verbose)
cout << "nnz atomics - " ;
BLOCKSIZE = 128;
dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1);
int warpPerFbr = 1;//Opt.warpPerSlice;//4;//;BLOCKSIZE/32;//
int logOfWarpPerFbr = log2(warpPerFbr);
int bin = 0;
grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE;
if (TiledX[0].ndims == 3)
mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds2 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers,
dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
else if (TiledX[0].ndims == 4)
mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D<<<grid, block, 0, streams[bin]>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc,
dInds3 + dLoc, dfbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dfbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2,
TiledX[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr);
}
cuda_timer_stop(start, stop, mili);
GPUTime += mili;
if(Opt.verbose)
{
cout << "Tile: " << m << " - time: " << mili << " ms";
cout <<" nnz: " << TiledX[m].totNnz << " nFibers: "
<< TiledX[m].fbrPtr[1].size() << " nSlc " << TiledX[m].fbrIdx[0].size() << " ";
cout << " modeOrder: " << TiledX[m].modeOrder[0] <<" " << TiledX[m].modeOrder[1] <<" "
<< TiledX[m].modeOrder[2];
cout << endl;
}
}
cout << "MI-HCSR-GPU-mode "<< MTTKRPmode <<" : " << GPUTime << "," << endl;
allModeGPUTime += GPUTime;
}
int totalMIslics = 0, totalMIfibers = 0, totalMInnz = 0;
for (int m = 0; m < TiledX[0].ndims; ++m){
if(TiledX[m].totNnz){
totalMIslics += TiledX[m].fbrIdx[0].size();
totalMIfibers += TiledX[m].fbrPtr[1].size();
totalMInnz += TiledX[m].totNnz;
}
}
cout << "Total GPU time: " << allModeGPUTime << ", nnz:" << totalMInnz
<< ", nFibers:" << totalMIfibers << ", nSlc:" << totalMIslics
<< endl;
for (int bin = 0; bin < Opt.nBin; ++bin)
cudaStreamDestroy(streams[bin]);
/* Copying output matrix from GPU to CPU*/
int MTTKRPmode = TiledX[0].ndims - 1;
ITYPE loc = ((TiledX[0].ndims == 3) ? szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]);
DTYPE *tmpDU = new DTYPE[ U[MTTKRPmode].nRows * U[MTTKRPmode].nCols];
checkCuda(cudaMemcpy(tmpDU, dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
// checkCuda(cudaMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), 0);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Allreduce( &(tmpDU[0]), &U[MTTKRPmode].vals[0], szDU[MTTKRPmode] , MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
/*Free variables*/
cudaFree(dVals);
cudaFree(dU); //cudaFree(dU1); cudaFree(dU2); cudaFree(dU3);
cudaFree(dfbrIdx0); cudaFree(dInds2); cudaFree(dInds3);
cudaFree(dfbrIdx0); cudaFree(dfbrIdx1); cudaFree(dFbrIdx2);
cudaFree(dfbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrPtr2);
cudaFree(dFbrLikeSlcInds);
return 0;
}
|
d961919f6ef991af6829dfa282a5d3f6c0e789f6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Author: Christian Huber, KIT, 23.05.2023 - 26.05.2023
#include <torch/torch.h>
#include <iostream>
#define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x);
//typedef float DTYPE_BASE; // compile with this for fp32
typedef c10::Half DTYPE_BASE; // compile with this for fp16
typedef c10::complex<DTYPE_BASE> DTYPE;
int64_t nextPowerOfTwo(int64_t n) {
// Set all the bits after the most significant bit to 1
n |= n >> 1;
n |= n >> 2;
n |= n >> 4;
n |= n >> 8;
n |= n >> 16;
n |= n >> 32;
n++; // Increase the number by 1 to get the next power of two
return n;
}
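// Examples: nextPowerOfTwo(5) == 8 and nextPowerOfTwo(1000) == 1024; note that an input which is
// already a power of two is rounded UP (nextPowerOfTwo(1024) == 2048) because there is no "n--"
// before the bit-smearing. generalized_cumsum() below asserts when the rounded size exceeds 2048,
// so it handles sequence lengths up to 2047.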
#define LOG_NUM_BANKS 4 // device with 2**LOG_NUM_BANKS shared-memory banks
#define CONFLICT_FREE_OFFSET(n) ((n)>>LOG_NUM_BANKS)
__device__ __forceinline__ void set(DTYPE* temp, int index, DTYPE value) {
temp[index + CONFLICT_FREE_OFFSET(index)] = value;
}
__device__ __forceinline__ void add(DTYPE* temp, int index, DTYPE value) {
temp[index + CONFLICT_FREE_OFFSET(index)] += value;
}
__device__ __forceinline__ DTYPE get(DTYPE* temp, int index) {
return temp[index + CONFLICT_FREE_OFFSET(index)];
}
template <int n, bool reverse>
__global__ void lru_kernel(
torch::PackedTensorAccessor32<DTYPE,2,torch::RestrictPtrTraits> Lambda_exp,
torch::PackedTensorAccessor32<DTYPE,3,torch::RestrictPtrTraits> Bu,
torch::PackedTensorAccessor32<int,1,torch::RestrictPtrTraits> lengths,
torch::PackedTensorAccessor32<DTYPE,3,torch::RestrictPtrTraits> out) {
int length = lengths[blockIdx.x];
extern __shared__ DTYPE temp[];
int offset = 1;
int i = 0;
int thid = threadIdx.x;
if(!reverse) {
if(thid < length) {
set(temp, thid, Bu[blockIdx.x][blockIdx.y][thid]);
}
if(blockDim.x+thid < length) {
set(temp, blockDim.x+thid, Bu[blockIdx.x][blockIdx.y][blockDim.x+thid]);
}
}
else {
if(thid < length) {
set(temp, length-1-thid, Bu[blockIdx.x][blockIdx.y][thid]);
}
if(blockDim.x+thid < length) {
set(temp, length-1-blockDim.x-thid, Bu[blockIdx.x][blockIdx.y][blockDim.x+thid]);
}
}
#pragma unroll
for(int d = n>>1; d > 1; d >>= 1) {
__syncthreads();
if(thid+1 < d) {
DTYPE lambda = Lambda_exp[blockIdx.y][i];
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
add(temp, bi, lambda * get(temp, ai));
}
offset <<= 1;
i++;
}
if(thid==0) {
set(temp, n-1, DTYPE(0,0));
}
#pragma unroll
for(int d = 1; d < n; d <<= 1) {
__syncthreads();
if(thid < d) {
DTYPE lambda = Lambda_exp[blockIdx.y][i];
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
DTYPE t = get(temp, ai);
set(temp, ai, get(temp, bi));
set(temp, bi, lambda * get(temp, bi) + t);
}
offset >>= 1;
i--;
}
__syncthreads();
if(!reverse) {
if(thid < length) {
out[blockIdx.x][blockIdx.y][thid] = get(temp, thid+1);
}
if(blockDim.x+thid < length) {
out[blockIdx.x][blockIdx.y][blockDim.x+thid] = get(temp, blockDim.x+thid+1);
}
}
else {
if(thid < length) {
out[blockIdx.x][blockIdx.y][thid] = get(temp, length-thid);
}
if(blockDim.x+thid < length) {
out[blockIdx.x][blockIdx.y][blockDim.x+thid] = get(temp, length-blockDim.x-thid);
}
}
}
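// The kernel above appears to implement a work-efficient (Blelloch-style) scan in shared memory for
// the per-channel linear recurrence x_t = lambda * x_{t-1} + Bu_t: the first loop is the up-sweep,
// where each tree level folds a lambda-weighted left partial result into the right node, the last
// element is then cleared, and the second loop is the down-sweep that produces an exclusive scan;
// the epilogue reads temp[thid+1] (mirrored when reverse == true) to recover the inclusive result.
// Lambda_exp[channel][i] is assumed to hold the decay factor for tree level i, i.e. lambda^(2^i).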
#define k(n) hipLaunchKernelGGL(( lru_kernel<n,reverse>), dim3(blocks), dim3(n/2), sizeof(DTYPE) * (n+CONFLICT_FREE_OFFSET(n)), 0, \
Lambda_exp.packed_accessor32<DTYPE,2,torch::RestrictPtrTraits>(),\
Bu.packed_accessor32<DTYPE,3,torch::RestrictPtrTraits>(),\
lengths.packed_accessor32<int,1,torch::RestrictPtrTraits>(),\
out.packed_accessor32<DTYPE,3,torch::RestrictPtrTraits>());
template <bool reverse>
void generalized_cumsum(
torch::Tensor Lambda_exp, // N x log(L)
torch::Tensor Bu, // B x N x L
torch::Tensor lengths, // B
torch::Tensor out) { // B x N x L
dim3 blocks(Bu.size(0),Bu.size(1));
int64_t n = nextPowerOfTwo(Bu.size(2));
switch(n) {
case 2:
k(2); break;
case 4:
k(4); break;
case 8:
k(8); break;
case 16:
k(16); break;
case 32:
k(32); break;
case 64:
k(64); break;
case 128:
k(128); break;
case 256:
k(256); break;
case 512:
k(512); break;
case 1024:
k(1024); break;
case 2048:
k(2048); break;
default:
assert(false);
}
}
torch::Tensor blru_forward(
torch::Tensor Lambda_exp, // N x log(L)
torch::Tensor Bu, // B x N x L
torch::Tensor lengths, // B
int direction) { // 0: left-to-right, 1: right-to-left, 2: first half left-to-right, second half right-to-left
CHECK_INPUT(Lambda_exp);
CHECK_INPUT(Bu);
CHECK_INPUT(lengths);
auto out = torch::empty_like(Bu);
if(direction==0) {
generalized_cumsum<false>(Lambda_exp,Bu,lengths,out);
} else if(direction==1) {
generalized_cumsum<true>(Lambda_exp,Bu,lengths,out);
} else if(direction==2) {
generalized_cumsum<false>(Lambda_exp.slice(0,0,Bu.size(1)/2),
Bu.slice(1,0,Bu.size(1)/2),
lengths,
out.slice(1,0,Bu.size(1)/2));
generalized_cumsum<true>(Lambda_exp.slice(0,Bu.size(1)/2,Bu.size(1)),
Bu.slice(1,Bu.size(1)/2,Bu.size(1)),
lengths,
out.slice(1,Bu.size(1)/2,Bu.size(1)));
} else {
assert(false);
}
return out;
}
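// Usage sketch (assuming this file is compiled as a PyTorch C++/CUDA extension; the pybind11
// binding is not shown here):
//   torch::Tensor out = blru_forward(Lambda_exp, Bu, lengths, /*direction=*/0);
// Lambda_exp: N x log2(L) complex decay factors, Bu: B x N x L complex inputs, lengths: valid
// length per batch element; direction selects forward, backward, or split-half bidirectional scans.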
std::vector<torch::Tensor> blru_backward(
torch::Tensor grad_output, // B x N x L
torch::Tensor Lambda_exp, // N x log(L)
torch::Tensor output, // B x N x L
torch::Tensor lengths, // B
int direction) {
CHECK_INPUT(Lambda_exp);
CHECK_INPUT(grad_output);
CHECK_INPUT(lengths);
CHECK_INPUT(output);
//auto d_Bu = torch::empty_like(grad_output);
auto d_Bu = torch::zeros_like(grad_output);
torch::Tensor d_Lambda;
if(direction==0) {
generalized_cumsum<true>(Lambda_exp,grad_output,lengths,d_Bu);
//auto tmp = torch::empty_like(output);
auto tmp = torch::zeros_like(output);
generalized_cumsum<false>(Lambda_exp,output,lengths,tmp);
tmp = tmp.to(at::kComplexFloat);
grad_output = grad_output.to(at::kComplexFloat);
d_Lambda = torch::einsum("bnl,bnl->n", {grad_output.slice(2,1,grad_output.size(2)), tmp.slice(2,0,tmp.size(2)-1)});
} else if(direction==1) {
generalized_cumsum<false>(Lambda_exp,grad_output,lengths,d_Bu);
//auto tmp = torch::empty_like(output);
auto tmp = torch::zeros_like(output);
generalized_cumsum<true>(Lambda_exp,output,lengths,tmp);
tmp = tmp.to(at::kComplexFloat);
grad_output = grad_output.to(at::kComplexFloat);
d_Lambda = torch::einsum("bnl,bnl->n", {grad_output.slice(2,0,grad_output.size(2)-1), tmp.slice(2,1,tmp.size(2))});
} else if(direction==2) {
generalized_cumsum<true>(Lambda_exp.slice(0,0,d_Bu.size(1)/2),
grad_output.slice(1,0,d_Bu.size(1)/2),
lengths,
d_Bu.slice(1,0,d_Bu.size(1)/2));
generalized_cumsum<false>(Lambda_exp.slice(0,d_Bu.size(1)/2,d_Bu.size(1)),
grad_output.slice(1,d_Bu.size(1)/2,d_Bu.size(1)),
lengths,
d_Bu.slice(1,d_Bu.size(1)/2,d_Bu.size(1)));
grad_output = grad_output.to(at::kComplexFloat);
//auto tmp = torch::empty_like(output.slice(1,0,d_Bu.size(1)/2));
auto tmp = torch::zeros_like(output.slice(1,0,d_Bu.size(1)/2));
generalized_cumsum<false>(Lambda_exp.slice(0,0,d_Bu.size(1)/2),
output.slice(1,0,d_Bu.size(1)/2),
lengths,
tmp);
auto tmp2 = tmp.to(at::kComplexFloat);
auto d_Lambda_1 = torch::einsum("bnl,bnl->n", {grad_output.slice(1,0,d_Bu.size(1)/2).slice(2,1,grad_output.size(2)),
tmp2.slice(2,0,tmp2.size(2)-1)});
tmp.zero_();
generalized_cumsum<true>(Lambda_exp.slice(0,d_Bu.size(1)/2,d_Bu.size(1)),
output.slice(1,d_Bu.size(1)/2,d_Bu.size(1)),
lengths,
tmp);
tmp2 = tmp.to(at::kComplexFloat);
auto d_Lambda_2 = torch::einsum("bnl,bnl->n", {grad_output.slice(1,d_Bu.size(1)/2,d_Bu.size(1)).slice(2,0,grad_output.size(2)-1),
tmp2.slice(2,1,tmp2.size(2))});
d_Lambda = torch::cat({d_Lambda_1,d_Lambda_2});
} else {
assert(false);
}
return {d_Lambda, d_Bu};
}
|
d961919f6ef991af6829dfa282a5d3f6c0e789f6.cu
|
// Author: Christian Huber, KIT, 23.05.2023 - 26.05.2023
#include <torch/torch.h>
#include <iostream>
#define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x);
//typedef float DTYPE_BASE; // compile with this for fp32
typedef c10::Half DTYPE_BASE; // compile with this for fp16
typedef c10::complex<DTYPE_BASE> DTYPE;
int64_t nextPowerOfTwo(int64_t n) {
// Set all the bits after the most significant bit to 1
n |= n >> 1;
n |= n >> 2;
n |= n >> 4;
n |= n >> 8;
n |= n >> 16;
n |= n >> 32;
n++; // Increase the number by 1 to get the next power of two
return n;
}
#define LOG_NUM_BANKS 4 // device with 2**LOG_NUM_BANKS shared-memory banks
#define CONFLICT_FREE_OFFSET(n) ((n)>>LOG_NUM_BANKS)
__device__ __forceinline__ void set(DTYPE* temp, int index, DTYPE value) {
temp[index + CONFLICT_FREE_OFFSET(index)] = value;
}
__device__ __forceinline__ void add(DTYPE* temp, int index, DTYPE value) {
temp[index + CONFLICT_FREE_OFFSET(index)] += value;
}
__device__ __forceinline__ DTYPE get(DTYPE* temp, int index) {
return temp[index + CONFLICT_FREE_OFFSET(index)];
}
template <int n, bool reverse>
__global__ void lru_kernel(
torch::PackedTensorAccessor32<DTYPE,2,torch::RestrictPtrTraits> Lambda_exp,
torch::PackedTensorAccessor32<DTYPE,3,torch::RestrictPtrTraits> Bu,
torch::PackedTensorAccessor32<int,1,torch::RestrictPtrTraits> lengths,
torch::PackedTensorAccessor32<DTYPE,3,torch::RestrictPtrTraits> out) {
int length = lengths[blockIdx.x];
extern __shared__ DTYPE temp[];
int offset = 1;
int i = 0;
int thid = threadIdx.x;
if(!reverse) {
if(thid < length) {
set(temp, thid, Bu[blockIdx.x][blockIdx.y][thid]);
}
if(blockDim.x+thid < length) {
set(temp, blockDim.x+thid, Bu[blockIdx.x][blockIdx.y][blockDim.x+thid]);
}
}
else {
if(thid < length) {
set(temp, length-1-thid, Bu[blockIdx.x][blockIdx.y][thid]);
}
if(blockDim.x+thid < length) {
set(temp, length-1-blockDim.x-thid, Bu[blockIdx.x][blockIdx.y][blockDim.x+thid]);
}
}
#pragma unroll
for(int d = n>>1; d > 1; d >>= 1) {
__syncthreads();
if(thid+1 < d) {
DTYPE lambda = Lambda_exp[blockIdx.y][i];
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
add(temp, bi, lambda * get(temp, ai));
}
offset <<= 1;
i++;
}
if(thid==0) {
set(temp, n-1, DTYPE(0,0));
}
#pragma unroll
for(int d = 1; d < n; d <<= 1) {
__syncthreads();
if(thid < d) {
DTYPE lambda = Lambda_exp[blockIdx.y][i];
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
DTYPE t = get(temp, ai);
set(temp, ai, get(temp, bi));
set(temp, bi, lambda * get(temp, bi) + t);
}
offset >>= 1;
i--;
}
__syncthreads();
if(!reverse) {
if(thid < length) {
out[blockIdx.x][blockIdx.y][thid] = get(temp, thid+1);
}
if(blockDim.x+thid < length) {
out[blockIdx.x][blockIdx.y][blockDim.x+thid] = get(temp, blockDim.x+thid+1);
}
}
else {
if(thid < length) {
out[blockIdx.x][blockIdx.y][thid] = get(temp, length-thid);
}
if(blockDim.x+thid < length) {
out[blockIdx.x][blockIdx.y][blockDim.x+thid] = get(temp, length-blockDim.x-thid);
}
}
}
#define k(n) lru_kernel<n,reverse><<<blocks, n/2, sizeof(DTYPE) * (n+CONFLICT_FREE_OFFSET(n))>>>(\
Lambda_exp.packed_accessor32<DTYPE,2,torch::RestrictPtrTraits>(),\
Bu.packed_accessor32<DTYPE,3,torch::RestrictPtrTraits>(),\
lengths.packed_accessor32<int,1,torch::RestrictPtrTraits>(),\
out.packed_accessor32<DTYPE,3,torch::RestrictPtrTraits>());
template <bool reverse>
void generalized_cumsum(
torch::Tensor Lambda_exp, // N x log(L)
torch::Tensor Bu, // B x N x L
torch::Tensor lengths, // B
torch::Tensor out) { // B x N x L
dim3 blocks(Bu.size(0),Bu.size(1));
int64_t n = nextPowerOfTwo(Bu.size(2));
switch(n) {
case 2:
k(2); break;
case 4:
k(4); break;
case 8:
k(8); break;
case 16:
k(16); break;
case 32:
k(32); break;
case 64:
k(64); break;
case 128:
k(128); break;
case 256:
k(256); break;
case 512:
k(512); break;
case 1024:
k(1024); break;
case 2048:
k(2048); break;
default:
assert(false);
}
}
torch::Tensor blru_forward(
torch::Tensor Lambda_exp, // N x log(L)
torch::Tensor Bu, // B x N x L
torch::Tensor lengths, // B
int direction) { // 0: left-to-right, 1: right-to-left, 2: first half left-to-right, second half right-to-left
CHECK_INPUT(Lambda_exp);
CHECK_INPUT(Bu);
CHECK_INPUT(lengths);
auto out = torch::empty_like(Bu);
if(direction==0) {
generalized_cumsum<false>(Lambda_exp,Bu,lengths,out);
} else if(direction==1) {
generalized_cumsum<true>(Lambda_exp,Bu,lengths,out);
} else if(direction==2) {
generalized_cumsum<false>(Lambda_exp.slice(0,0,Bu.size(1)/2),
Bu.slice(1,0,Bu.size(1)/2),
lengths,
out.slice(1,0,Bu.size(1)/2));
generalized_cumsum<true>(Lambda_exp.slice(0,Bu.size(1)/2,Bu.size(1)),
Bu.slice(1,Bu.size(1)/2,Bu.size(1)),
lengths,
out.slice(1,Bu.size(1)/2,Bu.size(1)));
} else {
assert(false);
}
return out;
}
std::vector<torch::Tensor> blru_backward(
torch::Tensor grad_output, // B x N x L
torch::Tensor Lambda_exp, // N x log(L)
torch::Tensor output, // B x N x L
torch::Tensor lengths, // B
int direction) {
CHECK_INPUT(Lambda_exp);
CHECK_INPUT(grad_output);
CHECK_INPUT(lengths);
CHECK_INPUT(output);
//auto d_Bu = torch::empty_like(grad_output);
auto d_Bu = torch::zeros_like(grad_output);
torch::Tensor d_Lambda;
if(direction==0) {
generalized_cumsum<true>(Lambda_exp,grad_output,lengths,d_Bu);
//auto tmp = torch::empty_like(output);
auto tmp = torch::zeros_like(output);
generalized_cumsum<false>(Lambda_exp,output,lengths,tmp);
tmp = tmp.to(at::kComplexFloat);
grad_output = grad_output.to(at::kComplexFloat);
d_Lambda = torch::einsum("bnl,bnl->n", {grad_output.slice(2,1,grad_output.size(2)), tmp.slice(2,0,tmp.size(2)-1)});
} else if(direction==1) {
generalized_cumsum<false>(Lambda_exp,grad_output,lengths,d_Bu);
//auto tmp = torch::empty_like(output);
auto tmp = torch::zeros_like(output);
generalized_cumsum<true>(Lambda_exp,output,lengths,tmp);
tmp = tmp.to(at::kComplexFloat);
grad_output = grad_output.to(at::kComplexFloat);
d_Lambda = torch::einsum("bnl,bnl->n", {grad_output.slice(2,0,grad_output.size(2)-1), tmp.slice(2,1,tmp.size(2))});
} else if(direction==2) {
generalized_cumsum<true>(Lambda_exp.slice(0,0,d_Bu.size(1)/2),
grad_output.slice(1,0,d_Bu.size(1)/2),
lengths,
d_Bu.slice(1,0,d_Bu.size(1)/2));
generalized_cumsum<false>(Lambda_exp.slice(0,d_Bu.size(1)/2,d_Bu.size(1)),
grad_output.slice(1,d_Bu.size(1)/2,d_Bu.size(1)),
lengths,
d_Bu.slice(1,d_Bu.size(1)/2,d_Bu.size(1)));
grad_output = grad_output.to(at::kComplexFloat);
//auto tmp = torch::empty_like(output.slice(1,0,d_Bu.size(1)/2));
auto tmp = torch::zeros_like(output.slice(1,0,d_Bu.size(1)/2));
generalized_cumsum<false>(Lambda_exp.slice(0,0,d_Bu.size(1)/2),
output.slice(1,0,d_Bu.size(1)/2),
lengths,
tmp);
auto tmp2 = tmp.to(at::kComplexFloat);
auto d_Lambda_1 = torch::einsum("bnl,bnl->n", {grad_output.slice(1,0,d_Bu.size(1)/2).slice(2,1,grad_output.size(2)),
tmp2.slice(2,0,tmp2.size(2)-1)});
tmp.zero_();
generalized_cumsum<true>(Lambda_exp.slice(0,d_Bu.size(1)/2,d_Bu.size(1)),
output.slice(1,d_Bu.size(1)/2,d_Bu.size(1)),
lengths,
tmp);
tmp2 = tmp.to(at::kComplexFloat);
auto d_Lambda_2 = torch::einsum("bnl,bnl->n", {grad_output.slice(1,d_Bu.size(1)/2,d_Bu.size(1)).slice(2,0,grad_output.size(2)-1),
tmp2.slice(2,1,tmp2.size(2))});
d_Lambda = torch::cat({d_Lambda_1,d_Lambda_2});
} else {
assert(false);
}
return {d_Lambda, d_Bu};
}
|
f6ea14d0bcff5c3d743499963553bea5b31199fb.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
// Parts of this code sourced from SnopyDogy
// https://gist.github.com/SnopyDogy/a9a22497a893ec86aa3e
#if defined(WITH_GRAPHICS)
#include <Array.hpp>
#include <image.hpp>
#include <err_cuda.hpp>
#include <debug_cuda.hpp>
#include <interopManager.hpp>
using af::dim4;
namespace cuda
{
template<typename T>
void copy_image(const Array<T> &in, const fg::Image* image)
{
InteropManager& intrpMngr = InteropManager::getInstance();
cudaGraphicsResource *cudaPBOResource = intrpMngr.getBufferResource(image);
const T *d_X = in.get();
// Map resource. Copy data to PBO. Unmap resource.
size_t num_bytes;
T* d_pbo = NULL;
hipGraphicsMapResources(1, &cudaPBOResource, 0);
hipGraphicsResourceGetMappedPointer((void **)&d_pbo, &num_bytes, cudaPBOResource);
hipMemcpyAsync(d_pbo, d_X, num_bytes, hipMemcpyDeviceToDevice,
cuda::getStream(cuda::getActiveDeviceId()));
hipGraphicsUnmapResources(1, &cudaPBOResource, 0);
POST_LAUNCH_CHECK();
CheckGL("After cuda resource copy");
}
#define INSTANTIATE(T) \
template void copy_image<T>(const Array<T> &in, const fg::Image* image);
INSTANTIATE(float)
INSTANTIATE(double)
INSTANTIATE(int)
INSTANTIATE(uint)
INSTANTIATE(uchar)
INSTANTIATE(char)
INSTANTIATE(ushort)
INSTANTIATE(short)
}
#endif
|
f6ea14d0bcff5c3d743499963553bea5b31199fb.cu
|
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
// Parts of this code sourced from SnopyDogy
// https://gist.github.com/SnopyDogy/a9a22497a893ec86aa3e
#if defined(WITH_GRAPHICS)
#include <Array.hpp>
#include <image.hpp>
#include <err_cuda.hpp>
#include <debug_cuda.hpp>
#include <interopManager.hpp>
using af::dim4;
namespace cuda
{
template<typename T>
void copy_image(const Array<T> &in, const fg::Image* image)
{
InteropManager& intrpMngr = InteropManager::getInstance();
cudaGraphicsResource *cudaPBOResource = intrpMngr.getBufferResource(image);
const T *d_X = in.get();
// Map resource. Copy data to PBO. Unmap resource.
size_t num_bytes;
T* d_pbo = NULL;
cudaGraphicsMapResources(1, &cudaPBOResource, 0);
cudaGraphicsResourceGetMappedPointer((void **)&d_pbo, &num_bytes, cudaPBOResource);
cudaMemcpyAsync(d_pbo, d_X, num_bytes, cudaMemcpyDeviceToDevice,
cuda::getStream(cuda::getActiveDeviceId()));
cudaGraphicsUnmapResources(1, &cudaPBOResource, 0);
POST_LAUNCH_CHECK();
CheckGL("After cuda resource copy");
}
#define INSTANTIATE(T) \
template void copy_image<T>(const Array<T> &in, const fg::Image* image);
INSTANTIATE(float)
INSTANTIATE(double)
INSTANTIATE(int)
INSTANTIATE(uint)
INSTANTIATE(uchar)
INSTANTIATE(char)
INSTANTIATE(ushort)
INSTANTIATE(short)
}
#endif
|
f598e1e46d593e100748991380ce9315ac7a34c8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <wb.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#define T_Width 16
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) {
//@@ Insert code to implement basic matrix multiplication here
//@@ Do not use shared memory to write this kernel
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
if ((Row < numARows) && (Col < numBColumns)) {
float Pvalue = 0;
for (int k = 0; k < numAColumns; k++) {
Pvalue += A[Row*numAColumns + k] * B[k*numBColumns + Col];
C[Row*numBColumns + Col] = Pvalue;
}
}
}
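// Note: the kernel above assigns one thread per element of C; the store to C inside the k-loop is
// functionally correct (the last iteration writes the full dot product) but redundant; hoisting the
// write after the loop would avoid repeated global-memory stores.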
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
hostC = NULL;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
wbTime_stop(Generic, "Importing data and creating memory on host");
int Allo_C = numCRows * numCColumns * sizeof(float);
hostC = (float*)malloc(numCRows * numCColumns * sizeof(float));
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
//@@ Allocate GPU memory here
wbTime_start(GPU, "Allocating GPU memory.");
int Allo_A = sizeof(float) * numARows * numAColumns;
int Allo_B = sizeof(float) * numBRows * numBColumns;
hipMalloc((void **)&deviceA, Allo_A);
hipMalloc((void **)&deviceB, Allo_B);
hipMalloc((void **)&deviceC, Allo_C);
wbTime_stop(GPU, "Allocating GPU memory.");
//@@ Copy memory to the GPU here
wbTime_start(GPU, "Copying input memory to the GPU.");
hipMemcpy(deviceA, hostA, Allo_A, hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, Allo_B, hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimBlock(T_Width, T_Width, 1);
dim3 DimGrid((numBColumns - 1) / T_Width + 1, (numARows - 1) / T_Width + 1, 1);
//@@ Launch the GPU Kernel here
wbTime_start(Compute, "Performing CUDA computation");
hipLaunchKernelGGL(( matrixMultiply), dim3(DimGrid), dim3(DimBlock), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
//@@ Copy the GPU memory back to the CPU here
wbTime_start(Copy, "Copying output memory to the CPU");
hipMemcpy(hostC, deviceC, Allo_C, hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
f598e1e46d593e100748991380ce9315ac7a34c8.cu
|
#include <wb.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#define T_Width 16
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) {
//@@ Insert code to implement basic matrix multiplication here
//@@ Do not use shared memory to write this kernel
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
if ((Row < numARows) && (Col < numBColumns)) {
float Pvalue = 0;
for (int k = 0; k < numAColumns; k++) {
Pvalue += A[Row*numAColumns + k] * B[k*numBColumns + Col];
C[Row*numBColumns + Col] = Pvalue;
}
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
hostC = NULL;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
wbTime_stop(Generic, "Importing data and creating memory on host");
int Allo_C = numCRows * numCColumns * sizeof(float);
hostC = (float*)malloc(numCRows * numCColumns * sizeof(float));
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
//@@ Allocate GPU memory here
wbTime_start(GPU, "Allocating GPU memory.");
int Allo_A = sizeof(float) * numARows * numAColumns;
int Allo_B = sizeof(float) * numBRows * numBColumns;
cudaMalloc((void **)&deviceA, Allo_A);
cudaMalloc((void **)&deviceB, Allo_B);
cudaMalloc((void **)&deviceC, Allo_C);
wbTime_stop(GPU, "Allocating GPU memory.");
//@@ Copy memory to the GPU here
wbTime_start(GPU, "Copying input memory to the GPU.");
cudaMemcpy(deviceA, hostA, Allo_A, cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, Allo_B, cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimBlock(T_Width, T_Width, 1);
dim3 DimGrid((numBColumns - 1) / T_Width + 1, (numARows - 1) / T_Width + 1, 1);
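// Ceiling division ((X - 1) / T_Width + 1) so partially filled edge tiles are still covered by a block.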
//@@ Launch the GPU Kernel here
wbTime_start(Compute, "Performing CUDA computation");
matrixMultiply<<<DimGrid, DimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
//@@ Copy the GPU memory back to the CPU here
wbTime_start(Copy, "Copying output memory to the CPU");
cudaMemcpy(hostC, deviceC, Allo_C, cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
da1d236d165662f84e0de63fd63a7df6c2d719b5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/accuracy_kernel.h"
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
using phi::PADDLE_CUDA_NUM_THREADS;
template <int BlockSize, typename T>
__global__ void AccuracyCudaKernel(const int N,
const int D,
const int64_t* Xdata,
const int64_t* labeldata,
int* correct_data,
T* accuracy,
int* total_data) {
int count = 0;
__shared__ int total[BlockSize];
// support only 1 block
for (int i = threadIdx.x; i < (N); i += BlockSize) {
for (int j = 0; j < D; ++j) {
if (Xdata[i * D + j] == labeldata[i]) {
++count;
break;
}
}
}
total[threadIdx.x] = count;
__syncthreads();
// reduce the count with init value 0, and output accuracy.
#ifdef PADDLE_WITH_CUDA
int result = thrust::reduce(thrust::device, total, total + BlockSize, 0);
#else
// HIP thrust::reduce not support __device__
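// Fallback: plain shared-memory tree reduction; it assumes BlockSize is a power of two so halving the stride visits every slot.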
for (int s = BlockSize / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
total[threadIdx.x] += total[threadIdx.x + s];
}
__syncthreads();
}
int result = total[0];
#endif
if (threadIdx.x == 0) {
*correct_data = result;
*accuracy = static_cast<T>(result) / static_cast<T>(N);
*total_data = N;
}
}
template <typename T, typename Context>
void AccuracyRawKernel(const Context& dev_ctx,
const DenseTensor& inference,
const DenseTensor& indices,
const DenseTensor& label,
DenseTensor* accuracy,
DenseTensor* correct,
DenseTensor* total) {
// FIXME(typhoonzero): only support indices currently
// if add support for output values, how to detect the data type?
const int64_t* indices_data = indices.data<int64_t>();
const int64_t* label_data = label.data<int64_t>();
PADDLE_ENFORCE_EQ(
inference.dims().size(),
2,
phi::errors::InvalidArgument(
"Rank(Input) of AccuracyOp must be 2, with shape "
"[sample_number, class_dim], But received rank(Input) is %d",
inference.dims().size()));
int* correct_data = dev_ctx.template Alloc<int>(correct);
int* total_data = dev_ctx.template Alloc<int>(total);
T* accuracy_data = dev_ctx.template Alloc<T>(accuracy);
int num_samples = static_cast<int>(inference.dims()[0]);
size_t infer_width = inference.dims()[1];
auto stream = dev_ctx.stream();
phi::backends::gpu::GpuMemsetAsync(accuracy_data, 0, sizeof(T), stream);
PADDLE_ENFORCE_GT(label.dims().size(),
0,
phi::errors::InvalidArgument(
"Rank(Label) of AccuracyOp must greater than 0, "
"But received rank(Label) is %d",
label.dims().size()));
PADDLE_ENFORCE_GE(
label.dims()[0],
inference.dims()[0],
phi::errors::InvalidArgument("num_samples(%d) of Label should less than "
"or equal to num_samples(%d) of Input",
label.dims()[0],
num_samples));
if (num_samples == 0) {
return;
}
hipLaunchKernelGGL(( AccuracyCudaKernel<PADDLE_CUDA_NUM_THREADS, T>)
, dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, num_samples,
infer_width,
indices_data,
label_data,
correct_data,
accuracy_data,
total_data);
}
} // namespace phi
// FIXME(typhoonzero): types of T is for inference data.
// label data is always int64
PD_REGISTER_KERNEL(accuracy,
GPU,
ALL_LAYOUT,
phi::AccuracyRawKernel,
phi::dtype::float16,
float,
double) {
kernel->InputAt(1).SetDataType(phi::DataType::INT64);
kernel->InputAt(2).SetDataType(phi::DataType::INT64);
kernel->OutputAt(1).SetDataType(phi::DataType::INT64);
kernel->OutputAt(2).SetDataType(phi::DataType::INT64);
}
|
da1d236d165662f84e0de63fd63a7df6c2d719b5.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/accuracy_kernel.h"
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
using phi::PADDLE_CUDA_NUM_THREADS;
template <int BlockSize, typename T>
__global__ void AccuracyCudaKernel(const int N,
const int D,
const int64_t* Xdata,
const int64_t* labeldata,
int* correct_data,
T* accuracy,
int* total_data) {
int count = 0;
__shared__ int total[BlockSize];
// support only 1 block
for (int i = threadIdx.x; i < (N); i += BlockSize) {
for (int j = 0; j < D; ++j) {
if (Xdata[i * D + j] == labeldata[i]) {
++count;
break;
}
}
}
total[threadIdx.x] = count;
__syncthreads();
// reduce the count with init value 0, and output accuracy.
#ifdef PADDLE_WITH_CUDA
int result = thrust::reduce(thrust::device, total, total + BlockSize, 0);
#else
// HIP thrust::reduce not support __device__
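// Fallback: plain shared-memory tree reduction; it assumes BlockSize is a power of two so halving the stride visits every slot.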
for (int s = BlockSize / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
total[threadIdx.x] += total[threadIdx.x + s];
}
__syncthreads();
}
int result = total[0];
#endif
if (threadIdx.x == 0) {
*correct_data = result;
*accuracy = static_cast<T>(result) / static_cast<T>(N);
*total_data = N;
}
}
template <typename T, typename Context>
void AccuracyRawKernel(const Context& dev_ctx,
const DenseTensor& inference,
const DenseTensor& indices,
const DenseTensor& label,
DenseTensor* accuracy,
DenseTensor* correct,
DenseTensor* total) {
// FIXME(typhoonzero): only support indices currently
// if add support for output values, how to detect the data type?
const int64_t* indices_data = indices.data<int64_t>();
const int64_t* label_data = label.data<int64_t>();
PADDLE_ENFORCE_EQ(
inference.dims().size(),
2,
phi::errors::InvalidArgument(
"Rank(Input) of AccuracyOp must be 2, with shape "
"[sample_number, class_dim], But received rank(Input) is %d",
inference.dims().size()));
int* correct_data = dev_ctx.template Alloc<int>(correct);
int* total_data = dev_ctx.template Alloc<int>(total);
T* accuracy_data = dev_ctx.template Alloc<T>(accuracy);
int num_samples = static_cast<int>(inference.dims()[0]);
size_t infer_width = inference.dims()[1];
auto stream = dev_ctx.stream();
phi::backends::gpu::GpuMemsetAsync(accuracy_data, 0, sizeof(T), stream);
PADDLE_ENFORCE_GT(label.dims().size(),
0,
phi::errors::InvalidArgument(
"Rank(Label) of AccuracyOp must greater than 0, "
"But received rank(Label) is %d",
label.dims().size()));
PADDLE_ENFORCE_GE(
label.dims()[0],
inference.dims()[0],
phi::errors::InvalidArgument("num_samples(%d) of Label should less than "
"or equal to num_samples(%d) of Input",
label.dims()[0],
num_samples));
if (num_samples == 0) {
return;
}
AccuracyCudaKernel<PADDLE_CUDA_NUM_THREADS, T>
<<<1, PADDLE_CUDA_NUM_THREADS, 0, stream>>>(num_samples,
infer_width,
indices_data,
label_data,
correct_data,
accuracy_data,
total_data);
}
} // namespace phi
// FIXME(typhoonzero): types of T is for inference data.
// label data is always int64
PD_REGISTER_KERNEL(accuracy,
GPU,
ALL_LAYOUT,
phi::AccuracyRawKernel,
phi::dtype::float16,
float,
double) {
kernel->InputAt(1).SetDataType(phi::DataType::INT64);
kernel->InputAt(2).SetDataType(phi::DataType::INT64);
kernel->OutputAt(1).SetDataType(phi::DataType::INT64);
kernel->OutputAt(2).SetDataType(phi::DataType::INT64);
}
|
569943b1f85f0640254c21483a902adf569ec6e0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "b.h"
__global__ void foo (void) {
__shared__ int a[N];
a[threadIdx.x] = threadIdx.x; __syncthreads();
g[threadIdx.x] = a[blockDim.x - threadIdx.x - 1];
bar();
}
int main (void) {
unsigned int i;
int *dg, hg[N];
int sum = 0;
hipLaunchKernelGGL(( foo), dim3(1), dim3(N), 0, 0, );
if(hipGetSymbolAddress((void**)&dg, g)){
printf("couldn't get the symbol addr\n");
return 1;
}
if(hipMemcpy(hg, dg, N * sizeof(int), hipMemcpyDeviceToHost)){
printf("couldn't memcpy\n");
return 1;
}
for (i = 0; i < N; i++) {
sum += hg[i];
}
if (sum == 36) {
printf("PASSED\n");
} else {
printf("FAILED (%d)\n", sum);
} return 0;
}
|
569943b1f85f0640254c21483a902adf569ec6e0.cu
|
#include <stdio.h>
#include "b.h"
__global__ void foo (void) {
__shared__ int a[N];
a[threadIdx.x] = threadIdx.x; __syncthreads();
g[threadIdx.x] = a[blockDim.x - threadIdx.x - 1];
bar();
}
int main (void) {
unsigned int i;
int *dg, hg[N];
int sum = 0;
foo<<<1, N>>>();
if(cudaGetSymbolAddress((void**)&dg, g)){
printf("couldn't get the symbol addr\n");
return 1;
}
if(cudaMemcpy(hg, dg, N * sizeof(int), cudaMemcpyDeviceToHost)){
printf("couldn't memcpy\n");
return 1;
}
for (i = 0; i < N; i++) {
sum += hg[i];
}
if (sum == 36) {
printf("PASSED\n");
} else {
printf("FAILED (%d)\n", sum);
} return 0;
}
|
a523e98c6df9be9e8e177916b2fe352d24577de6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void encode_cols_kernel(float *a, uint32_t* b, int m, int n) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
int i32 = i*ENCODE_BITS;
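// Each thread packs the signs of up to ENCODE_BITS consecutive rows of column j into one 32-bit word: bit k is set when a[j + n*(i32+k)] > 0.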
if (j < n && i32 < m) {
uint32_t r = 0;
for(int k = 0; j + n * (i32 + k)< m * n && k < ENCODE_BITS; k++){
r |= (a[j + n * (i32 + k)]>0)<<k;
}
b[j + n * i] = r;
}
}
|
a523e98c6df9be9e8e177916b2fe352d24577de6.cu
|
#include "includes.h"
__global__ void encode_cols_kernel(float *a, uint32_t* b, int m, int n) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
int i32 = i*ENCODE_BITS;
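// Each thread packs the signs of up to ENCODE_BITS consecutive rows of column j into one 32-bit word: bit k is set when a[j + n*(i32+k)] > 0.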
if (j < n && i32 < m) {
uint32_t r = 0;
for(int k = 0; j + n * (i32 + k)< m * n && k < ENCODE_BITS; k++){
r |= (a[j + n * (i32 + k)]>0)<<k;
}
b[j + n * i] = r;
}
}
|
97515184a037bae4dbae547bc04c41f3199f47bd.hip
|
// !!! This is a file automatically generated by hipify!!!
// stl
#include <stdio.h>
#include <iostream>
#include <vector>
#include <algorithm>
// cuda
#include "hip/hip_runtime.h"
#include "cusolverRf.h"
#include "rocblas.h"
#include "cutil_math.h"
#include "cutil_matrix.h"
/*
Compute the principle curvatures for all points in the vector
@param points - array with all points of type float3
@param normals - array with all normal vectors
@param number_nn - integer with the number of nearest neighbors for each point
@param nn_indices - array with all nearest neighbor indices
@param dst_curvatures - vector of type float2 with all curvatures per point
*/
__global__ void cuComputePrincipleCurvature( float3* points, float3* normals, int number_nn, int* nn_indices, float2* dst_curvatures)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
float3 N = normals[index];
float3 P = points[index];
cuMatrix3 I = cuMatrix3::Identity();
cuMatrix3 M = I - makeMatrix(N, N);
float3 normal;
float3 centroid;
centroid.x = 0.0; centroid.y = 0.0; centroid.z = 0.0;
float3 projected_normals[24];
for (size_t idx = 1; idx < number_nn; idx++)
{
int n_idx = nn_indices[number_nn* index + idx];
normal = normals[n_idx];
projected_normals[idx -1] = M * normal;
centroid += projected_normals[idx - 1];
}
// estimate the centroid
float nn_indices_float = (float)(number_nn);
centroid /= ((float)(number_nn)- 1.0);
cuMatrix3 covariance_matrix;
covariance_matrix.setZero();
float mean_xy, mean_xz, mean_yz;
float3 mean;
for (size_t idx = 0; idx < number_nn-1; idx++)
{
mean = projected_normals[idx] - centroid;
mean_xy = mean.x * mean.y;
mean_xz = mean.x * mean.z;
mean_yz = mean.y * mean.z;
covariance_matrix.data[0] += mean.x * mean.x; // accumulate like the other entries ('=' discarded all but the last neighbour's contribution)
covariance_matrix.data[3] += (float)(mean_xy);
covariance_matrix.data[6] += (float)(mean_xz);
covariance_matrix.data[1] += (float)(mean_xy);
covariance_matrix.data[4] += mean.y * mean.y;
covariance_matrix.data[7] += (float)(mean_yz);
covariance_matrix.data[2] += (float)(mean_xz);
covariance_matrix.data[5] += (float)(mean_yz);
covariance_matrix.data[8] += mean.z * mean.z;
}
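// NOTE: the eigen-decomposition of covariance_matrix and the write to dst_curvatures
// are not ported yet; the kernel currently stops after accumulating the covariance
// matrix (see the commented Eigen reference implementation below).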
/*
Matrix I
Eigen::Matrix3f I = Eigen::Matrix3f::Identity();
// project matrix into tangent plane
Eigen::Matrix3f M = I - normal_idx * normal_idx.transpose();
// project normals into tangent plane
Eigen::Vector3f normal;
Eigen::Vector3f centroid;
centroid.setZero();
vector<Eigen::Vector3f> projected_normals(nn_indices.size() - 1);
for (size_t idx = 1; idx < nn_indices.size(); idx++) // the first one is the point itself in the nn matrix
{
// osg to eigen
Eigen::Vector3f eigen_normal(normals[nn_indices[idx]].x(), normals[nn_indices[idx]].y(), normals[nn_indices[idx]].z());
normal = eigen_normal;
projected_normals[idx - 1] = M * normal;
centroid += projected_normals[idx - 1];
}
// estimate the centroid
centroid /= static_cast<float> (nn_indices.size() - 1);
Eigen::Matrix3f covariance_matrix;
covariance_matrix.setZero();
double mean_xy, mean_xz, mean_yz;
Eigen::Vector3f mean;
for (size_t idx = 0; idx < projected_normals.size(); idx++)
{
mean = projected_normals[idx] - centroid;
mean_xy = mean[0] * mean[1];
mean_xz = mean[0] * mean[2];
mean_yz = mean[1] * mean[2];
covariance_matrix(0, 0) += mean[0] * mean[0];
covariance_matrix(0, 1) += static_cast<float>(mean_xy);
covariance_matrix(0, 2) += static_cast<float>(mean_xz);
covariance_matrix(1, 0) += static_cast<float>(mean_xy);
covariance_matrix(1, 1) += mean[1] * mean[1];
covariance_matrix(1, 2) += static_cast<float>(mean_yz);
covariance_matrix(2, 0) += static_cast<float>(mean_xz);
covariance_matrix(2, 1) += static_cast<float>(mean_yz);
covariance_matrix(2, 2) += mean[2] * mean[2];
}
//extract the eigenvalues and eigenvectors
EigenSolver<Eigen::Matrix3f> es(covariance_matrix);
Eigen::Matrix3f eigenvalues = es.pseudoEigenvalueMatrix();
Eigen::Matrix3f eigenvectors = es.pseudoEigenvectors();
float index_size = 1.0 / static_cast<float> (projected_normals.size());
vector<float> ev(3);
ev[0] = eigenvalues(0);
ev[1] = eigenvalues(4);
ev[2] = eigenvalues(8);
std::sort(ev.begin(), ev.end());
pc1 = ev[2] * index_size;
pc2 = ev[1] * index_size;
return true;
*/
}
|
97515184a037bae4dbae547bc04c41f3199f47bd.cu
|
// stl
#include <stdio.h>
#include <iostream>
#include <vector>
#include <algorithm>
// cuda
#include "cuda_runtime.h"
#include "cusolverRf.h"
#include "cublas_v2.h"
#include "cutil_math.h"
#include "cutil_matrix.h"
/*
Compute the principle curvatures for all points in the vector
@param points - array with all points of type float3
@param normals - array with all normal vectors
@param number_nn - integer with the number of nearest neighbors for each point
@param nn_indices - array with all nearest neighbor indices
@param dst_curvatures - vector of type float2 with all curvatures per point
*/
__global__ void cuComputePrincipleCurvature( float3* points, float3* normals, int number_nn, int* nn_indices, float2* dst_curvatures)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
float3 N = normals[index];
float3 P = points[index];
cuMatrix3 I = cuMatrix3::Identity();
cuMatrix3 M = I - makeMatrix(N, N);
float3 normal;
float3 centroid;
centroid.x = 0.0; centroid.y = 0.0; centroid.z = 0.0;
float3 projected_normals[24];
for (size_t idx = 1; idx < number_nn; idx++)
{
int n_idx = nn_indices[number_nn* index + idx];
normal = normals[n_idx];
projected_normals[idx -1] = M * normal;
centroid += projected_normals[idx - 1];
}
// estimate the centroid
float nn_indices_float = (float)(number_nn);
centroid /= ((float)(number_nn)- 1.0);
cuMatrix3 covariance_matrix;
covariance_matrix.setZero();
float mean_xy, mean_xz, mean_yz;
float3 mean;
for (size_t idx = 0; idx < number_nn-1; idx++)
{
mean = projected_normals[idx] - centroid;
mean_xy = mean.x * mean.y;
mean_xz = mean.x * mean.z;
mean_yz = mean.y * mean.z;
covariance_matrix.data[0] += mean.x * mean.x; // accumulate like the other entries ('=' discarded all but the last neighbour's contribution)
covariance_matrix.data[3] += (float)(mean_xy);
covariance_matrix.data[6] += (float)(mean_xz);
covariance_matrix.data[1] += (float)(mean_xy);
covariance_matrix.data[4] += mean.y * mean.y;
covariance_matrix.data[7] += (float)(mean_yz);
covariance_matrix.data[2] += (float)(mean_xz);
covariance_matrix.data[5] += (float)(mean_yz);
covariance_matrix.data[8] += mean.z * mean.z;
}
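// NOTE: the eigen-decomposition of covariance_matrix and the write to dst_curvatures
// are not ported yet; the kernel currently stops after accumulating the covariance
// matrix (see the commented Eigen reference implementation below).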
/*
Matrix I
Eigen::Matrix3f I = Eigen::Matrix3f::Identity();
// project matrix into tangent plane
Eigen::Matrix3f M = I - normal_idx * normal_idx.transpose();
// project normals into tangent plane
Eigen::Vector3f normal;
Eigen::Vector3f centroid;
centroid.setZero();
vector<Eigen::Vector3f> projected_normals(nn_indices.size() - 1);
for (size_t idx = 1; idx < nn_indices.size(); idx++) // the first one is the point itself in the nn matrix
{
// osg to eigen
Eigen::Vector3f eigen_normal(normals[nn_indices[idx]].x(), normals[nn_indices[idx]].y(), normals[nn_indices[idx]].z());
normal = eigen_normal;
projected_normals[idx - 1] = M * normal;
centroid += projected_normals[idx - 1];
}
// estimate the centroid
centroid /= static_cast<float> (nn_indices.size() - 1);
Eigen::Matrix3f covariance_matrix;
covariance_matrix.setZero();
double mean_xy, mean_xz, mean_yz;
Eigen::Vector3f mean;
for (size_t idx = 0; idx < projected_normals.size(); idx++)
{
mean = projected_normals[idx] - centroid;
mean_xy = mean[0] * mean[1];
mean_xz = mean[0] * mean[2];
mean_yz = mean[1] * mean[2];
covariance_matrix(0, 0) += mean[0] * mean[0];
covariance_matrix(0, 1) += static_cast<float>(mean_xy);
covariance_matrix(0, 2) += static_cast<float>(mean_xz);
covariance_matrix(1, 0) += static_cast<float>(mean_xy);
covariance_matrix(1, 1) += mean[1] * mean[1];
covariance_matrix(1, 2) += static_cast<float>(mean_yz);
covariance_matrix(2, 0) += static_cast<float>(mean_xz);
covariance_matrix(2, 1) += static_cast<float>(mean_yz);
covariance_matrix(2, 2) += mean[2] * mean[2];
}
//extract the eigenvalues and eigenvectors
EigenSolver<Eigen::Matrix3f> es(covariance_matrix);
Eigen::Matrix3f eigenvalues = es.pseudoEigenvalueMatrix();
Eigen::Matrix3f eigenvectors = es.pseudoEigenvectors();
float index_size = 1.0 / static_cast<float> (projected_normals.size());
vector<float> ev(3);
ev[0] = eigenvalues(0);
ev[1] = eigenvalues(4);
ev[2] = eigenvalues(8);
std::sort(ev.begin(), ev.end());
pc1 = ev[2] * index_size;
pc2 = ev[1] * index_size;
return true;
*/
}
|
6fc4ad66203ee0f5675131795fa3bac3866170a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*//---------------------------------------------------------------------------------------------------------
|
| Test di Kernel simili che utilizzano memoria in modo diverso ( mostrare il diverso tempo di esecuzione)
|
*///---------------------------------------------------------------------------------------------------------
//OGNI thread alloca 5 unsigned int e 2 float ---> 7*4= 28 byte
#include <iostream>
#include <stdio.h>
using namespace std;
#include <cstdlib>
#include <stdlib.h> /* srand, rand */ //http://www.cplusplus.com/reference/cstdlib/rand/
#include <fstream> //http://www.cplusplus.com/doc/tutorial/files/
#define DIM_GRID 128
#define DIM_BLOCK 128
#define THREAD_PER_BLOCK 128
#include "condensation_tentative.cuh"
//kernel che copia una colonna della matrice in un segmento lineare di memoria
/*
input:
puntatore destinatatio
puntatore matricepartenza
indice della colonna da copiare
numero di elementi
*/
__global__ void copy_Col( float *a, float *A,int j1, int numAColumns, int numARow){
int tid = blockIdx.x*blockDim.x + threadIdx.x; //identità del thread (al solito metto in fila ogni thread a partire dal primo blocco)
int Nthreads = blockDim.x * gridDim.x; //Numero totale di threads istanziati dalla chiamata del kernel
while(tid<numARow){
a[tid]=A[tid*numAColumns + j1]; //qui copia
tid+= Nthreads; // se il vettore ha più elementi del numero di thread chiamati faccio si che ogni thread sommi più di un elemento
}
}
void print(float *A, int ROW, int COL){
for(int i=0 ; i<ROW ; i++){
for(int j=0 ; j<COL ; j++){
//printf(" %.1f ",A[i*COL+j]);
cout<< A[i*COL+j] << " ";
}
cout<<endl;
}
cout<<endl;
}
int main(void){
int ROW=10000;
int COL=ROW;
int N = COL*ROW;
int Nb= (ROW-1)*(COL-1);
srand (time(NULL));
//Punto La memoria
float * A_host; float *B_host;
float * A_dev; float *B_dev;
//Alloco la matrice
A_host = new float[N] ();
B_host = new float[Nb] ();
//Alloco le matrice device (riservo la memoria richiesta)
unsigned int ByteSize = N*sizeof(float);
hipMalloc( (void **)&A_dev, ByteSize );
ByteSize = Nb*sizeof(float);
hipMalloc( (void **)&B_dev, ByteSize );
//Riempio la Matrice Host
//float min = -10, max = 10 , delta = max-min;
for(int i=0 ; i<ROW ; i++)for(int j=0 ; j<COL ; j++)A_host[i*COL+j] = (rand() % 10 );
A_host[N-1]=1;
//Riempio La Matrice Device
hipMemcpy( A_dev, A_host, N*sizeof(float), hipMemcpyHostToDevice);
//Check Matrice Creata
// print(A_host,ROW,COL);
//Creo degli eventi per la statistica
hipEvent_t T1,T2;
hipEventCreate(&T1);
hipEventCreate(&T2);
float diff_time;
cout<<"CONDENSAZIONE VERSIONE 1"<<endl;
//registro il primo tempo
hipEventRecord(T1, 0);
//Kernel
hipLaunchKernelGGL(( stepCondensationSimple_v1), dim3(DIM_GRID),dim3(DIM_BLOCK), 0, 0, B_dev,A_dev, COL-1,ROW-1);
//PullBack del risultato
hipMemcpy( B_host, B_dev, Nb*sizeof(float), hipMemcpyDeviceToHost);
//registro il secondo evento alla fine del kernel
hipEventRecord(T2, 0);
hipEventSynchronize(T2);
//check
// print(B_host,ROW-1,COL-1);
hipEventElapsedTime(&diff_time, T1, T2);
cout<<"Tempo esecuzione = "<<diff_time<<" ms"<<endl;
cout<<"CONDENSAZIONE VERSIONE 2"<<endl;
//registro il primo tempo
hipEventRecord(T1, 0);
//Kernel
hipLaunchKernelGGL(( stepCondensationSimple_v2), dim3(DIM_GRID),dim3(DIM_BLOCK), 0, 0, B_dev,A_dev, COL-1,ROW-1);
//PullBack del risultato
hipMemcpy( B_host, B_dev, Nb*sizeof(float), hipMemcpyDeviceToHost);
//registro il secondo evento alla fine del kernel
hipEventRecord(T2, 0);
hipEventSynchronize(T2);
//check
// print(B_host,ROW-1,COL-1);
hipEventElapsedTime(&diff_time, T1, T2);
cout<<"Tempo esecuzione = "<<diff_time<<" ms"<<endl;
cout<<"CONDENSAZIONE VERSIONE 3"<<endl;
//registro il primo tempo
hipEventRecord(T1, 0);
//Kernel
hipLaunchKernelGGL(( stepCondensationSimple_v3), dim3(DIM_GRID),dim3(DIM_BLOCK), 0, 0, B_dev,A_dev, COL-1,ROW-1);
//PullBack del risultato
hipMemcpy( B_host, B_dev, Nb*sizeof(float), hipMemcpyDeviceToHost);
//registro il secondo evento alla fine del kernel
hipEventRecord(T2, 0);
hipEventSynchronize(T2);
//check
// print(B_host,ROW-1,COL-1);
hipEventElapsedTime(&diff_time, T1, T2);
cout<<"Tempo esecuzione = "<<diff_time<<" ms"<<endl;
cout<<"CONDENSAZIONE VERSIONE 4 (1 texture)"<<endl;
//registro il primo tempo
hipEventRecord(T1, 0);
// --------- Preparo Texture
float *pivot_Column = 0;
unsigned int numBytes = ROW*sizeof(float);
//Alloco la memoria lineare sulla gpu dove caricare la colonna di pivot
hipMalloc((void**)&pivot_Column, numBytes);
//Chiamo kernel che mette la colonna in memoria lineare
hipLaunchKernelGGL(( copy_Col), dim3(DIM_GRID),dim3(DIM_BLOCK), 0, 0, pivot_Column, A_dev ,COL-1, COL, ROW);
//Bindo a texture la memoria lineare
hipBindTexture(NULL, texRef, pivot_Column);
// ---------
//Kernel
hipLaunchKernelGGL(( stepCondensationSimple_v4), dim3(DIM_GRID),dim3(DIM_BLOCK), 0, 0, B_dev,A_dev, COL-1,ROW-1);
//PullBack del risultato
hipMemcpy( B_host, B_dev, Nb*sizeof(float), hipMemcpyDeviceToHost);
// --------- Preparo Texture
//unbind della texture
hipUnbindTexture ( texRef );
hipFree (pivot_Column);
// ---------
//registro il secondo evento alla fine del kernel
hipEventRecord(T2, 0);
hipEventSynchronize(T2);
//check
// print(B_host,ROW-1,COL-1);
hipEventElapsedTime(&diff_time, T1, T2);
cout<<"Tempo esecuzione = "<<diff_time<<" ms"<<endl;
cout<<"CONDENSAZIONE VERSIONE 5 (2 texture)"<<endl;
//registro il primo tempo
hipEventRecord(T1, 0);
// --------- Preparo Texture
// *pivot_Column = 0;
numBytes = ROW*sizeof(float);
//Alloco la memoria lineare sulla gpu dove caricare la colonna di pivot
hipMalloc((void**)&pivot_Column, numBytes);
//Chiamo kernel che mette la colonna in memoria lineare
hipLaunchKernelGGL(( copy_Col), dim3(DIM_GRID),dim3(DIM_BLOCK), 0, 0, pivot_Column, A_dev ,COL-1, COL, ROW);
//Bindo a texture la memoria lineare
hipBindTexture(NULL, texRef, pivot_Column);
// ---------
//Bindo a texture tutta la matrice in modo che e' bindato anche il vettore che mi interessa
hipBindTexture(NULL, texRef2, A_dev);
// ---------
//Kernel
hipLaunchKernelGGL(( stepCondensationSimple_v5), dim3(DIM_GRID),dim3(DIM_BLOCK), 0, 0, B_dev,A_dev, COL-1,ROW-1);
//PullBack del risultato
hipMemcpy( B_host, B_dev, Nb*sizeof(float), hipMemcpyDeviceToHost);
// --------- Preparo Texture
//unbind della texture
hipUnbindTexture ( texRef );
hipUnbindTexture ( texRef2 );
hipFree (pivot_Column);
// ---------
//registro il secondo evento alla fine del kernel
hipEventRecord(T2, 0);
hipEventSynchronize(T2);
//check
// print(B_host,ROW-1,COL-1);
hipEventElapsedTime(&diff_time, T1, T2);
cout<<"Tempo esecuzione = "<<diff_time<<" ms"<<endl;
cout<<"CONDENSAZIONE VERSIONE 6 (1 sola texture grande)"<<endl;
//registro il primo tempo
hipEventRecord(T1, 0);
// --------- Preparo Texture
//Bindo a texture tutta la matrice in modo che e' bindato anche il vettore che mi interessa
hipBindTexture(NULL, texRef2, A_dev);
// ---------
//Kernel
hipLaunchKernelGGL(( stepCondensationSimple_v6), dim3(DIM_GRID),dim3(DIM_BLOCK), 0, 0, B_dev,A_dev, COL-1,ROW-1);
//PullBack del risultato
hipMemcpy( B_host, B_dev, Nb*sizeof(float), hipMemcpyDeviceToHost);
// --------- Preparo Texture
//unbind della texture
hipUnbindTexture ( texRef2 );
// ---------
//registro il secondo evento alla fine del kernel
hipEventRecord(T2, 0);
hipEventSynchronize(T2);
//check
// print(B_host,ROW-1,COL-1);
hipEventElapsedTime(&diff_time, T1, T2);
cout<<"Tempo esecuzione = "<<diff_time<<" ms"<<endl;
hipFree( A_dev);
hipFree( B_dev);
return 0;
}
|
6fc4ad66203ee0f5675131795fa3bac3866170a7.cu
|
/*//---------------------------------------------------------------------------------------------------------
|
| Test di Kernel simili che utilizzano memoria in modo diverso ( mostrare il diverso tempo di esecuzione)
|
*///---------------------------------------------------------------------------------------------------------
//OGNI thread alloca 5 unsigned int e 2 float ---> 7*4= 28 byte
#include <iostream>
#include <stdio.h>
using namespace std;
#include <cstdlib>
#include <stdlib.h> /* srand, rand */ //http://www.cplusplus.com/reference/cstdlib/rand/
#include <fstream> //http://www.cplusplus.com/doc/tutorial/files/
#define DIM_GRID 128
#define DIM_BLOCK 128
#define THREAD_PER_BLOCK 128
#include "condensation_tentative.cuh"
//kernel che copia una colonna della matrice in un segmento lineare di memoria
/*
input:
puntatore destinatatio
puntatore matricepartenza
indice della colonna da copiare
numero di elementi
*/
__global__ void copy_Col( float *a, float *A,int j1, int numAColumns, int numARow){
int tid = blockIdx.x*blockDim.x + threadIdx.x; //identità del thread (al solito metto in fila ogni thread a partire dal primo blocco)
int Nthreads = blockDim.x * gridDim.x; //Numero totale di threads istanziati dalla chiamata del kernel
while(tid<numARow){
a[tid]=A[tid*numAColumns + j1]; //qui copia
tid+= Nthreads; // se il vettore ha più elementi del numero di thread chiamati faccio si che ogni thread sommi più di un elemento
}
}
void print(float *A, int ROW, int COL){
for(int i=0 ; i<ROW ; i++){
for(int j=0 ; j<COL ; j++){
//printf(" %.1f ",A[i*COL+j]);
cout<< A[i*COL+j] << " ";
}
cout<<endl;
}
cout<<endl;
}
int main(void){
int ROW=10000;
int COL=ROW;
int N = COL*ROW;
int Nb= (ROW-1)*(COL-1);
srand (time(NULL));
//Punto La memoria
float * A_host; float *B_host;
float * A_dev; float *B_dev;
//Alloco la matrice
A_host = new float[N] ();
B_host = new float[Nb] ();
//Alloco le matrice device (riservo la memoria richiesta)
unsigned int ByteSize = N*sizeof(float);
cudaMalloc( (void **)&A_dev, ByteSize );
ByteSize = Nb*sizeof(float);
cudaMalloc( (void **)&B_dev, ByteSize );
//Riempio la Matrice Host
//float min = -10, max = 10 , delta = max-min;
for(int i=0 ; i<ROW ; i++)for(int j=0 ; j<COL ; j++)A_host[i*COL+j] = (rand() % 10 );
A_host[N-1]=1;
//Riempio La Matrice Device
cudaMemcpy( A_dev, A_host, N*sizeof(float), cudaMemcpyHostToDevice);
//Check Matrice Creata
// print(A_host,ROW,COL);
//Creo degli eventi per la statistica
cudaEvent_t T1,T2;
cudaEventCreate(&T1);
cudaEventCreate(&T2);
float diff_time;
cout<<"CONDENSAZIONE VERSIONE 1"<<endl;
//registro il primo tempo
cudaEventRecord(T1, 0);
//Kernel
stepCondensationSimple_v1<<<DIM_GRID,DIM_BLOCK>>>(B_dev,A_dev, COL-1,ROW-1);
//PullBack del risultato
cudaMemcpy( B_host, B_dev, Nb*sizeof(float), cudaMemcpyDeviceToHost);
//registro il secondo evento alla fine del kernel
cudaEventRecord(T2, 0);
cudaEventSynchronize(T2);
//check
// print(B_host,ROW-1,COL-1);
cudaEventElapsedTime(&diff_time, T1, T2);
cout<<"Tempo esecuzione = "<<diff_time<<" ms"<<endl;
cout<<"CONDENSAZIONE VERSIONE 2"<<endl;
//registro il primo tempo
cudaEventRecord(T1, 0);
//Kernel
stepCondensationSimple_v2<<<DIM_GRID,DIM_BLOCK>>>(B_dev,A_dev, COL-1,ROW-1);
//PullBack del risultato
cudaMemcpy( B_host, B_dev, Nb*sizeof(float), cudaMemcpyDeviceToHost);
//registro il secondo evento alla fine del kernel
cudaEventRecord(T2, 0);
cudaEventSynchronize(T2);
//check
// print(B_host,ROW-1,COL-1);
cudaEventElapsedTime(&diff_time, T1, T2);
cout<<"Tempo esecuzione = "<<diff_time<<" ms"<<endl;
cout<<"CONDENSAZIONE VERSIONE 3"<<endl;
//registro il primo tempo
cudaEventRecord(T1, 0);
//Kernel
stepCondensationSimple_v3<<<DIM_GRID,DIM_BLOCK>>>(B_dev,A_dev, COL-1,ROW-1);
//PullBack del risultato
cudaMemcpy( B_host, B_dev, Nb*sizeof(float), cudaMemcpyDeviceToHost);
//registro il secondo evento alla fine del kernel
cudaEventRecord(T2, 0);
cudaEventSynchronize(T2);
//check
// print(B_host,ROW-1,COL-1);
cudaEventElapsedTime(&diff_time, T1, T2);
cout<<"Tempo esecuzione = "<<diff_time<<" ms"<<endl;
cout<<"CONDENSAZIONE VERSIONE 4 (1 texture)"<<endl;
//registro il primo tempo
cudaEventRecord(T1, 0);
// --------- Preparo Texture
float *pivot_Column = 0;
unsigned int numBytes = ROW*sizeof(float);
//Alloco la memoria lineare sulla gpu dove caricare la colonna di pivot
cudaMalloc((void**)&pivot_Column, numBytes);
//Chiamo kernel che mette la colonna in memoria lineare
copy_Col<<<DIM_GRID,DIM_BLOCK>>>(pivot_Column, A_dev ,COL-1, COL, ROW);
//Bindo a texture la memoria lineare
cudaBindTexture(NULL, texRef, pivot_Column);
// ---------
//Kernel
stepCondensationSimple_v4<<<DIM_GRID,DIM_BLOCK>>>(B_dev,A_dev, COL-1,ROW-1);
//PullBack del risultato
cudaMemcpy( B_host, B_dev, Nb*sizeof(float), cudaMemcpyDeviceToHost);
// --------- Preparo Texture
//unbind della texture
cudaUnbindTexture ( texRef );
cudaFree (pivot_Column);
// ---------
//registro il secondo evento alla fine del kernel
cudaEventRecord(T2, 0);
cudaEventSynchronize(T2);
//check
// print(B_host,ROW-1,COL-1);
cudaEventElapsedTime(&diff_time, T1, T2);
cout<<"Tempo esecuzione = "<<diff_time<<" ms"<<endl;
cout<<"CONDENSAZIONE VERSIONE 5 (2 texture)"<<endl;
//registro il primo tempo
cudaEventRecord(T1, 0);
// --------- Preparo Texture
// *pivot_Column = 0;
numBytes = ROW*sizeof(float);
//Alloco la memoria lineare sulla gpu dove caricare la colonna di pivot
cudaMalloc((void**)&pivot_Column, numBytes);
//Chiamo kernel che mette la colonna in memoria lineare
copy_Col<<<DIM_GRID,DIM_BLOCK>>>(pivot_Column, A_dev ,COL-1, COL, ROW);
//Bindo a texture la memoria lineare
cudaBindTexture(NULL, texRef, pivot_Column);
// ---------
//Bindo a texture tutta la matrice in modo che e' bindato anche il vettore che mi interessa
cudaBindTexture(NULL, texRef2, A_dev);
// ---------
//Kernel
stepCondensationSimple_v5<<<DIM_GRID,DIM_BLOCK>>>(B_dev,A_dev, COL-1,ROW-1);
//PullBack del risultato
cudaMemcpy( B_host, B_dev, Nb*sizeof(float), cudaMemcpyDeviceToHost);
// --------- Preparo Texture
//unbind della texture
cudaUnbindTexture ( texRef );
cudaUnbindTexture ( texRef2 );
cudaFree (pivot_Column);
// ---------
//registro il secondo evento alla fine del kernel
cudaEventRecord(T2, 0);
cudaEventSynchronize(T2);
//check
// print(B_host,ROW-1,COL-1);
cudaEventElapsedTime(&diff_time, T1, T2);
cout<<"Tempo esecuzione = "<<diff_time<<" ms"<<endl;
cout<<"CONDENSAZIONE VERSIONE 6 (1 sola texture grande)"<<endl;
//registro il primo tempo
cudaEventRecord(T1, 0);
// --------- Preparo Texture
//Bindo a texture tutta la matrice in modo che e' bindato anche il vettore che mi interessa
cudaBindTexture(NULL, texRef2, A_dev);
// ---------
//Kernel
stepCondensationSimple_v6<<<DIM_GRID,DIM_BLOCK>>>(B_dev,A_dev, COL-1,ROW-1);
//PullBack del risultato
cudaMemcpy( B_host, B_dev, Nb*sizeof(float), cudaMemcpyDeviceToHost);
// --------- Preparo Texture
//unbind della texture
cudaUnbindTexture ( texRef2 );
// ---------
//registro il secondo evento alla fine del kernel
cudaEventRecord(T2, 0);
cudaEventSynchronize(T2);
//check
// print(B_host,ROW-1,COL-1);
cudaEventElapsedTime(&diff_time, T1, T2);
cout<<"Tempo esecuzione = "<<diff_time<<" ms"<<endl;
cudaFree( A_dev);
cudaFree( B_dev);
return 0;
}
|
5ae72b53ba737f758407feb492cdddcfe88c38fd.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <cstdlib>
#include <string>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float* A, const float* B, float* C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(int argc, char* argv[])
{
if(argc != 3)
{
printf("Not enough arguments.");
return EXIT_FAILURE;
}
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = std::stoi(argv[1]);
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float* h_A = (float*)malloc(size);
// Allocate the host input vector B
float* h_B = (float*)malloc(size);
// Allocate the host output vector C
float* h_C = (float*)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand() / (float)RAND_MAX;
h_B[i] = rand() / (float)RAND_MAX;
}
// Allocate the device input vector A
float* d_A = NULL;
err = hipMalloc((void**)& d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float* d_B = NULL;
err = hipMalloc((void**)& d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float* d_C = NULL;
err = hipMalloc((void**)& d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = std::stoi(argv[2]);
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_A, d_B, d_C, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/*
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
*/
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
return 0;
}
|
5ae72b53ba737f758407feb492cdddcfe88c38fd.cu
|
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <cstdlib>
#include <string>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float* A, const float* B, float* C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(int argc, char* argv[])
{
if(argc != 3)
{
printf("Not enough arguments.");
return EXIT_FAILURE;
}
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = std::stoi(argv[1]);
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float* h_A = (float*)malloc(size);
// Allocate the host input vector B
float* h_B = (float*)malloc(size);
// Allocate the host output vector C
float* h_C = (float*)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand() / (float)RAND_MAX;
h_B[i] = rand() / (float)RAND_MAX;
}
// Allocate the device input vector A
float* d_A = NULL;
err = cudaMalloc((void**)& d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float* d_B = NULL;
err = cudaMalloc((void**)& d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float* d_C = NULL;
err = cudaMalloc((void**)& d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = std::stoi(argv[2]);
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd <<<blocksPerGrid, threadsPerBlock >>> (d_A, d_B, d_C, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
*/
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
return 0;
}
|
cf8dd3fe102f53bdb7fbb8546092c9726ffc458a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
//
//
#include <ops/declarable/helpers/s_t_d.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename T>
static _CUDA_G void spaceToDepthKernel(void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, const int block_size, const bool isNHWC) {
auto input_ptr = reinterpret_cast<T *>(vx);
auto output_ptr = reinterpret_cast<T *>(vz);
const int batch_size = shape::sizeAt(xShapeInfo, 0);
const int input_depth = isNHWC ? shape::sizeAt(xShapeInfo, 3) : shape::sizeAt(xShapeInfo, 1);
const int input_height = isNHWC ? shape::sizeAt(xShapeInfo, 1) : shape::sizeAt(xShapeInfo, 2);
const int input_width = isNHWC ? shape::sizeAt(xShapeInfo, 2) : shape::sizeAt(xShapeInfo, 3);
const int output_depth = isNHWC ? shape::sizeAt(zShapeInfo, 3) : shape::sizeAt(zShapeInfo, 1);
const int output_height = isNHWC ? shape::sizeAt(zShapeInfo, 1) : shape::sizeAt(zShapeInfo, 2);
const int output_width = isNHWC ? shape::sizeAt(zShapeInfo, 2) : shape::sizeAt(zShapeInfo, 3);
const int input_depth_by_output_height = input_depth * output_height;
const int output_area = output_width * output_height;
const int output_depth_by_output_area = output_depth * output_area;
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (isNHWC) {
const int total_count = batch_size * input_height * input_width * input_depth;
for (int inp_idx = tid; inp_idx < total_count; inp_idx += blockDim.x * gridDim.x){
// inp_idx = d + input_depth * (w + input_width * (h + input_height * b))
const int d = inp_idx % input_depth;
const int inp_idx2 = inp_idx / input_depth;
const int w = inp_idx2 % input_width;
const int inp_idx3 = inp_idx2 / input_width;
const int h = inp_idx3 % input_height;
const int b = inp_idx3 / input_height;
const int out_h = h / block_size;
const int offset_h = h % block_size;
const int out_w = w / block_size;
const int offset_w = w % block_size;
const int offset_d = (offset_h * block_size + offset_w) * input_depth;
const int out_d = d + offset_d;
const int out_idx = out_d + output_depth * (out_w + output_width * (out_h + output_height * b));
*(output_ptr + out_idx) = *(input_ptr + inp_idx);
}
} else {
const int total_count = batch_size * output_depth_by_output_area;
for (int inp_idx = tid; inp_idx < total_count; inp_idx += blockDim.x * gridDim.x) {
const int n_iC_oY_bY_oX = inp_idx / block_size;
const int bX = inp_idx - n_iC_oY_bY_oX * block_size;
const int n_iC_oY_bY = n_iC_oY_bY_oX / output_width;
const int oX = n_iC_oY_bY_oX - n_iC_oY_bY * output_width;
const int n_iC_oY = n_iC_oY_bY / block_size;
const int bY = n_iC_oY_bY - n_iC_oY * block_size;
const int n = n_iC_oY / input_depth_by_output_height;
const int iC_oY = n_iC_oY - n * input_depth_by_output_height;
const int output_idx = oX + (((n * block_size + bY) * block_size + bX) * input_depth_by_output_height + iC_oY) * output_width;
*(output_ptr + output_idx) = *(input_ptr + inp_idx);
}
}
}
template <typename T>
static void _spaceTodepth_(sd::LaunchContext * context, NDArray *input, NDArray *output, int block_size, bool isNHWC) {
hipLaunchKernelGGL(( spaceToDepthKernel<T>), dim3(512), dim3(512), 1024, *context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), block_size, isNHWC);
}
void _spaceTodepth(sd::LaunchContext * context, NDArray *input, NDArray *output, int block_size, bool isNHWC) {
NDArray::prepareSpecialUse({output}, {input});
BUILD_SINGLE_SELECTOR(input->dataType(), _spaceTodepth_, (context, input, output, block_size, isNHWC), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {input});
}
BUILD_SINGLE_TEMPLATE(template void _spaceTodepth_, (sd::LaunchContext * context, NDArray *input, NDArray *output, int block_size, bool isNHWC), LIBND4J_TYPES);
}
}
}
|
cf8dd3fe102f53bdb7fbb8546092c9726ffc458a.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
//
//
#include <ops/declarable/helpers/s_t_d.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename T>
static _CUDA_G void spaceToDepthKernel(void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, const int block_size, const bool isNHWC) {
auto input_ptr = reinterpret_cast<T *>(vx);
auto output_ptr = reinterpret_cast<T *>(vz);
const int batch_size = shape::sizeAt(xShapeInfo, 0);
const int input_depth = isNHWC ? shape::sizeAt(xShapeInfo, 3) : shape::sizeAt(xShapeInfo, 1);
const int input_height = isNHWC ? shape::sizeAt(xShapeInfo, 1) : shape::sizeAt(xShapeInfo, 2);
const int input_width = isNHWC ? shape::sizeAt(xShapeInfo, 2) : shape::sizeAt(xShapeInfo, 3);
const int output_depth = isNHWC ? shape::sizeAt(zShapeInfo, 3) : shape::sizeAt(zShapeInfo, 1);
const int output_height = isNHWC ? shape::sizeAt(zShapeInfo, 1) : shape::sizeAt(zShapeInfo, 2);
const int output_width = isNHWC ? shape::sizeAt(zShapeInfo, 2) : shape::sizeAt(zShapeInfo, 3);
const int input_depth_by_output_height = input_depth * output_height;
const int output_area = output_width * output_height;
const int output_depth_by_output_area = output_depth * output_area;
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (isNHWC) {
const int total_count = batch_size * input_height * input_width * input_depth;
for (int inp_idx = tid; inp_idx < total_count; inp_idx += blockDim.x * gridDim.x){
// inp_idx = d + input_depth * (w + input_width * (h + input_height * b))
const int d = inp_idx % input_depth;
const int inp_idx2 = inp_idx / input_depth;
const int w = inp_idx2 % input_width;
const int inp_idx3 = inp_idx2 / input_width;
const int h = inp_idx3 % input_height;
const int b = inp_idx3 / input_height;
const int out_h = h / block_size;
const int offset_h = h % block_size;
const int out_w = w / block_size;
const int offset_w = w % block_size;
const int offset_d = (offset_h * block_size + offset_w) * input_depth;
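// e.g. with block_size = 2 and input_depth = 3 (illustrative values), the pixel at (h, w) = (3, 5) maps to (out_h, out_w) = (1, 2) with offset_d = (1*2 + 1)*3 = 9, i.e. out_d = d + 9.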
const int out_d = d + offset_d;
const int out_idx = out_d + output_depth * (out_w + output_width * (out_h + output_height * b));
*(output_ptr + out_idx) = *(input_ptr + inp_idx);
}
} else {
const int total_count = batch_size * output_depth_by_output_area;
for (int inp_idx = tid; inp_idx < total_count; inp_idx += blockDim.x * gridDim.x) {
const int n_iC_oY_bY_oX = inp_idx / block_size;
const int bX = inp_idx - n_iC_oY_bY_oX * block_size;
const int n_iC_oY_bY = n_iC_oY_bY_oX / output_width;
const int oX = n_iC_oY_bY_oX - n_iC_oY_bY * output_width;
const int n_iC_oY = n_iC_oY_bY / block_size;
const int bY = n_iC_oY_bY - n_iC_oY * block_size;
const int n = n_iC_oY / input_depth_by_output_height;
const int iC_oY = n_iC_oY - n * input_depth_by_output_height;
const int output_idx = oX + (((n * block_size + bY) * block_size + bX) * input_depth_by_output_height + iC_oY) * output_width;
*(output_ptr + output_idx) = *(input_ptr + inp_idx);
}
}
}
template <typename T>
static void _spaceTodepth_(sd::LaunchContext * context, NDArray *input, NDArray *output, int block_size, bool isNHWC) {
spaceToDepthKernel<T><<<512, 512, 1024, *context->getCudaStream()>>>(input->specialBuffer(), input->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), block_size, isNHWC);
}
void _spaceTodepth(sd::LaunchContext * context, NDArray *input, NDArray *output, int block_size, bool isNHWC) {
NDArray::prepareSpecialUse({output}, {input});
BUILD_SINGLE_SELECTOR(input->dataType(), _spaceTodepth_, (context, input, output, block_size, isNHWC), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {input});
}
BUILD_SINGLE_TEMPLATE(template void _spaceTodepth_, (sd::LaunchContext * context, NDArray *input, NDArray *output, int block_size, bool isNHWC), LIBND4J_TYPES);
}
}
}
|
0dd9535aad35272b5f88a23ba374ecefc106009d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_tanhf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE*sizeof(float)); // allocate n floats, not n bytes
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
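// Round XSIZE/YSIZE up to the next multiple of the block dimensions so the grid tiles the whole matrix.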
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( vec_tanhf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( vec_tanhf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( vec_tanhf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
0dd9535aad35272b5f88a23ba374ecefc106009d.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_tanhf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_tanhf<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_tanhf<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_tanhf<<<gridBlock,threadBlock>>>(n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
3ddd1382157ef0805d27a8d61e6852a99b339bb1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
#include "../include/iobparams.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_cd1.cuh"
#include "../include/dervfields_cd1.cuh"
#include "../include/usersource_cd1.cuh"
__device__ __host__
int divflux1(real *dw, real *wd, real *w, struct params *p,int *ii,int field,int dir) {
int direction;
int status=0;
real divflux=0;
//real g = grad3dn_cd1(wd,wd,p,ii,flux,dir);
dw[fencode3_cd1(p,ii,field)]+= grad3dn_cd1(wd,wd,p,ii,flux,dir);
/*if(field==rho && (p->ipe)==0 && ((p)->it)==1 && isnan(g))
{
printf("nant %d %d %lg %lg %lg %lg\n",ii[0],ii[1],g,wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[0]+=1;
printf("nant 0+1 %d %d %lg %lg %lg\n",ii[0]+1,ii[1],wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[0]-=1;
printf("nant 0-1 %d %d %lg %lg %lg\n",ii[0]-1,ii[1],wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[1]+=1;
printf("nant 1+1 %d %d %lg %lg %lg\n",ii[0],ii[1]+1,wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[0]-=1;
printf("nant %1-1 d %d %lg %lg %lg\n\n",ii[0],ii[1]-1,wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
}*/
return ( status);
}
__device__ __host__
real transportflux (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(w[fencode3_cd1(p,ii,mom1+direction)]*w[fencode3_cd1(p,ii,field)]/(w[fencode3_cd1(p,ii,rho)]+w[fencode3_cd1(p,ii,rhob)]));
#else
return(w[fencode3_cd1(p,ii,mom1+direction)]*w[fencode3_cd1(p,ii,field)]/w[fencode3_cd1(p,ii,rho)]);
#endif
}
__device__ __host__
real fluxmom1 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return( -(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom10 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
/*real gtest=(direction==0?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
if( (p->ipe)==0 && ((p)->it)==1 && (isnan(gtest) || isnan(w[fencode3_cd1(p,ii,field)]) || w[fencode3_cd1(p,ii,field)]==0 ))
{
printf("nant %d %d %d %d %lg %lg\n",ii[0],ii[1],field, direction, w[fencode3_cd1(p,ii,rho)],w[fencode3_cd1(p,ii,field)] );
}*/
#if defined USE_SAC || defined USE_SAC_3D
return(direction==0?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom11 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
/*real gtest=(direction==1?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
if( (p->ipe)==0 && ((p)->it)==2 && (isnan(gtest) || isnan(w[fencode3_cd1(p,ii,field)])|| w[fencode3_cd1(p,ii,field)]==0 ))
{
printf("nant %d %d %d %d %lg %lg \n",ii[0],ii[1],field,direction, w[fencode3_cd1(p,ii,rho)],w[fencode3_cd1(p,ii,field)] );
}*/
#if defined USE_SAC || defined USE_SAC_3D
return(direction==1?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom12 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(direction==2?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
int computefluxrho (real *dw, real *wd, real *w, struct params *p,int *ii,int direction) {
int field;
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#if defined USE_SAC || defined USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,rho,direction)+(w[fencode3_cd1(p,ii,rhob)]*w[fencode3_cd1(p,ii,mom1+direction)])/(w[fencode3_cd1(p,ii,rhob)]+w[fencode3_cd1(p,ii,rho)]);
#else
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,rho,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom3 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]=0.0;
wd[fencode3_cd1(p,ii,flux)]+=transportflux(dw,wd,w,p,ii,field,direction)+fluxmom12(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom2 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#ifdef USE_SAC
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom11(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom11(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom1 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#ifdef ADIABHYDRO
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom10(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom10(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
//rho, mom1, mom2, mom3, energy, b1, b2, b3
__device__ __host__
void computeflux (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int dir) {
switch(field)
{
case rho:
computefluxrho(dw,wd,w,p,ii,dir);
break;
case mom1:
computefluxmom1(dw,wd,w,p,ii,field,dir);
break;
case mom2:
computefluxmom2(dw,wd,w,p,ii,field,dir);
break;
#ifdef USE_SAC_3D
case mom3:
computefluxmom3(dw,wd,w,p,ii,field,dir);
break;
#endif
}
}
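// Kernel: zero the dwn1 accumulator and the flux scratch entry for field f at every point of the local grid before the flux sweep.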
__global__ void centdiff1init_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd1(p,ii,f)]=0.0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
}
__syncthreads();
}
__global__ void centdiff1_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2))
#endif
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,0);
break;
case 1:
#ifdef USE_SAC_3D
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2))
#endif
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,1);
break;
#ifdef USE_SAC_3D
case 2:
if(ii[2]<p->n[2] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[1]>1 && ii[1]<(p->n[1]-2))
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,2);
break;
#endif
}
__syncthreads();
}
__global__ void centdiff1a_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if(ii[0]>1 && ii[1] >1 && ii[0]<(ni-2) && ii[1]<(nj-2))
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1] >1 && ii[2] >1 && ii[0]<(ni-2) && ii[1]<(nj-2) && ii[2]<(nk-2))
#endif
divflux1(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,dir);
__syncthreads();
}
__global__ void centdiff1af_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
switch(dir)
{
case 0:
#ifdef USE_SAC
if(ii[1]>1 && ii[1] <(nj-2) && ii[0]<(ni) )
#endif
#ifdef USE_SAC_3D
if(ii[1]>1 && ii[1] <(nj-2) && ii[0]<(ni) && ii[2]>1 && ii[2] <(nk-2) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
break;
case 1:
#ifdef USE_SAC
if(ii[0]>1 && ii[1] <(nj) && ii[0]<(ni-2) )
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1] <(nj) && ii[0]<(ni-2) && ii[2]>1 && ii[2] <(nk-2) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
break;
case 2:
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[0]<(ni-2) && ii[1]>1 && ii[1]<(nj-2) && ii[2] <(nk) )
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
#endif
break;
}
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==0 && ii[0]==124 && (p->it)==2)
{
wmod[fencode3_cd1(p,ii,rho)]=0.225;
w[fencode3_cd1(p,ii,rho)]=0.225;
}*/
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==3 && ii[1]==3 && (p->it)==2)
{
wmod[fencode3_cd1(p,ii,rho)]=0.22114;
w[fencode3_cd1(p,ii,rho)]=0.22114;
}*/
__syncthreads();
}
__global__ void centdiff1binit_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd1(p,ii,f)]=0.0;
}
__syncthreads();
}
__global__ void centdiff1b_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#if(defined(USE_USERSOURCE))
{
ii[0]=ip;
ii[1]=jp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
ii[2]=kp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
if(ii[0]<((p->n[0])) && ii[1]<((p->n[1])) && ii[2]<((p->n[2])) )
#endif
#if(defined(USE_SAC) && defined(USE_USERSOURCE))
if(ii[0]<(p->n[0]) && ii[1]<(p->n[1]))
#endif
#ifdef USE_USERSOURCE
addsourceterms1_cd1(dwn1,wd,wmod+ordero*NVAR*dimp,p,s,ii,f,dir);
}
__syncthreads();
#endif
// }
}
__global__ void centdiff1bf_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if( ii[1] <(nj) && ii[0]<(ni) )
#endif
#ifdef USE_SAC_3D
if(ii[1] <(nj) && ii[0]<(ni) && ii[2] <(nk) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
// TEST
//wmod[fencode3_cd1(p,ii,rho)+(ordero*NVAR*dimp)] = -99999;
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_cd1(char *label)
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
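// Host driver for one centered-difference step of `field` along `dir`:
// copy the params struct to the device, zero the work arrays, compute the fluxes,
// accumulate their divergence into dwn1, and apply wmod -= dt*dwn1, synchronizing after each kernel.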
int cucentdiff1(struct params **p, struct params **d_p,struct state **d_s, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order, int ordero, real dt, int field, int dir)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( centdiff1init_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff1_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff1a_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff1af_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
hipDeviceSynchronize();
return 0;
}
|
3ddd1382157ef0805d27a8d61e6852a99b339bb1.cu
|
#include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
#include "../include/iobparams.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_cd1.cuh"
#include "../include/dervfields_cd1.cuh"
#include "../include/usersource_cd1.cuh"
__device__ __host__
int divflux1(real *dw, real *wd, real *w, struct params *p,int *ii,int field,int dir) {
int direction;
int status=0;
real divflux=0;
//real g = grad3dn_cd1(wd,wd,p,ii,flux,dir);
dw[fencode3_cd1(p,ii,field)]+= grad3dn_cd1(wd,wd,p,ii,flux,dir);
/*if(field==rho && (p->ipe)==0 && ((p)->it)==1 && isnan(g))
{
printf("nant %d %d %lg %lg %lg %lg\n",ii[0],ii[1],g,wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[0]+=1;
printf("nant 0+1 %d %d %lg %lg %lg\n",ii[0]+1,ii[1],wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[0]-=1;
printf("nant 0-1 %d %d %lg %lg %lg\n",ii[0]-1,ii[1],wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[1]+=1;
printf("nant 1+1 %d %d %lg %lg %lg\n",ii[0],ii[1]+1,wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
ii[0]-=1;
printf("nant %1-1 d %d %lg %lg %lg\n\n",ii[0],ii[1]-1,wd[fencode3_cd1(p,ii,flux)],wd[fencode3_cd1(p,ii,delx1)],wd[fencode3_cd1(p,ii,delx2)] );
}*/
return ( status);
}
__device__ __host__
real transportflux (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(w[fencode3_cd1(p,ii,mom1+direction)]*w[fencode3_cd1(p,ii,field)]/(w[fencode3_cd1(p,ii,rho)]+w[fencode3_cd1(p,ii,rhob)]));
#else
return(w[fencode3_cd1(p,ii,mom1+direction)]*w[fencode3_cd1(p,ii,field)]/w[fencode3_cd1(p,ii,rho)]);
#endif
}
__device__ __host__
real fluxmom1 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return( -(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom10 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
/*real gtest=(direction==0?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
if( (p->ipe)==0 && ((p)->it)==1 && (isnan(gtest) || isnan(w[fencode3_cd1(p,ii,field)]) || w[fencode3_cd1(p,ii,field)]==0 ))
{
printf("nant %d %d %d %d %lg %lg\n",ii[0],ii[1],field, direction, w[fencode3_cd1(p,ii,rho)],w[fencode3_cd1(p,ii,field)] );
}*/
#if defined USE_SAC || defined USE_SAC_3D
return(direction==0?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom11 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
/*real gtest=(direction==1?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
if( (p->ipe)==0 && ((p)->it)==2 && (isnan(gtest) || isnan(w[fencode3_cd1(p,ii,field)])|| w[fencode3_cd1(p,ii,field)]==0 ))
{
printf("nant %d %d %d %d %lg %lg \n",ii[0],ii[1],field,direction, w[fencode3_cd1(p,ii,rho)],w[fencode3_cd1(p,ii,field)] );
}*/
#if defined USE_SAC || defined USE_SAC_3D
return(direction==1?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom12 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(direction==2?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
int computefluxrho (real *dw, real *wd, real *w, struct params *p,int *ii,int direction) {
int field;
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#if defined USE_SAC || defined USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,rho,direction)+(w[fencode3_cd1(p,ii,rhob)]*w[fencode3_cd1(p,ii,mom1+direction)])/(w[fencode3_cd1(p,ii,rhob)]+w[fencode3_cd1(p,ii,rho)]);
#else
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,rho,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom3 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]=0.0;
wd[fencode3_cd1(p,ii,flux)]+=transportflux(dw,wd,w,p,ii,field,direction)+fluxmom12(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom2 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#ifdef USE_SAC
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom11(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom11(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom1 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#ifdef ADIABHYDRO
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom10(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom10(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
//rho, mom1, mom2, mom3, energy, b1, b2, b3
__device__ __host__
void computeflux (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int dir) {
switch(field)
{
case rho:
computefluxrho(dw,wd,w,p,ii,dir);
break;
case mom1:
computefluxmom1(dw,wd,w,p,ii,field,dir);
break;
case mom2:
computefluxmom2(dw,wd,w,p,ii,field,dir);
break;
#ifdef USE_SAC_3D
case mom3:
computefluxmom3(dw,wd,w,p,ii,field,dir);
break;
#endif
}
}
__global__ void centdiff1init_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd1(p,ii,f)]=0.0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
}
__syncthreads();
}
__global__ void centdiff1_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2))
#endif
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,0);
break;
case 1:
#ifdef USE_SAC_3D
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2))
#endif
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,1);
break;
#ifdef USE_SAC_3D
case 2:
if(ii[2]<p->n[2] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[1]>1 && ii[1]<(p->n[1]-2))
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,2);
break;
#endif
}
__syncthreads();
}
__global__ void centdiff1a_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if(ii[0]>1 && ii[1] >1 && ii[0]<(ni-2) && ii[1]<(nj-2))
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1] >1 && ii[2] >1 && ii[0]<(ni-2) && ii[1]<(nj-2) && ii[2]<(nk-2))
#endif
divflux1(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,dir);
__syncthreads();
}
__global__ void centdiff1af_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
switch(dir)
{
case 0:
#ifdef USE_SAC
if(ii[1]>1 && ii[1] <(nj-2) && ii[0]<(ni) )
#endif
#ifdef USE_SAC_3D
if(ii[1]>1 && ii[1] <(nj-2) && ii[0]<(ni) && ii[2]>1 && ii[2] <(nk-2) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
break;
case 1:
#ifdef USE_SAC
if(ii[0]>1 && ii[1] <(nj) && ii[0]<(ni-2) )
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1] <(nj) && ii[0]<(ni-2) && ii[2]>1 && ii[2] <(nk-2) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
break;
case 2:
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[0]<(ni-2) && ii[1]>1 && ii[1]<(nj-2) && ii[2] <(nk) )
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
#endif
break;
}
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==0 && ii[0]==124 && (p->it)==2)
{
wmod[fencode3_cd1(p,ii,rho)]=0.225;
w[fencode3_cd1(p,ii,rho)]=0.225;
}*/
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==3 && ii[1]==3 && (p->it)==2)
{
wmod[fencode3_cd1(p,ii,rho)]=0.22114;
w[fencode3_cd1(p,ii,rho)]=0.22114;
}*/
__syncthreads();
}
__global__ void centdiff1binit_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd1(p,ii,f)]=0.0;
}
__syncthreads();
}
__global__ void centdiff1b_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#if(defined(USE_USERSOURCE))
{
ii[0]=ip;
ii[1]=jp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
ii[2]=kp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
if(ii[0]<((p->n[0])) && ii[1]<((p->n[1])) && ii[2]<((p->n[2])) )
#endif
#if(defined(USE_SAC) && defined(USE_USERSOURCE))
if(ii[0]<(p->n[0]) && ii[1]<(p->n[1]))
#endif
#ifdef USE_USERSOURCE
addsourceterms1_cd1(dwn1,wd,wmod+ordero*NVAR*dimp,p,s,ii,f,dir);
}
__syncthreads();
#endif
// }
}
__global__ void centdiff1bf_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if( ii[1] <(nj) && ii[0]<(ni) )
#endif
#ifdef USE_SAC_3D
if(ii[1] <(nj) && ii[0]<(ni) && ii[2] <(nk) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
// TEST
//wmod[fencode3_cd1(p,ii,rho)+(ordero*NVAR*dimp)] = -99999;
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_cd1(char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cucentdiff1(struct params **p, struct params **d_p,struct state **d_s, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order, int ordero, real dt, int field, int dir)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
centdiff1init_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff1_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff1a_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff1af_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
cudaThreadSynchronize();
return 0;
}
|
78fdc88ae075f0a876d322016e9db760419a4df0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gauge_field_order.h>
#include <comm_quda.h>
#include <complex_quda.h>
#include <index_helper.cuh>
/**
This code has not been checked. In particular, I suspect it is
erroneous in multi-GPU since it looks like the halo ghost region
isn't being treated here.
*/
namespace quda {
#ifdef GPU_GAUGE_TOOLS
template <typename Float, typename Order>
struct GaugePhaseArg {
Order order;
int X[4];
int threads;
Float tBoundary;
Float i_mu;
complex<Float> i_mu_phase;
GaugePhaseArg(const Order &order, const GaugeField &u)
: order(order), threads(u.VolumeCB()), i_mu(u.iMu())
{
// if staggered phases are applied, then we are removing them
// else we are applying them
Float dir = u.StaggeredPhaseApplied() ? -1.0 : 1.0;
i_mu_phase = complex<Float>( cos(M_PI * u.iMu() / (u.X()[3]*comm_dim(3)) ),
dir * sin(M_PI * u.iMu() / (u.X()[3]*comm_dim(3))) );
for (int d=0; d<4; d++) X[d] = u.X()[d];
// only set the boundary condition on the last time slice of nodes
#ifdef MULTI_GPU
bool last_node_in_t = (commCoords(3) == commDim(3)-1);
#else
bool last_node_in_t = true;
#endif
tBoundary = (Float)(last_node_in_t ? u.TBoundary() : QUDA_PERIODIC_T);
}
GaugePhaseArg(const GaugePhaseArg &arg)
: order(arg.order), tBoundary(arg.tBoundary), threads(arg.threads),
i_mu(arg.i_mu), i_mu_phase(arg.i_mu_phase) {
for (int d=0; d<4; d++) X[d] = arg.X[d];
}
};
// FIXME need to check this with odd local volumes
template <int dim, typename Float, QudaStaggeredPhase phaseType, typename Arg>
__device__ __host__ Float getPhase(int x, int y, int z, int t, Arg &arg) {
Float phase = 1.0;
if (phaseType == QUDA_MILC_STAGGERED_PHASE) {
if (dim==0) {
phase = (1.0 - 2.0 * (t % 2) );
} else if (dim == 1) {
phase = (1.0 - 2.0 * ((t + x) % 2) );
} else if (dim == 2) {
phase = (1.0 - 2.0 * ((t + x + y) % 2) );
} else if (dim == 3) { // also apply boundary condition
phase = (t == arg.X[3]-1) ? arg.tBoundary : 1.0;
}
} if (phaseType == QUDA_TIFR_STAGGERED_PHASE) {
if (dim==0) {
phase = (1.0 - 2.0 * ((3 + t + z + y) % 2) );
} else if (dim == 1) {
phase = (1.0 - 2.0 * ((2 + t + z) % 2) );
} else if (dim == 2) {
phase = (1.0 - 2.0 * ((1 + t) % 2) );
} else if (dim == 3) { // also apply boundary condition
phase = (t == arg.X[3]-1) ? arg.tBoundary : 1.0;
}
} else if (phaseType == QUDA_CPS_STAGGERED_PHASE) {
if (dim==0) {
phase = 1.0;
} else if (dim == 1) {
phase = (1.0 - 2.0 * ((1 + x) % 2) );
} else if (dim == 2) {
phase = (1.0 - 2.0 * ((1 + x + y) % 2) );
} else if (dim == 3) { // also apply boundary condition
phase = ((t == arg.X[3]-1) ? arg.tBoundary : 1.0) *
(1.0 - 2 * ((1 + x + y + z) % 2) );
}
}
return phase;
}
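// Applies the staggered phase for dimension dim to the link at checkerboard site indexCB, and folds in the imaginary-chemical-potential phase on the time links when i_mu is non-zero.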
template <typename Float, int length, QudaStaggeredPhase phaseType, int dim, typename Arg>
__device__ __host__ void gaugePhase(int indexCB, int parity, Arg &arg) {
typedef typename mapper<Float>::type RegType;
int x[4];
getCoords(x, indexCB, arg.X, parity);
RegType phase = getPhase<dim,Float,phaseType>(x[0], x[1], x[2], x[3], arg);
RegType u[length];
arg.order.load(u, indexCB, dim, parity);
for (int i=0; i<length; i++) u[i] *= phase;
// apply imaginary chemical potential if needed
if (dim==3 && arg.i_mu != 0.0) {
complex<RegType>* v = reinterpret_cast<complex<RegType>*>(u);
for (int i=0; i<length/2; i++) v[i] *= arg.i_mu_phase;
}
arg.order.save(u, indexCB, dim, parity);
}
/**
Generic CPU staggered phase application
*/
template <typename Float, int length, QudaStaggeredPhase phaseType, typename Arg>
void gaugePhase(Arg &arg) {
for (int parity=0; parity<2; parity++) {
for (int indexCB=0; indexCB < arg.threads; indexCB++) {
gaugePhase<Float,length,phaseType,0>(indexCB, parity, arg);
gaugePhase<Float,length,phaseType,1>(indexCB, parity, arg);
gaugePhase<Float,length,phaseType,2>(indexCB, parity, arg);
gaugePhase<Float,length,phaseType,3>(indexCB, parity, arg);
}
}
}
/**
Generic GPU staggered phase application
*/
template <typename Float, int length, QudaStaggeredPhase phaseType, typename Arg>
__global__ void gaugePhaseKernel(Arg arg) {
int indexCB = blockIdx.x * blockDim.x + threadIdx.x;
if (indexCB >= arg.threads) return;
int parity = blockIdx.y;
gaugePhase<Float,length,phaseType,0>(indexCB, parity, arg);
gaugePhase<Float,length,phaseType,1>(indexCB, parity, arg);
gaugePhase<Float,length,phaseType,2>(indexCB, parity, arg);
gaugePhase<Float,length,phaseType,3>(indexCB, parity, arg);
}
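// Tunable wrapper that launches gaugePhaseKernel on the GPU (or falls back to the CPU loop) and exposes the autotuning hooks used by tuneLaunch.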
template <typename Float, int length, QudaStaggeredPhase phaseType, typename Arg>
class GaugePhase : Tunable {
Arg &arg;
const GaugeField &meta; // used for meta data only
QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0 ;}
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
GaugePhase(Arg &arg, const GaugeField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("stride=%d,prec=%lu",arg.order.stride,sizeof(Float));
}
virtual ~GaugePhase() { ; }
bool advanceBlockDim(TuneParam ¶m) const {
bool rtn = Tunable::advanceBlockDim(param);
param.grid.y = 2;
return rtn;
}
void initTuneParam(TuneParam ¶m) const {
Tunable::initTuneParam(param);
param.grid.y = 2;
}
void apply(const hipStream_t &stream) {
if (location == QUDA_CUDA_FIELD_LOCATION) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
tp.grid.y = 2; // parity is the y grid dimension
hipLaunchKernelGGL((gaugePhaseKernel<Float, length, phaseType, Arg>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else {
gaugePhase<Float, length, phaseType, Arg>(arg);
}
}
TuneKey tuneKey() const {
return TuneKey(meta.VolString(), typeid(*this).name(), aux);
}
void preTune() { arg.order.save(); }
void postTune() { arg.order.load(); }
std::string paramString(const TuneParam ¶m) const { // Don't bother printing the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 0; }
long long bytes() const { return 2 * arg.threads * 2 * arg.order.Bytes(); } // parity * e/o volume * i/o * vec size
};
template <typename Float, int length, typename Order>
void gaugePhase(Order order, const GaugeField &u, QudaFieldLocation location) {
if (u.StaggeredPhase() == QUDA_MILC_STAGGERED_PHASE) {
GaugePhaseArg<Float,Order> arg(order, u);
GaugePhase<Float,length,QUDA_MILC_STAGGERED_PHASE,
GaugePhaseArg<Float,Order> > phase(arg, u, location);
phase.apply(0);
} else if (u.StaggeredPhase() == QUDA_CPS_STAGGERED_PHASE) {
GaugePhaseArg<Float,Order> arg(order, u);
GaugePhase<Float,length,QUDA_CPS_STAGGERED_PHASE,
GaugePhaseArg<Float,Order> > phase(arg, u, location);
phase.apply(0);
} else if (u.StaggeredPhase() == QUDA_TIFR_STAGGERED_PHASE) {
GaugePhaseArg<Float,Order> arg(order, u);
GaugePhase<Float,length,QUDA_TIFR_STAGGERED_PHASE,
GaugePhaseArg<Float,Order> > phase(arg, u, location);
phase.apply(0);
} else {
errorQuda("Undefined phase type");
}
if (location == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
/** This is the template driver for gaugePhase */
template <typename Float>
void gaugePhase(GaugeField &u) {
const int length = 18;
QudaFieldLocation location =
(typeid(u)==typeid(cudaGaugeField)) ? QUDA_CUDA_FIELD_LOCATION : QUDA_CPU_FIELD_LOCATION;
if (u.isNative()) {
if (u.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G;
gaugePhase<Float,length>(G(u), u, location);
} else if (u.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G;
gaugePhase<Float,length>(G(u), u, location);
} else {
errorQuda("Unsupported recsontruction type");
}
} else {
errorQuda("Gauge field %d order not supported", u.Order());
}
}
#endif
void applyGaugePhase(GaugeField &u) {
#ifdef GPU_GAUGE_TOOLS
if (u.Precision() == QUDA_DOUBLE_PRECISION) {
gaugePhase<double>(u);
} else if (u.Precision() == QUDA_SINGLE_PRECISION) {
gaugePhase<float>(u);
} else {
errorQuda("Unknown precision type %d", u.Precision());
}
#else
errorQuda("Gauge tools are not build");
#endif
}
} // namespace quda
|
78fdc88ae075f0a876d322016e9db760419a4df0.cu
|
#include <gauge_field_order.h>
#include <comm_quda.h>
#include <complex_quda.h>
#include <index_helper.cuh>
/**
This code has not been checked. In particular, I suspect it is
erroneous in multi-GPU since it looks like the halo ghost region
isn't being treated here.
*/
namespace quda {
#ifdef GPU_GAUGE_TOOLS
template <typename Float, typename Order>
struct GaugePhaseArg {
Order order;
int X[4];
int threads;
Float tBoundary;
Float i_mu;
complex<Float> i_mu_phase;
GaugePhaseArg(const Order &order, const GaugeField &u)
: order(order), threads(u.VolumeCB()), i_mu(u.iMu())
{
// if staggered phases are applied, then we are removing them
// else we are applying them
Float dir = u.StaggeredPhaseApplied() ? -1.0 : 1.0;
i_mu_phase = complex<Float>( cos(M_PI * u.iMu() / (u.X()[3]*comm_dim(3)) ),
dir * sin(M_PI * u.iMu() / (u.X()[3]*comm_dim(3))) );
for (int d=0; d<4; d++) X[d] = u.X()[d];
// only set the boundary condition on the last time slice of nodes
#ifdef MULTI_GPU
bool last_node_in_t = (commCoords(3) == commDim(3)-1);
#else
bool last_node_in_t = true;
#endif
tBoundary = (Float)(last_node_in_t ? u.TBoundary() : QUDA_PERIODIC_T);
}
GaugePhaseArg(const GaugePhaseArg &arg)
: order(arg.order), tBoundary(arg.tBoundary), threads(arg.threads),
i_mu(arg.i_mu), i_mu_phase(arg.i_mu_phase) {
for (int d=0; d<4; d++) X[d] = arg.X[d];
}
};
// FIXME need to check this with odd local volumes
template <int dim, typename Float, QudaStaggeredPhase phaseType, typename Arg>
__device__ __host__ Float getPhase(int x, int y, int z, int t, Arg &arg) {
Float phase = 1.0;
if (phaseType == QUDA_MILC_STAGGERED_PHASE) {
if (dim==0) {
phase = (1.0 - 2.0 * (t % 2) );
} else if (dim == 1) {
phase = (1.0 - 2.0 * ((t + x) % 2) );
} else if (dim == 2) {
phase = (1.0 - 2.0 * ((t + x + y) % 2) );
} else if (dim == 3) { // also apply boundary condition
phase = (t == arg.X[3]-1) ? arg.tBoundary : 1.0;
}
} if (phaseType == QUDA_TIFR_STAGGERED_PHASE) {
if (dim==0) {
phase = (1.0 - 2.0 * ((3 + t + z + y) % 2) );
} else if (dim == 1) {
phase = (1.0 - 2.0 * ((2 + t + z) % 2) );
} else if (dim == 2) {
phase = (1.0 - 2.0 * ((1 + t) % 2) );
} else if (dim == 3) { // also apply boundary condition
phase = (t == arg.X[3]-1) ? arg.tBoundary : 1.0;
}
} else if (phaseType == QUDA_CPS_STAGGERED_PHASE) {
if (dim==0) {
phase = 1.0;
} else if (dim == 1) {
phase = (1.0 - 2.0 * ((1 + x) % 2) );
} else if (dim == 2) {
phase = (1.0 - 2.0 * ((1 + x + y) % 2) );
} else if (dim == 3) { // also apply boundary condition
phase = ((t == arg.X[3]-1) ? arg.tBoundary : 1.0) *
(1.0 - 2 * ((1 + x + y + z) % 2) );
}
}
return phase;
}
template <typename Float, int length, QudaStaggeredPhase phaseType, int dim, typename Arg>
__device__ __host__ void gaugePhase(int indexCB, int parity, Arg &arg) {
typedef typename mapper<Float>::type RegType;
int x[4];
getCoords(x, indexCB, arg.X, parity);
RegType phase = getPhase<dim,Float,phaseType>(x[0], x[1], x[2], x[3], arg);
RegType u[length];
arg.order.load(u, indexCB, dim, parity);
for (int i=0; i<length; i++) u[i] *= phase;
// apply imaginary chemical potential if needed
if (dim==3 && arg.i_mu != 0.0) {
complex<RegType>* v = reinterpret_cast<complex<RegType>*>(u);
for (int i=0; i<length/2; i++) v[i] *= arg.i_mu_phase;
}
arg.order.save(u, indexCB, dim, parity);
}
/**
Generic CPU staggered phase application
*/
template <typename Float, int length, QudaStaggeredPhase phaseType, typename Arg>
void gaugePhase(Arg &arg) {
for (int parity=0; parity<2; parity++) {
for (int indexCB=0; indexCB < arg.threads; indexCB++) {
gaugePhase<Float,length,phaseType,0>(indexCB, parity, arg);
gaugePhase<Float,length,phaseType,1>(indexCB, parity, arg);
gaugePhase<Float,length,phaseType,2>(indexCB, parity, arg);
gaugePhase<Float,length,phaseType,3>(indexCB, parity, arg);
}
}
}
/**
Generic GPU staggered phase application
*/
template <typename Float, int length, QudaStaggeredPhase phaseType, typename Arg>
__global__ void gaugePhaseKernel(Arg arg) {
int indexCB = blockIdx.x * blockDim.x + threadIdx.x;
if (indexCB >= arg.threads) return;
int parity = blockIdx.y;
gaugePhase<Float,length,phaseType,0>(indexCB, parity, arg);
gaugePhase<Float,length,phaseType,1>(indexCB, parity, arg);
gaugePhase<Float,length,phaseType,2>(indexCB, parity, arg);
gaugePhase<Float,length,phaseType,3>(indexCB, parity, arg);
}
template <typename Float, int length, QudaStaggeredPhase phaseType, typename Arg>
class GaugePhase : Tunable {
Arg &arg;
const GaugeField &meta; // used for meta data only
QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0 ;}
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
GaugePhase(Arg &arg, const GaugeField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("stride=%d,prec=%lu",arg.order.stride,sizeof(Float));
}
virtual ~GaugePhase() { ; }
bool advanceBlockDim(TuneParam ¶m) const {
bool rtn = Tunable::advanceBlockDim(param);
param.grid.y = 2;
return rtn;
}
void initTuneParam(TuneParam ¶m) const {
Tunable::initTuneParam(param);
param.grid.y = 2;
}
void apply(const cudaStream_t &stream) {
if (location == QUDA_CUDA_FIELD_LOCATION) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
tp.grid.y = 2; // parity is the y grid dimension
gaugePhaseKernel<Float, length, phaseType, Arg>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else {
gaugePhase<Float, length, phaseType, Arg>(arg);
}
}
TuneKey tuneKey() const {
return TuneKey(meta.VolString(), typeid(*this).name(), aux);
}
void preTune() { arg.order.save(); }
void postTune() { arg.order.load(); }
std::string paramString(const TuneParam ¶m) const { // Don't bother printing the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 0; }
long long bytes() const { return 2 * arg.threads * 2 * arg.order.Bytes(); } // parity * e/o volume * i/o * vec size
};
template <typename Float, int length, typename Order>
void gaugePhase(Order order, const GaugeField &u, QudaFieldLocation location) {
if (u.StaggeredPhase() == QUDA_MILC_STAGGERED_PHASE) {
GaugePhaseArg<Float,Order> arg(order, u);
GaugePhase<Float,length,QUDA_MILC_STAGGERED_PHASE,
GaugePhaseArg<Float,Order> > phase(arg, u, location);
phase.apply(0);
} else if (u.StaggeredPhase() == QUDA_CPS_STAGGERED_PHASE) {
GaugePhaseArg<Float,Order> arg(order, u);
GaugePhase<Float,length,QUDA_CPS_STAGGERED_PHASE,
GaugePhaseArg<Float,Order> > phase(arg, u, location);
phase.apply(0);
} else if (u.StaggeredPhase() == QUDA_TIFR_STAGGERED_PHASE) {
GaugePhaseArg<Float,Order> arg(order, u);
GaugePhase<Float,length,QUDA_TIFR_STAGGERED_PHASE,
GaugePhaseArg<Float,Order> > phase(arg, u, location);
phase.apply(0);
} else {
errorQuda("Undefined phase type");
}
if (location == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
/** This is the template driver for gaugePhase */
template <typename Float>
void gaugePhase(GaugeField &u) {
const int length = 18;
QudaFieldLocation location =
(typeid(u)==typeid(cudaGaugeField)) ? QUDA_CUDA_FIELD_LOCATION : QUDA_CPU_FIELD_LOCATION;
if (u.isNative()) {
if (u.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G;
gaugePhase<Float,length>(G(u), u, location);
} else if (u.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G;
gaugePhase<Float,length>(G(u), u, location);
} else {
errorQuda("Unsupported recsontruction type");
}
} else {
errorQuda("Gauge field %d order not supported", u.Order());
}
}
#endif
void applyGaugePhase(GaugeField &u) {
#ifdef GPU_GAUGE_TOOLS
if (u.Precision() == QUDA_DOUBLE_PRECISION) {
gaugePhase<double>(u);
} else if (u.Precision() == QUDA_SINGLE_PRECISION) {
gaugePhase<float>(u);
} else {
errorQuda("Unknown precision type %d", u.Precision());
}
#else
errorQuda("Gauge tools are not build");
#endif
}
} // namespace quda
|
9407ca0accfc6bc01897df4cc5cf8407f66d850c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudaDefs.h>
#include <vector>
#include <rng.h>
#include <timer.h>
#include <type_traits>
hipError_t err = hipError_t::hipSuccess;
hipDeviceProp_t deviceProp = hipDeviceProp_t();
/// \brief Threads per block
constexpr unsigned int TPB = 256;
/// \brief Number of blocks
constexpr unsigned int NOB = 16;
/// \brief Memory block per thread block
constexpr unsigned int MBPTB = 2;
/// \brief
/// \param a
/// \param b
/// \param length
/// \param c
/// \note __restrict__ Avoids pointer aliasing
template<typename T, typename = typename std::enable_if<std::is_arithmetic<T>::value, T>::type>
__global__ void add(const T *__restrict__ a, const T *__restrict__ b, const size_t length, T *__restrict__ c) {
//TODO c[i] = a[i] + b[i]
const size_t offset = (blockDim.x * blockIdx.x) + threadIdx.x;
if (offset < length) {
c[offset] = a[offset] + b[offset];
}
}
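// Host test: fill two integer vectors with random values, add them on the GPU with the kernel above, time the launch, and copy the result back for checking.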
int main() {
initializeCUDA(deviceProp);
#define COMP_TYPE int
constexpr size_t length = 1U << 20U;
// constexpr unsigned int length = 1000U;
constexpr size_t sizeInBytes = length * sizeof(COMP_TYPE);
/// Allocate host data (on CPU)
auto *host_a = static_cast<COMP_TYPE *>(::operator new(sizeInBytes));
auto *host_b = static_cast<COMP_TYPE *>(::operator new(sizeInBytes));
auto *host_c = static_cast<COMP_TYPE *>(::operator new(sizeInBytes));
/// Initialize data
// constexpr float maxNum = INT_MAX / 2;
constexpr float maxNum = 255.f;
for (size_t i = 0; i < length; i++) {
host_a[i] = static_cast<COMP_TYPE>(rng(0.f, maxNum));
host_b[i] = static_cast<COMP_TYPE>(rng(0.f, maxNum));
}
/// Allocate device data (on GPU)
COMP_TYPE *device_a = nullptr;
COMP_TYPE *device_b = nullptr;
COMP_TYPE *device_c = nullptr;
checkCudaErrors(hipMalloc((void **) &device_a, sizeInBytes));
checkCudaErrors(hipMalloc((void **) &device_b, sizeInBytes));
checkCudaErrors(hipMalloc((void **) &device_c, sizeInBytes));
/// Copy data host -> device
checkCudaErrors(hipMemcpy(device_a, host_a, sizeInBytes, hipMemcpyKind::hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(device_b, host_b, sizeInBytes, hipMemcpyKind::hipMemcpyHostToDevice));
checkDeviceMatrix(device_a, sizeInBytes, 1, length, "%d ", "Device A");
checkDeviceMatrix(device_b, sizeInBytes, 1, length, "%d ", "Device B");
checkDeviceMatrix(device_c, sizeInBytes, 1, length, "%d ", "Device C");
/// Prepare grid and blocks
dim3 dimBlock(TPB, 1, 1);
dim3 dimGrid(NOB, 1, 1); // What about data?
// dim3 dimGrid(getNumberOfParts(length, TPB), 1, 1); // Great number of blocks
// dim3 dimGrid(getNumberOfParts(length, TPB * MBPTB), 1, 1);
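// NOTE: with the fixed grid above only NOB * TPB = 4096 threads are launched,
// so just the first 4096 of the 2^20 elements are summed; the first
// commented-out grid is presumably meant to cover the whole array instead.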
Timer t;
/// Call kernel
hipLaunchKernelGGL(( add), dim3(dimGrid), dim3(dimBlock), 0, 0, device_a, device_b, length, device_c); // ATTENTION, always pass device pointers
printLastCudaError("ERROR: ");
checkCudaErrors(hipDeviceSynchronize());
const auto elapsed = t.elapsed();
checkDeviceMatrix(device_c, sizeInBytes, 1, length, "%d ", "Device C");
/// Copy data device -> host
checkCudaErrors(hipMemcpy(host_c, device_c, sizeInBytes, hipMemcpyKind::hipMemcpyDeviceToHost));
checkHostMatrix(host_c, sizeInBytes, 1, length, "%d ", "Host C");
/// Free memory
SAFE_DELETE_ARRAY(host_a);
SAFE_DELETE_ARRAY(host_b);
SAFE_DELETE_ARRAY(host_c);
SAFE_DELETE_CUDA(device_a);
SAFE_DELETE_CUDA(device_b);
SAFE_DELETE_CUDA(device_c);
printf("Elapsed: %f\n", elapsed);
}
|
9407ca0accfc6bc01897df4cc5cf8407f66d850c.cu
|
#include <cudaDefs.h>
#include <vector>
#include <rng.h>
#include <timer.h>
#include <type_traits>
cudaError_t err = cudaError_t::cudaSuccess;
cudaDeviceProp deviceProp = cudaDeviceProp();
/// \brief Threads per block
constexpr unsigned int TPB = 256;
/// \brief Number of blocks
constexpr unsigned int NOB = 16;
/// \brief Memory block per thread block
constexpr unsigned int MBPTB = 2;
/// \brief
/// \param a
/// \param b
/// \param length
/// \param c
/// \note __restrict__ avoids pointer aliasing
template<typename T, typename = typename std::enable_if<std::is_arithmetic<T>::value, T>::type>
__global__ void add(const T *__restrict__ a, const T *__restrict__ b, const size_t length, T *__restrict__ c) {
//TODO c[i] = a[i] + b[i]
const size_t offset = (blockDim.x * blockIdx.x) + threadIdx.x;
if (offset < length) {
c[offset] = a[offset] + b[offset];
}
}
int main() {
initializeCUDA(deviceProp);
#define COMP_TYPE int
constexpr size_t length = 1U << 20U;
// constexpr unsigned int length = 1000U;
constexpr size_t sizeInBytes = length * sizeof(COMP_TYPE);
/// Allocate host data (on CPU)
auto *host_a = static_cast<COMP_TYPE *>(::operator new(sizeInBytes));
auto *host_b = static_cast<COMP_TYPE *>(::operator new(sizeInBytes));
auto *host_c = static_cast<COMP_TYPE *>(::operator new(sizeInBytes));
/// Initialize data
// constexpr float maxNum = INT_MAX / 2;
constexpr float maxNum = 255.f;
for (size_t i = 0; i < length; i++) {
host_a[i] = static_cast<COMP_TYPE>(rng(0.f, maxNum));
host_b[i] = static_cast<COMP_TYPE>(rng(0.f, maxNum));
}
/// Allocate device data (on GPU)
COMP_TYPE *device_a = nullptr;
COMP_TYPE *device_b = nullptr;
COMP_TYPE *device_c = nullptr;
checkCudaErrors(cudaMalloc((void **) &device_a, sizeInBytes));
checkCudaErrors(cudaMalloc((void **) &device_b, sizeInBytes));
checkCudaErrors(cudaMalloc((void **) &device_c, sizeInBytes));
/// Copy data host -> device
checkCudaErrors(cudaMemcpy(device_a, host_a, sizeInBytes, cudaMemcpyKind::cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(device_b, host_b, sizeInBytes, cudaMemcpyKind::cudaMemcpyHostToDevice));
checkDeviceMatrix(device_a, sizeInBytes, 1, length, "%d ", "Device A");
checkDeviceMatrix(device_b, sizeInBytes, 1, length, "%d ", "Device B");
checkDeviceMatrix(device_c, sizeInBytes, 1, length, "%d ", "Device C");
/// Prepare grid and blocks
dim3 dimBlock(TPB, 1, 1);
dim3 dimGrid(NOB, 1, 1); // What about data?
// dim3 dimGrid(getNumberOfParts(length, TPB), 1, 1); // Great number of blocks
// dim3 dimGrid(getNumberOfParts(length, TPB * MBPTB), 1, 1);
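// NOTE: with the fixed grid above only NOB * TPB = 4096 threads are launched,
// so just the first 4096 of the 2^20 elements are summed; the first
// commented-out grid is presumably meant to cover the whole array instead.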
Timer t;
/// Call kernel
add<<<dimGrid, dimBlock>>>(device_a, device_b, length, device_c); // ATTENTION, always pass device pointers
printLastCudaError("ERROR: ");
checkCudaErrors(cudaDeviceSynchronize());
const auto elapsed = t.elapsed();
checkDeviceMatrix(device_c, sizeInBytes, 1, length, "%d ", "Device C");
/// Copy data device -> host
checkCudaErrors(cudaMemcpy(host_c, device_c, sizeInBytes, cudaMemcpyKind::cudaMemcpyDeviceToHost));
checkHostMatrix(host_c, sizeInBytes, 1, length, "%d ", "Host C");
/// Free memory
SAFE_DELETE_ARRAY(host_a);
SAFE_DELETE_ARRAY(host_b);
SAFE_DELETE_ARRAY(host_c);
SAFE_DELETE_CUDA(device_a);
SAFE_DELETE_CUDA(device_b);
SAFE_DELETE_CUDA(device_c);
printf("Elapsed: %f\n", elapsed);
}
|
bee40effb0decf1d795f310ac5eb322b3d6facb1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
//#include "cuPrintf.hip"
/*
* CUDA parallel factorial computing program by bahramwhh
* Note : This program is only tested for n <= 21
* because of my hardware constraints
*
 * comments in the code are for debugging purposes only ;)
* */
__global__ void fact(int *n, int *a, int *b, size_t *result)
{
extern __shared__ size_t output[];
int arrays_size = (*n)/2;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// first round
output[tid] = a[tid] * b[tid];
// sync
__syncthreads();
//cuPrintf("First Round [tid=%d] : output[%d] = %d \n", tid, tid, output[tid]);
// other rounds
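// Pairwise reduction in shared memory: each even-indexed thread scans right
// for the next slot still holding a partial product, zeroes it, and folds it
// into its own slot; the hardcoded five rounds are enough for the small
// array sizes this program targets (n <= 21).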
for(int i=0; i < 5; i++)
{
if(!(tid%2) && output[tid] != 0)
{
int next_tid = tid+1;
while(next_tid < arrays_size && output[next_tid] == 0)
next_tid++;
if(next_tid < arrays_size)
{
//cuPrintf("Currently I want to multiply output[%d] (%d) * output[%d] (%d) = %d\n", tid, output[tid], next_tid, output[next_tid], output[tid]*output[next_tid]);
size_t temp = output[next_tid];
output[next_tid] = 0;
if(output[tid] != 0)
{
//cuPrintf(" ************ I [tid=%d] made output[%d] to zero - output[%d]=%d, output[%d]=%d, temp=%d ***************\n", tid, next_tid, tid, output[tid], next_tid, output[next_tid], temp);
//__syncthreads();
output[tid] *= temp;
//cuPrintf("I made the changes : output[%d] (%d) * output[%d] (%d) = %d, temp=%d\n", tid, output[tid], next_tid, output[next_tid], output[tid]*output[next_tid], temp);
}
else
output[next_tid] = temp;
}
}
__syncthreads();
//cuPrintf("Round[%d] [tid=%d] : output[%d] = %lld \n", i+2, tid, tid, output[tid]);
}
if(tid == 0) { // a single thread applies the odd factor and publishes the result
if(*n % 2)
output[0] *= (*n);
*result = output[0];
}
}
int main(void)
{
int *n, *dev_n, *a, *b, *dev_a, *dev_b;
size_t *result, *dev_result;
n = (int *)malloc(sizeof(int));
result = (size_t *)malloc(sizeof(size_t));
printf("Please enter your value to calculate it's factorial: \n");
scanf("%d", n);
if(*n == 1 || *n == 0 || *n == 2)
printf("Result = %d\n", *n);
else if(*n < 0)
printf("Factorial input can't be negative ;)\n");
else
{
int arrays_size = (*n)/2;
hipMalloc((void **)&dev_n, sizeof(int));
hipMalloc((void **)&dev_a, arrays_size * sizeof(int));
hipMalloc((void **)&dev_b, arrays_size * sizeof(int));
hipMalloc((void **)&dev_result, sizeof(size_t));
a = (int *)malloc(arrays_size * sizeof(int));
b = (int *)malloc(arrays_size * sizeof(int));
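// Split the factors 1..n into odd (a) and even (b) halves; when n is odd, n
// itself is excluded here (each array holds n/2 entries) and is folded in by
// the kernel's final output[0] *= n.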
for(int i=1, a_c=0, b_c=0; i <= *n - (*n % 2); i++)
{
if(i % 2)
a[a_c++] = i;
else
b[b_c++] = i;
}
hipMemcpy(dev_n, n, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_a, a, arrays_size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, arrays_size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_result, result, sizeof(size_t), hipMemcpyHostToDevice);
clock_t start, stop;
double t = 0.0;
//cudaPrintfInit(); // needed for debugging ;)
hipLaunchKernelGGL(( fact), dim3(1), dim3(arrays_size), arrays_size*sizeof(size_t), 0, dev_n, dev_a, dev_b, dev_result);
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
hipMemcpy(result, dev_result, sizeof(size_t), hipMemcpyDeviceToHost);
printf("Result is %lu\n", *result);
free(n);
free(a);
free(b);
free(result);
hipFree(dev_n);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_result);
}
}
|
bee40effb0decf1d795f310ac5eb322b3d6facb1.cu
|
#include <stdio.h>
#include <cuda.h>
//#include "cuPrintf.cu"
/*
* CUDA parallel factorial computing program by bahramwhh
* Note : This program is only tested for n <= 21
* because of my hardware constraints
*
 * comments in the code are for debugging purposes only ;)
* */
__global__ void fact(int *n, int *a, int *b, size_t *result)
{
extern __shared__ size_t output[];
int arrays_size = (*n)/2;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// first round
output[tid] = a[tid] * b[tid];
// sync
__syncthreads();
//cuPrintf("First Round [tid=%d] : output[%d] = %d \n", tid, tid, output[tid]);
// other rounds
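// Pairwise reduction in shared memory: each even-indexed thread scans right
// for the next slot still holding a partial product, zeroes it, and folds it
// into its own slot; the hardcoded five rounds are enough for the small
// array sizes this program targets (n <= 21).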
for(int i=0; i < 5; i++)
{
if(!(tid%2) && output[tid] != 0)
{
int next_tid = tid+1;
while(next_tid < arrays_size && output[next_tid] == 0)
next_tid++;
if(next_tid < arrays_size)
{
//cuPrintf("Currently I want to multiply output[%d] (%d) * output[%d] (%d) = %d\n", tid, output[tid], next_tid, output[next_tid], output[tid]*output[next_tid]);
size_t temp = output[next_tid];
output[next_tid] = 0;
if(output[tid] != 0)
{
//cuPrintf(" ************ I [tid=%d] made output[%d] to zero - output[%d]=%d, output[%d]=%d, temp=%d ***************\n", tid, next_tid, tid, output[tid], next_tid, output[next_tid], temp);
//__syncthreads();
output[tid] *= temp;
//cuPrintf("I made the changes : output[%d] (%d) * output[%d] (%d) = %d, temp=%d\n", tid, output[tid], next_tid, output[next_tid], output[tid]*output[next_tid], temp);
}
else
output[next_tid] = temp;
}
}
__syncthreads();
//cuPrintf("Round[%d] [tid=%d] : output[%d] = %lld \n", i+2, tid, tid, output[tid]);
}
if(tid == 0) { // a single thread applies the odd factor and publishes the result
if(*n % 2)
output[0] *= (*n);
*result = output[0];
}
}
int main(void)
{
int *n, *dev_n, *a, *b, *dev_a, *dev_b;
size_t *result, *dev_result;
n = (int *)malloc(sizeof(int));
result = (size_t *)malloc(sizeof(size_t));
printf("Please enter your value to calculate it's factorial: \n");
scanf("%d", n);
if(*n == 1 || *n == 0 || *n == 2)
printf("Result = %d\n", *n);
else if(*n < 0)
printf("Factorial input can't be negative ;)\n");
else
{
int arrays_size = (*n)/2;
cudaMalloc((void **)&dev_n, sizeof(int));
cudaMalloc((void **)&dev_a, arrays_size * sizeof(int));
cudaMalloc((void **)&dev_b, arrays_size * sizeof(int));
cudaMalloc((void **)&dev_result, sizeof(size_t));
a = (int *)malloc(arrays_size * sizeof(int));
b = (int *)malloc(arrays_size * sizeof(int));
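// Split the factors 1..n into odd (a) and even (b) halves; when n is odd, n
// itself is excluded here (each array holds n/2 entries) and is folded in by
// the kernel's final output[0] *= n.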
for(int i=1, a_c=0, b_c=0; i <= *n - (*n % 2); i++)
{
if(i % 2)
a[a_c++] = i;
else
b[b_c++] = i;
}
cudaMemcpy(dev_n, n, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_a, a, arrays_size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, arrays_size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_result, result, sizeof(size_t), cudaMemcpyHostToDevice);
clock_t start, stop;
double t = 0.0;
//cudaPrintfInit(); // needed for debugging ;)
fact<<<1, arrays_size, arrays_size*sizeof(size_t)>>>(dev_n, dev_a, dev_b, dev_result);
//cudaPrintfDisplay(stdout, true);
//cudaPrintfEnd();
cudaMemcpy(result, dev_result, sizeof(size_t), cudaMemcpyDeviceToHost);
printf("Result is %lu\n", *result);
free(n);
free(a);
free(b);
free(result);
cudaFree(dev_n);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_result);
}
}
|
c05e93dc02c2c19c71c13ed2ea6339a2cfd9d9d4.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2016 Adam Sierakowski, The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include "cuda_particle.h"
#include <hip/hip_runtime.h>
extern "C"
void cuda_part_malloc(void)
{
// allocate device memory on host
_parts = (part_struct**) malloc(nsubdom * sizeof(part_struct*));
cpumem += nsubdom * sizeof(part_struct*);
_pnm_re = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_im = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_re = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_im = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_re = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_im = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_re0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_im0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_re0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_im0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_re0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_im0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_re00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_im00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_re00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_im00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_re00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_im00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phase = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_phase_shell = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_flag_u = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_flag_v = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_flag_w = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
// allocate device memory on device
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(hipSetDevice(dev + dev_start));
(hipMalloc((void**) &(_parts[dev]),
sizeof(part_struct) * nparts));
gpumem += sizeof(part_struct) * nparts;
(hipMalloc((void**) &(_pnm_re[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_pnm_im[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_phinm_re[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_phinm_im[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_chinm_re[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_chinm_im[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_pnm_re0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_pnm_im0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_phinm_re0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_phinm_im0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_chinm_re0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_chinm_im0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_pnm_re00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_pnm_im00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_phinm_re00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_phinm_im00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_chinm_re00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_chinm_im00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(hipMalloc((void**) &(_phase[dev]),
sizeof(int) * dom[dev].Gcc.s3b));
gpumem += sizeof(int) * dom[dev].Gcc.s3b;
(hipMalloc((void**) &(_phase_shell[dev]),
sizeof(int) * dom[dev].Gcc.s3b));
gpumem += sizeof(int) * dom[dev].Gcc.s3b;
(hipMalloc((void**) &(_flag_u[dev]),
sizeof(int) * dom[dev].Gfx.s3b));
gpumem += sizeof(int) * dom[dev].Gfx.s3b;
(hipMalloc((void**) &(_flag_v[dev]),
sizeof(int) * dom[dev].Gfy.s3b));
gpumem += sizeof(int) * dom[dev].Gfy.s3b;
(hipMalloc((void**) &(_flag_w[dev]),
sizeof(int) * dom[dev].Gfz.s3b));
gpumem += sizeof(int) * dom[dev].Gfz.s3b;
(hipMalloc((void**) &(_binDom), sizeof(dom_struct)));
gpumem += sizeof(dom_struct);
}
}
extern "C"
void cuda_part_push(void)
{
// copy host data to device
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(hipSetDevice(dev + dev_start));
(hipMemcpy(_parts[dev], parts, sizeof(part_struct) * nparts,
hipMemcpyHostToDevice));
(hipMemcpy(_pnm_re[dev], pnm_re, sizeof(real) * coeff_stride
* nparts, hipMemcpyHostToDevice));
(hipMemcpy(_pnm_im[dev], pnm_im, sizeof(real) * coeff_stride
* nparts, hipMemcpyHostToDevice));
(hipMemcpy(_phinm_re[dev], phinm_re, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_phinm_im[dev], phinm_im, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_chinm_re[dev], chinm_re, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_chinm_im[dev], chinm_im, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_pnm_re0[dev], pnm_re0, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_pnm_im0[dev], pnm_im0, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_phinm_re0[dev], phinm_re0, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_phinm_im0[dev], phinm_im0, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_chinm_re0[dev], chinm_re0, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_chinm_im0[dev], chinm_im0, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_pnm_re00[dev], pnm_re00, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_pnm_im00[dev], pnm_im00, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_phinm_re00[dev], phinm_re00, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_phinm_im00[dev], phinm_im00, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_chinm_re00[dev], chinm_re00, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_chinm_im00[dev], chinm_im00, sizeof(real)
* coeff_stride * nparts, hipMemcpyHostToDevice));
(hipMemcpy(_phase[0], phase, sizeof(int) * dom[0].Gcc.s3b,
hipMemcpyHostToDevice));
(hipMemcpy(_phase_shell[0], phase_shell,
sizeof(int) * dom[0].Gcc.s3b, hipMemcpyHostToDevice));
(hipMemcpy(_flag_u[0], flag_u, sizeof(int) * dom[0].Gfx.s3b,
hipMemcpyHostToDevice));
(hipMemcpy(_flag_v[0], flag_v, sizeof(int) * dom[0].Gfy.s3b,
hipMemcpyHostToDevice));
(hipMemcpy(_flag_w[0], flag_w, sizeof(int) * dom[0].Gfz.s3b,
hipMemcpyHostToDevice));
(hipMemcpy(_binDom, &binDom, sizeof(dom_struct),
hipMemcpyHostToDevice));
}
}
extern "C"
void cuda_part_pull(void)
{
// all devices have the same particle data for now, so just copy one of them
(hipMemcpy(parts, _parts[0], sizeof(part_struct) * nparts,
hipMemcpyDeviceToHost));
(hipMemcpy(pnm_re, _pnm_re[0], sizeof(real) * coeff_stride
* nparts,hipMemcpyDeviceToHost));
(hipMemcpy(pnm_im, _pnm_im[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(phinm_re, _phinm_re[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(phinm_im, _phinm_im[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(chinm_re, _chinm_re[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(chinm_im, _chinm_im[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(pnm_re0, _pnm_re0[0], sizeof(real) * coeff_stride
* nparts,hipMemcpyDeviceToHost));
(hipMemcpy(pnm_im0, _pnm_im0[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(phinm_re0, _phinm_re0[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(phinm_im0, _phinm_im0[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(chinm_re0, _chinm_re0[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(chinm_im0, _chinm_im0[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(pnm_re00, _pnm_re00[0], sizeof(real) * coeff_stride
* nparts,hipMemcpyDeviceToHost));
(hipMemcpy(pnm_im00, _pnm_im00[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(phinm_re00, _phinm_re00[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(phinm_im00, _phinm_im00[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(chinm_re00, _chinm_re00[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
(hipMemcpy(chinm_im00, _chinm_im00[0], sizeof(real) * coeff_stride
* nparts, hipMemcpyDeviceToHost));
// copy for device cage setup testing
(hipMemcpy(phase, _phase[0], sizeof(int) * dom[0].Gcc.s3b,
hipMemcpyDeviceToHost));
(hipMemcpy(phase_shell, _phase_shell[0], sizeof(int) * dom[0].Gcc.s3b,
hipMemcpyDeviceToHost));
#ifdef DDEBUG
(hipMemcpy(phase_shell, _phase_shell[0],
sizeof(int) * dom[0].Gcc.s3b, hipMemcpyDeviceToHost));
(hipMemcpy(flag_u, _flag_u[0], sizeof(int) * dom[0].Gfx.s3b,
hipMemcpyDeviceToHost));
(hipMemcpy(flag_v, _flag_v[0], sizeof(int) * dom[0].Gfy.s3b,
hipMemcpyDeviceToHost));
(hipMemcpy(flag_w, _flag_w[0], sizeof(int) * dom[0].Gfz.s3b,
hipMemcpyDeviceToHost));
#endif
}
extern "C"
void cuda_part_free(void)
{
// free device memory on device
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(hipSetDevice(dev + dev_start));
(hipFree(_parts[dev]));
(hipFree(_pnm_re[dev]));
(hipFree(_pnm_im[dev]));
(hipFree(_phinm_re[dev]));
(hipFree(_phinm_im[dev]));
(hipFree(_chinm_re[dev]));
(hipFree(_chinm_im[dev]));
(hipFree(_pnm_re0[dev]));
(hipFree(_pnm_im0[dev]));
(hipFree(_phinm_re0[dev]));
(hipFree(_phinm_im0[dev]));
(hipFree(_chinm_re0[dev]));
(hipFree(_chinm_im0[dev]));
(hipFree(_pnm_re00[dev]));
(hipFree(_pnm_im00[dev]));
(hipFree(_phinm_re00[dev]));
(hipFree(_phinm_im00[dev]));
(hipFree(_chinm_re00[dev]));
(hipFree(_chinm_im00[dev]));
(hipFree(_phase[dev]));
(hipFree(_phase_shell[dev]));
(hipFree(_flag_u[dev]));
(hipFree(_flag_v[dev]));
(hipFree(_flag_w[dev]));
}
(hipFree(_binDom));
free(_parts);
free(_pnm_re);
free(_pnm_im);
free(_phinm_re);
free(_phinm_im);
free(_chinm_re);
free(_chinm_im);
free(_pnm_re0);
free(_pnm_im0);
free(_phinm_re0);
free(_phinm_im0);
free(_chinm_re0);
free(_chinm_im0);
free(_pnm_re00);
free(_pnm_im00);
free(_phinm_re00);
free(_phinm_im00);
free(_chinm_re00);
free(_chinm_im00);
free(_phase);
free(_phase_shell);
free(_flag_u);
free(_flag_v);
free(_flag_w);
}
extern "C"
void cuda_build_cages(void)
{
cuda_part_pull();
// parallelize over domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(hipSetDevice(dev + dev_start));
int i; // iterator
real X, Y, Z; // virtual particle center location
int threads_x = 0;
int threads_y = 0;
int threads_z = 0;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
int threads_c = 0; // number of threads for cage build
// reset phase
if(dom[dev].Gcc.jnb < MAX_THREADS_DIM)
threads_y = dom[dev].Gcc.jnb;
else
threads_y = MAX_THREADS_DIM;
if(dom[dev].Gcc.knb < MAX_THREADS_DIM)
threads_z = dom[dev].Gcc.knb;
else
threads_z = MAX_THREADS_DIM;
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
dim3 dimBlocks(threads_y, threads_z);
dim3 numBlocks(blocks_y, blocks_z);
hipLaunchKernelGGL(( reset_phase), dim3(numBlocks), dim3(dimBlocks), 0, 0, _phase[dev], _dom[dev]);
hipLaunchKernelGGL(( reset_phase_shell), dim3(numBlocks), dim3(dimBlocks), 0, 0, _phase_shell[dev], _dom[dev]);
// reset flag_u
if(dom[dev].Gfx.jn < MAX_THREADS_DIM)
threads_y = dom[dev].Gfx.jnb;
else
threads_y = MAX_THREADS_DIM;
if(dom[dev].Gfx.kn < MAX_THREADS_DIM)
threads_z = dom[dev].Gfx.knb;
else
threads_z = MAX_THREADS_DIM;
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
dim3 dimBlocks_u(threads_y, threads_z);
dim3 numBlocks_u(blocks_y, blocks_z);
hipLaunchKernelGGL(( reset_flag_u), dim3(numBlocks_u), dim3(dimBlocks_u), 0, 0, _flag_u[dev], _dom[dev]);
// reset flag_v
if(dom[dev].Gfy.kn < MAX_THREADS_DIM)
threads_z = dom[dev].Gfy.knb;
else
threads_z = MAX_THREADS_DIM;
if(dom[dev].Gfy.in < MAX_THREADS_DIM)
threads_x = dom[dev].Gfy.inb;
else
threads_x = MAX_THREADS_DIM;
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
dim3 dimBlocks_v(threads_z, threads_x);
dim3 numBlocks_v(blocks_z, blocks_x);
hipLaunchKernelGGL(( reset_flag_v), dim3(numBlocks_v), dim3(dimBlocks_v), 0, 0, _flag_v[dev], _dom[dev]);
// reset flag_w
if(dom[dev].Gfz.in < MAX_THREADS_DIM)
threads_x = dom[dev].Gfz.inb;
else
threads_x = MAX_THREADS_DIM;
if(dom[dev].Gfz.jn < MAX_THREADS_DIM)
threads_y = dom[dev].Gfz.jnb;
else
threads_y = MAX_THREADS_DIM;
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
dim3 dimBlocks_w(threads_x, threads_y);
dim3 numBlocks_w(blocks_x, blocks_y);
hipLaunchKernelGGL(( reset_flag_w), dim3(numBlocks_w), dim3(dimBlocks_w), 0, 0, _flag_w[dev], _dom[dev]);
// build cages and update phase
// TODO: do the first half of this on the card
threads_c = MAX_THREADS_DIM;
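// For each particle, size its cage of cells, locate the cell containing the
// particle center, and compute start/end indices, splitting each index range
// in two when the cage extends past a domain edge.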
for(i = 0; i < nparts; i++) {
// set up cage extents
// add extra cells to ensure the cage is completely contained in the bounding box
parts[i].cage.in = (int)(2.0 * ceil(parts[i].r / dom[dev].dx)) + 2;
parts[i].cage.jn = (int)(2.0 * ceil(parts[i].r / dom[dev].dy)) + 2;
parts[i].cage.kn = (int)(2.0 * ceil(parts[i].r / dom[dev].dz)) + 2;
// remove a cell from cage for odd number of cells in domain
if(dom[dev].xn % 2) {
parts[i].cage.in = parts[i].cage.in - 1;
}
if(dom[dev].yn % 2) {
parts[i].cage.jn = parts[i].cage.jn - 1;
}
if(dom[dev].zn % 2) {
parts[i].cage.kn = parts[i].cage.kn - 1;
}
// find indices of cell that contains the particle center
parts[i].cage.cx = (int)((parts[i].x - dom->xs + 0.5 * dom->dx) / dom->dx);
parts[i].cage.cy = (int)((parts[i].y - dom->ys + 0.5 * dom->dy) / dom->dy);
parts[i].cage.cz = (int)((parts[i].z - dom->zs + 0.5 * dom->dz) / dom->dz);
// compute start and end cells of cage that contains particle
parts[i].cage.is = (int)(round((parts[i].x-dom->xs)/dom->dx)
- 0.5 * parts[i].cage.in + DOM_BUF);
parts[i].cage.ie = parts[i].cage.is + parts[i].cage.in;
if(parts[i].cage.is < dom->Gcc.is) {
parts[i].cage.is = parts[i].cage.is + dom->Gcc.ie;
parts[i].cage.ibs = dom->Gcc.ie;
parts[i].cage.ibe = dom->Gcc.is;
} else if(parts[i].cage.ie > dom->Gcc.ie) {
parts[i].cage.ie = parts[i].cage.ie - dom->Gcc.ie;
parts[i].cage.ibs = dom->Gcc.ie;
parts[i].cage.ibe = dom->Gcc.is;
} else {
parts[i].cage.ibs = parts[i].cage.ie;
parts[i].cage.ibe = parts[i].cage.ie;
}
parts[i].cage.js = (int)(round((parts[i].y-dom->ys)/dom->dy)
- 0.5 * parts[i].cage.jn + DOM_BUF);
parts[i].cage.je = parts[i].cage.js + parts[i].cage.jn;
if(parts[i].cage.js < dom->Gcc.js) {
parts[i].cage.js = parts[i].cage.js + dom->Gcc.je;
parts[i].cage.jbs = dom->Gcc.je;
parts[i].cage.jbe = dom->Gcc.js;
} else if(parts[i].cage.je > dom->Gcc.je) {
parts[i].cage.je = parts[i].cage.je - dom->Gcc.je;
parts[i].cage.jbs = dom->Gcc.je;
parts[i].cage.jbe = dom->Gcc.js;
} else {
parts[i].cage.jbs = parts[i].cage.je;
parts[i].cage.jbe = parts[i].cage.je;
}
parts[i].cage.ks = (int)(round((parts[i].z-dom->zs)/dom->dz)
- 0.5 * parts[i].cage.kn + DOM_BUF);
parts[i].cage.ke = parts[i].cage.ks + parts[i].cage.kn;
if(parts[i].cage.ks < dom->Gcc.ks) {
parts[i].cage.ks = parts[i].cage.ks + dom->Gcc.ke;
parts[i].cage.kbs = dom->Gcc.ke;
parts[i].cage.kbe = dom->Gcc.ks;
} else if(parts[i].cage.ke > dom->Gcc.ke) {
parts[i].cage.ke = parts[i].cage.ke - dom->Gcc.ke;
parts[i].cage.kbs = dom->Gcc.ke;
parts[i].cage.kbe = dom->Gcc.ks;
} else {
parts[i].cage.kbs = parts[i].cage.ke;
parts[i].cage.kbe = parts[i].cage.ke;
}
}
// push particle information to device
(hipMemcpy(_parts[dev], parts, sizeof(part_struct) * nparts,
hipMemcpyHostToDevice));
threads_x = MAX_THREADS_DIM/2;
threads_y = MAX_THREADS_DIM/2;
threads_z = MAX_THREADS_DIM/2;
int xPer = (bc.uE == PERIODIC);
int yPer = (bc.vN == PERIODIC);
int zPer = (bc.wT == PERIODIC);
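// xPer, yPer, zPer are 1 when the corresponding direction is periodic; they
// enable the shifts of the virtual particle center (X, Y, Z) by one domain
// length below so cages wrap correctly across periodic boundaries.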
for(i = 0; i < nparts; i++) {
blocks_x = (int)ceil((real)parts[i].cage.in/(real)threads_x);
blocks_y = (int)ceil((real)parts[i].cage.jn/(real)threads_y);
blocks_z = (int)ceil((real)parts[i].cage.kn/(real)threads_z);
dim3 dimBlocks_3c(threads_x, threads_y, threads_z);
dim3 numBlocks_3c(blocks_x, blocks_y, blocks_z);
if(blocks_x > 0 && blocks_y > 0 && blocks_z > 0) {
// center - is < ibs, js < jbs, ks < kbs
X = parts[i].x + (parts[i].x < dom[dev].xs + parts[i].r)*dom[dev].xl*xPer;
Y = parts[i].y + (parts[i].y < dom[dev].ys + parts[i].r)*dom[dev].yl*yPer;
Z = parts[i].z + (parts[i].z < dom[dev].zs + parts[i].r)*dom[dev].zl*zPer;
hipLaunchKernelGGL(( build_phase), dim3(numBlocks_3c), dim3(dimBlocks_3c), 0, 0, i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.ks, parts[i].cage.kbs);
// WE split - ibe < ie, js < jbs, ks < kbs
if(parts[i].cage.ibe < parts[i].cage.ie) {
X = parts[i].x
- dom[dev].xl*(parts[i].x > (dom[dev].xe - parts[i].r))*xPer;
Y = parts[i].y
+ dom[dev].yl*(parts[i].y < (dom[dev].ys + parts[i].r))*yPer;
Z = parts[i].z
+ dom[dev].zl*(parts[i].z < (dom[dev].zs + parts[i].r))*zPer;
hipLaunchKernelGGL(( build_phase), dim3(numBlocks_3c), dim3(dimBlocks_3c), 0, 0, i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.ks, parts[i].cage.kbs);
}
// SN split - is < ibs, jbe < je, ks < kbs
if(parts[i].cage.jbe < parts[i].cage.je) {
X = parts[i].x
+ dom[dev].xl*(parts[i].x < (dom[dev].xs + parts[i].r))*xPer;
Y = parts[i].y
- dom[dev].yl*(parts[i].y > (dom[dev].ye - parts[i].r))*yPer;
Z = parts[i].z
+ dom[dev].zl*(parts[i].z < (dom[dev].zs + parts[i].r))*zPer;
hipLaunchKernelGGL(( build_phase), dim3(numBlocks_3c), dim3(dimBlocks_3c), 0, 0, i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.ks, parts[i].cage.kbs);
}
// BT split - is < ibs, js < jbs, kbe < ke
if(parts[i].cage.kbe < parts[i].cage.ke) {
X = parts[i].x
+ dom[dev].xl*(parts[i].x < (dom[dev].xs + parts[i].r))*xPer;
Y = parts[i].y
+ dom[dev].yl*(parts[i].y < (dom[dev].ys + parts[i].r))*yPer;
Z = parts[i].z
- dom[dev].zl*(parts[i].z > (dom[dev].ze - parts[i].r))*zPer;
hipLaunchKernelGGL(( build_phase), dim3(numBlocks_3c), dim3(dimBlocks_3c), 0, 0, i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.kbe, parts[i].cage.ke);
}
// WESN corner - ibe < ie, jbe < je, ks < kbs
if(parts[i].cage.ibe < parts[i].cage.ie &&
parts[i].cage.jbe < parts[i].cage.je) {
X = parts[i].x
- dom[dev].xl*(parts[i].x > (dom[dev].xe - parts[i].r))*xPer;
Y = parts[i].y
- dom[dev].yl*(parts[i].y > (dom[dev].ye - parts[i].r))*yPer;
Z = parts[i].z
+ dom[dev].zl*(parts[i].z < (dom[dev].zs + parts[i].r))*zPer;
hipLaunchKernelGGL(( build_phase), dim3(numBlocks_3c), dim3(dimBlocks_3c), 0, 0, i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.ks, parts[i].cage.kbs);
}
// WEBT corner - ibe < ie, js < jbs, kbe < ke
if(parts[i].cage.ibe < parts[i].cage.ie &&
parts[i].cage.kbe < parts[i].cage.ke) {
X = parts[i].x
- dom[dev].xl*(parts[i].x > (dom[dev].xe - parts[i].r))*xPer;
Y = parts[i].y
+ dom[dev].yl*(parts[i].y < (dom[dev].ys + parts[i].r))*yPer;
Z = parts[i].z
- dom[dev].zl*(parts[i].z > (dom[dev].ze - parts[i].r))*zPer;
hipLaunchKernelGGL(( build_phase), dim3(numBlocks_3c), dim3(dimBlocks_3c), 0, 0, i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.kbe, parts[i].cage.ke);
}
// SNBT corner - is < ibs, jbe < je, kbe < ke
if(parts[i].cage.jbe < parts[i].cage.je &&
parts[i].cage.kbe < parts[i].cage.ke) {
X = parts[i].x
+ dom[dev].xl*(parts[i].x < (dom[dev].xs + parts[i].r))*xPer;
Y = parts[i].y
- dom[dev].yl*(parts[i].y > (dom[dev].ye - parts[i].r))*yPer;
Z = parts[i].z
- dom[dev].zl*(parts[i].z > (dom[dev].ze - parts[i].r))*zPer;
hipLaunchKernelGGL(( build_phase), dim3(numBlocks_3c), dim3(dimBlocks_3c), 0, 0, i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.kbe, parts[i].cage.ke);
}
// All corners - ibe < ie, jbe < je, kbe < ke
if(parts[i].cage.ibe < parts[i].cage.ie &&
parts[i].cage.jbe < parts[i].cage.je &&
parts[i].cage.kbe < parts[i].cage.ke) {
X = parts[i].x
- dom[dev].xl*(parts[i].x > (dom[dev].xe - parts[i].r))*xPer;
Y = parts[i].y
- dom[dev].yl*(parts[i].y > (dom[dev].ye - parts[i].r))*yPer;
Z = parts[i].z
- dom[dev].zl*(parts[i].z > (dom[dev].ze - parts[i].r))*zPer;
hipLaunchKernelGGL(( build_phase), dim3(numBlocks_3c), dim3(dimBlocks_3c), 0, 0, i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.kbe, parts[i].cage.ke);
}
}
}
// fill in phase ghost cells for periodic boundary conditions
dim3 dimBlocks_c(threads_c, threads_c);
dim3 numBlocks_c(blocks_y, blocks_z);
if(bc.uW == PERIODIC && bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
hipLaunchKernelGGL(( cage_phases_periodic_x), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_dom[dev]);
}
if(bc.uS == PERIODIC && bc.uN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
numBlocks_c.x = blocks_z;
numBlocks_c.y = blocks_x;
hipLaunchKernelGGL(( cage_phases_periodic_y), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_dom[dev]);
}
if(bc.uB == PERIODIC && bc.uT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
numBlocks_c.x = blocks_x;
numBlocks_c.y = blocks_y;
hipLaunchKernelGGL(( cage_phases_periodic_z), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase[dev],
_dom[dev]);
}
// do flagging of particle shell
threads_x = MAX_THREADS_DIM;
threads_y = MAX_THREADS_DIM;
threads_z = MAX_THREADS_DIM;
// x
blocks_x = (int)ceil((real) dom[dev].Gcc.in / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_z);
dim3 dimBlocks_cu(threads_y, threads_z);
dim3 numBlocks_cu(blocks_y, blocks_z);
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( phase_shell_x), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0,
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev]);
// y
blocks_x = (int)ceil((real) dom[dev].Gcc.in / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_z);
dim3 dimBlocks_cv(threads_z, threads_x);
dim3 numBlocks_cv(blocks_z, blocks_x);
if(blocks_x > 0 && blocks_z > 0)
hipLaunchKernelGGL(( phase_shell_y), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0,
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev]);
// z
blocks_x = (int)ceil((real) dom[dev].Gcc.in / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_z);
dim3 dimBlocks_cw(threads_x, threads_y);
dim3 numBlocks_cw(blocks_x, blocks_y);
if(blocks_x > 0 && blocks_y > 0)
hipLaunchKernelGGL(( phase_shell_z), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0,
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev]);
// fill in phase shell ghost cells for periodic boundary conditions
if(bc.uW == PERIODIC && bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
hipLaunchKernelGGL(( cage_phases_periodic_x), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase_shell[dev],
_dom[dev]);
}
if(bc.uS == PERIODIC && bc.uN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
numBlocks_c.x = blocks_z;
numBlocks_c.y = blocks_x;
hipLaunchKernelGGL(( cage_phases_periodic_y), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase_shell[dev],
_dom[dev]);
}
if(bc.uB == PERIODIC && bc.uT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
numBlocks_c.x = blocks_x;
numBlocks_c.y = blocks_y;
hipLaunchKernelGGL(( cage_phases_periodic_z), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _phase_shell[dev],
_dom[dev]);
}
// fill in island cells
//phase_shell_remove_islands<<<numBlocks_cu, dimBlocks_cu>>>(_dom[dev],
//_phase_shell[dev]);
// flag u, v, w on particles
// u
blocks_x = (int)ceil((real) dom[dev].Gcc.in / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_u), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev]);
// v
blocks_x = (int)ceil((real) dom[dev].Gcc.in / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
hipLaunchKernelGGL(( cage_flag_v), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev]);
// w
blocks_x = (int)ceil((real) dom[dev].Gcc.in / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
hipLaunchKernelGGL(( cage_flag_w), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev]);
// flag external boundaries
if(bc.uW != PERIODIC && bc.uE != PERIODIC)
hipLaunchKernelGGL(( flag_external_u), dim3(numBlocks_u), dim3(dimBlocks_u), 0, 0, _flag_u[dev], _dom[dev]);
if(bc.vS != PERIODIC && bc.vN != PERIODIC)
hipLaunchKernelGGL(( flag_external_v), dim3(numBlocks_v), dim3(dimBlocks_v), 0, 0, _flag_v[dev], _dom[dev]);
if(bc.wB != PERIODIC && bc.wT != PERIODIC)
hipLaunchKernelGGL(( flag_external_w), dim3(numBlocks_w), dim3(dimBlocks_w), 0, 0, _flag_w[dev], _dom[dev]);
// fill in ghost cells for periodic boundary conditions
// flag_u
if(bc.uW == PERIODIC && bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_u_periodic_x), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.uS == PERIODIC && bc.uN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
numBlocks_cu.x = blocks_z;
numBlocks_cu.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_u_periodic_y), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
if(bc.uB == PERIODIC && bc.uT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
numBlocks_cu.x = blocks_x;
numBlocks_cu.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_u_periodic_z), dim3(numBlocks_cu), dim3(dimBlocks_cu), 0, 0, _flag_u[dev],
_dom[dev]);
}
//flag_v
if(bc.vW == PERIODIC && bc.vE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
numBlocks_cv.x = blocks_y;
numBlocks_cv.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_v_periodic_x), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.vS == PERIODIC && bc.vN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_v_periodic_y), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
if(bc.vB == PERIODIC && bc.vT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
numBlocks_cv.x = blocks_x;
numBlocks_cv.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_v_periodic_z), dim3(numBlocks_cv), dim3(dimBlocks_cv), 0, 0, _flag_v[dev],
_dom[dev]);
}
// flag_w
if(bc.wW == PERIODIC && bc.wE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
numBlocks_cw.x = blocks_y;
numBlocks_cw.y = blocks_z;
hipLaunchKernelGGL(( cage_flag_w_periodic_x), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
if(bc.wS == PERIODIC && bc.wN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
numBlocks_cw.x = blocks_z;
numBlocks_cw.y = blocks_x;
hipLaunchKernelGGL(( cage_flag_w_periodic_y), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
if(bc.wB == PERIODIC && bc.wT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
hipLaunchKernelGGL(( cage_flag_w_periodic_z), dim3(numBlocks_cw), dim3(dimBlocks_cw), 0, 0, _flag_w[dev],
_dom[dev]);
}
}
}
extern "C"
void cuda_part_BC(void)
{
// parallelize across domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(hipSetDevice(dev + dev_start));
int threads_x = MAX_THREADS_DIM;
int threads_y = MAX_THREADS_DIM;
int threads_z = MAX_THREADS_DIM;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
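// apply the particle boundary conditions to each velocity component on its
// face-centered grid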
// u
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
dim3 dimBlocks_x(threads_y, threads_z);
dim3 numBlocks_x(blocks_y, blocks_z);
hipLaunchKernelGGL(( part_BC_u), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _u[dev], _phase[dev],
_flag_u[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// v
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
dim3 dimBlocks_y(threads_z, threads_x);
dim3 numBlocks_y(blocks_z, blocks_x);
hipLaunchKernelGGL(( part_BC_v), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, _v[dev], _phase[dev],
_flag_v[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// w
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
dim3 dimBlocks_z(threads_x, threads_y);
dim3 numBlocks_z(blocks_x, blocks_y);
hipLaunchKernelGGL(( part_BC_w), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, _w[dev], _phase[dev],
_flag_w[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
}
}
extern "C"
void cuda_part_BC_star(void)
{
// parallelize across domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(hipSetDevice(dev + dev_start));
int threads_x = MAX_THREADS_DIM;
int threads_y = MAX_THREADS_DIM;
int threads_z = MAX_THREADS_DIM;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
// u
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
dim3 dimBlocks_x(threads_y, threads_z);
dim3 numBlocks_x(blocks_y, blocks_z);
hipLaunchKernelGGL(( part_BC_u), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _u_star[dev], _phase[dev],
_flag_u[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// v
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
dim3 dimBlocks_y(threads_z, threads_x);
dim3 numBlocks_y(blocks_z, blocks_x);
hipLaunchKernelGGL(( part_BC_v), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, _v_star[dev], _phase[dev],
_flag_v[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// w
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
dim3 dimBlocks_z(threads_x, threads_y);
dim3 numBlocks_z(blocks_x, blocks_y);
hipLaunchKernelGGL(( part_BC_w), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, _w_star[dev], _phase[dev],
_flag_w[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
}
}
// already CPU parallelized in cuda_PP_bicgstab, which calls it
extern "C"
void cuda_part_BC_p(int dev)
{
int threads_c = MAX_THREADS_DIM;
int blocks_y = 0;
int blocks_z = 0;
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_c);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_c);
dim3 dimBlocks_c(threads_c, threads_c);
dim3 numBlocks_c(blocks_y, blocks_z);
hipLaunchKernelGGL(( part_BC_p), dim3(numBlocks_c), dim3(dimBlocks_c), 0, 0, _p0[dev], _rhs_p[dev], _phase[dev],
_phase_shell[dev], _parts[dev], _dom[dev],
mu, nu, dt, dt0, gradP, rho_f, coeff_stride,
_pnm_re00[dev], _pnm_im00[dev],
_phinm_re00[dev], _phinm_im00[dev], _chinm_re00[dev], _chinm_im00[dev],
_pnm_re[dev], _pnm_im[dev],
_phinm_re[dev], _phinm_im[dev], _chinm_re[dev], _chinm_im[dev]);
}
extern "C"
void cuda_part_p_fill(void)
{
// parallelize across domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(hipSetDevice(dev + dev_start));
int threads_c = MAX_THREADS_DIM;
int blocks_y = 0;
int blocks_z = 0;
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_c);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_c);
dim3 dimblocks_c(threads_c, threads_c);
dim3 numblocks_c(blocks_y, blocks_z);
hipLaunchKernelGGL(( part_BC_p_fill), dim3(numblocks_c), dim3(dimblocks_c), 0, 0, _p[dev], _phase[dev],
_parts[dev], _dom[dev],
mu, nu, rho_f, gradP, coeff_stride,
_pnm_re[dev], _pnm_im[dev]);
}
}
extern "C"
void cuda_store_coeffs(void)
{
// parallelize over CPU threads
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(hipSetDevice(dev + dev_start));
// coeff00 & coeff ==> coeff0 (Adams-Bashforth)
dim3 dimBlocks(coeff_stride);
dim3 numBlocks(nparts);
// as implemented, this actually makes convergence slower
/*if(dt0 > 0.) {
predict_coeffs<<<numBlocks, dimBlocks>>>(dt0, dt,
_pnm_re00[dev], _pnm_im00[dev], _phinm_re00[dev], _phinm_im00[dev],
_chinm_re00[dev], _chinm_im00[dev],
_pnm_re0[dev], _pnm_im0[dev], _phinm_re0[dev], _phinm_im0[dev],
_chinm_re0[dev], _chinm_im0[dev],
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev], coeff_stride);
}
*/
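// copy the current coefficient sets into their *00 counterparts on the
// device (device-to-device)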
(hipMemcpy(_pnm_re00[dev], _pnm_re[dev],
sizeof(real) * coeff_stride*nparts, hipMemcpyDeviceToDevice));
(hipMemcpy(_pnm_im00[dev], _pnm_im[dev],
sizeof(real) * coeff_stride*nparts, hipMemcpyDeviceToDevice));
(hipMemcpy(_phinm_re00[dev], _phinm_re[dev],
sizeof(real) * coeff_stride*nparts, hipMemcpyDeviceToDevice));
(hipMemcpy(_phinm_im00[dev], _phinm_im[dev],
sizeof(real) * coeff_stride*nparts, hipMemcpyDeviceToDevice));
(hipMemcpy(_chinm_re00[dev], _chinm_re[dev],
sizeof(real) * coeff_stride*nparts, hipMemcpyDeviceToDevice));
(hipMemcpy(_chinm_im00[dev], _chinm_im[dev],
sizeof(real) * coeff_stride*nparts, hipMemcpyDeviceToDevice));
}
}
|
c05e93dc02c2c19c71c13ed2ea6339a2cfd9d9d4.cu
|
/*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2016 Adam Sierakowski, The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include "cuda_particle.h"
#include <cuda.h>
extern "C"
void cuda_part_malloc(void)
{
// allocate device memory on host
_parts = (part_struct**) malloc(nsubdom * sizeof(part_struct*));
cpumem += nsubdom * sizeof(part_struct*);
_pnm_re = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_im = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_re = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_im = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_re = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_im = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_re0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_im0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_re0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_im0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_re0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_im0 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_re00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_pnm_im00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_re00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phinm_im00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_re00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_chinm_im00 = (real**) malloc(nsubdom * sizeof(real*));
cpumem += nsubdom * sizeof(real*);
_phase = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_phase_shell = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_flag_u = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_flag_v = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
_flag_w = (int**) malloc(nsubdom * sizeof(int*));
cpumem += nsubdom * sizeof(int);
// allocate device memory on device
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(cudaSetDevice(dev + dev_start));
(cudaMalloc((void**) &(_parts[dev]),
sizeof(part_struct) * nparts));
gpumem += sizeof(part_struct) * nparts;
(cudaMalloc((void**) &(_pnm_re[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_pnm_im[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_phinm_re[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_phinm_im[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_chinm_re[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_chinm_im[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_pnm_re0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_pnm_im0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_phinm_re0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_phinm_im0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_chinm_re0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_chinm_im0[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_pnm_re00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_pnm_im00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_phinm_re00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_phinm_im00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_chinm_re00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_chinm_im00[dev]),
sizeof(real) * coeff_stride * nparts));
gpumem += sizeof(real) * coeff_stride * nparts;
(cudaMalloc((void**) &(_phase[dev]),
sizeof(int) * dom[dev].Gcc.s3b));
gpumem += sizeof(int) * dom[dev].Gcc.s3b;
(cudaMalloc((void**) &(_phase_shell[dev]),
sizeof(int) * dom[dev].Gcc.s3b));
gpumem += sizeof(int) * dom[dev].Gcc.s3b;
(cudaMalloc((void**) &(_flag_u[dev]),
sizeof(int) * dom[dev].Gfx.s3b));
gpumem += sizeof(int) * dom[dev].Gfx.s3b;
(cudaMalloc((void**) &(_flag_v[dev]),
sizeof(int) * dom[dev].Gfy.s3b));
gpumem += sizeof(int) * dom[dev].Gfy.s3b;
(cudaMalloc((void**) &(_flag_w[dev]),
sizeof(int) * dom[dev].Gfz.s3b));
gpumem += sizeof(int) * dom[dev].Gfz.s3b;
(cudaMalloc((void**) &(_binDom), sizeof(dom_struct)));
gpumem += sizeof(dom_struct);
}
}
extern "C"
void cuda_part_push(void)
{
// copy host data to device
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(cudaSetDevice(dev + dev_start));
(cudaMemcpy(_parts[dev], parts, sizeof(part_struct) * nparts,
cudaMemcpyHostToDevice));
(cudaMemcpy(_pnm_re[dev], pnm_re, sizeof(real) * coeff_stride
* nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_pnm_im[dev], pnm_im, sizeof(real) * coeff_stride
* nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_phinm_re[dev], phinm_re, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_phinm_im[dev], phinm_im, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_chinm_re[dev], chinm_re, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_chinm_im[dev], chinm_im, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_pnm_re0[dev], pnm_re0, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_pnm_im0[dev], pnm_im0, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_phinm_re0[dev], phinm_re0, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_phinm_im0[dev], phinm_im0, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_chinm_re0[dev], chinm_re0, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_chinm_im0[dev], chinm_im0, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_pnm_re00[dev], pnm_re00, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_pnm_im00[dev], pnm_im00, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_phinm_re00[dev], phinm_re00, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_phinm_im00[dev], phinm_im00, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_chinm_re00[dev], chinm_re00, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_chinm_im00[dev], chinm_im00, sizeof(real)
* coeff_stride * nparts, cudaMemcpyHostToDevice));
(cudaMemcpy(_phase[0], phase, sizeof(int) * dom[0].Gcc.s3b,
cudaMemcpyHostToDevice));
(cudaMemcpy(_phase_shell[0], phase_shell,
sizeof(int) * dom[0].Gcc.s3b, cudaMemcpyHostToDevice));
(cudaMemcpy(_flag_u[0], flag_u, sizeof(int) * dom[0].Gfx.s3b,
cudaMemcpyHostToDevice));
(cudaMemcpy(_flag_v[0], flag_v, sizeof(int) * dom[0].Gfy.s3b,
cudaMemcpyHostToDevice));
(cudaMemcpy(_flag_w[0], flag_w, sizeof(int) * dom[0].Gfz.s3b,
cudaMemcpyHostToDevice));
(cudaMemcpy(_binDom, &binDom, sizeof(dom_struct),
cudaMemcpyHostToDevice));
}
}
extern "C"
void cuda_part_pull(void)
{
// all devices have the same particle data for now, so just copy one of them
(cudaMemcpy(parts, _parts[0], sizeof(part_struct) * nparts,
cudaMemcpyDeviceToHost));
(cudaMemcpy(pnm_re, _pnm_re[0], sizeof(real) * coeff_stride
* nparts,cudaMemcpyDeviceToHost));
(cudaMemcpy(pnm_im, _pnm_im[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(phinm_re, _phinm_re[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(phinm_im, _phinm_im[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(chinm_re, _chinm_re[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(chinm_im, _chinm_im[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(pnm_re0, _pnm_re0[0], sizeof(real) * coeff_stride
* nparts,cudaMemcpyDeviceToHost));
(cudaMemcpy(pnm_im0, _pnm_im0[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(phinm_re0, _phinm_re0[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(phinm_im0, _phinm_im0[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(chinm_re0, _chinm_re0[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(chinm_im0, _chinm_im0[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(pnm_re00, _pnm_re00[0], sizeof(real) * coeff_stride
* nparts,cudaMemcpyDeviceToHost));
(cudaMemcpy(pnm_im00, _pnm_im00[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(phinm_re00, _phinm_re00[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(phinm_im00, _phinm_im00[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(chinm_re00, _chinm_re00[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
(cudaMemcpy(chinm_im00, _chinm_im00[0], sizeof(real) * coeff_stride
* nparts, cudaMemcpyDeviceToHost));
// copy for device cage setup testing
(cudaMemcpy(phase, _phase[0], sizeof(int) * dom[0].Gcc.s3b,
cudaMemcpyDeviceToHost));
(cudaMemcpy(phase_shell, _phase_shell[0], sizeof(int) * dom[0].Gcc.s3b,
cudaMemcpyDeviceToHost));
#ifdef DDEBUG
(cudaMemcpy(phase_shell, _phase_shell[0],
sizeof(int) * dom[0].Gcc.s3b, cudaMemcpyDeviceToHost));
(cudaMemcpy(flag_u, _flag_u[0], sizeof(int) * dom[0].Gfx.s3b,
cudaMemcpyDeviceToHost));
(cudaMemcpy(flag_v, _flag_v[0], sizeof(int) * dom[0].Gfy.s3b,
cudaMemcpyDeviceToHost));
(cudaMemcpy(flag_w, _flag_w[0], sizeof(int) * dom[0].Gfz.s3b,
cudaMemcpyDeviceToHost));
#endif
}
extern "C"
void cuda_part_free(void)
{
// free device memory on device
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(cudaSetDevice(dev + dev_start));
(cudaFree(_parts[dev]));
(cudaFree(_pnm_re[dev]));
(cudaFree(_pnm_im[dev]));
(cudaFree(_phinm_re[dev]));
(cudaFree(_phinm_im[dev]));
(cudaFree(_chinm_re[dev]));
(cudaFree(_chinm_im[dev]));
(cudaFree(_pnm_re0[dev]));
(cudaFree(_pnm_im0[dev]));
(cudaFree(_phinm_re0[dev]));
(cudaFree(_phinm_im0[dev]));
(cudaFree(_chinm_re0[dev]));
(cudaFree(_chinm_im0[dev]));
(cudaFree(_pnm_re00[dev]));
(cudaFree(_pnm_im00[dev]));
(cudaFree(_phinm_re00[dev]));
(cudaFree(_phinm_im00[dev]));
(cudaFree(_chinm_re00[dev]));
(cudaFree(_chinm_im00[dev]));
(cudaFree(_phase[dev]));
(cudaFree(_phase_shell[dev]));
(cudaFree(_flag_u[dev]));
(cudaFree(_flag_v[dev]));
(cudaFree(_flag_w[dev]));
}
(cudaFree(_binDom));
free(_parts);
free(_pnm_re);
free(_pnm_im);
free(_phinm_re);
free(_phinm_im);
free(_chinm_re);
free(_chinm_im);
free(_pnm_re0);
free(_pnm_im0);
free(_phinm_re0);
free(_phinm_im0);
free(_chinm_re0);
free(_chinm_im0);
free(_pnm_re00);
free(_pnm_im00);
free(_phinm_re00);
free(_phinm_im00);
free(_chinm_re00);
free(_chinm_im00);
free(_phase);
free(_phase_shell);
free(_flag_u);
free(_flag_v);
free(_flag_w);
}
extern "C"
void cuda_build_cages(void)
{
cuda_part_pull();
// parallelize over domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(cudaSetDevice(dev + dev_start));
int i; // iterator
real X, Y, Z; // virtual particle center location
int threads_x = 0;
int threads_y = 0;
int threads_z = 0;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
int threads_c = 0; // number of threads for cage build
// reset phase
if(dom[dev].Gcc.jnb < MAX_THREADS_DIM)
threads_y = dom[dev].Gcc.jnb;
else
threads_y = MAX_THREADS_DIM;
if(dom[dev].Gcc.knb < MAX_THREADS_DIM)
threads_z = dom[dev].Gcc.knb;
else
threads_z = MAX_THREADS_DIM;
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
dim3 dimBlocks(threads_y, threads_z);
dim3 numBlocks(blocks_y, blocks_z);
reset_phase<<<numBlocks, dimBlocks>>>(_phase[dev], _dom[dev]);
reset_phase_shell<<<numBlocks, dimBlocks>>>(_phase_shell[dev], _dom[dev]);
// reset flag_u
if(dom[dev].Gfx.jn < MAX_THREADS_DIM)
threads_y = dom[dev].Gfx.jnb;
else
threads_y = MAX_THREADS_DIM;
if(dom[dev].Gfx.kn < MAX_THREADS_DIM)
threads_z = dom[dev].Gfx.knb;
else
threads_z = MAX_THREADS_DIM;
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
dim3 dimBlocks_u(threads_y, threads_z);
dim3 numBlocks_u(blocks_y, blocks_z);
reset_flag_u<<<numBlocks_u, dimBlocks_u>>>(_flag_u[dev], _dom[dev]);
// reset flag_v
if(dom[dev].Gfy.kn < MAX_THREADS_DIM)
threads_z = dom[dev].Gfy.knb;
else
threads_z = MAX_THREADS_DIM;
if(dom[dev].Gfy.in < MAX_THREADS_DIM)
threads_x = dom[dev].Gfy.inb;
else
threads_x = MAX_THREADS_DIM;
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
dim3 dimBlocks_v(threads_z, threads_x);
dim3 numBlocks_v(blocks_z, blocks_x);
reset_flag_v<<<numBlocks_v, dimBlocks_v>>>(_flag_v[dev], _dom[dev]);
// reset flag_w
if(dom[dev].Gfz.in < MAX_THREADS_DIM)
threads_x = dom[dev].Gfz.inb;
else
threads_x = MAX_THREADS_DIM;
if(dom[dev].Gfz.jn < MAX_THREADS_DIM)
threads_y = dom[dev].Gfz.jnb;
else
threads_y = MAX_THREADS_DIM;
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
dim3 dimBlocks_w(threads_x, threads_y);
dim3 numBlocks_w(blocks_x, blocks_y);
reset_flag_w<<<numBlocks_w, dimBlocks_w>>>(_flag_w[dev], _dom[dev]);
// build cages and update phase
// TODO: do the first half of this on the card
threads_c = MAX_THREADS_DIM;
for(i = 0; i < nparts; i++) {
// set up cage extents
// add 4 cells to ensure cage is completely contained in the bounding box
parts[i].cage.in = (int)(2.0 * ceil(parts[i].r / dom[dev].dx)) + 2;
parts[i].cage.jn = (int)(2.0 * ceil(parts[i].r / dom[dev].dy)) + 2;
parts[i].cage.kn = (int)(2.0 * ceil(parts[i].r / dom[dev].dz)) + 2;
// remove a cell from cage for odd number of cells in domain
if(dom[dev].xn % 2) {
parts[i].cage.in = parts[i].cage.in - 1;
}
if(dom[dev].yn % 2) {
parts[i].cage.jn = parts[i].cage.jn - 1;
}
if(dom[dev].zn % 2) {
parts[i].cage.kn = parts[i].cage.kn - 1;
}
// find indices of cell that contains the particle center
parts[i].cage.cx = (int)((parts[i].x - dom->xs + 0.5 * dom->dx) / dom->dx);
parts[i].cage.cy = (int)((parts[i].y - dom->ys + 0.5 * dom->dy) / dom->dy);
parts[i].cage.cz = (int)((parts[i].z - dom->zs + 0.5 * dom->dz) / dom->dz);
// compute start and end cells of cage that contains particle
parts[i].cage.is = (int)(round((parts[i].x-dom->xs)/dom->dx)
- 0.5 * parts[i].cage.in + DOM_BUF);
parts[i].cage.ie = parts[i].cage.is + parts[i].cage.in;
if(parts[i].cage.is < dom->Gcc.is) {
parts[i].cage.is = parts[i].cage.is + dom->Gcc.ie;
parts[i].cage.ibs = dom->Gcc.ie;
parts[i].cage.ibe = dom->Gcc.is;
} else if(parts[i].cage.ie > dom->Gcc.ie) {
parts[i].cage.ie = parts[i].cage.ie - dom->Gcc.ie;
parts[i].cage.ibs = dom->Gcc.ie;
parts[i].cage.ibe = dom->Gcc.is;
} else {
parts[i].cage.ibs = parts[i].cage.ie;
parts[i].cage.ibe = parts[i].cage.ie;
}
parts[i].cage.js = (int)(round((parts[i].y-dom->ys)/dom->dy)
- 0.5 * parts[i].cage.jn + DOM_BUF);
parts[i].cage.je = parts[i].cage.js + parts[i].cage.jn;
if(parts[i].cage.js < dom->Gcc.js) {
parts[i].cage.js = parts[i].cage.js + dom->Gcc.je;
parts[i].cage.jbs = dom->Gcc.je;
parts[i].cage.jbe = dom->Gcc.js;
} else if(parts[i].cage.je > dom->Gcc.je) {
parts[i].cage.je = parts[i].cage.je - dom->Gcc.je;
parts[i].cage.jbs = dom->Gcc.je;
parts[i].cage.jbe = dom->Gcc.js;
} else {
parts[i].cage.jbs = parts[i].cage.je;
parts[i].cage.jbe = parts[i].cage.je;
}
parts[i].cage.ks = (int)(round((parts[i].z-dom->zs)/dom->dz)
- 0.5 * parts[i].cage.kn + DOM_BUF);
parts[i].cage.ke = parts[i].cage.ks + parts[i].cage.kn;
if(parts[i].cage.ks < dom->Gcc.ks) {
parts[i].cage.ks = parts[i].cage.ks + dom->Gcc.ke;
parts[i].cage.kbs = dom->Gcc.ke;
parts[i].cage.kbe = dom->Gcc.ks;
} else if(parts[i].cage.ke > dom->Gcc.ke) {
parts[i].cage.ke = parts[i].cage.ke - dom->Gcc.ke;
parts[i].cage.kbs = dom->Gcc.ke;
parts[i].cage.kbe = dom->Gcc.ks;
} else {
parts[i].cage.kbs = parts[i].cage.ke;
parts[i].cage.kbe = parts[i].cage.ke;
}
}
// push particle information to device
(cudaMemcpy(_parts[dev], parts, sizeof(part_struct) * nparts,
cudaMemcpyHostToDevice));
threads_x = MAX_THREADS_DIM/2;
threads_y = MAX_THREADS_DIM/2;
threads_z = MAX_THREADS_DIM/2;
int xPer = (bc.uE == PERIODIC);
int yPer = (bc.vN == PERIODIC);
int zPer = (bc.wT == PERIODIC);
for(i = 0; i < nparts; i++) {
blocks_x = (int)ceil((real)parts[i].cage.in/(real)threads_x);
blocks_y = (int)ceil((real)parts[i].cage.jn/(real)threads_y);
blocks_z = (int)ceil((real)parts[i].cage.kn/(real)threads_z);
dim3 dimBlocks_3c(threads_x, threads_y, threads_z);
dim3 numBlocks_3c(blocks_x, blocks_y, blocks_z);
if(blocks_x > 0 && blocks_y > 0 && blocks_z > 0) {
// center - is < ibs, js < jbs, ks < kbs
X = parts[i].x + (parts[i].x < dom[dev].xs + parts[i].r)*dom[dev].xl*xPer;
Y = parts[i].y + (parts[i].y < dom[dev].ys + parts[i].r)*dom[dev].yl*yPer;
Z = parts[i].z + (parts[i].z < dom[dev].zs + parts[i].r)*dom[dev].zl*zPer;
build_phase<<<numBlocks_3c, dimBlocks_3c>>>(i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.ks, parts[i].cage.kbs);
// WE split - ibe < ie, js < jbs, ks < kbs
if(parts[i].cage.ibe < parts[i].cage.ie) {
X = parts[i].x
- dom[dev].xl*(parts[i].x > (dom[dev].xe - parts[i].r))*xPer;
Y = parts[i].y
+ dom[dev].yl*(parts[i].y < (dom[dev].ys + parts[i].r))*yPer;
Z = parts[i].z
+ dom[dev].zl*(parts[i].z < (dom[dev].zs + parts[i].r))*zPer;
build_phase<<<numBlocks_3c, dimBlocks_3c>>>(i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.ks, parts[i].cage.kbs);
}
// SN split - is < ibs, jbe < je, ks < kbs
if(parts[i].cage.jbe < parts[i].cage.je) {
X = parts[i].x
+ dom[dev].xl*(parts[i].x < (dom[dev].xs + parts[i].r))*xPer;
Y = parts[i].y
- dom[dev].yl*(parts[i].y > (dom[dev].ye - parts[i].r))*yPer;
Z = parts[i].z
+ dom[dev].zl*(parts[i].z < (dom[dev].zs + parts[i].r))*zPer;
build_phase<<<numBlocks_3c, dimBlocks_3c>>>(i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.ks, parts[i].cage.kbs);
}
// BT split - is < ibs, js < jbs, kbe < ke
if(parts[i].cage.kbe < parts[i].cage.ke) {
X = parts[i].x
+ dom[dev].xl*(parts[i].x < (dom[dev].xs + parts[i].r))*xPer;
Y = parts[i].y
+ dom[dev].yl*(parts[i].y < (dom[dev].ys + parts[i].r))*yPer;
Z = parts[i].z
- dom[dev].zl*(parts[i].z > (dom[dev].ze - parts[i].r))*zPer;
build_phase<<<numBlocks_3c, dimBlocks_3c>>>(i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.kbe, parts[i].cage.ke);
}
// WESN corner - ibe < ie, jbe < je, ks < kbs
if(parts[i].cage.ibe < parts[i].cage.ie &&
parts[i].cage.jbe < parts[i].cage.je) {
X = parts[i].x
- dom[dev].xl*(parts[i].x > (dom[dev].xe - parts[i].r))*xPer;
Y = parts[i].y
- dom[dev].yl*(parts[i].y > (dom[dev].ye - parts[i].r))*yPer;
Z = parts[i].z
+ dom[dev].zl*(parts[i].z < (dom[dev].zs + parts[i].r))*zPer;
build_phase<<<numBlocks_3c, dimBlocks_3c>>>(i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.ks, parts[i].cage.kbs);
}
// WEBT corner - ibe < ie, js < jbs, kbe < ke
if(parts[i].cage.ibe < parts[i].cage.ie &&
parts[i].cage.kbe < parts[i].cage.ke) {
X = parts[i].x
- dom[dev].xl*(parts[i].x > (dom[dev].xe - parts[i].r))*xPer;
Y = parts[i].y
+ dom[dev].yl*(parts[i].y < (dom[dev].ys + parts[i].r))*yPer;
Z = parts[i].z
- dom[dev].zl*(parts[i].z > (dom[dev].ze - parts[i].r))*zPer;
build_phase<<<numBlocks_3c, dimBlocks_3c>>>(i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.js, parts[i].cage.jbs,
parts[i].cage.kbe, parts[i].cage.ke);
}
// SNBT corner - is < ibs, jbe < je, kbe < ke
if(parts[i].cage.jbe < parts[i].cage.je &&
parts[i].cage.kbe < parts[i].cage.ke) {
X = parts[i].x
+ dom[dev].xl*(parts[i].x < (dom[dev].xs + parts[i].r))*xPer;
Y = parts[i].y
- dom[dev].yl*(parts[i].y > (dom[dev].ye - parts[i].r))*yPer;
Z = parts[i].z
- dom[dev].zl*(parts[i].z > (dom[dev].ze - parts[i].r))*zPer;
build_phase<<<numBlocks_3c, dimBlocks_3c>>>(i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.is, parts[i].cage.ibs,
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.kbe, parts[i].cage.ke);
}
// All corners - ibe < ie, jbe < je, kbe < ke
if(parts[i].cage.ibe < parts[i].cage.ie &&
parts[i].cage.jbe < parts[i].cage.je &&
parts[i].cage.kbe < parts[i].cage.ke) {
X = parts[i].x
- dom[dev].xl*(parts[i].x > (dom[dev].xe - parts[i].r))*xPer;
Y = parts[i].y
- dom[dev].yl*(parts[i].y > (dom[dev].ye - parts[i].r))*yPer;
Z = parts[i].z
- dom[dev].zl*(parts[i].z > (dom[dev].ze - parts[i].r))*zPer;
build_phase<<<numBlocks_3c, dimBlocks_3c>>>(i, _parts[dev],
_phase[dev], _dom[dev], X, Y, Z,
parts[i].cage.ibe, parts[i].cage.ie,
parts[i].cage.jbe, parts[i].cage.je,
parts[i].cage.kbe, parts[i].cage.ke);
}
}
}
// fill in phase ghost cells for periodic boundary conditions
dim3 dimBlocks_c(threads_c, threads_c);
dim3 numBlocks_c(blocks_y, blocks_z);
if(bc.uW == PERIODIC && bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
cage_phases_periodic_x<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_dom[dev]);
}
if(bc.uS == PERIODIC && bc.uN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
numBlocks_c.x = blocks_z;
numBlocks_c.y = blocks_x;
cage_phases_periodic_y<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_dom[dev]);
}
if(bc.uB == PERIODIC && bc.uT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
numBlocks_c.x = blocks_x;
numBlocks_c.y = blocks_y;
cage_phases_periodic_z<<<numBlocks_c, dimBlocks_c>>>(_phase[dev],
_dom[dev]);
}
// do flagging of particle shell
threads_x = MAX_THREADS_DIM;
threads_y = MAX_THREADS_DIM;
threads_z = MAX_THREADS_DIM;
// x
blocks_x = (int)ceil((real) dom[dev].Gcc.in / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_z);
dim3 dimBlocks_cu(threads_y, threads_z);
dim3 numBlocks_cu(blocks_y, blocks_z);
if(blocks_y > 0 && blocks_z > 0)
phase_shell_x<<<numBlocks_cu, dimBlocks_cu>>>(
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev]);
// y
blocks_x = (int)ceil((real) dom[dev].Gcc.in / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_z);
dim3 dimBlocks_cv(threads_z, threads_x);
dim3 numBlocks_cv(blocks_z, blocks_x);
if(blocks_x > 0 && blocks_z > 0)
phase_shell_y<<<numBlocks_cv, dimBlocks_cv>>>(
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev]);
// z
blocks_x = (int)ceil((real) dom[dev].Gcc.in / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_z);
dim3 dimBlocks_cw(threads_x, threads_y);
dim3 numBlocks_cw(blocks_x, blocks_y);
if(blocks_x > 0 && blocks_y > 0)
phase_shell_z<<<numBlocks_cw, dimBlocks_cw>>>(
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev]);
// fill in phase shell ghost cells for periodic boundary conditions
if(bc.uW == PERIODIC && bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
numBlocks_c.x = blocks_y;
numBlocks_c.y = blocks_z;
cage_phases_periodic_x<<<numBlocks_c, dimBlocks_c>>>(_phase_shell[dev],
_dom[dev]);
}
if(bc.uS == PERIODIC && bc.uN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gcc.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
numBlocks_c.x = blocks_z;
numBlocks_c.y = blocks_x;
cage_phases_periodic_y<<<numBlocks_c, dimBlocks_c>>>(_phase_shell[dev],
_dom[dev]);
}
if(bc.uB == PERIODIC && bc.uT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) threads_y);
numBlocks_c.x = blocks_x;
numBlocks_c.y = blocks_y;
cage_phases_periodic_z<<<numBlocks_c, dimBlocks_c>>>(_phase_shell[dev],
_dom[dev]);
}
// fill in island cells
//phase_shell_remove_islands<<<numBlocks_cu, dimBlocks_cu>>>(_dom[dev],
//_phase_shell[dev]);
// flag u, v, w on particles
// u
blocks_x = (int)ceil((real) dom[dev].Gcc.in / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
if(blocks_y > 0 && blocks_z > 0)
cage_flag_u<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev]);
// v
blocks_x = (int)ceil((real) dom[dev].Gcc.in / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_z);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
if(blocks_x > 0 && blocks_z > 0)
cage_flag_v<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev]);
// w
blocks_x = (int)ceil((real) dom[dev].Gcc.in / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_z);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
if(blocks_x > 0 && blocks_y > 0)
cage_flag_w<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_parts[dev], _dom[dev], _phase[dev], _phase_shell[dev]);
// flag external boundaries
if(bc.uW != PERIODIC && bc.uE != PERIODIC)
flag_external_u<<<numBlocks_u, dimBlocks_u>>>(_flag_u[dev], _dom[dev]);
if(bc.vS != PERIODIC && bc.vN != PERIODIC)
flag_external_v<<<numBlocks_v, dimBlocks_v>>>(_flag_v[dev], _dom[dev]);
if(bc.wB != PERIODIC && bc.wT != PERIODIC)
flag_external_w<<<numBlocks_w, dimBlocks_w>>>(_flag_w[dev], _dom[dev]);
// fill in ghost cells for periodic boundary conditions
// flag_u
if(bc.uW == PERIODIC && bc.uE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
numBlocks_cu.x = blocks_y;
numBlocks_cu.y = blocks_z;
cage_flag_u_periodic_x<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.uS == PERIODIC && bc.uN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
numBlocks_cu.x = blocks_z;
numBlocks_cu.y = blocks_x;
cage_flag_u_periodic_y<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
if(bc.uB == PERIODIC && bc.uT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfx.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
numBlocks_cu.x = blocks_x;
numBlocks_cu.y = blocks_y;
cage_flag_u_periodic_z<<<numBlocks_cu, dimBlocks_cu>>>(_flag_u[dev],
_dom[dev]);
}
//flag_v
if(bc.vW == PERIODIC && bc.vE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
numBlocks_cv.x = blocks_y;
numBlocks_cv.y = blocks_z;
cage_flag_v_periodic_x<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.vS == PERIODIC && bc.vN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
numBlocks_cv.x = blocks_z;
numBlocks_cv.y = blocks_x;
cage_flag_v_periodic_y<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
if(bc.vB == PERIODIC && bc.vT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfy.jnb / (real) threads_y);
numBlocks_cv.x = blocks_x;
numBlocks_cv.y = blocks_y;
cage_flag_v_periodic_z<<<numBlocks_cv, dimBlocks_cv>>>(_flag_v[dev],
_dom[dev]);
}
// flag_w
if(bc.wW == PERIODIC && bc.wE == PERIODIC) {
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
numBlocks_cw.x = blocks_y;
numBlocks_cw.y = blocks_z;
cage_flag_w_periodic_x<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
if(bc.wS == PERIODIC && bc.wN == PERIODIC) {
blocks_z = (int)ceil((real) dom[dev].Gfz.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
numBlocks_cw.x = blocks_z;
numBlocks_cw.y = blocks_x;
cage_flag_w_periodic_y<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
if(bc.wB == PERIODIC && bc.wT == PERIODIC) {
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
numBlocks_cw.x = blocks_x;
numBlocks_cw.y = blocks_y;
cage_flag_w_periodic_z<<<numBlocks_cw, dimBlocks_cw>>>(_flag_w[dev],
_dom[dev]);
}
}
}
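/* (editor's note) A minimal stand-alone sketch of the periodic wrap rule applied to the
 * cage extents in cuda_build_cages above, written for one direction only. The helper name
 * wrap_cage_1d and the cage1d struct are invented for this illustration; the solver itself
 * stores the same quantities directly in part_struct (is/ie/ibs/ibe per direction). */
typedef struct {
  int s, e;   // start / end cell of the cage
  int bs, be; // break cells bounding the wrapped piece of the cage
} cage1d;
static cage1d wrap_cage_1d(int start, int width, int dom_is, int dom_ie)
{
  cage1d c;
  c.s = start;
  c.e = start + width;
  if(c.s < dom_is) {        // cage hangs off the low side: shift the start up by the
    c.s = c.s + dom_ie;     // domain extent and mark the break at the boundary
    c.bs = dom_ie;
    c.be = dom_is;
  } else if(c.e > dom_ie) { // cage hangs off the high side: wrap the end instead
    c.e = c.e - dom_ie;
    c.bs = dom_ie;
    c.be = dom_is;
  } else {                  // fully interior cage: the break indices collapse onto the end
    c.bs = c.e;
    c.be = c.e;
  }
  return c;
}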
extern "C"
void cuda_part_BC(void)
{
// parallelize across domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(cudaSetDevice(dev + dev_start));
int threads_x = MAX_THREADS_DIM;
int threads_y = MAX_THREADS_DIM;
int threads_z = MAX_THREADS_DIM;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
// u
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
dim3 dimBlocks_x(threads_y, threads_z);
dim3 numBlocks_x(blocks_y, blocks_z);
part_BC_u<<<numBlocks_x, dimBlocks_x>>>(_u[dev], _phase[dev],
_flag_u[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// v
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
dim3 dimBlocks_y(threads_z, threads_x);
dim3 numBlocks_y(blocks_z, blocks_x);
part_BC_v<<<numBlocks_y, dimBlocks_y>>>(_v[dev], _phase[dev],
_flag_v[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// w
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
dim3 dimBlocks_z(threads_x, threads_y);
dim3 numBlocks_z(blocks_x, blocks_y);
part_BC_w<<<numBlocks_z, dimBlocks_z>>>(_w[dev], _phase[dev],
_flag_w[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
}
}
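/* (editor's note) Every kernel launch above sizes its grid as ceil(n / threads) via a
 * floating-point cast. The helper below is only a sketch of an equivalent integer-only
 * form; the name blocks_for is hypothetical and is shown to make the rounding rule explicit. */
static int blocks_for(int n, int threads_per_block)
{
  // integer ceiling division: e.g. n = 130, threads_per_block = 64 -> 3 blocks, so the
  // last block is partially full and kernels must guard against out-of-range indices
  return (n + threads_per_block - 1) / threads_per_block;
}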
extern "C"
void cuda_part_BC_star(void)
{
// parallelize across domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(cudaSetDevice(dev + dev_start));
int threads_x = MAX_THREADS_DIM;
int threads_y = MAX_THREADS_DIM;
int threads_z = MAX_THREADS_DIM;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
// u
blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) threads_y);
blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) threads_z);
dim3 dimBlocks_x(threads_y, threads_z);
dim3 numBlocks_x(blocks_y, blocks_z);
part_BC_u<<<numBlocks_x, dimBlocks_x>>>(_u_star[dev], _phase[dev],
_flag_u[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// v
blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) threads_z);
blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) threads_x);
dim3 dimBlocks_y(threads_z, threads_x);
dim3 numBlocks_y(blocks_z, blocks_x);
part_BC_v<<<numBlocks_y, dimBlocks_y>>>(_v_star[dev], _phase[dev],
_flag_v[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
// w
blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) threads_x);
blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) threads_y);
dim3 dimBlocks_z(threads_x, threads_y);
dim3 numBlocks_z(blocks_x, blocks_y);
part_BC_w<<<numBlocks_z, dimBlocks_z>>>(_w_star[dev], _phase[dev],
_flag_w[dev], _parts[dev], _dom[dev], nu, coeff_stride,
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev]);
}
}
// already CPU parallelized in cuda_PP_bicgstab, which calls it
extern "C"
void cuda_part_BC_p(int dev)
{
int threads_c = MAX_THREADS_DIM;
int blocks_y = 0;
int blocks_z = 0;
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_c);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_c);
dim3 dimBlocks_c(threads_c, threads_c);
dim3 numBlocks_c(blocks_y, blocks_z);
part_BC_p<<<numBlocks_c, dimBlocks_c>>>(_p0[dev], _rhs_p[dev], _phase[dev],
_phase_shell[dev], _parts[dev], _dom[dev],
mu, nu, dt, dt0, gradP, rho_f, coeff_stride,
_pnm_re00[dev], _pnm_im00[dev],
_phinm_re00[dev], _phinm_im00[dev], _chinm_re00[dev], _chinm_im00[dev],
_pnm_re[dev], _pnm_im[dev],
_phinm_re[dev], _phinm_im[dev], _chinm_re[dev], _chinm_im[dev]);
}
extern "C"
void cuda_part_p_fill(void)
{
// parallelize across domains
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(cudaSetDevice(dev + dev_start));
int threads_c = MAX_THREADS_DIM;
int blocks_y = 0;
int blocks_z = 0;
blocks_y = (int)ceil((real) dom[dev].Gcc.jn / (real) threads_c);
blocks_z = (int)ceil((real) dom[dev].Gcc.kn / (real) threads_c);
dim3 dimblocks_c(threads_c, threads_c);
dim3 numblocks_c(blocks_y, blocks_z);
part_BC_p_fill<<<numblocks_c, dimblocks_c>>>(_p[dev], _phase[dev],
_parts[dev], _dom[dev],
mu, nu, rho_f, gradP, coeff_stride,
_pnm_re[dev], _pnm_im[dev]);
}
}
extern "C"
void cuda_store_coeffs(void)
{
// parallelize over CPU threads
#pragma omp parallel num_threads(nsubdom)
{
int dev = omp_get_thread_num();
(cudaSetDevice(dev + dev_start));
// coeff00 & coeff ==> coeff0 (Adams-Bashforth)
dim3 dimBlocks(coeff_stride);
dim3 numBlocks(nparts);
// as implemented, this actually makes convergence slower
/*if(dt0 > 0.) {
predict_coeffs<<<numBlocks, dimBlocks>>>(dt0, dt,
_pnm_re00[dev], _pnm_im00[dev], _phinm_re00[dev], _phinm_im00[dev],
_chinm_re00[dev], _chinm_im00[dev],
_pnm_re0[dev], _pnm_im0[dev], _phinm_re0[dev], _phinm_im0[dev],
_chinm_re0[dev], _chinm_im0[dev],
_pnm_re[dev], _pnm_im[dev], _phinm_re[dev], _phinm_im[dev],
_chinm_re[dev], _chinm_im[dev], coeff_stride);
}
*/
(cudaMemcpy(_pnm_re00[dev], _pnm_re[dev],
sizeof(real) * coeff_stride*nparts, cudaMemcpyDeviceToDevice));
(cudaMemcpy(_pnm_im00[dev], _pnm_im[dev],
sizeof(real) * coeff_stride*nparts, cudaMemcpyDeviceToDevice));
(cudaMemcpy(_phinm_re00[dev], _phinm_re[dev],
sizeof(real) * coeff_stride*nparts, cudaMemcpyDeviceToDevice));
(cudaMemcpy(_phinm_im00[dev], _phinm_im[dev],
sizeof(real) * coeff_stride*nparts, cudaMemcpyDeviceToDevice));
(cudaMemcpy(_chinm_re00[dev], _chinm_re[dev],
sizeof(real) * coeff_stride*nparts, cudaMemcpyDeviceToDevice));
(cudaMemcpy(_chinm_im00[dev], _chinm_im[dev],
sizeof(real) * coeff_stride*nparts, cudaMemcpyDeviceToDevice));
}
}
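/* (editor's note) The commented-out predict_coeffs call above extrapolates the Lamb's
 * coefficients forward in time from their stored history. The device kernel is not shown
 * in this file; the function below is only a hedged sketch of the kind of variable-step
 * linear (Adams-Bashforth-like) extrapolation such a predictor could apply per coefficient.
 * It is not the solver's actual formula, and the name predict_coeff_sketch is invented. */
static real predict_coeff_sketch(real c_nm1, real c_n, real dt0, real dt)
{
  if(dt0 <= 0.) return c_n;              // no usable history yet: keep the current value
  return c_n + dt / dt0 * (c_n - c_nm1); // linear extrapolation over the new time step
}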
|
8553cc857b5f4ad4fe05b06943bfbd9735ed7e0a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "ITMVisualisationEngine.h"
#include "ITMPixelUtils.h"
#include "ITMCUDAUtils.h"
#include "ITMLibDefines.h"
#include "ITMSceneReconstructionEngine.h"
#include "ITMRepresentationAccess.h"
/**
the 3D intersection locations generated by the latest raycast
in voxelCoordinates
*/
static __managed__ PointImage* raycastResult;
// for ICP
//!< [out] receives output points in world coordinates
//!< [out] receives world space normals computed from points (image space)
static __managed__ DEVICEPTR(RayImage) * lastFrameICPMap = 0;
// for RenderImage
static __managed__ CameraImage<Vector4u>* outRendering = 0;
static __managed__ Vector3f towardsCamera;
// written by rendering
static __managed__ ITMFloatImage* outDepth;
// === raycasting, rendering ===
/// \param x,y [in] camera space pixel determining ray direction
//!< [out] raycastResult[locId]: the intersection point.
// w is 1 for a valid point, 0 for no intersection; in voxel-fractional-world-coordinates
struct castRay {
forEachPixelNoImage_process()
{
// Find 3d position of depth pixel xy, in eye coordinates
auto pt_camera_f = raycastResult->getRayThroughPixel(Vector2i(x, y), viewFrustum_min);
assert(pt_camera_f.origin.coordinateSystem == raycastResult->eyeCoordinates);
auto l = pt_camera_f.endpoint().location;
assert(l.z == viewFrustum_min);
// Length given in voxel-fractional-coordinates (such that one voxel has size 1)
auto pt_camera_f_vc = voxelCoordinates->convert(pt_camera_f);
float totalLength = length(pt_camera_f_vc.direction.direction);
assert(voxelSize < 1);
assert(totalLength > length(pt_camera_f.direction.direction));
assert(abs(
totalLength - length(pt_camera_f.direction.direction) / voxelSize) < 0.0001f);
// in voxel-fractional-world-coordinates (such that one voxel has size 1)
assert(pt_camera_f.endpoint().coordinateSystem == raycastResult->eyeCoordinates);
assert(!(pt_camera_f_vc.endpoint().coordinateSystem == raycastResult->eyeCoordinates));
const auto pt_block_s = pt_camera_f_vc.endpoint();
// End point
auto pt_camera_e = raycastResult->getRayThroughPixel(Vector2i(x, y), viewFrustum_max);
auto pt_camera_e_vc = voxelCoordinates->convert(pt_camera_e);
const float totalLengthMax = length(pt_camera_e_vc.direction.direction);
const auto pt_block_e = pt_camera_e_vc.endpoint();
assert(totalLength < totalLengthMax);
assert(pt_block_s.coordinateSystem == voxelCoordinates);
assert(pt_block_e.coordinateSystem == voxelCoordinates);
// Raymarching
const auto rayDirection = Vector(voxelCoordinates, normalize(pt_block_e.location - pt_block_s.location));
auto pt_result = pt_block_s; // Current position in voxel-fractional-world-coordinates
const float stepScale = mu * oneOverVoxelSize; // sdf values are distances in world-coordinates, normalized by division through mu. This is the factor to convert to voxelCoordinates.
// TODO use caching, we will access the same voxel block multiple times
float sdfValue = 1.0f;
bool hash_found;
// in voxel-fractional-world-coordinates (1.0f means step one voxel)
float stepLength;
while (totalLength < totalLengthMax) {
// D(X)
sdfValue = readFromSDF_float_uninterpolated(pt_result.location, hash_found);
if (!hash_found) {
// First we try to find an allocated voxel block, and the length of the steps we take is determined by the block size
stepLength = SDF_BLOCK_SIZE;
}
else {
// If we found an allocated block,
// [Once we are inside the truncation band], the values from the SDF give us conservative step lengths.
// using trilinear interpolation only if we have read values in the range −0.5 ≤ D(X) ≤ 0.1
if ((sdfValue <= 0.1f) && (sdfValue >= -0.5f)) {
sdfValue = readFromSDF_float_interpolated(pt_result.location, hash_found);
}
// once we read a negative value from the SDF, we found the intersection with the surface.
if (sdfValue <= 0.0f) break;
stepLength = MAX(
sdfValue * stepScale,
1.0f // if we are outside the truncation band µ, our step size is determined by the truncation band
// (note that the distance is normalized to lie in [-1,1] within the truncation band)
);
}
pt_result = pt_result + rayDirection * stepLength;
totalLength += stepLength;
}
bool pt_found;
// If the T - SDF value is negative after such a trilinear interpolation, the surface
// has indeed been found and we terminate the ray, performing one last
// trilinear interpolation step for a smoother appearance.
if (sdfValue <= 0.0f)
{
// Refine position
stepLength = sdfValue * stepScale;
pt_result = pt_result + rayDirection * stepLength;
// Read again
sdfValue = readFromSDF_float_interpolated(pt_result.location, hash_found);
// Refine position
stepLength = sdfValue * stepScale;
pt_result = pt_result + rayDirection * stepLength;
pt_found = true;
}
else pt_found = false;
raycastResult->image->GetData()[locId] = Vector4f(pt_result.location, (pt_found) ? 1.0f : 0.0f);
assert(raycastResult->pointCoordinates == voxelCoordinates);
assert(pt_result.coordinateSystem == voxelCoordinates);
}
};
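// (editor's note) A stand-alone sketch of the step-length rule used by the ray-march
// above, pulled out as a plain function for clarity. The name sdfStepLength is invented
// for this illustration; the kernel additionally steps a whole SDF_BLOCK_SIZE at once
// when no voxel block is allocated at the current position.
inline float sdfStepLength(float sdfValue, float mu_, float oneOverVoxelSize_) {
  const float stepScale = mu_ * oneOverVoxelSize_;   // mu-normalized SDF -> voxel units
  const float conservative = sdfValue * stepScale;   // safe distance promised by the SDF
  return conservative > 1.0f ? conservative : 1.0f;  // but never step less than one voxel
}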
/// Compute normal in the distance field via the gradient.
/// c.f. computeSingleNormalFromSDF
GPU_ONLY inline void computeNormalAndAngle(
THREADPTR(bool) & foundPoint, //!< [in,out]
const THREADPTR(Vector3f) & point, //!< [in]
THREADPTR(Vector3f) & outNormal,//!< [out]
THREADPTR(float) & angle //!< [out] outNormal . towardsCamera
)
{
if (!foundPoint) return;
outNormal = normalize(computeSingleNormalFromSDF(point));
angle = dot(outNormal, towardsCamera);
// don't consider points not facing the camera (raycast will hit these, do backface culling now)
if (!(angle > 0.0)) foundPoint = false;
}
// PIXEL SHADERS
// " Finally a coloured or shaded rendering of the surface is trivially computed, as desired for the visualisation."
#define DRAWFUNCTIONPARAMS \
DEVICEPTR(Vector4u) & dest,/* in voxel-fractional world coordinates, comes from raycastResult*/\
const CONSTPTR(Vector3f) & point, /* in voxel-fractional world coordinates, comes from raycastResult*/\
const THREADPTR(Vector3f) & normal_obj,\
const THREADPTR(float) & angle
GPU_ONLY inline void drawPixelGrey(DRAWFUNCTIONPARAMS)
{
const float outRes = (0.8f * angle + 0.2f) * 255.0f;
dest = Vector4u((uchar)outRes);
}
GPU_ONLY inline void drawPixelNormal(DRAWFUNCTIONPARAMS) {
dest.r = (uchar)((0.3f + (normal_obj.r + 1.0f)*0.35f)*255.0f);
dest.g = (uchar)((0.3f + (normal_obj.g + 1.0f)*0.35f)*255.0f);
dest.b = (uchar)((0.3f + (normal_obj.b + 1.0f)*0.35f)*255.0f);
}
GPU_ONLY inline void drawPixelColour(DRAWFUNCTIONPARAMS) {
const Vector3f clr = readFromSDF_color4u_interpolated(point);
dest = Vector4u(TO_UCHAR3(clr), 255);
}
#define PROCESS_AND_DRAW_PIXEL(PROCESSFUNCTION, DRAWFUNCTION) \
struct PROCESSFUNCTION { \
forEachPixelNoImage_process() {\
DEVICEPTR(Vector4u) &outRender = outRendering->image->GetData()[locId]; \
Point voxelCoordinatePoint = raycastResult->getPointForPixel(Vector2i(x,y));\
assert(voxelCoordinatePoint.coordinateSystem == voxelCoordinates); \
const CONSTPTR(Vector3f) point = voxelCoordinatePoint.location; \
float& outZ = ::outDepth->GetData()[locId];\
auto a = outRendering->eyeCoordinates->convert(voxelCoordinatePoint);\
outZ = a.location.z; /* in world / eye coordinates (distance) */ \
bool foundPoint = raycastResult->image->GetData()[locId].w > 0; \
\
Vector3f outNormal; \
float angle; \
computeNormalAndAngle(foundPoint, point, outNormal, angle); \
if (foundPoint) {/*assert(outZ >= viewFrustum_min && outZ <= viewFrustum_max); -- approx*/DRAWFUNCTION(outRender, point, outNormal, angle);} \
else {\
outRender = Vector4u((uchar)0); outZ = 0;\
} \
}\
};
PROCESS_AND_DRAW_PIXEL(renderColour, drawPixelColour)
PROCESS_AND_DRAW_PIXEL(renderGrey, drawPixelGrey)
PROCESS_AND_DRAW_PIXEL(renderColourFromNormal, drawPixelNormal)
/// Initializes raycastResult
static void Common(
const ITMPose *pose,
const ITMIntrinsics *intrinsics,
const Vector2i imgSize
) {
assert(imgSize.area() > 1);
auto raycastImage = new ITMFloat4Image(imgSize);
auto invPose_M = pose->GetInvM();
auto cameraCs = new CoordinateSystem(invPose_M);
raycastResult = new PointImage(
raycastImage,
voxelCoordinates,
cameraCs,
intrinsics->projectionParamsSimple.all
);
// (negative camera z axis)
towardsCamera = -Vector3f(invPose_M.getColumn(2));
forEachPixelNoImage<castRay>(imgSize);
}
CameraImage<Vector4u>* RenderImage(
const ITMPose *pose,
const ITMIntrinsics *intrinsics,
const Vector2i imgSize,
ITMFloatImage* const outDepth,
std::string shader)
{
assert(imgSize.area() > 1);
assert(outDepth);
assert(outDepth->noDims == imgSize);
::outDepth = outDepth;
auto outImage = new ITMUChar4Image(imgSize);
auto outCs = new CoordinateSystem(pose->GetInvM());
outRendering = new CameraImage<Vector4u>(
outImage,
outCs,
intrinsics->projectionParamsSimple.all
);
Common(pose, intrinsics, outRendering->imgSize());
hipDeviceSynchronize(); // want to read imgSize
#define isShader(s) if (shader == #s) {forEachPixelNoImage<s>(outRendering->imgSize());hipDeviceSynchronize(); return outRendering;}
isShader(renderColour);
isShader(renderColourFromNormal);
isShader(renderGrey);
assert(false); // unknown shader
return nullptr;
}
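// (editor's note) A hedged sketch of how RenderImage might be driven, using only constructs
// visible in this file. The wrapper name renderGreyFromPose and the idea of pre-allocating
// depthOut elsewhere are assumptions for this illustration, not part of the original pipeline.
static CameraImage<Vector4u>* renderGreyFromPose(
  const ITMPose& pose, const ITMIntrinsics& intrinsics,
  Vector2i imgSize, ITMFloatImage* depthOut) {
  // the shader string must match one of the isShader() names registered above
  return RenderImage(&pose, &intrinsics, imgSize, depthOut, "renderGrey");
}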
/// Computing the surface normal in image space given raycasted image (raycastResult).
///
/// In image space, since the normals are computed on a regular grid,
/// there are only 4 uninterpolated read operations followed by a cross-product.
/// (here we might do more when useSmoothing is true, and we step 2 pixels wide to find // //further-away neighbors)
///
/// \returns normal_out[idx].w = sigmaZ_out[idx] = -1 on error where idx = x + y * imgDims.x
template <bool useSmoothing>
GPU_ONLY inline void computeNormalImageSpace(
THREADPTR(bool) & foundPoint, //!< [in,out] Set to false when the normal cannot be computed
const THREADPTR(int) &x, const THREADPTR(int) &y,
THREADPTR(Vector3f) & outNormal
)
{
if (!foundPoint) return;
const Vector2i imgSize = raycastResult->imgSize();
// Lookup world coordinates of points surrounding (x,y)
// and compute forward difference vectors
Vector4f xp1_y, xm1_y, x_yp1, x_ym1;
Vector4f diff_x(0.0f, 0.0f, 0.0f, 0.0f), diff_y(0.0f, 0.0f, 0.0f, 0.0f);
// If useSmoothing, use positions 2 away
int extraDelta = useSmoothing ? 1 : 0;
#define d(x) (x + extraDelta)
if (y <= d(1) || y >= imgSize.y - d(2) || x <= d(1) || x >= imgSize.x - d(2)) { foundPoint = false; return; }
#define lookupNeighbors() \
xp1_y = sampleNearest(raycastResult->image->GetData(), x + d(1), y, imgSize);\
x_yp1 = sampleNearest(raycastResult->image->GetData(), x, y + d(1), imgSize);\
xm1_y = sampleNearest(raycastResult->image->GetData(), x - d(1), y, imgSize);\
x_ym1 = sampleNearest(raycastResult->image->GetData(), x, y - d(1), imgSize);\
diff_x = xp1_y - xm1_y;\
diff_y = x_yp1 - x_ym1;
lookupNeighbors();
#define isAnyPointIllegal() (xp1_y.w <= 0 || x_yp1.w <= 0 || xm1_y.w <= 0 || x_ym1.w <= 0)
float length_diff = MAX(length2(diff_x.toVector3()), length2(diff_y.toVector3()));
bool lengthDiffTooLarge = (length_diff * voxelSize * voxelSize > (0.15f * 0.15f));
if (isAnyPointIllegal() || (lengthDiffTooLarge && useSmoothing)) {
if (!useSmoothing) { foundPoint = false; return; }
// In case we used smoothing, try again without extra delta
extraDelta = 0;
lookupNeighbors();
if (isAnyPointIllegal()){ foundPoint = false; return; }
}
#undef d
#undef isAnyPointIllegal
#undef lookupNeighbors
// TODO why the extra minus? -- it probably does not matter because we compute the distance to a plane which would be the same with the inverse normal
outNormal = normalize(-cross(diff_x.toVector3(), diff_y.toVector3()));
float angle = dot(outNormal, towardsCamera);
// don't consider points not facing the camera (raycast will hit these, do backface culling now)
if (!(angle > 0.0)) foundPoint = false;
}
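// (editor's note) The core of the computation above, isolated as a sketch: the two
// image-space differences span the local surface patch and their cross product gives the
// (unnormalized) surface normal. The name normalFromDifferences is invented for this
// illustration only; the real function also handles illegal points and smoothing fallback.
GPU_ONLY inline Vector3f normalFromDifferences(
  const THREADPTR(Vector3f) & diff_x, // difference between the x+d and x-d raycast hits
  const THREADPTR(Vector3f) & diff_y) // difference between the y+d and y-d raycast hits
{
  // the leading minus only flips orientation; back-facing normals are culled by the caller
  return normalize(-cross(diff_x, diff_y));
}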
#define useSmoothing true
static __managed__ RayImage* outIcpMap = 0;
/// Produces a shaded image (outRendering) and a point cloud for e.g. tracking.
/// Uses image space normals.
/// \param useSmoothing whether to compute normals by forward differences two pixels away (true) or just one pixel away (false)
struct processPixelICP {
forEachPixelNoImage_process() {
const Vector4f point = raycastResult->image->GetData()[locId];
assert(raycastResult->pointCoordinates == voxelCoordinates);
bool foundPoint = point.w > 0.0f;
Vector3f outNormal;
// TODO could we use the world space normals here? not without change
computeNormalImageSpace<useSmoothing>(
foundPoint, x, y, outNormal);
#define pointsMap outIcpMap->image->GetData()
#define normalsMap outIcpMap->normalImage->GetData()
if (!foundPoint)
{
pointsMap[locId] = normalsMap[locId] = IllegalColor<Vector4f>::make();
return;
}
// Convert point to world coordinates
pointsMap[locId] = Vector4f(point.toVector3() * voxelSize, 1);
// Normals are the same whether in world or voxel coordinates
normalsMap[locId] = Vector4f(outNormal, 0);
#undef pointsMap
#undef normalsMap
}
};
void approxEqual(float a, float b, const float eps = 0.00001) {
assert(abs(a - b) < eps);
}
void approxEqual(Matrix4f a, Matrix4f b, const float eps = 0.00001) {
for (int i = 0; i < 4 * 4; i++)
approxEqual(a.m[i], b.m[i], eps);
}
void approxEqual(Matrix3f a, Matrix3f b, const float eps = 0.00001) {
for (int i = 0; i < 3 * 3; i++)
approxEqual(a.m[i], b.m[i], eps);
}
// 1. raycast scene from current viewpoint
// to create point cloud for tracking
RayImage * CreateICPMapsForCurrentView() {
assert(currentView);
auto imgSize_d = currentView->depthImage->imgSize();
assert(imgSize_d.area() > 1);
auto pointsMap = new ITMFloat4Image(imgSize_d);
auto normalsMap = new ITMFloat4Image(imgSize_d);
assert(!outIcpMap);
outIcpMap = new RayImage(
pointsMap,
normalsMap,
CoordinateSystem::global(),
currentView->depthImage->eyeCoordinates,
currentView->depthImage->cameraIntrinsics
);
assert(Scene::getCurrentScene());
// TODO reduce conversion friction
ITMPose pose; pose.SetM(currentView->depthImage->eyeCoordinates->fromGlobal);
ITMIntrinsics intrin;
intrin.projectionParamsSimple.all = currentView->depthImage->cameraIntrinsics;
Common(
&pose, //trackingState->pose_d,
&intrin,
imgSize_d
);
hipDeviceSynchronize();
approxEqual(raycastResult->eyeCoordinates->fromGlobal, currentView->depthImage->eyeCoordinates->fromGlobal);
assert(raycastResult->pointCoordinates == voxelCoordinates);
// Create ICP maps
forEachPixelNoImage<processPixelICP>(imgSize_d);
hipDeviceSynchronize();
// defensive
assert(outIcpMap->eyeCoordinates == currentView->depthImage->eyeCoordinates);
assert(outIcpMap->pointCoordinates == CoordinateSystem::global());
assert(outIcpMap->imgSize() == imgSize_d);
assert(outIcpMap->normalImage->noDims == imgSize_d);
auto icpMap = outIcpMap;
outIcpMap = 0;
return icpMap;
}
|
8553cc857b5f4ad4fe05b06943bfbd9735ed7e0a.cu
|
#include "ITMVisualisationEngine.h"
#include "ITMPixelUtils.h"
#include "ITMCUDAUtils.h"
#include "ITMLibDefines.h"
#include "ITMSceneReconstructionEngine.h"
#include "ITMRepresentationAccess.h"
/**
the 3D intersection locations generated by the latest raycast
in voxelCoordinates
*/
static __managed__ PointImage* raycastResult;
// for ICP
//!< [out] receives output points in world coordinates
//!< [out] receives world space normals computed from points (image space)
static __managed__ DEVICEPTR(RayImage) * lastFrameICPMap = 0;
// for RenderImage
static __managed__ CameraImage<Vector4u>* outRendering = 0;
static __managed__ Vector3f towardsCamera;
// written by rendering
static __managed__ ITMFloatImage* outDepth;
// === raycasting, rendering ===
/// \param x,y [in] camera space pixel determining ray direction
//!< [out] raycastResult[locId]: the intersection point.
// w is 1 for a valid point, 0 for no intersection; in voxel-fractional-world-coordinates
struct castRay {
forEachPixelNoImage_process()
{
// Find 3d position of depth pixel xy, in eye coordinates
auto pt_camera_f = raycastResult->getRayThroughPixel(Vector2i(x, y), viewFrustum_min);
assert(pt_camera_f.origin.coordinateSystem == raycastResult->eyeCoordinates);
auto l = pt_camera_f.endpoint().location;
assert(l.z == viewFrustum_min);
// Length given in voxel-fractional-coordinates (such that one voxel has size 1)
auto pt_camera_f_vc = voxelCoordinates->convert(pt_camera_f);
float totalLength = length(pt_camera_f_vc.direction.direction);
assert(voxelSize < 1);
assert(totalLength > length(pt_camera_f.direction.direction));
assert(abs(
totalLength - length(pt_camera_f.direction.direction) / voxelSize) < 0.0001f);
// in voxel-fractional-world-coordinates (such that one voxel has size 1)
assert(pt_camera_f.endpoint().coordinateSystem == raycastResult->eyeCoordinates);
assert(!(pt_camera_f_vc.endpoint().coordinateSystem == raycastResult->eyeCoordinates));
const auto pt_block_s = pt_camera_f_vc.endpoint();
// End point
auto pt_camera_e = raycastResult->getRayThroughPixel(Vector2i(x, y), viewFrustum_max);
auto pt_camera_e_vc = voxelCoordinates->convert(pt_camera_e);
const float totalLengthMax = length(pt_camera_e_vc.direction.direction);
const auto pt_block_e = pt_camera_e_vc.endpoint();
assert(totalLength < totalLengthMax);
assert(pt_block_s.coordinateSystem == voxelCoordinates);
assert(pt_block_e.coordinateSystem == voxelCoordinates);
// Raymarching
const auto rayDirection = Vector(voxelCoordinates, normalize(pt_block_e.location - pt_block_s.location));
auto pt_result = pt_block_s; // Current position in voxel-fractional-world-coordinates
const float stepScale = mu * oneOverVoxelSize; // sdf values are distances in world-coordinates, normalized by division through mu. This is the factor to convert to voxelCoordinates.
// TODO use caching, we will access the same voxel block multiple times
float sdfValue = 1.0f;
bool hash_found;
// in voxel-fractional-world-coordinates (1.0f means step one voxel)
float stepLength;
while (totalLength < totalLengthMax) {
// D(X)
sdfValue = readFromSDF_float_uninterpolated(pt_result.location, hash_found);
if (!hash_found) {
// First we try to find an allocated voxel block, and the length of the steps we take is determined by the block size
stepLength = SDF_BLOCK_SIZE;
}
else {
// If we found an allocated block,
// [Once we are inside the truncation band], the values from the SDF give us conservative step lengths.
// using trilinear interpolation only if we have read values in the range −0.5 ≤ D(X) ≤ 0.1
if ((sdfValue <= 0.1f) && (sdfValue >= -0.5f)) {
sdfValue = readFromSDF_float_interpolated(pt_result.location, hash_found);
}
// once we read a negative value from the SDF, we found the intersection with the surface.
if (sdfValue <= 0.0f) break;
stepLength = MAX(
sdfValue * stepScale,
1.0f // if we are outside the truncation band µ, our step size is determined by the truncation band
// (note that the distance is normalized to lie in [-1,1] within the truncation band)
);
}
pt_result = pt_result + rayDirection * stepLength;
totalLength += stepLength;
}
bool pt_found;
// If the T - SDF value is negative after such a trilinear interpolation, the surface
// has indeed been found and we terminate the ray, performing one last
// trilinear interpolation step for a smoother appearance.
if (sdfValue <= 0.0f)
{
// Refine position
stepLength = sdfValue * stepScale;
pt_result = pt_result + rayDirection * stepLength;
// Read again
sdfValue = readFromSDF_float_interpolated(pt_result.location, hash_found);
// Refine position
stepLength = sdfValue * stepScale;
pt_result = pt_result + rayDirection * stepLength;
pt_found = true;
}
else pt_found = false;
raycastResult->image->GetData()[locId] = Vector4f(pt_result.location, (pt_found) ? 1.0f : 0.0f);
assert(raycastResult->pointCoordinates == voxelCoordinates);
assert(pt_result.coordinateSystem == voxelCoordinates);
}
};
/// Compute normal in the distance field via the gradient.
/// c.f. computeSingleNormalFromSDF
GPU_ONLY inline void computeNormalAndAngle(
THREADPTR(bool) & foundPoint, //!< [in,out]
const THREADPTR(Vector3f) & point, //!< [in]
THREADPTR(Vector3f) & outNormal,//!< [out]
THREADPTR(float) & angle //!< [out] outNormal . towardsCamera
)
{
if (!foundPoint) return;
outNormal = normalize(computeSingleNormalFromSDF(point));
angle = dot(outNormal, towardsCamera);
// don't consider points not facing the camera (raycast will hit these, do backface culling now)
if (!(angle > 0.0)) foundPoint = false;
}
// PIXEL SHADERS
// " Finally a coloured or shaded rendering of the surface is trivially computed, as desired for the visualisation."
#define DRAWFUNCTIONPARAMS \
DEVICEPTR(Vector4u) & dest,/* in voxel-fractional world coordinates, comes from raycastResult*/\
const CONSTPTR(Vector3f) & point, /* in voxel-fractional world coordinates, comes from raycastResult*/\
const THREADPTR(Vector3f) & normal_obj,\
const THREADPTR(float) & angle
GPU_ONLY inline void drawPixelGrey(DRAWFUNCTIONPARAMS)
{
const float outRes = (0.8f * angle + 0.2f) * 255.0f;
dest = Vector4u((uchar)outRes);
}
GPU_ONLY inline void drawPixelNormal(DRAWFUNCTIONPARAMS) {
dest.r = (uchar)((0.3f + (normal_obj.r + 1.0f)*0.35f)*255.0f);
dest.g = (uchar)((0.3f + (normal_obj.g + 1.0f)*0.35f)*255.0f);
dest.b = (uchar)((0.3f + (normal_obj.b + 1.0f)*0.35f)*255.0f);
}
GPU_ONLY inline void drawPixelColour(DRAWFUNCTIONPARAMS) {
const Vector3f clr = readFromSDF_color4u_interpolated(point);
dest = Vector4u(TO_UCHAR3(clr), 255);
}
#define PROCESS_AND_DRAW_PIXEL(PROCESSFUNCTION, DRAWFUNCTION) \
struct PROCESSFUNCTION { \
forEachPixelNoImage_process() {\
DEVICEPTR(Vector4u) &outRender = outRendering->image->GetData()[locId]; \
Point voxelCoordinatePoint = raycastResult->getPointForPixel(Vector2i(x,y));\
assert(voxelCoordinatePoint.coordinateSystem == voxelCoordinates); \
const CONSTPTR(Vector3f) point = voxelCoordinatePoint.location; \
float& outZ = ::outDepth->GetData()[locId];\
auto a = outRendering->eyeCoordinates->convert(voxelCoordinatePoint);\
outZ = a.location.z; /* in world / eye coordinates (distance) */ \
bool foundPoint = raycastResult->image->GetData()[locId].w > 0; \
\
Vector3f outNormal; \
float angle; \
computeNormalAndAngle(foundPoint, point, outNormal, angle); \
if (foundPoint) {/*assert(outZ >= viewFrustum_min && outZ <= viewFrustum_max); -- approx*/DRAWFUNCTION(outRender, point, outNormal, angle);} \
else {\
outRender = Vector4u((uchar)0); outZ = 0;\
} \
}\
};
PROCESS_AND_DRAW_PIXEL(renderColour, drawPixelColour)
PROCESS_AND_DRAW_PIXEL(renderGrey, drawPixelGrey)
PROCESS_AND_DRAW_PIXEL(renderColourFromNormal, drawPixelNormal)
/// Initializes raycastResult
static void Common(
const ITMPose *pose,
const ITMIntrinsics *intrinsics,
const Vector2i imgSize
) {
assert(imgSize.area() > 1);
auto raycastImage = new ITMFloat4Image(imgSize);
auto invPose_M = pose->GetInvM();
auto cameraCs = new CoordinateSystem(invPose_M);
raycastResult = new PointImage(
raycastImage,
voxelCoordinates,
cameraCs,
intrinsics->projectionParamsSimple.all
);
// (negative camera z axis)
towardsCamera = -Vector3f(invPose_M.getColumn(2));
forEachPixelNoImage<castRay>(imgSize);
}
CameraImage<Vector4u>* RenderImage(
const ITMPose *pose,
const ITMIntrinsics *intrinsics,
const Vector2i imgSize,
ITMFloatImage* const outDepth,
std::string shader)
{
assert(imgSize.area() > 1);
assert(outDepth);
assert(outDepth->noDims == imgSize);
::outDepth = outDepth;
auto outImage = new ITMUChar4Image(imgSize);
auto outCs = new CoordinateSystem(pose->GetInvM());
outRendering = new CameraImage<Vector4u>(
outImage,
outCs,
intrinsics->projectionParamsSimple.all
);
Common(pose, intrinsics, outRendering->imgSize());
cudaDeviceSynchronize(); // want to read imgSize
#define isShader(s) if (shader == #s) {forEachPixelNoImage<s>(outRendering->imgSize());cudaDeviceSynchronize(); return outRendering;}
isShader(renderColour);
isShader(renderColourFromNormal);
isShader(renderGrey);
assert(false); // unknown shader
return nullptr;
}
/// Computing the surface normal in image space given raycasted image (raycastResult).
///
/// In image space, since the normals are computed on a regular grid,
/// there are only 4 uninterpolated read operations followed by a cross-product.
/// (here we might do more when useSmoothing is true, and we step 2 pixels wide to find // //further-away neighbors)
///
/// \returns normal_out[idx].w = sigmaZ_out[idx] = -1 on error where idx = x + y * imgDims.x
template <bool useSmoothing>
GPU_ONLY inline void computeNormalImageSpace(
THREADPTR(bool) & foundPoint, //!< [in,out] Set to false when the normal cannot be computed
const THREADPTR(int) &x, const THREADPTR(int) &y,
THREADPTR(Vector3f) & outNormal
)
{
if (!foundPoint) return;
const Vector2i imgSize = raycastResult->imgSize();
// Lookup world coordinates of points surrounding (x,y)
// and compute forward difference vectors
Vector4f xp1_y, xm1_y, x_yp1, x_ym1;
Vector4f diff_x(0.0f, 0.0f, 0.0f, 0.0f), diff_y(0.0f, 0.0f, 0.0f, 0.0f);
// If useSmoothing, use positions 2 away
int extraDelta = useSmoothing ? 1 : 0;
#define d(x) (x + extraDelta)
if (y <= d(1) || y >= imgSize.y - d(2) || x <= d(1) || x >= imgSize.x - d(2)) { foundPoint = false; return; }
#define lookupNeighbors() \
xp1_y = sampleNearest(raycastResult->image->GetData(), x + d(1), y, imgSize);\
x_yp1 = sampleNearest(raycastResult->image->GetData(), x, y + d(1), imgSize);\
xm1_y = sampleNearest(raycastResult->image->GetData(), x - d(1), y, imgSize);\
x_ym1 = sampleNearest(raycastResult->image->GetData(), x, y - d(1), imgSize);\
diff_x = xp1_y - xm1_y;\
diff_y = x_yp1 - x_ym1;
lookupNeighbors();
#define isAnyPointIllegal() (xp1_y.w <= 0 || x_yp1.w <= 0 || xm1_y.w <= 0 || x_ym1.w <= 0)
float length_diff = MAX(length2(diff_x.toVector3()), length2(diff_y.toVector3()));
bool lengthDiffTooLarge = (length_diff * voxelSize * voxelSize > (0.15f * 0.15f));
if (isAnyPointIllegal() || (lengthDiffTooLarge && useSmoothing)) {
if (!useSmoothing) { foundPoint = false; return; }
// In case we used smoothing, try again without extra delta
extraDelta = 0;
lookupNeighbors();
if (isAnyPointIllegal()){ foundPoint = false; return; }
}
#undef d
#undef isAnyPointIllegal
#undef lookupNeighbors
// TODO why the extra minus? -- it probably does not matter because we compute the distance to a plane which would be the same with the inverse normal
outNormal = normalize(-cross(diff_x.toVector3(), diff_y.toVector3()));
float angle = dot(outNormal, towardsCamera);
// don't consider points not facing the camera (raycast will hit these, do backface culling now)
if (!(angle > 0.0)) foundPoint = false;
}
#define useSmoothing true
static __managed__ RayImage* outIcpMap = 0;
/// Produces a shaded image (outRendering) and a point cloud for e.g. tracking.
/// Uses image space normals.
/// \param useSmoothing whether to compute normals by forward differences two pixels away (true) or just one pixel away (false)
struct processPixelICP {
forEachPixelNoImage_process() {
const Vector4f point = raycastResult->image->GetData()[locId];
assert(raycastResult->pointCoordinates == voxelCoordinates);
bool foundPoint = point.w > 0.0f;
Vector3f outNormal;
// TODO could we use the world space normals here? not without change
computeNormalImageSpace<useSmoothing>(
foundPoint, x, y, outNormal);
#define pointsMap outIcpMap->image->GetData()
#define normalsMap outIcpMap->normalImage->GetData()
if (!foundPoint)
{
pointsMap[locId] = normalsMap[locId] = IllegalColor<Vector4f>::make();
return;
}
// Convert point to world coordinates
pointsMap[locId] = Vector4f(point.toVector3() * voxelSize, 1);
// Normals are the same whether in world or voxel coordinates
normalsMap[locId] = Vector4f(outNormal, 0);
#undef pointsMap
#undef normalsMap
}
};
void approxEqual(float a, float b, const float eps = 0.00001) {
assert(abs(a - b) < eps);
}
void approxEqual(Matrix4f a, Matrix4f b, const float eps = 0.00001) {
for (int i = 0; i < 4 * 4; i++)
approxEqual(a.m[i], b.m[i], eps);
}
void approxEqual(Matrix3f a, Matrix3f b, const float eps = 0.00001) {
for (int i = 0; i < 3 * 3; i++)
approxEqual(a.m[i], b.m[i], eps);
}
// 1. raycast scene from current viewpoint
// to create point cloud for tracking
RayImage * CreateICPMapsForCurrentView() {
assert(currentView);
auto imgSize_d = currentView->depthImage->imgSize();
assert(imgSize_d.area() > 1);
auto pointsMap = new ITMFloat4Image(imgSize_d);
auto normalsMap = new ITMFloat4Image(imgSize_d);
assert(!outIcpMap);
outIcpMap = new RayImage(
pointsMap,
normalsMap,
CoordinateSystem::global(),
currentView->depthImage->eyeCoordinates,
currentView->depthImage->cameraIntrinsics
);
assert(Scene::getCurrentScene());
// TODO reduce conversion friction
ITMPose pose; pose.SetM(currentView->depthImage->eyeCoordinates->fromGlobal);
ITMIntrinsics intrin;
intrin.projectionParamsSimple.all = currentView->depthImage->cameraIntrinsics;
Common(
&pose, //trackingState->pose_d,
&intrin,
imgSize_d
);
cudaDeviceSynchronize();
approxEqual(raycastResult->eyeCoordinates->fromGlobal, currentView->depthImage->eyeCoordinates->fromGlobal);
assert(raycastResult->pointCoordinates == voxelCoordinates);
// Create ICP maps
forEachPixelNoImage<processPixelICP>(imgSize_d);
cudaDeviceSynchronize();
// defensive
assert(outIcpMap->eyeCoordinates == currentView->depthImage->eyeCoordinates);
assert(outIcpMap->pointCoordinates == CoordinateSystem::global());
assert(outIcpMap->imgSize() == imgSize_d);
assert(outIcpMap->normalImage->noDims == imgSize_d);
auto icpMap = outIcpMap;
outIcpMap = 0;
return icpMap;
}
|
4edadeac1256cd3390ca3e53806539d7cf6c2f23.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 32
#define ITERATIONS (unsigned)( 1200 )
#define ITERATIONS2 1
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
//unsigned j=0, k=0;
int m_sum=N;
// m_sum = A[tid*F];
for(unsigned j=0; j<ITERATIONS2;j++){
for(unsigned k=0; k<ITERATIONS; ++k){
C[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))]=m_sum;
}
m_sum+=j;
}
__syncthreads();
}
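// Note on sizing: each of the max_tid = THREADS_PER_BLOCK * NUM_OF_BLOCKS threads
// writes ITERATIONS values at stride max_tid*F starting from tid*F, so the largest
// index touched is (max_tid - 1)*F + (ITERATIONS - 1)*max_tid*F, which is why main()
// below allocates N = max_tid*F + ITERATIONS*max_tid*F elements for C.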
// Host code
int main(){
printf("Power Microbenchmarks\n");
//int N = LINE_SIZE*SETS*ASSOC;
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
//checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
//checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
hipFree(d_A);
//if (d_B)
// hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills an integer array with pseudo-random entries (note: rand() / RAND_MAX is integer division, so every entry is 0 except when rand() == RAND_MAX).
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
4edadeac1256cd3390ca3e53806539d7cf6c2f23.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 32
#define ITERATIONS (unsigned)( 1200 )
#define ITERATIONS2 1
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
//unsigned j=0, k=0;
int m_sum=N;
// m_sum = A[tid*F];
for(unsigned j=0; j<ITERATIONS2;j++){
for(unsigned k=0; k<ITERATIONS; ++k){
C[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))]=m_sum;
}
m_sum+=j;
}
__syncthreads();
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
//int N = LINE_SIZE*SETS*ASSOC;
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
//checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
//checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
cudaFree(d_A);
//if (d_B)
// cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills an integer array with pseudo-random entries (note: rand() / RAND_MAX is integer division, so every entry is 0 except when rand() == RAND_MAX).
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
a23fdbe6e511d8007b7270f5704d6317ee549c7b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_texture_types.h>
#include <device_launch_parameters.h>
#include <vector_functions.h>
#include <hip/hip_vector_types.h>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <texture_fetch_functions.hpp>
#include "common_hip.cuh"
#define IMAGE_DIM 2048
#define SAMPLE_SIZE 6
#define NUMBER_OF_SAMPLES (((SAMPLE_SIZE * 2) + 1) * ((SAMPLE_SIZE * 2) + 1))
#define rnd(x) (x * rand() / RAND_MAX)
#define INF 2e10f
void OutputImageFile(const char *filename, uchar4 *image);
void InputImageFile(const char *filename, uchar4 *image);
void CheckCUDAError(const char *msg);
#define IMAGE_BLUR(read_image, image_output) \
do { \
int x = threadIdx.x + blockIdx.x * blockDim.x; \
int y = threadIdx.y + blockIdx.y * blockDim.y; \
int output_offset = x + y * blockDim.x * gridDim.x; \
uchar4 pixel; \
float4 average = make_float4(0, 0, 0, 0); \
for (int i = -SAMPLE_SIZE; i <= SAMPLE_SIZE; i++) { \
for (int j = -SAMPLE_SIZE; j <= SAMPLE_SIZE; j++) { \
int x_offset = x + i; \
int y_offset = y + j; \
if (x_offset < 0) x_offset += IMAGE_DIM; \
if (x_offset >= IMAGE_DIM) x_offset -= IMAGE_DIM; \
if (y_offset < 0) y_offset += IMAGE_DIM; \
if (y_offset >= IMAGE_DIM) y_offset -= IMAGE_DIM; \
int offset = x_offset + y_offset * blockDim.x * gridDim.x; \
pixel = read_image(x + i, y + j, offset); \
average.x += pixel.x; \
average.y += pixel.y; \
average.z += pixel.z; \
} \
} \
average.x /= (float)NUMBER_OF_SAMPLES; \
average.y /= (float)NUMBER_OF_SAMPLES; \
average.z /= (float)NUMBER_OF_SAMPLES; \
image_output[output_offset].x = (unsigned char)average.x; \
image_output[output_offset].y = (unsigned char)average.y; \
image_output[output_offset].z = (unsigned char)average.z; \
image_output[output_offset].w = 255; \
} while (0)
__global__ void ImageBlur(uchar4 *image, uchar4 *image_output) {
#define READ_IMAGE(x, y, i) image[i]
IMAGE_BLUR(READ_IMAGE, image_output);
#undef READ_IMAGE
}
texture<uchar4, hipTextureType1D, hipReadModeElementType> image_tex_1d;
__global__ void ImageBlurTexture1D(uchar4 *image_output) {
#define READ_IMAGE(x, y, i) tex1Dfetch(image_tex_1d, i)
IMAGE_BLUR(READ_IMAGE, image_output);
#undef READ_IMAGE
}
texture<uchar4, hipTextureType2D, hipReadModeElementType> image_tex_2d;
__global__ void ImageBlurTexture2D(uchar4 *image_output) {
#define READ_IMAGE(x, y, i) tex2D(image_tex_2d, x, y)
IMAGE_BLUR(READ_IMAGE, image_output);
#undef READ_IMAGE
}
/* Host code */
int main(void) {
unsigned int image_size;
uchar4 *d_image, *d_image_output;
uchar4 *h_image;
hipEvent_t start, stop;
float3 ms; //[0]=normal,[1]=tex1d,[2]=tex2d
image_size = IMAGE_DIM * IMAGE_DIM * sizeof(uchar4);
// create timers
hipEventCreate(&start);
hipEventCreate(&stop);
// allocate memory on the GPU for the output image
hipMalloc((void **)&d_image, image_size);
hipMalloc((void **)&d_image_output, image_size);
CheckCUDAError("CUDA malloc");
auto channel_desc = hipCreateChannelDesc<uchar4>();
image_tex_2d.addressMode[0] = hipAddressModeWrap;
image_tex_2d.addressMode[1] = hipAddressModeWrap;
hipBindTexture(nullptr, image_tex_1d, d_image, channel_desc, image_size);
hipBindTexture2D(nullptr, image_tex_2d, d_image, channel_desc, IMAGE_DIM,
IMAGE_DIM, IMAGE_DIM * sizeof(uchar4));
// allocate and load host image
h_image = (uchar4 *)malloc(image_size);
InputImageFile("input.ppm", h_image);
// copy image to device memory
hipMemcpy(d_image, h_image, image_size, hipMemcpyHostToDevice);
CheckCUDAError("CUDA memcpy to device");
// cuda layout and execution
dim3 blocksPerGrid(IMAGE_DIM / 16, IMAGE_DIM / 16);
dim3 threadsPerBlock(16, 16);
// normal version
TIME("kernel (normal)", ms.x, ImageBlur, blocksPerGrid, threadsPerBlock,
d_image, d_image_output);
// copy the image back from the GPU for output to file
hipMemcpy(h_image, d_image_output, image_size, hipMemcpyDeviceToHost);
CheckCUDAError("CUDA memcpy from device");
OutputImageFile("normal.ppm", h_image);
TIME("kernel (texture 1D)", ms.y, ImageBlurTexture1D, blocksPerGrid,
threadsPerBlock, d_image_output);
// copy the image back from the GPU for output to file
hipMemcpy(h_image, d_image_output, image_size, hipMemcpyDeviceToHost);
CheckCUDAError("CUDA memcpy from device");
OutputImageFile("tex_1d.ppm", h_image);
TIME("kernel (texture 2D)", ms.z, ImageBlurTexture2D, blocksPerGrid,
threadsPerBlock, d_image_output);
// copy the image back from the GPU for output to file
hipMemcpy(h_image, d_image_output, image_size, hipMemcpyDeviceToHost);
CheckCUDAError("CUDA memcpy from device");
OutputImageFile("tex_2d.ppm", h_image);
// output timings
printf("Execution times:\n");
printf("\tNormal version: %f\n", ms.x);
printf("\ttex1D version: %f\n", ms.y);
printf("\ttex2D version: %f\n", ms.z);
// output image
// cleanup
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(d_image);
hipFree(d_image_output);
free(h_image);
hipUnbindTexture(image_tex_1d);
hipUnbindTexture(image_tex_2d);
return 0;
}
void OutputImageFile(const char *filename, uchar4 *image) {
FILE *f; // output file handle
// open the output file and write header info for PPM filetype
f = fopen(filename, "wb");
if (f == NULL) {
fprintf(stderr, "Error opening 'output.ppm' output file\n");
exit(1);
}
fprintf(f, "P6\n");
fprintf(f, "# COM4521 Lab 05 Exercise03\n");
fprintf(f, "%d %d\n%d\n", IMAGE_DIM, IMAGE_DIM, 255);
for (int x = 0; x < IMAGE_DIM; x++) {
for (int y = 0; y < IMAGE_DIM; y++) {
int i = x + y * IMAGE_DIM;
fwrite(&image[i], sizeof(unsigned char), 3,
f); // only write rgb (ignoring a)
}
}
fclose(f);
}
void InputImageFile(const char *filename, uchar4 *image) {
FILE *f; // input file handle
char temp[256];
unsigned int x, y, s;
// open the input file and read the header info for the PPM filetype
f = fopen(filename, "rb");
if (f == NULL) {
fprintf(stderr, "Error opening 'input.ppm' input file\n");
exit(1);
}
fscanf(f, "%s\n", &temp);
fscanf(f, "%d %d\n", &x, &y);
fscanf(f, "%d\n", &s);
if ((x != y) || (x != IMAGE_DIM)) {
fprintf(stderr, "Error: Input image file has wrong fixed dimensions\n");
exit(1);
}
for (int x = 0; x < IMAGE_DIM; x++) {
for (int y = 0; y < IMAGE_DIM; y++) {
int i = x + y * IMAGE_DIM;
fread(&image[i], sizeof(unsigned char), 3, f); // only read rgb
// image[i].w = 255;
}
}
fclose(f);
}
void CheckCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
|
a23fdbe6e511d8007b7270f5704d6317ee549c7b.cu
|
#include <cuda_runtime.h>
#include <cuda_texture_types.h>
#include <device_launch_parameters.h>
#include <vector_functions.h>
#include <vector_types.h>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <texture_fetch_functions.hpp>
#include "common.cuh"
#define IMAGE_DIM 2048
#define SAMPLE_SIZE 6
#define NUMBER_OF_SAMPLES (((SAMPLE_SIZE * 2) + 1) * ((SAMPLE_SIZE * 2) + 1))
#define rnd(x) (x * rand() / RAND_MAX)
#define INF 2e10f
void OutputImageFile(const char *filename, uchar4 *image);
void InputImageFile(const char *filename, uchar4 *image);
void CheckCUDAError(const char *msg);
#define IMAGE_BLUR(read_image, image_output) \
do { \
int x = threadIdx.x + blockIdx.x * blockDim.x; \
int y = threadIdx.y + blockIdx.y * blockDim.y; \
int output_offset = x + y * blockDim.x * gridDim.x; \
uchar4 pixel; \
float4 average = make_float4(0, 0, 0, 0); \
for (int i = -SAMPLE_SIZE; i <= SAMPLE_SIZE; i++) { \
for (int j = -SAMPLE_SIZE; j <= SAMPLE_SIZE; j++) { \
int x_offset = x + i; \
int y_offset = y + j; \
if (x_offset < 0) x_offset += IMAGE_DIM; \
if (x_offset >= IMAGE_DIM) x_offset -= IMAGE_DIM; \
if (y_offset < 0) y_offset += IMAGE_DIM; \
if (y_offset >= IMAGE_DIM) y_offset -= IMAGE_DIM; \
int offset = x_offset + y_offset * blockDim.x * gridDim.x; \
pixel = read_image(x + i, y + j, offset); \
average.x += pixel.x; \
average.y += pixel.y; \
average.z += pixel.z; \
} \
} \
average.x /= (float)NUMBER_OF_SAMPLES; \
average.y /= (float)NUMBER_OF_SAMPLES; \
average.z /= (float)NUMBER_OF_SAMPLES; \
image_output[output_offset].x = (unsigned char)average.x; \
image_output[output_offset].y = (unsigned char)average.y; \
image_output[output_offset].z = (unsigned char)average.z; \
image_output[output_offset].w = 255; \
} while (0)
__global__ void ImageBlur(uchar4 *image, uchar4 *image_output) {
#define READ_IMAGE(x, y, i) image[i]
IMAGE_BLUR(READ_IMAGE, image_output);
#undef READ_IMAGE
}
texture<uchar4, cudaTextureType1D, cudaReadModeElementType> image_tex_1d;
__global__ void ImageBlurTexture1D(uchar4 *image_output) {
#define READ_IMAGE(x, y, i) tex1Dfetch(image_tex_1d, i)
IMAGE_BLUR(READ_IMAGE, image_output);
#undef READ_IMAGE
}
texture<uchar4, cudaTextureType2D, cudaReadModeElementType> image_tex_2d;
__global__ void ImageBlurTexture2D(uchar4 *image_output) {
#define READ_IMAGE(x, y, i) tex2D(image_tex_2d, x, y)
IMAGE_BLUR(READ_IMAGE, image_output);
#undef READ_IMAGE
}
/* Host code */
int main(void) {
unsigned int image_size;
uchar4 *d_image, *d_image_output;
uchar4 *h_image;
cudaEvent_t start, stop;
float3 ms; //[0]=normal,[1]=tex1d,[2]=tex2d
image_size = IMAGE_DIM * IMAGE_DIM * sizeof(uchar4);
// create timers
cudaEventCreate(&start);
cudaEventCreate(&stop);
// allocate memory on the GPU for the output image
cudaMalloc((void **)&d_image, image_size);
cudaMalloc((void **)&d_image_output, image_size);
CheckCUDAError("CUDA malloc");
auto channel_desc = cudaCreateChannelDesc<uchar4>();
image_tex_2d.addressMode[0] = cudaAddressModeWrap;
image_tex_2d.addressMode[1] = cudaAddressModeWrap;
cudaBindTexture(nullptr, image_tex_1d, d_image, channel_desc, image_size);
cudaBindTexture2D(nullptr, image_tex_2d, d_image, channel_desc, IMAGE_DIM,
IMAGE_DIM, IMAGE_DIM * sizeof(uchar4));
// allocate and load host image
h_image = (uchar4 *)malloc(image_size);
InputImageFile("input.ppm", h_image);
// copy image to device memory
cudaMemcpy(d_image, h_image, image_size, cudaMemcpyHostToDevice);
CheckCUDAError("CUDA memcpy to device");
// cuda layout and execution
dim3 blocksPerGrid(IMAGE_DIM / 16, IMAGE_DIM / 16);
dim3 threadsPerBlock(16, 16);
// normal version
TIME("kernel (normal)", ms.x, ImageBlur, blocksPerGrid, threadsPerBlock,
d_image, d_image_output);
// copy the image back from the GPU for output to file
cudaMemcpy(h_image, d_image_output, image_size, cudaMemcpyDeviceToHost);
CheckCUDAError("CUDA memcpy from device");
OutputImageFile("normal.ppm", h_image);
TIME("kernel (texture 1D)", ms.y, ImageBlurTexture1D, blocksPerGrid,
threadsPerBlock, d_image_output);
// copy the image back from the GPU for output to file
cudaMemcpy(h_image, d_image_output, image_size, cudaMemcpyDeviceToHost);
CheckCUDAError("CUDA memcpy from device");
OutputImageFile("tex_1d.ppm", h_image);
TIME("kernel (texture 2D)", ms.z, ImageBlurTexture2D, blocksPerGrid,
threadsPerBlock, d_image_output);
// copy the image back from the GPU for output to file
cudaMemcpy(h_image, d_image_output, image_size, cudaMemcpyDeviceToHost);
CheckCUDAError("CUDA memcpy from device");
OutputImageFile("tex_2d.ppm", h_image);
// output timings
printf("Execution times:\n");
printf("\tNormal version: %f\n", ms.x);
printf("\ttex1D version: %f\n", ms.y);
printf("\ttex2D version: %f\n", ms.z);
// output image
// cleanup
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(d_image);
cudaFree(d_image_output);
free(h_image);
cudaUnbindTexture(image_tex_1d);
cudaUnbindTexture(image_tex_2d);
return 0;
}
void OutputImageFile(const char *filename, uchar4 *image) {
FILE *f; // output file handle
// open the output file and write header info for PPM filetype
f = fopen(filename, "wb");
if (f == NULL) {
fprintf(stderr, "Error opening 'output.ppm' output file\n");
exit(1);
}
fprintf(f, "P6\n");
fprintf(f, "# COM4521 Lab 05 Exercise03\n");
fprintf(f, "%d %d\n%d\n", IMAGE_DIM, IMAGE_DIM, 255);
for (int x = 0; x < IMAGE_DIM; x++) {
for (int y = 0; y < IMAGE_DIM; y++) {
int i = x + y * IMAGE_DIM;
fwrite(&image[i], sizeof(unsigned char), 3,
f); // only write rgb (ignoring a)
}
}
fclose(f);
}
void InputImageFile(const char *filename, uchar4 *image) {
FILE *f; // input file handle
char temp[256];
unsigned int x, y, s;
// open the input file and read the header info for the PPM filetype
f = fopen(filename, "rb");
if (f == NULL) {
fprintf(stderr, "Error opening 'input.ppm' input file\n");
exit(1);
}
fscanf(f, "%s\n", &temp);
fscanf(f, "%d %d\n", &x, &y);
fscanf(f, "%d\n", &s);
if ((x != y) || (x != IMAGE_DIM)) {
fprintf(stderr, "Error: Input image file has wrong fixed dimensions\n");
exit(1);
}
for (int x = 0; x < IMAGE_DIM; x++) {
for (int y = 0; y < IMAGE_DIM; y++) {
int i = x + y * IMAGE_DIM;
fread(&image[i], sizeof(unsigned char), 3, f); // only read rgb
// image[i].w = 255;
}
}
fclose(f);
}
void CheckCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
|
ef57288bd4f714da7ddeb7a6b4b9acf62914007c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <math.h>
__global__ void vectorAdd(int a) {
}
|
ef57288bd4f714da7ddeb7a6b4b9acf62914007c.cu
|
#include <iostream>
#include <stdio.h>
#include <math.h>
__global__ void vectorAdd(int a) {
}
|
6e7affa88eb521f29ce590dbedcdf992f5a33c9f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
#include <cfloat>
#include <cmath>
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
*
* @param inVal input val pointer
* @param inRowPtr input row pointer
* @param colInd input col index pointer
* @param ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
*/
extern "C"
__global__ void slice_sparse_dense(double* inVal, int* inRowPtr, int* colInd, double* ret, int rl, int ru, int cl, int cu) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int rowIndex = index + rl;
if (rowIndex <= ru){
int retClen = cu - cl + 1;
// Iterate over elements of the row 'rowIndex'.
for(int i = inRowPtr[rowIndex]; i < inRowPtr[rowIndex+1]; i++) {
// Only slice if the index falls into the given range
if(cl <= colInd[i] && colInd[i] <= cu) {
ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
}
}
}
}
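/**
 * Worked example (hypothetical input): for the 2x4 CSR matrix
 *     [ 0 7 0 8 ]      inVal    = {7, 8, 5}
 *     [ 5 0 0 0 ]      inRowPtr = {0, 2, 3}
 *                      colInd   = {1, 3, 0}
 * a slice with rl=0, ru=1, cl=1, cu=3 has retClen = 3 and writes
 * ret = {7, 0, 8, 0, 0, 0} in row-major order (assuming ret was zero-initialised
 * by the caller, since only the non-zero positions are written).
 */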
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
__global__ void copy_u2l_dense(double* ret, int dim, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / dim;
int iy = tid % dim;
int id_dest = iy * dim + ix;
if(iy > ix && id_dest < N) {
// TODO: Potential to reduce the number of threads by half
int id_src = tid;
ret[id_dest] = ret[id_src];
}
}
extern "C"
__forceinline__ __device__ double getBoolean(int val) {
if(val == 0)
return 0.0;
else
return 1.0;
}
// op = {0=plus, 1=minus, 2=multiply, 3=divide, 4=power,
// 5=less, 6=lessequal, 7=greater, 8=greaterequal, 9=equal, 10=notequal,
// 11=min, 12=max, 13=and, 14=or, 15=minus1multiply, 16=minusnz,
// 17=modulus, 18=integer division}
extern "C"
__forceinline__ __device__ double binaryOp(double x, double y, int op) {
switch(op) {
case 0 : return x + y;
case 1 : return x - y;
case 2 : return x * y;
case 3 : return x / y;
case 4 : return pow(x, y);
case 5 : return getBoolean(x < y);
case 6 : return getBoolean(x <= y);
case 7 : return getBoolean(x > y);
case 8 : return getBoolean(x >= y);
case 9 : return getBoolean(x == y);
case 10 : return getBoolean(x != y);
case 11 : return min(x, y);
case 12 : return max(x, y);
case 13 : return getBoolean((int)llrint(x) & (int)llrint(y));
case 14 : return getBoolean((int)llrint(x) | (int)llrint(y));
case 15 : return 1 - x * y;
case 16 : return (x != 0.0 ? x - y : 0.0);
case 17 : {
if (y == 0.0 || y == -0.0){
return nan("");
}
double v = x / y;
// Check for v being NaN (v != v) or if it is infinity
if (isnan(v) || isinf(v)){
return v;
} else {
v = floor(v);
}
return x - v * y;
}
case 18:{
double v = x / y;
if (isnan(v) || isinf(v)){
return v;
} else {
return floor(v);
}
}
default : return DBL_MAX;
}
}
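// Examples: binaryOp(7.0, 3.0, 17) == 1.0 (modulus: 7 - floor(7/3)*3),
// binaryOp(7.0, 3.0, 18) == 2.0 (integer division: floor(7/3)),
// binaryOp(2.0, 5.0, 5) == 1.0 (less-than), and binaryOp(x, 0.0, 17) returns NaN.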
extern "C"
__global__ void relu(double* A, double* ret, int rlen, int clen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if(ix < rlen && iy < clen) {
int index = ix * clen + iy;
ret[index] = max(0.0, A[index]);
}
}
// This method computes the backpropagation errors for previous layer of relu operation
extern "C"
__global__ void relu_backward(double* X, double* dout, double* ret, int rlen, int clen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if(ix < rlen && iy < clen) {
int index = ix * clen + iy;
ret[index] = X[index] > 0 ? dout[index] : 0;
}
}
// Performs the operation corresponding to the DML script:
// ones = matrix(1, rows=1, cols=Hout*Wout)
// output = input + matrix(bias %*% ones, rows=1, cols=F*Hout*Wout)
// This operation is often followed by conv2d and hence we have introduced bias_add(input, bias) built-in function
extern "C"
__global__ void bias_add(double* input, double* bias, double* ret, int rlen, int clen, int PQ) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if(ix < rlen && iy < clen) {
int index = ix * clen + iy;
int biasIndex = iy / PQ;
ret[index] = input[index] + bias[biasIndex];
}
}
// Performs the operation "ret <- A + alpha*B", where B is a vector
extern "C"
__global__ void daxpy_matrix_vector(double* A, double* B, double alpha, double* ret, int rlenA, int clenA, int rlenB, int clenB) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clenA;
int iy = tid % clenA;
if(ix < rlenA && iy < clenA) {
int index = ix * clenA + iy;
if(rlenB == 1) {
ret[index] = A[index] + alpha*B[iy];
}
else {
ret[index] = A[index] + alpha*B[ix];
}
}
}
// Performs similar operation as bias_add except elementwise multiplication instead of add
extern "C"
__global__ void bias_multiply(double* input, double* bias, double* ret, int rlen, int clen, int PQ) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if(ix < rlen && iy < clen) {
int index = ix * clen + iy;
int biasIndex = iy / PQ;
ret[index] = input[index] * bias[biasIndex];
}
}
// Compares the value and set
extern "C"
__global__ void compare_and_set(double* A, double* ret, int rlen, int clen, double compareVal, double tol, double ifEqualsVal, double ifLessThanVal, double ifGreaterThanVal) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
int index = ix * clen + iy;
if(ix < rlen && iy < clen) {
if(abs(A[index]-compareVal) < tol)
ret[index] = ifEqualsVal;
else if(A[index] < compareVal)
ret[index] = ifLessThanVal;
else
ret[index] = ifGreaterThanVal;
}
}
/**
* Performs a binary cellwise arithmetic operation on 2 matrices.
* Either both matrices are of equal size or one of them is a vector or both are.
* @param A first input matrix allocated on GPU
* @param B second input matrix allocated on GPU
* @param C output allocated on GPU
* @param maxRlen maximum of the row lengths of A and B
* @param maxClen maximum of the column lengths of A and B
* @param vectorAStatus if A is a row vector, column vector or neither
* @param vectorBStatus if B is a row vector, column vector or neither
* @param op the numeric code of the arithmetic operation to perform
*
*/
extern "C"
__global__ void matrix_matrix_cellwise_op(double* A, double* B, double* C,
int maxRlen, int maxClen, int vectorAStatus, int vectorBStatus, int op) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / maxClen;
int iy = tid % maxClen;
if(ix < maxRlen && iy < maxClen) {
int outIndex = ix * maxClen + iy;
int aIndex = outIndex;
int bIndex = outIndex;
if(vectorAStatus == 1)
aIndex = ix; // clen == 1
else if(vectorAStatus == 2)
aIndex = iy; // rlen == 1
if(vectorBStatus == 1)
bIndex = ix; // clen == 1
else if(vectorBStatus == 2)
bIndex = iy; // rlen == 1
C[outIndex] = binaryOp(A[aIndex], B[bIndex], op);
//printf("C[%d] = A[%d](%f) B[%d](%f) (%d %d)\n", outIndex, aIndex, A[aIndex], bIndex, B[bIndex], (ix+1), (iy+1));
__syncthreads();
}
}
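// Example (hypothetical call): for C = A + B with A of size 3x4 and B a 3x1 column
// vector, the host would pass maxRlen=3, maxClen=4, vectorAStatus=0, vectorBStatus=1
// and op=0; each thread then reads A[ix*4 + iy] and B[ix], i.e. the column vector is
// broadcast across the columns of A.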
/**
* Performs an arithmetic operation between a matrix and a scalar.
* C = s op A or C = A op s (where A is the matrix, s is the scalar and op is the operation)
* @param A input matrix allocated on GPU
* @param scalar scalar input
* @param C output matrix allocated on GPU
* @param size number of elements in matrix A
* @param op number code of the arithmetic operation to perform
* @param isLeftScalar whether the scalar is on the left side
*/
extern "C"
__global__ void matrix_scalar_op(double* A, double scalar, double* C, int size, int op, int isLeftScalar) {
int index = blockIdx.x *blockDim.x + threadIdx.x;
if(index < size) {
if(isLeftScalar) {
C[index] = binaryOp(scalar, A[index], op);
} else {
C[index] = binaryOp(A[index], scalar, op);
}
}
__syncthreads();
}
/**
* Sets all elements (fills) of a double array of given length with a given scalar value
* @param A array to be filled
* @param scalar value to fill array with
* @param lenA length of array A
*/
extern "C"
__global__ void fill(double* A, double scalar, int lenA) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < lenA){
A[index] = scalar;
}
}
/**
* Appends Matrix B to the right side of Matrix A into a new matrix C
* | 1 2 3 4 | | 8 8 8 | | 1 2 3 4 8 8 8 |
* cbind ( | 9 8 7 6 | , | 7 7 7 | ) = | 9 8 7 6 7 7 7 |
* | 4 3 2 1 | | 9 9 9 | | 4 3 2 1 9 9 9 |
* @param A input matrix A allocated on the GPU
* @param B input matrix B allocated on the GPU
* @param C input matrix C allocated on the GPU
* @param rowsA rows in A
* @param colsA columns in A
* @param rowsB rows in B
* @param colsB columns in B
*/
extern "C"
__global__ void cbind(double *A, double *B, double *C, int rowsA, int colsA, int rowsB, int colsB) {
int maxClen = max(colsA, colsB);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / maxClen;
int iy = tid % maxClen;
int colsC = colsA + colsB;
int rowsC = rowsA;
// Copy an element of A into C into the appropriate location
if (ix < rowsA && iy < colsA) {
double elemA = A[ix * colsA + iy];
C[ix * colsC + iy] = elemA;
}
// Copy an element of B into C into the appropriate location
if (ix < rowsB && iy < colsB) {
double elemB = B[ix * colsB + iy];
C[ix * colsC + (iy + colsA)] = elemB;
}
}
/**
* Appends Matrix B to the bottom of Matrix A into a new matrix C
* | 2 3 4 | | 8 8 8 | | 2 3 4 |
* rbind ( | 8 7 6 | , | 7 7 7 | ) = | 8 7 6 |
* | 3 2 1 | | 3 2 1 |
* | 8 8 8 |
* | 7 7 7 |
* @param A input matrix A allocated on the GPU
* @param B input matrix B allocated on the GPU
* @param C input matrix C allocated on the GPU
* @param rowsA rows in A
* @param colsA columns in A
* @param rowsB rows in B
* @param colsB columns in B
*/
extern "C"
__global__ void rbind(double *A, double *B, double *C, int rowsA, int colsA, int rowsB, int colsB) {
int maxClen = max(colsA, colsB);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / maxClen;
int iy = tid % maxClen;
int rowsC = rowsA + rowsB;
int colsC = colsA;
// Copy an element of A into C into the appropriate location
if (ix < rowsA && iy < colsA) {
double elemA = A[ix * colsA + iy];
C[ix * colsC + iy] = elemA;
}
// Copy an element of B into C into the appropriate location
if (ix < rowsB && iy < colsB) {
double elemB = B[ix * colsB + iy];
C[(ix + rowsA) * colsC + iy] = elemB;
}
}
/**
* Does a reduce operation over all elements of the array.
* This method has been adapted from the Reduction sample in the NVIDIA CUDA Samples (v8.0)
* and the Reduction example available through jcuda.org
* When invoked initially, all blocks partly compute the reduction operation over the entire array
* and write their partial results to the output/temporary array. A second invocation is needed to get the
* final reduced value.
* The number of threads, blocks and amount of shared memory is calculated in a specific way.
* Please refer to the NVIDIA CUDA Sample or the SystemML code that invokes this method to see
* how its done.
* The template-ized version of this function is similar to what is found in NVIDIA CUB
*
* @param ReductionOp Type of the functor object that implements the reduction operation
*/
template <typename ReductionOp>
__device__ void reduce(
double *g_idata, ///< input data stored in device memory (of size n)
double *g_odata, ///< output/temporary array stored in device memory (of size n)
unsigned int n, ///< size of the input and temporary/output arrays
ReductionOp reduction_op, ///< Reduction operation to perform (functor object)
double initialValue) ///< initial value for the reduction variable
{
extern __shared__ double sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x;
unsigned int gridSize = blockDim.x*2*gridDim.x;
double v = initialValue;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
v = reduction_op(v, g_idata[i]);
// ensure we don't read out of bounds
if (i + blockDim.x < n)
v = reduction_op(v, g_idata[i+blockDim.x]);
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = v;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 1024){ if (tid < 512) { sdata[tid] = v = reduction_op(v, sdata[tid + 512]); } __syncthreads(); }
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = v = reduction_op(v, sdata[tid + 256]); } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = v = reduction_op(v, sdata[tid + 128]); } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = v = reduction_op(v, sdata[tid + 64]); } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile double* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = v = reduction_op(v, smem[tid + 32]); }
if (blockDim.x >= 32) { smem[tid] = v = reduction_op(v, smem[tid + 16]); }
if (blockDim.x >= 16) { smem[tid] = v = reduction_op(v, smem[tid + 8]); }
if (blockDim.x >= 8) { smem[tid] = v = reduction_op(v, smem[tid + 4]); }
if (blockDim.x >= 4) { smem[tid] = v = reduction_op(v, smem[tid + 2]); }
if (blockDim.x >= 2) { smem[tid] = v = reduction_op(v, smem[tid + 1]); }
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
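/**
 * Typical host-side usage (illustrative sketch only; the actual thread/block/shared
 * memory configuration is computed by the SystemML host code that launches these
 * kernels). A full reduction runs the kernel twice:
 *
 *   // pass 1: one partial result per block
 *   unsigned int threads = 1024;
 *   unsigned int blocks  = min(64u, (n + threads * 2 - 1) / (threads * 2));
 *   size_t smem = threads * sizeof(double);
 *   hipLaunchKernelGGL(reduce_sum, dim3(blocks), dim3(threads), smem, 0, d_in, d_tmp, n);
 *   // pass 2: reduce the 'blocks' partial results down to d_tmp[0]
 *   hipLaunchKernelGGL(reduce_sum, dim3(1), dim3(threads), smem, 0, d_tmp, d_tmp, blocks);
 *
 * The dynamic shared memory must hold blockDim.x doubles, since sdata keeps one
 * running value per thread.
 */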
/**
* Does a reduce (sum) over each row of the array.
* This kernel must be launched with as many blocks as there are rows.
* The intuition for this kernel is that each block does a reduction over a single row.
* The maximum number of blocks that can be launched (as of compute capability 3.0) is 2^31 - 1
* This works out fine for SystemML, since the maximum number of elements in a Java array can be 2^31 - c (some small constant)
* If the matrix is "fat" and "short", i.e. there are a small number of rows and a large number of columns,
* there could be under-utilization of the hardware.
* The template-ized version of this function is similar to what is found in NVIDIA CUB
* @param ReductionOp Type of the functor object that implements the reduction operation
* @param AssignmentOp Type of the functor object that is used to modify the value before writing it to its final location in global memory for each row
*/
template <typename ReductionOp,
typename AssignmentOp>
__device__ void reduce_row(
double *g_idata, ///< input data stored in device memory (of size rows*cols)
double *g_odata, ///< output/temporary array stored in device memory (of size rows*cols)
unsigned int rows, ///< rows in input and temporary/output arrays
unsigned int cols, ///< columns in input and temporary/output arrays
ReductionOp reduction_op, ///< Reduction operation to perform (functor object)
AssignmentOp assignment_op, ///< Operation to perform before assigning this to its final location in global memory for each row
double initialValue){ ///< initial value for the reduction variable
extern __shared__ double sdata[];
// one block per row
if (blockIdx.x >= rows) {
return;
}
unsigned int block = blockIdx.x;
unsigned int tid = threadIdx.x;
unsigned int i = tid;
unsigned int block_offset = block * cols;
double v = initialValue;
while (i < cols){
v = reduction_op(v, g_idata[block_offset + i]);
i += blockDim.x;
}
// each thread puts its local sum into shared memory
sdata[tid] = v;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 1024){ if (tid < 512) { sdata[tid] = v = reduction_op(v, sdata[tid + 512]); } __syncthreads(); }
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = v = reduction_op(v, sdata[tid + 256]); } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = v = reduction_op(v, sdata[tid + 128]); } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = v = reduction_op(v, sdata[tid + 64]); } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile double* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = v = reduction_op(v, smem[tid + 32]); }
if (blockDim.x >= 32) { smem[tid] = v = reduction_op(v, smem[tid + 16]); }
if (blockDim.x >= 16) { smem[tid] = v = reduction_op(v, smem[tid + 8]); }
if (blockDim.x >= 8) { smem[tid] = v = reduction_op(v, smem[tid + 4]); }
if (blockDim.x >= 4) { smem[tid] = v = reduction_op(v, smem[tid + 2]); }
if (blockDim.x >= 2) { smem[tid] = v = reduction_op(v, smem[tid + 1]); }
}
// write result for this block to global mem, modify it with assignment op
if (tid == 0)
g_odata[block] = assignment_op(sdata[0]);
}
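// Launch sketch (illustrative only): one block per row, e.g.
//   hipLaunchKernelGGL(reduce_row_sum, dim3(rows), dim3(256), 256 * sizeof(double), 0,
//                      d_in, d_out, rows, cols);
// i.e. gridDim.x = rows and the dynamic shared memory holds blockDim.x doubles.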
/**
* Does a column wise reduction.
* The intuition is that there are as many global threads as there are columns
* Each global thread is responsible for a single element in the output vector
* This of course leads to an under-utilization of the GPU resources.
* For cases where the number of columns is small, there can be unused SMs.
*
* The template-ized version of this function is similar to what is found in NVIDIA CUB
* @param ReductionOp Type of the functor object that implements the reduction operation
* @param AssignmentOp Type of the functor object that is used to modify the value before writing it to its final location in global memory for each column
*/
template <typename ReductionOp,
typename AssignmentOp>
__device__ void reduce_col(
double *g_idata, ///< input data stored in device memory (of size rows*cols)
double *g_odata, ///< output/temporary array stored in device memory (of size rows*cols)
unsigned int rows, ///< rows in input and temporary/output arrays
unsigned int cols, ///< columns in input and temporary/output arrays
ReductionOp reduction_op, ///< Reduction operation to perform (functor object)
AssignmentOp assignment_op, ///< Operation to perform before assigning this to its final location in global memory for each column
double initialValue) ///< initial value for the reduction variable
{
unsigned int global_tid = blockIdx.x * blockDim.x + threadIdx.x;
if (global_tid >= cols) {
return;
}
unsigned int i = global_tid;
unsigned int grid_size = cols;
double val = initialValue;
while (i < rows * cols) {
val = reduction_op(val, g_idata[i]);
i += grid_size;
}
g_odata[global_tid] = assignment_op(val);
}
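// Launch sketch (illustrative only): one global thread per output column and no
// shared memory, e.g.
//   hipLaunchKernelGGL(reduce_col_sum, dim3((cols + 255) / 256), dim3(256), 0, 0,
//                      d_in, d_out, rows, cols);
// threads whose global id is >= cols return immediately.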
/**
* Functor op for assignment op. This is a dummy/identity op.
*/
typedef struct {
__device__ __forceinline__
double operator()(double a) const {
return a;
}
} IdentityOp;
/**
* Functor op for summation operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return a + b;
}
} SumOp;
/**
* Do a summation over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
* @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_sum(double *g_idata, double *g_odata, unsigned int n){
SumOp op;
reduce<SumOp>(g_idata, g_odata, n, op, 0.0);
}
/**
* Do a summation over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_row_sum(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
SumOp op;
IdentityOp aop;
reduce_row<SumOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, 0.0);
}
/**
* Do a summation over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_col_sum(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
SumOp op;
IdentityOp aop;
reduce_col<SumOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, 0.0);
}
/**
* Functor op for max operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return fmax(a, b);
}
} MaxOp;
/**
* Do a max over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
* @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_max(double *g_idata, double *g_odata, unsigned int n){
MaxOp op;
reduce<MaxOp>(g_idata, g_odata, n, op, -DBL_MAX);
}
/**
* Do a max over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_row_max(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
MaxOp op;
IdentityOp aop;
reduce_row<MaxOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, -DBL_MAX);
}
/**
* Do a max over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_col_max(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
MaxOp op;
IdentityOp aop;
reduce_col<MaxOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, -DBL_MAX);
}
/**
* Functor op for min operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return fmin(a, b);
}
} MinOp;
/**
* Do a min over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
* @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_min(double *g_idata, double *g_odata, unsigned int n){
MinOp op;
reduce<MinOp>(g_idata, g_odata, n, op, DBL_MAX);
}
/**
* Do a min over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_row_min(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
MinOp op;
IdentityOp aop;
reduce_row<MinOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, DBL_MAX);
}
/**
* Do a min over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_col_min(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
MinOp op;
IdentityOp aop;
reduce_col<MinOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, DBL_MAX);
}
/**
* Functor op for product operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return a * b;
}
} ProductOp;
/**
* Do a product over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
* @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_prod(double *g_idata, double *g_odata, unsigned int n){
ProductOp op;
reduce<ProductOp>(g_idata, g_odata, n, op, 1.0);
}
/**
* Functor op for mean operation
*/
struct MeanOp {
const long _size; ///< Number of elements by which to divide to calculate mean
__device__ __forceinline__
MeanOp(long size): _size(size) {}
__device__ __forceinline__
double operator()(double total) const {
return total / _size;
}
};
/**
* Do a mean over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_row_mean(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
SumOp op;
MeanOp aop(cols);
reduce_row<SumOp, MeanOp>(g_idata, g_odata, rows, cols, op, aop, 0.0);
}
/**
* Do a mean over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_col_mean(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
SumOp op;
MeanOp aop(rows);
reduce_col<SumOp, MeanOp>(g_idata, g_odata, rows, cols, op, aop, 0.0);
}
/**
* Do an exp over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_exp(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = exp(A[index]);
}
}
/**
* Do a sqrt over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_sqrt(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = sqrt(A[index]);
}
}
/**
* Do a round over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_round(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = (double)llround(A[index]);
}
}
/**
* Do an abs over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_abs(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = (double)fabs(A[index]);
}
}
/**
* Do a log over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_log(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = log(A[index]);
}
}
/**
* Do a floor over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_floor(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = floor(A[index]);
}
}
/**
* Do a ceil over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_ceil(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = ceil(A[index]);
}
}
/**
* Do a sin over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_sin(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = sin(A[index]);
}
}
/**
* Do a cos over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_cos(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = cos(A[index]);
}
}
/**
* Do a tan over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_tan(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = tan(A[index]);
}
}
/**
* Do an asin over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_asin(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = asin(A[index]);
}
}
/**
* Do an acos over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_acos(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = acos(A[index]);
}
}
/**
* Do an atan over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_atan(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = atan(A[index]);
}
}
/**
* Do a sign over all the elements of a matrix
* Assign -1, 0 or 1 depending on the element being negative, 0 or positive
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
* @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_sign(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
if (A[index] == 0.0) {
C[index] = 0.0;
} else {
C[index] = copysign(1.0, A[index]);
}
}
}
|
6e7affa88eb521f29ce590dbedcdf992f5a33c9f.cu
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
#include <cfloat>
#include <cmath>
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
*
 * @param inVal input val pointer
 * @param inRowPtr input row pointer
 * @param colInd input col index pointer
 * @param ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
*/
extern "C"
__global__ void slice_sparse_dense(double* inVal, int* inRowPtr, int* colInd, double* ret, int rl, int ru, int cl, int cu) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int rowIndex = index + rl;
if (rowIndex <= ru){
int retClen = cu - cl + 1;
// Iterate over elements of the row 'rowIndex'.
for(int i = inRowPtr[rowIndex]; i < inRowPtr[rowIndex+1]; i++) {
// Only slice if the index falls into the given range
if(cl <= colInd[i] && colInd[i] <= cu) {
ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
}
}
}
}
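// A minimal host-side launch sketch for slice_sparse_dense (illustrative only, not part of the
// original kernel file): the helper name and the block size of 256 are assumptions. One thread is
// needed per sliced row, and `ret` is assumed to be a zero-initialized dense buffer of
// (ru - rl + 1) x (cu - cl + 1) doubles, since the kernel only writes the non-zero entries that
// fall inside the requested column range.
static void example_slice_sparse_dense(double* inVal, int* inRowPtr, int* colInd, double* ret,
    int rl, int ru, int cl, int cu) {
  const int rows = ru - rl + 1;
  const int cols = cu - cl + 1;
  cudaMemset(ret, 0, sizeof(double) * rows * cols);  // kernel leaves zero entries untouched
  const int threads = 256;
  const int blocks = (rows + threads - 1) / threads;  // one thread per sliced row
  slice_sparse_dense<<<blocks, threads>>>(inVal, inRowPtr, colInd, ret, rl, ru, cl, cu);
}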
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
__global__ void copy_u2l_dense(double* ret, int dim, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / dim;
int iy = tid % dim;
int id_dest = iy * dim + ix;
if(iy > ix && id_dest < N) {
// TODO: Potential to reduce the number of threads by half
int id_src = tid;
ret[id_dest] = ret[id_src];
}
}
extern "C"
__forceinline__ __device__ double getBoolean(int val) {
if(val == 0)
return 0.0;
else
return 1.0;
}
// op = {0=plus, 1=minus, 2=multiply, 3=divide, 4=power,
// 5=less, 6=lessequal, 7=greater, 8=greaterequal, 9=equal, 10=notequal,
// 11=min, 12=max, 13=and, 14=or, 15=minus1multiply, 16=minusnz,
// 17=modulus, 18=integer division}
extern "C"
__forceinline__ __device__ double binaryOp(double x, double y, int op) {
switch(op) {
case 0 : return x + y;
case 1 : return x - y;
case 2 : return x * y;
case 3 : return x / y;
case 4 : return pow(x, y);
case 5 : return getBoolean(x < y);
case 6 : return getBoolean(x <= y);
case 7 : return getBoolean(x > y);
case 8 : return getBoolean(x >= y);
case 9 : return getBoolean(x == y);
case 10 : return getBoolean(x != y);
case 11 : return min(x, y);
case 12 : return max(x, y);
case 13 : return getBoolean((int)llrint(x) & (int)llrint(y));
case 14 : return getBoolean((int)llrint(x) | (int)llrint(y));
case 15 : return 1 - x * y;
case 16 : return (x != 0.0 ? x - y : 0.0);
case 17 : {
if (y == 0.0 || y == -0.0){
return nan("");
}
double v = x / y;
// Check for v being NaN (v != v) or if it is infinity
if (isnan(v) || isinf(v)){
return v;
} else {
v = floor(v);
}
return x - v * y;
}
case 18:{
double v = x / y;
if (isnan(v) || isinf(v)){
return v;
} else {
return floor(v);
}
}
default : return DBL_MAX;
}
}
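// Worked examples for the modulus (op 17) and integer-division (op 18) branches above
// (illustrative comment only):
//   binaryOp( 7.0, 3.0, 17) ->  1.0  ( 7/3 =  2.33..., floor =  2,  7 -  2*3 =  1)
//   binaryOp(-7.0, 3.0, 17) ->  2.0  (-7/3 = -2.33..., floor = -3, -7 - (-3)*3 =  2)
//   binaryOp( 7.0, 0.0, 17) ->  NaN  (explicit guard for a zero divisor)
//   binaryOp( 7.0, 3.0, 18) ->  2.0  (floor of  7/3)
//   binaryOp(-7.0, 3.0, 18) -> -3.0  (floor of -7/3)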
extern "C"
__global__ void relu(double* A, double* ret, int rlen, int clen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if(ix < rlen && iy < clen) {
int index = ix * clen + iy;
ret[index] = max(0.0, A[index]);
}
}
// This method computes the backpropagation errors for previous layer of relu operation
extern "C"
__global__ void relu_backward(double* X, double* dout, double* ret, int rlen, int clen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if(ix < rlen && iy < clen) {
int index = ix * clen + iy;
ret[index] = X[index] > 0 ? dout[index] : 0;
}
}
// Performs the operation corresponding to the DML script:
// ones = matrix(1, rows=1, cols=Hout*Wout)
// output = input + matrix(bias %*% ones, rows=1, cols=F*Hout*Wout)
// This operation is often followed by conv2d and hence we have introduced bias_add(input, bias) built-in function
extern "C"
__global__ void bias_add(double* input, double* bias, double* ret, int rlen, int clen, int PQ) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if(ix < rlen && iy < clen) {
int index = ix * clen + iy;
int biasIndex = iy / PQ;
ret[index] = input[index] + bias[biasIndex];
}
}
// Performs the operation "ret <- A + alpha*B", where B is a vector
extern "C"
__global__ void daxpy_matrix_vector(double* A, double* B, double alpha, double* ret, int rlenA, int clenA, int rlenB, int clenB) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clenA;
int iy = tid % clenA;
if(ix < rlenA && iy < clenA) {
int index = ix * clenA + iy;
if(rlenB == 1) {
ret[index] = A[index] + alpha*B[iy];
}
else {
ret[index] = A[index] + alpha*B[ix];
}
}
}
// Performs a similar operation to bias_add, except with elementwise multiplication instead of addition
extern "C"
__global__ void bias_multiply(double* input, double* bias, double* ret, int rlen, int clen, int PQ) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if(ix < rlen && iy < clen) {
int index = ix * clen + iy;
int biasIndex = iy / PQ;
ret[index] = input[index] * bias[biasIndex];
}
}
// Compares each element of A to compareVal (within tolerance tol) and sets the output to ifEqualsVal, ifLessThanVal or ifGreaterThanVal accordingly
extern "C"
__global__ void compare_and_set(double* A, double* ret, int rlen, int clen, double compareVal, double tol, double ifEqualsVal, double ifLessThanVal, double ifGreaterThanVal) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
int index = ix * clen + iy;
if(ix < rlen && iy < clen) {
if(abs(A[index]-compareVal) < tol)
ret[index] = ifEqualsVal;
else if(A[index] < compareVal)
ret[index] = ifLessThanVal;
else
ret[index] = ifGreaterThanVal;
}
}
/**
* Performs a binary cellwise arithmetic operation on 2 matrices.
 * Either both matrices are of equal size, or one (or both) of them is a vector.
* @param A first input matrix allocated on GPU
* @param B second input matrix allocated on GPU
* @param C output allocated on GPU
* @param maxRlen maximum of the row lengths of A and B
* @param maxClen maximum of the column lengths of A and B
* @param vectorAStatus if A is a row vector, column vector or neither
* @param vectorBStatus if B is a row vector, column vector or neither
* @param op the numeric code of the arithmetic operation to perform
*
*/
extern "C"
__global__ void matrix_matrix_cellwise_op(double* A, double* B, double* C,
int maxRlen, int maxClen, int vectorAStatus, int vectorBStatus, int op) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / maxClen;
int iy = tid % maxClen;
if(ix < maxRlen && iy < maxClen) {
int outIndex = ix * maxClen + iy;
int aIndex = outIndex;
int bIndex = outIndex;
if(vectorAStatus == 1)
aIndex = ix; // clen == 1
else if(vectorAStatus == 2)
aIndex = iy; // rlen == 1
if(vectorBStatus == 1)
bIndex = ix; // clen == 1
else if(vectorBStatus == 2)
bIndex = iy; // rlen == 1
C[outIndex] = binaryOp(A[aIndex], B[bIndex], op);
//printf("C[%d] = A[%d](%f) B[%d](%f) (%d %d)\n", outIndex, aIndex, A[aIndex], bIndex, B[bIndex], (ix+1), (iy+1));
		// note: no __syncthreads() is needed here, this kernel does not use shared memory
}
}
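// Hedged host-side sketch (not part of the original kernel file) of how the vector status flags
// above are typically used: here B is a column vector (clen == 1), so vectorBStatus is 1 and every
// element of row ix in A is combined with B[ix]. The helper name, the block size and op = 0 (plus)
// are assumptions for illustration.
static void example_matrix_plus_column_vector(double* A, double* B, double* C, int rlenA, int clenA) {
  const int maxRlen = rlenA;  // B has rlenA rows and a single column
  const int maxClen = clenA;
  const int threads = 256;
  const int blocks = (maxRlen * maxClen + threads - 1) / threads;
  matrix_matrix_cellwise_op<<<blocks, threads>>>(A, B, C, maxRlen, maxClen, 0, 1, 0);
}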
/**
* Performs an arithmetic operation between a matrix and a scalar.
* C = s op A or C = A op s (where A is the matrix, s is the scalar and op is the operation)
* @param A input matrix allocated on GPU
* @param scalar scalar input
* @param C output matrix allocated on GPU
* @param size number of elements in matrix A
* @param op number code of the arithmetic operation to perform
* @param isLeftScalar whether the scalar is on the left side
*/
extern "C"
__global__ void matrix_scalar_op(double* A, double scalar, double* C, int size, int op, int isLeftScalar) {
int index = blockIdx.x *blockDim.x + threadIdx.x;
if(index < size) {
if(isLeftScalar) {
C[index] = binaryOp(scalar, A[index], op);
} else {
C[index] = binaryOp(A[index], scalar, op);
}
}
__syncthreads();
}
/**
* Sets all elements (fills) of a double array of given length with a given scalar value
* @param A array to be filled
* @param scalar value to fill array with
* @param lenA length of array A
*/
extern "C"
__global__ void fill(double* A, double scalar, int lenA) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < lenA){
A[index] = scalar;
}
}
/**
* Appends Matrix B to the right side of Matrix A into a new matrix C
* | 1 2 3 4 | | 8 8 8 | | 1 2 3 4 8 8 8 |
* cbind ( | 9 8 7 6 | , | 7 7 7 | ) = | 9 8 7 6 7 7 7 |
* | 4 3 2 1 | | 9 9 9 | | 4 3 2 1 9 9 9 |
* @param A input matrix A allocated on the GPU
* @param B input matrix B allocated on the GPU
 * @param C output matrix C allocated on the GPU
* @param rowsA rows in A
* @param colsA columns in A
* @param rowsB rows in B
* @param colsB columns in B
*/
extern "C"
__global__ void cbind(double *A, double *B, double *C, int rowsA, int colsA, int rowsB, int colsB) {
int maxClen = max(colsA, colsB);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / maxClen;
int iy = tid % maxClen;
int colsC = colsA + colsB;
int rowsC = rowsA;
// Copy an element of A into C into the appropriate location
if (ix < rowsA && iy < colsA) {
double elemA = A[ix * colsA + iy];
C[ix * colsC + iy] = elemA;
}
// Copy an element of B into C into the appropriate location
if (ix < rowsB && iy < colsB) {
double elemB = B[ix * colsB + iy];
C[ix * colsC + (iy + colsA)] = elemB;
}
}
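// Hedged launch sketch for cbind (illustrative only; the helper name and block size are
// assumptions): the grid has to cover max(rowsA, rowsB) * max(colsA, colsB) threads so that both
// copy branches above are reached, and C is assumed to be a rowsA x (colsA + colsB) buffer.
static void example_cbind(double* A, double* B, double* C, int rowsA, int colsA, int rowsB, int colsB) {
  const int maxRlen = rowsA > rowsB ? rowsA : rowsB;
  const int maxClen = colsA > colsB ? colsA : colsB;
  const int threads = 256;
  const int blocks = (maxRlen * maxClen + threads - 1) / threads;
  cbind<<<blocks, threads>>>(A, B, C, rowsA, colsA, rowsB, colsB);
}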
/**
* Appends Matrix B to the bottom of Matrix A into a new matrix C
 *         | 2 3 4 |   | 8 8 8 |     | 2 3 4 |
 * rbind ( | 8 7 6 | , | 7 7 7 | ) = | 8 7 6 |
 *         | 3 2 1 |                 | 3 2 1 |
 *                                   | 8 8 8 |
 *                                   | 7 7 7 |
* @param A input matrix A allocated on the GPU
* @param B input matrix B allocated on the GPU
 * @param C output matrix C allocated on the GPU
* @param rowsA rows in A
* @param colsA columns in A
* @param rowsB rows in B
* @param colsB columns in B
*/
extern "C"
__global__ void rbind(double *A, double *B, double *C, int rowsA, int colsA, int rowsB, int colsB) {
int maxClen = max(colsA, colsB);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / maxClen;
int iy = tid % maxClen;
int rowsC = rowsA + rowsB;
int colsC = colsA;
// Copy an element of A into C into the appropriate location
if (ix < rowsA && iy < colsA) {
double elemA = A[ix * colsA + iy];
C[ix * colsC + iy] = elemA;
}
// Copy an element of B into C into the appropriate location
if (ix < rowsB && iy < colsB) {
double elemB = B[ix * colsB + iy];
C[(ix + rowsA) * colsC + iy] = elemB;
}
}
/**
* Does a reduce operation over all elements of the array.
* This method has been adapted from the Reduction sample in the NVIDIA CUDA Samples (v8.0)
* and the Reduction example available through jcuda.org
 * When invoked initially, each block computes a partial reduction over its portion of the array
 * and writes it to the output/temporary array. A second invocation is then needed to obtain the
 * final reduced value.
 * The number of threads, the number of blocks and the amount of shared memory are calculated in a specific way.
* Please refer to the NVIDIA CUDA Sample or the SystemML code that invokes this method to see
 * how it is done.
* The template-ized version of this function is similar to what is found in NVIDIA CUB
*
* @param ReductionOp Type of the functor object that implements the reduction operation
*/
template <typename ReductionOp>
__device__ void reduce(
double *g_idata, ///< input data stored in device memory (of size n)
double *g_odata, ///< output/temporary array stored in device memory (of size n)
unsigned int n, ///< size of the input and temporary/output arrays
ReductionOp reduction_op, ///< Reduction operation to perform (functor object)
double initialValue) ///< initial value for the reduction variable
{
extern __shared__ double sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x;
unsigned int gridSize = blockDim.x*2*gridDim.x;
double v = initialValue;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
v = reduction_op(v, g_idata[i]);
// ensure we don't read out of bounds
if (i + blockDim.x < n)
v = reduction_op(v, g_idata[i+blockDim.x]);
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = v;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 1024){ if (tid < 512) { sdata[tid] = v = reduction_op(v, sdata[tid + 512]); } __syncthreads(); }
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = v = reduction_op(v, sdata[tid + 256]); } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = v = reduction_op(v, sdata[tid + 128]); } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = v = reduction_op(v, sdata[tid + 64]); } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile double* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = v = reduction_op(v, smem[tid + 32]); }
if (blockDim.x >= 32) { smem[tid] = v = reduction_op(v, smem[tid + 16]); }
if (blockDim.x >= 16) { smem[tid] = v = reduction_op(v, smem[tid + 8]); }
if (blockDim.x >= 8) { smem[tid] = v = reduction_op(v, smem[tid + 4]); }
if (blockDim.x >= 4) { smem[tid] = v = reduction_op(v, smem[tid + 2]); }
if (blockDim.x >= 2) { smem[tid] = v = reduction_op(v, smem[tid + 1]); }
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
/**
 * Does a reduce operation over each row of the array.
* This kernel must be launched with as many blocks as there are rows.
* The intuition for this kernel is that each block does a reduction over a single row.
 * The maximum number of blocks that can be launched (as of compute capability 3.0) is 2^31 - 1
* This works out fine for SystemML, since the maximum elements in a Java array can be 2^31 - c (some small constant)
 * If the matrix is "fat" and "short", i.e. there is a small number of rows and a large number of columns,
* there could be under-utilization of the hardware.
* The template-ized version of this function is similar to what is found in NVIDIA CUB
* @param ReductionOp Type of the functor object that implements the reduction operation
* @param AssignmentOp Type of the functor object that is used to modify the value before writing it to its final location in global memory for each row
*/
template <typename ReductionOp,
typename AssignmentOp>
__device__ void reduce_row(
double *g_idata, ///< input data stored in device memory (of size rows*cols)
    double *g_odata, ///< output/temporary array stored in device memory (of size rows*cols)
unsigned int rows, ///< rows in input and temporary/output arrays
unsigned int cols, ///< columns in input and temporary/output arrays
ReductionOp reduction_op, ///< Reduction operation to perform (functor object)
AssignmentOp assignment_op, ///< Operation to perform before assigning this to its final location in global memory for each row
double initialValue){ ///< initial value for the reduction variable
extern __shared__ double sdata[];
// one block per row
if (blockIdx.x >= rows) {
return;
}
unsigned int block = blockIdx.x;
unsigned int tid = threadIdx.x;
unsigned int i = tid;
unsigned int block_offset = block * cols;
double v = initialValue;
while (i < cols){
v = reduction_op(v, g_idata[block_offset + i]);
i += blockDim.x;
}
// each thread puts its local sum into shared memory
sdata[tid] = v;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 1024){ if (tid < 512) { sdata[tid] = v = reduction_op(v, sdata[tid + 512]); } __syncthreads(); }
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = v = reduction_op(v, sdata[tid + 256]); } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = v = reduction_op(v, sdata[tid + 128]); } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = v = reduction_op(v, sdata[tid + 64]); } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile double* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = v = reduction_op(v, smem[tid + 32]); }
if (blockDim.x >= 32) { smem[tid] = v = reduction_op(v, smem[tid + 16]); }
if (blockDim.x >= 16) { smem[tid] = v = reduction_op(v, smem[tid + 8]); }
if (blockDim.x >= 8) { smem[tid] = v = reduction_op(v, smem[tid + 4]); }
if (blockDim.x >= 4) { smem[tid] = v = reduction_op(v, smem[tid + 2]); }
if (blockDim.x >= 2) { smem[tid] = v = reduction_op(v, smem[tid + 1]); }
}
// write result for this block to global mem, modify it with assignment op
if (tid == 0)
g_odata[block] = assignment_op(sdata[0]);
}
/**
* Does a column wise reduction.
* The intuition is that there are as many global threads as there are columns
* Each global thread is responsible for a single element in the output vector
 * This of course leads to an under-utilization of the GPU resources.
 * For cases where the number of columns is small, there can be unused SMs.
*
* The template-ized version of this function is similar to what is found in NVIDIA CUB
* @param ReductionOp Type of the functor object that implements the reduction operation
* @param AssignmentOp Type of the functor object that is used to modify the value before writing it to its final location in global memory for each column
*/
template <typename ReductionOp,
typename AssignmentOp>
__device__ void reduce_col(
double *g_idata, ///< input data stored in device memory (of size rows*cols)
    double *g_odata, ///< output/temporary array stored in device memory (of size rows*cols)
unsigned int rows, ///< rows in input and temporary/output arrays
unsigned int cols, ///< columns in input and temporary/output arrays
ReductionOp reduction_op, ///< Reduction operation to perform (functor object)
AssignmentOp assignment_op, ///< Operation to perform before assigning this to its final location in global memory for each column
double initialValue) ///< initial value for the reduction variable
{
unsigned int global_tid = blockIdx.x * blockDim.x + threadIdx.x;
if (global_tid >= cols) {
return;
}
unsigned int i = global_tid;
unsigned int grid_size = cols;
double val = initialValue;
while (i < rows * cols) {
val = reduction_op(val, g_idata[i]);
i += grid_size;
}
g_odata[global_tid] = assignment_op(val);
}
/**
* Functor op for assignment op. This is a dummy/identity op.
*/
typedef struct {
__device__ __forceinline__
double operator()(double a) const {
return a;
}
} IdentityOp;
/**
* Functor op for summation operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return a + b;
}
} SumOp;
/**
* Do a summation over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
* @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_sum(double *g_idata, double *g_odata, unsigned int n){
SumOp op;
reduce<SumOp>(g_idata, g_odata, n, op, 0.0);
}
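// Hedged sketch (not part of the original kernel file) of the two-pass invocation pattern that the
// reduce() documentation describes, using reduce_sum. The helper name, the fixed 256 threads per
// block and the in-place second pass are assumptions; SystemML derives its launch parameters
// differently. d_tmp is assumed to hold at least `blocks` doubles.
static double example_full_sum(double* d_in, double* d_tmp, unsigned int n) {
  const unsigned int threads = 256;  // power of two, as assumed by the shared-memory reduction
  const unsigned int blocks = (n + threads * 2 - 1) / (threads * 2);  // each block reads 2*threads elements per stride
  const size_t shmem = threads * sizeof(double);
  // pass 1: every block writes one partial sum to d_tmp[blockIdx.x]
  reduce_sum<<<blocks, threads, shmem>>>(d_in, d_tmp, n);
  // pass 2: a single block reduces the `blocks` partial sums into d_tmp[0]
  reduce_sum<<<1, threads, shmem>>>(d_tmp, d_tmp, blocks);
  double result = 0.0;
  cudaMemcpy(&result, d_tmp, sizeof(double), cudaMemcpyDeviceToHost);
  return result;
}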
/**
* Do a summation over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_row_sum(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
SumOp op;
IdentityOp aop;
reduce_row<SumOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, 0.0);
}
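// Hedged launch sketch for the row-wise reductions (illustrative only): as the reduce_row()
// documentation notes, one block is launched per row, so the grid size equals `rows` and the
// dynamic shared memory holds blockDim.x doubles. The helper name and 256 threads per block are
// assumptions.
static void example_row_sums(double* d_in, double* d_out, unsigned int rows, unsigned int cols) {
  const unsigned int threads = 256;
  reduce_row_sum<<<rows, threads, threads * sizeof(double)>>>(d_in, d_out, rows, cols);
}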
/**
* Do a summation over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_col_sum(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
SumOp op;
IdentityOp aop;
reduce_col<SumOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, 0.0);
}
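// Hedged launch sketch for the column-wise reductions (illustrative only): as the reduce_col()
// documentation notes, one global thread handles one output column, so the grid only has to cover
// `cols` threads and no shared memory is required. The helper name and block size are assumptions.
static void example_col_sums(double* d_in, double* d_out, unsigned int rows, unsigned int cols) {
  const unsigned int threads = 256;
  const unsigned int blocks = (cols + threads - 1) / threads;
  reduce_col_sum<<<blocks, threads>>>(d_in, d_out, rows, cols);
}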
/**
* Functor op for max operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return fmax(a, b);
}
} MaxOp;
/**
* Do a max over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_max(double *g_idata, double *g_odata, unsigned int n){
MaxOp op;
reduce<MaxOp>(g_idata, g_odata, n, op, -DBL_MAX);
}
/**
* Do a max over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_row_max(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
MaxOp op;
IdentityOp aop;
reduce_row<MaxOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, -DBL_MAX);
}
/**
* Do a max over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_col_max(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
MaxOp op;
IdentityOp aop;
reduce_col<MaxOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, -DBL_MAX);
}
/**
* Functor op for min operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return fmin(a, b);
}
} MinOp;
/**
* Do a min over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_min(double *g_idata, double *g_odata, unsigned int n){
MinOp op;
reduce<MinOp>(g_idata, g_odata, n, op, DBL_MAX);
}
/**
* Do a min over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_row_min(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
MinOp op;
IdentityOp aop;
reduce_row<MinOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, DBL_MAX);
}
/**
* Do a min over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_col_min(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
MinOp op;
IdentityOp aop;
  reduce_col<MinOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, DBL_MAX);
}
/**
* Functor op for product operation
*/
typedef struct {
__device__ __forceinline__
double operator()(double a, double b) const {
return a * b;
}
} ProductOp;
/**
* Do a product over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
extern "C"
__global__ void reduce_prod(double *g_idata, double *g_odata, unsigned int n){
ProductOp op;
reduce<ProductOp>(g_idata, g_odata, n, op, 1.0);
}
/**
* Functor op for mean operation
*/
struct MeanOp {
const long _size; ///< Number of elements by which to divide to calculate mean
__device__ __forceinline__
MeanOp(long size): _size(size) {}
__device__ __forceinline__
double operator()(double total) const {
return total / _size;
}
};
/**
* Do a mean over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_row_mean(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
SumOp op;
MeanOp aop(cols);
reduce_row<SumOp, MeanOp>(g_idata, g_odata, rows, cols, op, aop, 0.0);
}
/**
* Do a mean over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
extern "C"
__global__ void reduce_col_mean(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){
SumOp op;
MeanOp aop(rows);
reduce_col<SumOp, MeanOp>(g_idata, g_odata, rows, cols, op, aop, 0.0);
}
/**
* Do an exp over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_exp(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = exp(A[index]);
}
}
/**
 * Do a sqrt over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_sqrt(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = sqrt(A[index]);
}
}
/**
 * Do a round over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_round(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = (double)llround(A[index]);
}
}
/**
* Do an abs over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_abs(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = (double)fabs(A[index]);
}
}
/**
 * Do a log over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_log(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = log(A[index]);
}
}
/**
 * Do a floor over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_floor(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = floor(A[index]);
}
}
/**
 * Do a ceil over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_ceil(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = ceil(A[index]);
}
}
/**
 * Do a sin over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_sin(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = sin(A[index]);
}
}
/**
 * Do a cos over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_cos(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = cos(A[index]);
}
}
/**
 * Do a tan over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_tan(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = tan(A[index]);
}
}
/**
* Do an asin over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_asin(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = asin(A[index]);
}
}
/**
* Do an acos over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_acos(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = acos(A[index]);
}
}
/**
* Do an atan over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_atan(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = atan(A[index]);
}
}
/**
 * Do a sign over all the elements of a matrix
 * Assign -1, 0 or 1 depending on the element being negative, 0 or positive
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
extern "C"
__global__ void matrix_sign(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
if (A[index] == 0.0) {
C[index] = 0.0;
} else {
C[index] = copysign(1.0, A[index]);
}
}
}
|
a9fe6cc1a2a9204e0ee1add6cae4f050c98e15b6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <limits>
#include <hip/hip_runtime.h>
#include "DataFormats/EcalDigi/interface/EcalDataFrame.h"
#include "DataFormats/EcalRecHit/interface/EcalUncalibratedRecHit.h"
#include "DataFormats/Math/interface/approx_exp.h"
#include "DataFormats/Math/interface/approx_log.h"
#include "FWCore/Utilities/interface/CMSUnrollLoop.h"
#include "Common.h"
#include "TimeComputationKernels.h"
#include "KernelHelpers.h"
//#define DEBUG
//#define ECAL_RECO_CUDA_DEBUG
namespace ecal {
namespace multifit {
__device__ __forceinline__ bool use_sample(unsigned int sample_mask, unsigned int sample) {
return sample_mask & (0x1 << (EcalDataFrame::MAXSAMPLES - (sample + 1)));
}
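    // Worked example for use_sample() (illustrative comment only): with
    // EcalDataFrame::MAXSAMPLES == 10, sample 0 maps to bit 9 of the mask and sample 9 to bit 0,
    // so a mask of 0x3FF enables all ten samples while 0x200 enables only sample 0.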
__global__ void kernel_time_compute_nullhypot(SampleVector::Scalar const* sample_values,
SampleVector::Scalar const* sample_value_errors,
bool const* useless_sample_values,
SampleVector::Scalar* chi2s,
SampleVector::Scalar* sum0s,
SampleVector::Scalar* sumAAs,
const int nchannels) {
using ScalarType = SampleVector::Scalar;
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
int tx = threadIdx.x + blockDim.x * blockIdx.x;
int ltx = threadIdx.x;
int ch = tx / nsamples;
int nchannels_per_block = blockDim.x / nsamples;
      // threads that return here should not affect the __syncthreads() below since they have exited the kernel
if (ch >= nchannels)
return;
int sample = tx % nsamples;
// shared mem inits
extern __shared__ char sdata[];
char* s_sum0 = sdata;
SampleVector::Scalar* s_sum1 = reinterpret_cast<SampleVector::Scalar*>(s_sum0 + nchannels_per_block * nsamples);
SampleVector::Scalar* s_sumA = s_sum1 + nchannels_per_block * nsamples;
SampleVector::Scalar* s_sumAA = s_sumA + nchannels_per_block * nsamples;
// TODO make sure no div by 0
const auto inv_error =
useless_sample_values[tx] ? 0.0 : 1.0 / (sample_value_errors[tx] * sample_value_errors[tx]);
const auto sample_value = sample_values[tx];
s_sum0[ltx] = useless_sample_values[tx] ? 0 : 1;
s_sum1[ltx] = inv_error;
s_sumA[ltx] = sample_value * inv_error;
s_sumAA[ltx] = sample_value * sample_value * inv_error;
__syncthreads();
// 5 threads for [0, 4] samples
if (sample < 5) {
s_sum0[ltx] += s_sum0[ltx + 5];
s_sum1[ltx] += s_sum1[ltx + 5];
s_sumA[ltx] += s_sumA[ltx + 5];
s_sumAA[ltx] += s_sumAA[ltx + 5];
}
__syncthreads();
if (sample < 2) {
// note double counting of sample 3
s_sum0[ltx] += s_sum0[ltx + 2] + s_sum0[ltx + 3];
s_sum1[ltx] += s_sum1[ltx + 2] + s_sum1[ltx + 3];
s_sumA[ltx] += s_sumA[ltx + 2] + s_sumA[ltx + 3];
s_sumAA[ltx] += s_sumAA[ltx + 2] + s_sumAA[ltx + 3];
}
__syncthreads();
if (sample == 0) {
// note, subtract to remove the double counting of sample == 3
const auto sum0 = s_sum0[ltx] + s_sum0[ltx + 1] - s_sum0[ltx + 3];
const auto sum1 = s_sum1[ltx] + s_sum1[ltx + 1] - s_sum1[ltx + 3];
const auto sumA = s_sumA[ltx] + s_sumA[ltx + 1] - s_sumA[ltx + 3];
const auto sumAA = s_sumAA[ltx] + s_sumAA[ltx + 1] - s_sumAA[ltx + 3];
const auto chi2 = sum0 > 0 ? (sumAA - sumA * sumA / sum1) / sum0 : static_cast<ScalarType>(0);
chi2s[ch] = chi2;
sum0s[ch] = sum0;
sumAAs[ch] = sumAA;
#ifdef DEBUG_TC_NULLHYPOT
if (ch == 0) {
printf("chi2 = %f sum0 = %d sumAA = %f\n", chi2, static_cast<int>(sum0), sumAA);
}
#endif
}
}
constexpr float fast_expf(float x) { return unsafe_expf<6>(x); }
constexpr float fast_logf(float x) { return unsafe_logf<7>(x); }
//#define DEBUG_TC_MAKERATIO
//
// launch ctx parameters are
// 45 threads per channel, X channels per block, Y blocks
// 45 comes from: 10 samples for i <- 0 to 9 and for j <- i+1 to 9
    // TODO: it might be much better to use 32 threads per channel instead of 45
// to simplify the synchronization
//
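    // Worked example of the 45-thread mapping used below (illustrative comment only): with
    // nsamples = 10 there are 10*9/2 = 45 ordered pairs (i, j) with i < j, and the local thread
    // index ltx walks them row by row, e.g.
    //   ltx = 0  -> (0, 1),   ltx = 8  -> (0, 9),
    //   ltx = 9  -> (1, 2),   ltx = 44 -> (8, 9).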
__global__ void kernel_time_compute_makeratio(SampleVector::Scalar const* sample_values,
SampleVector::Scalar const* sample_value_errors,
uint32_t const* dids_eb,
uint32_t const* dids_ee,
bool const* useless_sample_values,
char const* pedestal_nums,
ConfigurationParameters::type const* amplitudeFitParametersEB,
ConfigurationParameters::type const* amplitudeFitParametersEE,
ConfigurationParameters::type const* timeFitParametersEB,
ConfigurationParameters::type const* timeFitParametersEE,
SampleVector::Scalar const* sumAAsNullHypot,
SampleVector::Scalar const* sum0sNullHypot,
SampleVector::Scalar* tMaxAlphaBetas,
SampleVector::Scalar* tMaxErrorAlphaBetas,
SampleVector::Scalar* g_accTimeMax,
SampleVector::Scalar* g_accTimeWgt,
TimeComputationState* g_state,
unsigned const int timeFitParameters_sizeEB,
unsigned const int timeFitParameters_sizeEE,
ConfigurationParameters::type const timeFitLimits_firstEB,
ConfigurationParameters::type const timeFitLimits_firstEE,
ConfigurationParameters::type const timeFitLimits_secondEB,
ConfigurationParameters::type const timeFitLimits_secondEE,
const int nchannels,
uint32_t const offsetForInputs) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nthreads_per_channel = 45; // n=10, n(n-1)/2
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockDim.x * blockIdx.x;
const int ch = gtx / nthreads_per_channel;
const int ltx = threadIdx.x % nthreads_per_channel;
const int ch_start = ch * nsamples;
const auto* dids = ch >= offsetForInputs ? dids_ee : dids_eb;
const int inputCh = ch >= offsetForInputs ? ch - offsetForInputs : ch;
// remove inactive threads
      // threads that return here should not affect the __syncthreads() below since they have exited the kernel
if (ch >= nchannels)
return;
const auto did = DetId{dids[inputCh]};
const auto isBarrel = did.subdetId() == EcalBarrel;
const auto* amplitudeFitParameters = isBarrel ? amplitudeFitParametersEB : amplitudeFitParametersEE;
const auto* timeFitParameters = isBarrel ? timeFitParametersEB : timeFitParametersEE;
const auto timeFitParameters_size = isBarrel ? timeFitParameters_sizeEB : timeFitParameters_sizeEE;
const auto timeFitLimits_first = isBarrel ? timeFitLimits_firstEB : timeFitLimits_firstEE;
const auto timeFitLimits_second = isBarrel ? timeFitLimits_secondEB : timeFitLimits_secondEE;
extern __shared__ char smem[];
ScalarType* shr_chi2s = reinterpret_cast<ScalarType*>(smem);
ScalarType* shr_time_wgt = shr_chi2s + blockDim.x;
ScalarType* shr_time_max = shr_time_wgt + blockDim.x;
ScalarType* shrTimeMax = shr_time_max + blockDim.x;
ScalarType* shrTimeWgt = shrTimeMax + blockDim.x;
// map tx -> (sample_i, sample_j)
int sample_i, sample_j = 0;
if (ltx >= 0 && ltx <= 8) {
sample_i = 0;
sample_j = 1 + ltx;
} else if (ltx <= 16) {
sample_i = 1;
sample_j = 2 + ltx - 9;
} else if (ltx <= 23) {
sample_i = 2;
sample_j = 3 + ltx - 17;
} else if (ltx <= 29) {
sample_i = 3;
sample_j = 4 + ltx - 24;
} else if (ltx <= 34) {
sample_i = 4;
sample_j = 5 + ltx - 30;
} else if (ltx <= 38) {
sample_i = 5;
sample_j = 6 + ltx - 35;
} else if (ltx <= 41) {
sample_i = 6;
sample_j = 7 + ltx - 39;
} else if (ltx <= 43) {
sample_i = 7;
sample_j = 8 + ltx - 42;
} else if (ltx <= 44) {
sample_i = 8;
sample_j = 9;
} else
assert(false);
const auto tx_i = ch_start + sample_i;
const auto tx_j = ch_start + sample_j;
//
// note, given the way we partition the block, with 45 threads per channel
// we will end up with inactive threads which need to be dragged along
// through the synching point
//
bool const condForUselessSamples = useless_sample_values[tx_i] || useless_sample_values[tx_j] ||
sample_values[tx_i] <= 1 || sample_values[tx_j] <= 1;
//
// see cpu implementation for explanation
//
ScalarType chi2 = std::numeric_limits<ScalarType>::max();
ScalarType tmax = 0;
ScalarType tmaxerr = 0;
shrTimeMax[threadIdx.x] = 0;
shrTimeWgt[threadIdx.x] = 0;
bool internalCondForSkipping1 = true;
bool internalCondForSkipping2 = true;
if (!condForUselessSamples) {
const auto rtmp = sample_values[tx_i] / sample_values[tx_j];
const auto invampl_i = 1.0 / sample_values[tx_i];
const auto relErr2_i = sample_value_errors[tx_i] * sample_value_errors[tx_i] * invampl_i * invampl_i;
const auto invampl_j = 1.0 / sample_values[tx_j];
const auto relErr2_j = sample_value_errors[tx_j] * sample_value_errors[tx_j] * invampl_j * invampl_j;
const auto err1 = rtmp * rtmp * (relErr2_i + relErr2_j);
auto err2 = sample_value_errors[tx_j] * (sample_values[tx_i] - sample_values[tx_j]) * (invampl_j * invampl_j);
// TODO non-divergent branch for a block if each block has 1 channel
// otherwise non-divergent for groups of 45 threads
// at this point, pedestal_nums[ch] can be either 0, 1 or 2
if (pedestal_nums[ch] == 2)
err2 *= err2 * 0.5;
const auto err3 = (0.289 * 0.289) * (invampl_j * invampl_j);
const auto total_error = std::sqrt(err1 + err2 + err3);
const auto alpha = amplitudeFitParameters[0];
const auto beta = amplitudeFitParameters[1];
const auto alphabeta = alpha * beta;
const auto invalphabeta = 1.0 / alphabeta;
// variables instead of a struct
const auto ratio_index = sample_i;
const auto ratio_step = sample_j - sample_i;
const auto ratio_value = rtmp;
const auto ratio_error = total_error;
const auto rlim_i_j = fast_expf(static_cast<ScalarType>(sample_j - sample_i) / beta) - 0.001;
internalCondForSkipping1 = !(total_error < 1.0 && rtmp > 0.001 && rtmp < rlim_i_j);
if (!internalCondForSkipping1) {
//
// precompute.
// in cpu version this was done conditionally
// however easier to do it here (precompute) and then just filter out
// if not needed
//
const auto l_timeFitLimits_first = timeFitLimits_first;
const auto l_timeFitLimits_second = timeFitLimits_second;
if (ratio_step == 1 && ratio_value >= l_timeFitLimits_first && ratio_value <= l_timeFitLimits_second) {
const auto time_max_i = static_cast<ScalarType>(ratio_index);
auto u = timeFitParameters[timeFitParameters_size - 1];
CMS_UNROLL_LOOP
for (int k = timeFitParameters_size - 2; k >= 0; k--)
u = u * ratio_value + timeFitParameters[k];
auto du = (timeFitParameters_size - 1) * (timeFitParameters[timeFitParameters_size - 1]);
for (int k = timeFitParameters_size - 2; k >= 1; k--)
du = du * ratio_value + k * timeFitParameters[k];
const auto error2 = ratio_error * ratio_error * du * du;
const auto time_max = error2 > 0 ? (time_max_i - u) / error2 : static_cast<ScalarType>(0);
const auto time_wgt = error2 > 0 ? 1.0 / error2 : static_cast<ScalarType>(0);
// store into shared mem
// note, this name is essentially identical to the one used
// below.
shrTimeMax[threadIdx.x] = error2 > 0 ? time_max : 0;
shrTimeWgt[threadIdx.x] = error2 > 0 ? time_wgt : 0;
} else {
shrTimeMax[threadIdx.x] = 0;
shrTimeWgt[threadIdx.x] = 0;
}
// continue with ratios
const auto stepOverBeta = static_cast<SampleVector::Scalar>(ratio_step) / beta;
const auto offset = static_cast<SampleVector::Scalar>(ratio_index) + alphabeta;
const auto rmin = ::max(ratio_value - ratio_error, 0.001);
const auto rmax = ::min(ratio_value + ratio_error,
fast_expf(static_cast<SampleVector::Scalar>(ratio_step) / beta) - 0.001);
const auto time1 = offset - ratio_step / (fast_expf((stepOverBeta - fast_logf(rmin)) / alpha) - 1.0);
const auto time2 = offset - ratio_step / (fast_expf((stepOverBeta - fast_logf(rmax)) / alpha) - 1.0);
// set these guys
tmax = 0.5 * (time1 + time2);
tmaxerr = 0.5 * std::sqrt((time1 - time2) * (time1 - time2));
#ifdef DEBUG_TC_MAKERATIO
if (ch == 1 || ch == 0)
printf("ch = %d ltx = %d tmax = %f tmaxerr = %f time1 = %f time2 = %f offset = %f rmin = %f rmax = %f\n",
ch,
ltx,
tmax,
tmaxerr,
time1,
time2,
offset,
rmin,
rmax);
#endif
SampleVector::Scalar sumAf = 0;
SampleVector::Scalar sumff = 0;
const int itmin = ::max(-1, static_cast<int>(::floor(tmax - alphabeta)));
auto loffset = (static_cast<ScalarType>(itmin) - tmax) * invalphabeta;
// TODO: data dependence
for (int it = itmin + 1; it < nsamples; it++) {
loffset += invalphabeta;
if (useless_sample_values[ch_start + it])
continue;
const auto inverr2 = 1.0 / (sample_value_errors[ch_start + it] * sample_value_errors[ch_start + it]);
const auto term1 = 1.0 + loffset;
const auto f = (term1 > 1e-6) ? fast_expf(alpha * (fast_logf(term1) - loffset)) : 0;
sumAf += sample_values[ch_start + it] * (f * inverr2);
sumff += f * (f * inverr2);
}
const auto sumAA = sumAAsNullHypot[ch];
const auto sum0 = sum0sNullHypot[ch];
chi2 = sumAA;
// TODO: sum0 can not be 0 below, need to introduce the check upfront
if (sumff > 0) {
chi2 = sumAA - sumAf * (sumAf / sumff);
}
chi2 /= sum0;
#ifdef DEBUG_TC_MAKERATIO
if (ch == 1 || ch == 0)
printf("ch = %d ltx = %d sumAf = %f sumff = %f sumAA = %f sum0 = %d tmax = %f tmaxerr = %f chi2 = %f\n",
ch,
ltx,
sumAf,
sumff,
sumAA,
static_cast<int>(sum0),
tmax,
tmaxerr,
chi2);
#endif
if (chi2 > 0 && tmax > 0 && tmaxerr > 0)
internalCondForSkipping2 = false;
else
chi2 = std::numeric_limits<ScalarType>::max();
}
}
// store into smem
shr_chi2s[threadIdx.x] = chi2;
__syncthreads();
// find min chi2 - quite crude for now
// TODO validate/check
char iter = nthreads_per_channel / 2 + nthreads_per_channel % 2;
bool oddElements = nthreads_per_channel % 2;
CMS_UNROLL_LOOP
while (iter >= 1) {
if (ltx < iter)
// for odd ns, the last guy will just store itself
// exception is for ltx == 0 and iter==1
shr_chi2s[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shr_chi2s[threadIdx.x]
: ::min(shr_chi2s[threadIdx.x], shr_chi2s[threadIdx.x + iter]);
__syncthreads();
oddElements = iter % 2;
iter = iter == 1 ? iter / 2 : iter / 2 + iter % 2;
}
// filter out inactive or useless samples threads
if (!condForUselessSamples && !internalCondForSkipping1 && !internalCondForSkipping2) {
// min chi2, now compute weighted average of tmax measurements
// see cpu version for more explanation
const auto chi2min = shr_chi2s[threadIdx.x - ltx];
const auto chi2Limit = chi2min + 1.0;
const auto inverseSigmaSquared = chi2 < chi2Limit ? 1.0 / (tmaxerr * tmaxerr) : 0.0;
#ifdef DEBUG_TC_MAKERATIO
if (ch == 1 || ch == 0)
printf("ch = %d ltx = %d chi2min = %f chi2Limit = %f inverseSigmaSquared = %f\n",
ch,
ltx,
chi2min,
chi2Limit,
inverseSigmaSquared);
#endif
// store into shared mem and run reduction
// TODO: check if cooperative groups would be better
// TODO: check if shuffling intrinsics are better
shr_time_wgt[threadIdx.x] = inverseSigmaSquared;
shr_time_max[threadIdx.x] = tmax * inverseSigmaSquared;
} else {
shr_time_wgt[threadIdx.x] = 0;
shr_time_max[threadIdx.x] = 0;
}
__syncthreads();
// reduce to compute time_max and time_wgt
iter = nthreads_per_channel / 2 + nthreads_per_channel % 2;
oddElements = nthreads_per_channel % 2;
CMS_UNROLL_LOOP
while (iter >= 1) {
if (ltx < iter) {
shr_time_wgt[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shr_time_wgt[threadIdx.x]
: shr_time_wgt[threadIdx.x] + shr_time_wgt[threadIdx.x + iter];
shr_time_max[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shr_time_max[threadIdx.x]
: shr_time_max[threadIdx.x] + shr_time_max[threadIdx.x + iter];
shrTimeMax[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shrTimeMax[threadIdx.x]
: shrTimeMax[threadIdx.x] + shrTimeMax[threadIdx.x + iter];
shrTimeWgt[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shrTimeWgt[threadIdx.x]
: shrTimeWgt[threadIdx.x] + shrTimeWgt[threadIdx.x + iter];
}
__syncthreads();
oddElements = iter % 2;
iter = iter == 1 ? iter / 2 : iter / 2 + iter % 2;
}
// load from shared memory the 0th guy (will contain accumulated values)
// compute
// store into global mem
if (ltx == 0) {
const auto tmp_time_max = shr_time_max[threadIdx.x];
const auto tmp_time_wgt = shr_time_wgt[threadIdx.x];
        // we are done if the number of time ratios is 0
if (tmp_time_wgt == 0 && tmp_time_max == 0) {
g_state[ch] = TimeComputationState::Finished;
return;
}
// no div by 0
const auto tMaxAlphaBeta = tmp_time_max / tmp_time_wgt;
const auto tMaxErrorAlphaBeta = 1.0 / std::sqrt(tmp_time_wgt);
tMaxAlphaBetas[ch] = tMaxAlphaBeta;
tMaxErrorAlphaBetas[ch] = tMaxErrorAlphaBeta;
g_accTimeMax[ch] = shrTimeMax[threadIdx.x];
g_accTimeWgt[ch] = shrTimeWgt[threadIdx.x];
g_state[ch] = TimeComputationState::NotFinished;
#ifdef DEBUG_TC_MAKERATIO
printf("ch = %d time_max = %f time_wgt = %f\n", ch, tmp_time_max, tmp_time_wgt);
printf("ch = %d tMaxAlphaBeta = %f tMaxErrorAlphaBeta = %f timeMax = %f timeWgt = %f\n",
ch,
tMaxAlphaBeta,
tMaxErrorAlphaBeta,
shrTimeMax[threadIdx.x],
shrTimeWgt[threadIdx.x]);
#endif
}
}
/// launch ctx parameters are
/// 10 threads per channel, N channels per block, Y blocks
/// TODO: do we need to keep the state around or can be removed?!
//#define DEBUG_FINDAMPLCHI2_AND_FINISH
__global__ void kernel_time_compute_findamplchi2_and_finish(
SampleVector::Scalar const* sample_values,
SampleVector::Scalar const* sample_value_errors,
uint32_t const* dids_eb,
uint32_t const* dids_ee,
bool const* useless_samples,
SampleVector::Scalar const* g_tMaxAlphaBeta,
SampleVector::Scalar const* g_tMaxErrorAlphaBeta,
SampleVector::Scalar const* g_accTimeMax,
SampleVector::Scalar const* g_accTimeWgt,
ConfigurationParameters::type const* amplitudeFitParametersEB,
ConfigurationParameters::type const* amplitudeFitParametersEE,
SampleVector::Scalar const* sumAAsNullHypot,
SampleVector::Scalar const* sum0sNullHypot,
SampleVector::Scalar const* chi2sNullHypot,
TimeComputationState* g_state,
SampleVector::Scalar* g_ampMaxAlphaBeta,
SampleVector::Scalar* g_ampMaxError,
SampleVector::Scalar* g_timeMax,
SampleVector::Scalar* g_timeError,
const int nchannels,
uint32_t const offsetForInputs) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch = gtx / nsamples;
const int sample = threadIdx.x % nsamples;
const auto* dids = ch >= offsetForInputs ? dids_ee : dids_eb;
const int inputCh = ch >= offsetForInputs ? ch - offsetForInputs : ch;
// configure shared mem
// per block, we need #threads per block * 2 * sizeof(ScalarType)
// we run with N channels per block
extern __shared__ char smem[];
ScalarType* shr_sumAf = reinterpret_cast<ScalarType*>(smem);
ScalarType* shr_sumff = shr_sumAf + blockDim.x;
if (ch >= nchannels)
return;
auto state = g_state[ch];
const auto did = DetId{dids[inputCh]};
const auto* amplitudeFitParameters =
did.subdetId() == EcalBarrel ? amplitudeFitParametersEB : amplitudeFitParametersEE;
// TODO is that better than storing into global and launching another kernel
// for the first 10 threads
if (state == TimeComputationState::NotFinished) {
const auto alpha = amplitudeFitParameters[0];
const auto beta = amplitudeFitParameters[1];
const auto alphabeta = alpha * beta;
const auto invalphabeta = 1.0 / alphabeta;
const auto tMaxAlphaBeta = g_tMaxAlphaBeta[ch];
const auto sample_value = sample_values[gtx];
const auto sample_value_error = sample_value_errors[gtx];
const auto inverr2 =
useless_samples[gtx] ? static_cast<ScalarType>(0) : 1.0 / (sample_value_error * sample_value_error);
const auto offset = (static_cast<ScalarType>(sample) - tMaxAlphaBeta) * invalphabeta;
const auto term1 = 1.0 + offset;
const auto f = term1 > 1e-6 ? fast_expf(alpha * (fast_logf(term1) - offset)) : static_cast<ScalarType>(0.0);
const auto sumAf = sample_value * (f * inverr2);
const auto sumff = f * (f * inverr2);
// store into shared mem
shr_sumAf[threadIdx.x] = sumAf;
shr_sumff[threadIdx.x] = sumff;
} else {
shr_sumAf[threadIdx.x] = 0;
shr_sumff[threadIdx.x] = 0;
}
__syncthreads();
// reduce
// unroll completely here (but hardcoded)
if (sample < 5) {
shr_sumAf[threadIdx.x] += shr_sumAf[threadIdx.x + 5];
shr_sumff[threadIdx.x] += shr_sumff[threadIdx.x + 5];
}
__syncthreads();
if (sample < 2) {
// will need to subtract for ltx = 3, we double count here
shr_sumAf[threadIdx.x] += shr_sumAf[threadIdx.x + 2] + shr_sumAf[threadIdx.x + 3];
shr_sumff[threadIdx.x] += shr_sumff[threadIdx.x + 2] + shr_sumff[threadIdx.x + 3];
}
__syncthreads();
if (sample == 0) {
// exit if the state is done
        // note, we do not exit before all __syncthreads are finished
if (state == TimeComputationState::Finished) {
g_timeMax[ch] = 5;
g_timeError[ch] = -999;
return;
}
// subtract to avoid double counting
const auto sumff = shr_sumff[threadIdx.x] + shr_sumff[threadIdx.x + 1] - shr_sumff[threadIdx.x + 3];
const auto sumAf = shr_sumAf[threadIdx.x] + shr_sumAf[threadIdx.x + 1] - shr_sumAf[threadIdx.x + 3];
const auto ampMaxAlphaBeta = sumff > 0 ? sumAf / sumff : 0;
const auto sumAA = sumAAsNullHypot[ch];
const auto sum0 = sum0sNullHypot[ch];
const auto nullChi2 = chi2sNullHypot[ch];
if (sumff > 0) {
const auto chi2AlphaBeta = (sumAA - sumAf * sumAf / sumff) / sum0;
if (chi2AlphaBeta > nullChi2) {
// null hypothesis is better
state = TimeComputationState::Finished;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d chi2AlphaBeta = %f nullChi2 = %f sumAA = %f sumAf = %f sumff = %f sum0 = %f\n",
ch,
chi2AlphaBeta,
nullChi2,
sumAA,
sumAf,
sumff,
sum0);
#endif
}
// store to global
g_ampMaxAlphaBeta[ch] = ampMaxAlphaBeta;
} else {
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d sum0 = %f sumAA = %f sumff = %f sumAf = %f\n", ch, sum0, sumAA, sumff, sumAf);
#endif
state = TimeComputationState::Finished;
}
// store the state to global and finish calcs
g_state[ch] = state;
if (state == TimeComputationState::Finished) {
// store default values into global
g_timeMax[ch] = 5;
g_timeError[ch] = -999;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d finished state\n", ch);
#endif
return;
}
const auto ampMaxError = g_ampMaxError[ch];
const auto test_ratio = ampMaxAlphaBeta / ampMaxError;
const auto accTimeMax = g_accTimeMax[ch];
const auto accTimeWgt = g_accTimeWgt[ch];
const auto tMaxAlphaBeta = g_tMaxAlphaBeta[ch];
const auto tMaxErrorAlphaBeta = g_tMaxErrorAlphaBeta[ch];
// branch to separate large vs small pulses
// see cpu version for more info
if (test_ratio > 5.0 && accTimeWgt > 0) {
const auto tMaxRatio = accTimeWgt > 0 ? accTimeMax / accTimeWgt : static_cast<ScalarType>(0);
const auto tMaxErrorRatio = accTimeWgt > 0 ? 1.0 / std::sqrt(accTimeWgt) : static_cast<ScalarType>(0);
if (test_ratio > 10.0) {
g_timeMax[ch] = tMaxRatio;
g_timeError[ch] = tMaxErrorRatio;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d tMaxRatio = %f tMaxErrorRatio = %f\n", ch, tMaxRatio, tMaxErrorRatio);
#endif
} else {
const auto timeMax = (tMaxAlphaBeta * (10.0 - ampMaxAlphaBeta / ampMaxError) +
tMaxRatio * (ampMaxAlphaBeta / ampMaxError - 5.0)) /
5.0;
const auto timeError = (tMaxErrorAlphaBeta * (10.0 - ampMaxAlphaBeta / ampMaxError) +
tMaxErrorRatio * (ampMaxAlphaBeta / ampMaxError - 5.0)) /
5.0;
state = TimeComputationState::Finished;
g_state[ch] = state;
g_timeMax[ch] = timeMax;
g_timeError[ch] = timeError;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d timeMax = %f timeError = %f\n", ch, timeMax, timeError);
#endif
}
} else {
state = TimeComputationState::Finished;
g_state[ch] = state;
g_timeMax[ch] = tMaxAlphaBeta;
g_timeError[ch] = tMaxErrorAlphaBeta;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d tMaxAlphaBeta = %f tMaxErrorAlphaBeta = %f\n", ch, tMaxAlphaBeta, tMaxErrorAlphaBeta);
#endif
}
}
}
__global__ void kernel_time_compute_fixMGPAslew(uint16_t const* digis_eb,
uint16_t const* digis_ee,
SampleVector::Scalar* sample_values,
SampleVector::Scalar* sample_value_errors,
bool* useless_sample_values,
unsigned const int sample_mask,
const int nchannels,
uint32_t const offsetForInputs) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch = gtx / nsamples;
const int sample = threadIdx.x % nsamples;
const int inputGtx = ch >= offsetForInputs ? gtx - offsetForInputs * nsamples : gtx;
const auto* digis = ch >= offsetForInputs ? digis_ee : digis_eb;
// remove thread for sample 0, oversubscribing is easier than ....
if (ch >= nchannels || sample == 0)
return;
if (!use_sample(sample_mask, sample))
return;
const auto gainIdPrev = ecal::mgpa::gainId(digis[inputGtx - 1]);
const auto gainIdNext = ecal::mgpa::gainId(digis[inputGtx]);
if (gainIdPrev >= 1 && gainIdPrev <= 3 && gainIdNext >= 1 && gainIdNext <= 3 && gainIdPrev < gainIdNext) {
sample_values[gtx - 1] = 0;
sample_value_errors[gtx - 1] = 1e+9;
useless_sample_values[gtx - 1] = true;
}
}
__global__ void kernel_time_compute_ampl(SampleVector::Scalar const* sample_values,
SampleVector::Scalar const* sample_value_errors,
uint32_t const* dids,
bool const* useless_samples,
SampleVector::Scalar const* g_timeMax,
SampleVector::Scalar const* amplitudeFitParametersEB,
SampleVector::Scalar const* amplitudeFitParametersEE,
SampleVector::Scalar* g_amplitudeMax,
const int nchannels) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr ScalarType corr4 = 1.;
constexpr ScalarType corr6 = 1.;
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch = gtx / nsamples;
const int sample = threadIdx.x % nsamples;
if (ch >= nchannels)
return;
const auto did = DetId{dids[ch]};
const auto* amplitudeFitParameters =
did.subdetId() == EcalBarrel ? amplitudeFitParametersEB : amplitudeFitParametersEE;
// configure shared mem
extern __shared__ char smem[];
ScalarType* shr_sum1 = reinterpret_cast<ScalarType*>(smem);
auto* shr_sumA = shr_sum1 + blockDim.x;
auto* shr_sumF = shr_sumA + blockDim.x;
auto* shr_sumAF = shr_sumF + blockDim.x;
auto* shr_sumFF = shr_sumAF + blockDim.x;
const auto alpha = amplitudeFitParameters[0];
const auto beta = amplitudeFitParameters[1];
const auto timeMax = g_timeMax[ch];
const auto pedestalLimit = timeMax - (alpha * beta) - 1.0;
const auto sample_value = sample_values[gtx];
const auto sample_value_error = sample_value_errors[gtx];
const auto inverr2 =
sample_value_error > 0 ? 1. / (sample_value_error * sample_value_error) : static_cast<ScalarType>(0);
const auto termOne = 1 + (sample - timeMax) / (alpha * beta);
const auto f = termOne > 1.e-5 ? fast_expf(alpha * fast_logf(termOne) - (sample - timeMax) / beta)
: static_cast<ScalarType>(0.);
bool const cond = ((sample < pedestalLimit) || (f > 0.6 * corr6 && sample <= timeMax) ||
(f > 0.4 * corr4 && sample >= timeMax)) &&
!useless_samples[gtx];
// store into shared mem
shr_sum1[threadIdx.x] = cond ? inverr2 : static_cast<ScalarType>(0);
shr_sumA[threadIdx.x] = cond ? sample_value * inverr2 : static_cast<ScalarType>(0);
shr_sumF[threadIdx.x] = cond ? f * inverr2 : static_cast<ScalarType>(0);
shr_sumAF[threadIdx.x] = cond ? (f * inverr2) * sample_value : static_cast<ScalarType>(0);
shr_sumFF[threadIdx.x] = cond ? f * (f * inverr2) : static_cast<ScalarType>(0);
// reduction
if (sample <= 4) {
shr_sum1[threadIdx.x] += shr_sum1[threadIdx.x + 5];
shr_sumA[threadIdx.x] += shr_sumA[threadIdx.x + 5];
shr_sumF[threadIdx.x] += shr_sumF[threadIdx.x + 5];
shr_sumAF[threadIdx.x] += shr_sumAF[threadIdx.x + 5];
shr_sumFF[threadIdx.x] += shr_sumFF[threadIdx.x + 5];
}
__syncthreads();
if (sample < 2) {
// note: we double count sample 3
shr_sum1[threadIdx.x] += shr_sum1[threadIdx.x + 2] + shr_sum1[threadIdx.x + 3];
shr_sumA[threadIdx.x] += shr_sumA[threadIdx.x + 2] + shr_sumA[threadIdx.x + 3];
shr_sumF[threadIdx.x] += shr_sumF[threadIdx.x + 2] + shr_sumF[threadIdx.x + 3];
shr_sumAF[threadIdx.x] += shr_sumAF[threadIdx.x + 2] + shr_sumAF[threadIdx.x + 3];
shr_sumFF[threadIdx.x] += shr_sumFF[threadIdx.x + 2] + shr_sumFF[threadIdx.x + 3];
}
__syncthreads();
if (sample == 0) {
const auto sum1 = shr_sum1[threadIdx.x] + shr_sum1[threadIdx.x + 1] - shr_sum1[threadIdx.x + 3];
const auto sumA = shr_sumA[threadIdx.x] + shr_sumA[threadIdx.x + 1] - shr_sumA[threadIdx.x + 3];
const auto sumF = shr_sumF[threadIdx.x] + shr_sumF[threadIdx.x + 1] - shr_sumF[threadIdx.x + 3];
const auto sumAF = shr_sumAF[threadIdx.x] + shr_sumAF[threadIdx.x + 1] - shr_sumAF[threadIdx.x + 3];
const auto sumFF = shr_sumFF[threadIdx.x] + shr_sumFF[threadIdx.x + 1] - shr_sumFF[threadIdx.x + 3];
const auto denom = sumFF * sum1 - sumF * sumF;
const auto condForDenom = sum1 > 0 && std::abs(denom) > 1.e-20;
const auto amplitudeMax = condForDenom ? (sumAF * sum1 - sumA * sumF) / denom : static_cast<ScalarType>(0.);
// store into global mem
g_amplitudeMax[ch] = amplitudeMax;
}
}
//#define ECAL_RECO_CUDA_TC_INIT_DEBUG
__global__ void kernel_time_computation_init(uint16_t const* digis_eb,
uint32_t const* dids_eb,
uint16_t const* digis_ee,
uint32_t const* dids_ee,
float const* rms_x12,
float const* rms_x6,
float const* rms_x1,
float const* mean_x12,
float const* mean_x6,
float const* mean_x1,
float const* gain12Over6,
float const* gain6Over1,
SampleVector::Scalar* sample_values,
SampleVector::Scalar* sample_value_errors,
SampleVector::Scalar* ampMaxError,
bool* useless_sample_values,
char* pedestal_nums,
uint32_t const offsetForHashes,
uint32_t const offsetForInputs,
unsigned const int sample_maskEB,
unsigned const int sample_maskEE,
int nchannels) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int tx = threadIdx.x + blockDim.x * blockIdx.x;
const int ch = tx / nsamples;
const int inputTx = ch >= offsetForInputs ? tx - offsetForInputs * nsamples : tx;
const int inputCh = ch >= offsetForInputs ? ch - offsetForInputs : ch;
const auto* digis = ch >= offsetForInputs ? digis_ee : digis_eb;
const auto* dids = ch >= offsetForInputs ? dids_ee : dids_eb;
// threads that return here should not affect the __syncthreads() below since they have exited the kernel
if (ch >= nchannels)
return;
// indices/inits
const int sample = tx % nsamples;
const int input_ch_start = inputCh * nsamples;
SampleVector::Scalar pedestal = 0.;
int num = 0;
// configure shared mem
extern __shared__ char smem[];
ScalarType* shrSampleValues = reinterpret_cast<SampleVector::Scalar*>(smem);
ScalarType* shrSampleValueErrors = shrSampleValues + blockDim.x;
// 0 and 1 sample values
const auto adc0 = ecal::mgpa::adc(digis[input_ch_start]);
const auto gainId0 = ecal::mgpa::gainId(digis[input_ch_start]);
const auto adc1 = ecal::mgpa::adc(digis[input_ch_start + 1]);
const auto gainId1 = ecal::mgpa::gainId(digis[input_ch_start + 1]);
const auto did = DetId{dids[inputCh]};
const auto isBarrel = did.subdetId() == EcalBarrel;
const auto sample_mask = did.subdetId() == EcalBarrel ? sample_maskEB : sample_maskEE;
const auto hashedId = isBarrel ? ecal::reconstruction::hashedIndexEB(did.rawId())
: offsetForHashes + ecal::reconstruction::hashedIndexEE(did.rawId());
// set pedestal
// TODO this branch is non-divergent for a group of 10 threads
if (gainId0 == 1 && use_sample(sample_mask, 0)) {
pedestal = static_cast<SampleVector::Scalar>(adc0);
num = 1;
const auto diff = adc1 - adc0;
if (gainId1 == 1 && use_sample(sample_mask, 1) && std::abs(diff) < 3 * rms_x12[hashedId]) {
pedestal = (pedestal + static_cast<SampleVector::Scalar>(adc1)) / 2.0;
num = 2;
}
} else {
pedestal = mean_x12[ch];
}
// ped subtracted and gain-renormalized samples.
const auto gainId = ecal::mgpa::gainId(digis[inputTx]);
const auto adc = ecal::mgpa::adc(digis[inputTx]);
bool bad = false;
SampleVector::Scalar sample_value, sample_value_error;
// TODO divergent branch
// TODO: piece below is general both for amplitudes and timing
// potentially there is a way to reduce the amount of code...
if (!use_sample(sample_mask, sample)) {
bad = true;
sample_value = 0;
sample_value_error = 0;
} else if (gainId == 1) {
sample_value = static_cast<SampleVector::Scalar>(adc) - pedestal;
sample_value_error = rms_x12[hashedId];
} else if (gainId == 2) {
sample_value = (static_cast<SampleVector::Scalar>(adc) - mean_x6[hashedId]) * gain12Over6[hashedId];
sample_value_error = rms_x6[hashedId] * gain12Over6[hashedId];
} else if (gainId == 3) {
sample_value =
(static_cast<SampleVector::Scalar>(adc) - mean_x1[hashedId]) * gain6Over1[hashedId] * gain12Over6[hashedId];
sample_value_error = rms_x1[hashedId] * gain6Over1[hashedId] * gain12Over6[hashedId];
} else {
sample_value = 0;
sample_value_error = 0;
bad = true;
}
// TODO: make sure we save things correctly when sample is useless
const auto useless_sample = (sample_value_error <= 0) | bad;
useless_sample_values[tx] = useless_sample;
sample_values[tx] = sample_value;
sample_value_errors[tx] = useless_sample ? 1e+9 : sample_value_error;
// DEBUG
#ifdef ECAL_RECO_CUDA_TC_INIT_DEBUG
if (ch == 0) {
printf("sample = %d sample_value = %f sample_value_error = %f useless = %c\n",
sample,
sample_value,
sample_value_error,
useless_sample ? '1' : '0');
}
#endif
// store into the shared mem
shrSampleValues[threadIdx.x] = sample_value_error > 0 ? sample_value : std::numeric_limits<ScalarType>::min();
shrSampleValueErrors[threadIdx.x] = sample_value_error;
__syncthreads();
// perform a max reduction over the sample values, carrying along the corresponding sample value error
if (sample < 5) {
// note, if equal -> we keep the value with lower sample as for cpu
shrSampleValueErrors[threadIdx.x] = shrSampleValues[threadIdx.x] < shrSampleValues[threadIdx.x + 5]
? shrSampleValueErrors[threadIdx.x + 5]
: shrSampleValueErrors[threadIdx.x];
shrSampleValues[threadIdx.x] = ::max(shrSampleValues[threadIdx.x], shrSampleValues[threadIdx.x + 5]);
}
__syncthreads();
// a bit of an overkill, but easier than comparing across 3 values
if (sample < 3) {
shrSampleValueErrors[threadIdx.x] = shrSampleValues[threadIdx.x] < shrSampleValues[threadIdx.x + 3]
? shrSampleValueErrors[threadIdx.x + 3]
: shrSampleValueErrors[threadIdx.x];
shrSampleValues[threadIdx.x] = ::max(shrSampleValues[threadIdx.x], shrSampleValues[threadIdx.x + 3]);
}
__syncthreads();
if (sample < 2) {
shrSampleValueErrors[threadIdx.x] = shrSampleValues[threadIdx.x] < shrSampleValues[threadIdx.x + 2]
? shrSampleValueErrors[threadIdx.x + 2]
: shrSampleValueErrors[threadIdx.x];
shrSampleValues[threadIdx.x] = ::max(shrSampleValues[threadIdx.x], shrSampleValues[threadIdx.x + 2]);
}
__syncthreads();
if (sample == 0) {
// we only need the max error
const auto maxSampleValueError = shrSampleValues[threadIdx.x] < shrSampleValues[threadIdx.x + 1]
? shrSampleValueErrors[threadIdx.x + 1]
: shrSampleValueErrors[threadIdx.x];
// # pedestal samples used
pedestal_nums[ch] = num;
// this is used downstream
ampMaxError[ch] = maxSampleValueError;
// DEBUG
#ifdef ECAL_RECO_CUDA_TC_INIT_DEBUG
if (ch == 0) {
printf("pedestal_nums = %d ampMaxError = %f\n", num, maxSampleValueError);
}
#endif
}
}
///
/// launch context parameters: 1 thread per channel
///
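///
/// Illustrative host-side launch consistent with the comment above. This is a
/// sketch only, not part of the original file: the block size and the stream
/// name are assumptions. One thread per channel, no dynamic shared memory:
///
///   constexpr int threads_per_block = 32;  // assumption
///   dim3 const blocks((nchannels + threads_per_block - 1) / threads_per_block);
///   kernel_time_correction_and_finalize<<<blocks, threads_per_block, 0, stream>>>(/* ... */);
///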
//#define DEBUG_TIME_CORRECTION
__global__ void kernel_time_correction_and_finalize(
// SampleVector::Scalar const* g_amplitude,
::ecal::reco::StorageScalarType const* g_amplitudeEB,
::ecal::reco::StorageScalarType const* g_amplitudeEE,
uint16_t const* digis_eb,
uint32_t const* dids_eb,
uint16_t const* digis_ee,
uint32_t const* dids_ee,
float const* amplitudeBinsEB,
float const* amplitudeBinsEE,
float const* shiftBinsEB,
float const* shiftBinsEE,
SampleVector::Scalar const* g_timeMax,
SampleVector::Scalar const* g_timeError,
float const* g_rms_x12,
float const* timeCalibConstant,
float* g_jitterEB,
float* g_jitterEE,
float* g_jitterErrorEB,
float* g_jitterErrorEE,
uint32_t* flagsEB,
uint32_t* flagsEE,
const int amplitudeBinsSizeEB,
const int amplitudeBinsSizeEE,
ConfigurationParameters::type const timeConstantTermEB,
ConfigurationParameters::type const timeConstantTermEE,
float const offsetTimeValueEB,
float const offsetTimeValueEE,
ConfigurationParameters::type const timeNconstEB,
ConfigurationParameters::type const timeNconstEE,
ConfigurationParameters::type const amplitudeThresholdEB,
ConfigurationParameters::type const amplitudeThresholdEE,
ConfigurationParameters::type const outOfTimeThreshG12pEB,
ConfigurationParameters::type const outOfTimeThreshG12pEE,
ConfigurationParameters::type const outOfTimeThreshG12mEB,
ConfigurationParameters::type const outOfTimeThreshG12mEE,
ConfigurationParameters::type const outOfTimeThreshG61pEB,
ConfigurationParameters::type const outOfTimeThreshG61pEE,
ConfigurationParameters::type const outOfTimeThreshG61mEB,
ConfigurationParameters::type const outOfTimeThreshG61mEE,
uint32_t const offsetForHashes,
uint32_t const offsetForInputs,
const int nchannels) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockIdx.x * blockDim.x;
const int inputGtx = gtx >= offsetForInputs ? gtx - offsetForInputs : gtx;
const auto* dids = gtx >= offsetForInputs ? dids_ee : dids_eb;
const auto& digis = gtx >= offsetForInputs ? digis_ee : digis_eb;
// filter out outside of range threads
if (gtx >= nchannels)
return;
// need to ref the right ptrs
#define ARRANGE(var) auto* var = gtx >= offsetForInputs ? var##EE : var##EB
ARRANGE(g_amplitude);
ARRANGE(g_jitter);
ARRANGE(g_jitterError);
ARRANGE(flags);
#undef ARRANGE
const auto did = DetId{dids[inputGtx]};
const auto isBarrel = did.subdetId() == EcalBarrel;
const auto hashedId = isBarrel ? ecal::reconstruction::hashedIndexEB(did.rawId())
: offsetForHashes + ecal::reconstruction::hashedIndexEE(did.rawId());
const auto* amplitudeBins = isBarrel ? amplitudeBinsEB : amplitudeBinsEE;
const auto* shiftBins = isBarrel ? shiftBinsEB : shiftBinsEE;
const auto amplitudeBinsSize = isBarrel ? amplitudeBinsSizeEB : amplitudeBinsSizeEE;
const auto timeConstantTerm = isBarrel ? timeConstantTermEB : timeConstantTermEE;
const auto timeNconst = isBarrel ? timeNconstEB : timeNconstEE;
const auto offsetTimeValue = isBarrel ? offsetTimeValueEB : offsetTimeValueEE;
const auto amplitudeThreshold = isBarrel ? amplitudeThresholdEB : amplitudeThresholdEE;
const auto outOfTimeThreshG12p = isBarrel ? outOfTimeThreshG12pEB : outOfTimeThreshG12pEE;
const auto outOfTimeThreshG12m = isBarrel ? outOfTimeThreshG12mEB : outOfTimeThreshG12mEE;
const auto outOfTimeThreshG61p = isBarrel ? outOfTimeThreshG61pEB : outOfTimeThreshG61pEE;
const auto outOfTimeThreshG61m = isBarrel ? outOfTimeThreshG61mEB : outOfTimeThreshG61mEE;
// load some
const auto amplitude = g_amplitude[inputGtx];
const auto rms_x12 = g_rms_x12[hashedId];
const auto timeCalibConst = timeCalibConstant[hashedId];
int myBin = -1;
for (int bin = 0; bin < amplitudeBinsSize; bin++) {
if (amplitude > amplitudeBins[bin])
myBin = bin;
else
break;
}
ScalarType correction = 0;
if (myBin == -1) {
correction = shiftBins[0];
} else if (myBin == amplitudeBinsSize - 1) {
correction = shiftBins[myBin];
} else {
correction = shiftBins[myBin + 1] - shiftBins[myBin];
correction *= (amplitude - amplitudeBins[myBin]) / (amplitudeBins[myBin + 1] - amplitudeBins[myBin]);
correction += shiftBins[myBin];
}
// correction * 1./25.
correction = correction * 0.04;
const auto timeMax = g_timeMax[gtx];
const auto timeError = g_timeError[gtx];
const auto jitter = timeMax - 5 + correction;
const auto jitterError =
std::sqrt(timeError * timeError + timeConstantTerm * timeConstantTerm * 0.04 * 0.04); // 0.04 = 1./25.
#ifdef DEBUG_TIME_CORRECTION
printf("ch = %d timeMax = %f timeError = %f jitter = %f correction = %f\n",
gtx,
timeMax,
timeError,
jitter,
correction);
// }
#endif
// store back to global
g_jitter[inputGtx] = jitter;
g_jitterError[inputGtx] = jitterError;
// set the flag
// TODO: replace with something more efficient (if required),
// for now just to make it work
if (amplitude > amplitudeThreshold * rms_x12) {
auto threshP = outOfTimeThreshG12p;
auto threshM = outOfTimeThreshG12m;
if (amplitude > 3000.) {
for (int isample = 0; isample < nsamples; isample++) {
int gainid = ecal::mgpa::gainId(digis[nsamples * inputGtx + isample]);
if (gainid != 1) {
threshP = outOfTimeThreshG61p;
threshM = outOfTimeThreshG61m;
break;
}
}
}
const auto correctedTime = (timeMax - 5) * 25 + timeCalibConst + offsetTimeValue;
const auto nterm = timeNconst * rms_x12 / amplitude;
const auto sigmat = std::sqrt(nterm * nterm + timeConstantTerm * timeConstantTerm);
if (correctedTime > sigmat * threshP || correctedTime < -sigmat * threshM)
flags[inputGtx] |= 0x1 << EcalUncalibratedRecHit::kOutOfTime;
}
}
} // namespace multifit
} // namespace ecal
|
a9fe6cc1a2a9204e0ee1add6cae4f050c98e15b6.cu
|
#include <cmath>
#include <limits>
#include <cuda.h>
#include "DataFormats/EcalDigi/interface/EcalDataFrame.h"
#include "DataFormats/EcalRecHit/interface/EcalUncalibratedRecHit.h"
#include "DataFormats/Math/interface/approx_exp.h"
#include "DataFormats/Math/interface/approx_log.h"
#include "FWCore/Utilities/interface/CMSUnrollLoop.h"
#include "Common.h"
#include "TimeComputationKernels.h"
#include "KernelHelpers.h"
//#define DEBUG
//#define ECAL_RECO_CUDA_DEBUG
namespace ecal {
namespace multifit {
__device__ __forceinline__ bool use_sample(unsigned int sample_mask, unsigned int sample) {
return sample_mask & (0x1 << (EcalDataFrame::MAXSAMPLES - (sample + 1)));
}
__global__ void kernel_time_compute_nullhypot(SampleVector::Scalar const* sample_values,
SampleVector::Scalar const* sample_value_errors,
bool const* useless_sample_values,
SampleVector::Scalar* chi2s,
SampleVector::Scalar* sum0s,
SampleVector::Scalar* sumAAs,
const int nchannels) {
using ScalarType = SampleVector::Scalar;
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
int tx = threadIdx.x + blockDim.x * blockIdx.x;
int ltx = threadIdx.x;
int ch = tx / nsamples;
int nchannels_per_block = blockDim.x / nsamples;
// threads that return here should not affect the __syncthreads() below since they have exited the kernel
if (ch >= nchannels)
return;
int sample = tx % nsamples;
// shared mem inits
extern __shared__ char sdata[];
char* s_sum0 = sdata;
SampleVector::Scalar* s_sum1 = reinterpret_cast<SampleVector::Scalar*>(s_sum0 + nchannels_per_block * nsamples);
SampleVector::Scalar* s_sumA = s_sum1 + nchannels_per_block * nsamples;
SampleVector::Scalar* s_sumAA = s_sumA + nchannels_per_block * nsamples;
// TODO make sure no div by 0
const auto inv_error =
useless_sample_values[tx] ? 0.0 : 1.0 / (sample_value_errors[tx] * sample_value_errors[tx]);
const auto sample_value = sample_values[tx];
s_sum0[ltx] = useless_sample_values[tx] ? 0 : 1;
s_sum1[ltx] = inv_error;
s_sumA[ltx] = sample_value * inv_error;
s_sumAA[ltx] = sample_value * sample_value * inv_error;
__syncthreads();
// 5 threads for [0, 4] samples
if (sample < 5) {
s_sum0[ltx] += s_sum0[ltx + 5];
s_sum1[ltx] += s_sum1[ltx + 5];
s_sumA[ltx] += s_sumA[ltx + 5];
s_sumAA[ltx] += s_sumAA[ltx + 5];
}
__syncthreads();
if (sample < 2) {
// note double counting of sample 3
s_sum0[ltx] += s_sum0[ltx + 2] + s_sum0[ltx + 3];
s_sum1[ltx] += s_sum1[ltx + 2] + s_sum1[ltx + 3];
s_sumA[ltx] += s_sumA[ltx + 2] + s_sumA[ltx + 3];
s_sumAA[ltx] += s_sumAA[ltx + 2] + s_sumAA[ltx + 3];
}
__syncthreads();
if (sample == 0) {
// note, subtract to remove the double counting of sample == 3
const auto sum0 = s_sum0[ltx] + s_sum0[ltx + 1] - s_sum0[ltx + 3];
const auto sum1 = s_sum1[ltx] + s_sum1[ltx + 1] - s_sum1[ltx + 3];
const auto sumA = s_sumA[ltx] + s_sumA[ltx + 1] - s_sumA[ltx + 3];
const auto sumAA = s_sumAA[ltx] + s_sumAA[ltx + 1] - s_sumAA[ltx + 3];
const auto chi2 = sum0 > 0 ? (sumAA - sumA * sumA / sum1) / sum0 : static_cast<ScalarType>(0);
chi2s[ch] = chi2;
sum0s[ch] = sum0;
sumAAs[ch] = sumAA;
#ifdef DEBUG_TC_NULLHYPOT
if (ch == 0) {
printf("chi2 = %f sum0 = %d sumAA = %f\n", chi2, static_cast<int>(sum0), sumAA);
}
#endif
}
}
constexpr float fast_expf(float x) { return unsafe_expf<6>(x); }
constexpr float fast_logf(float x) { return unsafe_logf<7>(x); }
//#define DEBUG_TC_MAKERATIO
//
// launch ctx parameters are
// 45 threads per channel, X channels per block, Y blocks
// 45 comes from: 10 samples for i <- 0 to 9 and for j <- i+1 to 9
// TODO: it might be much better to use 32 threads per channel instead of 45
// to simplify the synchronization
//
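//
// Illustrative host-side launch consistent with the comment above. This is a
// sketch only, not part of the original file: the channels-per-block value and
// the stream name are assumptions. The kernel below allocates five dynamic
// shared-memory arrays of blockDim.x scalars each (shr_chi2s, shr_time_wgt,
// shr_time_max, shrTimeMax, shrTimeWgt), hence the shared-memory size:
//
//   constexpr int nthreads_per_channel = 45;
//   constexpr int channels_per_block = 10;   // assumption
//   dim3 const threads(nthreads_per_channel * channels_per_block);
//   dim3 const blocks((nchannels + channels_per_block - 1) / channels_per_block);
//   size_t const sharedBytes = 5 * threads.x * sizeof(SampleVector::Scalar);
//   kernel_time_compute_makeratio<<<blocks, threads, sharedBytes, stream>>>(/* ... */);
//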
__global__ void kernel_time_compute_makeratio(SampleVector::Scalar const* sample_values,
SampleVector::Scalar const* sample_value_errors,
uint32_t const* dids_eb,
uint32_t const* dids_ee,
bool const* useless_sample_values,
char const* pedestal_nums,
ConfigurationParameters::type const* amplitudeFitParametersEB,
ConfigurationParameters::type const* amplitudeFitParametersEE,
ConfigurationParameters::type const* timeFitParametersEB,
ConfigurationParameters::type const* timeFitParametersEE,
SampleVector::Scalar const* sumAAsNullHypot,
SampleVector::Scalar const* sum0sNullHypot,
SampleVector::Scalar* tMaxAlphaBetas,
SampleVector::Scalar* tMaxErrorAlphaBetas,
SampleVector::Scalar* g_accTimeMax,
SampleVector::Scalar* g_accTimeWgt,
TimeComputationState* g_state,
unsigned const int timeFitParameters_sizeEB,
unsigned const int timeFitParameters_sizeEE,
ConfigurationParameters::type const timeFitLimits_firstEB,
ConfigurationParameters::type const timeFitLimits_firstEE,
ConfigurationParameters::type const timeFitLimits_secondEB,
ConfigurationParameters::type const timeFitLimits_secondEE,
const int nchannels,
uint32_t const offsetForInputs) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nthreads_per_channel = 45; // n=10, n(n-1)/2
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockDim.x * blockIdx.x;
const int ch = gtx / nthreads_per_channel;
const int ltx = threadIdx.x % nthreads_per_channel;
const int ch_start = ch * nsamples;
const auto* dids = ch >= offsetForInputs ? dids_ee : dids_eb;
const int inputCh = ch >= offsetForInputs ? ch - offsetForInputs : ch;
// remove inactive threads
// threads that return here should not affect the __syncthreads() below since they have exited the kernel
if (ch >= nchannels)
return;
const auto did = DetId{dids[inputCh]};
const auto isBarrel = did.subdetId() == EcalBarrel;
const auto* amplitudeFitParameters = isBarrel ? amplitudeFitParametersEB : amplitudeFitParametersEE;
const auto* timeFitParameters = isBarrel ? timeFitParametersEB : timeFitParametersEE;
const auto timeFitParameters_size = isBarrel ? timeFitParameters_sizeEB : timeFitParameters_sizeEE;
const auto timeFitLimits_first = isBarrel ? timeFitLimits_firstEB : timeFitLimits_firstEE;
const auto timeFitLimits_second = isBarrel ? timeFitLimits_secondEB : timeFitLimits_secondEE;
extern __shared__ char smem[];
ScalarType* shr_chi2s = reinterpret_cast<ScalarType*>(smem);
ScalarType* shr_time_wgt = shr_chi2s + blockDim.x;
ScalarType* shr_time_max = shr_time_wgt + blockDim.x;
ScalarType* shrTimeMax = shr_time_max + blockDim.x;
ScalarType* shrTimeWgt = shrTimeMax + blockDim.x;
// map tx -> (sample_i, sample_j)
int sample_i, sample_j = 0;
if (ltx >= 0 && ltx <= 8) {
sample_i = 0;
sample_j = 1 + ltx;
} else if (ltx <= 16) {
sample_i = 1;
sample_j = 2 + ltx - 9;
} else if (ltx <= 23) {
sample_i = 2;
sample_j = 3 + ltx - 17;
} else if (ltx <= 29) {
sample_i = 3;
sample_j = 4 + ltx - 24;
} else if (ltx <= 34) {
sample_i = 4;
sample_j = 5 + ltx - 30;
} else if (ltx <= 38) {
sample_i = 5;
sample_j = 6 + ltx - 35;
} else if (ltx <= 41) {
sample_i = 6;
sample_j = 7 + ltx - 39;
} else if (ltx <= 43) {
sample_i = 7;
sample_j = 8 + ltx - 42;
} else if (ltx <= 44) {
sample_i = 8;
sample_j = 9;
} else
assert(false);
const auto tx_i = ch_start + sample_i;
const auto tx_j = ch_start + sample_j;
//
// note, given the way we partition the block, with 45 threads per channel
// we will end up with inactive threads which need to be dragged along
// through the synching point
//
bool const condForUselessSamples = useless_sample_values[tx_i] || useless_sample_values[tx_j] ||
sample_values[tx_i] <= 1 || sample_values[tx_j] <= 1;
//
// see cpu implementation for explanation
//
ScalarType chi2 = std::numeric_limits<ScalarType>::max();
ScalarType tmax = 0;
ScalarType tmaxerr = 0;
shrTimeMax[threadIdx.x] = 0;
shrTimeWgt[threadIdx.x] = 0;
bool internalCondForSkipping1 = true;
bool internalCondForSkipping2 = true;
if (!condForUselessSamples) {
const auto rtmp = sample_values[tx_i] / sample_values[tx_j];
const auto invampl_i = 1.0 / sample_values[tx_i];
const auto relErr2_i = sample_value_errors[tx_i] * sample_value_errors[tx_i] * invampl_i * invampl_i;
const auto invampl_j = 1.0 / sample_values[tx_j];
const auto relErr2_j = sample_value_errors[tx_j] * sample_value_errors[tx_j] * invampl_j * invampl_j;
const auto err1 = rtmp * rtmp * (relErr2_i + relErr2_j);
auto err2 = sample_value_errors[tx_j] * (sample_values[tx_i] - sample_values[tx_j]) * (invampl_j * invampl_j);
// TODO non-divergent branch for a block if each block has 1 channel
// otherwise non-divergent for groups of 45 threads
// at this point, pedestal_nums[ch] can be either 0, 1 or 2
if (pedestal_nums[ch] == 2)
err2 *= err2 * 0.5;
const auto err3 = (0.289 * 0.289) * (invampl_j * invampl_j);
const auto total_error = std::sqrt(err1 + err2 + err3);
const auto alpha = amplitudeFitParameters[0];
const auto beta = amplitudeFitParameters[1];
const auto alphabeta = alpha * beta;
const auto invalphabeta = 1.0 / alphabeta;
// variables instead of a struct
const auto ratio_index = sample_i;
const auto ratio_step = sample_j - sample_i;
const auto ratio_value = rtmp;
const auto ratio_error = total_error;
const auto rlim_i_j = fast_expf(static_cast<ScalarType>(sample_j - sample_i) / beta) - 0.001;
internalCondForSkipping1 = !(total_error < 1.0 && rtmp > 0.001 && rtmp < rlim_i_j);
if (!internalCondForSkipping1) {
//
// precompute.
// in cpu version this was done conditionally
// however easier to do it here (precompute) and then just filter out
// if not needed
//
const auto l_timeFitLimits_first = timeFitLimits_first;
const auto l_timeFitLimits_second = timeFitLimits_second;
if (ratio_step == 1 && ratio_value >= l_timeFitLimits_first && ratio_value <= l_timeFitLimits_second) {
const auto time_max_i = static_cast<ScalarType>(ratio_index);
auto u = timeFitParameters[timeFitParameters_size - 1];
CMS_UNROLL_LOOP
for (int k = timeFitParameters_size - 2; k >= 0; k--)
u = u * ratio_value + timeFitParameters[k];
auto du = (timeFitParameters_size - 1) * (timeFitParameters[timeFitParameters_size - 1]);
for (int k = timeFitParameters_size - 2; k >= 1; k--)
du = du * ratio_value + k * timeFitParameters[k];
const auto error2 = ratio_error * ratio_error * du * du;
const auto time_max = error2 > 0 ? (time_max_i - u) / error2 : static_cast<ScalarType>(0);
const auto time_wgt = error2 > 0 ? 1.0 / error2 : static_cast<ScalarType>(0);
// store into shared mem
// note, this name is essentially identical to the one used
// below.
shrTimeMax[threadIdx.x] = error2 > 0 ? time_max : 0;
shrTimeWgt[threadIdx.x] = error2 > 0 ? time_wgt : 0;
} else {
shrTimeMax[threadIdx.x] = 0;
shrTimeWgt[threadIdx.x] = 0;
}
// continue with ratios
const auto stepOverBeta = static_cast<SampleVector::Scalar>(ratio_step) / beta;
const auto offset = static_cast<SampleVector::Scalar>(ratio_index) + alphabeta;
const auto rmin = std::max(ratio_value - ratio_error, 0.001);
const auto rmax = std::min(ratio_value + ratio_error,
fast_expf(static_cast<SampleVector::Scalar>(ratio_step) / beta) - 0.001);
const auto time1 = offset - ratio_step / (fast_expf((stepOverBeta - fast_logf(rmin)) / alpha) - 1.0);
const auto time2 = offset - ratio_step / (fast_expf((stepOverBeta - fast_logf(rmax)) / alpha) - 1.0);
// set these guys
tmax = 0.5 * (time1 + time2);
tmaxerr = 0.5 * std::sqrt((time1 - time2) * (time1 - time2));
#ifdef DEBUG_TC_MAKERATIO
if (ch == 1 || ch == 0)
printf("ch = %d ltx = %d tmax = %f tmaxerr = %f time1 = %f time2 = %f offset = %f rmin = %f rmax = %f\n",
ch,
ltx,
tmax,
tmaxerr,
time1,
time2,
offset,
rmin,
rmax);
#endif
SampleVector::Scalar sumAf = 0;
SampleVector::Scalar sumff = 0;
const int itmin = std::max(-1, static_cast<int>(std::floor(tmax - alphabeta)));
auto loffset = (static_cast<ScalarType>(itmin) - tmax) * invalphabeta;
// TODO: data dependence
for (int it = itmin + 1; it < nsamples; it++) {
loffset += invalphabeta;
if (useless_sample_values[ch_start + it])
continue;
const auto inverr2 = 1.0 / (sample_value_errors[ch_start + it] * sample_value_errors[ch_start + it]);
const auto term1 = 1.0 + loffset;
const auto f = (term1 > 1e-6) ? fast_expf(alpha * (fast_logf(term1) - loffset)) : 0;
sumAf += sample_values[ch_start + it] * (f * inverr2);
sumff += f * (f * inverr2);
}
const auto sumAA = sumAAsNullHypot[ch];
const auto sum0 = sum0sNullHypot[ch];
chi2 = sumAA;
// TODO: sum0 can not be 0 below, need to introduce the check upfront
if (sumff > 0) {
chi2 = sumAA - sumAf * (sumAf / sumff);
}
chi2 /= sum0;
#ifdef DEBUG_TC_MAKERATIO
if (ch == 1 || ch == 0)
printf("ch = %d ltx = %d sumAf = %f sumff = %f sumAA = %f sum0 = %d tmax = %f tmaxerr = %f chi2 = %f\n",
ch,
ltx,
sumAf,
sumff,
sumAA,
static_cast<int>(sum0),
tmax,
tmaxerr,
chi2);
#endif
if (chi2 > 0 && tmax > 0 && tmaxerr > 0)
internalCondForSkipping2 = false;
else
chi2 = std::numeric_limits<ScalarType>::max();
}
}
// store into smem
shr_chi2s[threadIdx.x] = chi2;
__syncthreads();
// find min chi2 - quite crude for now
// TODO validate/check
char iter = nthreads_per_channel / 2 + nthreads_per_channel % 2;
bool oddElements = nthreads_per_channel % 2;
CMS_UNROLL_LOOP
while (iter >= 1) {
if (ltx < iter)
// for odd ns, the last guy will just store itself
// exception is for ltx == 0 and iter==1
shr_chi2s[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shr_chi2s[threadIdx.x]
: std::min(shr_chi2s[threadIdx.x], shr_chi2s[threadIdx.x + iter]);
__syncthreads();
oddElements = iter % 2;
iter = iter == 1 ? iter / 2 : iter / 2 + iter % 2;
}
// filter out inactive or useless samples threads
if (!condForUselessSamples && !internalCondForSkipping1 && !internalCondForSkipping2) {
// min chi2, now compute weighted average of tmax measurements
// see cpu version for more explanation
const auto chi2min = shr_chi2s[threadIdx.x - ltx];
const auto chi2Limit = chi2min + 1.0;
const auto inverseSigmaSquared = chi2 < chi2Limit ? 1.0 / (tmaxerr * tmaxerr) : 0.0;
#ifdef DEBUG_TC_MAKERATIO
if (ch == 1 || ch == 0)
printf("ch = %d ltx = %d chi2min = %f chi2Limit = %f inverseSigmaSquared = %f\n",
ch,
ltx,
chi2min,
chi2Limit,
inverseSigmaSquared);
#endif
// store into shared mem and run reduction
// TODO: check if cooperative groups would be better
// TODO: check if shuffling intrinsics are better
shr_time_wgt[threadIdx.x] = inverseSigmaSquared;
shr_time_max[threadIdx.x] = tmax * inverseSigmaSquared;
} else {
shr_time_wgt[threadIdx.x] = 0;
shr_time_max[threadIdx.x] = 0;
}
__syncthreads();
// reduce to compute time_max and time_wgt
iter = nthreads_per_channel / 2 + nthreads_per_channel % 2;
oddElements = nthreads_per_channel % 2;
CMS_UNROLL_LOOP
while (iter >= 1) {
if (ltx < iter) {
shr_time_wgt[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shr_time_wgt[threadIdx.x]
: shr_time_wgt[threadIdx.x] + shr_time_wgt[threadIdx.x + iter];
shr_time_max[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shr_time_max[threadIdx.x]
: shr_time_max[threadIdx.x] + shr_time_max[threadIdx.x + iter];
shrTimeMax[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shrTimeMax[threadIdx.x]
: shrTimeMax[threadIdx.x] + shrTimeMax[threadIdx.x + iter];
shrTimeWgt[threadIdx.x] = oddElements && (ltx == iter - 1 && ltx > 0)
? shrTimeWgt[threadIdx.x]
: shrTimeWgt[threadIdx.x] + shrTimeWgt[threadIdx.x + iter];
}
__syncthreads();
oddElements = iter % 2;
iter = iter == 1 ? iter / 2 : iter / 2 + iter % 2;
}
// load from shared memory the 0th guy (will contain accumulated values)
// compute
// store into global mem
if (ltx == 0) {
const auto tmp_time_max = shr_time_max[threadIdx.x];
const auto tmp_time_wgt = shr_time_wgt[threadIdx.x];
// we are done if the number of time ratios is 0
if (tmp_time_wgt == 0 && tmp_time_max == 0) {
g_state[ch] = TimeComputationState::Finished;
return;
}
// no div by 0
const auto tMaxAlphaBeta = tmp_time_max / tmp_time_wgt;
const auto tMaxErrorAlphaBeta = 1.0 / std::sqrt(tmp_time_wgt);
tMaxAlphaBetas[ch] = tMaxAlphaBeta;
tMaxErrorAlphaBetas[ch] = tMaxErrorAlphaBeta;
g_accTimeMax[ch] = shrTimeMax[threadIdx.x];
g_accTimeWgt[ch] = shrTimeWgt[threadIdx.x];
g_state[ch] = TimeComputationState::NotFinished;
#ifdef DEBUG_TC_MAKERATIO
printf("ch = %d time_max = %f time_wgt = %f\n", ch, tmp_time_max, tmp_time_wgt);
printf("ch = %d tMaxAlphaBeta = %f tMaxErrorAlphaBeta = %f timeMax = %f timeWgt = %f\n",
ch,
tMaxAlphaBeta,
tMaxErrorAlphaBeta,
shrTimeMax[threadIdx.x],
shrTimeWgt[threadIdx.x]);
#endif
}
}
/// launch ctx parameters are
/// 10 threads per channel, N channels per block, Y blocks
/// TODO: do we need to keep the state around or can be removed?!
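///
/// Illustrative host-side launch consistent with the comment above. This is a
/// sketch only, not part of the original file: the channels-per-block value and
/// the stream name are assumptions. Dynamic shared memory holds two arrays of
/// blockDim.x scalars (shr_sumAf and shr_sumff), matching the note inside the
/// kernel:
///
///   constexpr int nsamples = EcalDataFrame::MAXSAMPLES;  // 10 threads per channel
///   constexpr int channels_per_block = 32;               // assumption
///   dim3 const threads(nsamples * channels_per_block);
///   dim3 const blocks((nchannels + channels_per_block - 1) / channels_per_block);
///   size_t const sharedBytes = 2 * threads.x * sizeof(SampleVector::Scalar);
///   kernel_time_compute_findamplchi2_and_finish<<<blocks, threads, sharedBytes, stream>>>(/* ... */);
///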
//#define DEBUG_FINDAMPLCHI2_AND_FINISH
__global__ void kernel_time_compute_findamplchi2_and_finish(
SampleVector::Scalar const* sample_values,
SampleVector::Scalar const* sample_value_errors,
uint32_t const* dids_eb,
uint32_t const* dids_ee,
bool const* useless_samples,
SampleVector::Scalar const* g_tMaxAlphaBeta,
SampleVector::Scalar const* g_tMaxErrorAlphaBeta,
SampleVector::Scalar const* g_accTimeMax,
SampleVector::Scalar const* g_accTimeWgt,
ConfigurationParameters::type const* amplitudeFitParametersEB,
ConfigurationParameters::type const* amplitudeFitParametersEE,
SampleVector::Scalar const* sumAAsNullHypot,
SampleVector::Scalar const* sum0sNullHypot,
SampleVector::Scalar const* chi2sNullHypot,
TimeComputationState* g_state,
SampleVector::Scalar* g_ampMaxAlphaBeta,
SampleVector::Scalar* g_ampMaxError,
SampleVector::Scalar* g_timeMax,
SampleVector::Scalar* g_timeError,
const int nchannels,
uint32_t const offsetForInputs) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch = gtx / nsamples;
const int sample = threadIdx.x % nsamples;
const auto* dids = ch >= offsetForInputs ? dids_ee : dids_eb;
const int inputCh = ch >= offsetForInputs ? ch - offsetForInputs : ch;
// configure shared mem
// per block, we need #threads per block * 2 * sizeof(ScalarType)
// we run with N channels per block
extern __shared__ char smem[];
ScalarType* shr_sumAf = reinterpret_cast<ScalarType*>(smem);
ScalarType* shr_sumff = shr_sumAf + blockDim.x;
if (ch >= nchannels)
return;
auto state = g_state[ch];
const auto did = DetId{dids[inputCh]};
const auto* amplitudeFitParameters =
did.subdetId() == EcalBarrel ? amplitudeFitParametersEB : amplitudeFitParametersEE;
// TODO is that better than storing into global and launching another kernel
// for the first 10 threads
if (state == TimeComputationState::NotFinished) {
const auto alpha = amplitudeFitParameters[0];
const auto beta = amplitudeFitParameters[1];
const auto alphabeta = alpha * beta;
const auto invalphabeta = 1.0 / alphabeta;
const auto tMaxAlphaBeta = g_tMaxAlphaBeta[ch];
const auto sample_value = sample_values[gtx];
const auto sample_value_error = sample_value_errors[gtx];
const auto inverr2 =
useless_samples[gtx] ? static_cast<ScalarType>(0) : 1.0 / (sample_value_error * sample_value_error);
const auto offset = (static_cast<ScalarType>(sample) - tMaxAlphaBeta) * invalphabeta;
const auto term1 = 1.0 + offset;
const auto f = term1 > 1e-6 ? fast_expf(alpha * (fast_logf(term1) - offset)) : static_cast<ScalarType>(0.0);
const auto sumAf = sample_value * (f * inverr2);
const auto sumff = f * (f * inverr2);
// store into shared mem
shr_sumAf[threadIdx.x] = sumAf;
shr_sumff[threadIdx.x] = sumff;
} else {
shr_sumAf[threadIdx.x] = 0;
shr_sumff[threadIdx.x] = 0;
}
__syncthreads();
// reduce
// unroll completely here (but hardcoded)
if (sample < 5) {
shr_sumAf[threadIdx.x] += shr_sumAf[threadIdx.x + 5];
shr_sumff[threadIdx.x] += shr_sumff[threadIdx.x + 5];
}
__syncthreads();
if (sample < 2) {
// will need to subtract for ltx = 3, we double count here
shr_sumAf[threadIdx.x] += shr_sumAf[threadIdx.x + 2] + shr_sumAf[threadIdx.x + 3];
shr_sumff[threadIdx.x] += shr_sumff[threadIdx.x + 2] + shr_sumff[threadIdx.x + 3];
}
__syncthreads();
if (sample == 0) {
// exit if the state is done
// note, we do not exit before all __syncthreads are finished
if (state == TimeComputationState::Finished) {
g_timeMax[ch] = 5;
g_timeError[ch] = -999;
return;
}
// subtract to avoid double counting
const auto sumff = shr_sumff[threadIdx.x] + shr_sumff[threadIdx.x + 1] - shr_sumff[threadIdx.x + 3];
const auto sumAf = shr_sumAf[threadIdx.x] + shr_sumAf[threadIdx.x + 1] - shr_sumAf[threadIdx.x + 3];
const auto ampMaxAlphaBeta = sumff > 0 ? sumAf / sumff : 0;
const auto sumAA = sumAAsNullHypot[ch];
const auto sum0 = sum0sNullHypot[ch];
const auto nullChi2 = chi2sNullHypot[ch];
if (sumff > 0) {
const auto chi2AlphaBeta = (sumAA - sumAf * sumAf / sumff) / sum0;
if (chi2AlphaBeta > nullChi2) {
// null hypothesis is better
state = TimeComputationState::Finished;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d chi2AlphaBeta = %f nullChi2 = %f sumAA = %f sumAf = %f sumff = %f sum0 = %f\n",
ch,
chi2AlphaBeta,
nullChi2,
sumAA,
sumAf,
sumff,
sum0);
#endif
}
// store to global
g_ampMaxAlphaBeta[ch] = ampMaxAlphaBeta;
} else {
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d sum0 = %f sumAA = %f sumff = %f sumAf = %f\n", ch, sum0, sumAA, sumff, sumAf);
#endif
state = TimeComputationState::Finished;
}
// store the state to global and finish calcs
g_state[ch] = state;
if (state == TimeComputationState::Finished) {
// store default values into global
g_timeMax[ch] = 5;
g_timeError[ch] = -999;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d finished state\n", ch);
#endif
return;
}
const auto ampMaxError = g_ampMaxError[ch];
const auto test_ratio = ampMaxAlphaBeta / ampMaxError;
const auto accTimeMax = g_accTimeMax[ch];
const auto accTimeWgt = g_accTimeWgt[ch];
const auto tMaxAlphaBeta = g_tMaxAlphaBeta[ch];
const auto tMaxErrorAlphaBeta = g_tMaxErrorAlphaBeta[ch];
// branch to separate large vs small pulses
// see cpu version for more info
if (test_ratio > 5.0 && accTimeWgt > 0) {
const auto tMaxRatio = accTimeWgt > 0 ? accTimeMax / accTimeWgt : static_cast<ScalarType>(0);
const auto tMaxErrorRatio = accTimeWgt > 0 ? 1.0 / std::sqrt(accTimeWgt) : static_cast<ScalarType>(0);
if (test_ratio > 10.0) {
g_timeMax[ch] = tMaxRatio;
g_timeError[ch] = tMaxErrorRatio;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d tMaxRatio = %f tMaxErrorRatio = %f\n", ch, tMaxRatio, tMaxErrorRatio);
#endif
} else {
const auto timeMax = (tMaxAlphaBeta * (10.0 - ampMaxAlphaBeta / ampMaxError) +
tMaxRatio * (ampMaxAlphaBeta / ampMaxError - 5.0)) /
5.0;
const auto timeError = (tMaxErrorAlphaBeta * (10.0 - ampMaxAlphaBeta / ampMaxError) +
tMaxErrorRatio * (ampMaxAlphaBeta / ampMaxError - 5.0)) /
5.0;
state = TimeComputationState::Finished;
g_state[ch] = state;
g_timeMax[ch] = timeMax;
g_timeError[ch] = timeError;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d timeMax = %f timeError = %f\n", ch, timeMax, timeError);
#endif
}
} else {
state = TimeComputationState::Finished;
g_state[ch] = state;
g_timeMax[ch] = tMaxAlphaBeta;
g_timeError[ch] = tMaxErrorAlphaBeta;
#ifdef DEBUG_FINDAMPLCHI2_AND_FINISH
printf("ch = %d tMaxAlphaBeta = %f tMaxErrorAlphaBeta = %f\n", ch, tMaxAlphaBeta, tMaxErrorAlphaBeta);
#endif
}
}
}
__global__ void kernel_time_compute_fixMGPAslew(uint16_t const* digis_eb,
uint16_t const* digis_ee,
SampleVector::Scalar* sample_values,
SampleVector::Scalar* sample_value_errors,
bool* useless_sample_values,
unsigned const int sample_mask,
const int nchannels,
uint32_t const offsetForInputs) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch = gtx / nsamples;
const int sample = threadIdx.x % nsamples;
const int inputGtx = ch >= offsetForInputs ? gtx - offsetForInputs * nsamples : gtx;
const auto* digis = ch >= offsetForInputs ? digis_ee : digis_eb;
// remove thread for sample 0, oversubscribing is easier than ....
if (ch >= nchannels || sample == 0)
return;
if (!use_sample(sample_mask, sample))
return;
const auto gainIdPrev = ecal::mgpa::gainId(digis[inputGtx - 1]);
const auto gainIdNext = ecal::mgpa::gainId(digis[inputGtx]);
if (gainIdPrev >= 1 && gainIdPrev <= 3 && gainIdNext >= 1 && gainIdNext <= 3 && gainIdPrev < gainIdNext) {
sample_values[gtx - 1] = 0;
sample_value_errors[gtx - 1] = 1e+9;
useless_sample_values[gtx - 1] = true;
}
}
__global__ void kernel_time_compute_ampl(SampleVector::Scalar const* sample_values,
SampleVector::Scalar const* sample_value_errors,
uint32_t const* dids,
bool const* useless_samples,
SampleVector::Scalar const* g_timeMax,
SampleVector::Scalar const* amplitudeFitParametersEB,
SampleVector::Scalar const* amplitudeFitParametersEE,
SampleVector::Scalar* g_amplitudeMax,
const int nchannels) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr ScalarType corr4 = 1.;
constexpr ScalarType corr6 = 1.;
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockIdx.x * blockDim.x;
const int ch = gtx / nsamples;
const int sample = threadIdx.x % nsamples;
if (ch >= nchannels)
return;
const auto did = DetId{dids[ch]};
const auto* amplitudeFitParameters =
did.subdetId() == EcalBarrel ? amplitudeFitParametersEB : amplitudeFitParametersEE;
// configure shared mem
extern __shared__ char smem[];
ScalarType* shr_sum1 = reinterpret_cast<ScalarType*>(smem);
auto* shr_sumA = shr_sum1 + blockDim.x;
auto* shr_sumF = shr_sumA + blockDim.x;
auto* shr_sumAF = shr_sumF + blockDim.x;
auto* shr_sumFF = shr_sumAF + blockDim.x;
const auto alpha = amplitudeFitParameters[0];
const auto beta = amplitudeFitParameters[1];
const auto timeMax = g_timeMax[ch];
const auto pedestalLimit = timeMax - (alpha * beta) - 1.0;
const auto sample_value = sample_values[gtx];
const auto sample_value_error = sample_value_errors[gtx];
const auto inverr2 =
sample_value_error > 0 ? 1. / (sample_value_error * sample_value_error) : static_cast<ScalarType>(0);
const auto termOne = 1 + (sample - timeMax) / (alpha * beta);
const auto f = termOne > 1.e-5 ? fast_expf(alpha * fast_logf(termOne) - (sample - timeMax) / beta)
: static_cast<ScalarType>(0.);
bool const cond = ((sample < pedestalLimit) || (f > 0.6 * corr6 && sample <= timeMax) ||
(f > 0.4 * corr4 && sample >= timeMax)) &&
!useless_samples[gtx];
// store into shared mem
shr_sum1[threadIdx.x] = cond ? inverr2 : static_cast<ScalarType>(0);
shr_sumA[threadIdx.x] = cond ? sample_value * inverr2 : static_cast<ScalarType>(0);
shr_sumF[threadIdx.x] = cond ? f * inverr2 : static_cast<ScalarType>(0);
shr_sumAF[threadIdx.x] = cond ? (f * inverr2) * sample_value : static_cast<ScalarType>(0);
shr_sumFF[threadIdx.x] = cond ? f * (f * inverr2) : static_cast<ScalarType>(0);
// reduction
if (sample <= 4) {
shr_sum1[threadIdx.x] += shr_sum1[threadIdx.x + 5];
shr_sumA[threadIdx.x] += shr_sumA[threadIdx.x + 5];
shr_sumF[threadIdx.x] += shr_sumF[threadIdx.x + 5];
shr_sumAF[threadIdx.x] += shr_sumAF[threadIdx.x + 5];
shr_sumFF[threadIdx.x] += shr_sumFF[threadIdx.x + 5];
}
__syncthreads();
if (sample < 2) {
// note: we double count sample 3
shr_sum1[threadIdx.x] += shr_sum1[threadIdx.x + 2] + shr_sum1[threadIdx.x + 3];
shr_sumA[threadIdx.x] += shr_sumA[threadIdx.x + 2] + shr_sumA[threadIdx.x + 3];
shr_sumF[threadIdx.x] += shr_sumF[threadIdx.x + 2] + shr_sumF[threadIdx.x + 3];
shr_sumAF[threadIdx.x] += shr_sumAF[threadIdx.x + 2] + shr_sumAF[threadIdx.x + 3];
shr_sumFF[threadIdx.x] += shr_sumFF[threadIdx.x + 2] + shr_sumFF[threadIdx.x + 3];
}
__syncthreads();
if (sample == 0) {
const auto sum1 = shr_sum1[threadIdx.x] + shr_sum1[threadIdx.x + 1] - shr_sum1[threadIdx.x + 3];
const auto sumA = shr_sumA[threadIdx.x] + shr_sumA[threadIdx.x + 1] - shr_sumA[threadIdx.x + 3];
const auto sumF = shr_sumF[threadIdx.x] + shr_sumF[threadIdx.x + 1] - shr_sumF[threadIdx.x + 3];
const auto sumAF = shr_sumAF[threadIdx.x] + shr_sumAF[threadIdx.x + 1] - shr_sumAF[threadIdx.x + 3];
const auto sumFF = shr_sumFF[threadIdx.x] + shr_sumFF[threadIdx.x + 1] - shr_sumFF[threadIdx.x + 3];
const auto denom = sumFF * sum1 - sumF * sumF;
const auto condForDenom = sum1 > 0 && std::abs(denom) > 1.e-20;
const auto amplitudeMax = condForDenom ? (sumAF * sum1 - sumA * sumF) / denom : static_cast<ScalarType>(0.);
// store into global mem
g_amplitudeMax[ch] = amplitudeMax;
}
}
//#define ECAL_RECO_CUDA_TC_INIT_DEBUG
__global__ void kernel_time_computation_init(uint16_t const* digis_eb,
uint32_t const* dids_eb,
uint16_t const* digis_ee,
uint32_t const* dids_ee,
float const* rms_x12,
float const* rms_x6,
float const* rms_x1,
float const* mean_x12,
float const* mean_x6,
float const* mean_x1,
float const* gain12Over6,
float const* gain6Over1,
SampleVector::Scalar* sample_values,
SampleVector::Scalar* sample_value_errors,
SampleVector::Scalar* ampMaxError,
bool* useless_sample_values,
char* pedestal_nums,
uint32_t const offsetForHashes,
uint32_t const offsetForInputs,
unsigned const int sample_maskEB,
unsigned const int sample_maskEE,
int nchannels) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int tx = threadIdx.x + blockDim.x * blockIdx.x;
const int ch = tx / nsamples;
const int inputTx = ch >= offsetForInputs ? tx - offsetForInputs * nsamples : tx;
const int inputCh = ch >= offsetForInputs ? ch - offsetForInputs : ch;
const auto* digis = ch >= offsetForInputs ? digis_ee : digis_eb;
const auto* dids = ch >= offsetForInputs ? dids_ee : dids_eb;
// threads that return here should not affect the __syncthreads() below since they have exited the kernel
if (ch >= nchannels)
return;
// indices/inits
const int sample = tx % nsamples;
const int input_ch_start = inputCh * nsamples;
SampleVector::Scalar pedestal = 0.;
int num = 0;
// configure shared mem
extern __shared__ char smem[];
ScalarType* shrSampleValues = reinterpret_cast<SampleVector::Scalar*>(smem);
ScalarType* shrSampleValueErrors = shrSampleValues + blockDim.x;
// 0 and 1 sample values
const auto adc0 = ecal::mgpa::adc(digis[input_ch_start]);
const auto gainId0 = ecal::mgpa::gainId(digis[input_ch_start]);
const auto adc1 = ecal::mgpa::adc(digis[input_ch_start + 1]);
const auto gainId1 = ecal::mgpa::gainId(digis[input_ch_start + 1]);
const auto did = DetId{dids[inputCh]};
const auto isBarrel = did.subdetId() == EcalBarrel;
const auto sample_mask = did.subdetId() == EcalBarrel ? sample_maskEB : sample_maskEE;
const auto hashedId = isBarrel ? ecal::reconstruction::hashedIndexEB(did.rawId())
: offsetForHashes + ecal::reconstruction::hashedIndexEE(did.rawId());
// set pedestal
// TODO this branch is non-divergent for a group of 10 threads
if (gainId0 == 1 && use_sample(sample_mask, 0)) {
pedestal = static_cast<SampleVector::Scalar>(adc0);
num = 1;
const auto diff = adc1 - adc0;
if (gainId1 == 1 && use_sample(sample_mask, 1) && std::abs(diff) < 3 * rms_x12[hashedId]) {
pedestal = (pedestal + static_cast<SampleVector::Scalar>(adc1)) / 2.0;
num = 2;
}
} else {
pedestal = mean_x12[ch];
}
// ped subtracted and gain-renormalized samples.
const auto gainId = ecal::mgpa::gainId(digis[inputTx]);
const auto adc = ecal::mgpa::adc(digis[inputTx]);
bool bad = false;
SampleVector::Scalar sample_value, sample_value_error;
// TODO divergent branch
// TODO: piece below is general both for amplitudes and timing
// potentially there is a way to reduce the amount of code...
if (!use_sample(sample_mask, sample)) {
bad = true;
sample_value = 0;
sample_value_error = 0;
} else if (gainId == 1) {
sample_value = static_cast<SampleVector::Scalar>(adc) - pedestal;
sample_value_error = rms_x12[hashedId];
} else if (gainId == 2) {
sample_value = (static_cast<SampleVector::Scalar>(adc) - mean_x6[hashedId]) * gain12Over6[hashedId];
sample_value_error = rms_x6[hashedId] * gain12Over6[hashedId];
} else if (gainId == 3) {
sample_value =
(static_cast<SampleVector::Scalar>(adc) - mean_x1[hashedId]) * gain6Over1[hashedId] * gain12Over6[hashedId];
sample_value_error = rms_x1[hashedId] * gain6Over1[hashedId] * gain12Over6[hashedId];
} else {
sample_value = 0;
sample_value_error = 0;
bad = true;
}
// TODO: make sure we save things correctly when sample is useless
const auto useless_sample = (sample_value_error <= 0) | bad;
useless_sample_values[tx] = useless_sample;
sample_values[tx] = sample_value;
sample_value_errors[tx] = useless_sample ? 1e+9 : sample_value_error;
// DEBUG
#ifdef ECAL_RECO_CUDA_TC_INIT_DEBUG
if (ch == 0) {
printf("sample = %d sample_value = %f sample_value_error = %f useless = %c\n",
sample,
sample_value,
sample_value_error,
useless_sample ? '1' : '0');
}
#endif
// store into the shared mem
shrSampleValues[threadIdx.x] = sample_value_error > 0 ? sample_value : std::numeric_limits<ScalarType>::min();
shrSampleValueErrors[threadIdx.x] = sample_value_error;
__syncthreads();
// perform a max reduction over the sample values, carrying along the corresponding sample value error
if (sample < 5) {
// note, if equal -> we keep the value with lower sample as for cpu
shrSampleValueErrors[threadIdx.x] = shrSampleValues[threadIdx.x] < shrSampleValues[threadIdx.x + 5]
? shrSampleValueErrors[threadIdx.x + 5]
: shrSampleValueErrors[threadIdx.x];
shrSampleValues[threadIdx.x] = std::max(shrSampleValues[threadIdx.x], shrSampleValues[threadIdx.x + 5]);
}
__syncthreads();
// a bit of an overkill, but easier than comparing across 3 values
if (sample < 3) {
shrSampleValueErrors[threadIdx.x] = shrSampleValues[threadIdx.x] < shrSampleValues[threadIdx.x + 3]
? shrSampleValueErrors[threadIdx.x + 3]
: shrSampleValueErrors[threadIdx.x];
shrSampleValues[threadIdx.x] = std::max(shrSampleValues[threadIdx.x], shrSampleValues[threadIdx.x + 3]);
}
__syncthreads();
if (sample < 2) {
shrSampleValueErrors[threadIdx.x] = shrSampleValues[threadIdx.x] < shrSampleValues[threadIdx.x + 2]
? shrSampleValueErrors[threadIdx.x + 2]
: shrSampleValueErrors[threadIdx.x];
shrSampleValues[threadIdx.x] = std::max(shrSampleValues[threadIdx.x], shrSampleValues[threadIdx.x + 2]);
}
__syncthreads();
if (sample == 0) {
// we only need the max error
const auto maxSampleValueError = shrSampleValues[threadIdx.x] < shrSampleValues[threadIdx.x + 1]
? shrSampleValueErrors[threadIdx.x + 1]
: shrSampleValueErrors[threadIdx.x];
// # pedestal samples used
pedestal_nums[ch] = num;
// this is used downstream
ampMaxError[ch] = maxSampleValueError;
// DEBUG
#ifdef ECAL_RECO_CUDA_TC_INIT_DEBUG
if (ch == 0) {
printf("pedestal_nums = %d ampMaxError = %f\n", num, maxSampleValueError);
}
#endif
}
}
///
/// launch context parameters: 1 thread per channel
///
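///
/// Illustrative host-side launch consistent with the comment above. This is a
/// sketch only, not part of the original file: the block size and the stream
/// name are assumptions. One thread per channel, no dynamic shared memory:
///
///   constexpr int threads_per_block = 32;  // assumption
///   dim3 const blocks((nchannels + threads_per_block - 1) / threads_per_block);
///   kernel_time_correction_and_finalize<<<blocks, threads_per_block, 0, stream>>>(/* ... */);
///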
//#define DEBUG_TIME_CORRECTION
__global__ void kernel_time_correction_and_finalize(
// SampleVector::Scalar const* g_amplitude,
::ecal::reco::StorageScalarType const* g_amplitudeEB,
::ecal::reco::StorageScalarType const* g_amplitudeEE,
uint16_t const* digis_eb,
uint32_t const* dids_eb,
uint16_t const* digis_ee,
uint32_t const* dids_ee,
float const* amplitudeBinsEB,
float const* amplitudeBinsEE,
float const* shiftBinsEB,
float const* shiftBinsEE,
SampleVector::Scalar const* g_timeMax,
SampleVector::Scalar const* g_timeError,
float const* g_rms_x12,
float const* timeCalibConstant,
float* g_jitterEB,
float* g_jitterEE,
float* g_jitterErrorEB,
float* g_jitterErrorEE,
uint32_t* flagsEB,
uint32_t* flagsEE,
const int amplitudeBinsSizeEB,
const int amplitudeBinsSizeEE,
ConfigurationParameters::type const timeConstantTermEB,
ConfigurationParameters::type const timeConstantTermEE,
float const offsetTimeValueEB,
float const offsetTimeValueEE,
ConfigurationParameters::type const timeNconstEB,
ConfigurationParameters::type const timeNconstEE,
ConfigurationParameters::type const amplitudeThresholdEB,
ConfigurationParameters::type const amplitudeThresholdEE,
ConfigurationParameters::type const outOfTimeThreshG12pEB,
ConfigurationParameters::type const outOfTimeThreshG12pEE,
ConfigurationParameters::type const outOfTimeThreshG12mEB,
ConfigurationParameters::type const outOfTimeThreshG12mEE,
ConfigurationParameters::type const outOfTimeThreshG61pEB,
ConfigurationParameters::type const outOfTimeThreshG61pEE,
ConfigurationParameters::type const outOfTimeThreshG61mEB,
ConfigurationParameters::type const outOfTimeThreshG61mEE,
uint32_t const offsetForHashes,
uint32_t const offsetForInputs,
const int nchannels) {
using ScalarType = SampleVector::Scalar;
// constants
constexpr int nsamples = EcalDataFrame::MAXSAMPLES;
// indices
const int gtx = threadIdx.x + blockIdx.x * blockDim.x;
const int inputGtx = gtx >= offsetForInputs ? gtx - offsetForInputs : gtx;
const auto* dids = gtx >= offsetForInputs ? dids_ee : dids_eb;
const auto& digis = gtx >= offsetForInputs ? digis_ee : digis_eb;
// filter out outside of range threads
if (gtx >= nchannels)
return;
// need to ref the right ptrs
#define ARRANGE(var) auto* var = gtx >= offsetForInputs ? var##EE : var##EB
ARRANGE(g_amplitude);
ARRANGE(g_jitter);
ARRANGE(g_jitterError);
ARRANGE(flags);
#undef ARRANGE
const auto did = DetId{dids[inputGtx]};
const auto isBarrel = did.subdetId() == EcalBarrel;
const auto hashedId = isBarrel ? ecal::reconstruction::hashedIndexEB(did.rawId())
: offsetForHashes + ecal::reconstruction::hashedIndexEE(did.rawId());
const auto* amplitudeBins = isBarrel ? amplitudeBinsEB : amplitudeBinsEE;
const auto* shiftBins = isBarrel ? shiftBinsEB : shiftBinsEE;
const auto amplitudeBinsSize = isBarrel ? amplitudeBinsSizeEB : amplitudeBinsSizeEE;
const auto timeConstantTerm = isBarrel ? timeConstantTermEB : timeConstantTermEE;
const auto timeNconst = isBarrel ? timeNconstEB : timeNconstEE;
const auto offsetTimeValue = isBarrel ? offsetTimeValueEB : offsetTimeValueEE;
const auto amplitudeThreshold = isBarrel ? amplitudeThresholdEB : amplitudeThresholdEE;
const auto outOfTimeThreshG12p = isBarrel ? outOfTimeThreshG12pEB : outOfTimeThreshG12pEE;
const auto outOfTimeThreshG12m = isBarrel ? outOfTimeThreshG12mEB : outOfTimeThreshG12mEE;
const auto outOfTimeThreshG61p = isBarrel ? outOfTimeThreshG61pEB : outOfTimeThreshG61pEE;
const auto outOfTimeThreshG61m = isBarrel ? outOfTimeThreshG61mEB : outOfTimeThreshG61mEE;
// load some
const auto amplitude = g_amplitude[inputGtx];
const auto rms_x12 = g_rms_x12[hashedId];
const auto timeCalibConst = timeCalibConstant[hashedId];
int myBin = -1;
for (int bin = 0; bin < amplitudeBinsSize; bin++) {
if (amplitude > amplitudeBins[bin])
myBin = bin;
else
break;
}
ScalarType correction = 0;
if (myBin == -1) {
correction = shiftBins[0];
} else if (myBin == amplitudeBinsSize - 1) {
correction = shiftBins[myBin];
} else {
correction = shiftBins[myBin + 1] - shiftBins[myBin];
correction *= (amplitude - amplitudeBins[myBin]) / (amplitudeBins[myBin + 1] - amplitudeBins[myBin]);
correction += shiftBins[myBin];
}
// correction * 1./25.
correction = correction * 0.04;
const auto timeMax = g_timeMax[gtx];
const auto timeError = g_timeError[gtx];
const auto jitter = timeMax - 5 + correction;
const auto jitterError =
std::sqrt(timeError * timeError + timeConstantTerm * timeConstantTerm * 0.04 * 0.04); // 0.04 = 1./25.
#ifdef DEBUG_TIME_CORRECTION
printf("ch = %d timeMax = %f timeError = %f jitter = %f correction = %f\n",
gtx,
timeMax,
timeError,
jitter,
correction);
// }
#endif
// store back to global
g_jitter[inputGtx] = jitter;
g_jitterError[inputGtx] = jitterError;
// set the flag
// TODO: replace with something more efficient (if required),
// for now just to make it work
if (amplitude > amplitudeThreshold * rms_x12) {
auto threshP = outOfTimeThreshG12p;
auto threshM = outOfTimeThreshG12m;
if (amplitude > 3000.) {
for (int isample = 0; isample < nsamples; isample++) {
int gainid = ecal::mgpa::gainId(digis[nsamples * inputGtx + isample]);
if (gainid != 1) {
threshP = outOfTimeThreshG61p;
threshM = outOfTimeThreshG61m;
break;
}
}
}
const auto correctedTime = (timeMax - 5) * 25 + timeCalibConst + offsetTimeValue;
const auto nterm = timeNconst * rms_x12 / amplitude;
const auto sigmat = std::sqrt(nterm * nterm + timeConstantTerm * timeConstantTerm);
if (correctedTime > sigmat * threshP || correctedTime < -sigmat * threshM)
flags[inputGtx] |= 0x1 << EcalUncalibratedRecHit::kOutOfTime;
}
}
} // namespace multifit
} // namespace ecal
|
4df238e4adcdaba18b96652e5731084a2234a1e7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <scalar.h>
__device__ float op(float d1,float d2,float *params) {
if(d1 < d2)
return 1;
return 0;
}
extern "C"
__global__ void min_scalar_float(int n, int idx,float dx,float *dy,int incy,float *params,float *result) {
transform(n,idx,dx,dy,incy,params,result);
}
|
4df238e4adcdaba18b96652e5731084a2234a1e7.cu
|
#include <scalar.h>
__device__ float op(float d1,float d2,float *params) {
if(d1 < d2)
return 1;
return 0;
}
extern "C"
__global__ void min_scalar_float(int n, int idx,float dx,float *dy,int incy,float *params,float *result) {
transform(n,idx,dx,dy,incy,params,result);
}
|
7e350d1a5adcf6d18c5b1cb440f0a14b1ad1b57a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "image.h"
#include "common.h"
#include "conv_gpu.h"
__constant__ float d_c_kernel[127];
__global__ void conv_v_gpu_gmem_kernel(unsigned int *dst_data, const unsigned int *src_data, const float *kernel_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx >= width)
return;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (ty >= height)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
int yy = ty + (i - kernel_size / 2);
// Clamp to [0, h-1]
int tmp = (yy < (height - 1)) ? yy : (height - 1);
yy = (tmp > 0) ? tmp : 0;
unsigned int *src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
unsigned int pixel = src_data_row[tx];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * kernel_data[i];
gg += g * kernel_data[i];
bb += b * kernel_data[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
__global__ void conv_h_gpu_gmem_kernel(unsigned int *dst_data, const unsigned int *src_data, const float *kernel_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx >= width)
return;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (ty >= height)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
int xx = tx + (i - kernel_size / 2);
// Clamp to [0, w-1]
int tmp = (xx < (width - 1)) ? xx : (width - 1);
xx = (tmp > 0) ? tmp : 0;
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
unsigned int pixel = src_data_row[xx];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * kernel_data[i];
gg += g * kernel_data[i];
bb += b * kernel_data[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
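// Horizontal pass staged through shared memory: each block loads its tile plus a kernel_size/2 halo on both sides
// (the border threads fetch the halo, clamping at the image edges) before running the accumulation loop.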
__global__ void conv_h_gpu_smem_kernel(unsigned int *dst_data, const unsigned int *src_data, const float *kernel_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
extern __shared__ unsigned int shared_src_data[];
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
int shared_src_data_width = kernel_size / 2 + blockDim.x + kernel_size / 2;
if (tx < width && ty < height)
{
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + threadIdx.x] = src_data_row[tx];
if (threadIdx.x == 0)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (tx == 0)
shared_src_data[threadIdx.y * shared_src_data_width + i] = shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2];
else
{
int xx = blockIdx.x * blockDim.x - kernel_size / 2 + i;
xx = (xx > 0) ? xx : 0;
shared_src_data[threadIdx.y * shared_src_data_width + i] = src_data_row[xx];
}
}
}
if (threadIdx.x == blockDim.x - 1)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (tx == width - 1)
shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + blockDim.x + i] = shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + blockDim.x - 1];
else
{
int xx = (blockIdx.x + 1) * blockDim.x + i;
xx = (xx < (width - 1)) ? xx : (width - 1);
shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + blockDim.x + i] = src_data_row[xx];
}
}
}
}
__syncthreads();
if (ty >= height || tx >= width)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
unsigned int pixel = shared_src_data[threadIdx.y * shared_src_data_width + threadIdx.x + i];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * kernel_data[i];
gg += g * kernel_data[i];
bb += b * kernel_data[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
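// Vertical shared-memory pass with a top/bottom halo. Note the tile row stride is written as blockDim.y,
// which only equals the row width because the host wrappers below launch square 32x32 blocks.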
__global__ void conv_v_gpu_smem_kernel(unsigned int *dst_data, const unsigned int *src_data, const float *kernel_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
extern __shared__ unsigned int shared_src_data[];
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx < width && ty < height)
{
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
shared_src_data[(kernel_size / 2 + threadIdx.y) * blockDim.y + threadIdx.x] = src_data_row[tx];
if (threadIdx.y == 0)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (ty == 0)
shared_src_data[(threadIdx.y + i) * blockDim.y + threadIdx.x] = shared_src_data[(threadIdx.y + kernel_size / 2) * blockDim.y + threadIdx.x];
else
{
int yy = blockIdx.y * blockDim.y - kernel_size / 2 + i;
yy = (yy > 0) ? yy : 0;
src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
shared_src_data[(threadIdx.y + i) * blockDim.y + threadIdx.x] = src_data_row[tx];
}
}
}
if (threadIdx.y == blockDim.y - 1)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (ty == height - 1)
shared_src_data[(kernel_size / 2 + blockDim.y + i) * blockDim.y + threadIdx.x] = shared_src_data[(kernel_size / 2 + blockDim.y - 1) * blockDim.y + threadIdx.x];
else
{
int yy = (blockIdx.y + 1) * blockDim.y + i;
yy = (yy < (height - 1)) ? yy : (height - 1);
src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
shared_src_data[(kernel_size / 2 + blockDim.y + i) * blockDim.y + threadIdx.x] = src_data_row[tx];
}
}
}
}
__syncthreads();
if (ty >= height || tx >= width)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
unsigned int pixel = shared_src_data[(threadIdx.y + i) * blockDim.y + threadIdx.x];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * kernel_data[i];
gg += g * kernel_data[i];
bb += b * kernel_data[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
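// Same horizontal convolution, but the filter taps are read from the __constant__ buffer d_c_kernel instead of a global-memory array.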
__global__ void conv_h_gpu_cmem_kernel(unsigned int *dst_data, const unsigned int *src_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx >= width)
return;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (ty >= height)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
int xx = tx + (i - kernel_size / 2);
// Clamp to [0, w-1]
int tmp = (xx < (width - 1)) ? xx : (width - 1);
xx = (tmp > 0) ? tmp : 0;
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
unsigned int pixel = src_data_row[xx];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * d_c_kernel[i];
gg += g * d_c_kernel[i];
bb += b * d_c_kernel[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
__global__ void conv_v_gpu_cmem_kernel(unsigned int *dst_data, const unsigned int *src_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx >= width)
return;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (ty >= height)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
int yy = ty + (i - kernel_size / 2);
// Clamp to [0, h-1]
int tmp = (yy < (height - 1)) ? yy : (height - 1);
yy = (tmp > 0) ? tmp : 0;
unsigned int *src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
unsigned int pixel = src_data_row[tx];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * d_c_kernel[i];
gg += g * d_c_kernel[i];
bb += b * d_c_kernel[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
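// "tmem" variants: identical to the gmem kernels except for the const __restrict__ qualifiers, which allow the
// compiler to service the read-only loads through the texture/read-only data cache.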
__global__ void conv_v_gpu_tmem_kernel(unsigned int* __restrict__ dst_data, const unsigned int* __restrict__ src_data, const float* __restrict__ kernel_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx >= width)
return;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (ty >= height)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
int yy = ty + (i - kernel_size / 2);
// Clamp to [0, h-1]
int tmp = (yy < (height - 1)) ? yy : (height - 1);
yy = (tmp > 0) ? tmp : 0;
unsigned int *src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
unsigned int pixel = src_data_row[tx];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * kernel_data[i];
gg += g * kernel_data[i];
bb += b * kernel_data[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
__global__ void conv_h_gpu_tmem_kernel(unsigned int* __restrict__ dst_data, const unsigned int* __restrict__ src_data, const float* __restrict__ kernel_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx >= width)
return;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (ty >= height)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
int xx = tx + (i - kernel_size / 2);
// Clamp to [0, w-1]
int tmp = (xx < (width - 1)) ? xx : (width - 1);
xx = (tmp > 0) ? tmp : 0;
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
unsigned int pixel = src_data_row[xx];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * kernel_data[i];
gg += g * kernel_data[i];
bb += b * kernel_data[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
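// "all" variants combine the shared-memory tiling of the smem kernels with the constant-memory taps of the cmem kernels.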
__global__ void conv_h_gpu_all_kernel(unsigned int* __restrict__ dst_data, const unsigned int* __restrict__ src_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
extern __shared__ unsigned int shared_src_data[];
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
int shared_src_data_width = kernel_size / 2 + blockDim.x + kernel_size / 2;
if (tx < width && ty < height)
{
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + threadIdx.x] = src_data_row[tx];
if (threadIdx.x == 0)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (tx == 0)
shared_src_data[threadIdx.y * shared_src_data_width + i] = shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2];
else
{
int xx = blockIdx.x * blockDim.x - kernel_size / 2 + i;
xx = (xx > 0) ? xx : 0;
shared_src_data[threadIdx.y * shared_src_data_width + i] = src_data_row[xx];
}
}
}
if (threadIdx.x == blockDim.x - 1)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (tx == width - 1)
shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + blockDim.x + i] = shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + blockDim.x - 1];
else
{
int xx = (blockIdx.x + 1) * blockDim.x + i;
xx = (xx < (width - 1)) ? xx : (width - 1);
shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + blockDim.x + i] = src_data_row[xx];
}
}
}
}
__syncthreads();
if (ty >= height || tx >= width)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
unsigned int pixel = shared_src_data[threadIdx.y * shared_src_data_width + threadIdx.x + i];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * d_c_kernel[i];
gg += g * d_c_kernel[i];
bb += b * d_c_kernel[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
__global__ void conv_v_gpu_all_kernel(unsigned int* __restrict__ dst_data, const unsigned int* __restrict__ src_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
extern __shared__ unsigned int shared_src_data[];
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx < width && ty < height)
{
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
shared_src_data[(kernel_size / 2 + threadIdx.y) * blockDim.y + threadIdx.x] = src_data_row[tx];
if (threadIdx.y == 0)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (ty == 0)
shared_src_data[(threadIdx.y + i) * blockDim.y + threadIdx.x] = shared_src_data[(threadIdx.y + kernel_size / 2) * blockDim.y + threadIdx.x];
else
{
int yy = blockIdx.y * blockDim.y - kernel_size / 2 + i;
yy = (yy > 0) ? yy : 0;
src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
shared_src_data[(threadIdx.y + i) * blockDim.y + threadIdx.x] = src_data_row[tx];
}
}
}
if (threadIdx.y == blockDim.y - 1)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (ty == height - 1)
shared_src_data[(kernel_size / 2 + blockDim.y + i) * blockDim.y + threadIdx.x] = shared_src_data[(kernel_size / 2 + blockDim.y - 1) * blockDim.y + threadIdx.x];
else
{
int yy = (blockIdx.y + 1) * blockDim.y + i;
yy = (yy < (height - 1)) ? yy : (height - 1);
src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
shared_src_data[(kernel_size / 2 + blockDim.y + i) * blockDim.y + threadIdx.x] = src_data_row[tx];
}
}
}
}
__syncthreads();
if (ty >= height || tx >= width)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
unsigned int pixel = shared_src_data[(threadIdx.y + i) * blockDim.y + threadIdx.x];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * d_c_kernel[i];
gg += g * d_c_kernel[i];
bb += b * d_c_kernel[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
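// Host-side launch wrappers: every variant uses 32x32 thread blocks, one thread per output pixel.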
void conv_h_gpu_gmem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
hipLaunchKernelGGL(( conv_h_gpu_gmem_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, dst.data, src.data, kernel.data, w, h, kernel.ks, src.pitch);
}
void conv_v_gpu_gmem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
hipLaunchKernelGGL(( conv_v_gpu_gmem_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, dst.data, src.data, kernel.data, w, h, kernel.ks, src.pitch);
}
void conv_h_gpu_smem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
int shared_src_data_width = kernel.ks / 2 + block_width + kernel.ks / 2;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
hipLaunchKernelGGL(( conv_h_gpu_smem_kernel), dim3(grid_dim), dim3(block_dim), shared_src_data_width * block_height * sizeof(unsigned int), 0, dst.data, src.data, kernel.data, w, h, kernel.ks, src.pitch);
}
void conv_v_gpu_smem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
int shared_src_data_height = kernel.ks / 2 + block_height + kernel.ks / 2;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
hipLaunchKernelGGL(( conv_v_gpu_smem_kernel), dim3(grid_dim), dim3(block_dim), block_width * shared_src_data_height * sizeof(unsigned int), 0, dst.data, src.data, kernel.data, w, h, kernel.ks, src.pitch);
}
void conv_h_gpu_cmem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
float *h_kernel_data = new float[kernel.ks];
hipMemcpy(h_kernel_data, kernel.data, kernel.ks * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpyToSymbol(d_c_kernel, h_kernel_data, sizeof(float) * kernel.ks);
delete[] h_kernel_data; // free the host staging buffer; the blocking copies above have already completed
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
hipLaunchKernelGGL(( conv_h_gpu_cmem_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, dst.data, src.data, w, h, kernel.ks, src.pitch);
}
void conv_v_gpu_cmem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
float *h_kernel_data = new float[kernel.ks];
hipMemcpy(h_kernel_data, kernel.data, kernel.ks * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpyToSymbol(d_c_kernel, h_kernel_data, sizeof(float) * kernel.ks);
delete[] h_kernel_data; // free the host staging buffer; the blocking copies above have already completed
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
hipLaunchKernelGGL(( conv_v_gpu_cmem_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, dst.data, src.data, w, h, kernel.ks, src.pitch);
}
void conv_h_gpu_tmem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
hipLaunchKernelGGL(( conv_h_gpu_tmem_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, dst.data, src.data, kernel.data, w, h, kernel.ks, src.pitch);
}
void conv_v_gpu_tmem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
hipLaunchKernelGGL(( conv_v_gpu_tmem_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, dst.data, src.data, kernel.data, w, h, kernel.ks, src.pitch);
}
void conv_h_gpu_all(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
float *h_kernel_data = new float[kernel.ks];
hipMemcpy(h_kernel_data, kernel.data, kernel.ks * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpyToSymbol(d_c_kernel, h_kernel_data, sizeof(float) * kernel.ks);
delete[] h_kernel_data; // free the host staging buffer; the blocking copies above have already completed
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
int shared_src_data_width = kernel.ks / 2 + block_width + kernel.ks / 2;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
hipLaunchKernelGGL(( conv_h_gpu_all_kernel), dim3(grid_dim), dim3(block_dim), shared_src_data_width * block_height * sizeof(unsigned int), 0, dst.data, src.data, w, h, kernel.ks, src.pitch);
}
void conv_v_gpu_all(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
float *h_kernel_data = new float[kernel.ks];
hipMemcpy(h_kernel_data, kernel.data, kernel.ks * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpyToSymbol(d_c_kernel, h_kernel_data, sizeof(float) * kernel.ks);
delete[] h_kernel_data; // free the host staging buffer; the blocking copies above have already completed
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
int shared_src_data_height = kernel.ks / 2 + block_height + kernel.ks / 2;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
hipLaunchKernelGGL(( conv_v_gpu_all_kernel), dim3(grid_dim), dim3(block_dim), block_width * shared_src_data_height * sizeof(unsigned int), 0, dst.data, src.data, w, h, kernel.ks, src.pitch);
}
|
7e350d1a5adcf6d18c5b1cb440f0a14b1ad1b57a.cu
|
#include <cuda_runtime.h>
#include <stdio.h>
#include "image.h"
#include "common.h"
#include "conv_gpu.h"
__constant__ float d_c_kernel[127];
__global__ void conv_v_gpu_gmem_kernel(unsigned int *dst_data, const unsigned int *src_data, const float *kernel_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx >= width)
return;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (ty >= height)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
int yy = ty + (i - kernel_size / 2);
// Clamp to [0, h-1]
int tmp = (yy < (height - 1)) ? yy : (height - 1);
yy = (tmp > 0) ? tmp : 0;
unsigned int *src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
unsigned int pixel = src_data_row[tx];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * kernel_data[i];
gg += g * kernel_data[i];
bb += b * kernel_data[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
__global__ void conv_h_gpu_gmem_kernel(unsigned int *dst_data, const unsigned int *src_data, const float *kernel_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx >= width)
return;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (ty >= height)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
int xx = tx + (i - kernel_size / 2);
// Clamp to [0, w-1]
int tmp = (xx < (width - 1)) ? xx : (width - 1);
xx = (tmp > 0) ? tmp : 0;
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
unsigned int pixel = src_data_row[xx];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * kernel_data[i];
gg += g * kernel_data[i];
bb += b * kernel_data[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
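// Shared-memory variant: the block's tile plus a kernel_size/2 halo on each side is staged in shared memory before the accumulation loop.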
__global__ void conv_h_gpu_smem_kernel(unsigned int *dst_data, const unsigned int *src_data, const float *kernel_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
extern __shared__ unsigned int shared_src_data[];
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
int shared_src_data_width = kernel_size / 2 + blockDim.x + kernel_size / 2;
if (tx < width && ty < height)
{
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + threadIdx.x] = src_data_row[tx];
if (threadIdx.x == 0)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (tx == 0)
shared_src_data[threadIdx.y * shared_src_data_width + i] = shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2];
else
{
int xx = blockIdx.x * blockDim.x - kernel_size / 2 + i;
xx = (xx > 0) ? xx : 0;
shared_src_data[threadIdx.y * shared_src_data_width + i] = src_data_row[xx];
}
}
}
if (threadIdx.x == blockDim.x - 1)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (tx == width - 1)
shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + blockDim.x + i] = shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + blockDim.x - 1];
else
{
int xx = (blockIdx.x + 1) * blockDim.x + i;
xx = (xx < (width - 1)) ? xx : (width - 1);
shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + blockDim.x + i] = src_data_row[xx];
}
}
}
}
__syncthreads();
if (ty >= height || tx >= width)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
unsigned int pixel = shared_src_data[threadIdx.y * shared_src_data_width + threadIdx.x + i];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * kernel_data[i];
gg += g * kernel_data[i];
bb += b * kernel_data[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
__global__ void conv_v_gpu_smem_kernel(unsigned int *dst_data, const unsigned int *src_data, const float *kernel_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
extern __shared__ unsigned int shared_src_data[];
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx < width && ty < height)
{
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
shared_src_data[(kernel_size / 2 + threadIdx.y) * blockDim.y + threadIdx.x] = src_data_row[tx];
if (threadIdx.y == 0)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (ty == 0)
shared_src_data[(threadIdx.y + i) * blockDim.y + threadIdx.x] = shared_src_data[(threadIdx.y + kernel_size / 2) * blockDim.y + threadIdx.x];
else
{
int yy = blockIdx.y * blockDim.y - kernel_size / 2 + i;
yy = (yy > 0) ? yy : 0;
src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
shared_src_data[(threadIdx.y + i) * blockDim.y + threadIdx.x] = src_data_row[tx];
}
}
}
if (threadIdx.y == blockDim.y - 1)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (ty == height - 1)
shared_src_data[(kernel_size / 2 + blockDim.y + i) * blockDim.y + threadIdx.x] = shared_src_data[(kernel_size / 2 + blockDim.y - 1) * blockDim.y + threadIdx.x];
else
{
int yy = (blockIdx.y + 1) * blockDim.y + i;
yy = (yy < (height - 1)) ? yy : (height - 1);
src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
shared_src_data[(kernel_size / 2 + blockDim.y + i) * blockDim.y + threadIdx.x] = src_data_row[tx];
}
}
}
}
__syncthreads();
if (ty >= height || tx >= width)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
unsigned int pixel = shared_src_data[(threadIdx.y + i) * blockDim.y + threadIdx.x];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * kernel_data[i];
gg += g * kernel_data[i];
bb += b * kernel_data[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
__global__ void conv_h_gpu_cmem_kernel(unsigned int *dst_data, const unsigned int *src_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx >= width)
return;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (ty >= height)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
int xx = tx + (i - kernel_size / 2);
// Clamp to [0, w-1]
int tmp = (xx < (width - 1)) ? xx : (width - 1);
xx = (tmp > 0) ? tmp : 0;
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
unsigned int pixel = src_data_row[xx];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * d_c_kernel[i];
gg += g * d_c_kernel[i];
bb += b * d_c_kernel[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
__global__ void conv_v_gpu_cmem_kernel(unsigned int *dst_data, const unsigned int *src_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx >= width)
return;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (ty >= height)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
int yy = ty + (i - kernel_size / 2);
// Clamp to [0, h-1]
int tmp = (yy < (height - 1)) ? yy : (height - 1);
yy = (tmp > 0) ? tmp : 0;
unsigned int *src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
unsigned int pixel = src_data_row[tx];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * d_c_kernel[i];
gg += g * d_c_kernel[i];
bb += b * d_c_kernel[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
__global__ void conv_v_gpu_tmem_kernel(unsigned int* __restrict__ dst_data, const unsigned int* __restrict__ src_data, const float* __restrict__ kernel_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx >= width)
return;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (ty >= height)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
int yy = ty + (i - kernel_size / 2);
// Clamp to [0, h-1]
int tmp = (yy < (height - 1)) ? yy : (height - 1);
yy = (tmp > 0) ? tmp : 0;
unsigned int *src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
unsigned int pixel = src_data_row[tx];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * kernel_data[i];
gg += g * kernel_data[i];
bb += b * kernel_data[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
__global__ void conv_h_gpu_tmem_kernel(unsigned int* __restrict__ dst_data, const unsigned int* __restrict__ src_data, const float* __restrict__ kernel_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx >= width)
return;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (ty >= height)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
int xx = tx + (i - kernel_size / 2);
// Clamp to [0, w-1]
int tmp = (xx < (width - 1)) ? xx : (width - 1);
xx = (tmp > 0) ? tmp : 0;
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
unsigned int pixel = src_data_row[xx];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * kernel_data[i];
gg += g * kernel_data[i];
bb += b * kernel_data[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
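// "all" variants: shared-memory tiling combined with the constant-memory tap buffer d_c_kernel.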
__global__ void conv_h_gpu_all_kernel(unsigned int* __restrict__ dst_data, const unsigned int* __restrict__ src_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
extern __shared__ unsigned int shared_src_data[];
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
int shared_src_data_width = kernel_size / 2 + blockDim.x + kernel_size / 2;
if (tx < width && ty < height)
{
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + threadIdx.x] = src_data_row[tx];
if (threadIdx.x == 0)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (tx == 0)
shared_src_data[threadIdx.y * shared_src_data_width + i] = shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2];
else
{
int xx = blockIdx.x * blockDim.x - kernel_size / 2 + i;
xx = (xx > 0) ? xx : 0;
shared_src_data[threadIdx.y * shared_src_data_width + i] = src_data_row[xx];
}
}
}
if (threadIdx.x == blockDim.x - 1)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (tx == width - 1)
shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + blockDim.x + i] = shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + blockDim.x - 1];
else
{
int xx = (blockIdx.x + 1) * blockDim.x + i;
xx = (xx < (width - 1)) ? xx : (width - 1);
shared_src_data[threadIdx.y * shared_src_data_width + kernel_size / 2 + blockDim.x + i] = src_data_row[xx];
}
}
}
}
__syncthreads();
if (ty >= height || tx >= width)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
unsigned int pixel = shared_src_data[threadIdx.y * shared_src_data_width + threadIdx.x + i];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * d_c_kernel[i];
gg += g * d_c_kernel[i];
bb += b * d_c_kernel[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
__global__ void conv_v_gpu_all_kernel(unsigned int* __restrict__ dst_data, const unsigned int* __restrict__ src_data, const int width, const int height, const int kernel_size, const size_t pitch)
{
extern __shared__ unsigned int shared_src_data[];
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx < width && ty < height)
{
unsigned int *src_data_row = (unsigned int*)((char*)src_data + ty * pitch);
shared_src_data[(kernel_size / 2 + threadIdx.y) * blockDim.y + threadIdx.x] = src_data_row[tx];
if (threadIdx.y == 0)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (ty == 0)
shared_src_data[(threadIdx.y + i) * blockDim.y + threadIdx.x] = shared_src_data[(threadIdx.y + kernel_size / 2) * blockDim.y + threadIdx.x];
else
{
int yy = blockIdx.y * blockDim.y - kernel_size / 2 + i;
yy = (yy > 0) ? yy : 0;
src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
shared_src_data[(threadIdx.y + i) * blockDim.y + threadIdx.x] = src_data_row[tx];
}
}
}
if (threadIdx.y == blockDim.y - 1)
{
for (int i = 0; i < kernel_size / 2; i++)
{
if (ty == height - 1)
shared_src_data[(kernel_size / 2 + blockDim.y + i) * blockDim.y + threadIdx.x] = shared_src_data[(kernel_size / 2 + blockDim.y - 1) * blockDim.y + threadIdx.x];
else
{
int yy = (blockIdx.y + 1) * blockDim.y + i;
yy = (yy < (height - 1)) ? yy : (height - 1);
src_data_row = (unsigned int*)((char*)src_data + yy * pitch);
shared_src_data[(kernel_size / 2 + blockDim.y + i) * blockDim.y + threadIdx.x] = src_data_row[tx];
}
}
}
}
__syncthreads();
if (ty >= height || tx >= width)
return;
float rr = 0.0f, gg = 0.0f, bb = 0.0f;
for (int i = 0; i < kernel_size; i++) {
unsigned int pixel = shared_src_data[(threadIdx.y + i) * blockDim.y + threadIdx.x];
unsigned char r = pixel & 0xff;
unsigned char g = (pixel >> 8) & 0xff;
unsigned char b = (pixel >> 16) & 0xff;
rr += r * d_c_kernel[i];
gg += g * d_c_kernel[i];
bb += b * d_c_kernel[i];
}
unsigned char rr_c = rr + 0.5f;
unsigned char gg_c = gg + 0.5f;
unsigned char bb_c = bb + 0.5f;
unsigned int *dst_data_row = (unsigned int*)((char*)dst_data + ty * pitch);
dst_data_row[tx] = rr_c | (gg_c << 8) | (bb_c << 16);
}
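// Host-side launch wrappers (32x32 thread blocks, one thread per output pixel).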
void conv_h_gpu_gmem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
conv_h_gpu_gmem_kernel<<<grid_dim, block_dim>>>(dst.data, src.data, kernel.data, w, h, kernel.ks, src.pitch);
}
void conv_v_gpu_gmem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
conv_v_gpu_gmem_kernel<<<grid_dim, block_dim>>>(dst.data, src.data, kernel.data, w, h, kernel.ks, src.pitch);
}
void conv_h_gpu_smem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
int shared_src_data_width = kernel.ks / 2 + block_width + kernel.ks / 2;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
conv_h_gpu_smem_kernel<<<grid_dim, block_dim, shared_src_data_width * block_height * sizeof(unsigned int)>>>(dst.data, src.data, kernel.data, w, h, kernel.ks, src.pitch);
}
void conv_v_gpu_smem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
int shared_src_data_height = kernel.ks / 2 + block_height + kernel.ks / 2;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
conv_v_gpu_smem_kernel<<<grid_dim, block_dim, block_width * shared_src_data_height * sizeof(unsigned int)>>>(dst.data, src.data, kernel.data, w, h, kernel.ks, src.pitch);
}
void conv_h_gpu_cmem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
float *h_kernel_data = new float[kernel.ks];
cudaMemcpy(h_kernel_data, kernel.data, kernel.ks * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpyToSymbol(d_c_kernel, h_kernel_data, sizeof(float) * kernel.ks);
delete[] h_kernel_data; // free the host staging buffer; the blocking copies above have already completed
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
conv_h_gpu_cmem_kernel<<<grid_dim, block_dim>>>(dst.data, src.data, w, h, kernel.ks, src.pitch);
}
void conv_v_gpu_cmem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
float *h_kernel_data = new float[kernel.ks];
cudaMemcpy(h_kernel_data, kernel.data, kernel.ks * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpyToSymbol(d_c_kernel, h_kernel_data, sizeof(float) * kernel.ks);
delete[] h_kernel_data; // free the host staging buffer; the blocking copies above have already completed
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
conv_v_gpu_cmem_kernel<<<grid_dim, block_dim>>>(dst.data, src.data, w, h, kernel.ks, src.pitch);
}
void conv_h_gpu_tmem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
conv_h_gpu_tmem_kernel<<<grid_dim, block_dim>>>(dst.data, src.data, kernel.data, w, h, kernel.ks, src.pitch);
}
void conv_v_gpu_tmem(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
conv_v_gpu_tmem_kernel<<<grid_dim, block_dim>>>(dst.data, src.data, kernel.data, w, h, kernel.ks, src.pitch);
}
void conv_h_gpu_all(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
float *h_kernel_data = new float[kernel.ks];
cudaMemcpy(h_kernel_data, kernel.data, kernel.ks * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpyToSymbol(d_c_kernel, h_kernel_data, sizeof(float) * kernel.ks);
delete[] h_kernel_data; // free the host staging buffer; the blocking copies above have already completed
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
int shared_src_data_width = kernel.ks / 2 + block_width + kernel.ks / 2;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
conv_h_gpu_all_kernel<<<grid_dim, block_dim, shared_src_data_width * block_height * sizeof(unsigned int)>>>(dst.data, src.data, w, h, kernel.ks, src.pitch);
}
void conv_v_gpu_all(image_gpu &dst, const image_gpu &src, const filterkernel_gpu &kernel)
{
float *h_kernel_data = new float[kernel.ks];
cudaMemcpy(h_kernel_data, kernel.data, kernel.ks * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpyToSymbol(d_c_kernel, h_kernel_data, sizeof(float) * kernel.ks);
delete[] h_kernel_data; // free the host staging buffer; the blocking copies above have already completed
int w = src.width, h = src.height;
int block_width = 32;
int block_height = 32;
int shared_src_data_height = kernel.ks / 2 + block_height + kernel.ks / 2;
dim3 block_dim(block_width, block_height);
dim3 grid_dim(div_up(w, block_width), div_up(h, block_height));
conv_v_gpu_all_kernel<<<grid_dim, block_dim, block_width * shared_src_data_height * sizeof(unsigned int)>>>(dst.data, src.data, w, h, kernel.ks, src.pitch);
}
|
0db1a92fdc2494a76bba078c06c9f5f55f95408e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cmath>
#include "paddle/fluid/memory/buffer.h"
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/optimizers/cast_with_ptr.h"
#include "paddle/fluid/operators/optimizers/distributed_fused_lamb_op.h"
#include "paddle/fluid/operators/optimizers/multi_tensor_apply.h"
#include "paddle/fluid/operators/tensor_to_string.h"
#include "paddle/fluid/platform/aligned_vector.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/core/utils/data_type.h"
#ifdef __NVCC__
#include "hipcub/hipcub.hpp"
#include "math.h" // NOLINT
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
#include "math.h" // NOLINT
namespace cub = hipcub;
#endif
namespace paddle {
namespace operators {
template <typename T>
using MasterT = typename details::MPTypeTrait<T>::Type;
template <typename T>
static void FillZeroWithPtr(T *x, size_t n, gpuStream_t stream) {
static_assert(!std::is_same<T, void>::value, "T cannot be void.");
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemsetAsync(x, 0, n * sizeof(T), stream));
#else
PADDLE_ENFORCE_GPU_SUCCESS(hipMemsetAsync(x, 0, n * sizeof(T), stream));
#endif
}
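// Per-chunk squared L2 norm: each thread block reduces one chunk of one tensor using vectorized loads and writes its
// partial sum to y[tensor_id * max_chunk_num + chunk_id] for the second-stage reduction below.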
template <typename T, int BlockDim, int VecSize>
struct L2NormFunctor {
DEVICE void operator()(int tensor_id, int chunk_id, int offset, int size,
const T *x, MasterT<T> *y, int max_chunk_num) const {
using MT = MasterT<T>;
const T *ptr = x + offset;
using BlockReduce = hipcub::BlockReduce<MT, BlockDim>;
__shared__ typename BlockReduce::TempStorage storage;
MT square_sum = static_cast<MT>(0);
int i;
for (i = threadIdx.x * VecSize; i + VecSize <= size;
i += (BlockDim * VecSize)) {
platform::AlignedVector<T, VecSize> tmp_vec;
platform::Load(ptr + i, &tmp_vec);
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
auto tmp = static_cast<MT>(tmp_vec[j]);
square_sum += (tmp * tmp);
}
}
for (; i < size; ++i) {
auto tmp = static_cast<MT>(ptr[i]);
square_sum += (tmp * tmp);
}
square_sum = BlockReduce(storage).Reduce(square_sum, hipcub::Sum());
if (threadIdx.x == 0) {
y[tensor_id * max_chunk_num + chunk_id] = square_sum;
}
}
};
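// Second-stage reduction: one block per tensor sums that tensor's per-chunk partial results into a single squared-norm value.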
template <typename InT, typename OutT, int BlockDim>
static __global__ void MultiTensorL2NormReduceAgainCUDAKernel(
const InT *x, OutT *y, int max_chunk_num) {
int tensor_id = blockIdx.x;
x += (tensor_id * max_chunk_num);
using BlockReduce = hipcub::BlockReduce<InT, BlockDim>;
__shared__ typename BlockReduce::TempStorage storage;
InT sum = static_cast<InT>(0);
for (int i = threadIdx.x; i < max_chunk_num; i += BlockDim) {
sum += x[i];
}
sum = BlockReduce(storage).Reduce(sum, hipcub::Sum());
if (threadIdx.x == 0) {
y[blockIdx.x] = static_cast<OutT>(sum);
}
}
template <typename T>
static int GetChunkedVecSize(const T *ptr, int chunk_size) {
static_assert(!std::is_same<T, void>::value, "T cannot be void.");
constexpr int max_load_bits = 128;
int valid_vec_size = max_load_bits / CHAR_BIT / sizeof(T);
auto address = reinterpret_cast<uintptr_t>(ptr);
constexpr int vec8 = alignof(platform::AlignedVector<T, 8>);
constexpr int vec4 = alignof(platform::AlignedVector<T, 4>);
constexpr int vec2 = alignof(platform::AlignedVector<T, 2>);
chunk_size *= sizeof(T);
if (address % vec8 == 0 && chunk_size % vec8 == 0) {
return ::min(8, valid_vec_size);
} else if (address % vec4 == 0 && chunk_size % vec4 == 0) {
return ::min(4, valid_vec_size);
} else if (address % vec2 == 0 && chunk_size % vec2 == 0) {
return ::min(2, valid_vec_size);
} else {
return 1;
}
}
#define PD_VEC_LAUNCH_KERNEL_CASE(__vec_size, ...) \
case __vec_size: { \
constexpr int kVecSize = __vec_size; \
__VA_ARGS__; \
break; \
}
#define PD_VEC_LAUNCH_KERNEL(__vec_size, ...) \
do { \
switch (__vec_size) { \
PD_VEC_LAUNCH_KERNEL_CASE(8, __VA_ARGS__); \
PD_VEC_LAUNCH_KERNEL_CASE(4, __VA_ARGS__); \
PD_VEC_LAUNCH_KERNEL_CASE(2, __VA_ARGS__); \
PD_VEC_LAUNCH_KERNEL_CASE(1, __VA_ARGS__); \
} \
} while (0)
// TODO(zengjinle): which chunk_size is better?
template <typename InT, typename OutT, int MaxTensorNumPerLaunch = 160,
int MaxChunkNumPerLaunch = 780>
static void MultiTensorL2Norm(const platform::CUDAPlace &place,
gpuStream_t stream, const InT *x,
const int *offsets, int n, OutT *y,
int chunk_size = 65536) {
if (n <= 0) return;
constexpr int kNumTensor = MaxTensorNumPerLaunch;
constexpr int kNumChunk = MaxChunkNumPerLaunch;
constexpr int kBlockDim = 512;
int max_chunk_num = -1;
int vec_size = 8;
int total_chunk_num = 0;
for (int i = 0; i < n; ++i) {
vec_size = ::min(
vec_size, GetChunkedVecSize(x + offsets[i] - offsets[0], chunk_size));
int length = offsets[i + 1] - offsets[i];
auto tmp_chunk_num = (length + chunk_size - 1) / chunk_size;
max_chunk_num = ::max(max_chunk_num, tmp_chunk_num);
total_chunk_num += tmp_chunk_num;
}
VLOG(1) << "MultiTensorL2Norm max_chunk_num = " << max_chunk_num
<< " , total_chunk_num = " << total_chunk_num
<< " , tensor_num = " << n;
using MT = MasterT<InT>;
memory::Buffer tmp_out(place);
auto *tmp_out_ptr = tmp_out.Alloc<MT>(n * max_chunk_num);
FillZeroWithPtr(tmp_out_ptr, n * max_chunk_num, stream);
#define PD_LAUNCH_MULTI_TENSOR_APPLY_L2_NORM_KERNEL \
do { \
using FunctorT = L2NormFunctor<InT, kBlockDim, kVecSize>; \
VLOG(10) << __func__ << " " << typeid(InT).name() \
<< " VecSize = " << kVecSize; \
MultiTensorApply<FunctorT, kNumTensor, kNumChunk>( \
FunctorT(), stream, offsets, n, chunk_size, kBlockDim, x, tmp_out_ptr, \
max_chunk_num); \
} while (0)
PD_VEC_LAUNCH_KERNEL(vec_size, PD_LAUNCH_MULTI_TENSOR_APPLY_L2_NORM_KERNEL);
#undef PD_LAUNCH_MULTI_TENSOR_APPLY_L2_NORM_KERNEL
hipLaunchKernelGGL(( MultiTensorL2NormReduceAgainCUDAKernel<
MT, OutT, kBlockDim>), dim3(n), dim3(kBlockDim), 0, stream, tmp_out_ptr, y,
max_chunk_num);
}
template <int LogLevel>
static void LogParamAndTrustRatioDivSquareNorm(
const framework::ExecutionContext &ctx, const float *param_square_norm,
const float *trust_ratio_div_square_norm) {
if (!VLOG_IS_ON(LogLevel)) return;
auto tensors = ctx.MultiInput<framework::Tensor>("Param");
if (tensors.empty()) return;
const auto *order = ctx.Input<framework::Tensor>("ParamOrder")->data<int>();
size_t n = tensors.size();
auto place = tensors[0]->place();
auto pn_vec = ToVector(param_square_norm, n, place);
auto tn_vec = ToVector(trust_ratio_div_square_norm, n, place);
const auto &names = ctx.GetOp().Inputs("Param");
for (size_t i = 0; i < n; ++i) {
auto idx = order[i];
VLOG(LogLevel) << "Param " << tensors[idx]->dtype() << " " << names[idx]
<< " pn = " << pn_vec[i] << " , tn = " << tn_vec[i];
}
}
static bool IsFinite(const platform::CUDADeviceContext &dev_ctx,
const float *ptr) {
auto stream = dev_ctx.stream();
float cpu_value;
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(&cpu_value, ptr, sizeof(float),
hipMemcpyDeviceToHost, stream));
PADDLE_ENFORCE_GPU_SUCCESS(hipStreamSynchronize(stream));
#else
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(&cpu_value, ptr, sizeof(float),
hipMemcpyDeviceToHost, stream));
PADDLE_ENFORCE_GPU_SUCCESS(hipStreamSynchronize(stream));
#endif
LOG(INFO) << "NAN_INF indicator value: " << cpu_value;
return isfinite(cpu_value);
}
template <typename T>
static const T *GetInputTensorPtr(const framework::ExecutionContext &ctx,
const char *in_name,
int64_t *numel = nullptr) {
const auto *in_tensor = ctx.Input<framework::Tensor>(in_name);
PADDLE_ENFORCE_NOT_NULL(in_tensor, platform::errors::InvalidArgument(
"Input(%s) cannot be NULL.", in_name));
if (in_tensor->IsInitialized()) {
if (numel) *numel = in_tensor->numel();
return in_tensor->data<T>();
} else {
if (numel) *numel = 0;
return nullptr;
}
}
template <typename T, bool AllowNotExist = false>
static T *GetSameInOutTensorPtr(const framework::ExecutionContext &ctx,
const platform::Place &place,
const char *in_name, const char *out_name,
int64_t *numel = nullptr) {
const auto *in_tensor = ctx.Input<framework::Tensor>(in_name);
if (in_tensor == nullptr || !in_tensor->IsInitialized()) {
PADDLE_ENFORCE_EQ(AllowNotExist, true,
platform::errors::InvalidArgument(
"Input(%s) cannot be NULL.", in_name));
if (numel) *numel = 0;
return nullptr;
}
auto *out_tensor = ctx.Output<framework::Tensor>(out_name);
PADDLE_ENFORCE_NOT_NULL(in_tensor, platform::errors::InvalidArgument(
"Input(%s) cannot be NULL.", in_name));
PADDLE_ENFORCE_NOT_NULL(out_tensor,
platform::errors::InvalidArgument(
"Output(%s) cannot be NULL.", out_name));
const T *in_data = in_tensor->data<T>();
T *out_data = out_tensor->mutable_data<T>(place);
PADDLE_ENFORCE_EQ(in_data, out_data,
platform::errors::InvalidArgument(
"Input(%s) and Output(%s) must be the same Tensor.",
in_name, out_name));
if (numel) *numel = out_tensor->numel();
return out_data;
}
template <typename T>
struct SquareFunctor {
HOSTDEVICE MasterT<T> operator()(T x) const {
auto y = static_cast<MasterT<T>>(x);
return y * y;
}
};
template <typename T>
struct IsNanInfFunctor {
HOSTDEVICE bool operator()(T x) const { return !isfinite(x); }
};
struct OrFunctor {
HOSTDEVICE bool operator()(bool x, bool y) const { return x || y; }
};
struct AndFunctor {
HOSTDEVICE bool operator()(bool x, bool y) const { return x && y; }
};
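// Elementwise y = x * scale with vectorized loads/stores; elements that do not fill a full vector are handled by the scalar tail loop.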
template <typename T1, typename T2, int VecSize>
static __global__ void ScaleCUDAKernel(const T1 *__restrict__ x,
const T2 *__restrict__ scale,
T1 *__restrict__ y, int num) {
static_assert(sizeof(T1) <= sizeof(T2),
"sizeof(T1) must be not greater than sizeof(T2).");
T2 s = scale[0];
int i = (threadIdx.x + blockIdx.x * blockDim.x) * VecSize;
int stride = blockDim.x * gridDim.x * VecSize;
for (; i + VecSize <= num; i += stride) {
platform::AlignedVector<T1, VecSize> x_vec;
platform::AlignedVector<T1, VecSize> y_vec;
platform::Load(x + i, &x_vec);
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
y_vec[j] = static_cast<T1>(static_cast<T2>(x_vec[j]) * s);
}
platform::Store(y_vec, y + i);
}
for (; i < num; ++i) {
y[i] = static_cast<T1>(static_cast<T2>(x[i]) * s);
}
}
template <typename T>
static __global__ void AddToCUDAKernel(const T *__restrict__ x,
T *__restrict__ y) {
y[0] += x[0];
}
// If clip before allreduce,
// coeff = global_scale * max_global_grad_norm / (1e-6 + sqrt(square_grad_norm)
// * rescale_grad)
// if coeff >= 1 or coeff is Nan/Inf, scale = 1.0
// else scale = coeff
template <typename T1, typename T2>
static __global__ void CalcGradNormClipBeforeAllReduceScale(
const T1 *__restrict__ global_scale, T1 max_global_grad_norm,
const T1 *__restrict__ square_grad_norm, T1 *__restrict__ out1,
T2 *__restrict__ out2, T1 clip_rescale_grad) {
T1 grad_norm = static_cast<T1>(sqrtf(*square_grad_norm)) * clip_rescale_grad;
T1 scale = global_scale[0] * max_global_grad_norm / (1e-6 + grad_norm);
bool found_nan_inf = !isfinite(scale);
if (scale >= 1 || found_nan_inf) {
scale = static_cast<T1>(1.0);
}
if (out1) {
*out1 = scale;
}
if (out2) {
*out2 = static_cast<T2>(scale);
}
}
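// 0x7fffffff is a quiet-NaN bit pattern, so the written float acts as a NaN/Inf indicator for later isfinite() checks.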
static __global__ void SetNanInfValueCUDAKernelOneFlag(const bool *in_flag_p,
float *out_p) {
*out_p = (*in_flag_p) ? __int_as_float(0x7fffffffU) : 0.0f;
}
static __global__ void SetNanInfValueCUDAKernelTwoFlag(const bool *in_flag_p_1,
const bool *in_flag_p_2,
float *out_p) {
*out_p =
((*in_flag_p_1) || (*in_flag_p_2)) ? __int_as_float(0x7fffffffU) : 0.0f;
}
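// Fused LAMB moment update: rescales (and optionally clips) the gradient, updates the exponential moving moments and
// writes the trust_ratio_div term; bails out early and sets found_inf when the global gradient norm is not finite.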
template <typename T, typename GradT, int VecSize>
static __global__ void UpdateLambMomentAndTrustRatioDivCUDAKernel(
const T *__restrict__ param_p, const GradT *__restrict__ grad_p,
const T *__restrict__ square_grad_norm_p,
const T *__restrict__ global_scale, const T *__restrict__ beta1pow_p,
const T *__restrict__ beta2pow_p, T *__restrict__ mom1_p,
T *__restrict__ mom2_p, T *__restrict__ trust_ratio_div_p, bool *found_inf,
T weight_decay, int weight_decay_end_numel, T beta1, T beta2, T epsilon,
T max_global_grad_norm, int num, T rescale_grad) {
T square_grad_norm = *square_grad_norm_p;
bool need_update_found_inf =
(found_inf && threadIdx.x == 0 && blockIdx.x == 0);
if (!isfinite(square_grad_norm)) {
if (need_update_found_inf) *found_inf = true;
return;
} else if (need_update_found_inf) {
*found_inf = false;
}
T scale = rescale_grad / global_scale[0];
if (max_global_grad_norm > 0) {
T clip_scale =
max_global_grad_norm / (sqrtf(square_grad_norm) * scale + 1e-6);
if (clip_scale < static_cast<T>(1)) {
scale *= clip_scale;
}
}
T one_minus_beta1pow = 1 - beta1pow_p[0];
T one_minus_beta2pow = 1 - beta2pow_p[0];
int i = (threadIdx.x + blockIdx.x * blockDim.x) * VecSize;
int stride = blockDim.x * gridDim.x * VecSize;
for (; i + VecSize <= num; i += stride) {
platform::AlignedVector<T, VecSize> param_vec;
platform::AlignedVector<GradT, VecSize> grad_vec;
platform::AlignedVector<T, VecSize> mom1_vec;
platform::AlignedVector<T, VecSize> mom2_vec;
platform::AlignedVector<T, VecSize> trust_ratio_div_vec;
T cur_weight_decay = (i < weight_decay_end_numel) * weight_decay;
if (cur_weight_decay != static_cast<T>(0.0)) {
platform::Load(param_p + i, ¶m_vec);
} else {
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
param_vec[j] = static_cast<T>(0);
}
}
platform::Load(grad_p + i, &grad_vec);
platform::Load(mom1_p + i, &mom1_vec);
platform::Load(mom2_p + i, &mom2_vec);
#define PD_LAMB_MOM_TRUST_RATIO_DIV_UPDATE(__param, __grad, __mom1, __mom2, \
__trust_ratio_div, __idx) \
T p = __param[__idx]; \
T g = static_cast<T>(__grad[__idx]) * scale; \
T mom1 = __mom1[__idx]; \
T mom2 = __mom2[__idx]; \
mom1 = beta1 * mom1 + (1 - beta1) * g; \
mom2 = beta2 * mom2 + (1 - beta2) * g * g; \
T mom1_unbiased = mom1 / one_minus_beta1pow; \
T mom2_unbiased = mom2 / one_minus_beta2pow; \
__trust_ratio_div[__idx] = \
mom1_unbiased / (sqrtf(mom2_unbiased) + epsilon) + cur_weight_decay * p; \
__mom1[__idx] = mom1; \
__mom2[__idx] = mom2;
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
PD_LAMB_MOM_TRUST_RATIO_DIV_UPDATE(param_vec, grad_vec, mom1_vec,
mom2_vec, trust_ratio_div_vec, j);
}
platform::Store(mom1_vec, mom1_p + i);
platform::Store(mom2_vec, mom2_p + i);
platform::Store(trust_ratio_div_vec, trust_ratio_div_p + i);
}
for (; i < num; ++i) {
T cur_weight_decay = (i < weight_decay_end_numel) * weight_decay;
PD_LAMB_MOM_TRUST_RATIO_DIV_UPDATE(param_p, grad_p, mom1_p, mom2_p,
trust_ratio_div_p, i);
}
}
template <typename T, typename GradT>
static void MultiTensorUpdateLambMomentAndTrustRatioDiv(
const platform::CUDADeviceContext &dev_ctx, const int *offsets, int n,
const T *param_p, const GradT *grad_p, const T *square_grad_norm_p,
const T *global_scale, const T *beta1pow_p, const T *beta2pow_p, T *mom1_p,
T *mom2_p, T *trust_ratio_div_p, bool *found_inf_p, T weight_decay,
int weight_decay_end_idx, T beta1, T beta2, T epsilon,
T max_global_grad_norm, T rescale_grad) {
if (n <= 0) return;
int numel = offsets[n] - offsets[0];
PADDLE_ENFORCE_GE(weight_decay_end_idx, 0,
platform::errors::InvalidArgument(
"The weight decay end index should be >= 0."));
PADDLE_ENFORCE_LE(weight_decay_end_idx, n,
platform::errors::InvalidArgument(
"The weight decay end index should be < %d.", n));
auto weight_decay_end_numel = offsets[weight_decay_end_idx] - offsets[0];
int vec_size = GetChunkedVecSize(param_p, 0);
vec_size = ::min(vec_size, GetChunkedVecSize(grad_p, 0));
vec_size = ::min(vec_size, GetChunkedVecSize(mom1_p, 0));
vec_size = ::min(vec_size, GetChunkedVecSize(mom2_p, 0));
vec_size = ::min(vec_size, GetChunkedVecSize(trust_ratio_div_p, 0));
for (int i = 0; i < n; ++i) {
auto length = offsets[i + 1] - offsets[i];
while (length % vec_size != 0) {
vec_size /= 2;
}
}
VLOG(1) << __func__ << " VecSize = " << vec_size;
auto stream = dev_ctx.stream();
auto config = platform::GetGpuLaunchConfig1D(dev_ctx, numel, vec_size);
#define PD_LAUNCH_LAMB_MOM_TRUST_RATIO_DIV_KERNEL \
do { \
hipLaunchKernelGGL(( UpdateLambMomentAndTrustRatioDivCUDAKernel<T, GradT, kVecSize>), \
config.block_per_grid, dim3(config.thread_per_block), 0, stream, \
param_p, grad_p, square_grad_norm_p, global_scale, beta1pow_p, \
beta2pow_p, mom1_p, mom2_p, trust_ratio_div_p, found_inf_p, \
weight_decay, weight_decay_end_numel, beta1, beta2, epsilon, \
max_global_grad_norm, numel, rescale_grad); \
} while (0)
PD_VEC_LAUNCH_KERNEL(vec_size, PD_LAUNCH_LAMB_MOM_TRUST_RATIO_DIV_KERNEL);
#undef PD_LAUNCH_LAMB_MOM_TRUST_RATIO_DIV_KERNEL
}
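// Helper that multiplies beta1pow/beta2pow by beta1/beta2 exactly once per
// optimizer step. The NeedUpdate=false specialization below is a no-op and is
// selected for launches that must not touch the beta pow tensors again.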
template <typename T, bool NeedUpdate /*=true*/>
struct LambBetaPowUpdateOnceHelper {
LambBetaPowUpdateOnceHelper(T *beta1pow, T *beta2pow, T beta1, T beta2) {
PADDLE_ENFORCE_NOT_NULL(beta1pow,
platform::errors::InvalidArgument(
"The beta1pow should not be nullptr."));
PADDLE_ENFORCE_NOT_NULL(beta2pow,
platform::errors::InvalidArgument(
"The beta2pow should not be nullptr."));
beta1pow_ = beta1pow;
beta2pow_ = beta2pow;
beta1_ = beta1;
beta2_ = beta2;
}
HOSTDEVICE void UpdateBetaPows() const {
beta1pow_[0] *= beta1_;
beta2pow_[0] *= beta2_;
}
private:
T *__restrict__ beta1pow_;
T *__restrict__ beta2pow_;
T beta1_;
T beta2_;
};
template <typename T>
struct LambBetaPowUpdateOnceHelper<T, false> {
LambBetaPowUpdateOnceHelper(T *beta1pow, T *beta2pow, T beta1, T beta2) {
PADDLE_ENFORCE_EQ(
beta1pow, nullptr,
platform::errors::InvalidArgument("The beta1pow should be nullptr."));
PADDLE_ENFORCE_EQ(
beta2pow, nullptr,
platform::errors::InvalidArgument("The beta2pow should be nullptr."));
}
HOSTDEVICE void UpdateBetaPows() const {}
};
template <typename T, bool HasMasterParam /*=true*/>
struct LambParamHelper {
LambParamHelper(T *param, MasterT<T> *master_param) {
constexpr bool kIsSameType = std::is_same<T, MasterT<T>>::value;
PADDLE_ENFORCE_EQ(kIsSameType, false,
platform::errors::InvalidArgument(
"T must not be the same with MasterT<T>."));
PADDLE_ENFORCE_NOT_NULL(master_param,
platform::errors::InvalidArgument(
"Master parameter must be provided."));
param_ = param;
master_param_ = master_param;
}
HOSTDEVICE T *__restrict__ ParamPtr() { return param_; }
HOSTDEVICE MasterT<T> *__restrict__ MasterParamPtr() { return master_param_; }
private:
T *__restrict__ param_;
MasterT<T> *__restrict__ master_param_;
};
template <typename T>
struct LambParamHelper<T, false> {
LambParamHelper(T *param, MasterT<T> *master_param) {
constexpr bool kIsSameType = std::is_same<T, MasterT<T>>::value;
PADDLE_ENFORCE_EQ(kIsSameType, true,
platform::errors::InvalidArgument(
"T must be the same with MasterT<T>."));
if (master_param != nullptr) {
PADDLE_ENFORCE_EQ(static_cast<void *>(param),
static_cast<void *>(master_param),
platform::errors::InvalidArgument(
"Master parameter must be nullptr or the same as "
"non-master parameter."));
}
param_ = param;
}
HOSTDEVICE T *__restrict__ ParamPtr() { return param_; }
HOSTDEVICE constexpr MasterT<T> *MasterParamPtr() { return nullptr; }
private:
T *__restrict__ param_;
};
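// Per-chunk functor that applies the LAMB parameter update:
//   ratio = lr * sqrt(||param||^2 / ||trust_ratio_div||^2), falling back to lr
//   when either norm is zero, then param -= ratio * trust_ratio_div.
// The main loop is vectorized by VecSize with a scalar tail, and the fp32
// master copy of the parameter is kept in sync when HasMasterParam is true.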
template <typename ParamT, bool HasMasterParam, bool NeedUpdateBetaPow,
int VecSize>
struct LambUpdateParamAndBetaPowsFunctor {
DEVICE void operator()(
int tensor_id, int chunk_id, int offset, int size,
LambParamHelper<ParamT, HasMasterParam> param_helper,
const MasterT<ParamT> *trust_ratio_div, const MasterT<ParamT> *lr,
const MasterT<ParamT> *param_square_norm,
const MasterT<ParamT> *trust_ratio_div_square_norm, const bool *found_inf,
LambBetaPowUpdateOnceHelper<MasterT<ParamT>, NeedUpdateBetaPow>
betapow_helper) const {
if (*found_inf) return;
using MT = MasterT<ParamT>;
MT p_square_norm = param_square_norm[tensor_id];
MT t_square_norm = trust_ratio_div_square_norm[tensor_id];
MT lr_value = *lr;
MT ratio = (p_square_norm != static_cast<MT>(0) &&
t_square_norm != static_cast<MT>(0)
? lr_value * sqrtf(p_square_norm / t_square_norm)
: lr_value);
int i;
int stride = blockDim.x * VecSize;
ParamT *param = param_helper.ParamPtr() + offset;
MT *master_param = HasMasterParam ? param_helper.MasterParamPtr() + offset
: param_helper.MasterParamPtr();
trust_ratio_div += offset;
for (i = threadIdx.x * VecSize; i + VecSize <= size; i += stride) {
platform::AlignedVector<MT, VecSize> trust_ratio_div_vec;
platform::Load(trust_ratio_div + i, &trust_ratio_div_vec);
if (HasMasterParam) {
platform::AlignedVector<MT, VecSize> master_param_vec;
platform::Load(master_param + i, &master_param_vec);
platform::AlignedVector<ParamT, VecSize> param_vec;
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
MT p = master_param_vec[j] - ratio * trust_ratio_div_vec[j];
master_param_vec[j] = p;
param_vec[j] = static_cast<ParamT>(p);
}
platform::Store(master_param_vec, master_param + i);
platform::Store(param_vec, param + i);
} else {
platform::AlignedVector<ParamT, VecSize> param_vec;
platform::Load(param + i, ¶m_vec);
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
MT p = static_cast<MT>(param_vec[j]) - ratio * trust_ratio_div_vec[j];
param_vec[j] = static_cast<ParamT>(p);
}
platform::Store(param_vec, param + i);
}
}
for (; i < size; ++i) {
if (HasMasterParam) {
MT p = master_param[i] - ratio * trust_ratio_div[i];
master_param[i] = p;
param[i] = static_cast<ParamT>(p);
} else {
MT p = static_cast<MT>(param[i]) - ratio * trust_ratio_div[i];
param[i] = static_cast<ParamT>(p);
}
}
if (NeedUpdateBetaPow && threadIdx.x == 0 && blockIdx.x == 0) {
betapow_helper.UpdateBetaPows();
}
}
};
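// Launches the functor above over all chunks of all sharded parameters.
// beta1pow/beta2pow are only passed to the first launch (launch_n == 0); later
// launches use the NeedUpdate=false helper so the beta pows are not updated
// more than once per step.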
// TODO(zengjinle): which block_dim and chunk_size would be better?
template <typename ParamT, int MaxTensorNumPerLaunch = 160,
int MaxChunkNumPerLaunch = 780>
static void MultiTensorUpdateLambParamAndBetaPows(
const platform::CUDADeviceContext &dev_ctx, const int *offsets, int n,
const MasterT<ParamT> *trust_ratio_div, const MasterT<ParamT> *lr,
const MasterT<ParamT> *param_square_norm,
const MasterT<ParamT> *trust_ratio_div_square_norm, const bool *found_inf,
ParamT *param, MasterT<ParamT> *master_param, MasterT<ParamT> *beta1pow,
MasterT<ParamT> *beta2pow, MasterT<ParamT> beta1, MasterT<ParamT> beta2,
int chunk_size = 65536) {
constexpr bool kHasMasterParam =
!(std::is_same<ParamT, MasterT<ParamT>>::value);
bool has_beta_pow = (beta1pow != nullptr);
if (has_beta_pow) {
PADDLE_ENFORCE_NOT_NULL(beta2pow, platform::errors::InvalidArgument(
"Beta2Pow should not be nullptr."));
} else {
PADDLE_ENFORCE_EQ(beta2pow, nullptr, platform::errors::InvalidArgument(
"Beta2Pow should be nullptr."));
}
const int block_dim = 512;
int vec_size = 8;
for (int i = 0; i < n; ++i) {
int offset = offsets[i] - offsets[0];
vec_size =
::min(vec_size, GetChunkedVecSize(param + offset, chunk_size));
if (kHasMasterParam) {
vec_size = ::min(vec_size,
GetChunkedVecSize(master_param + offset, chunk_size));
}
vec_size = ::min(
vec_size, GetChunkedVecSize(trust_ratio_div + offset, chunk_size));
}
VLOG(1) << __func__ << " VecSize = " << vec_size;
constexpr auto kNumTensor = MaxTensorNumPerLaunch;
constexpr auto kNumChunk = MaxChunkNumPerLaunch;
auto stream = dev_ctx.stream();
#define PD_LAUNCH_MULTI_TENSOR_UPDATE_PARAM_BETAPOW(__has_beta_pow) \
do { \
using FunctorT = \
LambUpdateParamAndBetaPowsFunctor<ParamT, kHasMasterParam, \
__has_beta_pow, kVecSize>; \
LambParamHelper<ParamT, kHasMasterParam> param_helper(param, \
master_param); \
LambBetaPowUpdateOnceHelper<MasterT<ParamT>, __has_beta_pow> \
betapow_helper(beta1pow, beta2pow, beta1, beta2); \
launcher.Launch(FunctorT(), param_helper, trust_ratio_div, lr, \
param_square_norm, trust_ratio_div_square_norm, found_inf, \
betapow_helper); \
} while (0)
#define PD_LAUNCH_VEC_MULTI_TENSOR_UPDATE_PARAM_BETAPOW_CASE \
do { \
auto callback = [&]( \
const MultiTensorLauncher<kNumTensor, kNumChunk> &launcher, \
int launch_n) { \
if (has_beta_pow && launch_n == 0) { \
PD_LAUNCH_MULTI_TENSOR_UPDATE_PARAM_BETAPOW(true); \
beta1pow = nullptr; \
beta2pow = nullptr; \
} else { \
PD_LAUNCH_MULTI_TENSOR_UPDATE_PARAM_BETAPOW(false); \
} \
}; \
MultiTensorApplyWithCallback<kNumTensor, kNumChunk>( \
stream, offsets, n, chunk_size, block_dim, callback); \
} while (0)
PD_VEC_LAUNCH_KERNEL(vec_size,
PD_LAUNCH_VEC_MULTI_TENSOR_UPDATE_PARAM_BETAPOW_CASE);
#undef PD_LAUNCH_MULTI_TENSOR_UPDATE_PARAM_BETAPOW
#undef PD_LAUNCH_VEC_MULTI_TENSOR_UPDATE_PARAM_BETAPOW_CASE
}
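// NCCL helpers. CreatePreMulScaleOpIfSupported tries to build a
// ncclRedOpCreatePreMulSum reduction op (available since NCCL 2.11) so that
// gradient scaling can be fused into the reduce-scatter; otherwise the scale
// is applied by a separate kernel before communication.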
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
static bool CreatePreMulScaleOpIfSupported(ncclDataType_t dtype,
ncclComm_t comm, const void *scale,
ncclRedOp_t *op) {
#if NCCL_VERSION_CODE >= 21100
int ver;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGetVersion(&ver));
if (ver >= 21100) {
VLOG(10) << "ncclRedOpCreatePreMulSum is supported.";
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclRedOpCreatePreMulSum(
op, const_cast<void *>(scale), dtype, ncclScalarDevice, comm));
return true;
}
#endif
VLOG(10) << "ncclRedOpCreatePreMulSum is not supported.";
return false;
}
template <typename T1, typename T2>
static void LaunchScaleKernel(const platform::CUDADeviceContext &dev_ctx,
const T1 *x, const T2 *scale, T1 *y, int n,
gpuStream_t stream) {
int vec_size = ::min(GetChunkedVecSize(x, 0), GetChunkedVecSize(y, 0));
auto config = platform::GetGpuLaunchConfig1D(dev_ctx, n, vec_size);
#define PD_LAMB_VEC_SCALE_KERNEL_CASE \
do { \
hipLaunchKernelGGL(( ScaleCUDAKernel<T1, T2, kVecSize>), dim3(config.block_per_grid), \
config.thread_per_block, 0, stream, \
x, scale, y, n); \
} while (0)
PD_VEC_LAUNCH_KERNEL(vec_size, PD_LAMB_VEC_SCALE_KERNEL_CASE);
#undef PD_LAMB_VEC_SCALE_KERNEL_CASE
}
template <typename T>
static void NCCLReduceScatterWithScale(
const T *sendbuff, T *recvbuff, size_t recvcount, size_t nranks,
ncclComm_t comm, gpuStream_t stream,
const platform::CUDADeviceContext &dev_ctx, const T *scale = nullptr) {
static_assert(std::is_same<T, float>::value ||
std::is_same<T, platform::float16>::value,
"T must be either float32 or float16.");
if (recvcount == 0) return;
if (comm == nullptr) {
if (scale != nullptr) {
PADDLE_ENFORCE_EQ(nranks, 1,
platform::errors::InvalidArgument(
"nranks must be 1 when scale != nullptr."));
LaunchScaleKernel(dev_ctx, sendbuff, scale, recvbuff, recvcount * nranks,
stream);
}
return;
}
ncclRedOp_t op = ncclSum;
ncclDataType_t dtype =
std::is_same<T, float>::value ? ncclFloat32 : ncclFloat16;
bool should_destroy_op =
scale && CreatePreMulScaleOpIfSupported(dtype, comm, scale, &op);
memory::Buffer buffer(dev_ctx.GetPlace());
if (scale && !should_destroy_op) {
size_t numel = recvcount * nranks;
T *new_sendbuff = buffer.Alloc<T>(numel);
LaunchScaleKernel(dev_ctx, sendbuff, scale, new_sendbuff, numel, stream);
sendbuff = new_sendbuff;
}
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclReduceScatter(
sendbuff, recvbuff, recvcount, dtype, op, comm, stream));
#if NCCL_VERSION_CODE >= 21100
if (should_destroy_op) {
VLOG(10) << "ncclRedOpDestroy starts";
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclRedOpDestroy(op, comm));
VLOG(10) << "ncclRedOpDestroy ends";
}
#endif
}
#endif
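// Thin wrapper over hipcub::DeviceReduce::Reduce using the usual two-phase cub
// pattern: the first call only queries the temporary storage size, the storage
// is allocated from `buffer`, and the second call performs the reduction.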
template <typename InputIteratorT, typename OutputIteratorT, typename ReduceOpT,
typename T>
static void CubDeviceReduce(InputIteratorT d_in, OutputIteratorT d_out,
int num_items, ReduceOpT reduction_op, T init,
gpuStream_t stream, memory::Buffer *buffer) {
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
PADDLE_ENFORCE_GPU_SUCCESS(
hipcub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out,
num_items, reduction_op, init, stream));
d_temp_storage = buffer->Alloc<void>(temp_storage_bytes);
VLOG(10) << "hipcub::DeviceReduce::Reduce needs " << temp_storage_bytes
<< " byte(s), ptr = " << d_temp_storage;
PADDLE_ENFORCE_GPU_SUCCESS(
hipcub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out,
num_items, reduction_op, init, stream));
}
template <typename T>
static void GetSquareGradNormImpl(const T *grad, int n, float *square_norm,
gpuStream_t stream,
memory::Buffer *cub_tmp_buffer) {
using Iterator =
hipcub::TransformInputIterator<float, SquareFunctor<T>, const T *>;
Iterator iter(grad, SquareFunctor<T>());
CubDeviceReduce(iter, square_norm, n, hipcub::Sum(), static_cast<float>(0),
stream, cub_tmp_buffer);
}
// square_norm is of length 2 at least
static void GetSquareGradNorm(const float *fp32_grad, int fp32_numel,
const platform::float16 *fp16_grad,
int fp16_numel, float *square_norm,
gpuStream_t stream,
memory::Buffer *cub_tmp_buffer) {
VLOG(10) << "GetSquareGradNorm starts, fp32_numel = " << fp32_numel
<< " , fp16_numel = " << fp16_numel;
if (fp32_numel > 0) {
GetSquareGradNormImpl(fp32_grad, fp32_numel, square_norm, stream,
cub_tmp_buffer);
VLOG(10) << "FP32 square L2-Norm: "
<< FlattenToString(square_norm, 1, cub_tmp_buffer->GetPlace());
}
if (fp16_numel > 0) {
float *fp16_square_norm = fp32_numel > 0 ? square_norm + 1 : square_norm;
GetSquareGradNormImpl(fp16_grad, fp16_numel, fp16_square_norm, stream,
cub_tmp_buffer);
VLOG(10) << "FP16 square L2-Norm: "
<< FlattenToString(fp16_square_norm, 1,
cub_tmp_buffer->GetPlace());
if (fp32_numel > 0) {
hipLaunchKernelGGL(( AddToCUDAKernel), dim3(1), dim3(1), 0, stream, fp16_square_norm, square_norm);
VLOG(10) << "FP32+FP16 square L2-Norm: "
<< FlattenToString(square_norm, 1, cub_tmp_buffer->GetPlace());
}
}
VLOG(10) << "GetSquareGradNorm ends, fp32_numel = " << fp32_numel
<< " , fp16_numel = " << fp16_numel;
}
template <typename T>
std::string NumToString(T x) {
std::stringstream ss;
ss << x;
return ss.str();
}
template <typename T>
static std::string GetMinMaxStr(const T *x, size_t n,
const platform::Place &place) {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(place), true,
platform::errors::InvalidArgument("Only support CUDAPlace currently."));
auto *dev_ctx = static_cast<platform::CUDADeviceContext *>(
platform::DeviceContextPool::Instance().Get(place));
auto stream = dev_ctx->stream();
memory::Buffer ret_buffer(place);
T *ret = ret_buffer.Alloc<T>(2);
if (n > 0) {
memory::Buffer cub_buffer(place);
CubDeviceReduce(x, ret, n, hipcub::Min(), std::numeric_limits<T>::max(),
stream, &cub_buffer);
CubDeviceReduce(x, ret + 1, n, hipcub::Max(), std::numeric_limits<T>::lowest(),
stream, &cub_buffer);
T ret_cpu[2];
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(&ret_cpu[0], ret, 2 * sizeof(T),
hipMemcpyDeviceToHost, stream));
PADDLE_ENFORCE_GPU_SUCCESS(hipStreamSynchronize(stream));
#else
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(&ret_cpu[0], ret, 2 * sizeof(T),
hipMemcpyDeviceToHost, stream));
PADDLE_ENFORCE_GPU_SUCCESS(hipStreamSynchronize(stream));
#endif
return std::string("{\"min\": ") + NumToString(ret_cpu[0]) +
" , \"max\": " + NumToString(ret_cpu[1]) + "}";
} else {
return "{\"min\": null, \"max\": null}";
}
}
struct VisitDTypeFunctor {
VisitDTypeFunctor(const framework::Tensor *x, std::string *s)
: x_(x), s_(s) {}
template <typename T>
void apply() const {
*s_ = GetMinMaxStr<T>(x_->template data<T>(), x_->numel(), x_->place());
}
private:
const framework::Tensor *x_;
std::string *s_;
};
static std::string GetMinMaxStr(const framework::Tensor *x) {
if (x == nullptr) return "null";
if (!x->IsInitialized()) return "not_inited";
if (!platform::is_gpu_place(x->place())) return "CPUTensor";
std::string str;
VisitDTypeFunctor functor(x, &str);
phi::VisitDataType(x->dtype(), functor);
return str;
}
static void PrintAllMinMaxRange(const framework::ExecutionContext &ctx,
bool only_inputs) {
if (!VLOG_IS_ON(1)) return;
for (const auto &pair : ctx.GetOp().Inputs()) {
const auto &key = pair.first;
const auto tensors = ctx.MultiInput<framework::Tensor>(key);
size_t n = tensors.size();
for (size_t i = 0; i < n; ++i) {
VLOG(1) << "Input(" << key + ")[" << i << "] = " << pair.second[i]
<< " , " << GetMinMaxStr(tensors[i]);
}
}
if (only_inputs) return;
for (const auto &pair : ctx.GetOp().Outputs()) {
const auto &key = pair.first;
const auto tensors = ctx.MultiOutput<framework::Tensor>(key);
size_t n = tensors.size();
for (size_t i = 0; i < n; ++i) {
VLOG(1) << "Output(" << key + ")[" << i << "] = " << pair.second[i]
<< " , " << GetMinMaxStr(tensors[i]);
}
}
}
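// Checks the fp32/fp16 gradient shards for NaN/Inf. Each dtype reduces an
// IsNanInfFunctor-transformed iterator with OrFunctor, and the boolean flags
// are then folded into *nan_inf_flag as a NaN sentinel by the
// SetNanInfValueCUDAKernel*Flag kernels.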
static void CheckHasNanInfGrad(const float *fp32_grad, int fp32_numel,
const platform::float16 *fp16_grad,
int fp16_numel, float *nan_inf_flag,
gpuStream_t stream,
memory::Buffer *cub_tmp_buffer) {
bool *fp32_has_nan_inf = nullptr;
bool *fp16_has_nan_inf = nullptr;
if (fp32_numel > 0) {
fp32_has_nan_inf = reinterpret_cast<bool *>(nan_inf_flag + 1);
hipcub::TransformInputIterator<bool, IsNanInfFunctor<float>, const float *>
iter(fp32_grad, IsNanInfFunctor<float>());
CubDeviceReduce(iter, fp32_has_nan_inf, fp32_numel, OrFunctor(), false,
stream, cub_tmp_buffer);
}
if (fp16_numel > 0) {
fp16_has_nan_inf = reinterpret_cast<bool *>(nan_inf_flag + 1) + 1;
hipcub::TransformInputIterator<bool, IsNanInfFunctor<platform::float16>,
const platform::float16 *>
iter(fp16_grad, IsNanInfFunctor<platform::float16>());
CubDeviceReduce(iter, fp16_has_nan_inf, fp16_numel, OrFunctor(), false,
stream, cub_tmp_buffer);
}
if (fp32_has_nan_inf && fp16_has_nan_inf) {
hipLaunchKernelGGL(( SetNanInfValueCUDAKernelTwoFlag), dim3(1), dim3(1), 0, stream,
fp32_has_nan_inf, fp16_has_nan_inf, nan_inf_flag);
} else if (fp32_has_nan_inf) {
hipLaunchKernelGGL(( SetNanInfValueCUDAKernelOneFlag), dim3(1), dim3(1), 0, stream, fp32_has_nan_inf,
nan_inf_flag);
} else {
hipLaunchKernelGGL(( SetNanInfValueCUDAKernelOneFlag), dim3(1), dim3(1), 0, stream, fp16_has_nan_inf,
nan_inf_flag);
}
}
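// The kernel below implements one step of distributed fused LAMB. All fp32
// parameters (plus the fp16 master weights) live in one fused FP32 buffer and
// all fp16 parameters in one fused FP16 buffer; gradients are reduce-scattered
// across ranks, moments and trust_ratio_div are computed on the local shard,
// and the updated parameter shards are all-gathered at the end. The numbered
// "Step" comments inside Compute() follow this flow.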
template <typename T>
class DistributedFusedLambOpKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto stream = dev_ctx.stream();
auto place = dev_ctx.GetPlace();
// Step 1: Get fp16 param and grad tensors
int64_t fp16_numel;
auto *fp16_param = GetSameInOutTensorPtr<platform::float16, true>(
ctx, place, "FP16FusedParam", "FP16FusedParamOut", &fp16_numel);
bool has_fp16_param = (fp16_numel > 0);
const platform::float16 *fp16_grad = nullptr;
if (has_fp16_param) {
fp16_grad = GetInputTensorPtr<platform::float16>(ctx, "FP16FusedGrad");
} else {
fp16_param = nullptr;
}
// Step 2: Get fp32 param and grad tensors
int64_t fp32_numel = 0;
auto *fp32_param = GetSameInOutTensorPtr<float, true>(
ctx, place, "FP32FusedParam", "FP32FusedParamOut", &fp32_numel);
PADDLE_ENFORCE_GE(fp32_numel, fp16_numel,
platform::errors::InvalidArgument(
"The element number in FP32FusedParam should be not "
"less than FP16FusedParam."));
fp32_numel -= fp16_numel; // the FP32FusedParam contains fp32 param and
// fp16 master weight
bool has_fp32_param = (fp32_numel > 0);
const float *fp32_grad = nullptr;
if (has_fp32_param) {
fp32_grad = GetInputTensorPtr<float>(ctx, "FP32FusedGrad");
} else {
PADDLE_ENFORCE_EQ(
has_fp16_param, true,
platform::errors::InvalidArgument(
"Either FP32FusedGrad or FP16FusedGrad cannot be NULL."));
}
auto numel = fp32_numel + fp16_numel;
VLOG(1) << "numel = " << numel << " , fp32_numel = " << fp32_numel
<< " , fp16_numel = " << fp16_numel;
// The NVIDIA cub library does not support number > INT32_MAX
PADDLE_ENFORCE_LE(numel, std::numeric_limits<int>::max(),
platform::errors::Unimplemented(
"Too many parameter number. Only <= %d is supported.",
std::numeric_limits<int>::max()));
// Step 3: Get ParamInfo
const auto *param_info_tensor = GetInputTensorPtr<int>(ctx, "ParamInfo");
auto fp32_local_start_idx = param_info_tensor[0];
auto fp32_local_param_num = param_info_tensor[1];
auto fp32_global_param_num = param_info_tensor[2];
auto fp32_weight_decay_end_idx = param_info_tensor[3];
auto fp16_local_start_idx = param_info_tensor[4];
auto fp16_local_param_num = param_info_tensor[5];
auto fp16_global_param_num = param_info_tensor[6];
auto fp16_weight_decay_end_idx = param_info_tensor[7];
auto local_param_num = fp32_local_param_num + fp16_local_param_num;
auto param_num = fp32_global_param_num + fp16_global_param_num;
PADDLE_ENFORCE_LE(local_param_num, param_num,
platform::errors::InvalidArgument(
"The local parameter number should not exceed the "
"global parameter number."));
VLOG(1) << "local_param_num = " << local_param_num
<< " , global_param_num = " << param_num
<< " , fp32_local_start_idx = " << fp32_local_start_idx
<< " , fp32_local_param_num = " << fp32_local_param_num
<< " , fp32_global_param_num = " << fp32_global_param_num
<< " , fp16_local_start_idx = " << fp16_local_start_idx
<< " , fp16_local_param_num = " << fp16_local_param_num
<< " , fp16_global_param_num = " << fp16_global_param_num;
// Step 4: Get LearningRate, Moment1, Moment2, Beta1Pow, Beta2Pow,
// GlobalScale, FoundInf
const auto *global_scale = GetInputTensorPtr<float>(ctx, "GlobalScale");
const auto *lr = GetInputTensorPtr<float>(ctx, "LearningRate");
int64_t partial_numel = 0;
auto *moment1 = GetSameInOutTensorPtr<float>(ctx, place, "Moment1",
"Moment1Out", &partial_numel);
PADDLE_ENFORCE_EQ(numel % partial_numel, 0,
platform::errors::InvalidArgument(
"The total parameter number %d should be divided "
"exactly by the element number %d of Moment1.",
numel, partial_numel));
int64_t num_devices = numel / partial_numel;
VLOG(1) << "num_devices = " << num_devices
<< " , partial_numel = " << partial_numel;
PADDLE_ENFORCE_EQ(fp32_numel % num_devices, 0,
platform::errors::InvalidArgument(
"The fp32 parameter number %d should be divided "
"exactly by the device number %d.",
fp32_numel, num_devices));
PADDLE_ENFORCE_EQ(fp16_numel % num_devices, 0,
platform::errors::InvalidArgument(
"The fp16 parameter number %d should be divided "
"exactly by the device number %d.",
fp16_numel, num_devices));
auto *moment2 =
GetSameInOutTensorPtr<float>(ctx, place, "Moment2", "Moment2Out");
auto *beta1pow =
GetSameInOutTensorPtr<float>(ctx, place, "Beta1Pow", "Beta1PowOut");
auto *beta2pow =
GetSameInOutTensorPtr<float>(ctx, place, "Beta2Pow", "Beta2PowOut");
auto *found_inf_t = ctx.Output<framework::Tensor>("FoundInf");
found_inf_t->Resize({1});
auto *found_inf = found_inf_t->mutable_data<bool>(place);
// Step 5: Get attributes weight_decay, beta1, beta2, epsilon,
// max_grad_norm, ring_id,
// use_master_param_norm, is_grad_scaled_by_nranks
auto weight_decay = ctx.Attr<float>("weight_decay");
auto beta1 = ctx.Attr<float>("beta1");
auto beta2 = ctx.Attr<float>("beta2");
auto epsilon = ctx.Attr<float>("epsilon");
auto max_global_grad_norm = ctx.Attr<float>("max_global_grad_norm");
auto clip_after_allreduce = ctx.Attr<bool>("clip_after_allreduce");
auto ring_id = ctx.Attr<int>("ring_id");
auto use_master_param_norm = ctx.Attr<bool>("use_master_param_norm");
auto is_grad_scaled_by_nranks = ctx.Attr<bool>("is_grad_scaled_by_nranks");
VLOG(10) << "max_global_grad_norm = " << max_global_grad_norm
<< " , clip_after_allreduce = " << clip_after_allreduce
<< " , use_master_param_norm = " << use_master_param_norm
<< " , is_grad_scaled_by_nranks = " << is_grad_scaled_by_nranks;
// Step 6: allreduce + global norm gradient clip
int rank = 0;
ncclComm_t comm = nullptr;
if (num_devices > 1) {
auto *nccl_comm_handle =
platform::NCCLCommContext::Instance().Get(ring_id, place);
comm = nccl_comm_handle->comm();
rank = nccl_comm_handle->rank();
}
memory::Buffer grad_norm_square_buffer(place);
auto *fp32_square_grad_norm = grad_norm_square_buffer.Alloc<float>(2);
memory::Buffer cub_tmp_buffer(place);
memory::Buffer sum_grad_buffer(place);
float *fp32_sum_grad;
platform::float16 *fp16_sum_grad;
auto fp32_numel_each_device = fp32_numel / num_devices;
auto fp16_numel_each_device = fp16_numel / num_devices;
if (num_devices > 1 ||
(max_global_grad_norm > 0 && !clip_after_allreduce)) {
auto ptr = sum_grad_buffer.Alloc<uint8_t>(
fp32_numel_each_device * sizeof(float) +
fp16_numel_each_device * sizeof(platform::float16));
fp32_sum_grad = has_fp32_param ? reinterpret_cast<float *>(ptr) : nullptr;
fp16_sum_grad = has_fp16_param
? reinterpret_cast<platform::float16 *>(
ptr + fp32_numel_each_device * sizeof(float))
: nullptr;
} else {
// NOTE: The const_cast here is harmless: fp32_sum_grad and fp16_sum_grad
// are never modified when num_devices == 1. Without the const_cast, the
// code below would need extra if-else branches for the num_devices > 1
// case, so we cast here to keep the following code uniform.
fp32_sum_grad = const_cast<float *>(fp32_grad);
fp16_sum_grad = const_cast<platform::float16 *>(fp16_grad);
}
float rescale_grad = 1.0f;
if (!is_grad_scaled_by_nranks) {
rescale_grad /= num_devices;
}
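// Global-norm gradient clipping. With clip_after_allreduce the shards are
// reduce-scattered first and the norm is computed on the reduced shard;
// otherwise the clip scale is computed from the local (un-reduced) gradients
// and fused into the reduce-scatter, after which max_global_grad_norm is set
// to 0 so the moment-update kernel does not clip a second time.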
if (max_global_grad_norm > 0) {
if (clip_after_allreduce) {
// (1) ReduceScater first
NCCLReduceScatterWithScale(fp32_grad, fp32_sum_grad,
fp32_numel_each_device, num_devices, comm,
stream, dev_ctx);
NCCLReduceScatterWithScale(fp16_grad, fp16_sum_grad,
fp16_numel_each_device, num_devices, comm,
stream, dev_ctx);
// (2) Calculate the global grad norm
GetSquareGradNorm(fp32_sum_grad, fp32_numel_each_device, fp16_sum_grad,
fp16_numel_each_device, fp32_square_grad_norm, stream,
&cub_tmp_buffer);
VLOG(1) << "Grad square norm before all reduce: "
<< FlattenToString(fp32_square_grad_norm, 1, place);
if (num_devices > 1) {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
fp32_square_grad_norm, fp32_square_grad_norm, 1, ncclFloat32,
ncclSum, comm, stream));
}
VLOG(1) << "Grad square norm after all reduce: "
<< FlattenToString(fp32_square_grad_norm, 1, place);
} else {
// (1) Calculate the local grad norm
GetSquareGradNorm(fp32_grad, fp32_numel, fp16_grad, fp16_numel,
fp32_square_grad_norm, stream, &cub_tmp_buffer);
VLOG(1) << "Grad square norm before all reduce: "
<< FlattenToString(fp32_square_grad_norm, 1, place);
// (2) Calculate the gradient clip scale
float *fp32_scale = nullptr;
platform::float16 *fp16_scale = nullptr;
if (has_fp32_param && has_fp16_param) {
auto *ptr = cub_tmp_buffer.Alloc<uint8_t>(sizeof(float) +
sizeof(platform::float16));
fp32_scale = reinterpret_cast<float *>(ptr);
fp16_scale =
reinterpret_cast<platform::float16 *>(ptr + sizeof(float));
} else if (has_fp32_param) {
fp32_scale = cub_tmp_buffer.Alloc<float>(1);
} else {
fp16_scale = cub_tmp_buffer.Alloc<platform::float16>(1);
}
float clip_scale = 1.0f;
if (is_grad_scaled_by_nranks) {
clip_scale *= num_devices;
}
hipLaunchKernelGGL(( CalcGradNormClipBeforeAllReduceScale<
float, platform::float16>), dim3(1), dim3(1), 0, stream,
global_scale, max_global_grad_norm, fp32_square_grad_norm,
fp32_scale, fp16_scale, clip_scale);
if (fp32_scale) {
VLOG(1) << "Grad scale: " << FlattenToString(fp32_scale, 1, place);
} else {
VLOG(1) << "Grad scale: " << FlattenToString(fp16_scale, 1, place);
}
if (num_devices > 1) {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
fp32_square_grad_norm, fp32_square_grad_norm, 1, ncclFloat32,
ncclSum, comm, stream));
}
// (3) Do ReduceScatter with scale
NCCLReduceScatterWithScale(fp32_grad, fp32_sum_grad,
fp32_numel_each_device, num_devices, comm,
stream, dev_ctx, fp32_scale);
NCCLReduceScatterWithScale(fp16_grad, fp16_sum_grad,
fp16_numel_each_device, num_devices, comm,
stream, dev_ctx, fp16_scale);
// (4) mark max_global_grad_norm as 0, meaning that clip has been
// already performed
max_global_grad_norm = 0;
}
} else {
NCCLReduceScatterWithScale(fp32_grad, fp32_sum_grad,
fp32_numel_each_device, num_devices, comm,
stream, dev_ctx);
NCCLReduceScatterWithScale(fp16_grad, fp16_sum_grad,
fp16_numel_each_device, num_devices, comm,
stream, dev_ctx);
CheckHasNanInfGrad(fp32_sum_grad, fp32_numel_each_device, fp16_sum_grad,
fp16_numel_each_device, fp32_square_grad_norm, stream,
&cub_tmp_buffer);
if (num_devices > 1) {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
fp32_square_grad_norm, fp32_square_grad_norm, 1, ncclFloat32,
ncclSum, comm, stream));
}
max_global_grad_norm = 0;
}
VLOG(10) << "ReduceScatter done";
// Step 7: update moment1 and moment2, and calculate the trust_ratio_div
auto *fused_offsets_t = ctx.Input<framework::Tensor>("FusedParamOffsets");
auto *fused_offsets = fused_offsets_t->data<int>();
auto *fp32_partial_fused_offsets_t =
ctx.Input<framework::Tensor>("FP32ShardFusedParamOffsets");
const auto *fp32_partial_fused_offsets =
fp32_partial_fused_offsets_t->data<int>();
auto *fp16_partial_fused_offsets_t =
ctx.Input<framework::Tensor>("FP16ShardFusedParamOffsets");
const auto *fp16_partial_fused_offsets =
fp16_partial_fused_offsets_t->data<int>();
VLOG(1) << "FusedParamOffsets: "
<< FlattenToString(fused_offsets, fused_offsets_t->numel(),
fused_offsets_t->place());
VLOG(1) << "FP32ShardFusedParamOffsets: "
<< FlattenToString(fp32_partial_fused_offsets,
fp32_partial_fused_offsets_t->numel(),
fp32_partial_fused_offsets_t->place());
VLOG(1) << "FP16ShardFusedParamOffsets: "
<< FlattenToString(fp16_partial_fused_offsets,
fp16_partial_fused_offsets_t->numel(),
fp16_partial_fused_offsets_t->place());
memory::Buffer trust_ratio_div_buffer(place);
auto *trust_ratio_div = trust_ratio_div_buffer.Alloc<float>(partial_numel);
auto fp32_offset = rank * fp32_numel_each_device;
auto fp16_offset = rank * fp16_numel_each_device;
if (has_fp32_param) {
VLOG(10) << "Update FP32 Moment and TrustRatioDiv starts";
MultiTensorUpdateLambMomentAndTrustRatioDiv(
dev_ctx, fp32_partial_fused_offsets, fp32_local_param_num,
fp32_param + fp32_offset, fp32_sum_grad, fp32_square_grad_norm,
global_scale, beta1pow, beta2pow, moment1, moment2, trust_ratio_div,
found_inf, weight_decay, fp32_weight_decay_end_idx, beta1, beta2,
epsilon, max_global_grad_norm, rescale_grad);
VLOG(10) << "Update FP32 Moment and TrustRatioDiv done";
}
float *master_param = nullptr;
if (has_fp16_param) {
master_param = fp32_param + fp32_numel;
VLOG(10) << "Update FP16 Moment and TrustRatioDiv starts";
auto tmp_found_inf = has_fp32_param ? nullptr : found_inf;
MultiTensorUpdateLambMomentAndTrustRatioDiv(
dev_ctx, fp16_partial_fused_offsets, fp16_local_param_num,
master_param + fp16_offset, fp16_sum_grad, fp32_square_grad_norm,
global_scale, beta1pow, beta2pow, moment1 + fp32_numel_each_device,
moment2 + fp32_numel_each_device,
trust_ratio_div + fp32_numel_each_device, tmp_found_inf, weight_decay,
fp16_weight_decay_end_idx, beta1, beta2, epsilon,
max_global_grad_norm, rescale_grad);
VLOG(10) << "Update FP16 Moment and TrustRatioDiv done";
}
VLOG(10) << "Update Moment and TrustRatioDiv done hehahaha";
// Step 8: calculate L2-Norm square of parameter and trust_ratio_div
memory::Buffer square_norm_buffer(place);
auto *param_square_norm = square_norm_buffer.Alloc<float>(2 * param_num);
auto *trust_ratio_div_square_norm = param_square_norm + param_num;
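// param_square_norm and trust_ratio_div_square_norm share one buffer of
// 2 * param_num floats so that, when master-param norms are used, a single
// ncclAllReduce over the tail of the buffer can combine both kinds of norms.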
if (num_devices > 1) {
if (use_master_param_norm) {
FillZeroWithPtr(param_square_norm + fp32_global_param_num,
2 * param_num - fp32_global_param_num, stream);
} else {
FillZeroWithPtr(trust_ratio_div_square_norm, param_num, stream);
}
}
MultiTensorL2Norm(place, stream, fp32_param, fused_offsets,
fp32_global_param_num, param_square_norm);
if (use_master_param_norm) {
MultiTensorL2Norm(place, stream, master_param + fp16_offset,
fp16_partial_fused_offsets, fp16_local_param_num,
param_square_norm + fp16_local_start_idx);
} else {
MultiTensorL2Norm(
place, stream, fp16_param + fused_offsets[fp16_local_start_idx] -
fused_offsets[fp32_global_param_num],
fused_offsets + fp16_local_start_idx, fp16_local_param_num,
param_square_norm + fp16_local_start_idx);
}
MultiTensorL2Norm(place, stream, trust_ratio_div,
fp32_partial_fused_offsets, fp32_local_param_num,
trust_ratio_div_square_norm + fp32_local_start_idx);
MultiTensorL2Norm(place, stream, trust_ratio_div + fp32_numel_each_device,
fp16_partial_fused_offsets, fp16_local_param_num,
trust_ratio_div_square_norm + fp16_local_start_idx);
VLOG(1) << "TrustRatioDiv L2-Norm before allreduce: "
<< FlattenToString(trust_ratio_div_square_norm, param_num, place);
if (num_devices > 1) {
if (use_master_param_norm) {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
param_square_norm + fp32_global_param_num,
param_square_norm + fp32_global_param_num,
2 * param_num - fp32_global_param_num, ncclFloat32, ncclSum, comm,
stream));
} else {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
trust_ratio_div_square_norm, trust_ratio_div_square_norm, param_num,
ncclFloat32, ncclSum, comm, stream));
}
VLOG(10) << "ncclAllReduce done";
}
LogParamAndTrustRatioDivSquareNorm<1>(ctx, param_square_norm,
trust_ratio_div_square_norm);
VLOG(10) << "Calculate L2-Norm of Param and TrustRatioDiv done";
// Step 9: update parameter, beta1pow, beta2pow. All gather parameters.
if (has_fp32_param) {
MultiTensorUpdateLambParamAndBetaPows<float>(
dev_ctx, fp32_partial_fused_offsets, fp32_local_param_num,
trust_ratio_div, lr, param_square_norm + fp32_local_start_idx,
trust_ratio_div_square_norm + fp32_local_start_idx, found_inf,
fp32_param + fp32_offset, nullptr, beta1pow, beta2pow, beta1, beta2);
if (num_devices > 1) {
// ncclAllGather
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather(
fp32_param + fp32_offset, fp32_param, fp32_numel_each_device,
ncclFloat32, comm, stream));
}
beta1pow = nullptr;
beta2pow = nullptr;
}
if (has_fp16_param) {
MultiTensorUpdateLambParamAndBetaPows<platform::float16>(
dev_ctx, fp16_partial_fused_offsets, fp16_local_param_num,
trust_ratio_div + fp32_numel_each_device, lr,
param_square_norm + fp16_local_start_idx,
trust_ratio_div_square_norm + fp16_local_start_idx, found_inf,
fp16_param + fp16_offset, master_param + fp16_offset, beta1pow,
beta2pow, beta1, beta2);
if (num_devices > 1) {
// ncclAllGather
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather(
fp16_param + fp16_offset, fp16_param, fp16_numel_each_device,
ncclFloat16, comm, stream));
}
}
VLOG(10) << "Update Param done";
VLOG(1) << "IsFinite: " << IsFinite(dev_ctx, fp32_square_grad_norm);
#else
PADDLE_THROW(platform::errors::Unimplemented(
"distributed_fused_lamb op should be used with NCCL/RCCL."));
#endif
}
};
} // namespace operators
} // namespace paddle
namespace plat = paddle::platform;
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
distributed_fused_lamb,
ops::DistributedFusedLambOpKernel<plat::CUDADeviceContext, float>);
|
0db1a92fdc2494a76bba078c06c9f5f55f95408e.cu
|
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cmath>
#include "paddle/fluid/memory/buffer.h"
#include "paddle/fluid/operators/amp/fp16_type_traits.h"
#include "paddle/fluid/operators/optimizers/cast_with_ptr.h"
#include "paddle/fluid/operators/optimizers/distributed_fused_lamb_op.h"
#include "paddle/fluid/operators/optimizers/multi_tensor_apply.h"
#include "paddle/fluid/operators/tensor_to_string.h"
#include "paddle/fluid/platform/aligned_vector.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/core/utils/data_type.h"
#ifdef __NVCC__
#include "cub/cub.cuh"
#include "math.h" // NOLINT
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
#include "math.h" // NOLINT
namespace cub = hipcub;
#endif
namespace paddle {
namespace operators {
template <typename T>
using MasterT = typename details::MPTypeTrait<T>::Type;
template <typename T>
static void FillZeroWithPtr(T *x, size_t n, gpuStream_t stream) {
static_assert(!std::is_same<T, void>::value, "T cannot be void.");
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemsetAsync(x, 0, n * sizeof(T), stream));
#else
PADDLE_ENFORCE_GPU_SUCCESS(cudaMemsetAsync(x, 0, n * sizeof(T), stream));
#endif
}
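// Multi-tensor L2 norm, pass 1: each (tensor, chunk) pair reduces its partial
// sum of squares with cub::BlockReduce and writes it to
// y[tensor_id * max_chunk_num + chunk_id]. Pass 2 below
// (MultiTensorL2NormReduceAgainCUDAKernel) sums the per-chunk partials into
// one value per tensor.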
template <typename T, int BlockDim, int VecSize>
struct L2NormFunctor {
DEVICE void operator()(int tensor_id, int chunk_id, int offset, int size,
const T *x, MasterT<T> *y, int max_chunk_num) const {
using MT = MasterT<T>;
const T *ptr = x + offset;
using BlockReduce = cub::BlockReduce<MT, BlockDim>;
__shared__ typename BlockReduce::TempStorage storage;
MT square_sum = static_cast<MT>(0);
int i;
for (i = threadIdx.x * VecSize; i + VecSize <= size;
i += (BlockDim * VecSize)) {
platform::AlignedVector<T, VecSize> tmp_vec;
platform::Load(ptr + i, &tmp_vec);
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
auto tmp = static_cast<MT>(tmp_vec[j]);
square_sum += (tmp * tmp);
}
}
for (; i < size; ++i) {
auto tmp = static_cast<MT>(ptr[i]);
square_sum += (tmp * tmp);
}
square_sum = BlockReduce(storage).Reduce(square_sum, cub::Sum());
if (threadIdx.x == 0) {
y[tensor_id * max_chunk_num + chunk_id] = square_sum;
}
}
};
template <typename InT, typename OutT, int BlockDim>
static __global__ void MultiTensorL2NormReduceAgainCUDAKernel(
const InT *x, OutT *y, int max_chunk_num) {
int tensor_id = blockIdx.x;
x += (tensor_id * max_chunk_num);
using BlockReduce = cub::BlockReduce<InT, BlockDim>;
__shared__ typename BlockReduce::TempStorage storage;
InT sum = static_cast<InT>(0);
for (int i = threadIdx.x; i < max_chunk_num; i += BlockDim) {
sum += x[i];
}
sum = BlockReduce(storage).Reduce(sum, cub::Sum());
if (threadIdx.x == 0) {
y[blockIdx.x] = static_cast<OutT>(sum);
}
}
template <typename T>
static int GetChunkedVecSize(const T *ptr, int chunk_size) {
static_assert(!std::is_same<T, void>::value, "T cannot be void.");
constexpr int max_load_bits = 128;
int valid_vec_size = max_load_bits / CHAR_BIT / sizeof(T);
auto address = reinterpret_cast<uintptr_t>(ptr);
constexpr int vec8 = alignof(platform::AlignedVector<T, 8>);
constexpr int vec4 = alignof(platform::AlignedVector<T, 4>);
constexpr int vec2 = alignof(platform::AlignedVector<T, 2>);
chunk_size *= sizeof(T);
if (address % vec8 == 0 && chunk_size % vec8 == 0) {
return std::min(8, valid_vec_size);
} else if (address % vec4 == 0 && chunk_size % vec4 == 0) {
return std::min(4, valid_vec_size);
} else if (address % vec2 == 0 && chunk_size % vec2 == 0) {
return std::min(2, valid_vec_size);
} else {
return 1;
}
}
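// GetChunkedVecSize above returns the widest vector width (8/4/2/1 elements,
// at most 128 bits per load) whose alignment is satisfied by both the pointer
// address and the chunk size in bytes. PD_VEC_LAUNCH_KERNEL below maps that
// runtime value to the compile-time constant kVecSize.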
#define PD_VEC_LAUNCH_KERNEL_CASE(__vec_size, ...) \
case __vec_size: { \
constexpr int kVecSize = __vec_size; \
__VA_ARGS__; \
break; \
}
#define PD_VEC_LAUNCH_KERNEL(__vec_size, ...) \
do { \
switch (__vec_size) { \
PD_VEC_LAUNCH_KERNEL_CASE(8, __VA_ARGS__); \
PD_VEC_LAUNCH_KERNEL_CASE(4, __VA_ARGS__); \
PD_VEC_LAUNCH_KERNEL_CASE(2, __VA_ARGS__); \
PD_VEC_LAUNCH_KERNEL_CASE(1, __VA_ARGS__); \
} \
} while (0)
// TODO(zengjinle): which chunk_size is better?
template <typename InT, typename OutT, int MaxTensorNumPerLaunch = 160,
int MaxChunkNumPerLaunch = 780>
static void MultiTensorL2Norm(const platform::CUDAPlace &place,
gpuStream_t stream, const InT *x,
const int *offsets, int n, OutT *y,
int chunk_size = 65536) {
if (n <= 0) return;
constexpr int kNumTensor = MaxTensorNumPerLaunch;
constexpr int kNumChunk = MaxChunkNumPerLaunch;
constexpr int kBlockDim = 512;
int max_chunk_num = -1;
int vec_size = 8;
int total_chunk_num = 0;
for (int i = 0; i < n; ++i) {
vec_size = std::min(
vec_size, GetChunkedVecSize(x + offsets[i] - offsets[0], chunk_size));
int length = offsets[i + 1] - offsets[i];
auto tmp_chunk_num = (length + chunk_size - 1) / chunk_size;
max_chunk_num = std::max(max_chunk_num, tmp_chunk_num);
total_chunk_num += tmp_chunk_num;
}
VLOG(1) << "MultiTensorL2Norm max_chunk_num = " << max_chunk_num
<< " , total_chunk_num = " << total_chunk_num
<< " , tensor_num = " << n;
using MT = MasterT<InT>;
memory::Buffer tmp_out(place);
auto *tmp_out_ptr = tmp_out.Alloc<MT>(n * max_chunk_num);
FillZeroWithPtr(tmp_out_ptr, n * max_chunk_num, stream);
#define PD_LAUNCH_MULTI_TENSOR_APPLY_L2_NORM_KERNEL \
do { \
using FunctorT = L2NormFunctor<InT, kBlockDim, kVecSize>; \
VLOG(10) << __func__ << " " << typeid(InT).name() \
<< " VecSize = " << kVecSize; \
MultiTensorApply<FunctorT, kNumTensor, kNumChunk>( \
FunctorT(), stream, offsets, n, chunk_size, kBlockDim, x, tmp_out_ptr, \
max_chunk_num); \
} while (0)
PD_VEC_LAUNCH_KERNEL(vec_size, PD_LAUNCH_MULTI_TENSOR_APPLY_L2_NORM_KERNEL);
#undef PD_LAUNCH_MULTI_TENSOR_APPLY_L2_NORM_KERNEL
MultiTensorL2NormReduceAgainCUDAKernel<
MT, OutT, kBlockDim><<<n, kBlockDim, 0, stream>>>(tmp_out_ptr, y,
max_chunk_num);
}
template <int LogLevel>
static void LogParamAndTrustRatioDivSquareNorm(
const framework::ExecutionContext &ctx, const float *param_square_norm,
const float *trust_ratio_div_square_norm) {
if (!VLOG_IS_ON(LogLevel)) return;
auto tensors = ctx.MultiInput<framework::Tensor>("Param");
if (tensors.empty()) return;
const auto *order = ctx.Input<framework::Tensor>("ParamOrder")->data<int>();
size_t n = tensors.size();
auto place = tensors[0]->place();
auto pn_vec = ToVector(param_square_norm, n, place);
auto tn_vec = ToVector(trust_ratio_div_square_norm, n, place);
const auto &names = ctx.GetOp().Inputs("Param");
for (size_t i = 0; i < n; ++i) {
auto idx = order[i];
VLOG(LogLevel) << "Param " << tensors[idx]->dtype() << " " << names[idx]
<< " pn = " << pn_vec[i] << " , tn = " << tn_vec[i];
}
}
static bool IsFinite(const platform::CUDADeviceContext &dev_ctx,
const float *ptr) {
auto stream = dev_ctx.stream();
float cpu_value;
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(&cpu_value, ptr, sizeof(float),
hipMemcpyDeviceToHost, stream));
PADDLE_ENFORCE_GPU_SUCCESS(hipStreamSynchronize(stream));
#else
PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync(&cpu_value, ptr, sizeof(float),
cudaMemcpyDeviceToHost, stream));
PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamSynchronize(stream));
#endif
LOG(INFO) << "NAN_INF indicator value: " << cpu_value;
return isfinite(cpu_value);
}
template <typename T>
static const T *GetInputTensorPtr(const framework::ExecutionContext &ctx,
const char *in_name,
int64_t *numel = nullptr) {
const auto *in_tensor = ctx.Input<framework::Tensor>(in_name);
PADDLE_ENFORCE_NOT_NULL(in_tensor, platform::errors::InvalidArgument(
"Input(%s) cannot be NULL.", in_name));
if (in_tensor->IsInitialized()) {
if (numel) *numel = in_tensor->numel();
return in_tensor->data<T>();
} else {
if (numel) *numel = 0;
return nullptr;
}
}
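// Fetches an input/output pair such as Moment1/Moment1Out or Beta1Pow/
// Beta1PowOut and enforces that both names alias the same memory, i.e. the op
// updates its tensors in place.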
template <typename T, bool AllowNotExist = false>
static T *GetSameInOutTensorPtr(const framework::ExecutionContext &ctx,
const platform::Place &place,
const char *in_name, const char *out_name,
int64_t *numel = nullptr) {
const auto *in_tensor = ctx.Input<framework::Tensor>(in_name);
if (in_tensor == nullptr || !in_tensor->IsInitialized()) {
PADDLE_ENFORCE_EQ(AllowNotExist, true,
platform::errors::InvalidArgument(
"Input(%s) cannot be NULL.", in_name));
if (numel) *numel = 0;
return nullptr;
}
auto *out_tensor = ctx.Output<framework::Tensor>(out_name);
PADDLE_ENFORCE_NOT_NULL(in_tensor, platform::errors::InvalidArgument(
"Input(%s) cannot be NULL.", in_name));
PADDLE_ENFORCE_NOT_NULL(out_tensor,
platform::errors::InvalidArgument(
"Output(%s) cannot be NULL.", out_name));
const T *in_data = in_tensor->data<T>();
T *out_data = out_tensor->mutable_data<T>(place);
PADDLE_ENFORCE_EQ(in_data, out_data,
platform::errors::InvalidArgument(
"Input(%s) and Output(%s) must be the same Tensor.",
in_name, out_name));
if (numel) *numel = out_tensor->numel();
return out_data;
}
template <typename T>
struct SquareFunctor {
HOSTDEVICE MasterT<T> operator()(T x) const {
auto y = static_cast<MasterT<T>>(x);
return y * y;
}
};
template <typename T>
struct IsNanInfFunctor {
HOSTDEVICE bool operator()(T x) const { return !isfinite(x); }
};
struct OrFunctor {
HOSTDEVICE bool operator()(bool x, bool y) const { return x || y; }
};
struct AndFunctor {
HOSTDEVICE bool operator()(bool x, bool y) const { return x && y; }
};
template <typename T1, typename T2, int VecSize>
static __global__ void ScaleCUDAKernel(const T1 *__restrict__ x,
const T2 *__restrict__ scale,
T1 *__restrict__ y, int num) {
static_assert(sizeof(T1) <= sizeof(T2),
"sizeof(T1) must be not greater than sizeof(T2).");
T2 s = scale[0];
int i = (threadIdx.x + blockIdx.x * blockDim.x) * VecSize;
int stride = blockDim.x * gridDim.x * VecSize;
for (; i + VecSize <= num; i += stride) {
platform::AlignedVector<T1, VecSize> x_vec;
platform::AlignedVector<T1, VecSize> y_vec;
platform::Load(x + i, &x_vec);
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
y_vec[j] = static_cast<T1>(static_cast<T2>(x_vec[j]) * s);
}
platform::Store(y_vec, y + i);
}
for (; i < num; ++i) {
y[i] = static_cast<T1>(static_cast<T2>(x[i]) * s);
}
}
template <typename T>
static __global__ void AddToCUDAKernel(const T *__restrict__ x,
T *__restrict__ y) {
y[0] += x[0];
}
// If clip before allreduce,
// coeff = global_scale * max_global_grad_norm / (1e-6 + sqrt(square_grad_norm)
// * rescale_grad)
// if coeff >= 1 or coeff is Nan/Inf, scale = 1.0
// else scale = coeff
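// For example (hypothetical values): with global_scale = 1, rescale_grad = 1,
// max_global_grad_norm = 1.0 and square_grad_norm = 16, the coefficient is
// 1.0 / (1e-6 + 4.0) ~= 0.25, so every gradient is scaled by ~0.25 before the
// reduce-scatter; if square_grad_norm were 0.25 the coefficient would be 2.0
// and the scale would be clamped to 1.0.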
template <typename T1, typename T2>
static __global__ void CalcGradNormClipBeforeAllReduceScale(
const T1 *__restrict__ global_scale, T1 max_global_grad_norm,
const T1 *__restrict__ square_grad_norm, T1 *__restrict__ out1,
T2 *__restrict__ out2, T1 clip_rescale_grad) {
T1 grad_norm = static_cast<T1>(sqrtf(*square_grad_norm)) * clip_rescale_grad;
T1 scale = global_scale[0] * max_global_grad_norm / (1e-6 + grad_norm);
bool found_nan_inf = !isfinite(scale);
if (scale >= 1 || found_nan_inf) {
scale = static_cast<T1>(1.0);
}
if (out1) {
*out1 = scale;
}
if (out2) {
*out2 = static_cast<T2>(scale);
}
}
static __global__ void SetNanInfValueCUDAKernelOneFlag(const bool *in_flag_p,
float *out_p) {
*out_p = (*in_flag_p) ? __int_as_float(0x7fffffffU) : 0.0f;
}
static __global__ void SetNanInfValueCUDAKernelTwoFlag(const bool *in_flag_p_1,
const bool *in_flag_p_2,
float *out_p) {
*out_p =
((*in_flag_p_1) || (*in_flag_p_2)) ? __int_as_float(0x7fffffffU) : 0.0f;
}
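// Fused moment update. The kernel first checks *square_grad_norm_p: if it is
// not finite it only sets *found_inf and returns. Otherwise the gradient is
// unscaled by rescale_grad / global_scale (optionally clipped by
// max_global_grad_norm), the Adam moments are updated, and
// trust_ratio_div = m_hat / (sqrt(v_hat) + epsilon) + weight_decay * param
// is written out for the LAMB trust-ratio step. Weight decay is only applied
// to the first weight_decay_end_numel elements.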
template <typename T, typename GradT, int VecSize>
static __global__ void UpdateLambMomentAndTrustRatioDivCUDAKernel(
const T *__restrict__ param_p, const GradT *__restrict__ grad_p,
const T *__restrict__ square_grad_norm_p,
const T *__restrict__ global_scale, const T *__restrict__ beta1pow_p,
const T *__restrict__ beta2pow_p, T *__restrict__ mom1_p,
T *__restrict__ mom2_p, T *__restrict__ trust_ratio_div_p, bool *found_inf,
T weight_decay, int weight_decay_end_numel, T beta1, T beta2, T epsilon,
T max_global_grad_norm, int num, T rescale_grad) {
T square_grad_norm = *square_grad_norm_p;
bool need_update_found_inf =
(found_inf && threadIdx.x == 0 && blockIdx.x == 0);
if (!isfinite(square_grad_norm)) {
if (need_update_found_inf) *found_inf = true;
return;
} else if (need_update_found_inf) {
*found_inf = false;
}
T scale = rescale_grad / global_scale[0];
if (max_global_grad_norm > 0) {
T clip_scale =
max_global_grad_norm / (sqrtf(square_grad_norm) * scale + 1e-6);
if (clip_scale < static_cast<T>(1)) {
scale *= clip_scale;
}
}
T one_minus_beta1pow = 1 - beta1pow_p[0];
T one_minus_beta2pow = 1 - beta2pow_p[0];
int i = (threadIdx.x + blockIdx.x * blockDim.x) * VecSize;
int stride = blockDim.x * gridDim.x * VecSize;
for (; i + VecSize <= num; i += stride) {
platform::AlignedVector<T, VecSize> param_vec;
platform::AlignedVector<GradT, VecSize> grad_vec;
platform::AlignedVector<T, VecSize> mom1_vec;
platform::AlignedVector<T, VecSize> mom2_vec;
platform::AlignedVector<T, VecSize> trust_ratio_div_vec;
T cur_weight_decay = (i < weight_decay_end_numel) * weight_decay;
if (cur_weight_decay != static_cast<T>(0.0)) {
platform::Load(param_p + i, ¶m_vec);
} else {
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
param_vec[j] = static_cast<T>(0);
}
}
platform::Load(grad_p + i, &grad_vec);
platform::Load(mom1_p + i, &mom1_vec);
platform::Load(mom2_p + i, &mom2_vec);
#define PD_LAMB_MOM_TRUST_RATIO_DIV_UPDATE(__param, __grad, __mom1, __mom2, \
__trust_ratio_div, __idx) \
T p = __param[__idx]; \
T g = static_cast<T>(__grad[__idx]) * scale; \
T mom1 = __mom1[__idx]; \
T mom2 = __mom2[__idx]; \
mom1 = beta1 * mom1 + (1 - beta1) * g; \
mom2 = beta2 * mom2 + (1 - beta2) * g * g; \
T mom1_unbiased = mom1 / one_minus_beta1pow; \
T mom2_unbiased = mom2 / one_minus_beta2pow; \
__trust_ratio_div[__idx] = \
mom1_unbiased / (sqrtf(mom2_unbiased) + epsilon) + cur_weight_decay * p; \
__mom1[__idx] = mom1; \
__mom2[__idx] = mom2;
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
PD_LAMB_MOM_TRUST_RATIO_DIV_UPDATE(param_vec, grad_vec, mom1_vec,
mom2_vec, trust_ratio_div_vec, j);
}
platform::Store(mom1_vec, mom1_p + i);
platform::Store(mom2_vec, mom2_p + i);
platform::Store(trust_ratio_div_vec, trust_ratio_div_p + i);
}
for (; i < num; ++i) {
T cur_weight_decay = (i < weight_decay_end_numel) * weight_decay;
PD_LAMB_MOM_TRUST_RATIO_DIV_UPDATE(param_p, grad_p, mom1_p, mom2_p,
trust_ratio_div_p, i);
}
}
template <typename T, typename GradT>
static void MultiTensorUpdateLambMomentAndTrustRatioDiv(
const platform::CUDADeviceContext &dev_ctx, const int *offsets, int n,
const T *param_p, const GradT *grad_p, const T *square_grad_norm_p,
const T *global_scale, const T *beta1pow_p, const T *beta2pow_p, T *mom1_p,
T *mom2_p, T *trust_ratio_div_p, bool *found_inf_p, T weight_decay,
int weight_decay_end_idx, T beta1, T beta2, T epsilon,
T max_global_grad_norm, T rescale_grad) {
if (n <= 0) return;
int numel = offsets[n] - offsets[0];
PADDLE_ENFORCE_GE(weight_decay_end_idx, 0,
platform::errors::InvalidArgument(
"The weight decay end index should be >= 0."));
PADDLE_ENFORCE_LE(weight_decay_end_idx, n,
platform::errors::InvalidArgument(
"The weight decay end index should be < %d.", n));
auto weight_decay_end_numel = offsets[weight_decay_end_idx] - offsets[0];
int vec_size = GetChunkedVecSize(param_p, 0);
vec_size = std::min(vec_size, GetChunkedVecSize(grad_p, 0));
vec_size = std::min(vec_size, GetChunkedVecSize(mom1_p, 0));
vec_size = std::min(vec_size, GetChunkedVecSize(mom2_p, 0));
vec_size = std::min(vec_size, GetChunkedVecSize(trust_ratio_div_p, 0));
for (int i = 0; i < n; ++i) {
auto length = offsets[i + 1] - offsets[i];
while (length % vec_size != 0) {
vec_size /= 2;
}
}
VLOG(1) << __func__ << " VecSize = " << vec_size;
auto stream = dev_ctx.stream();
auto config = platform::GetGpuLaunchConfig1D(dev_ctx, numel, vec_size);
#define PD_LAUNCH_LAMB_MOM_TRUST_RATIO_DIV_KERNEL \
do { \
UpdateLambMomentAndTrustRatioDivCUDAKernel<T, GradT, kVecSize><<< \
config.block_per_grid, config.thread_per_block, 0, stream>>>( \
param_p, grad_p, square_grad_norm_p, global_scale, beta1pow_p, \
beta2pow_p, mom1_p, mom2_p, trust_ratio_div_p, found_inf_p, \
weight_decay, weight_decay_end_numel, beta1, beta2, epsilon, \
max_global_grad_norm, numel, rescale_grad); \
} while (0)
PD_VEC_LAUNCH_KERNEL(vec_size, PD_LAUNCH_LAMB_MOM_TRUST_RATIO_DIV_KERNEL);
#undef PD_LAUNCH_LAMB_MOM_TRUST_RATIO_DIV_KERNEL
}
template <typename T, bool NeedUpdate /*=true*/>
struct LambBetaPowUpdateOnceHelper {
LambBetaPowUpdateOnceHelper(T *beta1pow, T *beta2pow, T beta1, T beta2) {
PADDLE_ENFORCE_NOT_NULL(beta1pow,
platform::errors::InvalidArgument(
"The beta1pow should not be nullptr."));
PADDLE_ENFORCE_NOT_NULL(beta2pow,
platform::errors::InvalidArgument(
"The beta2pow should not be nullptr."));
beta1pow_ = beta1pow;
beta2pow_ = beta2pow;
beta1_ = beta1;
beta2_ = beta2;
}
HOSTDEVICE void UpdateBetaPows() const {
beta1pow_[0] *= beta1_;
beta2pow_[0] *= beta2_;
}
private:
T *__restrict__ beta1pow_;
T *__restrict__ beta2pow_;
T beta1_;
T beta2_;
};
template <typename T>
struct LambBetaPowUpdateOnceHelper<T, false> {
LambBetaPowUpdateOnceHelper(T *beta1pow, T *beta2pow, T beta1, T beta2) {
PADDLE_ENFORCE_EQ(
beta1pow, nullptr,
platform::errors::InvalidArgument("The beta1pow should be nullptr."));
PADDLE_ENFORCE_EQ(
beta2pow, nullptr,
platform::errors::InvalidArgument("The beta2pow should be nullptr."));
}
HOSTDEVICE void UpdateBetaPows() const {}
};
template <typename T, bool HasMasterParam /*=true*/>
struct LambParamHelper {
LambParamHelper(T *param, MasterT<T> *master_param) {
constexpr bool kIsSameType = std::is_same<T, MasterT<T>>::value;
PADDLE_ENFORCE_EQ(kIsSameType, false,
platform::errors::InvalidArgument(
"T must not be the same with MasterT<T>."));
PADDLE_ENFORCE_NOT_NULL(master_param,
platform::errors::InvalidArgument(
"Master parameter must be provided."));
param_ = param;
master_param_ = master_param;
}
HOSTDEVICE T *__restrict__ ParamPtr() { return param_; }
HOSTDEVICE MasterT<T> *__restrict__ MasterParamPtr() { return master_param_; }
private:
T *__restrict__ param_;
MasterT<T> *__restrict__ master_param_;
};
template <typename T>
struct LambParamHelper<T, false> {
LambParamHelper(T *param, MasterT<T> *master_param) {
constexpr bool kIsSameType = std::is_same<T, MasterT<T>>::value;
PADDLE_ENFORCE_EQ(kIsSameType, true,
platform::errors::InvalidArgument(
"T must be the same with MasterT<T>."));
if (master_param != nullptr) {
PADDLE_ENFORCE_EQ(static_cast<void *>(param),
static_cast<void *>(master_param),
platform::errors::InvalidArgument(
"Master parameter must be nullptr or the same as "
"non-master parameter."));
}
param_ = param;
}
HOSTDEVICE T *__restrict__ ParamPtr() { return param_; }
HOSTDEVICE constexpr MasterT<T> *MasterParamPtr() { return nullptr; }
private:
T *__restrict__ param_;
};
template <typename ParamT, bool HasMasterParam, bool NeedUpdateBetaPow,
int VecSize>
struct LambUpdateParamAndBetaPowsFunctor {
DEVICE void operator()(
int tensor_id, int chunk_id, int offset, int size,
LambParamHelper<ParamT, HasMasterParam> param_helper,
const MasterT<ParamT> *trust_ratio_div, const MasterT<ParamT> *lr,
const MasterT<ParamT> *param_square_norm,
const MasterT<ParamT> *trust_ratio_div_square_norm, const bool *found_inf,
LambBetaPowUpdateOnceHelper<MasterT<ParamT>, NeedUpdateBetaPow>
betapow_helper) const {
if (*found_inf) return;
using MT = MasterT<ParamT>;
MT p_square_norm = param_square_norm[tensor_id];
MT t_square_norm = trust_ratio_div_square_norm[tensor_id];
MT lr_value = *lr;
MT ratio = (p_square_norm != static_cast<MT>(0) &&
t_square_norm != static_cast<MT>(0)
? lr_value * sqrtf(p_square_norm / t_square_norm)
: lr_value);
int i;
int stride = blockDim.x * VecSize;
ParamT *param = param_helper.ParamPtr() + offset;
MT *master_param = HasMasterParam ? param_helper.MasterParamPtr() + offset
: param_helper.MasterParamPtr();
trust_ratio_div += offset;
for (i = threadIdx.x * VecSize; i + VecSize <= size; i += stride) {
platform::AlignedVector<MT, VecSize> trust_ratio_div_vec;
platform::Load(trust_ratio_div + i, &trust_ratio_div_vec);
if (HasMasterParam) {
platform::AlignedVector<MT, VecSize> master_param_vec;
platform::Load(master_param + i, &master_param_vec);
platform::AlignedVector<ParamT, VecSize> param_vec;
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
MT p = master_param_vec[j] - ratio * trust_ratio_div_vec[j];
master_param_vec[j] = p;
param_vec[j] = static_cast<ParamT>(p);
}
platform::Store(master_param_vec, master_param + i);
platform::Store(param_vec, param + i);
} else {
platform::AlignedVector<ParamT, VecSize> param_vec;
platform::Load(param + i, ¶m_vec);
#pragma unroll
for (int j = 0; j < VecSize; ++j) {
MT p = static_cast<MT>(param_vec[j]) - ratio * trust_ratio_div_vec[j];
param_vec[j] = static_cast<ParamT>(p);
}
platform::Store(param_vec, param + i);
}
}
for (; i < size; ++i) {
if (HasMasterParam) {
MT p = master_param[i] - ratio * trust_ratio_div[i];
master_param[i] = p;
param[i] = static_cast<ParamT>(p);
} else {
MT p = static_cast<MT>(param[i]) - ratio * trust_ratio_div[i];
param[i] = static_cast<ParamT>(p);
}
}
if (NeedUpdateBetaPow && threadIdx.x == 0 && blockIdx.x == 0) {
betapow_helper.UpdateBetaPows();
}
}
};
// TODO(zengjinle): which block_dim and chunk_size would be better?
template <typename ParamT, int MaxTensorNumPerLaunch = 160,
int MaxChunkNumPerLaunch = 780>
static void MultiTensorUpdateLambParamAndBetaPows(
const platform::CUDADeviceContext &dev_ctx, const int *offsets, int n,
const MasterT<ParamT> *trust_ratio_div, const MasterT<ParamT> *lr,
const MasterT<ParamT> *param_square_norm,
const MasterT<ParamT> *trust_ratio_div_square_norm, const bool *found_inf,
ParamT *param, MasterT<ParamT> *master_param, MasterT<ParamT> *beta1pow,
MasterT<ParamT> *beta2pow, MasterT<ParamT> beta1, MasterT<ParamT> beta2,
int chunk_size = 65536) {
constexpr bool kHasMasterParam =
!(std::is_same<ParamT, MasterT<ParamT>>::value);
bool has_beta_pow = (beta1pow != nullptr);
if (has_beta_pow) {
PADDLE_ENFORCE_NOT_NULL(beta2pow, platform::errors::InvalidArgument(
"Beta2Pow should not be nullptr."));
} else {
PADDLE_ENFORCE_EQ(beta2pow, nullptr, platform::errors::InvalidArgument(
"Beta2Pow should be nullptr."));
}
const int block_dim = 512;
int vec_size = 8;
for (int i = 0; i < n; ++i) {
int offset = offsets[i] - offsets[0];
vec_size =
std::min(vec_size, GetChunkedVecSize(param + offset, chunk_size));
if (kHasMasterParam) {
vec_size = std::min(vec_size,
GetChunkedVecSize(master_param + offset, chunk_size));
}
vec_size = std::min(
vec_size, GetChunkedVecSize(trust_ratio_div + offset, chunk_size));
}
VLOG(1) << __func__ << " VecSize = " << vec_size;
constexpr auto kNumTensor = MaxTensorNumPerLaunch;
constexpr auto kNumChunk = MaxChunkNumPerLaunch;
auto stream = dev_ctx.stream();
#define PD_LAUNCH_MULTI_TENSOR_UPDATE_PARAM_BETAPOW(__has_beta_pow) \
do { \
using FunctorT = \
LambUpdateParamAndBetaPowsFunctor<ParamT, kHasMasterParam, \
__has_beta_pow, kVecSize>; \
LambParamHelper<ParamT, kHasMasterParam> param_helper(param, \
master_param); \
LambBetaPowUpdateOnceHelper<MasterT<ParamT>, __has_beta_pow> \
betapow_helper(beta1pow, beta2pow, beta1, beta2); \
launcher.Launch(FunctorT(), param_helper, trust_ratio_div, lr, \
param_square_norm, trust_ratio_div_square_norm, found_inf, \
betapow_helper); \
} while (0)
#define PD_LAUNCH_VEC_MULTI_TENSOR_UPDATE_PARAM_BETAPOW_CASE \
do { \
auto callback = [&]( \
const MultiTensorLauncher<kNumTensor, kNumChunk> &launcher, \
int launch_n) { \
if (has_beta_pow && launch_n == 0) { \
PD_LAUNCH_MULTI_TENSOR_UPDATE_PARAM_BETAPOW(true); \
beta1pow = nullptr; \
beta2pow = nullptr; \
} else { \
PD_LAUNCH_MULTI_TENSOR_UPDATE_PARAM_BETAPOW(false); \
} \
}; \
MultiTensorApplyWithCallback<kNumTensor, kNumChunk>( \
stream, offsets, n, chunk_size, block_dim, callback); \
} while (0)
PD_VEC_LAUNCH_KERNEL(vec_size,
PD_LAUNCH_VEC_MULTI_TENSOR_UPDATE_PARAM_BETAPOW_CASE);
#undef PD_LAUNCH_MULTI_TENSOR_UPDATE_PARAM_BETAPOW
#undef PD_LAUNCH_VEC_MULTI_TENSOR_UPDATE_PARAM_BETAPOW_CASE
}
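// Note: PD_VEC_LAUNCH_KERNEL is defined earlier in this file (not shown here).
// It is assumed to expand to a switch over vec_size that binds a constexpr
// kVecSize consumed by the case macro above, roughly:
//
//   switch (vec_size) {
//     case 8: { constexpr int kVecSize = 8; __case_macro__; break; }
//     case 4: { constexpr int kVecSize = 4; __case_macro__; break; }
//     case 2: { constexpr int kVecSize = 2; __case_macro__; break; }
//     default: { constexpr int kVecSize = 1; __case_macro__; break; }
//   }
//
// so the same functor body is compiled once per supported vector width.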
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
static bool CreatePreMulScaleOpIfSupported(ncclDataType_t dtype,
ncclComm_t comm, const void *scale,
ncclRedOp_t *op) {
#if NCCL_VERSION_CODE >= 21100
int ver;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclGetVersion(&ver));
if (ver >= 21100) {
VLOG(10) << "ncclRedOpCreatePreMulSum is supported.";
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclRedOpCreatePreMulSum(
op, const_cast<void *>(scale), dtype, ncclScalarDevice, comm));
return true;
}
#endif
VLOG(10) << "ncclRedOpCreatePreMulSum is not supported.";
return false;
}
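// Rationale: ncclRedOpCreatePreMulSum (NCCL >= 2.11) builds a custom reduction
// op that multiplies every element by a scale factor before summing, and
// ncclScalarDevice tells NCCL to read that scale from device memory. This lets
// gradient scaling be fused into the reduce-scatter itself; on older NCCL
// versions the caller falls back to an explicit scaling kernel.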
template <typename T1, typename T2>
static void LaunchScaleKernel(const platform::CUDADeviceContext &dev_ctx,
const T1 *x, const T2 *scale, T1 *y, int n,
gpuStream_t stream) {
int vec_size = std::min(GetChunkedVecSize(x, 0), GetChunkedVecSize(y, 0));
auto config = platform::GetGpuLaunchConfig1D(dev_ctx, n, vec_size);
#define PD_LAMB_VEC_SCALE_KERNEL_CASE \
do { \
ScaleCUDAKernel<T1, T2, kVecSize><<<config.block_per_grid, \
config.thread_per_block, 0, stream>>>( \
x, scale, y, n); \
} while (0)
PD_VEC_LAUNCH_KERNEL(vec_size, PD_LAMB_VEC_SCALE_KERNEL_CASE);
#undef PD_LAMB_VEC_SCALE_KERNEL_CASE
}
template <typename T>
static void NCCLReduceScatterWithScale(
const T *sendbuff, T *recvbuff, size_t recvcount, size_t nranks,
ncclComm_t comm, gpuStream_t stream,
const platform::CUDADeviceContext &dev_ctx, const T *scale = nullptr) {
static_assert(std::is_same<T, float>::value ||
std::is_same<T, platform::float16>::value,
"T must be either float32 or float16.");
if (recvcount == 0) return;
if (comm == nullptr) {
if (scale != nullptr) {
PADDLE_ENFORCE_EQ(nranks, 1,
platform::errors::InvalidArgument(
"nranks must be 1 when scale != nullptr."));
LaunchScaleKernel(dev_ctx, sendbuff, scale, recvbuff, recvcount * nranks,
stream);
}
return;
}
ncclRedOp_t op = ncclSum;
ncclDataType_t dtype =
std::is_same<T, float>::value ? ncclFloat32 : ncclFloat16;
bool should_destroy_op =
scale && CreatePreMulScaleOpIfSupported(dtype, comm, scale, &op);
memory::Buffer buffer(dev_ctx.GetPlace());
if (scale && !should_destroy_op) {
size_t numel = recvcount * nranks;
T *new_sendbuff = buffer.Alloc<T>(numel);
LaunchScaleKernel(dev_ctx, sendbuff, scale, new_sendbuff, numel, stream);
sendbuff = new_sendbuff;
}
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclReduceScatter(
sendbuff, recvbuff, recvcount, dtype, op, comm, stream));
#if NCCL_VERSION_CODE >= 21100
if (should_destroy_op) {
VLOG(10) << "ncclRedOpDestroy starts";
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclRedOpDestroy(op, comm));
VLOG(10) << "ncclRedOpDestroy ends";
}
#endif
}
#endif
template <typename InputIteratorT, typename OutputIteratorT, typename ReduceOpT,
typename T>
static void CubDeviceReduce(InputIteratorT d_in, OutputIteratorT d_out,
int num_items, ReduceOpT reduction_op, T init,
gpuStream_t stream, memory::Buffer *buffer) {
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
PADDLE_ENFORCE_GPU_SUCCESS(
cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out,
num_items, reduction_op, init, stream));
d_temp_storage = buffer->Alloc<void>(temp_storage_bytes);
VLOG(10) << "cub::DeviceReduce::Reduce needs " << temp_storage_bytes
<< " byte(s), ptr = " << d_temp_storage;
PADDLE_ENFORCE_GPU_SUCCESS(
cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out,
num_items, reduction_op, init, stream));
}
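// Note: this follows the standard CUB two-pass pattern. The first call with
// d_temp_storage == nullptr only computes the required temp_storage_bytes;
// the buffer is then allocated and the second call performs the actual
// reduction asynchronously on `stream`.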
template <typename T>
static void GetSquareGradNormImpl(const T *grad, int n, float *square_norm,
gpuStream_t stream,
memory::Buffer *cub_tmp_buffer) {
using Iterator =
cub::TransformInputIterator<float, SquareFunctor<T>, const T *>;
Iterator iter(grad, SquareFunctor<T>());
CubDeviceReduce(iter, square_norm, n, cub::Sum(), static_cast<float>(0),
stream, cub_tmp_buffer);
}
// square_norm is of length 2 at least
static void GetSquareGradNorm(const float *fp32_grad, int fp32_numel,
const platform::float16 *fp16_grad,
int fp16_numel, float *square_norm,
gpuStream_t stream,
memory::Buffer *cub_tmp_buffer) {
VLOG(10) << "GetSquareGradNorm starts, fp32_numel = " << fp32_numel
<< " , fp16_numel = " << fp16_numel;
if (fp32_numel > 0) {
GetSquareGradNormImpl(fp32_grad, fp32_numel, square_norm, stream,
cub_tmp_buffer);
VLOG(10) << "FP32 square L2-Norm: "
<< FlattenToString(square_norm, 1, cub_tmp_buffer->GetPlace());
}
if (fp16_numel > 0) {
float *fp16_square_norm = fp32_numel > 0 ? square_norm + 1 : square_norm;
GetSquareGradNormImpl(fp16_grad, fp16_numel, fp16_square_norm, stream,
cub_tmp_buffer);
VLOG(10) << "FP16 square L2-Norm: "
<< FlattenToString(fp16_square_norm, 1,
cub_tmp_buffer->GetPlace());
if (fp32_numel > 0) {
AddToCUDAKernel<<<1, 1, 0, stream>>>(fp16_square_norm, square_norm);
VLOG(10) << "FP32+FP16 square L2-Norm: "
<< FlattenToString(square_norm, 1, cub_tmp_buffer->GetPlace());
}
}
VLOG(10) << "GetSquareGradNorm ends, fp32_numel = " << fp32_numel
<< " , fp16_numel = " << fp16_numel;
}
template <typename T>
std::string NumToString(T x) {
std::stringstream ss;
ss << x;
return ss.str();
}
template <typename T>
static std::string GetMinMaxStr(const T *x, size_t n,
const platform::Place &place) {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(place), true,
platform::errors::InvalidArgument("Only support CUDAPlace currently."));
auto *dev_ctx = static_cast<platform::CUDADeviceContext *>(
platform::DeviceContextPool::Instance().Get(place));
auto stream = dev_ctx->stream();
memory::Buffer ret_buffer(place);
T *ret = ret_buffer.Alloc<T>(2);
if (n > 0) {
memory::Buffer cub_buffer(place);
CubDeviceReduce(x, ret, n, cub::Min(), std::numeric_limits<T>::max(),
stream, &cub_buffer);
CubDeviceReduce(x, ret + 1, n, cub::Max(), std::numeric_limits<T>::lowest(),
stream, &cub_buffer);
T ret_cpu[2];
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(&ret_cpu[0], ret, 2 * sizeof(T),
hipMemcpyDeviceToHost, stream));
PADDLE_ENFORCE_GPU_SUCCESS(hipStreamSynchronize(stream));
#else
PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync(&ret_cpu[0], ret, 2 * sizeof(T),
cudaMemcpyDeviceToHost, stream));
PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamSynchronize(stream));
#endif
return std::string("{\"min\": ") + NumToString(ret_cpu[0]) +
" , \"max\": " + NumToString(ret_cpu[1]) + "}";
} else {
return "{\"min\": null, \"max\": null}";
}
}
struct VisitDTypeFunctor {
VisitDTypeFunctor(const framework::Tensor *x, std::string *s)
: x_(x), s_(s) {}
template <typename T>
void apply() const {
*s_ = GetMinMaxStr<T>(x_->template data<T>(), x_->numel(), x_->place());
}
private:
const framework::Tensor *x_;
std::string *s_;
};
static std::string GetMinMaxStr(const framework::Tensor *x) {
if (x == nullptr) return "null";
if (!x->IsInitialized()) return "not_inited";
if (!platform::is_gpu_place(x->place())) return "CPUTensor";
std::string str;
VisitDTypeFunctor functor(x, &str);
phi::VisitDataType(x->dtype(), functor);
return str;
}
static void PrintAllMinMaxRange(const framework::ExecutionContext &ctx,
bool only_inputs) {
if (!VLOG_IS_ON(1)) return;
for (const auto &pair : ctx.GetOp().Inputs()) {
const auto &key = pair.first;
const auto tensors = ctx.MultiInput<framework::Tensor>(key);
size_t n = tensors.size();
for (size_t i = 0; i < n; ++i) {
VLOG(1) << "Input(" << key + ")[" << i << "] = " << pair.second[i]
<< " , " << GetMinMaxStr(tensors[i]);
}
}
if (only_inputs) return;
for (const auto &pair : ctx.GetOp().Outputs()) {
const auto &key = pair.first;
const auto tensors = ctx.MultiOutput<framework::Tensor>(key);
size_t n = tensors.size();
for (size_t i = 0; i < n; ++i) {
VLOG(1) << "Output(" << key + ")[" << i << "] = " << pair.second[i]
<< " , " << GetMinMaxStr(tensors[i]);
}
}
}
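// Checks whether any FP32/FP16 gradient element is NaN or Inf. The per-dtype
// boolean results of the cub Or-reductions are staged in the float slot right
// after nan_inf_flag (reinterpreted as bool storage), and a single-thread
// kernel (defined earlier in this file) then folds them into nan_inf_flag[0].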
static void CheckHasNanInfGrad(const float *fp32_grad, int fp32_numel,
const platform::float16 *fp16_grad,
int fp16_numel, float *nan_inf_flag,
gpuStream_t stream,
memory::Buffer *cub_tmp_buffer) {
bool *fp32_has_nan_inf = nullptr;
bool *fp16_has_nan_inf = nullptr;
if (fp32_numel > 0) {
fp32_has_nan_inf = reinterpret_cast<bool *>(nan_inf_flag + 1);
cub::TransformInputIterator<bool, IsNanInfFunctor<float>, const float *>
iter(fp32_grad, IsNanInfFunctor<float>());
CubDeviceReduce(iter, fp32_has_nan_inf, fp32_numel, OrFunctor(), false,
stream, cub_tmp_buffer);
}
if (fp16_numel > 0) {
fp16_has_nan_inf = reinterpret_cast<bool *>(nan_inf_flag + 1) + 1;
cub::TransformInputIterator<bool, IsNanInfFunctor<platform::float16>,
const platform::float16 *>
iter(fp16_grad, IsNanInfFunctor<platform::float16>());
CubDeviceReduce(iter, fp16_has_nan_inf, fp16_numel, OrFunctor(), false,
stream, cub_tmp_buffer);
}
if (fp32_has_nan_inf && fp16_has_nan_inf) {
SetNanInfValueCUDAKernelTwoFlag<<<1, 1, 0, stream>>>(
fp32_has_nan_inf, fp16_has_nan_inf, nan_inf_flag);
} else if (fp32_has_nan_inf) {
SetNanInfValueCUDAKernelOneFlag<<<1, 1, 0, stream>>>(fp32_has_nan_inf,
nan_inf_flag);
} else {
SetNanInfValueCUDAKernelOneFlag<<<1, 1, 0, stream>>>(fp16_has_nan_inf,
nan_inf_flag);
}
}
template <typename T>
class DistributedFusedLambOpKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto stream = dev_ctx.stream();
auto place = dev_ctx.GetPlace();
// Step 1: Get fp16 param and grad tensors
int64_t fp16_numel;
auto *fp16_param = GetSameInOutTensorPtr<platform::float16, true>(
ctx, place, "FP16FusedParam", "FP16FusedParamOut", &fp16_numel);
bool has_fp16_param = (fp16_numel > 0);
const platform::float16 *fp16_grad = nullptr;
if (has_fp16_param) {
fp16_grad = GetInputTensorPtr<platform::float16>(ctx, "FP16FusedGrad");
} else {
fp16_param = nullptr;
}
// Step 2: Get fp32 param and grad tensors
int64_t fp32_numel = 0;
auto *fp32_param = GetSameInOutTensorPtr<float, true>(
ctx, place, "FP32FusedParam", "FP32FusedParamOut", &fp32_numel);
PADDLE_ENFORCE_GE(fp32_numel, fp16_numel,
platform::errors::InvalidArgument(
"The element number in FP32FusedParam should be not "
"less than FP16FusedParam."));
fp32_numel -= fp16_numel; // the FP32FusedParam contains fp32 param and
// fp16 master weight
bool has_fp32_param = (fp32_numel > 0);
const float *fp32_grad = nullptr;
if (has_fp32_param) {
fp32_grad = GetInputTensorPtr<float>(ctx, "FP32FusedGrad");
} else {
PADDLE_ENFORCE_EQ(
has_fp16_param, true,
platform::errors::InvalidArgument(
"Either FP32FusedGrad or FP16FusedGrad cannot be NULL."));
}
auto numel = fp32_numel + fp16_numel;
VLOG(1) << "numel = " << numel << " , fp32_numel = " << fp32_numel
<< " , fp16_numel = " << fp16_numel;
// The NVIDIA cub library does not support element counts larger than INT32_MAX
PADDLE_ENFORCE_LE(numel, std::numeric_limits<int>::max(),
platform::errors::Unimplemented(
"Too many parameter number. Only <= %d is supported.",
std::numeric_limits<int>::max()));
// Step 3: Get ParamInfo
const auto *param_info_tensor = GetInputTensorPtr<int>(ctx, "ParamInfo");
auto fp32_local_start_idx = param_info_tensor[0];
auto fp32_local_param_num = param_info_tensor[1];
auto fp32_global_param_num = param_info_tensor[2];
auto fp32_weight_decay_end_idx = param_info_tensor[3];
auto fp16_local_start_idx = param_info_tensor[4];
auto fp16_local_param_num = param_info_tensor[5];
auto fp16_global_param_num = param_info_tensor[6];
auto fp16_weight_decay_end_idx = param_info_tensor[7];
auto local_param_num = fp32_local_param_num + fp16_local_param_num;
auto param_num = fp32_global_param_num + fp16_global_param_num;
PADDLE_ENFORCE_LE(local_param_num, param_num,
platform::errors::InvalidArgument(
"The local parameter number should not exceed the "
"global parameter number."));
VLOG(1) << "local_param_num = " << local_param_num
<< " , global_param_num = " << param_num
<< " , fp32_local_start_idx = " << fp32_local_start_idx
<< " , fp32_local_param_num = " << fp32_local_param_num
<< " , fp32_global_param_num = " << fp32_global_param_num
<< " , fp16_local_start_idx = " << fp16_local_start_idx
<< " , fp16_local_param_num = " << fp16_local_param_num
<< " , fp16_global_param_num = " << fp16_global_param_num;
// Step 4: Get LearningRate, Moment1, Moment2, Beta1Pow, Beta2Pow,
// GlobalScale, FoundInf
const auto *global_scale = GetInputTensorPtr<float>(ctx, "GlobalScale");
const auto *lr = GetInputTensorPtr<float>(ctx, "LearningRate");
int64_t partial_numel = 0;
auto *moment1 = GetSameInOutTensorPtr<float>(ctx, place, "Moment1",
"Moment1Out", &partial_numel);
PADDLE_ENFORCE_EQ(numel % partial_numel, 0,
platform::errors::InvalidArgument(
"The total parameter number %d should be divided "
"exactly by the element number %d of Moment1.",
numel, partial_numel));
int64_t num_devices = numel / partial_numel;
VLOG(1) << "num_devices = " << num_devices
<< " , partial_numel = " << partial_numel;
PADDLE_ENFORCE_EQ(fp32_numel % num_devices, 0,
platform::errors::InvalidArgument(
"The fp32 parameter number %d should be divided "
"exactly by the device number %d.",
fp32_numel, num_devices));
PADDLE_ENFORCE_EQ(fp16_numel % num_devices, 0,
platform::errors::InvalidArgument(
"The fp16 parameter number %d should be divided "
"exactly by the device number %d.",
fp16_numel, num_devices));
auto *moment2 =
GetSameInOutTensorPtr<float>(ctx, place, "Moment2", "Moment2Out");
auto *beta1pow =
GetSameInOutTensorPtr<float>(ctx, place, "Beta1Pow", "Beta1PowOut");
auto *beta2pow =
GetSameInOutTensorPtr<float>(ctx, place, "Beta2Pow", "Beta2PowOut");
auto *found_inf_t = ctx.Output<framework::Tensor>("FoundInf");
found_inf_t->Resize({1});
auto *found_inf = found_inf_t->mutable_data<bool>(place);
// Step 5: Get attributes weight_decay, beta1, beta2, epsilon,
// max_grad_norm, ring_id,
// use_master_param_norm, is_grad_scaled_by_nranks
auto weight_decay = ctx.Attr<float>("weight_decay");
auto beta1 = ctx.Attr<float>("beta1");
auto beta2 = ctx.Attr<float>("beta2");
auto epsilon = ctx.Attr<float>("epsilon");
auto max_global_grad_norm = ctx.Attr<float>("max_global_grad_norm");
auto clip_after_allreduce = ctx.Attr<bool>("clip_after_allreduce");
auto ring_id = ctx.Attr<int>("ring_id");
auto use_master_param_norm = ctx.Attr<bool>("use_master_param_norm");
auto is_grad_scaled_by_nranks = ctx.Attr<bool>("is_grad_scaled_by_nranks");
VLOG(10) << "max_global_grad_norm = " << max_global_grad_norm
<< " , clip_after_allreduce = " << clip_after_allreduce
<< " , use_master_param_norm = " << use_master_param_norm
<< " , is_grad_scaled_by_nranks = " << is_grad_scaled_by_nranks;
// Step 6: allreduce + global norm gradient clip
int rank = 0;
ncclComm_t comm = nullptr;
if (num_devices > 1) {
auto *nccl_comm_handle =
platform::NCCLCommContext::Instance().Get(ring_id, place);
comm = nccl_comm_handle->comm();
rank = nccl_comm_handle->rank();
}
memory::Buffer grad_norm_square_buffer(place);
auto *fp32_square_grad_norm = grad_norm_square_buffer.Alloc<float>(2);
memory::Buffer cub_tmp_buffer(place);
memory::Buffer sum_grad_buffer(place);
float *fp32_sum_grad;
platform::float16 *fp16_sum_grad;
auto fp32_numel_each_device = fp32_numel / num_devices;
auto fp16_numel_each_device = fp16_numel / num_devices;
if (num_devices > 1 ||
(max_global_grad_norm > 0 && !clip_after_allreduce)) {
auto ptr = sum_grad_buffer.Alloc<uint8_t>(
fp32_numel_each_device * sizeof(float) +
fp16_numel_each_device * sizeof(platform::float16));
fp32_sum_grad = has_fp32_param ? reinterpret_cast<float *>(ptr) : nullptr;
fp16_sum_grad = has_fp16_param
? reinterpret_cast<platform::float16 *>(
ptr + fp32_numel_each_device * sizeof(float))
: nullptr;
} else {
// NOTE: The const_cast here is safe because fp32_sum_grad and
// fp16_sum_grad are never written to when num_devices == 1.
// Without the const_cast, the code below would need extra
// (num_devices > 1) branches, so the cast is used only to unify
// the subsequent code path.
fp32_sum_grad = const_cast<float *>(fp32_grad);
fp16_sum_grad = const_cast<platform::float16 *>(fp16_grad);
}
float rescale_grad = 1.0f;
if (!is_grad_scaled_by_nranks) {
rescale_grad /= num_devices;
}
if (max_global_grad_norm > 0) {
if (clip_after_allreduce) {
// (1) ReduceScatter first
NCCLReduceScatterWithScale(fp32_grad, fp32_sum_grad,
fp32_numel_each_device, num_devices, comm,
stream, dev_ctx);
NCCLReduceScatterWithScale(fp16_grad, fp16_sum_grad,
fp16_numel_each_device, num_devices, comm,
stream, dev_ctx);
// (2) Calculate the global grad norm
GetSquareGradNorm(fp32_sum_grad, fp32_numel_each_device, fp16_sum_grad,
fp16_numel_each_device, fp32_square_grad_norm, stream,
&cub_tmp_buffer);
VLOG(1) << "Grad square norm before all reduce: "
<< FlattenToString(fp32_square_grad_norm, 1, place);
if (num_devices > 1) {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
fp32_square_grad_norm, fp32_square_grad_norm, 1, ncclFloat32,
ncclSum, comm, stream));
}
VLOG(1) << "Grad square norm after all reduce: "
<< FlattenToString(fp32_square_grad_norm, 1, place);
} else {
// (1) Calculate the local grad norm
GetSquareGradNorm(fp32_grad, fp32_numel, fp16_grad, fp16_numel,
fp32_square_grad_norm, stream, &cub_tmp_buffer);
VLOG(1) << "Grad square norm before all reduce: "
<< FlattenToString(fp32_square_grad_norm, 1, place);
// (2) Calculate the gradient clip scale
float *fp32_scale = nullptr;
platform::float16 *fp16_scale = nullptr;
if (has_fp32_param && has_fp16_param) {
auto *ptr = cub_tmp_buffer.Alloc<uint8_t>(sizeof(float) +
sizeof(platform::float16));
fp32_scale = reinterpret_cast<float *>(ptr);
fp16_scale =
reinterpret_cast<platform::float16 *>(ptr + sizeof(float));
} else if (has_fp32_param) {
fp32_scale = cub_tmp_buffer.Alloc<float>(1);
} else {
fp16_scale = cub_tmp_buffer.Alloc<platform::float16>(1);
}
float clip_scale = 1.0f;
if (is_grad_scaled_by_nranks) {
clip_scale *= num_devices;
}
CalcGradNormClipBeforeAllReduceScale<
float, platform::float16><<<1, 1, 0, stream>>>(
global_scale, max_global_grad_norm, fp32_square_grad_norm,
fp32_scale, fp16_scale, clip_scale);
if (fp32_scale) {
VLOG(1) << "Grad scale: " << FlattenToString(fp32_scale, 1, place);
} else {
VLOG(1) << "Grad scale: " << FlattenToString(fp16_scale, 1, place);
}
if (num_devices > 1) {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
fp32_square_grad_norm, fp32_square_grad_norm, 1, ncclFloat32,
ncclSum, comm, stream));
}
// (3) Do ReduceScatter with scale
NCCLReduceScatterWithScale(fp32_grad, fp32_sum_grad,
fp32_numel_each_device, num_devices, comm,
stream, dev_ctx, fp32_scale);
NCCLReduceScatterWithScale(fp16_grad, fp16_sum_grad,
fp16_numel_each_device, num_devices, comm,
stream, dev_ctx, fp16_scale);
// (4) mark max_global_grad_norm as 0, meaning that clip has been
// already performed
max_global_grad_norm = 0;
}
} else {
NCCLReduceScatterWithScale(fp32_grad, fp32_sum_grad,
fp32_numel_each_device, num_devices, comm,
stream, dev_ctx);
NCCLReduceScatterWithScale(fp16_grad, fp16_sum_grad,
fp16_numel_each_device, num_devices, comm,
stream, dev_ctx);
CheckHasNanInfGrad(fp32_sum_grad, fp32_numel_each_device, fp16_sum_grad,
fp16_numel_each_device, fp32_square_grad_norm, stream,
&cub_tmp_buffer);
if (num_devices > 1) {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
fp32_square_grad_norm, fp32_square_grad_norm, 1, ncclFloat32,
ncclSum, comm, stream));
}
max_global_grad_norm = 0;
}
VLOG(10) << "ReduceScatter done";
// Step 7: update the moment1, moment2. Calculate the trust_ratio_div
auto *fused_offsets_t = ctx.Input<framework::Tensor>("FusedParamOffsets");
auto *fused_offsets = fused_offsets_t->data<int>();
auto *fp32_partial_fused_offsets_t =
ctx.Input<framework::Tensor>("FP32ShardFusedParamOffsets");
const auto *fp32_partial_fused_offsets =
fp32_partial_fused_offsets_t->data<int>();
auto *fp16_partial_fused_offsets_t =
ctx.Input<framework::Tensor>("FP16ShardFusedParamOffsets");
const auto *fp16_partial_fused_offsets =
fp16_partial_fused_offsets_t->data<int>();
VLOG(1) << "FusedParamOffsets: "
<< FlattenToString(fused_offsets, fused_offsets_t->numel(),
fused_offsets_t->place());
VLOG(1) << "FP32ShardFusedParamOffsets: "
<< FlattenToString(fp32_partial_fused_offsets,
fp32_partial_fused_offsets_t->numel(),
fp32_partial_fused_offsets_t->place());
VLOG(1) << "FP16ShardFusedParamOffsets: "
<< FlattenToString(fp16_partial_fused_offsets,
fp16_partial_fused_offsets_t->numel(),
fp16_partial_fused_offsets_t->place());
memory::Buffer trust_ratio_div_buffer(place);
auto *trust_ratio_div = trust_ratio_div_buffer.Alloc<float>(partial_numel);
auto fp32_offset = rank * fp32_numel_each_device;
auto fp16_offset = rank * fp16_numel_each_device;
if (has_fp32_param) {
VLOG(10) << "Update FP32 Moment and TrustRatioDiv starts";
MultiTensorUpdateLambMomentAndTrustRatioDiv(
dev_ctx, fp32_partial_fused_offsets, fp32_local_param_num,
fp32_param + fp32_offset, fp32_sum_grad, fp32_square_grad_norm,
global_scale, beta1pow, beta2pow, moment1, moment2, trust_ratio_div,
found_inf, weight_decay, fp32_weight_decay_end_idx, beta1, beta2,
epsilon, max_global_grad_norm, rescale_grad);
VLOG(10) << "Update FP32 Moment and TrustRatioDiv done";
}
float *master_param = nullptr;
if (has_fp16_param) {
master_param = fp32_param + fp32_numel;
VLOG(10) << "Update FP16 Moment and TrustRatioDiv starts";
auto tmp_found_inf = has_fp32_param ? nullptr : found_inf;
MultiTensorUpdateLambMomentAndTrustRatioDiv(
dev_ctx, fp16_partial_fused_offsets, fp16_local_param_num,
master_param + fp16_offset, fp16_sum_grad, fp32_square_grad_norm,
global_scale, beta1pow, beta2pow, moment1 + fp32_numel_each_device,
moment2 + fp32_numel_each_device,
trust_ratio_div + fp32_numel_each_device, tmp_found_inf, weight_decay,
fp16_weight_decay_end_idx, beta1, beta2, epsilon,
max_global_grad_norm, rescale_grad);
VLOG(10) << "Update FP16 Moment and TrustRatioDiv done";
}
VLOG(10) << "Update Moment and TrustRatioDiv done hehahaha";
// Step 8: calculate L2-Norm square of parameter and trust_ratio_div
memory::Buffer square_norm_buffer(place);
auto *param_square_norm = square_norm_buffer.Alloc<float>(2 * param_num);
auto *trust_ratio_div_square_norm = param_square_norm + param_num;
if (num_devices > 1) {
if (use_master_param_norm) {
FillZeroWithPtr(param_square_norm + fp32_global_param_num,
2 * param_num - fp32_global_param_num, stream);
} else {
FillZeroWithPtr(trust_ratio_div_square_norm, param_num, stream);
}
}
MultiTensorL2Norm(place, stream, fp32_param, fused_offsets,
fp32_global_param_num, param_square_norm);
if (use_master_param_norm) {
MultiTensorL2Norm(place, stream, master_param + fp16_offset,
fp16_partial_fused_offsets, fp16_local_param_num,
param_square_norm + fp16_local_start_idx);
} else {
MultiTensorL2Norm(
place, stream, fp16_param + fused_offsets[fp16_local_start_idx] -
fused_offsets[fp32_global_param_num],
fused_offsets + fp16_local_start_idx, fp16_local_param_num,
param_square_norm + fp16_local_start_idx);
}
MultiTensorL2Norm(place, stream, trust_ratio_div,
fp32_partial_fused_offsets, fp32_local_param_num,
trust_ratio_div_square_norm + fp32_local_start_idx);
MultiTensorL2Norm(place, stream, trust_ratio_div + fp32_numel_each_device,
fp16_partial_fused_offsets, fp16_local_param_num,
trust_ratio_div_square_norm + fp16_local_start_idx);
VLOG(1) << "TrustRatioDiv L2-Norm before allreduce: "
<< FlattenToString(trust_ratio_div_square_norm, param_num, place);
if (num_devices > 1) {
if (use_master_param_norm) {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
param_square_norm + fp32_global_param_num,
param_square_norm + fp32_global_param_num,
2 * param_num - fp32_global_param_num, ncclFloat32, ncclSum, comm,
stream));
} else {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
trust_ratio_div_square_norm, trust_ratio_div_square_norm, param_num,
ncclFloat32, ncclSum, comm, stream));
}
VLOG(10) << "ncclAllReduce done";
}
LogParamAndTrustRatioDivSquareNorm<1>(ctx, param_square_norm,
trust_ratio_div_square_norm);
VLOG(10) << "Calculate L2-Norm of Param and TrustRatioDiv done";
// Step 9: update parameter, beta1pow, beta2pow. All gather parameters.
if (has_fp32_param) {
MultiTensorUpdateLambParamAndBetaPows<float>(
dev_ctx, fp32_partial_fused_offsets, fp32_local_param_num,
trust_ratio_div, lr, param_square_norm + fp32_local_start_idx,
trust_ratio_div_square_norm + fp32_local_start_idx, found_inf,
fp32_param + fp32_offset, nullptr, beta1pow, beta2pow, beta1, beta2);
if (num_devices > 1) {
// ncclAllGather
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather(
fp32_param + fp32_offset, fp32_param, fp32_numel_each_device,
ncclFloat32, comm, stream));
}
beta1pow = nullptr;
beta2pow = nullptr;
}
if (has_fp16_param) {
MultiTensorUpdateLambParamAndBetaPows<platform::float16>(
dev_ctx, fp16_partial_fused_offsets, fp16_local_param_num,
trust_ratio_div + fp32_numel_each_device, lr,
param_square_norm + fp16_local_start_idx,
trust_ratio_div_square_norm + fp16_local_start_idx, found_inf,
fp16_param + fp16_offset, master_param + fp16_offset, beta1pow,
beta2pow, beta1, beta2);
if (num_devices > 1) {
// ncclAllGather
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllGather(
fp16_param + fp16_offset, fp16_param, fp16_numel_each_device,
ncclFloat16, comm, stream));
}
}
VLOG(10) << "Update Param done";
VLOG(1) << "IsFinite: " << IsFinite(dev_ctx, fp32_square_grad_norm);
#else
PADDLE_THROW(platform::errors::Unimplemented(
"distributed_fused_lamb op should be used with NCCL/RCCL."));
#endif
}
};
} // namespace operators
} // namespace paddle
namespace plat = paddle::platform;
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
distributed_fused_lamb,
ops::DistributedFusedLambOpKernel<plat::CUDADeviceContext, float>);
|
1dc5163009f4a21009a6ca23977f8288499ae539.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <math.h>
#include "rte.h"
#include <pthread.h>
__host__ Info_Stat * populate_info_dev();
Geometry *geom;
Phantom *phan;
Source *beam_src;
complex_double *diag_terms_host;
complex_double *sph_harm;
Info_Stat *info_stat_host;
SHORT nL;
int nTerms;
__host__ int get_vind_phanind_host(int dep, int row, int col){
return ((geom->bounZ + dep) * (geom->nX + 2 * geom->bounX ) * (geom->nY + 2 * geom->bounY ) /* reached the correct layer */ + ( geom->bounY + row)* (geom->nX + 2 * geom->bounX ) + (geom->bounX + col));
}
__host__ Info_Stat * populate_info_dev(){
Info_Stat *info_stat_host;
info_stat_host = (Info_Stat *) malloc (sizeof(Info_Stat));
info_stat_host->nX = geom->nX;
info_stat_host->nY = geom->nY;
info_stat_host->nZ= geom->nZ;
info_stat_host->bounX = geom->bounX;
info_stat_host->bounY = geom->bounY;
info_stat_host->bounZ= geom->bounZ;
info_stat_host->subbounX = ceilf ((geom->sub_thresh)/(geom->delX));
info_stat_host->subbounY = ceilf ((geom->sub_thresh)/(geom->delY));
info_stat_host->subbounZ = ceilf ((geom->sub_thresh)/(geom->delZ));
info_stat_host->delX = geom->delX;
info_stat_host->delY = geom->delY;
info_stat_host->delZ= geom->delZ;
info_stat_host->x_min = geom->x_min;
info_stat_host->y_min = geom->y_min;
info_stat_host->z_min = geom->z_min;
info_stat_host->x_max = geom->x_max;
info_stat_host->y_max = geom->y_max;
info_stat_host->z_max = geom->z_max;
info_stat_host->sub_thresh = geom->sub_thresh;
info_stat_host->prop_thresh = geom->prop_thresh;
info_stat_host->sub_vox = geom->sub_vox;
info_stat_host->self_sub_vox = geom->self_sub_vox;
info_stat_host->g = phan->g;
info_stat_host->n = phan->n;
info_stat_host->no_tiss = phan->no_tiss;
info_stat_host->cm = C/phan->n;
info_stat_host->no_vox = geom->no_vox;
int i;
for(i=0; i < phan->no_tiss; i++){
info_stat_host->mu_tot[i] = phan->mu_abs[i] + phan->mu_sc[i];
info_stat_host->mu_sc[i] = phan->mu_sc[i];
}
return info_stat_host;
}
int main(int argc, char** argv )
{
time_t start_time, end_time;
time(&start_time);
dictionary *ini;
complex_double *gbar,*dgbar;
complex_double *W, *W1, *out, *out2, *out3, *src, *tmp1, *df, *src2, *out4;
flt_doub *g;
flt_doub *grada,*grads;
flt_doub *grada2,*grads2;
flt_doub delg, delg2;
int n;
int j,jj;
int jnk;
int tiss_idx;
int iip, jjp, kkp;
int r_ind,i;
int size;
float av_mua;
float av_mus;
av_mua = 0.01;
av_mus = 1;
if (argc != 2) {
printf("\n InverseRTE file.par\n");
printf(" file.par is the parameter file.\n\n");
exit(1);
}
// Load in the initialization file
ini = iniparser_load(argv[1]);
// Set up the geometry, phantom, etc
printf("Loading in geometry and phantom information...\n");
geom = LoadGeometry(ini);
phan = LoadPhantom(ini,1);
beam_src = LoadSource(ini);
printf("Done reading source information \n");
nL = iniparser_getint(ini,"Algorithm:nL",-1);
nTerms = iniparser_getint(ini,"Algorithm:nTerms",1);
double stepsizea, stepsizes;
stepsizea = iniparser_getdouble(ini,"Runtime:stepsizea",1e3);
stepsizes = iniparser_getdouble(ini,"Runtime:stepsizes",1e3);
int jnk2;
FILE *gFile;
if ((gFile = fopen(iniparser_getstring(ini,"Runtime:gFile",NULL),"r")) == NULL){
printf("Error in opening gfile. Exiting \n");
exit(0);
}
//printf("%s is gFile \n", gFile);
fread(&jnk,sizeof(int),1,gFile);
fread(&jnk2,sizeof(int),1,gFile);
printf("Done reading gfile integers %d and %d \n", jnk, jnk2);
g = (flt_doub *) malloc(sizeof(flt_doub) * geom->nX * geom->nY);
fread(g,sizeof(flt_doub),geom->nX * geom->nY,gFile);
fclose(gFile);
info_stat_host = populate_info_dev();
size = (nL+1)*(nL+1)* geom->no_vox;
printf("Generating the spherical harmonic terms \n");
generate_sph_harm_terms();
unsigned int timer;
int cnt,k;
flt_doub tmp;
grada = (flt_doub *)malloc(sizeof(flt_doub)*phan->no_tiss);
grads = (flt_doub *)malloc(sizeof(flt_doub)*phan->no_tiss);
grada2 = (flt_doub *)malloc(sizeof(flt_doub)*phan->no_tiss);
grads2 = (flt_doub *)malloc(sizeof(flt_doub)*phan->no_tiss);
byte *flag_grada, *flag_grads;
flag_grada = (byte*) malloc ( sizeof(byte)* phan->no_tiss);
flag_grads = (byte*) malloc ( sizeof(byte)* phan->no_tiss);
memset(flag_grada, 0, phan->no_tiss);
memset(flag_grads, 0, phan->no_tiss);
byte flag_net;
int r_ind_phan;
W = alloc_dist();
out2 = alloc_dist();
out3 = alloc_dist();
out4 = alloc_dist();
src = alloc_dist();
src2 = alloc_dist();
tmp1 = alloc_dist();
float mus_hat, mua_hat;
int abs_ind, sc_ind;
int max_ind = 10;
flt_doub *mua, *mus;
mua = (flt_doub *) malloc(sizeof(flt_doub)*max_ind);
mus = (flt_doub *) malloc(sizeof(flt_doub)*max_ind);
FILE *abs_fid, *sc_fid, *grada_fid, *grads_fid, *res_fid1, *res_fid2,*grada_fid2,*grads_fid2;
abs_fid = fopen("abs_co.dat","w");
sc_fid = fopen("sc_co.dat","w");
grada_fid = fopen("grada_terms_high.dat","w");
grads_fid = fopen("grads_terms_high.dat","w");
grada_fid2 = fopen("grada_terms2_high.dat","w");
grads_fid2 = fopen("grads_terms2_high.dat","w");
res_fid1 = fopen("res_terms1_high.dat","w");
res_fid2 = fopen("res_terms2_high.dat","w");
for(sc_ind = 0; sc_ind < max_ind; sc_ind++){
mus[sc_ind] = sc_ind*0.25 + 0.25;
}
for(abs_ind=0; abs_ind< max_ind; abs_ind++){
mua[abs_ind] = abs_ind*0.0025 + 0.0025;
}
fwrite(mua, sizeof(flt_doub),max_ind, abs_fid);
fwrite(mus, sizeof(flt_doub),max_ind, sc_fid);
for(sc_ind = 0; sc_ind < max_ind; sc_ind++){
for(abs_ind=0; abs_ind< max_ind; abs_ind++){
for(tiss_idx = 1; tiss_idx < phan->no_tiss; tiss_idx++){
phan->mu_abs[tiss_idx] = mua[abs_ind];
phan->mu_sc[tiss_idx] = mus[sc_ind];
}
populate_info_dev();
generate_diag_terms_host();
generate_source_beam(src);
copy_dist(src,out2);
copy_dist(src,W);
Neumann(W,out2,1);
// THE GRADIENT of mua
gbar = generate_trans_image(out2,1);
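/*
 * Gradient computation (as implemented below): the data-fit term accumulated
 * into delg is sum_j (log g_j - log gbar_j)^2, where g is the measured
 * transmission image and gbar the image predicted by the Neumann-series RTE
 * solve above. For each tissue type, the distribution is perturbed only on
 * voxels of that tissue, propagated through the same Neumann series to get
 * dgbar, and the gradient with respect to log(mu_a) (resp. log(mu_s)) is
 * accumulated as -2 * sum_j (log g_j - log gbar_j) * mu * dgbar_j / gbar_j.
 * grada2/grads2 hold the corresponding gradients of the unlogged residual
 * sum_j (g_j - gbar_j)^2.
 */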
#if 1
//printf("Compute the gradient with respect to mua");
for (tiss_idx = 1; tiss_idx < phan->no_tiss; tiss_idx++) {
if(flag_grada[tiss_idx] == 0){
copy_dist(out2,out3);
for (iip=0; iip<geom->nZ; iip++) {
for (jjp=0; jjp<geom->nY; jjp++) {
for (kkp=0; kkp<geom->nX; kkp++) {
r_ind = (iip + geom->bounZ)* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + (jjp + geom->bounY)* (geom->nX + 2*geom->bounX) + (kkp + geom->bounX);
r_ind_phan =iip* geom->nX * geom->nY + jjp * geom->nX + kkp;
if ( phan->tiss_type[r_ind_phan] != tiss_idx) {
for (n=0;n<(nL+1)*(nL+1);n++) {
out3[VOX_TO_SPIND(r_ind, n,(nL+1)*(nL+1))] = 0+0*I;
}
}
}}}
scale_dist(out3,-1.0*C);
copy_dist(out3,src);
Neumann(src,out3,1);
dgbar = generate_trans_image(out3,0);
grada[tiss_idx] = 0.0;
grada2[tiss_idx] = 0.0;
for (j = 0;j<geom->nX*geom->nY;j++) {
if(g[j] || gbar[j].real()){
grada[tiss_idx] = grada[tiss_idx] - 2*phan->mu_abs[tiss_idx]*(log(g[j])-log(gbar[j].real()))*dgbar[j].real()/(gbar[j].real());
grada2[tiss_idx] = grada2[tiss_idx] - 2*((g[j])-(gbar[j].real()))*dgbar[j].real();
}
}
}
}
//printf("Compute the gradient with respect to mus \n");
for (tiss_idx = 1; tiss_idx < phan->no_tiss ; tiss_idx++) {
if(flag_grads[tiss_idx] == 0){
copy_dist(out2,out3);
scale_dist(out3,-1.0*C);
copy_dist(out2,tmp1);
PropScatmu1(geom,phan,nL,tmp1);
add_dist(tmp1,out3,out3);
for (iip=0; iip<geom->nZ; iip++) {
for (jjp=0; jjp<geom->nY; jjp++) {
for (kkp=0; kkp<geom->nX; kkp++) {
r_ind_phan =iip* geom->nX * geom->nY + jjp * geom->nX + kkp;
if ( phan->tiss_type[r_ind_phan] != tiss_idx) {
for (n=0;n<(nL+1)*(nL+1);n++) {
out3[VOX_TO_SPIND(r_ind, n, (nL+1)*(nL+1))] = 0+0*I;
}
}
}}}
src = alloc_dist();
copy_dist(out3,src);
Neumann(src,out3,1);
dgbar = generate_trans_image(out3,0);
grads[tiss_idx] = 0.0;
grads2[tiss_idx] = 0.0;
for (j = 0;j<geom->nX*geom->nY;j++) {
if(g[j] || gbar[j].real()){
grads[tiss_idx] = grads[tiss_idx] - 2*phan->mu_sc[tiss_idx]*(log(g[j])-log(gbar[j].real()))*dgbar[j].real()/(gbar[j].real());
grads2[tiss_idx] = grads2[tiss_idx] - 2*((g[j])-(gbar[j].real()))*dgbar[j].real();
}
}
}
}
for (tiss_idx = 1;tiss_idx< phan->no_tiss;tiss_idx++) {
printf("Old value of mua = %e mus = %e for tissue type = %d\n", phan->mu_abs[tiss_idx], phan->mu_sc[tiss_idx],tiss_idx);
mua_hat = log(phan->mu_abs[tiss_idx]/av_mua) - stepsizea*grada[tiss_idx];
phan->mu_abs[tiss_idx] = av_mua*exp(mua_hat);
mus_hat = log(phan->mu_sc[tiss_idx]/av_mus) - stepsizes*grads[tiss_idx];
phan->mu_sc[tiss_idx] = av_mus*exp(mus_hat);
printf("Value of mua grad = %e musgrad = %e for tissue type %d \n", grada[tiss_idx], grads[tiss_idx], tiss_idx);
printf("New value of mua = %e mus = %e for tissue type = %d\n", phan->mu_abs[tiss_idx], phan->mu_sc[tiss_idx],tiss_idx);
}
#endif
delg = 0.0;
delg2 = 0.0;
for (i=0;i<geom->nX*geom->nY;i++) {
if(g[i] || gbar[i].real()){
delg = delg + (log(gbar[i].real())-log(g[i]))*(log(gbar[i].real())-log(g[i]));
delg2 = delg2 + ((gbar[i].real())-(g[i]))*((gbar[i].real())-(g[i]));
}
}
tiss_idx = 1;
printf("Residual1 = %e mua grad = %e musgrad = %e \n",(delg), grada[tiss_idx], grads[tiss_idx]);
fwrite(grada, sizeof(flt_doub),1, grada_fid);
fwrite(grads, sizeof(flt_doub),1, grads_fid);
fwrite(grada2, sizeof(flt_doub),1, grada_fid2);
fwrite(grads2, sizeof(flt_doub),1, grads_fid2);
fwrite(&delg,sizeof(flt_doub),1,res_fid1);
fwrite(&delg2,sizeof(flt_doub),1,res_fid2);
}
}
fclose(abs_fid);
fclose(sc_fid);
fclose(grada_fid);
fclose(grads_fid);
fclose(grada_fid2);
fclose(grads_fid2);
fclose(res_fid1);
fclose(res_fid2);
free(g);
free(beam_src);
free(phan);
free(geom);
free(gbar);
free(dgbar);
free(src);
free(src2);
/* 'out' is declared but never allocated in this routine, so it must not be freed. */
free(out2);
free(out3);
free(out4);
iniparser_freedict(ini);
time(&end_time);
printf("\n*------------------------------------------*\n");
printf("\nThe total time taken by the code = %d sec \n", end_time - start_time);
printf("\n*------------------------------------------*\n");
return(0);
}
|
1dc5163009f4a21009a6ca23977f8288499ae539.cu
|
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <math.h>
#include "rte.h"
#include <pthread.h>
__host__ Info_Stat * populate_info_dev();
Geometry *geom;
Phantom *phan;
Source *beam_src;
complex_double *diag_terms_host;
complex_double *sph_harm;
Info_Stat *info_stat_host;
SHORT nL;
int nTerms;
__host__ int get_vind_phanind_host(int dep, int row, int col){
return ((geom->bounZ + dep) * (geom->nX + 2 * geom->bounX ) * (geom->nY + 2 * geom->bounY ) /* reached the correct layer */ + ( geom->bounY + row)* (geom->nX + 2 * geom->bounX ) + (geom->bounX + col));
}
__host__ Info_Stat * populate_info_dev(){
Info_Stat *info_stat_host;
info_stat_host = (Info_Stat *) malloc (sizeof(Info_Stat));
info_stat_host->nX = geom->nX;
info_stat_host->nY = geom->nY;
info_stat_host->nZ= geom->nZ;
info_stat_host->bounX = geom->bounX;
info_stat_host->bounY = geom->bounY;
info_stat_host->bounZ= geom->bounZ;
info_stat_host->subbounX = ceilf ((geom->sub_thresh)/(geom->delX));
info_stat_host->subbounY = ceilf ((geom->sub_thresh)/(geom->delY));
info_stat_host->subbounZ = ceilf ((geom->sub_thresh)/(geom->delZ));
info_stat_host->delX = geom->delX;
info_stat_host->delY = geom->delY;
info_stat_host->delZ= geom->delZ;
info_stat_host->x_min = geom->x_min;
info_stat_host->y_min = geom->y_min;
info_stat_host->z_min = geom->z_min;
info_stat_host->x_max = geom->x_max;
info_stat_host->y_max = geom->y_max;
info_stat_host->z_max = geom->z_max;
info_stat_host->sub_thresh = geom->sub_thresh;
info_stat_host->prop_thresh = geom->prop_thresh;
info_stat_host->sub_vox = geom->sub_vox;
info_stat_host->self_sub_vox = geom->self_sub_vox;
info_stat_host->g = phan->g;
info_stat_host->n = phan->n;
info_stat_host->no_tiss = phan->no_tiss;
info_stat_host->cm = C/phan->n;
info_stat_host->no_vox = geom->no_vox;
int i;
for(i=0; i < phan->no_tiss; i++){
info_stat_host->mu_tot[i] = phan->mu_abs[i] + phan->mu_sc[i];
info_stat_host->mu_sc[i] = phan->mu_sc[i];
}
return info_stat_host;
}
int main(int argc, char** argv )
{
time_t start_time, end_time;
time(&start_time);
dictionary *ini;
complex_double *gbar,*dgbar;
complex_double *W, *W1, *out, *out2, *out3, *src, *tmp1, *df, *src2, *out4;
flt_doub *g;
flt_doub *grada,*grads;
flt_doub *grada2,*grads2;
flt_doub delg, delg2;
int n;
int j,jj;
int jnk;
int tiss_idx;
int iip, jjp, kkp;
int r_ind,i;
int size;
float av_mua;
float av_mus;
av_mua = 0.01;
av_mus = 1;
if (argc != 2) {
printf("\n InverseRTE file.par\n");
printf(" file.par is the parameter file.\n\n");
exit(1);
}
// Load in the initialization file
ini = iniparser_load(argv[1]);
// Set up the geometry, phantom, etc
printf("Loading in geometry and phantom information...\n");
geom = LoadGeometry(ini);
phan = LoadPhantom(ini,1);
beam_src = LoadSource(ini);
printf("Done reading source information \n");
nL = iniparser_getint(ini,"Algorithm:nL",-1);
nTerms = iniparser_getint(ini,"Algorithm:nTerms",1);
double stepsizea, stepsizes;
stepsizea = iniparser_getdouble(ini,"Runtime:stepsizea",1e3);
stepsizes = iniparser_getdouble(ini,"Runtime:stepsizes",1e3);
int jnk2;
FILE *gFile;
if ((gFile = fopen(iniparser_getstring(ini,"Runtime:gFile",NULL),"r")) == NULL){
printf("Error in opening gfile. Exiting \n");
exit(0);
}
//printf("%s is gFile \n", gFile);
fread(&jnk,sizeof(int),1,gFile);
fread(&jnk2,sizeof(int),1,gFile);
printf("Done reading gfile integers %d and %d \n", jnk, jnk2);
g = (flt_doub *) malloc(sizeof(flt_doub) * geom->nX * geom->nY);
fread(g,sizeof(flt_doub),geom->nX * geom->nY,gFile);
fclose(gFile);
info_stat_host = populate_info_dev();
size = (nL+1)*(nL+1)* geom->no_vox;
printf("Generating the spherical harmonic terms \n");
generate_sph_harm_terms();
unsigned int timer;
int cnt,k;
flt_doub tmp;
grada = (flt_doub *)malloc(sizeof(flt_doub)*phan->no_tiss);
grads = (flt_doub *)malloc(sizeof(flt_doub)*phan->no_tiss);
grada2 = (flt_doub *)malloc(sizeof(flt_doub)*phan->no_tiss);
grads2 = (flt_doub *)malloc(sizeof(flt_doub)*phan->no_tiss);
byte *flag_grada, *flag_grads;
flag_grada = (byte*) malloc ( sizeof(byte)* phan->no_tiss);
flag_grads = (byte*) malloc ( sizeof(byte)* phan->no_tiss);
memset(flag_grada, 0, phan->no_tiss);
memset(flag_grads, 0, phan->no_tiss);
byte flag_net;
int r_ind_phan;
W = alloc_dist();
out2 = alloc_dist();
out3 = alloc_dist();
out4 = alloc_dist();
src = alloc_dist();
src2 = alloc_dist();
tmp1 = alloc_dist();
float mus_hat, mua_hat;
int abs_ind, sc_ind;
int max_ind = 10;
flt_doub *mua, *mus;
mua = (flt_doub *) malloc(sizeof(flt_doub)*max_ind);
mus = (flt_doub *) malloc(sizeof(flt_doub)*max_ind);
FILE *abs_fid, *sc_fid, *grada_fid, *grads_fid, *res_fid1, *res_fid2,*grada_fid2,*grads_fid2;
abs_fid = fopen("abs_co.dat","w");
sc_fid = fopen("sc_co.dat","w");
grada_fid = fopen("grada_terms_high.dat","w");
grads_fid = fopen("grads_terms_high.dat","w");
grada_fid2 = fopen("grada_terms2_high.dat","w");
grads_fid2 = fopen("grads_terms2_high.dat","w");
res_fid1 = fopen("res_terms1_high.dat","w");
res_fid2 = fopen("res_terms2_high.dat","w");
for(sc_ind = 0; sc_ind < max_ind; sc_ind++){
mus[sc_ind] = sc_ind*0.25 + 0.25;
}
for(abs_ind=0; abs_ind< max_ind; abs_ind++){
mua[abs_ind] = abs_ind*0.0025 + 0.0025;
}
fwrite(mua, sizeof(flt_doub),max_ind, abs_fid);
fwrite(mus, sizeof(flt_doub),max_ind, sc_fid);
for(sc_ind = 0; sc_ind < max_ind; sc_ind++){
for(abs_ind=0; abs_ind< max_ind; abs_ind++){
for(tiss_idx = 1; tiss_idx < phan->no_tiss; tiss_idx++){
phan->mu_abs[tiss_idx] = mua[abs_ind];
phan->mu_sc[tiss_idx] = mus[sc_ind];
}
populate_info_dev();
generate_diag_terms_host();
generate_source_beam(src);
copy_dist(src,out2);
copy_dist(src,W);
Neumann(W,out2,1);
// THE GRADIENT of mua
gbar = generate_trans_image(out2,1);
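/*
 * Gradient computation (as implemented below): the data-fit term accumulated
 * into delg is sum_j (log g_j - log gbar_j)^2, where g is the measured
 * transmission image and gbar the image predicted by the Neumann-series RTE
 * solve above. For each tissue type, the distribution is perturbed only on
 * voxels of that tissue, propagated through the same Neumann series to get
 * dgbar, and the gradient with respect to log(mu_a) (resp. log(mu_s)) is
 * accumulated as -2 * sum_j (log g_j - log gbar_j) * mu * dgbar_j / gbar_j.
 * grada2/grads2 hold the corresponding gradients of the unlogged residual
 * sum_j (g_j - gbar_j)^2.
 */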
#if 1
//printf("Compute the gradient with respect to mua");
for (tiss_idx = 1; tiss_idx < phan->no_tiss; tiss_idx++) {
if(flag_grada[tiss_idx] == 0){
copy_dist(out2,out3);
for (iip=0; iip<geom->nZ; iip++) {
for (jjp=0; jjp<geom->nY; jjp++) {
for (kkp=0; kkp<geom->nX; kkp++) {
r_ind = (iip + geom->bounZ)* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + (jjp + geom->bounY)* (geom->nX + 2*geom->bounX) + (kkp + geom->bounX);
r_ind_phan =iip* geom->nX * geom->nY + jjp * geom->nX + kkp;
if ( phan->tiss_type[r_ind_phan] != tiss_idx) {
for (n=0;n<(nL+1)*(nL+1);n++) {
out3[VOX_TO_SPIND(r_ind, n,(nL+1)*(nL+1))] = 0+0*I;
}
}
}}}
scale_dist(out3,-1.0*C);
copy_dist(out3,src);
Neumann(src,out3,1);
dgbar = generate_trans_image(out3,0);
grada[tiss_idx] = 0.0;
grada2[tiss_idx] = 0.0;
for (j = 0;j<geom->nX*geom->nY;j++) {
if(g[j] || gbar[j].real()){
grada[tiss_idx] = grada[tiss_idx] - 2*phan->mu_abs[tiss_idx]*(log(g[j])-log(gbar[j].real()))*dgbar[j].real()/(gbar[j].real());
grada2[tiss_idx] = grada2[tiss_idx] - 2*((g[j])-(gbar[j].real()))*dgbar[j].real();
}
}
}
}
//printf("Compute the gradient with respect to mus \n");
for (tiss_idx = 1; tiss_idx < phan->no_tiss ; tiss_idx++) {
if(flag_grads[tiss_idx] == 0){
copy_dist(out2,out3);
scale_dist(out3,-1.0*C);
copy_dist(out2,tmp1);
PropScatmu1(geom,phan,nL,tmp1);
add_dist(tmp1,out3,out3);
for (iip=0; iip<geom->nZ; iip++) {
for (jjp=0; jjp<geom->nY; jjp++) {
for (kkp=0; kkp<geom->nX; kkp++) {
r_ind_phan =iip* geom->nX * geom->nY + jjp * geom->nX + kkp;
if ( phan->tiss_type[r_ind_phan] != tiss_idx) {
for (n=0;n<(nL+1)*(nL+1);n++) {
out3[VOX_TO_SPIND(r_ind, n, (nL+1)*(nL+1))] = 0+0*I;
}
}
}}}
src = alloc_dist();
copy_dist(out3,src);
Neumann(src,out3,1);
dgbar = generate_trans_image(out3,0);
grads[tiss_idx] = 0.0;
grads2[tiss_idx] = 0.0;
for (j = 0;j<geom->nX*geom->nY;j++) {
if(g[j] || gbar[j].real()){
grads[tiss_idx] = grads[tiss_idx] - 2*phan->mu_sc[tiss_idx]*(log(g[j])-log(gbar[j].real()))*dgbar[j].real()/(gbar[j].real());
grads2[tiss_idx] = grads2[tiss_idx] - 2*((g[j])-(gbar[j].real()))*dgbar[j].real();
}
}
}
}
for (tiss_idx = 1;tiss_idx< phan->no_tiss;tiss_idx++) {
printf("Old value of mua = %e mus = %e for tissue type = %d\n", phan->mu_abs[tiss_idx], phan->mu_sc[tiss_idx],tiss_idx);
mua_hat = log(phan->mu_abs[tiss_idx]/av_mua) - stepsizea*grada[tiss_idx];
phan->mu_abs[tiss_idx] = av_mua*exp(mua_hat);
mus_hat = log(phan->mu_sc[tiss_idx]/av_mus) - stepsizes*grads[tiss_idx];
phan->mu_sc[tiss_idx] = av_mus*exp(mus_hat);
printf("Value of mua grad = %e musgrad = %e for tissue type %d \n", grada[tiss_idx], grads[tiss_idx], tiss_idx);
printf("New value of mua = %e mus = %e for tissue type = %d\n", phan->mu_abs[tiss_idx], phan->mu_sc[tiss_idx],tiss_idx);
}
#endif
delg = 0.0;
delg2 = 0.0;
for (i=0;i<geom->nX*geom->nY;i++) {
if(g[i] || gbar[i].real()){
delg = delg + (log(gbar[i].real())-log(g[i]))*(log(gbar[i].real())-log(g[i]));
delg2 = delg2 + ((gbar[i].real())-(g[i]))*((gbar[i].real())-(g[i]));
}
}
tiss_idx = 1;
printf("Residual1 = %e mua grad = %e musgrad = %e \n",(delg), grada[tiss_idx], grads[tiss_idx]);
fwrite(grada, sizeof(flt_doub),1, grada_fid);
fwrite(grads, sizeof(flt_doub),1, grads_fid);
fwrite(grada2, sizeof(flt_doub),1, grada_fid2);
fwrite(grads2, sizeof(flt_doub),1, grads_fid2);
fwrite(&delg,sizeof(flt_doub),1,res_fid1);
fwrite(&delg2,sizeof(flt_doub),1,res_fid2);
}
}
fclose(abs_fid);
fclose(sc_fid);
fclose(grada_fid);
fclose(grads_fid);
fclose(grada_fid2);
fclose(grads_fid2);
fclose(res_fid1);
fclose(res_fid2);
free(g);
free(beam_src);
free(phan);
free(geom);
free(gbar);
free(dgbar);
free(src);
free(src2);
/* 'out' is declared but never allocated in this routine, so it must not be freed. */
free(out2);
free(out3);
free(out4);
iniparser_freedict(ini);
time(&end_time);
printf("\n*------------------------------------------*\n");
printf("\nThe total time taken by the code = %d sec \n", end_time - start_time);
printf("\n*------------------------------------------*\n");
return(0);
}
|
18688f907b0bf777387508e82f1c34d660d021d7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2018 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file modulated_deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
*/
#include "deform_conv_cuda_kernel.h"
#include <cstdio>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename DType>
__device__ DType deformable_im2col_bilinear(const DType *bottom_data, const int data_width,
const int height, const int width, DType h, DType w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
DType lh = h - h_low;
DType lw = w - w_low;
DType hh = 1 - lh, hw = 1 - lw;
DType v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
DType v2 = 0;
if (h_low >=0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
DType v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
DType v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
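// The four weights above are the standard bilinear interpolation weights for
// the fractional sample position (h, w): each neighbour is weighted by the
// product of its (1 - distance) factors along both axes, and neighbours that
// fall outside the image contribute zero.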
template <typename DType>
__device__ DType get_gradient_weight(DType argmax_h, DType argmax_w,
const int h, const int w, const int height, const int width) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) {
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
DType weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename DType>
__device__ DType get_coordinate_weight(DType argmax_h, DType argmax_w,
const int height, const int width, const DType *im_data,
const int data_width, const int bp_dir) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) {
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
DType weight = 0;
if (bp_dir == 0) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
/*!
* \brief deformable_im2col gpu kernel.
* DO NOT call this directly. Use wrapper function deformable_im2col() instead;
*/
template <typename DType>
__global__ void deformable_im2col_gpu_kernel(const int n, const DType *data_im, const DType *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
DType *data_col) {
CUDA_KERNEL_LOOP(index, n) {
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const DType* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const DType* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const DType* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
//const DType map_h = i * dilation_h + offset_h;
//const DType map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
/*!\brief
 * gpu wrapper of the deformable_im2col algorithm
 * \param stream cuda stream used to launch the kernel
 * \param data_im pointer of images (N, C, H, W, ...) in the image batch
 * \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch
 * \param channels number of input channels
 * \param height, width input image spatial size
 * \param ksize_h, ksize_w kernel filter shape
 * \param pad_h, pad_w pad shape
 * \param stride_h, stride_w stride shape
 * \param dilation_h, dilation_w dilation shape
 * \param parallel_imgs number of images processed by one call
 * \param deformable_group #offset groups that deformable convolution uses
 * \param data_col column buffer pointer
 */
template <typename DType>
inline void deformable_im2col(hipStream_t stream,
const DType *data_im, const DType *data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, DType *data_col) {
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
//index_t num_spatial_axes = kernel_shape.ndim();
//CHECK_LT(num_spatial_axes, mshadow::cuda::kBaseThreadNum);
//index_t channel_per_deformable_group = im_shape[1] / deformable_group;
//index_t num_kernels = im_shape[1] * col_shape.ProdShape(1, col_shape.ndim());
//using namespace mxnet_op;
//switch (num_spatial_axes) {
//case 2:
// deformable_im2col_gpu_kernel<DType> // NOLINT_NEXT_LINE(whitespace/operators)
// <<<cuda_get_num_blocks(num_kernels), mshadow::cuda::kBaseThreadNum,
// 0, mshadow::Stream<gpu>::GetStream(s)>>>(
// num_kernels, data_im, data_offset, im_shape[2], im_shape[3], kernel_shape[0], kernel_shape[1],
// pad[0], pad[1], stride[0], stride[1], dilation[0], dilation[1], channel_per_deformable_group,
// col_shape[1], im_shape[1], deformable_group, col_shape[2], col_shape[3], data_col);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_im2col_gpu_kernel);
// break;
//default:
// LOG(FATAL) << "im2col_nd_gpu does not support computation with "
// << num_spatial_axes << " spatial axes";
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream,
num_kernels, data_im, data_offset, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, channels, deformable_group, height_col, width_col, data_col);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in deformable_im2col: %s\n", hipGetErrorString(err));
}
}
template void deformable_im2col<float>(
hipStream_t stream, const float *data_im, const float *data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, float *data_col);
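// ---------------------------------------------------------------------------
// Layout note (descriptive only, derived from the index arithmetic in
// deformable_im2col_gpu_kernel above): the column buffer written by
// deformable_im2col is laid out as
// data_col[channels * ksize_h * ksize_w][parallel_imgs][height_col][width_col],
// i.e. one row per (input channel, kernel tap) pair and one column per
// (image, output location). Each of the num_kernels threads fills the
// ksize_h * ksize_w taps of a single (channel, image, output location) entry.
// ---------------------------------------------------------------------------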
/*!
* \brief deformable_col2im gpu kernel.
* \brief DO NOT call this directly. Use wrapper function deformable_col2im() instead;
*/
template <typename DType>
__global__ void deformable_col2im_gpu_kernel(const int n, const DType *data_col, const DType *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
DType *grad_im) {
CUDA_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1
) {
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
DType weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
/*!\brief
* gpu function of deformable_col2im algorithm
* \param stream device stream
* \param data_col start pointer of the column buffer to be filled
* \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param deformable_group #offset group that deformable convolution use
* \param grad_im pointer of images (N, C, H, W,...) in the image batch
*/
template <typename DType>
inline void deformable_col2im(hipStream_t stream,
const DType *data_col, const DType *data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
DType* grad_im) {
// todo: make sure parallel_imgs is passed in correctly
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
// index_t num_spatial_axes = kernel_shape.ndim();
// index_t im_size = im_shape.ProdShape(1, im_shape.ndim());
// index_t channel_per_deformable_group = im_shape[1] / deformable_group;
// index_t num_kernels = col_shape.ProdShape(0, col_shape.ndim());
// num_axes should be smaller than block size
// CHECK_LT(num_spatial_axes, mshadow::cuda::kBaseThreadNum);
// using namespace mxnet_op;
// switch (num_spatial_axes) {
// case 2:
// // To avoid involving atomic operations, we will launch one kernel per
// // bottom dimension, and then in the kernel add up the top dimensions.
// // NOLINT_NEXT_LINE(whitespace/operators)
// deformable_col2im_gpu_kernel<DType><<<cuda_get_num_blocks(num_kernels), mshadow::cuda::kBaseThreadNum,
// 0, mshadow::Stream<gpu>::GetStream(s)>>>(
// num_kernels, data_col, data_offset, im_shape[1], im_shape[2], im_shape[3],
// kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
// dilation[0], dilation[1], channel_per_deformable_group,
// col_shape[1], deformable_group, col_shape[2], col_shape[3], grad_im, req);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_col2im_gpu_kernel);
// break;
// default:
// LOG(FATAL) << "col2im_nd_gpu does not support computation with "
// << num_spatial_axes << " spatial axes";
hipLaunchKernelGGL(( deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream,
num_kernels, data_col, data_offset, channels, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, deformable_group, height_col, width_col, grad_im);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in deformable_col2im: %s\n", hipGetErrorString(err));
}
}
template void deformable_col2im<float>(
hipStream_t stream, const float *data_col, const float *data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, float *grad_im);
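// ---------------------------------------------------------------------------
// Note (descriptive only): deformable_col2im_gpu_kernel accumulates into
// grad_im exclusively through atomicAdd, so gradients from overlapping
// sampling windows are summed in place. The wrapper never clears grad_im;
// callers are presumably expected to zero the buffer (e.g. with hipMemset)
// before invoking deformable_col2im when a fresh gradient is wanted.
// ---------------------------------------------------------------------------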
/*!
* \brief deformable_col2im_coord gpu kernel.
* \brief DO NOT call this directly. Use wrapper function deformable_col2im_coord() instead;
*/
template <typename DType>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const DType *data_col,
const DType *data_im, const DType *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, DType *grad_offset) {
CUDA_KERNEL_LOOP(index, n) {
DType val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const DType *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group *
batch_size * width_col * height_col;
const DType *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const DType *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
}
const DType weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
/*!\brief
* gpu function of deformable_col2im_coord algorithm
* \param stream device stream
* \param data_col start pointer of the column buffer to be filled
* \param data_im pointer of images (N, C, H, W, ...) in the image batch
* \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param deformable_group #offset group that deformable convolution use
* \param grad_offset pointer of the offsets (N, deformable_group*kernel_h*kernel_w*2, H, W,...) in the offset batch
*/
template <typename DType>
inline void deformable_col2im_coord(hipStream_t stream,
const DType *data_col, const DType *data_im, const DType *data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, DType *grad_offset) {
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;
// index_t num_spatial_axes = kernel_shape.ndim();
// index_t num_kernels = col_shape[1] * col_shape[2] * col_shape[3] * 2 * kernel_shape[0] * kernel_shape[1] * deformable_group;
// index_t channel_per_deformable_group = col_shape[0] / deformable_group;
// num_axes should be smaller than block size
// CHECK_LT(num_spatial_axes, mshadow::cuda::kBaseThreadNum);
// using namespace mxnet_op;
// switch (num_spatial_axes) {
// case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
// deformable_col2im_coord_gpu_kernel<DType> << <cuda_get_num_blocks(num_kernels), mshadow::cuda::kBaseThreadNum,
// 0, mshadow::Stream<gpu>::GetStream(s) >> >(
// num_kernels, data_col, data_im, data_offset, im_shape[1], im_shape[2], im_shape[3],
// kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
// dilation[0], dilation[1], channel_per_deformable_group,
// col_shape[1], 2 * kernel_shape[0] * kernel_shape[1] * deformable_group, deformable_group, col_shape[2], col_shape[3], grad_offset, req);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_col2im_coord_gpu_kernel);
// break;
// default:
// LOG(FATAL) << "col2im_nd_gpu does not support computation with "
// << num_spatial_axes << " spatial axes";
hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream,
num_kernels, data_col, data_im, data_offset, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
height_col, width_col, grad_offset);
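// Launch-error check, mirroring the deformable_im2col / deformable_col2im
// wrappers above.
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in deformable_col2im_coord: %s\n", hipGetErrorString(err));
}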
}
template void
deformable_col2im_coord(hipStream_t stream, const float *data_col,
const float *data_im, const float *data_offset,
const int channels, const int height, const int width,
const int ksize_h, const int ksize_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, float *grad_offset);
|
18688f907b0bf777387508e82f1c34d660d021d7.cu
|
/*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2018 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file modulated_deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
*/
#include "deform_conv_cuda_kernel.h"
#include <cstdio>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename DType>
__device__ DType deformable_im2col_bilinear(const DType *bottom_data, const int data_width,
const int height, const int width, DType h, DType w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
DType lh = h - h_low;
DType lw = w - w_low;
DType hh = 1 - lh, hw = 1 - lw;
DType v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
DType v2 = 0;
if (h_low >=0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
DType v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
DType v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
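// Worked example (illustrative only): for a fractional sample at h = 1.25,
// w = 2.5, the code above gives h_low = 1, w_low = 2, lh = 0.25, lw = 0.5,
// hh = 0.75, hw = 0.5, so the four corner weights are
// w1 = hh*hw = 0.375, w2 = hh*lw = 0.375, w3 = lh*hw = 0.125, w4 = lh*lw = 0.125,
// which sum to 1, i.e. standard bilinear interpolation of the four neighbours.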
template <typename DType>
__device__ DType get_gradient_weight(DType argmax_h, DType argmax_w,
const int h, const int w, const int height, const int width) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) {
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
DType weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename DType>
__device__ DType get_coordinate_weight(DType argmax_h, DType argmax_w,
const int height, const int width, const DType *im_data,
const int data_width, const int bp_dir) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) {
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
DType weight = 0;
if (bp_dir == 0) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
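// Note (descriptive only, derived from the two branches above): bp_dir == 0
// returns the partial derivative of the bilinear sample with respect to the
// fractional h coordinate and bp_dir == 1 the derivative with respect to w.
// deformable_col2im_coord_gpu_kernel below selects the branch via
// offset_c % 2, matching the (h, w) interleaving of the offset channels.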
/*!
* \brief deformable_im2col gpu kernel.
* DO NOT call this directly. Use wrapper function im2col() instead;
*/
template <typename DType>
__global__ void deformable_im2col_gpu_kernel(const int n, const DType *data_im, const DType *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
DType *data_col) {
CUDA_KERNEL_LOOP(index, n) {
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const DType* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const DType* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const DType* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
//const DType map_h = i * dilation_h + offset_h;
//const DType map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
/*!\brief
* gpu function of deformable_im2col algorithm
* \param stream device stream
* \param data_im pointer of images (N, C, H, W, ...) in the image batch
* \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape (#channels, N, output_im_height, output_im_width, ...)
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param deformable_group #offset group that deformable convolution use
* \param data_col column buffer pointer
*/
template <typename DType>
inline void deformable_im2col(cudaStream_t stream,
const DType *data_im, const DType *data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, DType *data_col) {
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
//index_t num_spatial_axes = kernel_shape.ndim();
//CHECK_LT(num_spatial_axes, mshadow::cuda::kBaseThreadNum);
//index_t channel_per_deformable_group = im_shape[1] / deformable_group;
//index_t num_kernels = im_shape[1] * col_shape.ProdShape(1, col_shape.ndim());
//using namespace mxnet_op;
//switch (num_spatial_axes) {
//case 2:
// deformable_im2col_gpu_kernel<DType> // NOLINT_NEXT_LINE(whitespace/operators)
// <<<cuda_get_num_blocks(num_kernels), mshadow::cuda::kBaseThreadNum,
// 0, mshadow::Stream<gpu>::GetStream(s)>>>(
// num_kernels, data_im, data_offset, im_shape[2], im_shape[3], kernel_shape[0], kernel_shape[1],
// pad[0], pad[1], stride[0], stride[1], dilation[0], dilation[1], channel_per_deformable_group,
// col_shape[1], im_shape[1], deformable_group, col_shape[2], col_shape[3], data_col);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_im2col_gpu_kernel);
// break;
//default:
// LOG(FATAL) << "im2col_nd_gpu does not support computation with "
// << num_spatial_axes << " spatial axes";
deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
num_kernels, data_im, data_offset, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, channels, deformable_group, height_col, width_col, data_col);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
}
}
template void deformable_im2col<float>(
cudaStream_t stream, const float *data_im, const float *data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, float *data_col);
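// ---------------------------------------------------------------------------
// Hedged usage sketch (illustrative only; the function name and the sizes
// chosen here are hypothetical and not part of the original interface). It
// shows one plausible way to size the image, offset and column buffers and
// call the float instantiation above on a stream.
// ---------------------------------------------------------------------------
static void deformable_im2col_usage_sketch(cudaStream_t stream) {
const int N = 1, C = 3, H = 8, W = 8, KH = 3, KW = 3;
const int pad = 1, stride = 1, dilation = 1, groups = 1;
const int H_out = (H + 2 * pad - (dilation * (KH - 1) + 1)) / stride + 1;
const int W_out = (W + 2 * pad - (dilation * (KW - 1) + 1)) / stride + 1;
float *d_im = NULL, *d_offset = NULL, *d_col = NULL;
cudaMalloc(reinterpret_cast<void **>(&d_im), sizeof(float) * N * C * H * W);
cudaMalloc(reinterpret_cast<void **>(&d_offset), sizeof(float) * N * groups * 2 * KH * KW * H_out * W_out);
cudaMalloc(reinterpret_cast<void **>(&d_col), sizeof(float) * C * KH * KW * N * H_out * W_out);
cudaMemset(d_im, 0, sizeof(float) * N * C * H * W);
cudaMemset(d_offset, 0, sizeof(float) * N * groups * 2 * KH * KW * H_out * W_out);
deformable_im2col<float>(stream, d_im, d_offset, C, H, W, KH, KW,
pad, pad, stride, stride, dilation, dilation, N, groups, d_col);
cudaFree(d_im);
cudaFree(d_offset);
cudaFree(d_col);
}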
/*!
* \brief deformable_col2im gpu kernel.
* \brief DO NOT call this directly. Use wrapper function deformable_col2im() instead;
*/
template <typename DType>
__global__ void deformable_col2im_gpu_kernel(const int n, const DType *data_col, const DType *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
DType *grad_im) {
CUDA_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1
) {
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
DType weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
/*!\brief
* gpu function of deformable_col2im algorithm
* \param stream device stream
* \param data_col start pointer of the column buffer to be filled
* \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param deformable_group #offset group that deformable convolution use
* \param grad_im pointer of images (N, C, H, W,...) in the image batch
*/
template <typename DType>
inline void deformable_col2im(cudaStream_t stream,
const DType *data_col, const DType *data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
DType* grad_im) {
// todo: make sure parallel_imgs is passed in correctly
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
// index_t num_spatial_axes = kernel_shape.ndim();
// index_t im_size = im_shape.ProdShape(1, im_shape.ndim());
// index_t channel_per_deformable_group = im_shape[1] / deformable_group;
// index_t num_kernels = col_shape.ProdShape(0, col_shape.ndim());
// num_axes should be smaller than block size
// CHECK_LT(num_spatial_axes, mshadow::cuda::kBaseThreadNum);
// using namespace mxnet_op;
// switch (num_spatial_axes) {
// case 2:
// // To avoid involving atomic operations, we will launch one kernel per
// // bottom dimension, and then in the kernel add up the top dimensions.
// // NOLINT_NEXT_LINE(whitespace/operators)
// deformable_col2im_gpu_kernel<DType><<<cuda_get_num_blocks(num_kernels), mshadow::cuda::kBaseThreadNum,
// 0, mshadow::Stream<gpu>::GetStream(s)>>>(
// num_kernels, data_col, data_offset, im_shape[1], im_shape[2], im_shape[3],
// kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
// dilation[0], dilation[1], channel_per_deformable_group,
// col_shape[1], deformable_group, col_shape[2], col_shape[3], grad_im, req);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_col2im_gpu_kernel);
// break;
// default:
// LOG(FATAL) << "col2im_nd_gpu does not support computation with "
// << num_spatial_axes << " spatial axes";
deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
num_kernels, data_col, data_offset, channels, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, deformable_group, height_col, width_col, grad_im);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in deformable_col2im: %s\n", cudaGetErrorString(err));
}
}
template void deformable_col2im<float>(
cudaStream_t stream, const float *data_col, const float *data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, float *grad_im);
/*!
* \brief deformable_col2im_coord gpu kernel.
* \brief DO NOT call this directly. Use wrapper function deformable_col2im_coord() instead;
*/
template <typename DType>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const DType *data_col,
const DType *data_im, const DType *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, DType *grad_offset) {
CUDA_KERNEL_LOOP(index, n) {
DType val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const DType *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group *
batch_size * width_col * height_col;
const DType *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const DType *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
}
const DType weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
/*!\brief
* gpu function of deformable_col2im_coord algorithm
* \param stream device stream
* \param data_col start pointer of the column buffer to be filled
* \param data_im pointer of images (N, C, H, W, ...) in the image batch
* \param data_offset pointer of offsets (N, deformable_group*kernel_h*kernel_w*2, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param deformable_group #offset group that deformable convolution use
* \param grad_offset pointer of the offsets (N, deformable_group*kernel_h*kernel_w*2, H, W,...) in the offset batch
*/
template <typename DType>
inline void deformable_col2im_coord(cudaStream_t stream,
const DType *data_col, const DType *data_im, const DType *data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, DType *grad_offset) {
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;
// index_t num_spatial_axes = kernel_shape.ndim();
// index_t num_kernels = col_shape[1] * col_shape[2] * col_shape[3] * 2 * kernel_shape[0] * kernel_shape[1] * deformable_group;
// index_t channel_per_deformable_group = col_shape[0] / deformable_group;
// num_axes should be smaller than block size
// CHECK_LT(num_spatial_axes, mshadow::cuda::kBaseThreadNum);
// using namespace mxnet_op;
// switch (num_spatial_axes) {
// case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
// deformable_col2im_coord_gpu_kernel<DType> << <cuda_get_num_blocks(num_kernels), mshadow::cuda::kBaseThreadNum,
// 0, mshadow::Stream<gpu>::GetStream(s) >> >(
// num_kernels, data_col, data_im, data_offset, im_shape[1], im_shape[2], im_shape[3],
// kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
// dilation[0], dilation[1], channel_per_deformable_group,
// col_shape[1], 2 * kernel_shape[0] * kernel_shape[1] * deformable_group, deformable_group, col_shape[2], col_shape[3], grad_offset, req);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_col2im_coord_gpu_kernel);
// break;
// default:
// LOG(FATAL) << "col2im_nd_gpu does not support computation with "
// << num_spatial_axes << " spatial axes";
deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
num_kernels, data_col, data_im, data_offset, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
height_col, width_col, grad_offset);
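// Launch-error check, mirroring the deformable_im2col / deformable_col2im
// wrappers above.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in deformable_col2im_coord: %s\n", cudaGetErrorString(err));
}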
}
template void
deformable_col2im_coord(cudaStream_t stream, const float *data_col,
const float *data_im, const float *data_offset,
const int channels, const int height, const int width,
const int ksize_h, const int ksize_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, float *grad_offset);
|
eb9fef749d36b73a851c05c52a741b6ac824607b.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
*
* This sample demonstrates implements matrix multiplication which makes use of shared memory
* to ensure data reuse, the matrix multiplication is done using tiling approach.
* With compute capability 8.0 or higher the CUDA kernels involved uses asynchronously copy data
* from global to shared memory; a.k.a., async-copy.
* This sample has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <cuda_pipeline.h>
#if __CUDA_ARCH__ >= 700
#include <cuda_awbarrier.h>
#endif
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
namespace nvcuda_namespace = nvcuda::experimental;
enum kernels
{
AsyncCopyMultiStageLargeChunk = 0,
AsyncCopyLargeChunk = 1,
AsyncCopyLargeChunkAWBarrier = 2,
AsyncCopyMultiStage = 3,
AsyncCopySingleStage = 4,
Naive = 5,
NaiveLargeChunk = 6
};
const char* kernelNames[] = {"AsyncCopyMultiStageLargeChunk", "AsyncCopyLargeChunk",
"AsyncCopyLargeChunkAWBarrier", "AsyncCopyMultiStage",
"AsyncCopySingleStage", "Naive", "NaiveLargeChunk"};
#define USE_CPP_API 0
constexpr int blockSize = 16;
// Multi Stage memcpy_async pipeline with large chunk copy
template <int BLOCK_SIZE> __global__ void MatrixMulAsyncCopyMultiStageLargeChunk(float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B, int wA,
int wB) {
// Requires BLOCK_SIZE % 4 == 0
// Multi-stage pipeline version
constexpr size_t maxPipelineStages = 4;
// Declaration of the shared memory array As used to
// store the sub-matrix of A for each stage
__shared__ float As[maxPipelineStages][BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B for each stage
__shared__ float Bs[maxPipelineStages][BLOCK_SIZE][BLOCK_SIZE];
float Csub = 0.0;
// Index of the first sub-matrix of A processed by the block
const int aBegin = wA * (BLOCK_SIZE) * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
const int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
const int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
const int t4x = threadIdx.x * 4 ;
#if USE_CPP_API
nvcuda_namespace::pipeline pipe;
#endif
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin, i = 0, aStage = aBegin, bStage = bBegin, iStage = 0; a <= aEnd; a += aStep, b += bStep, ++i ) {
// Load the matrices from device memory to shared memory; each thread loads
// one element of each matrix
for ( ; aStage <= a + aStep * maxPipelineStages ; aStage += aStep, bStage += bStep, ++iStage )
{
if ( aStage <= aEnd && t4x < BLOCK_SIZE )
{
// Rotating buffer
const int j = iStage % maxPipelineStages;
float4 * const A4s = reinterpret_cast<float4*>(& As[j][threadIdx.y][t4x]);
float4 * const B4s = reinterpret_cast<float4*>(& Bs[j][threadIdx.y][t4x]);
const float4 * const A4 = reinterpret_cast<const float4*>(& A[aStage + wA * threadIdx.y + t4x]);
const float4 * const B4 = reinterpret_cast<const float4*>(& B[bStage + wB * threadIdx.y + t4x]); // B tile starts at bStage with row stride wB
#if USE_CPP_API
nvcuda_namespace::memcpy_async(*A4s,*A4, pipe);
nvcuda_namespace::memcpy_async(*B4s,*B4, pipe);
#else
__pipeline_memcpy_async(A4s, A4, sizeof(float4));
__pipeline_memcpy_async(B4s, B4, sizeof(float4));
#endif
}
#if USE_CPP_API
pipe.commit();
#else
__pipeline_commit();
#endif
}
#if USE_CPP_API
pipe.wait_prior<maxPipelineStages-1>();
#else
__pipeline_wait_prior(maxPipelineStages-1);
#endif
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Rotating buffer
const int j = i % maxPipelineStages;
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[j][threadIdx.y][k] * Bs[j][k][threadIdx.x];
}
// Don't have to synchronize because
// next iteration is loading to a different buffer
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
}
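// Shared-memory footprint note (descriptive only): with BLOCK_SIZE = blockSize
// = 16 and maxPipelineStages = 4, As and Bs above each occupy
// 4 * 16 * 16 * sizeof(float) = 4096 bytes, i.e. 8192 bytes of static shared
// memory per block for the four in-flight pipeline stages.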
// Single Stage memcpy_async pipeline with Large copy chunk (float4)
template <int BLOCK_SIZE> __global__ void MatrixMulAsyncCopyLargeChunk(float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B, int wA,
int wB) {
// Requires BLOCK_SIZE % 4 == 0
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Single-stage pipeline version
float Csub = 0.0;
const int t4x = threadIdx.x * 4;
#if USE_CPP_API
nvcuda_namespace::pipeline pipe;
#endif
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Load the matrices from device memory to shared memory;
// a subset of threads loads a contiguous chunk of elements.
// Previously, per-thread:
// As[ty][tx] = A[a + wA * ty + tx];
// Bs[ty][tx] = B[b + wB * ty + tx];
// Now, one fourth of the threads load four elements of each matrix
if ( t4x < BLOCK_SIZE ) {
float4 * const A4s = reinterpret_cast<float4*>(& As[threadIdx.y][t4x]);
float4 * const B4s = reinterpret_cast<float4*>(& Bs[threadIdx.y][t4x]);
const float4 * const A4 = reinterpret_cast<const float4*>(& A[a + wA * threadIdx.y + t4x]);
const float4 * const B4 = reinterpret_cast<const float4*>(& B[b + wB * threadIdx.y + t4x]); // B tile starts at b with row stride wB, matching the per-thread comment above
#if USE_CPP_API
nvcuda_namespace::memcpy_async(*A4s,*A4,pipe);
nvcuda_namespace::memcpy_async(*B4s,*B4,pipe);
pipe.commit_and_wait();
#else
__pipeline_memcpy_async(A4s, A4, sizeof(float4));
__pipeline_memcpy_async(B4s, B4, sizeof(float4));
__pipeline_commit();
__pipeline_wait_prior(0);
#endif
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
}
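// Copy-width note (descriptive only): in the kernel above only threads with
// t4x < BLOCK_SIZE (one fourth of the threads in x) issue copies, and each
// __pipeline_memcpy_async moves sizeof(float4) = 16 bytes. This is why the
// kernel requires BLOCK_SIZE % 4 == 0 and relies on the 16-byte alignment
// that __pipeline_memcpy_async expects (satisfied here because the matrix
// widths are multiples of BLOCK_SIZE).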
// Single Stage memcpy_async pipeline with Large copy chunk (float4) using arrive-wait barrier
template <int BLOCK_SIZE> __global__ void MatrixMulAsyncCopyLargeChunkAWBarrier(float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B, int wA,
int wB) {
#if __CUDA_ARCH__ >= 700
// Requires BLOCK_SIZE % 4 == 0
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
nvcuda_namespace::pipeline pipe;
__shared__ nvcuda_namespace::awbarrier barrier;
if (threadIdx.x == 0) {
nvcuda_namespace::init(&barrier, blockDim.x*blockDim.y);
}
__syncthreads();
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
float Csub = 0.0;
const int t4x = threadIdx.x * 4;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Load the matrices from device memory to shared memory;
// a subset of threads loads a contiguous chunk of elements.
// Now, one fourth of the threads load four elements of each matrix
if ( t4x < BLOCK_SIZE ) {
float4 * const A4s = reinterpret_cast<float4*>(& As[threadIdx.y][t4x]);
float4 * const B4s = reinterpret_cast<float4*>(& Bs[threadIdx.y][t4x]);
const float4 * const A4 = reinterpret_cast<const float4*>(& A[a + wA * threadIdx.y + t4x]);
const float4 * const B4 = reinterpret_cast<const float4*>(& B[b + wB * threadIdx.y + t4x]); // B tile starts at b with row stride wB
nvcuda_namespace::memcpy_async(*A4s,*A4,pipe);
nvcuda_namespace::memcpy_async(*B4s,*B4,pipe);
pipe.arrive_on(barrier);
}
// Synchronize to make sure the matrices are loaded
barrier.arrive_and_wait();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
#endif
}
// Single Stage memcpy_async pipeline with float copy
template <int BLOCK_SIZE> __global__ void MatrixMulAsyncCopySingleStage(float *C, const float *A,
const float *B, int wA,
int wB) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Single-stage pipeline version
float Csub = 0.0;
#if USE_CPP_API
nvcuda_namespace::pipeline pipe;
#endif
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Load the matrices from device memory to shared memory; each thread loads
// one element of each matrix
{
const float *A_float = reinterpret_cast<const float*>(A + a + wA * threadIdx.y + threadIdx.x);
const float *B_float = reinterpret_cast<const float*>(B + b + wB * threadIdx.y + threadIdx.x);
#if USE_CPP_API
nvcuda_namespace::memcpy_async(As[threadIdx.y][threadIdx.x], *A_float, pipe);
nvcuda_namespace::memcpy_async(Bs[threadIdx.y][threadIdx.x], *B_float, pipe);
pipe.commit_and_wait();
#else
__pipeline_memcpy_async(&As[threadIdx.y][threadIdx.x], A_float, sizeof(float));
__pipeline_memcpy_async(&Bs[threadIdx.y][threadIdx.x], B_float, sizeof(float));
__pipeline_commit();
__pipeline_wait_prior(0);
#endif
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
}
// Multi Stage memcpy_async pipeline with int copy
template <int BLOCK_SIZE> __global__ void MatrixMulAsyncCopyMultiStage(float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B, int wA,
int wB) {
// Multi-stage pipeline version
constexpr size_t maxPipelineStages = 4;
// Declaration of the shared memory array As used to
// store the sub-matrix of A for each stage
__shared__ float As[maxPipelineStages][BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B for each stage
__shared__ float Bs[maxPipelineStages][BLOCK_SIZE][BLOCK_SIZE];
float Csub = 0.0;
// Index of the first sub-matrix of A processed by the block
const int aBegin = wA * BLOCK_SIZE * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
const int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
const int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
#if USE_CPP_API
nvcuda_namespace::pipeline pipe;
#endif
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin, i = 0, aStage = aBegin, bStage = bBegin, iStage = 0; a <= aEnd; a += aStep, b += bStep, ++i ) {
// Load the matrices from device memory to shared memory; each thread loads
// one element of each matrix
for ( ; aStage <= a + aStep * maxPipelineStages ; aStage += aStep, bStage += bStep, ++iStage )
{
if ( aStage <= aEnd )
{
const float *A_float = reinterpret_cast<const float*>(A + aStage + wA * threadIdx.y + threadIdx.x);
const float *B_float = reinterpret_cast<const float*>(B + bStage + wB * threadIdx.y + threadIdx.x);
// Rotating buffer
const int j = iStage % maxPipelineStages;
#if USE_CPP_API
nvcuda_namespace::memcpy_async(As[j][threadIdx.y][threadIdx.x], *A_float, pipe);
nvcuda_namespace::memcpy_async(Bs[j][threadIdx.y][threadIdx.x], *B_float, pipe);
#else
__pipeline_memcpy_async(&As[j][threadIdx.y][threadIdx.x], A_float, sizeof(float));
__pipeline_memcpy_async(&Bs[j][threadIdx.y][threadIdx.x], B_float, sizeof(float));
#endif
}
#if USE_CPP_API
pipe.commit();
#else
__pipeline_commit();
#endif
}
#if USE_CPP_API
pipe.wait_prior<maxPipelineStages-1>();
#else
__pipeline_wait_prior(maxPipelineStages-1);
#endif
// Synchronize to make sure the matrices are loaded
__syncthreads();
const int j = i % maxPipelineStages;
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[j][threadIdx.y][k] * Bs[j][k][threadIdx.x];
}
// Don't have to synchronize because
// next iteration is loading to a different buffer
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
}
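// Note (descriptive only): this multi-stage variant copies a single float
// (4 bytes) per thread per stage, whereas the *LargeChunk kernels above copy
// float4 chunks; __pipeline_memcpy_async accepts 4-, 8- or 16-byte copies, so
// both element widths are valid, the wider copy simply issues fewer, larger
// transactions.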
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void MatrixMulNaive(float *C, float *A,
float *B, int wA,
int wB) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[threadIdx.y][threadIdx.x] = A[a + wA * threadIdx.y + threadIdx.x];
Bs[threadIdx.y][threadIdx.x] = B[b + wB * threadIdx.y + threadIdx.x];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
}
template <int BLOCK_SIZE> __global__ void MatrixMulNaiveLargeChunk(float *C, float *A,
float *B, int wA,
int wB) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
int t4x = threadIdx.x * 4 ;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Load the matrices from device memory
// to shared memory;
// One fourth of the threads load four elements of each matrix
if ( t4x < BLOCK_SIZE ) {
float4 * const A4s = reinterpret_cast<float4*>(& As[threadIdx.y][t4x]);
float4 * const B4s = reinterpret_cast<float4*>(& Bs[threadIdx.y][t4x]);
const float4 * const A4 = reinterpret_cast<float4*>(& A[a + wA * threadIdx.y + t4x]);
      const float4 * const B4 = reinterpret_cast<float4*>(& B[b + wB * threadIdx.y + t4x]);
*A4s = *A4 ;
*B4s = *B4 ;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
}
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
const dim3 &dimsA,
const dim3 &dimsB,
kernels kernel_number) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));
hipStream_t stream;
// Initialize host memory
const float valB = 2.10f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
// copy host memory to device
checkCudaErrors(hipMemcpyAsync(d_A, h_A, mem_size_A, hipMemcpyHostToDevice, stream));
checkCudaErrors(hipMemcpyAsync(d_B, h_B, mem_size_B, hipMemcpyHostToDevice, stream));
checkCudaErrors(hipMemsetAsync(d_C, 0, mem_size_C, stream));
// Setup execution parameters
dim3 threads(blockSize, blockSize);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
printf("Running kernel = %d - %s\n", kernel_number, kernelNames[kernel_number]);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
switch (kernel_number)
{
case AsyncCopyMultiStageLargeChunk :
default:
hipLaunchKernelGGL(( MatrixMulAsyncCopyMultiStageLargeChunk<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopyLargeChunk :
hipLaunchKernelGGL(( MatrixMulAsyncCopyLargeChunk<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopyLargeChunkAWBarrier :
hipLaunchKernelGGL(( MatrixMulAsyncCopyLargeChunkAWBarrier<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopyMultiStage :
hipLaunchKernelGGL(( MatrixMulAsyncCopyMultiStage<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopySingleStage :
hipLaunchKernelGGL(( MatrixMulAsyncCopySingleStage<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case Naive :
hipLaunchKernelGGL(( MatrixMulNaive<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case NaiveLargeChunk:
hipLaunchKernelGGL(( MatrixMulNaiveLargeChunk<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
}
printf("done\n");
checkCudaErrors(hipStreamSynchronize(stream));
// Execute the kernel
int nIter = 100;
// Record the start event
checkCudaErrors(hipEventRecord(start, stream));
for (int j = 0; j < nIter; j++) {
switch (kernel_number)
{
case AsyncCopyMultiStageLargeChunk :
default:
hipLaunchKernelGGL(( MatrixMulAsyncCopyMultiStageLargeChunk<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopyLargeChunk :
hipLaunchKernelGGL(( MatrixMulAsyncCopyLargeChunk<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopyLargeChunkAWBarrier :
hipLaunchKernelGGL(( MatrixMulAsyncCopyLargeChunkAWBarrier<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopyMultiStage :
hipLaunchKernelGGL(( MatrixMulAsyncCopyMultiStage<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopySingleStage :
hipLaunchKernelGGL(( MatrixMulAsyncCopySingleStage<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case Naive :
hipLaunchKernelGGL(( MatrixMulNaive<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case NaiveLargeChunk:
hipLaunchKernelGGL(( MatrixMulNaiveLargeChunk<blockSize>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
}
}
// Record the stop event
checkCudaErrors(hipEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(hipMemcpyAsync(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost, stream));
checkCudaErrors(hipStreamSynchronize(stream));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
  printf("\nNOTE: The CUDA Samples are not meant for performance "\
         "measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
int main(int argc, char **argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" -kernel=kernel_number (0 - AsyncCopyMultiStageLargeChunk; 1 - AsyncCopyLargeChunk)\n");
printf(" (2 - AsyncCopyLargeChunkAWBarrier; 3 - AsyncCopyMultiStage)\n");
printf(" (4 - AsyncCopySingleStage; 5 - Naive without memcpy_async)\n");
printf(" (6 - NaiveLargeChunk without memcpy_async)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char **)argv);
int matrixBlock = 32;
dim3 dimsA(10 * 2 * matrixBlock, 10 * 2 * matrixBlock, 1);
dim3 dimsB(10 * 2 * matrixBlock, 10 * 2 * matrixBlock, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
kernels selected_kernel = AsyncCopyMultiStageLargeChunk;
// kernel to run - default (AsyncCopyMultiStageLargeChunk == 0)
if (checkCmdLineFlag(argc, (const char **)argv, "kernel")) {
int kernel_number = getCmdLineArgumentInt(argc, (const char **)argv, "kernel");
if (kernel_number < 7)
{
selected_kernel = (kernels)kernel_number;
}
else
{
      printf("Error: kernel number should be between 0 and 6; you entered %d\n", kernel_number);
exit(EXIT_FAILURE);
}
int major = 0;
checkCudaErrors(hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, dev));
if ((kernel_number == AsyncCopyLargeChunkAWBarrier) && major < 7)
{
      printf("AsyncCopyLargeChunkAWBarrier kernel requires SM 7.0 or higher. Exiting...\n");
exit(EXIT_WAIVED);
}
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, dimsA, dimsB, selected_kernel);
exit(matrix_result);
}
|
eb9fef749d36b73a851c05c52a741b6ac824607b.cu
|
/**
* Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
*
 * This sample demonstrates a matrix multiplication implementation that makes use of shared memory
 * to ensure data reuse; the matrix multiplication is done using a tiling approach.
 * With compute capability 8.0 or higher, the CUDA kernels involved copy data asynchronously
 * from global to shared memory, a.k.a. async-copy.
* This sample has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <cuda_pipeline.h>
#if __CUDA_ARCH__ >= 700
#include <cuda_awbarrier.h>
#endif
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
namespace nvcuda_namespace = nvcuda::experimental;
enum kernels
{
AsyncCopyMultiStageLargeChunk = 0,
AsyncCopyLargeChunk = 1,
AsyncCopyLargeChunkAWBarrier = 2,
AsyncCopyMultiStage = 3,
AsyncCopySingleStage = 4,
Naive = 5,
NaiveLargeChunk = 6
};
const char* kernelNames[] = {"AsyncCopyMultiStageLargeChunk", "AsyncCopyLargeChunk",
"AsyncCopyLargeChunkAWBarrier", "AsyncCopyMultiStage",
"AsyncCopySingleStage", "Naive", "NaiveLargeChunk"};
#define USE_CPP_API 0
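// 1: use the experimental C++ pipeline API (nvcuda::experimental::pipeline / memcpy_async);
// 0: use the __pipeline_* intrinsics from cuda_pipeline.h (the default below).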
constexpr int blockSize = 16;
// Multi Stage memcpy_async pipeline with large chunk copy
template <int BLOCK_SIZE> __global__ void MatrixMulAsyncCopyMultiStageLargeChunk(float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B, int wA,
int wB) {
// Requires BLOCK_SIZE % 4 == 0
// Multi-stage pipeline version
constexpr size_t maxPipelineStages = 4;
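    // Number of async-copy stages kept in flight; the shared-memory arrays below hold one
    // BLOCK_SIZE x BLOCK_SIZE tile of A and of B per stage, used as a rotating buffer.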
// Declaration of the shared memory array As used to
// store the sub-matrix of A for each stage
__shared__ float As[maxPipelineStages][BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B for each stage
__shared__ float Bs[maxPipelineStages][BLOCK_SIZE][BLOCK_SIZE];
float Csub = 0.0;
// Index of the first sub-matrix of A processed by the block
const int aBegin = wA * (BLOCK_SIZE) * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
const int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
const int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
const int t4x = threadIdx.x * 4 ;
#if USE_CPP_API
nvcuda_namespace::pipeline pipe;
#endif
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin, i = 0, aStage = aBegin, bStage = bBegin, iStage = 0; a <= aEnd; a += aStep, b += bStep, ++i ) {
// Load the matrices from device memory to shared memory; each thread loads
// one element of each matrix
for ( ; aStage <= a + aStep * maxPipelineStages ; aStage += aStep, bStage += bStep, ++iStage )
{
if ( aStage <= aEnd && t4x < BLOCK_SIZE )
{
// Rotating buffer
const int j = iStage % maxPipelineStages;
float4 * const A4s = reinterpret_cast<float4*>(& As[j][threadIdx.y][t4x]);
float4 * const B4s = reinterpret_cast<float4*>(& Bs[j][threadIdx.y][t4x]);
const float4 * const A4 = reinterpret_cast<const float4*>(& A[aStage + wA * threadIdx.y + t4x]);
                const float4 * const B4 = reinterpret_cast<const float4*>(& B[bStage + wB * threadIdx.y + t4x]);
#if USE_CPP_API
nvcuda_namespace::memcpy_async(*A4s,*A4, pipe);
nvcuda_namespace::memcpy_async(*B4s,*B4, pipe);
#else
__pipeline_memcpy_async(A4s, A4, sizeof(float4));
__pipeline_memcpy_async(B4s, B4, sizeof(float4));
#endif
}
#if USE_CPP_API
pipe.commit();
#else
__pipeline_commit();
#endif
}
#if USE_CPP_API
pipe.wait_prior<maxPipelineStages-1>();
#else
__pipeline_wait_prior(maxPipelineStages-1);
#endif
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Rotating buffer
const int j = i % maxPipelineStages;
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[j][threadIdx.y][k] * Bs[j][k][threadIdx.x];
}
// Don't have to synchronize because
// next iteration is loading to a different buffer
}
// Write the block sub-matrix to device memory;
    // each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
}
// Single Stage memcpy_async pipeline with Large copy chunk (float4)
template <int BLOCK_SIZE> __global__ void MatrixMulAsyncCopyLargeChunk(float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B, int wA,
int wB) {
// Requires BLOCK_SIZE % 4 == 0
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Single-stage pipeline version
float Csub = 0.0;
const int t4x = threadIdx.x * 4;
#if USE_CPP_API
nvcuda_namespace::pipeline pipe;
#endif
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Load the matrices from device memory to shared memory;
// a subset of threads loads a contiguous chunk of elements.
// Previously, per-thread:
// As[ty][tx] = A[a + wA * ty + tx];
// Bs[ty][tx] = B[b + wB * ty + tx];
// Now, one fourth of the threads load four elements of each matrix
if ( t4x < BLOCK_SIZE ) {
float4 * const A4s = reinterpret_cast<float4*>(& As[threadIdx.y][t4x]);
float4 * const B4s = reinterpret_cast<float4*>(& Bs[threadIdx.y][t4x]);
const float4 * const A4 = reinterpret_cast<const float4*>(& A[a + wA * threadIdx.y + t4x]);
            const float4 * const B4 = reinterpret_cast<const float4*>(& B[b + wB * threadIdx.y + t4x]);
#if USE_CPP_API
nvcuda_namespace::memcpy_async(*A4s,*A4,pipe);
nvcuda_namespace::memcpy_async(*B4s,*B4,pipe);
pipe.commit_and_wait();
#else
__pipeline_memcpy_async(A4s, A4, sizeof(float4));
__pipeline_memcpy_async(B4s, B4, sizeof(float4));
__pipeline_commit();
__pipeline_wait_prior(0);
#endif
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
    // each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
}
// Single Stage memcpy_async pipeline with Large copy chunk (float4) using arrive-wait barrier
template <int BLOCK_SIZE> __global__ void MatrixMulAsyncCopyLargeChunkAWBarrier(float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B, int wA,
int wB) {
#if __CUDA_ARCH__ >= 700
// Requires BLOCK_SIZE % 4 == 0
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
nvcuda_namespace::pipeline pipe;
__shared__ nvcuda_namespace::awbarrier barrier;
    if (threadIdx.x == 0 && threadIdx.y == 0) {  // a single thread initializes the block-wide barrier
nvcuda_namespace::init(&barrier, blockDim.x*blockDim.y);
}
__syncthreads();
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
float Csub = 0.0;
const int t4x = threadIdx.x * 4;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Load the matrices from device memory to shared memory;
// a subset of threads loads a contiguous chunk of elements.
// Now, one fourth of the threads load four elements of each matrix
if ( t4x < BLOCK_SIZE ) {
float4 * const A4s = reinterpret_cast<float4*>(& As[threadIdx.y][t4x]);
float4 * const B4s = reinterpret_cast<float4*>(& Bs[threadIdx.y][t4x]);
const float4 * const A4 = reinterpret_cast<const float4*>(& A[a + wA * threadIdx.y + t4x]);
            const float4 * const B4 = reinterpret_cast<const float4*>(& B[b + wB * threadIdx.y + t4x]);
nvcuda_namespace::memcpy_async(*A4s,*A4,pipe);
nvcuda_namespace::memcpy_async(*B4s,*B4,pipe);
pipe.arrive_on(barrier);
}
// Synchronize to make sure the matrices are loaded
barrier.arrive_and_wait();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
    // each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
#endif
}
// Single Stage memcpy_async pipeline with float copy
template <int BLOCK_SIZE> __global__ void MatrixMulAsyncCopySingleStage(float *C, const float *A,
const float *B, int wA,
int wB) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Single-stage pipeline version
float Csub = 0.0;
#if USE_CPP_API
nvcuda_namespace::pipeline pipe;
#endif
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Load the matrices from device memory to shared memory; each thread loads
// one element of each matrix
{
const float *A_float = reinterpret_cast<const float*>(A + a + wA * threadIdx.y + threadIdx.x);
const float *B_float = reinterpret_cast<const float*>(B + b + wB * threadIdx.y + threadIdx.x);
#if USE_CPP_API
nvcuda_namespace::memcpy_async(As[threadIdx.y][threadIdx.x], *A_float, pipe);
nvcuda_namespace::memcpy_async(Bs[threadIdx.y][threadIdx.x], *B_float, pipe);
pipe.commit_and_wait();
#else
__pipeline_memcpy_async(&As[threadIdx.y][threadIdx.x], A_float, sizeof(float));
__pipeline_memcpy_async(&Bs[threadIdx.y][threadIdx.x], B_float, sizeof(float));
__pipeline_commit();
__pipeline_wait_prior(0);
#endif
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
    // each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
}
// Multi Stage memcpy_async pipeline with int copy
template <int BLOCK_SIZE> __global__ void MatrixMulAsyncCopyMultiStage(float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B, int wA,
int wB) {
// Multi-stage pipeline version
constexpr size_t maxPipelineStages = 4;
// Declaration of the shared memory array As used to
// store the sub-matrix of A for each stage
__shared__ float As[maxPipelineStages][BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B for each stage
__shared__ float Bs[maxPipelineStages][BLOCK_SIZE][BLOCK_SIZE];
float Csub = 0.0;
// Index of the first sub-matrix of A processed by the block
const int aBegin = wA * BLOCK_SIZE * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
const int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
const int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
#if USE_CPP_API
nvcuda_namespace::pipeline pipe;
#endif
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin, i = 0, aStage = aBegin, bStage = bBegin, iStage = 0; a <= aEnd; a += aStep, b += bStep, ++i ) {
// Load the matrices from device memory to shared memory; each thread loads
// one element of each matrix
for ( ; aStage <= a + aStep * maxPipelineStages ; aStage += aStep, bStage += bStep, ++iStage )
{
if ( aStage <= aEnd )
{
const float *A_float = reinterpret_cast<const float*>(A + aStage + wA * threadIdx.y + threadIdx.x);
const float *B_float = reinterpret_cast<const float*>(B + bStage + wB * threadIdx.y + threadIdx.x);
// Rotating buffer
const int j = iStage % maxPipelineStages;
#if USE_CPP_API
nvcuda_namespace::memcpy_async(As[j][threadIdx.y][threadIdx.x], *A_float, pipe);
nvcuda_namespace::memcpy_async(Bs[j][threadIdx.y][threadIdx.x], *B_float, pipe);
#else
__pipeline_memcpy_async(&As[j][threadIdx.y][threadIdx.x], A_float, sizeof(float));
__pipeline_memcpy_async(&Bs[j][threadIdx.y][threadIdx.x], B_float, sizeof(float));
#endif
}
#if USE_CPP_API
pipe.commit();
#else
__pipeline_commit();
#endif
}
#if USE_CPP_API
pipe.wait_prior<maxPipelineStages-1>();
#else
__pipeline_wait_prior(maxPipelineStages-1);
#endif
// Synchronize to make sure the matrices are loaded
__syncthreads();
const int j = i % maxPipelineStages;
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[j][threadIdx.y][k] * Bs[j][k][threadIdx.x];
}
// Don't have to synchronize because
// next iteration is loading to a different buffer
}
// Write the block sub-matrix to device memory;
    // each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void MatrixMulNaive(float *C, float *A,
float *B, int wA,
int wB) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[threadIdx.y][threadIdx.x] = A[a + wA * threadIdx.y + threadIdx.x];
Bs[threadIdx.y][threadIdx.x] = B[b + wB * threadIdx.y + threadIdx.x];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
}
template <int BLOCK_SIZE> __global__ void MatrixMulNaiveLargeChunk(float *C, float *A,
float *B, int wA,
int wB) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
int t4x = threadIdx.x * 4 ;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * blockIdx.y;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * blockIdx.x;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Load the matrices from device memory
// to shared memory;
// One fourth of the threads load four elements of each matrix
if ( t4x < BLOCK_SIZE ) {
float4 * const A4s = reinterpret_cast<float4*>(& As[threadIdx.y][t4x]);
float4 * const B4s = reinterpret_cast<float4*>(& Bs[threadIdx.y][t4x]);
const float4 * const A4 = reinterpret_cast<float4*>(& A[a + wA * threadIdx.y + t4x]);
            const float4 * const B4 = reinterpret_cast<float4*>(& B[b + wB * threadIdx.y + t4x]);
*A4s = *A4 ;
*B4s = *B4 ;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[threadIdx.y][k] * Bs[k][threadIdx.x];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
C[c + wB * threadIdx.y + threadIdx.x] = Csub;
}
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
const dim3 &dimsA,
const dim3 &dimsB,
kernels kernel_number) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));
cudaStream_t stream;
// Initialize host memory
const float valB = 2.10f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
// copy host memory to device
checkCudaErrors(cudaMemcpyAsync(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice, stream));
checkCudaErrors(cudaMemcpyAsync(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice, stream));
checkCudaErrors(cudaMemsetAsync(d_C, 0, mem_size_C, stream));
// Setup execution parameters
dim3 threads(blockSize, blockSize);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
printf("Running kernel = %d - %s\n", kernel_number, kernelNames[kernel_number]);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
switch (kernel_number)
{
case AsyncCopyMultiStageLargeChunk :
default:
MatrixMulAsyncCopyMultiStageLargeChunk<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopyLargeChunk :
MatrixMulAsyncCopyLargeChunk<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopyLargeChunkAWBarrier :
MatrixMulAsyncCopyLargeChunkAWBarrier<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopyMultiStage :
MatrixMulAsyncCopyMultiStage<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopySingleStage :
MatrixMulAsyncCopySingleStage<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case Naive :
MatrixMulNaive<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case NaiveLargeChunk:
MatrixMulNaiveLargeChunk<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
}
printf("done\n");
checkCudaErrors(cudaStreamSynchronize(stream));
// Execute the kernel
int nIter = 100;
// Record the start event
checkCudaErrors(cudaEventRecord(start, stream));
for (int j = 0; j < nIter; j++) {
switch (kernel_number)
{
case AsyncCopyMultiStageLargeChunk :
default:
MatrixMulAsyncCopyMultiStageLargeChunk<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopyLargeChunk :
MatrixMulAsyncCopyLargeChunk<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopyLargeChunkAWBarrier :
MatrixMulAsyncCopyLargeChunkAWBarrier<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopyMultiStage :
MatrixMulAsyncCopyMultiStage<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case AsyncCopySingleStage :
MatrixMulAsyncCopySingleStage<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case Naive :
MatrixMulNaive<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
case NaiveLargeChunk:
MatrixMulNaiveLargeChunk<blockSize><<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
break;
}
}
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(cudaMemcpyAsync(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost, stream));
checkCudaErrors(cudaStreamSynchronize(stream));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
  printf("\nNOTE: The CUDA Samples are not meant for performance "\
         "measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
int main(int argc, char **argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" -kernel=kernel_number (0 - AsyncCopyMultiStageLargeChunk; 1 - AsyncCopyLargeChunk)\n");
printf(" (2 - AsyncCopyLargeChunkAWBarrier; 3 - AsyncCopyMultiStage)\n");
printf(" (4 - AsyncCopySingleStage; 5 - Naive without memcpy_async)\n");
printf(" (6 - NaiveLargeChunk without memcpy_async)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char **)argv);
int matrixBlock = 32;
dim3 dimsA(10 * 2 * matrixBlock, 10 * 2 * matrixBlock, 1);
dim3 dimsB(10 * 2 * matrixBlock, 10 * 2 * matrixBlock, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
kernels selected_kernel = AsyncCopyMultiStageLargeChunk;
// kernel to run - default (AsyncCopyMultiStageLargeChunk == 0)
if (checkCmdLineFlag(argc, (const char **)argv, "kernel")) {
int kernel_number = getCmdLineArgumentInt(argc, (const char **)argv, "kernel");
if (kernel_number < 7)
{
selected_kernel = (kernels)kernel_number;
}
else
{
      printf("Error: kernel number should be between 0 and 6; you entered %d\n", kernel_number);
exit(EXIT_FAILURE);
}
int major = 0;
checkCudaErrors(cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, dev));
if ((kernel_number == AsyncCopyLargeChunkAWBarrier) && major < 7)
{
      printf("AsyncCopyLargeChunkAWBarrier kernel requires SM 7.0 or higher. Exiting...\n");
exit(EXIT_WAIVED);
}
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, dimsA, dimsB, selected_kernel);
exit(matrix_result);
}
|
8a7dea2ec1c0de5e3f1b513360512e8419e9f8a6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "camera.h"
#include "color.h"
#include "cuda_utils.h"
#include "hittable_list.h"
#include "material.h"
#include "ray.h"
#include "sphere.h"
#include "vec3.h"
#include <hiprand/hiprand_kernel.h>
#include <float.h> // for FLT_MAX
#include <iostream>
// this is a kernel function used to init the rand state for all pixels
__global__ void render_init(int max_x, int max_y, hiprandState_t *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y))
return;
int pixel_index = j * max_x + i;
  // Each thread gets the same seed, a different sequence number, and no offset
hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
}
__device__ vec3 ray_color(ray r, hittable **world) {
hit_record rec;
if ((*world)->hit(r, 0.0, FLT_MAX, rec)) {
return 0.5f * vec3(rec.normal.x() + 1.0f, rec.normal.y() + 1.0f,
rec.normal.z() + 1.0f);
} else {
vec3 unit_direction = unit_vector(r.direction());
float t = 0.5f * (unit_direction.y() + 1.0f);
return (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
}
}
// for diffuse effect
// with material
__device__ vec3 ray_color(ray r, hittable **world,
hiprandState_t *local_rand_state) {
ray cur_ray = r;
vec3 cur_attenuation = vec3(1.0, 1.0, 1.0);
// bounce 50 times
for (int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
ray scattered;
vec3 attenuation;
if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered,
local_rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
} else {
// the material will absorb all light ?
return vec3(0.0, 0.0, 0.0);
}
} else {
// shoot into the sky, a light source
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f * (unit_direction.y() + 1.0f);
vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
// modify the intensity
return cur_attenuation * c;
}
}
return vec3(0.0, 0.0, 0.0); // exceeded recursion
}
__global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam,
hittable **world, hiprandState_t *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y))
return;
// int pixel_index = j*max_x*3 + i*3;
int pixel_index = j * max_x + i;
hiprandState_t local_rand_state = rand_state[pixel_index];
vec3 col(0, 0, 0);
for (int s = 0; s < ns; s++) {
float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v, &local_rand_state);
// sample ns times, each has a random direction
col += ray_color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
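  // Average the ns samples, then apply gamma-2 correction (square root of each channel)
  // before writing the pixel to the framebuffer.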
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
__global__ void create_world(hittable **d_list, hittable **d_world,
camera **cam, int nx, int ny) {
// make sure only create this object once
if (threadIdx.x == 0 && blockIdx.x == 0) {
d_list[0] =
new sphere(vec3(0, 0, -1), 0.5, new lambertian(vec3(0.1, 0.2, 0.5)));
d_list[1] = new sphere(vec3(0, -100.5, -1), 100,
new lambertian(vec3(0.8, 0.8, 0.0)));
d_list[2] =
new sphere(vec3(1, 0, -1), 0.5, new metal(vec3(0.8, 0.6, 0.2), 0.0));
d_list[3] = new sphere(vec3(-1, 0, -1), 0.5, new dielectric(1.5));
d_list[4] = new sphere(vec3(-1, 0, -1), -0.45, new dielectric(1.5));
*d_world = new hittable_list(d_list, 5);
// *(cam) = new camera();
// *cam = new camera(vec3(-2, 2, 1), vec3(0, 0, -1), vec3(0, 1, 0), 20.0,
// float(nx) / float(ny));
vec3 lookfrom(3, 3, 2);
vec3 lookat(0, 0, -1);
float dist_to_focus = (lookfrom - lookat).length();
float aperture = 2.0;
*cam = new camera(lookfrom, lookat, vec3(0, 1, 0), 20.0f,
float(nx) / float(ny), aperture, dist_to_focus);
}
}
__global__ void free_world(hittable **d_list, hittable **d_world,
camera **cam) {
// note here we delete the inside pointer first as it is created by new
for (int i = 0; i < 5; i++) {
delete ((sphere *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *(cam);
}
int main(void) {
// Image
const auto aspect_ratio = 16.0 / 9.0;
// nx = 1200 will cause an error in image viewer "pnm loader expected to find an integer"
int nx = 1600;
int ny = static_cast<int>(nx / aspect_ratio);
// ny = 500;
int num_pixels = nx * ny;
int fb_size = num_pixels * sizeof(vec3);
int ns = 100;
// Camera
// auto viewport_height = 2.0;
// auto viewport_width = aspect_ratio * viewport_height;
// auto focal_length = 1.0;
// auto origin = point3(0, 0, 0);
// auto horizontal = vec3(viewport_width, 0, 0);
// auto vertical = vec3(0, viewport_height, 0);
// auto lower_left_corner =
// origin - horizontal / 2 - vertical / 2 - vec3(0, 0, focal_length);
  // a list of 5 hittable objects (create_world fills d_list[0..4])
  hittable **d_list;
  checkCudaErrors(hipMalloc((void **)&d_list, 5 * sizeof(hittable *)));
hittable **d_world;
checkCudaErrors((hipMalloc((void **)&d_world, sizeof(hittable *))));
camera **d_cam;
checkCudaErrors((hipMalloc((void **)&d_cam, sizeof(camera *))));
hipLaunchKernelGGL(( create_world), dim3(1), dim3(1), 0, 0, d_list, d_world, d_cam, nx, ny);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// allocate FB, no initialization in CPU memory here.
vec3 *fb;
checkCudaErrors(hipMallocManaged((void **)&fb, fb_size));
// divide the work on GPU into blocks of 8x8 threads
int tx = 8;
int ty = 8;
// Render our buffer
dim3 blocks(nx / tx + 1, ny / ty + 1);
dim3 threads(tx, ty);
hiprandState_t *d_rand_state;
checkCudaErrors(
hipMalloc((void **)&d_rand_state, num_pixels * sizeof(hiprandState_t)));
hipLaunchKernelGGL(( render_init), dim3(blocks), dim3(threads), 0, 0, nx, ny, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, fb, nx, ny, lower_left_corner, horizontal,
// vertical, origin, d_world);
hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, fb, nx, ny, ns, d_cam, d_world, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// Output FB as Image
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
vec3 pixel = fb[j * nx + i];
color pixel_color(pixel.x(), pixel.y(), pixel.z());
write_color(std::cout, pixel_color);
}
}
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( free_world), dim3(1), dim3(1), 0, 0, d_list, d_world, d_cam);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_list));
checkCudaErrors(hipFree(d_world));
checkCudaErrors(hipFree(fb));
checkCudaErrors(hipFree(d_cam));
return 0;
}
|
8a7dea2ec1c0de5e3f1b513360512e8419e9f8a6.cu
|
#include "camera.h"
#include "color.h"
#include "cuda_utils.h"
#include "hittable_list.h"
#include "material.h"
#include "ray.h"
#include "sphere.h"
#include "vec3.h"
#include <curand_kernel.h>
#include <float.h> // for FLT_MAX
#include <iostream>
// this is a kernel function used to init the rand state for all pixels
__global__ void render_init(int max_x, int max_y, curandState *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y))
return;
int pixel_index = j * max_x + i;
  // Each thread gets the same seed, a different sequence number, and no offset
curand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
}
__device__ vec3 ray_color(ray r, hittable **world) {
hit_record rec;
if ((*world)->hit(r, 0.0, FLT_MAX, rec)) {
return 0.5f * vec3(rec.normal.x() + 1.0f, rec.normal.y() + 1.0f,
rec.normal.z() + 1.0f);
} else {
vec3 unit_direction = unit_vector(r.direction());
float t = 0.5f * (unit_direction.y() + 1.0f);
return (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
}
}
// for diffuse effect
// with material
__device__ vec3 ray_color(ray r, hittable **world,
curandState *local_rand_state) {
ray cur_ray = r;
vec3 cur_attenuation = vec3(1.0, 1.0, 1.0);
// bounce 50 times
for (int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
ray scattered;
vec3 attenuation;
if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered,
local_rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
} else {
// the material will absorb all light ?
return vec3(0.0, 0.0, 0.0);
}
} else {
// shoot into the sky, a light source
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f * (unit_direction.y() + 1.0f);
vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
// modify the intensity
return cur_attenuation * c;
}
}
return vec3(0.0, 0.0, 0.0); // exceeded recursion
}
__global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam,
hittable **world, curandState *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y))
return;
// int pixel_index = j*max_x*3 + i*3;
int pixel_index = j * max_x + i;
curandState local_rand_state = rand_state[pixel_index];
vec3 col(0, 0, 0);
for (int s = 0; s < ns; s++) {
float u = float(i + curand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + curand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v, &local_rand_state);
// sample ns times, each has a random direction
col += ray_color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
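  // Average over the ns samples and gamma-correct (gamma = 2, i.e. sqrt per channel)
  // before storing the pixel.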
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
__global__ void create_world(hittable **d_list, hittable **d_world,
camera **cam, int nx, int ny) {
// make sure only create this object once
if (threadIdx.x == 0 && blockIdx.x == 0) {
d_list[0] =
new sphere(vec3(0, 0, -1), 0.5, new lambertian(vec3(0.1, 0.2, 0.5)));
d_list[1] = new sphere(vec3(0, -100.5, -1), 100,
new lambertian(vec3(0.8, 0.8, 0.0)));
d_list[2] =
new sphere(vec3(1, 0, -1), 0.5, new metal(vec3(0.8, 0.6, 0.2), 0.0));
d_list[3] = new sphere(vec3(-1, 0, -1), 0.5, new dielectric(1.5));
d_list[4] = new sphere(vec3(-1, 0, -1), -0.45, new dielectric(1.5));
*d_world = new hittable_list(d_list, 5);
// *(cam) = new camera();
// *cam = new camera(vec3(-2, 2, 1), vec3(0, 0, -1), vec3(0, 1, 0), 20.0,
// float(nx) / float(ny));
vec3 lookfrom(3, 3, 2);
vec3 lookat(0, 0, -1);
float dist_to_focus = (lookfrom - lookat).length();
float aperture = 2.0;
*cam = new camera(lookfrom, lookat, vec3(0, 1, 0), 20.0f,
float(nx) / float(ny), aperture, dist_to_focus);
}
}
__global__ void free_world(hittable **d_list, hittable **d_world,
camera **cam) {
// note here we delete the inside pointer first as it is created by new
for (int i = 0; i < 5; i++) {
delete ((sphere *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *(cam);
}
int main(void) {
// Image
const auto aspect_ratio = 16.0 / 9.0;
// nx = 1200 will cause an error in image viewer "pnm loader expected to find an integer"
int nx = 1600;
int ny = static_cast<int>(nx / aspect_ratio);
// ny = 500;
int num_pixels = nx * ny;
int fb_size = num_pixels * sizeof(vec3);
int ns = 100;
// Camera
// auto viewport_height = 2.0;
// auto viewport_width = aspect_ratio * viewport_height;
// auto focal_length = 1.0;
// auto origin = point3(0, 0, 0);
// auto horizontal = vec3(viewport_width, 0, 0);
// auto vertical = vec3(0, viewport_height, 0);
// auto lower_left_corner =
// origin - horizontal / 2 - vertical / 2 - vec3(0, 0, focal_length);
  // a list of 5 hittable objects (create_world fills d_list[0..4])
  hittable **d_list;
  checkCudaErrors(cudaMalloc((void **)&d_list, 5 * sizeof(hittable *)));
hittable **d_world;
checkCudaErrors((cudaMalloc((void **)&d_world, sizeof(hittable *))));
camera **d_cam;
checkCudaErrors((cudaMalloc((void **)&d_cam, sizeof(camera *))));
create_world<<<1, 1>>>(d_list, d_world, d_cam, nx, ny);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// allocate FB, no initialization in CPU memory here.
vec3 *fb;
checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size));
// divide the work on GPU into blocks of 8x8 threads
int tx = 8;
int ty = 8;
// Render our buffer
dim3 blocks(nx / tx + 1, ny / ty + 1);
dim3 threads(tx, ty);
curandState *d_rand_state;
checkCudaErrors(
cudaMalloc((void **)&d_rand_state, num_pixels * sizeof(curandState)));
render_init<<<blocks, threads>>>(nx, ny, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// render<<<blocks, threads>>>(fb, nx, ny, lower_left_corner, horizontal,
// vertical, origin, d_world);
render<<<blocks, threads>>>(fb, nx, ny, ns, d_cam, d_world, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// Output FB as Image
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
vec3 pixel = fb[j * nx + i];
color pixel_color(pixel.x(), pixel.y(), pixel.z());
write_color(std::cout, pixel_color);
}
}
checkCudaErrors(cudaDeviceSynchronize());
free_world<<<1, 1>>>(d_list, d_world, d_cam);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_list));
checkCudaErrors(cudaFree(d_world));
checkCudaErrors(cudaFree(fb));
checkCudaErrors(cudaFree(d_cam));
return 0;
}
|
d122c4b681f9caf3465219e48d53dd0b7378f8ca.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../../Linear/Linear.h"
#include "../Attention.h"
#include <CUDA_ptr.hpp>
#include <bits/stdc++.h>
#include <wtime.h>
using namespace culib;
int main(int ac, char** av) {
float sparsity = std::atof(av[1]);
using w_mat_t = base_mat;
using vw_mat_t = row_mat;
using attn_config =
Attention_config<w_mat_t, w_mat_t, vw_mat_t, w_mat_t, Prune_attn>;
auto para =
std::make_shared<Model_t>(Model_t{768, 768, 384, 12, 3072, 768});
auto LQ = gen_sparse_linear<w_mat_t>(para->kdim, para->emdim, para->seq_len, sparsity);
auto LK = gen_sparse_linear<w_mat_t>(para->kdim, para->emdim, para->seq_len, sparsity);
auto LV = gen_sparse_linear<vw_mat_t>(para->vdim, para->emdim, para->seq_len, sparsity);
auto LO = gen_sparse_linear<w_mat_t>(para->emdim, para->vdim, para->seq_len, sparsity);
auto attn = std::make_unique<Attention<attn_config>>(
std::move(LQ), std::move(LK), std::move(LV), std::move(LO), para);
CUDA_ptr<half> IN(para->emdim * para->seq_len);
CUDA_ptr<half> d_OUT(para->emdim * para->seq_len);
attn->forward(d_OUT.get(), IN.get(), IN.get(), IN.get());
hipDeviceSynchronize();
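    // The forward() call above is a warm-up; wtime below presumably runs 10 timed
    // iterations and reports the average latency in microseconds.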
auto time = wtime(
10,
[&]() {
attn->forward(d_OUT.get(), IN.get(), IN.get(), IN.get());
hipDeviceSynchronize();
},
[]() {});
std::cout << "Time: " << time << " us\n";
std::vector<half> OUT(para->emdim * para->seq_len);
d_OUT.dump(OUT.data());
std::cout << __half2float(OUT[0]) << std::endl;
}
|
d122c4b681f9caf3465219e48d53dd0b7378f8ca.cu
|
#include "../../Linear/Linear.h"
#include "../Attention.h"
#include <CUDA_ptr.hpp>
#include <bits/stdc++.h>
#include <wtime.h>
using namespace culib;
int main(int ac, char** av) {
float sparsity = std::atof(av[1]);
using w_mat_t = base_mat;
using vw_mat_t = row_mat;
using attn_config =
Attention_config<w_mat_t, w_mat_t, vw_mat_t, w_mat_t, Prune_attn>;
auto para =
std::make_shared<Model_t>(Model_t{768, 768, 384, 12, 3072, 768});
auto LQ = gen_sparse_linear<w_mat_t>(para->kdim, para->emdim, para->seq_len, sparsity);
auto LK = gen_sparse_linear<w_mat_t>(para->kdim, para->emdim, para->seq_len, sparsity);
auto LV = gen_sparse_linear<vw_mat_t>(para->vdim, para->emdim, para->seq_len, sparsity);
auto LO = gen_sparse_linear<w_mat_t>(para->emdim, para->vdim, para->seq_len, sparsity);
auto attn = std::make_unique<Attention<attn_config>>(
std::move(LQ), std::move(LK), std::move(LV), std::move(LO), para);
CUDA_ptr<half> IN(para->emdim * para->seq_len);
CUDA_ptr<half> d_OUT(para->emdim * para->seq_len);
attn->forward(d_OUT.get(), IN.get(), IN.get(), IN.get());
cudaDeviceSynchronize();
auto time = wtime(
10,
[&]() {
attn->forward(d_OUT.get(), IN.get(), IN.get(), IN.get());
cudaDeviceSynchronize();
},
[]() {});
std::cout << "Time: " << time << " us\n";
std::vector<half> OUT(para->emdim * para->seq_len);
d_OUT.dump(OUT.data());
std::cout << __half2float(OUT[0]) << std::endl;
}
|
85299f62bc4d671ebacb09a1bf155c4697a119ef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h> // malloc/free
#include <math.h>   // ceil
#define N 1000
__global__ void vector_add(float *out, float *a, float *b, int n) {
for (int i = 0; i < n; i++) {
out[i] = a[i] + b[i];
}
}
int main(){
float *d_a, *d_b, *d_c;
float *h_a, *h_b, *h_c;
h_a = (float*)malloc(N * sizeof(float));
h_b = (float*)malloc(N * sizeof(float));
h_c = (float*)malloc(N * sizeof(float));
hipMalloc(&d_a, N * sizeof(float));
hipMalloc(&d_b, N * sizeof(float));
hipMalloc(&d_c, N * sizeof(float));
// Initialize array
for(int i = 0; i < N; i++){
h_a[i] = 1.0f; h_b[i] = 2.0f;
}
// copy to host
hipMemcpy(d_a, h_a, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_c, h_c, N * sizeof(float), hipMemcpyHostToDevice);
int blocks, grids;
blocks = 32;
grids = (float)ceil((float)N / blocks);
hipLaunchKernelGGL(( vector_add), dim3(grids), dim3(blocks), 0, 0, d_c, d_a, d_b, N);
hipMemcpy(h_c, d_c, N * sizeof(float), hipMemcpyDeviceToHost);
int i;
for (i = 0; i < N; i++) {
printf("%f ", h_c[i]);
}
printf("\n");
// free
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a); // host buffers were allocated with malloc, so release them with free
free(h_b);
free(h_c);
return 0;
}
|
85299f62bc4d671ebacb09a1bf155c4697a119ef.cu
|
#include <stdio.h>
#include <stdlib.h> // malloc/free
#include <math.h>   // ceil
#define N 1000
__global__ void vector_add(float *out, float *a, float *b, int n) {
for (int i = 0; i < n; i++) {
out[i] = a[i] + b[i];
}
}
int main(){
float *d_a, *d_b, *d_c;
float *h_a, *h_b, *h_c;
h_a = (float*)malloc(N * sizeof(float));
h_b = (float*)malloc(N * sizeof(float));
h_c = (float*)malloc(N * sizeof(float));
cudaMalloc(&d_a, N * sizeof(float));
cudaMalloc(&d_b, N * sizeof(float));
cudaMalloc(&d_c, N * sizeof(float));
// Initialize array
for(int i = 0; i < N; i++){
h_a[i] = 1.0f; h_b[i] = 2.0f;
}
// copy to host
cudaMemcpy(d_a, h_a, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, h_c, N * sizeof(float), cudaMemcpyHostToDevice);
int blocks, grids;
blocks = 32;
grids = (float)ceil((float)N / blocks);
vector_add<<<grids, blocks>>>(d_c, d_a, d_b, N);
cudaMemcpy(h_c, d_c, N * sizeof(float), cudaMemcpyDeviceToHost);
int i;
for (i = 0; i < N; i++) {
printf("%f ", h_c[i]);
}
printf("\n");
// free
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a); // host buffers were allocated with malloc, so release them with free
free(h_b);
free(h_c);
return 0;
}
|
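In both copies of the file above, every launched thread executes the full serial loop over all N elements, so the work is duplicated across the grid (the result is still correct because every thread writes the same values). A minimal per-thread sketch, assuming the same launch shape (grids blocks of blocks threads) and the same device pointers, would be:
__global__ void vector_add_per_thread(float *out, const float *a, const float *b, int n) {
    // One element per thread; the guard handles the partially filled last block.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        out[i] = a[i] + b[i];
    }
}
// Host-side launch, e.g.:
//   int blocks = 32;
//   int grids  = (N + blocks - 1) / blocks;   // integer ceiling, no float cast needed
//   vector_add_per_thread<<<grids, blocks>>>(d_c, d_a, d_b, N);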
f06b62f4fdf32d85335882a2b437909134163613.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void calcSumTable(const float *rowCumSum, float *SumTable, int rowNumberN, int colNumberM) {
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (xIndex >= colNumberM) return;
for (int i = 1; i < rowNumberN; i++) {
SumTable[i * colNumberM + xIndex] +=
rowCumSum[(i - 1) * colNumberM + xIndex];
}
}
|
f06b62f4fdf32d85335882a2b437909134163613.cu
|
#include "includes.h"
__global__ void calcSumTable(const float *rowCumSum, float *SumTable, int rowNumberN, int colNumberM) {
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (xIndex >= colNumberM) return;
for (int i = 1; i < rowNumberN; i++) {
SumTable[i * colNumberM + xIndex] +=
rowCumSum[(i - 1) * colNumberM + xIndex];
}
}
|
68893111c33901fc11b801f6341aab42062b65bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <math.h>
#include <float.h>
#include <thrust/execution_policy.h> // for certain cuda versions, this is where 'thrust::device' is
// #include <thrust/device_vector.h>
// #include <thrust/copy.h>
#include <thrust/extrema.h>
#include <hip/hip_runtime.h>
#include <iostream>
// #include "hough_voting_cuda_utils.h"
#define THREADS_PER_BLOCK 512
#define VERTEX_CHANNELS 3
#define MAX_ROI 64
#define PRINT(a) std::cout << #a << ": " << a << std::endl;
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
__device__ inline float angle_distance(int cx, int cy, int x, int y, float u, float v)
{
float dx = cx - x;
float dy = cy - y;
float n1 = sqrt(u * u + v * v);
float n2 = sqrt(dx * dx + dy * dy);
float dot = u * dx + v * dy;
float distance = dot / (n1 * n2);
return distance;
}
__device__ inline void project_box(int cls, const float* extents, const float* meta_data, float distance, float factor, float* threshold)
{
float xHalf = extents[cls * 3 + 0] * 0.5;
float yHalf = extents[cls * 3 + 1] * 0.5;
float zHalf = extents[cls * 3 + 2] * 0.5;
float bb3D[24];
bb3D[0] = xHalf; bb3D[1] = yHalf; bb3D[2] = zHalf + distance;
bb3D[3] = -xHalf; bb3D[4] = yHalf; bb3D[5] = zHalf + distance;
bb3D[6] = xHalf; bb3D[7] = -yHalf; bb3D[8] = zHalf + distance;
bb3D[9] = -xHalf; bb3D[10] = -yHalf; bb3D[11] = zHalf + distance;
bb3D[12] = xHalf; bb3D[13] = yHalf; bb3D[14] = -zHalf + distance;
bb3D[15] = -xHalf; bb3D[16] = yHalf; bb3D[17] = -zHalf + distance;
bb3D[18] = xHalf; bb3D[19] = -yHalf; bb3D[20] = -zHalf + distance;
bb3D[21] = -xHalf; bb3D[22] = -yHalf; bb3D[23] = -zHalf + distance;
float fx = meta_data[0];
float fy = meta_data[4];
float px = meta_data[2];
float py = meta_data[5];
float minX = 1e8;
float maxX = -1e8;
float minY = 1e8;
float maxY = -1e8;
for (int i = 0; i < 8; i++)
{
float x = fx * (bb3D[i * 3] / bb3D[i * 3 + 2]) + px;
float y = fy * (bb3D[i * 3 + 1] / bb3D[i * 3 + 2]) + py;
minX = fmin(minX, x);
minY = fmin(minY, y);
maxX = fmax(maxX, x);
maxY = fmax(maxY, y);
}
float width = maxX - minX + 1;
float height = maxY - minY + 1;
*threshold = fmax(width, height) * factor;
}
__global__ void compute_arrays_kernel(const int nthreads, const int* labelmap,
int* arrays, int* array_size, const int height, const int width)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
int n = index / (height * width);
int mask = labelmap[index];
if (mask > 0)
{
int size = atomicAdd(array_size + n, 1);
int offset = n * height * width + size;
arrays[offset] = index % (height * width);
}
}
}
__global__ void compute_hough_kernel(const int nthreads, float* hough_space, float* hough_data,
const float* vertmap, const float* extents, const float* meta_data, const int* arrays, const int* array_size,
const int* class_indexes, const int height, const int width, const float inlierThreshold, const int skip_pixels)
{
__shared__ float s_meta_data[9];
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
if (threadIdx.x == 0)
{
for (int i = 0; i < 9; ++i)
{
s_meta_data[i] = meta_data[i];
}
}
__syncthreads();
// (cls, cx, cy) is an element in the hough space
int n = index / (height * width);
int cls = class_indexes[n];
int pix = index % (height * width);
int cx = pix % width;
int cy = pix / width;
int size = array_size[n];
float distance = 0;
float threshold = 0;
for (int i = 0; i < size; i += skip_pixels)
{
int offset = n * height * width + i;
int location = arrays[offset]; // H * W
int x = location % width;
int y = location / (width);
// read the direction
offset = n * height * width * VERTEX_CHANNELS + (y * width + x) * VERTEX_CHANNELS;
float u = vertmap[offset];
float v = vertmap[offset + 1];
float d = exp(vertmap[offset + 2]);
// vote
if (angle_distance(cx, cy, x, y, u, v) > inlierThreshold)
{
project_box(cls, extents, s_meta_data, d, 0.6, &threshold);
float dx = fabsf(x - cx);
float dy = fabsf(y - cy);
if (dx < threshold && dy < threshold)
{
hough_space[index]++;
distance += d;
}
}
}
if (hough_space[index] > 0)
{
distance /= hough_space[index];
float bb_width = -1;
float bb_height = -1;
for (int i = 0; i < size; i += skip_pixels)
{
int offset = n * height * width + i;
int location = arrays[offset];
int x = location % width;
int y = location / width;
// read the direction
offset = n * height * width * VERTEX_CHANNELS + (y * width + x) * VERTEX_CHANNELS;
float u = vertmap[offset];
float v = vertmap[offset + 1];
// vote
if (angle_distance(cx, cy, x, y, u, v) > inlierThreshold)
{
project_box(cls, extents, s_meta_data, distance, 0.6, &threshold);
float dx = fabsf(x - cx);
float dy = fabsf(y - cy);
if (dx > bb_width && dx < threshold && dy < threshold)
bb_width = dx;
if (dy > bb_height && dx < threshold && dy < threshold)
bb_height = dy;
}
}
int offset = n * height * width * 3 + 3 * (cy * width + cx);
hough_data[offset] = distance;
hough_data[offset + 1] = 2 * bb_height;
hough_data[offset + 2] = 2 * bb_width;
}
}
}
__global__ void compute_rois_kernel(const int nthreads, float* top_box, float* top_pose,
const float* poses, const float* meta_data, const float* hough_space, const float* hough_data, const int* max_indexes, const int* class_indexes,
const int height, const int width)
{
__shared__ float s_f[4];
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
if (threadIdx.x == 0)
{
s_f[0] = meta_data[0]; // fx
s_f[1] = meta_data[4]; // fy
s_f[2] = meta_data[2]; // px
s_f[3] = meta_data[5]; // py
}
__syncthreads();
float fx = s_f[0];
float fy = s_f[1];
float px = s_f[2];
float py = s_f[3];
float scale = 0.05;
int max_index = max_indexes[index];
float max_hs_idx = hough_space[max_index];
int ind = max_index / (height * width);
int cls = class_indexes[ind];
int n = max_index % (height * width);
int x = n % width;
int y = n / width;
float rx = (x - px) / fx;
float ry = (y - py) / fy;
int offset = ind * height * width * 3 + 3 * (y * width + x);
float bb_distance = hough_data[offset];
float bb_height = hough_data[offset + 1];
float bb_width = hough_data[offset + 2];
int roi_index = index;
top_box[roi_index * 7 + 0] = 0;
top_box[roi_index * 7 + 1] = cls;
top_box[roi_index * 7 + 2] = x - bb_width * (0.5 + scale);
top_box[roi_index * 7 + 3] = y - bb_height * (0.5 + scale);
top_box[roi_index * 7 + 4] = x + bb_width * (0.5 + scale);
top_box[roi_index * 7 + 5] = y + bb_height * (0.5 + scale);
top_box[roi_index * 7 + 6] = max_hs_idx;
top_pose[roi_index * 7 + 0] = poses[index*4];
top_pose[roi_index * 7 + 1] = poses[index*4+1];
top_pose[roi_index * 7 + 2] = poses[index*4+2];
top_pose[roi_index * 7 + 3] = poses[index*4+3];
top_pose[roi_index * 7 + 4] = rx * bb_distance;
top_pose[roi_index * 7 + 5] = ry * bb_distance;
top_pose[roi_index * 7 + 6] = bb_distance;
}
}
int HoughVotingForwardLaucher(
const int* labels, const int* labelmap, const float* vertmap, const float* extents, const float* meta_data, const float* poses,
const int batch_size, const int height, const int width,
const float inlierThreshold,// const float votingThreshold, const float perThreshold,
const int skip_pixels,
float* top_box, float* top_pose, hipStream_t stream)
{
const int kThreadsPerBlock = THREADS_PER_BLOCK;
hipError_t err;
const int N = batch_size;
// step 1: compute a label index array for each instance
int dims = N * height * width;
int* arrays;// = arrays_vec.get();
hipMalloc((void **)&arrays, dims * sizeof(int));
hipMemset(arrays, 0, N * sizeof(int));
int* array_sizes;// = array_sizes_vec.get();
hipMalloc((void **)&array_sizes, N * sizeof(int));
hipMemset(array_sizes, 0, N * sizeof(int));
int output_size = N * height * width;
hipLaunchKernelGGL(( compute_arrays_kernel), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock),
dim3(kThreadsPerBlock), 0, stream,
output_size, labelmap, arrays, array_sizes, height, width);
hipDeviceSynchronize();
// DEBUG
// std::vector<int> array_sizes_host(N);
// hipMemcpy(&array_sizes_host[0], array_sizes, N * sizeof(int), hipMemcpyDeviceToHost);
// std::vector<int> labels_host(N);
// hipMemcpy(&labels_host[0], labels, N * sizeof(int), hipMemcpyDeviceToHost);
// std::vector<int> arrays_host(N*height*width);
// hipMemcpy(&arrays_host[0], arrays, N*height*width * sizeof(int), hipMemcpyDeviceToHost);
// // std::vector<float> meta_data_host(9);
// // hipMemcpy(&meta_data_host[0], meta_data, 9 * sizeof(float), hipMemcpyDeviceToHost);
// for (int n = 0; n < N; n++)
// {
// printf("Class %d) %d) (labels count: %d), sample value: %d\n", labels_host[n], n, array_sizes_host[n], arrays_host[n*height*width]);
// }
// // for (int n = 0; n < 9; n++)
// // {
// // printf("META: %.3f\n", meta_data_host[n]);
// // }
// // printf("\n");
// //
// step 2: compute the hough space
float* hough_space; // = thrust::raw_pointer_cast(hough_space_vec.data());
hipMalloc((void **)&hough_space, N * height * width * sizeof(float));
if (hipMemset(hough_space, 0, N * height * width * sizeof(float)) != hipSuccess)
fprintf(stderr, "reset error\n");
float* hough_data; // = thrust::raw_pointer_cast(hough_data_vec.data());
hipMalloc((void **)&hough_data, N * height * width * 3 * sizeof(float));
if (hipMemset(hough_data, 0, N * height * width * 3 * sizeof(float)) != hipSuccess)
fprintf(stderr, "reset error\n");
output_size = N * height * width;
hipLaunchKernelGGL(( compute_hough_kernel), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock),
dim3(kThreadsPerBlock), 0, stream,
output_size, hough_space, hough_data, vertmap, extents, meta_data,
arrays, array_sizes, labels, height, width, inlierThreshold, skip_pixels);
hipDeviceSynchronize();
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed compute hough space: %s\n", hipGetErrorString( err ) );
exit( -1 );
}
// step 3: find the maximum in hough space
std::vector<int> max_indexes_host(N);
for (int i = 0; i < N; i++)
{
float *hmax = thrust::max_element(thrust::device, hough_space + i * height * width, hough_space + (i+1) * height * width);
max_indexes_host[i] = hmax - hough_space;
// printf("Max indexes %d) %d\n", i, max_indexes_host[i]);
}
int* max_indexes;
hipMalloc((void **)&max_indexes, N * sizeof(int));
hipMemcpy(max_indexes, &max_indexes_host[0], N * sizeof(int), hipMemcpyHostToDevice);
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed compute maximum: %s\n", hipGetErrorString( err ) );
exit( -1 );
}
// step 4: compute outputs
output_size = N;
hipLaunchKernelGGL(( compute_rois_kernel), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock),
dim3(kThreadsPerBlock), 0, stream,
output_size, top_box, top_pose,
poses, meta_data, hough_space, hough_data, max_indexes, labels,
height, width);
hipDeviceSynchronize();
// err checking
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed compute outputs: %s\n", hipGetErrorString( err ) );
exit( -1 );
}
hipFree(arrays);
hipFree(array_sizes);
hipFree(hough_space);
hipFree(hough_data);
hipFree(max_indexes);
return 1;
}
std::vector<at::Tensor> hough_voting_forward_cuda
(
const at::Tensor& labels, const at::Tensor& masks, const at::Tensor& vertmap, const at::Tensor& extents,
const at::Tensor& meta_data, const at::Tensor& poses,
const float inlierThreshold, const int skip_pixels
)
{
int batch_size = masks.size(0);
int N = batch_size;
int H = masks.size(1);
int W = masks.size(2);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// float tensors
at::Tensor top_box = at::zeros({N, 7}, vertmap.options());
at::Tensor top_pose = at::zeros({N, 7}, vertmap.options());
HoughVotingForwardLaucher(
labels.contiguous().data<int>(), masks.contiguous().data<int>(), vertmap.contiguous().data<float>(), extents.contiguous().data<float>(),
meta_data.contiguous().data<float>(), poses.contiguous().data<float>(),
N, H, W,
inlierThreshold, // votingThreshold, perThreshold,
skip_pixels,
top_box.data<float>(), top_pose.data<float>(),
stream
);
THCudaCheck(hipGetLastError());
return {top_box, top_pose};
}
|
68893111c33901fc11b801f6341aab42062b65bb.cu
|
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <math.h>
#include <float.h>
#include <thrust/execution_policy.h> // for certain cuda versions, this is where 'thrust::device' is
// #include <thrust/device_vector.h>
// #include <thrust/copy.h>
#include <thrust/extrema.h>
#include <cuda_runtime.h>
#include <iostream>
// #include "hough_voting_cuda_utils.h"
#define THREADS_PER_BLOCK 512
#define VERTEX_CHANNELS 3
#define MAX_ROI 64
#define PRINT(a) std::cout << #a << ": " << a << std::endl;
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
__device__ inline float angle_distance(int cx, int cy, int x, int y, float u, float v)
{
float dx = cx - x;
float dy = cy - y;
float n1 = sqrt(u * u + v * v);
float n2 = sqrt(dx * dx + dy * dy);
float dot = u * dx + v * dy;
float distance = dot / (n1 * n2);
return distance;
}
__device__ inline void project_box(int cls, const float* extents, const float* meta_data, float distance, float factor, float* threshold)
{
float xHalf = extents[cls * 3 + 0] * 0.5;
float yHalf = extents[cls * 3 + 1] * 0.5;
float zHalf = extents[cls * 3 + 2] * 0.5;
float bb3D[24];
bb3D[0] = xHalf; bb3D[1] = yHalf; bb3D[2] = zHalf + distance;
bb3D[3] = -xHalf; bb3D[4] = yHalf; bb3D[5] = zHalf + distance;
bb3D[6] = xHalf; bb3D[7] = -yHalf; bb3D[8] = zHalf + distance;
bb3D[9] = -xHalf; bb3D[10] = -yHalf; bb3D[11] = zHalf + distance;
bb3D[12] = xHalf; bb3D[13] = yHalf; bb3D[14] = -zHalf + distance;
bb3D[15] = -xHalf; bb3D[16] = yHalf; bb3D[17] = -zHalf + distance;
bb3D[18] = xHalf; bb3D[19] = -yHalf; bb3D[20] = -zHalf + distance;
bb3D[21] = -xHalf; bb3D[22] = -yHalf; bb3D[23] = -zHalf + distance;
float fx = meta_data[0];
float fy = meta_data[4];
float px = meta_data[2];
float py = meta_data[5];
float minX = 1e8;
float maxX = -1e8;
float minY = 1e8;
float maxY = -1e8;
for (int i = 0; i < 8; i++)
{
float x = fx * (bb3D[i * 3] / bb3D[i * 3 + 2]) + px;
float y = fy * (bb3D[i * 3 + 1] / bb3D[i * 3 + 2]) + py;
minX = fmin(minX, x);
minY = fmin(minY, y);
maxX = fmax(maxX, x);
maxY = fmax(maxY, y);
}
float width = maxX - minX + 1;
float height = maxY - minY + 1;
*threshold = fmax(width, height) * factor;
}
__global__ void compute_arrays_kernel(const int nthreads, const int* labelmap,
int* arrays, int* array_size, const int height, const int width)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
int n = index / (height * width);
int mask = labelmap[index];
if (mask > 0)
{
int size = atomicAdd(array_size + n, 1);
int offset = n * height * width + size;
arrays[offset] = index % (height * width);
}
}
}
__global__ void compute_hough_kernel(const int nthreads, float* hough_space, float* hough_data,
const float* vertmap, const float* extents, const float* meta_data, const int* arrays, const int* array_size,
const int* class_indexes, const int height, const int width, const float inlierThreshold, const int skip_pixels)
{
__shared__ float s_meta_data[9];
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
if (threadIdx.x == 0)
{
for (int i = 0; i < 9; ++i)
{
s_meta_data[i] = meta_data[i];
}
}
__syncthreads();
// (cls, cx, cy) is an element in the hough space
int n = index / (height * width);
int cls = class_indexes[n];
int pix = index % (height * width);
int cx = pix % width;
int cy = pix / width;
int size = array_size[n];
float distance = 0;
float threshold = 0;
for (int i = 0; i < size; i += skip_pixels)
{
int offset = n * height * width + i;
int location = arrays[offset]; // H * W
int x = location % width;
int y = location / (width);
// read the direction
offset = n * height * width * VERTEX_CHANNELS + (y * width + x) * VERTEX_CHANNELS;
float u = vertmap[offset];
float v = vertmap[offset + 1];
float d = exp(vertmap[offset + 2]);
// vote
if (angle_distance(cx, cy, x, y, u, v) > inlierThreshold)
{
project_box(cls, extents, s_meta_data, d, 0.6, &threshold);
float dx = fabsf(x - cx);
float dy = fabsf(y - cy);
if (dx < threshold && dy < threshold)
{
hough_space[index]++;
distance += d;
}
}
}
if (hough_space[index] > 0)
{
distance /= hough_space[index];
float bb_width = -1;
float bb_height = -1;
for (int i = 0; i < size; i += skip_pixels)
{
int offset = n * height * width + i;
int location = arrays[offset];
int x = location % width;
int y = location / width;
// read the direction
offset = n * height * width * VERTEX_CHANNELS + (y * width + x) * VERTEX_CHANNELS;
float u = vertmap[offset];
float v = vertmap[offset + 1];
// vote
if (angle_distance(cx, cy, x, y, u, v) > inlierThreshold)
{
project_box(cls, extents, s_meta_data, distance, 0.6, &threshold);
float dx = fabsf(x - cx);
float dy = fabsf(y - cy);
if (dx > bb_width && dx < threshold && dy < threshold)
bb_width = dx;
if (dy > bb_height && dx < threshold && dy < threshold)
bb_height = dy;
}
}
int offset = n * height * width * 3 + 3 * (cy * width + cx);
hough_data[offset] = distance;
hough_data[offset + 1] = 2 * bb_height;
hough_data[offset + 2] = 2 * bb_width;
}
}
}
__global__ void compute_rois_kernel(const int nthreads, float* top_box, float* top_pose,
const float* poses, const float* meta_data, const float* hough_space, const float* hough_data, const int* max_indexes, const int* class_indexes,
const int height, const int width)
{
__shared__ float s_f[4];
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
if (threadIdx.x == 0)
{
s_f[0] = meta_data[0]; // fx
s_f[1] = meta_data[4]; // fy
s_f[2] = meta_data[2]; // px
s_f[3] = meta_data[5]; // py
}
__syncthreads();
float fx = s_f[0];
float fy = s_f[1];
float px = s_f[2];
float py = s_f[3];
float scale = 0.05;
int max_index = max_indexes[index];
float max_hs_idx = hough_space[max_index];
int ind = max_index / (height * width);
int cls = class_indexes[ind];
int n = max_index % (height * width);
int x = n % width;
int y = n / width;
float rx = (x - px) / fx;
float ry = (y - py) / fy;
int offset = ind * height * width * 3 + 3 * (y * width + x);
float bb_distance = hough_data[offset];
float bb_height = hough_data[offset + 1];
float bb_width = hough_data[offset + 2];
int roi_index = index;
top_box[roi_index * 7 + 0] = 0;
top_box[roi_index * 7 + 1] = cls;
top_box[roi_index * 7 + 2] = x - bb_width * (0.5 + scale);
top_box[roi_index * 7 + 3] = y - bb_height * (0.5 + scale);
top_box[roi_index * 7 + 4] = x + bb_width * (0.5 + scale);
top_box[roi_index * 7 + 5] = y + bb_height * (0.5 + scale);
top_box[roi_index * 7 + 6] = max_hs_idx;
top_pose[roi_index * 7 + 0] = poses[index*4];
top_pose[roi_index * 7 + 1] = poses[index*4+1];
top_pose[roi_index * 7 + 2] = poses[index*4+2];
top_pose[roi_index * 7 + 3] = poses[index*4+3];
top_pose[roi_index * 7 + 4] = rx * bb_distance;
top_pose[roi_index * 7 + 5] = ry * bb_distance;
top_pose[roi_index * 7 + 6] = bb_distance;
}
}
int HoughVotingForwardLaucher(
const int* labels, const int* labelmap, const float* vertmap, const float* extents, const float* meta_data, const float* poses,
const int batch_size, const int height, const int width,
const float inlierThreshold,// const float votingThreshold, const float perThreshold,
const int skip_pixels,
float* top_box, float* top_pose, cudaStream_t stream)
{
const int kThreadsPerBlock = THREADS_PER_BLOCK;
cudaError_t err;
const int N = batch_size;
// step 1: compute a label index array for each instance
int dims = N * height * width;
int* arrays;// = arrays_vec.get();
cudaMalloc((void **)&arrays, dims * sizeof(int));
cudaMemset(arrays, 0, N * sizeof(int));
int* array_sizes;// = array_sizes_vec.get();
cudaMalloc((void **)&array_sizes, N * sizeof(int));
cudaMemset(array_sizes, 0, N * sizeof(int));
int output_size = N * height * width;
compute_arrays_kernel<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
kThreadsPerBlock, 0, stream>>>(
output_size, labelmap, arrays, array_sizes, height, width);
cudaThreadSynchronize();
// DEBUG
// std::vector<int> array_sizes_host(N);
// cudaMemcpy(&array_sizes_host[0], array_sizes, N * sizeof(int), cudaMemcpyDeviceToHost);
// std::vector<int> labels_host(N);
// cudaMemcpy(&labels_host[0], labels, N * sizeof(int), cudaMemcpyDeviceToHost);
// std::vector<int> arrays_host(N*height*width);
// cudaMemcpy(&arrays_host[0], arrays, N*height*width * sizeof(int), cudaMemcpyDeviceToHost);
// // std::vector<float> meta_data_host(9);
// // cudaMemcpy(&meta_data_host[0], meta_data, 9 * sizeof(float), cudaMemcpyDeviceToHost);
// for (int n = 0; n < N; n++)
// {
// printf("Class %d) %d) (labels count: %d), sample value: %d\n", labels_host[n], n, array_sizes_host[n], arrays_host[n*height*width]);
// }
// // for (int n = 0; n < 9; n++)
// // {
// // printf("META: %.3f\n", meta_data_host[n]);
// // }
// // printf("\n");
// //
// step 2: compute the hough space
float* hough_space; // = thrust::raw_pointer_cast(hough_space_vec.data());
cudaMalloc((void **)&hough_space, N * height * width * sizeof(float));
if (cudaMemset(hough_space, 0, N * height * width * sizeof(float)) != cudaSuccess)
fprintf(stderr, "reset error\n");
float* hough_data; // = thrust::raw_pointer_cast(hough_data_vec.data());
cudaMalloc((void **)&hough_data, N * height * width * 3 * sizeof(float));
if (cudaMemset(hough_data, 0, N * height * width * 3 * sizeof(float)) != cudaSuccess)
fprintf(stderr, "reset error\n");
output_size = N * height * width;
compute_hough_kernel<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
kThreadsPerBlock, 0, stream>>>(
output_size, hough_space, hough_data, vertmap, extents, meta_data,
arrays, array_sizes, labels, height, width, inlierThreshold, skip_pixels);
cudaThreadSynchronize();
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed compute hough space: %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
// step 3: find the maximum in hough space
std::vector<int> max_indexes_host(N);
for (int i = 0; i < N; i++)
{
float *hmax = thrust::max_element(thrust::device, hough_space + i * height * width, hough_space + (i+1) * height * width);
max_indexes_host[i] = hmax - hough_space;
// printf("Max indexes %d) %d\n", i, max_indexes_host[i]);
}
int* max_indexes;
cudaMalloc((void **)&max_indexes, N * sizeof(int));
cudaMemcpy(max_indexes, &max_indexes_host[0], N * sizeof(int), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed compute maximum: %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
// step 4: compute outputs
output_size = N;
compute_rois_kernel<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
kThreadsPerBlock, 0, stream>>>(
output_size, top_box, top_pose,
poses, meta_data, hough_space, hough_data, max_indexes, labels,
height, width);
cudaThreadSynchronize();
// err checking
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed compute outputs: %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
cudaFree(arrays);
cudaFree(array_sizes);
cudaFree(hough_space);
cudaFree(hough_data);
cudaFree(max_indexes);
return 1;
}
std::vector<at::Tensor> hough_voting_forward_cuda
(
const at::Tensor& labels, const at::Tensor& masks, const at::Tensor& vertmap, const at::Tensor& extents,
const at::Tensor& meta_data, const at::Tensor& poses,
const float inlierThreshold, const int skip_pixels
)
{
int batch_size = masks.size(0);
int N = batch_size;
int H = masks.size(1);
int W = masks.size(2);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// float tensors
at::Tensor top_box = at::zeros({N, 7}, vertmap.options());
at::Tensor top_pose = at::zeros({N, 7}, vertmap.options());
HoughVotingForwardLaucher(
labels.contiguous().data<int>(), masks.contiguous().data<int>(), vertmap.contiguous().data<float>(), extents.contiguous().data<float>(),
meta_data.contiguous().data<float>(), poses.contiguous().data<float>(),
N, H, W,
inlierThreshold, // votingThreshold, perThreshold,
skip_pixels,
top_box.data<float>(), top_pose.data<float>(),
stream
);
THCudaCheck(cudaGetLastError());
return {top_box, top_pose};
}
|
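Step 3 of the launcher above uses thrust::max_element over raw device pointers to find a flat argmax per H*W slice of the Hough space. A self-contained sketch of just that step; the function and buffer names (per_slice_argmax, d_hough) are illustrative and not part of the original file:
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <vector>

std::vector<int> per_slice_argmax(const float* d_hough, int N, int height, int width) {
    std::vector<int> max_idx(N);
    for (int i = 0; i < N; ++i) {
        const float* first = d_hough + (size_t)i * height * width;
        const float* last  = first + (size_t)height * width;
        // thrust::max_element with the thrust::device policy accepts raw device pointers
        // and returns a pointer to the largest element; subtracting the buffer base
        // yields the flat index that the launcher copies back to the GPU.
        const float* hmax = thrust::max_element(thrust::device, first, last);
        max_idx[i] = static_cast<int>(hmax - d_hough);
    }
    return max_idx;
}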
37a668642632b06a7fdb0f258687cced56316db8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/top_k_function_cuda.h"
#include "paddle/fluid/operators/top_k_v2_op.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
#define FIXED_BLOCK_DIM_BASE(dim, ...) \
case (dim): { \
constexpr auto kBlockDim = (dim); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM(...) \
FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__)
template <typename DeviceContext, typename T>
class TopkV2OpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument(
"It must use CUDAPlace, you must check your device set."));
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto* indices = ctx.Output<Tensor>("Indices");
// get the attributes
int k = static_cast<int>(ctx.Attr<int>("k"));
int axis = static_cast<int>(ctx.Attr<int>("axis"));
const bool& sorted = static_cast<bool>(ctx.Attr<bool>("sorted"));
const bool& largest = static_cast<bool>(ctx.Attr<bool>("largest"));
// get the input dims
const auto& in_dims = input->dims();
// calcluate the real axis
if (axis < 0) axis += in_dims.size();
auto* k_t = ctx.Input<Tensor>("K");
if (k_t) {
Tensor k_host;
framework::TensorCopySync(*k_t, platform::CPUPlace(), &k_host);
k = k_host.data<int>()[0];
framework::DDim output_dims = output->dims();
output_dims[axis] = k;
output->Resize(output_dims);
indices->Resize(output_dims);
}
const auto& out_dims = output->dims();
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(ctx.GetPlace());
int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace());
if (axis == in_dims.size() - 1) {
// if get the topK from the last axis
const int64_t& input_height = framework::product(
framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t& input_width = in_dims[in_dims.size() - 1];
const auto& dev_ctx = ctx.cuda_device_context();
if (k > input_width) k = input_width;
if ((input_width <= 1024 || k >= 128 || k == input_width)) {
if (SortTopk<T>(dev_ctx, input, input_width, input_height, k, output,
indices, largest)) {
// Successed, return.
return;
} else {
LOG(INFO) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
// NOTE: pass lds and dim same to input width.
// NOTE: old matrix implementation of stride is different to eigen.
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
switch (GetDesiredBlockDim(input_width)) {
FIXED_BLOCK_DIM(
hipLaunchKernelGGL(( KeMatrixTopK<T, 5,
kBlockDim>), dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(),
output_data, k, indices_data, input_data, input_width,
input_width, static_cast<int>(k), gridx, input_height,
largest));
default:
PADDLE_THROW(platform::errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
} else {
// if get topK not from the last axis, will tranpose the tensor and get
// TopK
// first step, prepare the trans args for the tranpose
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.emplace_back(i);
}
trans.emplace_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans.emplace_back(i);
}
trans.emplace_back(axis);
framework::DDim trans_dims(in_dims);
framework::DDim trans_out_dims(output->dims());
for (int i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
trans_out_dims[i] = out_dims[trans[i]];
}
// second step, tranpose the input
Tensor trans_input;
trans_input.mutable_data<T>(trans_dims, ctx.GetPlace());
int ndims = trans.size();
const auto& dev_ctx = ctx.cuda_device_context();
TransCompute<platform::CUDADeviceContext, T>(ndims, dev_ctx, *input,
&trans_input, trans);
// third step, calcluate the topk
// allocate the tmp cuda memory for the tmp result
Tensor trans_ind;
trans_ind.mutable_data<int64_t>(trans_out_dims, ctx.GetPlace());
Tensor trans_out;
trans_out.mutable_data<T>(trans_out_dims, ctx.GetPlace());
const int64_t input_height = framework::product(
framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
const int64_t input_width = trans_dims[trans_dims.size() - 1];
if (k > input_width) k = input_width;
if ((input_width <= 1024 || k >= 128 || k == input_width)) {
if (SortTopk<T>(dev_ctx, &trans_input, input_width, input_height, k,
&trans_out, &trans_ind, largest)) {
// last step, tranpose back the indices and output
TransCompute<platform::CUDADeviceContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
TransCompute<platform::CUDADeviceContext, T>(
ndims, dev_ctx, trans_out, output, trans);
return;
} else {
LOG(INFO) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
switch (GetDesiredBlockDim(input_width)) {
FIXED_BLOCK_DIM(
hipLaunchKernelGGL(( KeMatrixTopK<T, 5,
kBlockDim>), dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(),
trans_out.data<T>(), k, trans_ind.data<int64_t>(),
trans_input.data<T>(), input_width, input_width,
static_cast<int>(k), gridx, input_height, largest));
default:
PADDLE_THROW(platform::errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
// last step, tranpose back the indices and output
TransCompute<platform::CUDADeviceContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
TransCompute<platform::CUDADeviceContext, T>(ndims, dev_ctx, trans_out,
output, trans);
}
}
};
#undef FIXED_BLOCK_DIM_BASE
#undef FIXED_BLOCK_DIM
template <typename DeviceContext, typename T>
class TopkV2OpGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(context.GetPlace()), true,
platform::errors::InvalidArgument(
"It must use CUDAPlace, you must check your device set."));
auto* x = context.Input<Tensor>("X");
auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out"));
auto* indices = context.Input<Tensor>("Indices");
auto* x_grad = context.Output<Tensor>(framework::GradVarName("X"));
int axis = context.Attr<int>("axis");
const auto& in_dims = x->dims();
const auto& out_dims = indices->dims();
// get the real the axis and the k
if (axis < 0) axis += in_dims.size();
const int& k = out_dims[axis];
const int& raw_height = in_dims[axis];
// allocate the cuda memory for the x_grad
T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
const T* out_grad_data = out_grad->data<T>();
const int64_t* indices_data = indices->data<int64_t>();
int pre, n, post;
GetDims(in_dims, axis, &pre, &n, &post);
// calcluate the block and grid num
auto& dev_ctx = context.cuda_device_context();
auto ComputeBlockSize = [](int col) {
if (col > 512)
return 1024;
else if (col > 256 && col <= 512)
return 512;
else if (col > 128 && col <= 256)
return 256;
else if (col > 64 && col <= 128)
return 128;
else
return 64;
};
int block_size = ComputeBlockSize(post * k);
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int max_blocks = ::max(((max_threads - 1) / block_size + 1), 1);
int grid_size = ::min(max_blocks, pre);
// lanuch the cuda kernel to assign the grad
hipLaunchKernelGGL(( AssignGradWithAxis<T>), dim3(grid_size), dim3(block_size), 64 * 4, dev_ctx.stream(),
out_grad_data, indices_data, x_grad_data, pre, post, n, k);
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
top_k_v2,
paddle::operators::TopkV2OpCUDAKernel<paddle::platform::CUDADeviceContext,
float>,
paddle::operators::TopkV2OpCUDAKernel<paddle::platform::CUDADeviceContext,
double>,
paddle::operators::TopkV2OpCUDAKernel<paddle::platform::CUDADeviceContext,
int>,
paddle::operators::TopkV2OpCUDAKernel<paddle::platform::CUDADeviceContext,
int64_t>,
paddle::operators::TopkV2OpCUDAKernel<paddle::platform::CUDADeviceContext,
paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
top_k_v2_grad, paddle::operators::TopkV2OpGradCUDAKernel<
paddle::platform::CUDADeviceContext, float>,
paddle::operators::TopkV2OpGradCUDAKernel<
paddle::platform::CUDADeviceContext, double>,
paddle::operators::TopkV2OpGradCUDAKernel<
paddle::platform::CUDADeviceContext, int>,
paddle::operators::TopkV2OpGradCUDAKernel<
paddle::platform::CUDADeviceContext, int64_t>,
paddle::operators::TopkV2OpGradCUDAKernel<
paddle::platform::CUDADeviceContext, paddle::platform::float16>);
|
37a668642632b06a7fdb0f258687cced56316db8.cu
|
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/top_k_function_cuda.h"
#include "paddle/fluid/operators/top_k_v2_op.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
#define FIXED_BLOCK_DIM_BASE(dim, ...) \
case (dim): { \
constexpr auto kBlockDim = (dim); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM(...) \
FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__)
template <typename DeviceContext, typename T>
class TopkV2OpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument(
"It must use CUDAPlace, you must check your device set."));
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto* indices = ctx.Output<Tensor>("Indices");
// get the attributes
int k = static_cast<int>(ctx.Attr<int>("k"));
int axis = static_cast<int>(ctx.Attr<int>("axis"));
const bool& sorted = static_cast<bool>(ctx.Attr<bool>("sorted"));
const bool& largest = static_cast<bool>(ctx.Attr<bool>("largest"));
// get the input dims
const auto& in_dims = input->dims();
// calcluate the real axis
if (axis < 0) axis += in_dims.size();
auto* k_t = ctx.Input<Tensor>("K");
if (k_t) {
Tensor k_host;
framework::TensorCopySync(*k_t, platform::CPUPlace(), &k_host);
k = k_host.data<int>()[0];
framework::DDim output_dims = output->dims();
output_dims[axis] = k;
output->Resize(output_dims);
indices->Resize(output_dims);
}
const auto& out_dims = output->dims();
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(ctx.GetPlace());
int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace());
if (axis == in_dims.size() - 1) {
// if get the topK from the last axis
const int64_t& input_height = framework::product(
framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t& input_width = in_dims[in_dims.size() - 1];
const auto& dev_ctx = ctx.cuda_device_context();
if (k > input_width) k = input_width;
if ((input_width <= 1024 || k >= 128 || k == input_width)) {
if (SortTopk<T>(dev_ctx, input, input_width, input_height, k, output,
indices, largest)) {
// Successed, return.
return;
} else {
LOG(INFO) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
// NOTE: pass lds and dim same to input width.
// NOTE: old matrix implementation of stride is different to eigen.
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
switch (GetDesiredBlockDim(input_width)) {
FIXED_BLOCK_DIM(
KeMatrixTopK<T, 5,
kBlockDim><<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(
output_data, k, indices_data, input_data, input_width,
input_width, static_cast<int>(k), gridx, input_height,
largest));
default:
PADDLE_THROW(platform::errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
} else {
// if get topK not from the last axis, will tranpose the tensor and get
// TopK
// first step, prepare the trans args for the tranpose
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.emplace_back(i);
}
trans.emplace_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans.emplace_back(i);
}
trans.emplace_back(axis);
framework::DDim trans_dims(in_dims);
framework::DDim trans_out_dims(output->dims());
for (int i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
trans_out_dims[i] = out_dims[trans[i]];
}
// second step, tranpose the input
Tensor trans_input;
trans_input.mutable_data<T>(trans_dims, ctx.GetPlace());
int ndims = trans.size();
const auto& dev_ctx = ctx.cuda_device_context();
TransCompute<platform::CUDADeviceContext, T>(ndims, dev_ctx, *input,
&trans_input, trans);
// third step, calcluate the topk
// allocate the tmp cuda memory for the tmp result
Tensor trans_ind;
trans_ind.mutable_data<int64_t>(trans_out_dims, ctx.GetPlace());
Tensor trans_out;
trans_out.mutable_data<T>(trans_out_dims, ctx.GetPlace());
const int64_t input_height = framework::product(
framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
const int64_t input_width = trans_dims[trans_dims.size() - 1];
if (k > input_width) k = input_width;
if ((input_width <= 1024 || k >= 128 || k == input_width)) {
if (SortTopk<T>(dev_ctx, &trans_input, input_width, input_height, k,
&trans_out, &trans_ind, largest)) {
// last step, tranpose back the indices and output
TransCompute<platform::CUDADeviceContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
TransCompute<platform::CUDADeviceContext, T>(
ndims, dev_ctx, trans_out, output, trans);
return;
} else {
LOG(INFO) << "TopKOP: Some errors happened when use cub sorting, use "
"default topk kernel.";
}
}
const int kMaxHeight = 2048;
int gridx = input_height < kMaxHeight ? input_height : kMaxHeight;
switch (GetDesiredBlockDim(input_width)) {
FIXED_BLOCK_DIM(
KeMatrixTopK<T, 5,
kBlockDim><<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(
trans_out.data<T>(), k, trans_ind.data<int64_t>(),
trans_input.data<T>(), input_width, input_width,
static_cast<int>(k), gridx, input_height, largest));
default:
PADDLE_THROW(platform::errors::Fatal(
"the input data shape has error in the topk cuda kernel."));
}
// last step, tranpose back the indices and output
TransCompute<platform::CUDADeviceContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
TransCompute<platform::CUDADeviceContext, T>(ndims, dev_ctx, trans_out,
output, trans);
}
}
};
#undef FIXED_BLOCK_DIM_BASE
#undef FIXED_BLOCK_DIM
template <typename DeviceContext, typename T>
class TopkV2OpGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(context.GetPlace()), true,
platform::errors::InvalidArgument(
"It must use CUDAPlace, you must check your device set."));
auto* x = context.Input<Tensor>("X");
auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out"));
auto* indices = context.Input<Tensor>("Indices");
auto* x_grad = context.Output<Tensor>(framework::GradVarName("X"));
int axis = context.Attr<int>("axis");
const auto& in_dims = x->dims();
const auto& out_dims = indices->dims();
// get the real the axis and the k
if (axis < 0) axis += in_dims.size();
const int& k = out_dims[axis];
const int& raw_height = in_dims[axis];
// allocate the cuda memory for the x_grad
T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
const T* out_grad_data = out_grad->data<T>();
const int64_t* indices_data = indices->data<int64_t>();
int pre, n, post;
GetDims(in_dims, axis, &pre, &n, &post);
// calcluate the block and grid num
auto& dev_ctx = context.cuda_device_context();
auto ComputeBlockSize = [](int col) {
if (col > 512)
return 1024;
else if (col > 256 && col <= 512)
return 512;
else if (col > 128 && col <= 256)
return 256;
else if (col > 64 && col <= 128)
return 128;
else
return 64;
};
int block_size = ComputeBlockSize(post * k);
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int max_blocks = std::max(((max_threads - 1) / block_size + 1), 1);
int grid_size = std::min(max_blocks, pre);
// lanuch the cuda kernel to assign the grad
AssignGradWithAxis<T><<<grid_size, block_size, 64 * 4, dev_ctx.stream()>>>(
out_grad_data, indices_data, x_grad_data, pre, post, n, k);
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
top_k_v2,
paddle::operators::TopkV2OpCUDAKernel<paddle::platform::CUDADeviceContext,
float>,
paddle::operators::TopkV2OpCUDAKernel<paddle::platform::CUDADeviceContext,
double>,
paddle::operators::TopkV2OpCUDAKernel<paddle::platform::CUDADeviceContext,
int>,
paddle::operators::TopkV2OpCUDAKernel<paddle::platform::CUDADeviceContext,
int64_t>,
paddle::operators::TopkV2OpCUDAKernel<paddle::platform::CUDADeviceContext,
paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
top_k_v2_grad, paddle::operators::TopkV2OpGradCUDAKernel<
paddle::platform::CUDADeviceContext, float>,
paddle::operators::TopkV2OpGradCUDAKernel<
paddle::platform::CUDADeviceContext, double>,
paddle::operators::TopkV2OpGradCUDAKernel<
paddle::platform::CUDADeviceContext, int>,
paddle::operators::TopkV2OpGradCUDAKernel<
paddle::platform::CUDADeviceContext, int64_t>,
paddle::operators::TopkV2OpGradCUDAKernel<
paddle::platform::CUDADeviceContext, paddle::platform::float16>);
|
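The FIXED_BLOCK_DIM macros above map a runtime column width to a compile-time block dimension so that KeMatrixTopK can be templated on kBlockDim. A stripped-down sketch of that dispatch pattern; demo_kernel, DEMO_CASE and DispatchByWidth are illustrative names, and the width-to-dimension mapping only approximates what GetDesiredBlockDim does:
#include <cstdio>

template <int kBlockDim>
__global__ void demo_kernel(int width) {
    if (blockIdx.x == 0 && threadIdx.x == 0)
        printf("width %d dispatched to block dim %d\n", width, kBlockDim);
}

#define DEMO_CASE(dim)                                   \
    case (dim): {                                        \
        constexpr int kBlockDim = (dim);                 \
        demo_kernel<kBlockDim><<<1, kBlockDim>>>(width); \
    } break

void DispatchByWidth(int width) {
    // Round the width up to the next power of two in [32, 256], then switch on it so
    // each case instantiates the kernel with a distinct compile-time block size.
    int dim = 32;
    while (dim < width && dim < 256) dim <<= 1;
    switch (dim) {
        DEMO_CASE(256);
        DEMO_CASE(128);
        DEMO_CASE(64);
        DEMO_CASE(32);
        default: break;
    }
}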
0b5a63f55fe2e6c42c15bef13f686f28458541fc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2016-present Jean-Noel Braun.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef BCNN_USE_CUDA
#include "bcnn_mat.h"
#include "bcnn_tensor.h"
#include "bcnn_upsample_layer.h"
#include "bcnn_utils.h"
__global__ void bcnn_forward_upsample_cuda_kernel(size_t dst_sz, float *src,
int w, int h, int c, int n,
int size, float *dst) {
size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i >= dst_sz) {
return;
}
int dst_idx = i;
int dst_w = i % (w * size);
i = i / (w * size);
int dst_h = i % (h * size);
i = i / (h * size);
int dst_c = i % c;
i = i / c;
int b = i % n;
int src_w = dst_w / size;
int src_h = dst_h / size;
int src_c = dst_c;
int src_idx = b * w * h * c + src_c * w * h + src_h * w + src_w;
dst[dst_idx] += src[src_idx];
}
void bcnn_forward_upsample_layer_gpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
bcnn_upsample_param *param = (bcnn_upsample_param *)node->param;
bcnn_cuda_fill_f32(bcnn_tensor_size(dst_tensor), 0, dst_tensor->data_gpu,
1);
size_t size = src_tensor->w * src_tensor->h * src_tensor->c *
src_tensor->n * param->size * param->size;
hipLaunchKernelGGL(( bcnn_forward_upsample_cuda_kernel), dim3(bcnn_cuda_blocks(size)),
dim3(BCNN_CUDA_THREADS), 0, 0,
size, src_tensor->data_gpu, src_tensor->w, src_tensor->h, src_tensor->c,
src_tensor->n, param->size, dst_tensor->data_gpu);
bcnn_cuda_check(hipPeekAtLastError());
return;
}
__global__ void bcnn_backward_upsample_cuda_kernel(size_t dst_sz, float *src,
int w, int h, int c, int n,
int size, float *dst) {
size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i >= dst_sz) {
return;
}
int dst_idx = i;
int dst_w = i % (w * size);
i = i / (w * size);
int dst_h = i % (h * size);
i = i / (h * size);
int dst_c = i % c;
i = i / c;
int b = i % n;
int in_w = dst_w / size;
int in_h = dst_h / size;
int in_c = dst_c;
int src_idx = b * w * h * c + in_c * w * h + in_h * w + in_w;
src[src_idx] += dst[dst_idx];
}
void bcnn_backward_upsample_layer_gpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
bcnn_upsample_param *param = (bcnn_upsample_param *)node->param;
size_t size = src_tensor->w * src_tensor->h * src_tensor->c *
src_tensor->n * param->size * param->size;
hipLaunchKernelGGL(( bcnn_backward_upsample_cuda_kernel), dim3(bcnn_cuda_blocks(size)),
dim3(BCNN_CUDA_THREADS), 0, 0,
size, src_tensor->grad_data_gpu, src_tensor->w, src_tensor->h,
src_tensor->c, src_tensor->n, param->size, dst_tensor->grad_data_gpu);
bcnn_cuda_check(hipPeekAtLastError());
return;
}
#endif // BCNN_USE_CUDA
|
0b5a63f55fe2e6c42c15bef13f686f28458541fc.cu
|
/*
* Copyright (c) 2016-present Jean-Noel Braun.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef BCNN_USE_CUDA
#include "bcnn_mat.h"
#include "bcnn_tensor.h"
#include "bcnn_upsample_layer.h"
#include "bcnn_utils.h"
__global__ void bcnn_forward_upsample_cuda_kernel(size_t dst_sz, float *src,
int w, int h, int c, int n,
int size, float *dst) {
size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i >= dst_sz) {
return;
}
int dst_idx = i;
int dst_w = i % (w * size);
i = i / (w * size);
int dst_h = i % (h * size);
i = i / (h * size);
int dst_c = i % c;
i = i / c;
int b = i % n;
int src_w = dst_w / size;
int src_h = dst_h / size;
int src_c = dst_c;
int src_idx = b * w * h * c + src_c * w * h + src_h * w + src_w;
dst[dst_idx] += src[src_idx];
}
void bcnn_forward_upsample_layer_gpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
bcnn_upsample_param *param = (bcnn_upsample_param *)node->param;
bcnn_cuda_fill_f32(bcnn_tensor_size(dst_tensor), 0, dst_tensor->data_gpu,
1);
size_t size = src_tensor->w * src_tensor->h * src_tensor->c *
src_tensor->n * param->size * param->size;
bcnn_forward_upsample_cuda_kernel<<<bcnn_cuda_blocks(size),
BCNN_CUDA_THREADS>>>(
size, src_tensor->data_gpu, src_tensor->w, src_tensor->h, src_tensor->c,
src_tensor->n, param->size, dst_tensor->data_gpu);
bcnn_cuda_check(cudaPeekAtLastError());
return;
}
__global__ void bcnn_backward_upsample_cuda_kernel(size_t dst_sz, float *src,
int w, int h, int c, int n,
int size, float *dst) {
size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i >= dst_sz) {
return;
}
int dst_idx = i;
int dst_w = i % (w * size);
i = i / (w * size);
int dst_h = i % (h * size);
i = i / (h * size);
int dst_c = i % c;
i = i / c;
int b = i % n;
int in_w = dst_w / size;
int in_h = dst_h / size;
int in_c = dst_c;
int src_idx = b * w * h * c + in_c * w * h + in_h * w + in_w;
src[src_idx] += dst[dst_idx];
}
void bcnn_backward_upsample_layer_gpu(bcnn_net *net, bcnn_node *node) {
bcnn_tensor *src_tensor = &net->tensors[node->src[0]];
bcnn_tensor *dst_tensor = &net->tensors[node->dst[0]];
bcnn_upsample_param *param = (bcnn_upsample_param *)node->param;
size_t size = src_tensor->w * src_tensor->h * src_tensor->c *
src_tensor->n * param->size * param->size;
bcnn_backward_upsample_cuda_kernel<<<bcnn_cuda_blocks(size),
BCNN_CUDA_THREADS>>>(
size, src_tensor->grad_data_gpu, src_tensor->w, src_tensor->h,
src_tensor->c, src_tensor->n, param->size, dst_tensor->grad_data_gpu);
bcnn_cuda_check(cudaPeekAtLastError());
return;
}
#endif // BCNN_USE_CUDA
|
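The forward and backward upsample kernels above decode a flat destination index into (batch, channel, row, column) coordinates and fold them back into a source index. A worked example with small illustrative sizes, w = 4, h = 4, c = 2, n = 1, size = 2, so the destination plane is 8x8:
//   i = 75:  dst_w = 75 % (4*2) = 3,   i = 75 / 8 = 9
//            dst_h = 9 % (4*2)  = 1,   i = 9 / 8  = 1
//            dst_c = 1 % 2      = 1,   i = 1 / 2  = 0,   b = 0 % 1 = 0
//   src_w = dst_w / size = 1,  src_h = dst_h / size = 0,  src_c = dst_c = 1
//   src_idx = b*w*h*c + src_c*w*h + src_h*w + src_w = 0 + 16 + 0 + 1 = 17
//   i.e. destination pixel (row 1, col 3) of channel 1 accumulates source pixel (row 0, col 1).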
02b06cbb80b8e989c4df6d00fd2edfe4d2a964e7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//%%cu
/**************************************************************************
C-DAC Tech Workshop : hyPACK-2013
October 15-18, 2013
Objective : Program to solve a solution of Poisson Eq. (PDE) on GPU
Input : No. of Grid Points in X-Dir, No. of Grid Points in Y-Dir
and maximum number of iterations
Output : Solution Vector.
Created : August-2013
E-mail : [email protected]
*****************************************************************************/
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
#include<sys/time.h>
//---------------------------------------------------------------------------
#define BLOCKSIZE 16
#define TOLERANCE 1.0E-06
//#define TOPBOUNDARYVALUE 4.1f
//#define BOTTOMBOUNDARYVALUE 3.1f
//#define LEFTBOUNDARYVALUE 1.1f
//#define RIGHTBOUNDARYVALUE 2.1f
#define TOPBOUNDARYVALUE 1.0f
#define BOTTOMBOUNDARYVALUE 1.0f
#define LEFTBOUNDARYVALUE 1.0f
#define RIGHTBOUNDARYVALUE 1.0f
//----------------------------------------------------------------------------
void IntializeAndSetBoundaryConditions(float **, float **, int , int , int );
void SetBoundaryCondition(int , int , float , int , float *, float *);
void IntializeUInteriorIndex(int **, int , int , int );
float GetTheMaximumValue(float *,int );
void IntializeUDifference(float **, int );
//------------------------------------------------------------------------
//Pragma routine to report the detail of cuda error
#define CUDA_SAFE_CALL(call) \
do{ \
hipError_t err = call; \
if(err != hipSuccess) \
{ \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(1); \
} \
} while (0) \
//------------------------------------------------------------------------------------------
//Kernel that performs the Jacobi Iteration
__global__ void JacobiIteration(float *DeviceUOld, float *DeviceUNew, int *DeviceUInteriorIndex, float *DeviceUDifference, int NoPointsX, int Size, int ThreadDim)
{
int tidx = threadIdx.x;
int tidy = threadIdx.y;
int ThreadIndex = (ThreadDim * tidx) + tidy;
int MaxNumThread = ThreadDim * ThreadDim;
int CurrentColumnIndex;
int pass = 0;
int Center, Left, Right, Bottom, Top;
while( (CurrentColumnIndex = (ThreadIndex + MaxNumThread * pass)) < Size )
{
Center = DeviceUInteriorIndex[CurrentColumnIndex];
Left = Center - 1;
Right = Center + 1;
Top = Center - NoPointsX;
Bottom = Center + NoPointsX;
//Updating the UNew values
DeviceUNew[Center] = 0.25 * (DeviceUOld[Left] + DeviceUOld[Right] + DeviceUOld[Top] + DeviceUOld[Bottom]);
//Finding the Difference between UNew and UOld
DeviceUDifference[CurrentColumnIndex] = DeviceUNew[Center] - DeviceUOld[Center];
//Assigning UNew to UOld
DeviceUOld[Center] = DeviceUNew[Center];
pass++;
}
__syncthreads();
}//End of Jacobi Iteration Device function
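//For reference, the update above is the classical five-point Jacobi iteration
//for the discretised Laplace operator (zero right-hand side):
//    u_new(i,j) = 0.25 * ( u_old(i-1,j) + u_old(i+1,j) + u_old(i,j-1) + u_old(i,j+1) )
//The host loop copies the per-point differences back each iteration and stops
//once max|u_new - u_old| falls below TOLERANCE or MaxIterations is reached.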
//----------------------------------------------------------------------------------------------
int main(int argc, char **argv)
{
//Checking if a valid number of arguments has been passed
/*
if(argc != 4)
{
printf("Valid number of inputs are not given \n");
printf("Usage:<./Program Name><Number of X points><Number of Y points><Maximum Number of Iterations> \n");
exit(-1);
}
*/
//Host Variables Declaration
float *UOld, *UNew, *UDifference;
int *UInteriorIndex;
float MaxError = 0.0f;
struct timeval TV;
double StartTime,EndTime,ActualTime;
int NoPointsX, NoPointsY, MaxIterations, NoPointsInterior, Index, PresentIteration,NoPointsTotal;
//Device Variables Declaration
float *DeviceUOld, *DeviceUNew, *DeviceUDifference;
int *DeviceUInteriorIndex;
//Obtaining the values of NoPointsX, NoPointsY and MaxIterations from the arguments passed by the user
// NoPointsX = atoi( argv[1] );
//NoPointsY = atoi( argv[2] );
//MaxIterations = atoi( argv[3] );
NoPointsX = 10;
NoPointsY = 10;
MaxIterations = 10;
//Calculating the Total Points and Interior Points
NoPointsTotal = NoPointsX * NoPointsY;
NoPointsInterior = (NoPointsTotal) - (((2 * NoPointsX) + (2 * NoPointsY)) - 4);
//Initializing UOld and setting the boundary conditions
IntializeAndSetBoundaryConditions( &UOld, &UNew, NoPointsX, NoPointsY, NoPointsTotal );
//Initializing UDifference
IntializeUDifference( &UDifference,NoPointsInterior );
//Filling the UInteriorIndex with Index Values of Interior Points
IntializeUInteriorIndex( &UInteriorIndex, NoPointsX, NoPointsY,NoPointsInterior );
//Allocating Memory on Device
CUDA_SAFE_CALL( hipMalloc( (void **)&DeviceUOld, NoPointsTotal * sizeof(float)));
CUDA_SAFE_CALL( hipMalloc( (void **)&DeviceUNew, NoPointsTotal * sizeof(float)));
CUDA_SAFE_CALL( hipMalloc( (void **)&DeviceUInteriorIndex, NoPointsInterior * sizeof(int)));
CUDA_SAFE_CALL( hipMalloc( (void **)&DeviceUDifference, NoPointsInterior * sizeof(float)));
//Copying Data from Host to Device
CUDA_SAFE_CALL( hipMemcpy((void *)DeviceUOld, (void *)UOld, NoPointsTotal * sizeof(float), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy((void *)DeviceUNew, (void *)UNew, NoPointsTotal * sizeof(float), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy((void *)DeviceUInteriorIndex, (void *)UInteriorIndex, NoPointsInterior * sizeof(int), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy((void *)DeviceUDifference, (void *)UDifference, NoPointsInterior * sizeof(float), hipMemcpyHostToDevice) );
//Defining Thread Grid and the Thread Block
dim3 DimGrid( 1,1 );
dim3 DimBlock( BLOCKSIZE,BLOCKSIZE );
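//Note: a single 16x16 block is launched (DimGrid is 1x1); inside the kernel each
//thread strides over the interior points in steps of BLOCKSIZE*BLOCKSIZE = 256
//until all NoPointsInterior updates have been performed.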
PresentIteration = 0;
//start timing computation
gettimeofday(&TV, NULL);
StartTime = TV.tv_sec+( TV.tv_usec/1000000.0 );
while(1)
{
//Incrementing the Iteration Number
PresentIteration++;
//Invoking the Kernel
hipLaunchKernelGGL(( JacobiIteration), dim3(DimGrid), dim3(DimBlock), 0, 0, DeviceUOld, DeviceUNew, DeviceUInteriorIndex, DeviceUDifference, NoPointsX, NoPointsInterior, BLOCKSIZE );
//Copying Udifference from Device to Host
CUDA_SAFE_CALL( hipMemcpy((void *)UDifference, (void *)DeviceUDifference, NoPointsInterior * sizeof(float), hipMemcpyDeviceToHost) );
//Finding the Maximum among the UDifference values
MaxError = GetTheMaximumValue( UDifference, NoPointsInterior );
//Checking for the convergence
if((MaxError < TOLERANCE) || (PresentIteration == MaxIterations))
break;
}
//stop timing computation
gettimeofday(&TV,NULL);
EndTime = TV.tv_sec+(TV.tv_usec/1000000.0);
//calculate difference between start and stop times
ActualTime = EndTime - StartTime;
//Copying UNew from Device to Host
CUDA_SAFE_CALL(hipMemcpy((void *)UNew, (void *)DeviceUNew, NoPointsTotal * sizeof(float), hipMemcpyDeviceToHost));
//Printing the solution
for(Index = 0; Index < NoPointsTotal; Index++)
printf(" %f", UNew[Index]);
printf("Output Vector given above calculated in %d Iterations and in %lf secs.\n",PresentIteration,ActualTime);
//Freeing the Allocated Memory on Device
CUDA_SAFE_CALL( hipFree( DeviceUOld ) );
CUDA_SAFE_CALL( hipFree( DeviceUNew ) );
CUDA_SAFE_CALL( hipFree( DeviceUInteriorIndex ) );
CUDA_SAFE_CALL( hipFree( DeviceUDifference ) );
//Freeing the Allocated Memory on Host
free( UOld );
free( UNew );
free( UInteriorIndex );
free( UDifference );
return(0);
}//End of Main
//-----------------------------------------------------------------------------------------------------
void IntializeAndSetBoundaryConditions( float **UOld, float **UNew, int NoPointsX, int NoPointsY, int NoPointsTotal )
{
float *TempUOld,*TempUNew;
int Index;
//Allocating memory for UOld and UNew
TempUOld = (float *)malloc( NoPointsTotal * sizeof(float) );
if(TempUOld == NULL)
{
printf("Can't allocate the memory for the variable TempUOld \n");
exit(-1);
}
TempUNew = (float *)malloc( NoPointsTotal * sizeof(float) );
if(TempUNew == NULL)
{
printf("Can't allocate the memory for the variable TempUNew \n");
exit(-1);
}
//Initialize UOld to zeros
for(Index = 0; Index < (NoPointsTotal); Index++)
TempUOld[Index] = 0.0;
//Setting the Boundary Conditions
//Case:Left
for(Index = 0; Index < NoPointsY; Index++)
SetBoundaryCondition(0, Index, LEFTBOUNDARYVALUE, NoPointsX, TempUOld, TempUNew);
//Case:Right
for(Index = 0; Index < NoPointsY; Index++)
SetBoundaryCondition((NoPointsX - 1), Index, RIGHTBOUNDARYVALUE, NoPointsX, TempUOld, TempUNew);
//Case:Bottom
for(Index = 0; Index < NoPointsX; Index++)
SetBoundaryCondition(Index, 0, BOTTOMBOUNDARYVALUE, NoPointsX, TempUOld, TempUNew);
//Case:Top
for(Index = 0; Index < NoPointsX; Index++)
SetBoundaryCondition(Index, (NoPointsY - 1), TOPBOUNDARYVALUE, NoPointsX, TempUOld, TempUNew);
//Assigning temporary variable locations to the original variables
*UOld = TempUOld;
*UNew = TempUNew;
}
//---------------------------------------------------------------------------------------------------
void SetBoundaryCondition(int i, int j, float Value, int NoPointsX, float *UOld, float *UNew)
{
int Index;
Index = (j * NoPointsX) + i;
UOld[Index] = Value;
UNew[Index] = Value;
}
//------------------------------------------------------------------------------------------------
void IntializeUInteriorIndex(int **UInteriorIndex, int NoPointsX, int NoPointsY,int NoPointsInterior)
{
int i, j, Index, IndexValue;
int *TempUInteriorIndex;
Index = 0;
//Allocating memory for UInteriorIndex
TempUInteriorIndex = (int *)malloc( NoPointsInterior * sizeof(int) );
if( TempUInteriorIndex == NULL )
{
printf("Can't allocate memory for the variable TempUInteriorIndex \n");
exit(-1);
}
//Assigning the index of the Interior points of UOld and UNew
for(j = 1; j < (NoPointsY - 1); ++j)
{
for(i = 1; i < (NoPointsX - 1); i++)
{
IndexValue = (j * NoPointsX) + i;
TempUInteriorIndex[Index] = IndexValue;
Index++;
}
}
*UInteriorIndex = TempUInteriorIndex;
}
//--------------------------------------------------------------------------------------------------------
float GetTheMaximumValue(float *Array,int NumberOfElements)
{
float MaxError;
int RowNum;
MaxError = 0.0f;
for(RowNum = 0; RowNum < NumberOfElements; RowNum++)
{
if(Array[RowNum] >= MaxError)
MaxError = Array[RowNum];
}
return(MaxError);
}
//---------------------------------------------------------------------------------------------------------------
void IntializeUDifference(float **UDifference, int NoPointsInterior)
{
float *TempUDifference;
int RowNumber;
//Allocating Memory for UDifference
TempUDifference = (float *)malloc( NoPointsInterior * sizeof(float) );
if( TempUDifference == NULL )
{
printf("Can't allocate the memory for the variable TempUDifference \n");
exit(-1);
}
//Initializing UDifference to zeros
for(RowNumber = 0; RowNumber < NoPointsInterior; RowNumber++)
TempUDifference[RowNumber] = 0.0f;
*UDifference = TempUDifference;
}
//-------------------------------------------------------------------------------------------------------
|
02b06cbb80b8e989c4df6d00fd2edfe4d2a964e7.cu
|
//%%cu
/**************************************************************************
C-DAC Tech Workshop : hyPACK-2013
October 15-18, 2013
Objective : Program to solve a solution of Poisson Eq. (PDE) on GPU
Input : No. of Grid Points in X-Dir, No. of Grid Points in Y-Dir
and maximum number of iterations
Output : Solution Vector.
Created : August-2013
E-mail : [email protected]
*****************************************************************************/
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
#include<sys/time.h>
//---------------------------------------------------------------------------
#define BLOCKSIZE 16
#define TOLERANCE 1.0E-06
//#define TOPBOUNDARYVALUE 4.1f
//#define BOTTOMBOUNDARYVALUE 3.1f
//#define LEFTBOUNDARYVALUE 1.1f
//#define RIGHTBOUNDARYVALUE 2.1f
#define TOPBOUNDARYVALUE 1.0f
#define BOTTOMBOUNDARYVALUE 1.0f
#define LEFTBOUNDARYVALUE 1.0f
#define RIGHTBOUNDARYVALUE 1.0f
//----------------------------------------------------------------------------
void IntializeAndSetBoundaryConditions(float **, float **, int , int , int );
void SetBoundaryCondition(int , int , float , int , float *, float *);
void IntializeUInteriorIndex(int **, int , int , int );
float GetTheMaximumValue(float *,int );
void IntializeUDifference(float **, int );
//------------------------------------------------------------------------
//Macro to report the details of a CUDA error
#define CUDA_SAFE_CALL(call) \
do{ \
cudaError_t err = call; \
if(err != cudaSuccess) \
{ \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(1); \
} \
} while (0) \
//------------------------------------------------------------------------------------------
//Kernel that performs the Jacobi Iteration
__global__ void JacobiIteration(float *DeviceUOld, float *DeviceUNew, int *DeviceUInteriorIndex, float *DeviceUDifference, int NoPointsX, int Size, int ThreadDim)
{
int tidx = threadIdx.x;
int tidy = threadIdx.y;
int ThreadIndex = (ThreadDim * tidx) + tidy;
int MaxNumThread = ThreadDim * ThreadDim;
int CurrentColumnIndex;
int pass = 0;
int Center, Left, Right, Bottom, Top;
while( (CurrentColumnIndex = (ThreadIndex + MaxNumThread * pass)) < Size )
{
Center = DeviceUInteriorIndex[CurrentColumnIndex];
Left = Center - 1;
Right = Center + 1;
Top = Center - NoPointsX;
Bottom = Center + NoPointsX;
//Updating the UNew values
DeviceUNew[Center] = 0.25 * (DeviceUOld[Left] + DeviceUOld[Right] + DeviceUOld[Top] + DeviceUOld[Bottom]);
//Finding the Difference between UNew and UOld
DeviceUDifference[CurrentColumnIndex] = DeviceUNew[Center] - DeviceUOld[Center];
//Assigning UNew to UOld
DeviceUOld[Center] = DeviceUNew[Center];
pass++;
}
__syncthreads();
}//End of Jacobi Iteration Device function
//----------------------------------------------------------------------------------------------
int main(int argc, char **argv)
{
//Checking if a valid number of arguments has been passed
/*
if(argc != 4)
{
printf("Valid number of inputs are not given \n");
printf("Usage:<./Program Name><Number of X points><Number of Y points><Maximum Number of Iterations> \n");
exit(-1);
}
*/
//Host Variables Declaration
float *UOld, *UNew, *UDifference;
int *UInteriorIndex;
float MaxError = 0.0f;
struct timeval TV;
double StartTime,EndTime,ActualTime;
int NoPointsX, NoPointsY, MaxIterations, NoPointsInterior, Index, PresentIteration,NoPointsTotal;
//Device Variables Declaration
float *DeviceUOld, *DeviceUNew, *DeviceUDifference;
int *DeviceUInteriorIndex;
//Obtaining the values of NoPointsX, NoPointsY and MaxIterations from the arguments passed by the user
// NoPointsX = atoi( argv[1] );
//NoPointsY = atoi( argv[2] );
//MaxIterations = atoi( argv[3] );
NoPointsX = 10;
NoPointsY = 10;
MaxIterations = 10;
//Calculating the Total Points and Interior Points
NoPointsTotal = NoPointsX * NoPointsY;
NoPointsInterior = (NoPointsTotal) - (((2 * NoPointsX) + (2 * NoPointsY)) - 4);
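//With the default 10 x 10 grid this gives NoPointsTotal = 100 and
//NoPointsInterior = 100 - ((2*10 + 2*10) - 4) = 64 interior points.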
//Initializing UOld and setting the boundary conditions
IntializeAndSetBoundaryConditions( &UOld, &UNew, NoPointsX, NoPointsY, NoPointsTotal );
//Initializing UDifference
IntializeUDifference( &UDifference,NoPointsInterior );
//Filling the UInteriorIndex with Index Values of Interior Points
IntializeUInteriorIndex( &UInteriorIndex, NoPointsX, NoPointsY,NoPointsInterior );
//Allocating Memory on Device
CUDA_SAFE_CALL( cudaMalloc( (void **)&DeviceUOld, NoPointsTotal * sizeof(float)));
CUDA_SAFE_CALL( cudaMalloc( (void **)&DeviceUNew, NoPointsTotal * sizeof(float)));
CUDA_SAFE_CALL( cudaMalloc( (void **)&DeviceUInteriorIndex, NoPointsInterior * sizeof(int)));
CUDA_SAFE_CALL( cudaMalloc( (void **)&DeviceUDifference, NoPointsInterior * sizeof(float)));
//Copying Data from Host to Device
CUDA_SAFE_CALL( cudaMemcpy((void *)DeviceUOld, (void *)UOld, NoPointsTotal * sizeof(float), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy((void *)DeviceUNew, (void *)UNew, NoPointsTotal * sizeof(float), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy((void *)DeviceUInteriorIndex, (void *)UInteriorIndex, NoPointsInterior * sizeof(int), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy((void *)DeviceUDifference, (void *)UDifference, NoPointsInterior * sizeof(float), cudaMemcpyHostToDevice) );
//Defining Thread Grid and the Thread Block
dim3 DimGrid( 1,1 );
dim3 DimBlock( BLOCKSIZE,BLOCKSIZE );
PresentIteration = 0;
//start timing computation
gettimeofday(&TV, NULL);
StartTime = TV.tv_sec+( TV.tv_usec/1000000.0 );
while(1)
{
//Incrementing the Iteration Number
PresentIteration++;
//Invoking the Kernel
JacobiIteration<<<DimGrid, DimBlock>>>( DeviceUOld, DeviceUNew, DeviceUInteriorIndex, DeviceUDifference, NoPointsX, NoPointsInterior, BLOCKSIZE );
//Copying Udifference from Device to Host
CUDA_SAFE_CALL( cudaMemcpy((void *)UDifference, (void *)DeviceUDifference, NoPointsInterior * sizeof(float), cudaMemcpyDeviceToHost) );
//Finding the Maximum among the UDifference values
MaxError = GetTheMaximumValue( UDifference, NoPointsInterior );
//Checking for the convergence
if((MaxError < TOLERANCE) || (PresentIteration == MaxIterations))
break;
}
//stop timing computation
gettimeofday(&TV,NULL);
EndTime = TV.tv_sec+(TV.tv_usec/1000000.0);
//calculate difference between start and stop times
ActualTime = EndTime - StartTime;
//Copying UNew from Device to Host
CUDA_SAFE_CALL(cudaMemcpy((void *)UNew, (void *)DeviceUNew, NoPointsTotal * sizeof(float), cudaMemcpyDeviceToHost));
//Printing the solution
for(Index = 0; Index < NoPointsTotal; Index++)
printf(" %f", UNew[Index]);
printf("Output Vector given above calculated in %d Iterations and in %lf secs.\n",PresentIteration,ActualTime);
//Freeing the Allocated Memory on Device
CUDA_SAFE_CALL( cudaFree( DeviceUOld ) );
CUDA_SAFE_CALL( cudaFree( DeviceUNew ) );
CUDA_SAFE_CALL( cudaFree( DeviceUInteriorIndex ) );
CUDA_SAFE_CALL( cudaFree( DeviceUDifference ) );
//Freeing the Allocated Memory on Host
free( UOld );
free( UNew );
free( UInteriorIndex );
free( UDifference );
return(0);
}//End of Main
//-----------------------------------------------------------------------------------------------------
void IntializeAndSetBoundaryConditions( float **UOld, float **UNew, int NoPointsX, int NoPointsY, int NoPointsTotal )
{
float *TempUOld,*TempUNew;
int Index;
//Allocating memory for UOld and UNew
TempUOld = (float *)malloc( NoPointsTotal * sizeof(float) );
if(TempUOld == NULL)
{
printf("Can't allocate the memory for the variable TempUOld \n");
exit(-1);
}
TempUNew = (float *)malloc( NoPointsTotal * sizeof(float) );
if(TempUNew == NULL)
{
printf("Can't allocate the memory for the variable TempUNew \n");
exit(-1);
}
//Initialize UOld to zeros
for(Index = 0; Index < (NoPointsTotal); Index++)
TempUOld[Index] = 0.0;
//Setting the Boundary Conditions
//Case:Left
for(Index = 0; Index < NoPointsY; Index++)
SetBoundaryCondition(0, Index, LEFTBOUNDARYVALUE, NoPointsX, TempUOld, TempUNew);
//Case:Right
for(Index = 0; Index < NoPointsY; Index++)
SetBoundaryCondition((NoPointsX - 1), Index, RIGHTBOUNDARYVALUE, NoPointsX, TempUOld, TempUNew);
//Case:Bottom
for(Index = 0; Index < NoPointsX; Index++)
SetBoundaryCondition(Index, 0, BOTTOMBOUNDARYVALUE, NoPointsX, TempUOld, TempUNew);
//Case:Top
for(Index = 0; Index < NoPointsX; Index++)
SetBoundaryCondition(Index, (NoPointsY - 1), TOPBOUNDARYVALUE, NoPointsX, TempUOld, TempUNew);
//Assigning temporary variable locations to the original variables
*UOld = TempUOld;
*UNew = TempUNew;
}
//---------------------------------------------------------------------------------------------------
void SetBoundaryCondition(int i, int j, float Value, int NoPointsX, float *UOld, float *UNew)
{
int Index;
Index = (j * NoPointsX) + i;
UOld[Index] = Value;
UNew[Index] = Value;
}
//------------------------------------------------------------------------------------------------
void IntializeUInteriorIndex(int **UInteriorIndex, int NoPointsX, int NoPointsY,int NoPointsInterior)
{
int i, j, Index, IndexValue;
int *TempUInteriorIndex;
Index = 0;
//Allocating memory for UInteriorIndex
TempUInteriorIndex = (int *)malloc( NoPointsInterior * sizeof(int) );
if( TempUInteriorIndex == NULL )
{
printf("Can't allocate memory for the variable TempUInteriorIndex \n");
exit(-1);
}
//Assigning the index of the Interior points of UOld and UNew
for(j = 1; j < (NoPointsY - 1); ++j)
{
for(i = 1; i < (NoPointsX - 1); i++)
{
IndexValue = (j * NoPointsX) + i;
TempUInteriorIndex[Index] = IndexValue;
Index++;
}
}
*UInteriorIndex = TempUInteriorIndex;
}
//--------------------------------------------------------------------------------------------------------
float GetTheMaximumValue(float *Array,int NumberOfElements)
{
float MaxError;
int RowNum;
MaxError = 0.0f;
for(RowNum = 0; RowNum < NumberOfElements; RowNum++)
{
if(Array[RowNum] >= MaxError)
MaxError = Array[RowNum];
}
return(MaxError);
}
//---------------------------------------------------------------------------------------------------------------
void IntializeUDifference(float **UDifference, int NoPointsInterior)
{
float *TempUDifference;
int RowNumber;
//Allocating Memory for UDifference
TempUDifference = (float *)malloc( NoPointsInterior * sizeof(float) );
if( TempUDifference == NULL )
{
printf("Can't allocate the memory for the variable TempUDifference \n");
exit(-1);
}
//Initializing UDifference to zeros
for(RowNumber = 0; RowNumber < NoPointsInterior; RowNumber++)
TempUDifference[RowNumber] = 0.0f;
*UDifference = TempUDifference;
}
//-------------------------------------------------------------------------------------------------------
|
a8f76bbf0743cd85e304fc351b2536c2b63ee107.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include "cudnnUtils.h"
#include <ops/declarable/helpers/convolutions.h>
namespace nd4j {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
static void batchnormCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* mean, const NDArray* variance,
const NDArray* gamma, const NDArray* beta,
NDArray* output,
const double epsilon, const bool isSpatialMode) {
// input, output -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const int xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: can't set stream for cuDNN", err);
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if(isSpatialMode) { // 1xCx1x1
const int iC = mean->lengthOf();
const int stride0 = mean->strideAt(0);
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC*stride0, stride0, 1, 1}) : std::vector<int>({iC*stride0, stride0, 1, 1, 1});
}
else {
paramsShape = mean->getShapeAsVectorInt();
paramsStrides = xRank == 4 ? std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3)}) : std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3), (int)mean->strideAt(4)});
}
std::vector<int> xStrides = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3)};
std::vector<int> zStrides = {(int)output->strideAt(0), (int)output->strideAt(1), (int)output->strideAt(2), (int)output->strideAt(3)};
if(xRank > 4) { // 5D
xStrides.push_back((int)input->strideAt(4));
zStrides.push_back((int)output->strideAt(4));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(x, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(x, dataType, xRank, xShape.data(), xStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(z, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(z, dataType, xRank, xShape.data(), zStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for output failed", err);
// mean, variance, gamma and beta descriptor, the same descriptor for all of them
cudnnTensorDescriptor_t params;
cudnnCreateTensorDescriptor(&params);
if(mean->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(params, format, dataType, xRank, paramsShape.data());
else
err = cudnnSetTensorNdDescriptor(params, dataType, xRank, paramsShape.data(), paramsStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for mean/variance/gamma/beta failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* ptrAlpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
// calculations
err = cudnnBatchNormalizationForwardInference(*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION,
ptrAlpha, ptrBeta,
x, input->getSpecialBuffer(),
z, output->getSpecialBuffer(),
params,
gamma->getSpecialBuffer(), beta->getSpecialBuffer(),
mean->getSpecialBuffer(), variance->getSpecialBuffer(), epsilon);
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnBatchNormalizationForwardInference failed", err);
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("batchnormCUDNN: hipStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
}
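// For reference, cudnnBatchNormalizationForwardInference used above applies the
// standard inference-time transform per channel (spatial mode) or per activation:
//     y = gamma * (x - mean) / sqrt(variance + epsilon) + beta
// The alpha/beta blending factors are fixed to 1 and 0 here, so the result is
// written directly into the output buffer.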
//////////////////////////////////////////////////////////////////////////
static void batchnormBpCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* mean, const NDArray* variance, const NDArray* gamma, const NDArray* gradO,
NDArray* gradI, NDArray* gradG, NDArray* gradB,
const double epsilon, const bool isSpatialMode) {
// input, gradO, gradI -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta, gradM, gradV, gradG, gradB -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const int xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw nd4j::cuda_exception::build("batchnormBpCUDNN: can't set stream for cuDNN", err);
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if(isSpatialMode) { // 1xCx1x1
const int iC = mean->lengthOf();
const int stride0 = mean->strideAt(0);
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC*stride0, stride0, 1, 1}) : std::vector<int>({iC*stride0, stride0, 1, 1, 1});
}
else {
paramsShape = mean->getShapeAsVectorInt();
paramsStrides = xRank == 4 ? std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3)}) : std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3), (int)mean->strideAt(4)});
}
std::vector<int> xStrides = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3)};
std::vector<int> dxStrides = {(int)gradI->strideAt(0), (int)gradI->strideAt(1), (int)gradI->strideAt(2), (int)gradI->strideAt(3)};
std::vector<int> dzStrides = {(int)gradO->strideAt(0), (int)gradO->strideAt(1), (int)gradO->strideAt(2), (int)gradO->strideAt(3)};
if(xRank > 4) { // 5D
xStrides.push_back((int)input->strideAt(4));
dxStrides.push_back((int)gradI->strideAt(4));
dzStrides.push_back((int)gradO->strideAt(4));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(x, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(x, dataType, xRank, xShape.data(), xStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// gradO descriptor
cudnnTensorDescriptor_t dz;
cudnnCreateTensorDescriptor(&dz);
if(gradO->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(dz, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(dz, dataType, xRank, xShape.data(), dzStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for gradO failed", err);
// gradI descriptor
cudnnTensorDescriptor_t dx;
cudnnCreateTensorDescriptor(&dx);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(dx, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(dx, dataType, xRank, xShape.data(), dxStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for gradI failed", err);
// mean, variance, gamma, gradG and gradB descriptor, the same descriptor for all of them
cudnnTensorDescriptor_t params;
cudnnCreateTensorDescriptor(&params);
if(mean->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(params, format, dataType, xRank, paramsShape.data());
else
err = cudnnSetTensorNdDescriptor(params, dataType, xRank, paramsShape.data(), paramsStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for mean/variance/gamma/gradG/gradB failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
double alpha64(1), beta64(0);
const void* ptrAlpha = input->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta = input->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({gradI, gradG, gradB}, {input, mean, variance, gamma, gradO});
// calculations
// TODO: we can use cache here
err = cudnnBatchNormalizationBackward(*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION,
ptrAlpha, ptrBeta, ptrAlpha, ptrBeta,
x, input->getSpecialBuffer(),
dz, gradO->getSpecialBuffer(),
dx, gradI->getSpecialBuffer(),
params,
gamma->getSpecialBuffer(), gradG->getSpecialBuffer(), gradB->getSpecialBuffer(),
epsilon,
nullptr/*mean->getSpecialBuffer()*/, nullptr/*variance->getSpecialBuffer()*/);
if (err != 0) throw nd4j::cuda_exception::build("batchnormBpCUDNN: cudnnBatchNormalizationBackward failed", err);
auto cudaErr = hipStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("batchnormBpCUDNN: hipStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({gradI, gradG, gradB}, {input, mean, variance, gamma, gradO});
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm, ENGINE_CUDA) {
auto input = INPUT_VARIABLE(0);
auto mean = INPUT_VARIABLE(1);
auto variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
auto output = OUTPUT_VARIABLE(0);
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const double epsilon = T_ARG(0);
if(applyScale)
gamma = INPUT_VARIABLE(3);
if(applyOffset)
beta = INPUT_VARIABLE(3 + (int)applyScale);
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(inRank-1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0, "BATCHNORM CUDNN op: too big number of input axes to normalize over, expected number should be less or equal to rank of input array, but got %i and %i correspondingly !", numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes = {3}, then expected shape would be {5}
std::vector<Nd4jLong> expShape;
if(numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<Nd4jLong>(inRank, 1);
for(uint i = 0; i < numOfAxes; ++i)
expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape) , 0, "BATCHNORM CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if(gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if(beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
// types of all input arrays should be the same
for(int i = 1; i < block.width(); ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0, "BATCHNORM CUDNN op: types of all input arrays should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() == input->sizeAt(-1);
if(needPermut) { // if NHWC
std::vector<int> perm = inRank == 4 ? std::vector<int>({0, 3, 1, 2}) : std::vector<int>({0, 4, 1, 2, 3}); // NHWC -> NCHW
input = new NDArray(input->permute(perm));
output = new NDArray(output->permute(perm));
}
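// Example: a 4D NHWC input of shape [N, H, W, C] becomes [N, C, H, W] after
// permute({0, 3, 1, 2}); the output array is permuted the same way so that the
// cuDNN result lands in the caller's original layout.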
// cudnn requires gamma and beta to be non-nullptr
if(!applyScale) {
gamma = new NDArray(mean);
*gamma = 1;
}
if(!applyOffset) {
beta = new NDArray(mean);
*beta = 0;
}
// calculations
batchnormCUDNN(block.launchContext(), input, mean, variance, gamma, beta, output, epsilon, axes.size() == 1);
if(needPermut) {
delete input;
delete output;
}
if(!applyScale)
delete gamma;
if(!applyOffset)
delete beta;
return Status::OK();
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_CHECK(batchnorm, ENGINE_CUDA) {
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = applyScale ? INPUT_VARIABLE(3) : nullptr;
NDArray* beta = applyOffset ? INPUT_VARIABLE(3 + (int)applyScale) : nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// *********************************** //
if(xRank != 4 && xRank != 5)
return false;
// *********************************** //
const bool badType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF;
if(badType)
return false;
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(xRank-1); // default dimension to reduce along is last dimension
if(axes.size() != 1 && axes.size() != 3 && axes.size() != 4)
return false;
// *********************************** //
bool allParamsHaveSameShapeAndStrides = shape::haveSameShapeAndStrides(mean->getShapeInfo(), variance->getShapeInfo());
if(gamma)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gamma->getShapeInfo());
if(beta)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), beta->getShapeInfo());
if(!allParamsHaveSameShapeAndStrides)
return false;
// *********************************** //
bool isFormatGood = false;
if(axes.size() == 1)
isFormatGood = mean->lengthOf() == input->sizeAt(1) || mean->lengthOf() == input->sizeAt(-1); // mean [C]
else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
isFormatGood = mean->isSameShape(inputShapeModif); // mean [1,dim1,dim2,dim3] 4D or [1,dim1,dim2,dim3,dim4]
}
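// e.g. a 4D input of shape [2, 3, 4, 5] with axes {1, 2, 3} requires mean/variance
// of shape [1, 3, 4, 5] for the per-activation path.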
if(!isFormatGood)
return false;
return true;
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm_bp, ENGINE_CUDA) {
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
NDArray* gradO = INPUT_VARIABLE(block.width() - 1); // next epsilon
NDArray* gradI = OUTPUT_VARIABLE(0);
NDArray* gradM = OUTPUT_VARIABLE(1);
NDArray* gradV = OUTPUT_VARIABLE(2);
NDArray* gradG = nullptr;
NDArray* gradB = nullptr;
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const float epsilon = T_ARG(0);
if(applyScale) {
gamma = INPUT_VARIABLE(3);
gradG = OUTPUT_VARIABLE(3);
}
if(applyOffset) {
beta = INPUT_VARIABLE(3 + (int)applyScale);
gradB = OUTPUT_VARIABLE(3 + (int)applyScale);
}
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(inRank-1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0, "BATCHNORM_BP CUDNN op: too big number of input axes to normalize over, expected number should be less or equal to rank of input array, but got %i and %i correspondingly !", numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes = {3}, then expected shape would be {5}
std::vector<Nd4jLong> expShape;
if(numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<Nd4jLong>(inRank, 1);
for(uint i = 0; i < numOfAxes; ++i)
expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if(gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if(beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
REQUIRE_TRUE(input->isSameShape(gradO), 0, "BATCHNORM_BP CUDNN op: wrong shape of output gradients array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(input).c_str(), ShapeUtils::shapeAsString(gradO).c_str());
// types of all input arrays should be the same (except gradO)
for(int i = 1; i < block.width() - 2; ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0, "BATCHNORM_BP CUDNN op: types of arrays (input, mean, variance, gamma, beta) should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() != input->sizeAt(1);
if(needPermut) { // if NHWC
std::vector<int> perm = inRank == 4 ? std::vector<int>({0, 3, 1, 2}) : std::vector<int>({0, 4, 1, 2, 3}); // NHWC -> NCHW
input = new NDArray(input->permute(perm));
gradO = new NDArray(gradO->permute(perm));
gradI = new NDArray(gradI->permute(perm));
}
// cudnn requires gamma, gradG, gradB to be non-nullptr
if(!applyScale) {
gamma = new NDArray(mean);
gradG = new NDArray(mean);
*gamma = 1;
}
if(!applyOffset)
gradB = new NDArray(mean);
// calculations
batchnormBpCUDNN(block.launchContext(), input, mean, variance, gamma, gradO, gradI, gradG, gradB, epsilon, axes.size() == 1);
*gradM = 0; // put zeros so far
*gradV = 0; // put zeros so far
if(needPermut) {
delete input;
delete gradO;
delete gradI;
}
if(!applyScale) {
delete gamma;
delete gradG;
}
if(!applyOffset)
delete gradB;
return Status::OK();
}
PLATFORM_CHECK(batchnorm_bp, ENGINE_CUDA) {
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
NDArray* gradO = INPUT_VARIABLE(block.width() - 1); // next epsilon
NDArray* gradI = OUTPUT_VARIABLE(0);
NDArray* gradM = OUTPUT_VARIABLE(1);
NDArray* gradV = OUTPUT_VARIABLE(2);
NDArray* gradG = nullptr;
NDArray* gradB = nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// *********************************** //
if(xRank != 4 && xRank != 5)
return false;
// *********************************** //
const bool badType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF;
if(badType)
return false;
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(xRank-1); // default dimension to reduce along is last dimension
if(axes.size() != 1 && axes.size() != 3 && axes.size() != 4)
return false;
// *********************************** //
bool allParamsHaveSameShapeAndStrides = shape::haveSameShapeAndStrides(mean->getShapeInfo(), variance->getShapeInfo());
if(gamma)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gamma->getShapeInfo());
if(gradG)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gradG->getShapeInfo());
if(gradB)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gradB->getShapeInfo());
if(!allParamsHaveSameShapeAndStrides)
return false;
// *********************************** //
bool isFormatGood = false;
if(axes.size() == 1)
isFormatGood = mean->lengthOf() == input->sizeAt(1) || mean->lengthOf() == input->sizeAt(-1); // mean [C]
else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
isFormatGood = mean->isSameShape(inputShapeModif); // mean [1,dim1,dim2,dim3] 4D or [1,dim1,dim2,dim3,dim4]
}
if(!isFormatGood)
return false;
return true;
}
}
}
}
|
a8f76bbf0743cd85e304fc351b2536c2b63ee107.cu
|
/*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include "cudnnUtils.h"
#include <ops/declarable/helpers/convolutions.h>
namespace nd4j {
namespace ops {
namespace platforms {
//////////////////////////////////////////////////////////////////////////
static void batchnormCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* mean, const NDArray* variance,
const NDArray* gamma, const NDArray* beta,
NDArray* output,
const double epsilon, const bool isSpatialMode) {
// input, output -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const int xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: can't set stream for cuDNN", err);
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if(isSpatialMode) { // 1xCx1x1
const int iC = mean->lengthOf();
const int stride0 = mean->strideAt(0);
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC*stride0, stride0, 1, 1}) : std::vector<int>({iC*stride0, stride0, 1, 1, 1});
}
else {
paramsShape = mean->getShapeAsVectorInt();
paramsStrides = xRank == 4 ? std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3)}) : std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3), (int)mean->strideAt(4)});
}
std::vector<int> xStrides = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3)};
std::vector<int> zStrides = {(int)output->strideAt(0), (int)output->strideAt(1), (int)output->strideAt(2), (int)output->strideAt(3)};
if(xRank > 4) { // 5D
xStrides.push_back((int)input->strideAt(4));
zStrides.push_back((int)output->strideAt(4));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(x, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(x, dataType, xRank, xShape.data(), xStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// output descriptor
cudnnTensorDescriptor_t z;
cudnnCreateTensorDescriptor(&z);
if(output->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(z, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(z, dataType, xRank, xShape.data(), zStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for output failed", err);
// mean, variance, gamma and beta descriptor, the same descriptor for all of them
cudnnTensorDescriptor_t params;
cudnnCreateTensorDescriptor(&params);
if(mean->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(params, format, dataType, xRank, paramsShape.data());
else
err = cudnnSetTensorNdDescriptor(params, dataType, xRank, paramsShape.data(), paramsStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for mean/variance/gamma/beta failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
const double alpha64(1), beta64(0);
const void* ptrAlpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
// calculations
err = cudnnBatchNormalizationForwardInference(*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION,
ptrAlpha, ptrBeta,
x, input->getSpecialBuffer(),
z, output->getSpecialBuffer(),
params,
gamma->getSpecialBuffer(), beta->getSpecialBuffer(),
mean->getSpecialBuffer(), variance->getSpecialBuffer(), epsilon);
if (err != 0) throw nd4j::cuda_exception::build("batchnormCUDNN: cudnnBatchNormalizationForwardInference failed", err);
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("batchnormCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
}
//////////////////////////////////////////////////////////////////////////
static void batchnormBpCUDNN(const LaunchContext* context,
const NDArray* input, const NDArray* mean, const NDArray* variance, const NDArray* gamma, const NDArray* gradO,
NDArray* gradI, NDArray* gradG, NDArray* gradB,
const double epsilon, const bool isSpatialMode) {
// input, gradO, gradI -> 4D:nchw, 5D:ncdhw
// mean, variance, gamma, beta, gradM, gradV, gradG, gradB -> 1xCx1x1 for 4D and 1xCx1x1x1 for 5D for BATCHNORM_MODE_SPATIAL mode
// -> 1xCxHxW for 4D and 1xCxDxHxW for 5D for BATCHNORM_MODE_PER_ACTIVATION mode
const cudnnDataType_t dataType = cudnnDataType(input->dataType());
const int xRank = input->rankOf();
auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle());
cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream());
if (err != 0) throw nd4j::cuda_exception::build("batchnormBpCUDNN: can't set stream for cuDNN", err);
const std::vector<int> xShape = input->getShapeAsVectorInt(); // input and output have same shapes
std::vector<int> paramsShape, paramsStrides; // mean, variance, gamma and beta have same shapes
if(isSpatialMode) { // 1xCx1x1
const int iC = mean->lengthOf();
const int stride0 = mean->strideAt(0);
paramsShape = xRank == 4 ? std::vector<int>({1, iC, 1, 1}) : std::vector<int>({1, iC, 1, 1, 1});
paramsStrides = xRank == 4 ? std::vector<int>({iC*stride0, stride0, 1, 1}) : std::vector<int>({iC*stride0, stride0, 1, 1, 1});
}
else {
paramsShape = mean->getShapeAsVectorInt();
paramsStrides = xRank == 4 ? std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3)}) : std::vector<int>({(int)mean->strideAt(0), (int)mean->strideAt(1), (int)mean->strideAt(2), (int)mean->strideAt(3), (int)mean->strideAt(4)});
}
std::vector<int> xStrides = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), (int)input->strideAt(3)};
std::vector<int> dxStrides = {(int)gradI->strideAt(0), (int)gradI->strideAt(1), (int)gradI->strideAt(2), (int)gradI->strideAt(3)};
std::vector<int> dzStrides = {(int)gradO->strideAt(0), (int)gradO->strideAt(1), (int)gradO->strideAt(2), (int)gradO->strideAt(3)};
if(xRank > 4) { // 5D
xStrides.push_back((int)input->strideAt(4));
dxStrides.push_back((int)gradI->strideAt(4));
dzStrides.push_back((int)gradO->strideAt(4));
}
cudnnTensorFormat_t format = CUDNN_TENSOR_NCHW;
// input descriptor
cudnnTensorDescriptor_t x;
cudnnCreateTensorDescriptor(&x);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(x, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(x, dataType, xRank, xShape.data(), xStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for input failed", err);
// gradO descriptor
cudnnTensorDescriptor_t dz;
cudnnCreateTensorDescriptor(&dz);
if(gradO->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(dz, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(dz, dataType, xRank, xShape.data(), dzStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for gradO failed", err);
// gradI descriptor
cudnnTensorDescriptor_t dx;
cudnnCreateTensorDescriptor(&dx);
if(input->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(dx, format, dataType, xRank, xShape.data());
else
err = cudnnSetTensorNdDescriptor(dx, dataType, xRank, xShape.data(), dxStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for gradI failed", err);
// mean, variance, gamma, gradG and gradB descriptor, the same descriptor for all of them
cudnnTensorDescriptor_t params;
cudnnCreateTensorDescriptor(&params);
if(mean->ews() == 1)
err = cudnnSetTensorNdDescriptorEx(params, format, dataType, xRank, paramsShape.data());
else
err = cudnnSetTensorNdDescriptor(params, dataType, xRank, paramsShape.data(), paramsStrides.data());
if (err != 0) throw nd4j::cuda_exception::build("batchnormBpCUDNN: cudnnSetTensorNdDescriptor/cudnnSetTensorNdDescriptorEx for mean/variance/gamma/gradG/gradB failed", err);
// provide scaling parameters
const float alpha32(1), beta32(0);
double alpha64(1), beta64(0);
const void* ptrAlpha = input->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64);
const void* ptrBeta = input->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64);
NDArray::prepareSpecialUse({gradI, gradG, gradB}, {input, mean, variance, gamma, gradO});
// calculations
// TODO: we can use cache here
err = cudnnBatchNormalizationBackward(*handle, isSpatialMode ? CUDNN_BATCHNORM_SPATIAL : CUDNN_BATCHNORM_PER_ACTIVATION,
ptrAlpha, ptrBeta, ptrAlpha, ptrBeta,
x, input->getSpecialBuffer(),
dz, gradO->getSpecialBuffer(),
dx, gradI->getSpecialBuffer(),
params,
gamma->getSpecialBuffer(), gradG->getSpecialBuffer(), gradB->getSpecialBuffer(),
epsilon,
nullptr/*mean->getSpecialBuffer()*/, nullptr/*variance->getSpecialBuffer()*/);
if (err != 0) throw nd4j::cuda_exception::build("batchnormBpCUDNN: cudnnBatchNormalizationBackward failed", err);
auto cudaErr = cudaStreamSynchronize(*context->getCudaStream());
if (cudaErr != 0)
throw cuda_exception::build("batchnormBpCUDNN: cudaStreamSynchronize failed !", cudaErr);
NDArray::registerSpecialUse({gradI, gradG, gradB}, {input, mean, variance, gamma, gradO});
}
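// For reference, cudnnBatchNormalizationBackward used above produces the input
// gradient together with the scale/offset gradients; with
//     x_hat = (x - mean) / sqrt(variance + epsilon)
// these are the usual dGamma = sum(dy * x_hat) and dBeta = sum(dy) reductions over
// the normalized axes. The saved mean/inv-variance arguments are passed as nullptr,
// so cuDNN recomputes the batch statistics from the input.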
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm, ENGINE_CUDA) {
auto input = INPUT_VARIABLE(0);
auto mean = INPUT_VARIABLE(1);
auto variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
auto output = OUTPUT_VARIABLE(0);
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const double epsilon = T_ARG(0);
if(applyScale)
gamma = INPUT_VARIABLE(3);
if(applyOffset)
beta = INPUT_VARIABLE(3 + (int)applyScale);
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(inRank-1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0, "BATCHNORM CUDNN op: too big number of input axes to normalize over, expected number should be less or equal to rank of input array, but got %i and %i correspondingly !", numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes = {3}, then expected shape would be {5}
std::vector<Nd4jLong> expShape;
if(numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<Nd4jLong>(inRank, 1);
for(uint i = 0; i < numOfAxes; ++i)
expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape) , 0, "BATCHNORM CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if(gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if(beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0, "BATCHNORM CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
// types of all input arrays should be the same
for(int i = 1; i < block.width(); ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0, "BATCHNORM CUDNN op: types of all input arrays should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() == input->sizeAt(-1);
if(needPermut) { // if NHWC
std::vector<int> perm = inRank == 4 ? std::vector<int>({0, 3, 1, 2}) : std::vector<int>({0, 4, 1, 2, 3}); // NHWC -> NCHW
input = new NDArray(input->permute(perm));
output = new NDArray(output->permute(perm));
}
// cudnn requires gamma and beta to be non-nullptr
if(!applyScale) {
gamma = new NDArray(mean);
*gamma = 1;
}
if(!applyOffset) {
beta = new NDArray(mean);
*beta = 0;
}
// calculations
batchnormCUDNN(block.launchContext(), input, mean, variance, gamma, beta, output, epsilon, axes.size() == 1);
if(needPermut) {
delete input;
delete output;
}
if(!applyScale)
delete gamma;
if(!applyOffset)
delete beta;
return Status::OK();
}
//////////////////////////////////////////////////////////////////////////
PLATFORM_CHECK(batchnorm, ENGINE_CUDA) {
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = applyScale ? INPUT_VARIABLE(3) : nullptr;
NDArray* beta = applyOffset ? INPUT_VARIABLE(3 + (int)applyScale) : nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// *********************************** //
if(xRank != 4 && xRank != 5)
return false;
// *********************************** //
const bool badType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF;
if(badType)
return false;
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(xRank-1); // default dimension to reduce along is last dimension
if(axes.size() != 1 && axes.size() != 3 && axes.size() != 4)
return false;
// *********************************** //
bool allParamsHaveSameShapeAndStrides = shape::haveSameShapeAndStrides(mean->getShapeInfo(), variance->getShapeInfo());
if(gamma)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gamma->getShapeInfo());
if(beta)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), beta->getShapeInfo());
if(!allParamsHaveSameShapeAndStrides)
return false;
// *********************************** //
bool isFormatGood = false;
if(axes.size() == 1)
isFormatGood = mean->lengthOf() == input->sizeAt(1) || mean->lengthOf() == input->sizeAt(-1); // mean [C]
else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
isFormatGood = mean->isSameShape(inputShapeModif); // mean [1,dim1,dim2,dim3] 4D or [1,dim1,dim2,dim3,dim4]
}
if(!isFormatGood)
return false;
return true;
}
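// Editor's note (illustrative only, not part of the original op): as a concrete reading of the
// format check above, assume a hypothetical NCHW input of shape {2,3,4,5}. With axes = {1} the
// check passes when mean has length 3 (the channel dimension, input->sizeAt(1)); with
// axes = {1,2,3} it instead requires mean to match the input shape with the batch dimension
// set to 1, i.e. shape {1,3,4,5}.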
//////////////////////////////////////////////////////////////////////////
PLATFORM_IMPL(batchnorm_bp, ENGINE_CUDA) {
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
NDArray* gradO = INPUT_VARIABLE(block.width() - 1); // next epsilon
NDArray* gradI = OUTPUT_VARIABLE(0);
NDArray* gradM = OUTPUT_VARIABLE(1);
NDArray* gradV = OUTPUT_VARIABLE(2);
NDArray* gradG = nullptr;
NDArray* gradB = nullptr;
const bool applyScale = (bool)INT_ARG(0);
const bool applyOffset = (bool)INT_ARG(1);
const float epsilon = T_ARG(0);
if(applyScale) {
gamma = INPUT_VARIABLE(3);
gradG = OUTPUT_VARIABLE(3);
}
if(applyOffset) {
beta = INPUT_VARIABLE(3 + (int)applyScale);
gradB = OUTPUT_VARIABLE(3 + (int)applyScale);
}
const int numOfIntArgs = block.getIArguments()->size();
const int inRank = input->rankOf();
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(inRank-1); // default dimension to reduce along is last dimension
const int numOfAxes = axes.size();
REQUIRE_TRUE(numOfAxes <= inRank, 0, "BATCHNORM_BP CUDNN op: too big number of input axes to normalize over, expected number should be less or equal to rank of input array, but got %i and %i correspondingly !", numOfAxes, inRank);
// evaluate expected shape for mean, variance and gamma. These 3 arrays should have identical shapes
// for example if input shape is {2,3,4,5,6} and axes = {1,3}, then expected shape would be {1,3,1,5,1}, and if axes = {3}, then expected shape would be {5}
std::vector<Nd4jLong> expShape;
if(numOfAxes == 1)
expShape.push_back(input->sizeAt(axes[0]));
else { // get, for example, something like {1, inputDim1, 1, inputDim3, 1} if axes = {1, 3}
expShape = std::vector<Nd4jLong>(inRank, 1);
for(uint i = 0; i < numOfAxes; ++i)
expShape[axes[i]] = input->sizeAt(axes[i]);
}
REQUIRE_TRUE(mean->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of mean array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(mean).c_str());
REQUIRE_TRUE(variance->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of variance array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(variance).c_str());
if(gamma)
REQUIRE_TRUE(gamma->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of gamma array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(gamma).c_str());
if(beta)
REQUIRE_TRUE(beta->isSameShape(expShape), 0, "BATCHNORM_BP CUDNN op: wrong shape of beta array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expShape).c_str(), ShapeUtils::shapeAsString(beta).c_str());
REQUIRE_TRUE(input->isSameShape(gradO), 0, "BATCHNORM_BP CUDNN op: wrong shape of output gradients array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(input).c_str(), ShapeUtils::shapeAsString(gradO).c_str());
// types of all input arrays should be the same (except gradO)
for(int i = 1; i < block.width() - 2; ++i)
REQUIRE_TRUE(INPUT_VARIABLE(0)->dataType() == INPUT_VARIABLE(i)->dataType(), 0, "BATCHNORM_BP CUDNN op: types of arrays (input, mean, variance, gamma, beta) should be the same !");
// cudnn supports NCHW format only
const bool needPermut = axes.size() == 1 && mean->lengthOf() != input->sizeAt(1);
if(needPermut) { // if NHWC
std::vector<int> perm = inRank == 4 ? std::vector<int>({0, 3, 1, 2}) : std::vector<int>({0, 4, 1, 2, 3}); // NHWC -> NCHW
input = new NDArray(input->permute(perm));
gradO = new NDArray(gradO->permute(perm));
gradI = new NDArray(gradI->permute(perm));
}
// cudnn requires gamma, gradG, gradB to be non-nullptr
if(!applyScale) {
gamma = new NDArray(mean);
gradG = new NDArray(mean);
*gamma = 1;
}
if(!applyOffset)
gradB = new NDArray(mean);
// calculations
batchnormBpCUDNN(block.launchContext(), input, mean, variance, gamma, gradO, gradI, gradG, gradB, epsilon, axes.size() == 1);
*gradM = 0; // put zeros so far
*gradV = 0; // put zeros so far
if(needPermut) {
delete input;
delete gradO;
delete gradI;
}
if(!applyScale) {
delete gamma;
delete gradG;
}
if(!applyOffset)
delete gradB;
return Status::OK();
}
PLATFORM_CHECK(batchnorm_bp, ENGINE_CUDA) {
NDArray* input = INPUT_VARIABLE(0);
NDArray* mean = INPUT_VARIABLE(1);
NDArray* variance = INPUT_VARIABLE(2);
NDArray* gamma = nullptr;
NDArray* beta = nullptr;
NDArray* gradO = INPUT_VARIABLE(block.width() - 1); // next epsilon
NDArray* gradI = OUTPUT_VARIABLE(0);
NDArray* gradM = OUTPUT_VARIABLE(1);
NDArray* gradV = OUTPUT_VARIABLE(2);
NDArray* gradG = nullptr;
NDArray* gradB = nullptr;
const int numOfIntArgs = block.getIArguments()->size();
const int xRank = input->rankOf();
// *********************************** //
if(xRank != 4 && xRank != 5)
return false;
// *********************************** //
const bool badType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF;
if(badType)
return false;
// *********************************** //
// get axes args to normalize input array over
std::vector<int> axes;
if(numOfIntArgs > 2)
for(int i = 2; i < numOfIntArgs; ++i)
axes.push_back(INT_ARG(i));
else
axes.push_back(xRank-1); // default dimension to reduce along is last dimension
if(axes.size() != 1 && axes.size() != 3 && axes.size() != 4)
return false;
// *********************************** //
bool allParamsHaveSameShapeAndStrides = shape::haveSameShapeAndStrides(mean->getShapeInfo(), variance->getShapeInfo());
if(gamma)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gamma->getShapeInfo());
if(gradG)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gradG->getShapeInfo());
if(gradB)
allParamsHaveSameShapeAndStrides &= shape::haveSameShapeAndStrides(mean->getShapeInfo(), gradB->getShapeInfo());
if(!allParamsHaveSameShapeAndStrides)
return false;
// *********************************** //
bool isFormatGood = false;
if(axes.size() == 1)
isFormatGood = mean->lengthOf() == input->sizeAt(1) || mean->lengthOf() == input->sizeAt(-1); // mean [C]
else {
auto inputShapeModif = input->getShapeAsVector(); // [dim0,dim1,dim2,dim3] 4D or [dim0,dim1,dim2,dim3,dim4]
inputShapeModif[0] = 1;
isFormatGood = mean->isSameShape(inputShapeModif); // mean [1,dim1,dim2,dim3] 4D or [1,dim1,dim2,dim3,dim4]
}
if(!isFormatGood)
return false;
return true;
}
}
}
}
|
6b79321b6edea3cef7190cacc2c001b70e6b44fb.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "decode.h"
#include "utils.h"
#include <algorithm>
#include <cstdint>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/tabulate.h>
#include <thrust/count.h>
#include <thrust/find.h>
#include <thrust/system/hip/detail/hipcub/hipcub.hpp>
#include <thrust/system/hip/detail/cub/iterator/counting_input_iterator.cuh>
namespace retinanet {
namespace cuda {
int decode(int batch_size,
const void *const *inputs, void **outputs,
size_t height, size_t width, size_t scale,
size_t num_anchors, size_t num_classes,
const std::vector<float> &anchors, float score_thresh, int top_n,
void *workspace, size_t workspace_size, hipStream_t stream) {
int scores_size = num_anchors * num_classes * height * width;
if (!workspace || !workspace_size) {
// Return required scratch space size cub style
workspace_size = get_size_aligned<float>(anchors.size()); // anchors
workspace_size += get_size_aligned<bool>(scores_size); // flags
workspace_size += get_size_aligned<int>(scores_size); // indices
workspace_size += get_size_aligned<int>(scores_size); // indices_sorted
workspace_size += get_size_aligned<float>(scores_size); // scores
workspace_size += get_size_aligned<float>(scores_size); // scores_sorted
size_t temp_size_flag = 0;
thrust::cuda_cub::hipcub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag,
thrust::cuda_cub::hipcub::CountingInputIterator<int>(scores_size),
(bool *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
size_t temp_size_sort = 0;
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort,
(float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
workspace_size += ::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto anchors_d = get_next_ptr<float>(anchors.size(), workspace, workspace_size);
hipMemcpy(anchors_d, anchors.data(), anchors.size() * sizeof *anchors_d, hipMemcpyHostToDevice);
auto flags = get_next_ptr<bool>(scores_size, workspace, workspace_size);
auto indices = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto scores = get_next_ptr<float>(scores_size, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(scores_size, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++) {
auto in_scores = static_cast<const float *>(inputs[0]) + batch * scores_size;
auto in_boxes = static_cast<const float *>(inputs[1]) + batch * scores_size * 4 / num_classes;
auto out_scores = static_cast<float *>(outputs[0]) + batch * top_n;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * top_n;
auto out_classes = static_cast<float *>(outputs[2]) + batch * top_n;
// Discard scores below threshold
thrust::transform(thrust::device, in_scores, in_scores + scores_size,
flags, thrust::placeholders::_1 > score_thresh);
int *num_selected = reinterpret_cast<int *>(scores);
thrust::cuda_cub::hipcub::DeviceSelect::Flagged(workspace, workspace_size,
thrust::cuda_cub::hipcub::CountingInputIterator<int>(0),
flags, indices, num_selected, scores_size);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Only keep top n scores
if (num_detections > top_n) {
thrust::gather(thrust::device, indices, indices + num_detections,
in_scores, scores);
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices, indices_sorted, num_detections);
indices = indices_sorted;
num_detections = top_n;
}
// Gather boxes
bool has_anchors = !anchors.empty();
thrust::transform(thrust::device, indices, indices + num_detections,
thrust::make_zip_iterator(thrust::make_tuple(out_scores, out_boxes, out_classes)),
[=] __device__ (int i) {
int x = i % width;
int y = (i / width) % height;
int a = (i / num_classes / height / width) % num_anchors;
int cls = (i / height / width) % num_classes;
float4 box = float4{
in_boxes[((a * 4 + 0) * height + y) * width + x],
in_boxes[((a * 4 + 1) * height + y) * width + x],
in_boxes[((a * 4 + 2) * height + y) * width + x],
in_boxes[((a * 4 + 3) * height + y) * width + x]
};
if (has_anchors) {
// Add anchors offsets to deltas
float x = (i % width) * scale;
float y = ((i / width) % height) * scale;
float *d = anchors_d + 4*a;
float x1 = x + d[0];
float y1 = y + d[1];
float x2 = x + d[2];
float y2 = y + d[3];
float w = x2 - x1 + 1.0f;
float h = y2 - y1 + 1.0f;
float pred_ctr_x = box.x * w + x1 + 0.5f * w;
float pred_ctr_y = box.y * h + y1 + 0.5f * h;
float pred_w = exp(box.z) * w;
float pred_h = exp(box.w) * h;
box = float4{
max(0.0f, pred_ctr_x - 0.5f * pred_w),
max(0.0f, pred_ctr_y - 0.5f * pred_h),
min(pred_ctr_x + 0.5f * pred_w - 1.0f, width * scale - 1.0f),
min(pred_ctr_y + 0.5f * pred_h - 1.0f, height * scale - 1.0f)
};
}
return thrust::make_tuple(in_scores[i], box, cls);
});
// Zero-out unused scores
if (num_detections < top_n) {
thrust::fill(thrust::device, out_scores + num_detections,
out_scores + top_n, 0.0f);
thrust::fill(thrust::device, out_classes + num_detections,
out_classes + top_n, 0.0f);
}
}
return 0;
}
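// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original plugin): decode() follows the CUB
// two-phase convention -- call it once with a null workspace to query the
// scratch size, allocate that much device memory, then call it again to run.
// The feature-map sizes, anchor count and thresholds below are hypothetical
// placeholders chosen only to illustrate the calling pattern.
inline int run_decode_example(const void *const *inputs, void **outputs,
const std::vector<float> &anchors, hipStream_t stream) {
// Phase 1: size query (null workspace makes decode() return the required byte count).
size_t ws_size = decode(1, nullptr, nullptr, 80, 80, 8, 9, 80, anchors,
0.05f, 1000, nullptr, 0, stream);
void *ws = nullptr;
hipMalloc(&ws, ws_size);
// Phase 2: the real call with the allocated scratch space.
int rc = decode(1, inputs, outputs, 80, 80, 8, 9, 80, anchors,
0.05f, 1000, ws, ws_size, stream);
hipFree(ws);
return rc;
}
// ---------------------------------------------------------------------------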
}
}
|
6b79321b6edea3cef7190cacc2c001b70e6b44fb.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "decode.h"
#include "utils.h"
#include <algorithm>
#include <cstdint>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/tabulate.h>
#include <thrust/count.h>
#include <thrust/find.h>
#include <thrust/system/cuda/detail/cub/device/device_radix_sort.cuh>
#include <thrust/system/cuda/detail/cub/iterator/counting_input_iterator.cuh>
namespace retinanet {
namespace cuda {
int decode(int batch_size,
const void *const *inputs, void **outputs,
size_t height, size_t width, size_t scale,
size_t num_anchors, size_t num_classes,
const std::vector<float> &anchors, float score_thresh, int top_n,
void *workspace, size_t workspace_size, cudaStream_t stream) {
int scores_size = num_anchors * num_classes * height * width;
if (!workspace || !workspace_size) {
// Return required scratch space size cub style
workspace_size = get_size_aligned<float>(anchors.size()); // anchors
workspace_size += get_size_aligned<bool>(scores_size); // flags
workspace_size += get_size_aligned<int>(scores_size); // indices
workspace_size += get_size_aligned<int>(scores_size); // indices_sorted
workspace_size += get_size_aligned<float>(scores_size); // scores
workspace_size += get_size_aligned<float>(scores_size); // scores_sorted
size_t temp_size_flag = 0;
thrust::cuda_cub::cub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag,
thrust::cuda_cub::cub::CountingInputIterator<int>(scores_size),
(bool *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
size_t temp_size_sort = 0;
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort,
(float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, scores_size);
workspace_size += std::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto anchors_d = get_next_ptr<float>(anchors.size(), workspace, workspace_size);
cudaMemcpy(anchors_d, anchors.data(), anchors.size() * sizeof *anchors_d, cudaMemcpyHostToDevice);
auto flags = get_next_ptr<bool>(scores_size, workspace, workspace_size);
auto indices = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(scores_size, workspace, workspace_size);
auto scores = get_next_ptr<float>(scores_size, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(scores_size, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++) {
auto in_scores = static_cast<const float *>(inputs[0]) + batch * scores_size;
auto in_boxes = static_cast<const float *>(inputs[1]) + batch * scores_size * 4 / num_classes;
auto out_scores = static_cast<float *>(outputs[0]) + batch * top_n;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * top_n;
auto out_classes = static_cast<float *>(outputs[2]) + batch * top_n;
// Discard scores below threshold
thrust::transform(thrust::device, in_scores, in_scores + scores_size,
flags, thrust::placeholders::_1 > score_thresh);
int *num_selected = reinterpret_cast<int *>(scores);
thrust::cuda_cub::cub::DeviceSelect::Flagged(workspace, workspace_size,
thrust::cuda_cub::cub::CountingInputIterator<int>(0),
flags, indices, num_selected, scores_size);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Only keep top n scores
if (num_detections > top_n) {
thrust::gather(thrust::device, indices, indices + num_detections,
in_scores, scores);
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices, indices_sorted, num_detections);
indices = indices_sorted;
num_detections = top_n;
}
// Gather boxes
bool has_anchors = !anchors.empty();
thrust::transform(thrust::device, indices, indices + num_detections,
thrust::make_zip_iterator(thrust::make_tuple(out_scores, out_boxes, out_classes)),
[=] __device__ (int i) {
int x = i % width;
int y = (i / width) % height;
int a = (i / num_classes / height / width) % num_anchors;
int cls = (i / height / width) % num_classes;
float4 box = float4{
in_boxes[((a * 4 + 0) * height + y) * width + x],
in_boxes[((a * 4 + 1) * height + y) * width + x],
in_boxes[((a * 4 + 2) * height + y) * width + x],
in_boxes[((a * 4 + 3) * height + y) * width + x]
};
if (has_anchors) {
// Add anchors offsets to deltas
float x = (i % width) * scale;
float y = ((i / width) % height) * scale;
float *d = anchors_d + 4*a;
float x1 = x + d[0];
float y1 = y + d[1];
float x2 = x + d[2];
float y2 = y + d[3];
float w = x2 - x1 + 1.0f;
float h = y2 - y1 + 1.0f;
float pred_ctr_x = box.x * w + x1 + 0.5f * w;
float pred_ctr_y = box.y * h + y1 + 0.5f * h;
float pred_w = exp(box.z) * w;
float pred_h = exp(box.w) * h;
box = float4{
max(0.0f, pred_ctr_x - 0.5f * pred_w),
max(0.0f, pred_ctr_y - 0.5f * pred_h),
min(pred_ctr_x + 0.5f * pred_w - 1.0f, width * scale - 1.0f),
min(pred_ctr_y + 0.5f * pred_h - 1.0f, height * scale - 1.0f)
};
}
return thrust::make_tuple(in_scores[i], box, cls);
});
// Zero-out unused scores
if (num_detections < top_n) {
thrust::fill(thrust::device, out_scores + num_detections,
out_scores + top_n, 0.0f);
thrust::fill(thrust::device, out_classes + num_detections,
out_classes + top_n, 0.0f);
}
}
return 0;
}
}
}
|
3092e91a8521352ed0322802fd072cae6acf2dbc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void magnitudeCopy(float *mag_vec, float *vec, const int n)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (xIndex < n) { mag_vec[xIndex] = abs(vec[xIndex]); }
}
|
3092e91a8521352ed0322802fd072cae6acf2dbc.cu
|
#include "includes.h"
__global__ void magnitudeCopy(float *mag_vec, float *vec, const int n)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (xIndex < n) { mag_vec[xIndex] = abs(vec[xIndex]); }
}
|
9f94bb910590849406be977d67835690ac306a4e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ void ArrayMin( void* param)
{
float* paramIn = (float*)param;
int N = (int)(*paramIn);
paramIn = paramIn + 1;
float* a = paramIn;
float* b = a + N;
int tid = threadIdx.x;
int cacheIndex = threadIdx.x;
float temp = a[tid];
#if 1
while (tid < N)
{
//temp += a[tid] * b[tid];
//temp = a[tid] ;//>= a[tid+1] ? a[tid] : a[tid+1];
if (temp > a[tid])
temp = a[tid];
tid += 32;
}
#endif
// set the cache values
b[cacheIndex] = temp;
//printf("Cache[%d]=%g\n", cacheIndex, temp);
#if 1
// synchronize threads in this block
//__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = 32/2;
//if (cacheIndex < 0)
//printf("i=%d,blockDim.x=%d,tid=%d\n",i, blockDim.x, cacheIndex);
while (i != 0)
{
if (cacheIndex < i )
{
//cache[cacheIndex] += cache[cacheIndex + i];
if (b[cacheIndex] >= b[cacheIndex + i])
b[cacheIndex] = b[cacheIndex+i];
else
b[cacheIndex] = b[cacheIndex];
//printf("i=%d,tid1=%d,blockIdx.x=%d,tid=%d,%g,%g\n",
// i, tid1,blockIdx.x, cacheIndex, cache[cacheIndex],cache[cacheIndex+1]);
}
//__syncthreads();
i /= 2;
}
//if (cacheIndex == 0)
//{
//printf("c[blockIdx.x]:%g, :%d\n", cache[0],blockIdx.x);
// c[cacheIndex] = cache[0];
//}
#endif
}
#if 0
__device__ void VecDot( void* param)
{
int N = 32;
float* paramIn = (float*)param;
float* paramInOrig = (float*)param;
int size = (int)(*paramIn);
paramIn = paramIn + 1;
float* a = paramIn;
paramIn = paramIn + size;
float* b = paramIn;
paramIn = paramIn + size;
float* c = paramIn;
//int tid = threadIdx.x%N + blockIdx.x * blockDim.x;
int tid = threadIdx.x%N;
int cacheIndex = threadIdx.x%N;
float temp = 0;
while (tid < size)
{
temp += a[tid] * b[tid];
tid = tid + N;
//tid += blockDim.x * gridDim.x;
}
// set the cache values
c[cacheIndex] = temp;
#if 1
// synchronize threads in this block
//__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
//int i = blockDim.x/2;
int i = N/2;
while (i != 0) {
if (cacheIndex < i)
c[cacheIndex] += c[cacheIndex + i];
//__syncthreads();
i /= 2;
}
//__syncthreads();
if (cacheIndex == 0)
{
//paramInOrig[0] = 44;
//printf("Val: %f\n", c[0]);
}
#endif
//paramInOrig[0] = 44;
}
#endif
#if 0
__global__ void dot( float *a, float *b, float *c ) {
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = blockDim.x/2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
#endif
|
9f94bb910590849406be977d67835690ac306a4e.cu
|
__device__ void ArrayMin( void* param)
{
float* paramIn = (float*)param;
int N = (int)(*paramIn);
paramIn = paramIn + 1;
float* a = paramIn;
float* b = a + N;
int tid = threadIdx.x;
int cacheIndex = threadIdx.x;
float temp = a[tid];
#if 1
while (tid < N)
{
//temp += a[tid] * b[tid];
//temp = a[tid] ;//>= a[tid+1] ? a[tid] : a[tid+1];
if (temp > a[tid])
temp = a[tid];
tid += 32;
}
#endif
// set the cache values
b[cacheIndex] = temp;
//printf("Cache[%d]=%g\n", cacheIndex, temp);
#if 1
// synchronize threads in this block
//__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = 32/2;
//if (cacheIndex < 0)
//printf("i=%d,blockDim.x=%d,tid=%d\n",i, blockDim.x, cacheIndex);
while (i != 0)
{
if (cacheIndex < i )
{
//cache[cacheIndex] += cache[cacheIndex + i];
if (b[cacheIndex] >= b[cacheIndex + i])
b[cacheIndex] = b[cacheIndex+i];
else
b[cacheIndex] = b[cacheIndex];
//printf("i=%d,tid1=%d,blockIdx.x=%d,tid=%d,%g,%g\n",
// i, tid1,blockIdx.x, cacheIndex, cache[cacheIndex],cache[cacheIndex+1]);
}
//__syncthreads();
i /= 2;
}
//if (cacheIndex == 0)
//{
//printf("c[blockIdx.x]:%g, :%d\n", cache[0],blockIdx.x);
// c[cacheIndex] = cache[0];
//}
#endif
}
#if 0
__device__ void VecDot( void* param)
{
int N = 32;
float* paramIn = (float*)param;
float* paramInOrig = (float*)param;
int size = (int)(*paramIn);
paramIn = paramIn + 1;
float* a = paramIn;
paramIn = paramIn + size;
float* b = paramIn;
paramIn = paramIn + size;
float* c = paramIn;
//int tid = threadIdx.x%N + blockIdx.x * blockDim.x;
int tid = threadIdx.x%N;
int cacheIndex = threadIdx.x%N;
float temp = 0;
while (tid < size)
{
temp += a[tid] * b[tid];
tid = tid + N;
//tid += blockDim.x * gridDim.x;
}
// set the cache values
c[cacheIndex] = temp;
#if 1
// synchronize threads in this block
//__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
//int i = blockDim.x/2;
int i = N/2;
while (i != 0) {
if (cacheIndex < i)
c[cacheIndex] += c[cacheIndex + i];
//__syncthreads();
i /= 2;
}
//__syncthreads();
if (cacheIndex == 0)
{
//paramInOrig[0] = 44;
//printf("Val: %f\n", c[0]);
}
#endif
//paramInOrig[0] = 44;
}
#endif
#if 0
__global__ void dot( float *a, float *b, float *c ) {
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = blockDim.x/2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
#endif
|
8b0d46dd4596038dbac0b8f396bd5c19db6f6b64.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <limits>
#include <sys/time.h>
// #define PINNED_MEMORY
#define NUM_PARTICLES 100000
#define NUM_ITERATIONS 1000
#define BLOCK_SIZE 256
#define MICROSECONDS(start, end) ((end.tv_sec - start.tv_sec) * 1000000LL + end.tv_usec - start.tv_usec)
#define MILLISECONDS(start, end) MICROSECONDS(start, end) / 1000.0
#define SECONDS(start, end) MILLISECONDS(start, end) / 1000.0
typedef struct
{
float3 position;
float3 velocity;
} Particle;
void cpu_timestep(Particle *particles, const float dt)
{
for (unsigned int i = 0; i < NUM_PARTICLES; i++)
{
particles[i].position.x += particles[i].velocity.x * dt;
particles[i].position.y += particles[i].velocity.y * dt;
particles[i].position.z += particles[i].velocity.z * dt;
}
}
__global__ void gpu_timestep(Particle *particles, const float dt)
{
const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NUM_PARTICLES)
{
particles[i].position.x += particles[i].velocity.x * dt;
particles[i].position.y += particles[i].velocity.y * dt;
particles[i].position.z += particles[i].velocity.z * dt;
}
}
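// Editor's note: the launch in main() below sizes the grid with the usual ceiling division
// (NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE. With the defaults above that is
// (100000 + 255) / 256 = 391 blocks, i.e. 100096 threads, and the i < NUM_PARTICLES
// guard in gpu_timestep masks off the 96 surplus threads of the last block.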
int main(int argc, char **argv)
{
struct timeval start, end;
const float dt = 1.0;
// Initialize CPU data.
#ifdef PINNED_MEMORY
Particle *cpu_particles;
hipHostMalloc(&cpu_particles, NUM_PARTICLES * sizeof(Particle));
#else
Particle *cpu_particles = (Particle *)malloc(NUM_PARTICLES * sizeof(Particle));
#endif
for (unsigned int i = 0; i < NUM_PARTICLES; i++)
{
cpu_particles[i].position.x = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
cpu_particles[i].position.y = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
cpu_particles[i].position.z = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
cpu_particles[i].velocity.x = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
cpu_particles[i].velocity.y = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
cpu_particles[i].velocity.z = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
}
// Initialize data.
Particle *gpu_particles;
hipMalloc(&gpu_particles, NUM_PARTICLES * sizeof(Particle));
// Run simulation.
printf("Running simulation... ");
gettimeofday(&start, NULL);
for (unsigned int i = 0; i < NUM_ITERATIONS; i++)
{
hipMemcpy(gpu_particles, cpu_particles, NUM_PARTICLES * sizeof(Particle), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( gpu_timestep), dim3((NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, gpu_particles, dt);
hipDeviceSynchronize();
hipMemcpy(cpu_particles, gpu_particles, NUM_PARTICLES * sizeof(Particle), hipMemcpyDeviceToHost); // Copy anywhere.
}
gettimeofday(&end, NULL);
printf("Done! Took %lfs.\n", SECONDS(start, end));
// Free resources.
#ifdef PINNED_MEMORY
hipHostFree(cpu_particles);
#else
free(cpu_particles);
#endif
hipFree(gpu_particles);
return 0;
}
|
8b0d46dd4596038dbac0b8f396bd5c19db6f6b64.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <limits>
#include <sys/time.h>
// #define PINNED_MEMORY
#define NUM_PARTICLES 100000
#define NUM_ITERATIONS 1000
#define BLOCK_SIZE 256
#define MICROSECONDS(start, end) ((end.tv_sec - start.tv_sec) * 1000000LL + end.tv_usec - start.tv_usec)
#define MILLISECONDS(start, end) MICROSECONDS(start, end) / 1000.0
#define SECONDS(start, end) MILLISECONDS(start, end) / 1000.0
typedef struct
{
float3 position;
float3 velocity;
} Particle;
void cpu_timestep(Particle *particles, const float dt)
{
for (unsigned int i = 0; i < NUM_PARTICLES; i++)
{
particles[i].position.x += particles[i].velocity.x * dt;
particles[i].position.y += particles[i].velocity.y * dt;
particles[i].position.z += particles[i].velocity.z * dt;
}
}
__global__ void gpu_timestep(Particle *particles, const float dt)
{
const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < NUM_PARTICLES)
{
particles[i].position.x += particles[i].velocity.x * dt;
particles[i].position.y += particles[i].velocity.y * dt;
particles[i].position.z += particles[i].velocity.z * dt;
}
}
int main(int argc, char **argv)
{
struct timeval start, end;
const float dt = 1.0;
// Initialize CPU data.
#ifdef PINNED_MEMORY
Particle *cpu_particles;
cudaMallocHost(&cpu_particles, NUM_PARTICLES * sizeof(Particle));
#else
Particle *cpu_particles = (Particle *)malloc(NUM_PARTICLES * sizeof(Particle));
#endif
for (unsigned int i = 0; i < NUM_PARTICLES; i++)
{
cpu_particles[i].position.x = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
cpu_particles[i].position.y = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
cpu_particles[i].position.z = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
cpu_particles[i].velocity.x = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
cpu_particles[i].velocity.y = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
cpu_particles[i].velocity.z = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
}
// Initialize data.
Particle *gpu_particles;
cudaMalloc(&gpu_particles, NUM_PARTICLES * sizeof(Particle));
// Run simulation.
printf("Running simulation... ");
gettimeofday(&start, NULL);
for (unsigned int i = 0; i < NUM_ITERATIONS; i++)
{
cudaMemcpy(gpu_particles, cpu_particles, NUM_PARTICLES * sizeof(Particle), cudaMemcpyHostToDevice);
gpu_timestep<<<(NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(gpu_particles, dt);
cudaDeviceSynchronize();
cudaMemcpy(cpu_particles, gpu_particles, NUM_PARTICLES * sizeof(Particle), cudaMemcpyDeviceToHost); // Copy anywhere.
}
gettimeofday(&end, NULL);
printf("Done! Took %lfs.\n", SECONDS(start, end));
// Free resources.
#ifdef PINNED_MEMORY
cudaFreeHost(cpu_particles);
#else
free(cpu_particles);
#endif
cudaFree(gpu_particles);
return 0;
}
|
67620bdc0fbd9234fae0d85b23f18602ffb916c9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <inttypes.h>
#include "header.h"
//Thread block size
#define BLOCK_SIZE 16 //number of threads for each block
#define BIN_SIZE 2 //size of a histogram bin
#define TH 50 //threshold for triggering the histogram
__global__ void GridKernel(const DataFrame, DataFrame, unsigned int*); //add the parameters
//------------------------------------------------------------
//Histogram algorithm HOST CODE
unsigned int CircleFit(const DataFrame data, DataFrame circles)
{
//load data to device memory
DataFrame d_data;
d_data.w = data.w;
d_data.h = data.h;
size_t size = data.w * data.h * sizeof(float);
hipError_t err = hipMalloc(&d_data.e, size);
if(err)
printf("CUDA malloc data DataFrame: %s\n",hipGetErrorString(err));
hipMemcpy(d_data.e, data.e, size, hipMemcpyHostToDevice);
//allocate circles
DataFrame d_circles;
d_circles.w = circles.w;
d_circles.h = circles.h;
size = circles.w * circles.h * sizeof(float);
err = hipMalloc(&d_circles.e, size);
if(err)
printf("CUDA malloc circles DataFrame: %s\n",hipGetErrorString(err));
//allocate variable for counting circles found
unsigned int *d_counter;
unsigned int h_counter = 0;
hipMallocManaged(&d_counter, sizeof(unsigned int));
if(err)
printf("CUDA malloc counter variable: %s\n",hipGetErrorString(err));
// You assigned the pointer the memory address to use, but it is GPU memory, so it cannot be accessed from here
// and the value in the pointed-to cell is not initialized
hipMemset(d_counter, 0, sizeof(unsigned int));
hipMemcpy(&h_counter, d_counter, sizeof(unsigned int), hipMemcpyDeviceToHost);
float time;
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//start time
hipEventRecord(start);
// Define the geometry
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE/ dimBlock.x, GRID_SIZE/ dimBlock.y);
//warning GRID_SIZE must be multiple of 16
// Invoke kernel
hipLaunchKernelGGL(( GridKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_data, d_circles, d_counter);
err = hipDeviceSynchronize();
//stop time
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
if(err)
printf("Run kernel: %s\n", hipGetErrorString(err));
//printf("Time: %3.5f ms\n",time);
// Read C from device memory
size = circles.w * circles.h * sizeof(float);
err = hipMemcpy(circles.e, d_circles.e, size, hipMemcpyDeviceToHost);
if(err)
printf("Copy circles off of device: %s\n",hipGetErrorString(err));
hipMemcpy(&h_counter, d_counter, sizeof(unsigned int), hipMemcpyDeviceToHost);
//printf("cerchi trovati : %u\n", h_counter);
// Free device memory
hipFree(d_data.e);
hipFree(d_circles.e);
hipFree(d_counter);
return h_counter;
} //END HOST FUNCTION
//---------------------------------------------------------
// thread aware log function
__device__ void log_msg(const char * message)
{
printf("%d.%d.%d.%d-%s", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, message);
}
//Device function to calculate distance
__device__ float Distance2(float x, float y, float xd, float yd)
{
return (x-xd)*(x-xd)+(y-yd)*(y-yd);
}
//---------------------------------------------------------
//Device function to Get Element from data
__device__ float GetElement(const DataFrame D, int row, int col)
{
if ( (row < D.h ) && (col < D.w ) )
return D.e[row * D.w + col];
else
return 0.0;
}
//---------------------------------------------------------------
//Device function to Set Element of data
__device__ void SetElement(DataFrame D, int row, int col, float value)
{
if ( (row < D.h ) && (col < D.w ) )
D.e[row * D.w + col] = value;
}
//---------------------------------------------------------
//Device function to fill histo
// __device__ void HistoFill(int x, int y, DataFrame data,int *h, DataFrame circles, unsigned int *counter)
__device__ void HstFill(int x, int y, DataFrame data,DataFrame circles, unsigned int *counter)
{
const size_t HST_SIZE = GRID_SIZE*2/BIN_SIZE;
int hst[HST_SIZE] = {0}; // must be initialized
int i, idx;
// atomicAdd(counter,1);
for ( i=0; i<data.h; i++)
{
// float xd, yd, d;
float xd, yd;
int d;
xd = GetElement(data, i, 0);
yd = GetElement(data, i, 1);
d = __float2int_rn( sqrt( Distance2( __int2float_rn(x), __int2float_rn(y),xd,yd) ))/ BIN_SIZE ;
if (d < HST_SIZE) hst[d] +=1; // this check avoids memory-access errors in case of "odd" values
}
// after filling the hst array, scan all of its elements and add each circle found to the list of circles
for ( i=0; i < HST_SIZE; i++)
{
if(hst[i]>TH)
{
idx = atomicAdd(counter,1); // guarantees the increment happens one thread at a time, so idx can be used only by the executing thread
SetElement(circles, idx,0, __int2float_rn(x));
SetElement(circles, idx,1, __int2float_rn(y));
SetElement(circles, idx,2, __int2float_rn(i*BIN_SIZE) );
}
}
}
//---------------------------------------------------------
//Histogram algorithm kernel
__global__ void GridKernel(DataFrame data, DataFrame circles,unsigned int *counter)
{
//x and y indexes
int x = blockIdx.x * blockDim.x+ threadIdx.x - (GRID_SIZE / 2);
int y = blockIdx.y * blockDim.y +threadIdx.y - (GRID_SIZE / 2);
HstFill(x,y, data, circles, counter);
}
//------------------------------------------------------------
//dump dataframe function
void dump(DataFrame m)
{
for (int i = 0; i< m.h; i++) // Loop over the rows
{
for (int j = 0; j< m.w; j++) // Loop over the columns
printf("%3.1f\t", m.e[i*m.w + j]);
printf("\n"); // Newline at the end of the row
}
printf("\n");
}
//-------------------------------------------------------------
int main(int argc, char** argv)
{
std::vector<float> buf = ReadData("file.dat");
DataFrame data, circles;
data.h = buf.size()>>1;
data.w = 2;
data.e = (float*) malloc(data.w * data.h * sizeof(float));
circles.h = 10;
circles.w = 3;
circles.e = (float*) malloc(circles.w * circles.h * sizeof(float));
data.e = &buf[0]; //copy imported floats from buf to data .... this does not actually copy the data; done this way, the memory allocated above is not needed
buf.clear();
unsigned int h_counter = CircleFit(data, circles);
circles.h = h_counter;
circles.e = (float*) malloc(circles.w * circles.h * sizeof(float));
h_counter = CircleFit(data, circles); //CircleFit returns the number of circles found
buf = media1 (circles); //move the fitted circle data into the buffer
if (h_counter == 0 ) buf = {0, 0, 0};
AppendData (buf, "fit.dat"); //write buf to fit.dat (appending)
// dump(data);
// dump(circles);
return 0;
}
|
67620bdc0fbd9234fae0d85b23f18602ffb916c9.cu
|
#include <inttypes.h>
#include "header.h"
//Thread block size
#define BLOCK_SIZE 16 //number of threads for each block
#define BIN_SIZE 2 //size of a histogram bin
#define TH 50 //threshold for triggering the histogram
__global__ void GridKernel(const DataFrame, DataFrame, unsigned int*); //add the parameters
//------------------------------------------------------------
//Histogram algorithm HOST CODE
unsigned int CircleFit(const DataFrame data, DataFrame circles)
{
//load data to device memory
DataFrame d_data;
d_data.w = data.w;
d_data.h = data.h;
size_t size = data.w * data.h * sizeof(float);
cudaError_t err = cudaMalloc(&d_data.e, size);
if(err)
printf("CUDA malloc data DataFrame: %s\n",cudaGetErrorString(err));
cudaMemcpy(d_data.e, data.e, size, cudaMemcpyHostToDevice);
//allocate circles
DataFrame d_circles;
d_circles.w = circles.w;
d_circles.h = circles.h;
size = circles.w * circles.h * sizeof(float);
err = cudaMalloc(&d_circles.e, size);
if(err)
printf("CUDA malloc circles DataFrame: %s\n",cudaGetErrorString(err));
//allocate variable for counting circles found
unsigned int *d_counter;
unsigned int h_counter = 0;
cudaMallocManaged(&d_counter, sizeof(unsigned int));
if(err)
printf("CUDA malloc counter variable: %s\n",cudaGetErrorString(err));
// You assigned the pointer the memory address to use, but it is GPU memory, so it cannot be accessed from here
// and the value in the pointed-to cell is not initialized
cudaMemset(d_counter, 0, sizeof(unsigned int));
cudaMemcpy(&h_counter, d_counter, sizeof(unsigned int), cudaMemcpyDeviceToHost);
float time;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//start time
cudaEventRecord(start);
// Define the geometry
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE/ dimBlock.x, GRID_SIZE/ dimBlock.y);
//warning GRID_SIZE must be multiple of 16
// Invoke kernel
GridKernel<<<dimGrid, dimBlock>>>(d_data, d_circles, d_counter);
err = cudaDeviceSynchronize();
//stop time
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
if(err)
printf("Run kernel: %s\n", cudaGetErrorString(err));
//printf("Time: %3.5f ms\n",time);
// Read C from device memory
size = circles.w * circles.h * sizeof(float);
err = cudaMemcpy(circles.e, d_circles.e, size, cudaMemcpyDeviceToHost);
if(err)
printf("Copy circles off of device: %s\n",cudaGetErrorString(err));
cudaMemcpy(&h_counter, d_counter, sizeof(unsigned int), cudaMemcpyDeviceToHost);
//printf("cerchi trovati : %u\n", h_counter);
// Free device memory
cudaFree(d_data.e);
cudaFree(d_circles.e);
cudaFree(d_counter);
return h_counter;
} //END HOST FUNCTION
//---------------------------------------------------------
// thread aware log function
__device__ void log_msg(const char * message)
{
printf("%d.%d.%d.%d-%s", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, message);
}
//Device function to calculate distance
__device__ float Distance2(float x, float y, float xd, float yd)
{
return (x-xd)*(x-xd)+(y-yd)*(y-yd);
}
//---------------------------------------------------------
//Device function to Get Element from data
__device__ float GetElement(const DataFrame D, int row, int col)
{
if ( (row < D.h ) && (col < D.w ) )
return D.e[row * D.w + col];
else
return 0.0;
}
//---------------------------------------------------------------
//Device function to Set Element of data
__device__ void SetElement(DataFrame D, int row, int col, float value)
{
if ( (row < D.h ) && (col < D.w ) )
D.e[row * D.w + col] = value;
}
//---------------------------------------------------------
//Device function to fill histo
// __device__ void HistoFill(int x, int y, DataFrame data,int *h, DataFrame circles, unsigned int *counter)
__device__ void HstFill(int x, int y, DataFrame data,DataFrame circles, unsigned int *counter)
{
const size_t HST_SIZE = GRID_SIZE*2/BIN_SIZE;
int hst[HST_SIZE] = {0}; // must be initialized
int i, idx;
// atomicAdd(counter,1);
for ( i=0; i<data.h; i++)
{
// float xd, yd, d;
float xd, yd;
int d;
xd = GetElement(data, i, 0);
yd = GetElement(data, i, 1);
d = __float2int_rn( sqrt( Distance2( __int2float_rn(x), __int2float_rn(y),xd,yd) ))/ BIN_SIZE ;
if (d < HST_SIZE) hst[d] +=1; // this check avoids memory-access errors in case of "odd" values
}
// after filling the hst array, scan all of its elements and add each circle found to the list of circles
for ( i=0; i < HST_SIZE; i++)
{
if(hst[i]>TH)
{
idx = atomicAdd(counter,1); // guarantees the increment happens one thread at a time, so idx can be used only by the executing thread
SetElement(circles, idx,0, __int2float_rn(x));
SetElement(circles, idx,1, __int2float_rn(y));
SetElement(circles, idx,2, __int2float_rn(i*BIN_SIZE) );
}
}
}
//---------------------------------------------------------
//Histogram algorithm kernel
__global__ void GridKernel(DataFrame data, DataFrame circles,unsigned int *counter)
{
//x and y indexes
int x = blockIdx.x * blockDim.x+ threadIdx.x - (GRID_SIZE / 2);
int y = blockIdx.y * blockDim.y +threadIdx.y - (GRID_SIZE / 2);
HstFill(x,y, data, circles, counter);
}
//------------------------------------------------------------
//dump dataframe function
void dump(DataFrame m)
{
for (int i = 0; i< m.h; i++) // Loop over the rows
{
for (int j = 0; j< m.w; j++) // Loop over the columns
printf("%3.1f\t", m.e[i*m.w + j]);
printf("\n"); // Newline at the end of the row
}
printf("\n");
}
//-------------------------------------------------------------
int main(int argc, char** argv)
{
std::vector<float> buf = ReadData("file.dat");
DataFrame data, circles;
data.h = buf.size()>>1;
data.w = 2;
data.e = (float*) malloc(data.w * data.h * sizeof(float));
circles.h = 10;
circles.w = 3;
circles.e = (float*) malloc(circles.w * circles.h * sizeof(float));
data.e = &buf[0]; //copy imported floats from buf to data .... this does not actually copy the data; done this way, the memory allocated above is not needed
buf.clear();
unsigned int h_counter = CircleFit(data, circles);
circles.h = h_counter;
circles.e = (float*) malloc(circles.w * circles.h * sizeof(float));
h_counter = CircleFit(data, circles); //CircleFit returns the number of circles found
buf = media1 (circles); //move the fitted circle data into the buffer
if (h_counter == 0 ) buf = {0, 0, 0};
AppendData (buf, "fit.dat"); //write buf to fit.dat (appending)
// dump(data);
// dump(circles);
return 0;
}
|
3551924d515377b873f91a370d259641b2653e42.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kfusion.h"
#undef isnan
#undef isfinite
#include <iostream>
#include <TooN/TooN.h>
#include <TooN/se3.h>
#include <TooN/GR_SVD.h>
static const float INVALID = -2; // this is used to mark invalid entries in normal or vertex maps
using namespace std;
__global__ void initVolume( Volume volume, const float2 val ){
uint3 pos = make_uint3(thr2pos2());
for(pos.z = 0; pos.z < volume.size.z; ++pos.z)
volume.set(pos, val);
}
__global__ void raycast( Image<float3> pos3D, Image<float3> normal, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep){
const uint2 pos = thr2pos2();
const float4 hit = raycast( volume, pos, view, nearPlane, farPlane, step, largestep );
if(hit.w > 0){
pos3D[pos] = make_float3(hit);
float3 surfNorm = volume.grad(make_float3(hit));
if(length(surfNorm) == 0){
normal[pos].x = INVALID;
} else {
normal[pos] = normalize(surfNorm);
}
} else {
pos3D[pos] = make_float3(0);
normal[pos] = make_float3(INVALID, 0, 0);
}
}
__forceinline__ __device__ float sq( const float x ){
return x*x;
}
__global__ void integrate( Volume vol, const Image<float> depth, const Matrix4 invTrack, const Matrix4 K, const float mu, const float maxweight){
uint3 pix = make_uint3(thr2pos2());
float3 pos = invTrack * vol.pos(pix);
float3 cameraX = K * pos;
const float3 delta = rotate(invTrack, make_float3(0,0, vol.dim.z / vol.size.z));
const float3 cameraDelta = rotate(K, delta);
for(pix.z = 0; pix.z < vol.size.z; ++pix.z, pos += delta, cameraX += cameraDelta){
if(pos.z < 0.0001f) // some near plane constraint
continue;
const float2 pixel = make_float2(cameraX.x/cameraX.z + 0.5f, cameraX.y/cameraX.z + 0.5f);
if(pixel.x < 0 || pixel.x > depth.size.x-1 || pixel.y < 0 || pixel.y > depth.size.y-1)
continue;
const uint2 px = make_uint2(cameraX.x/cameraX.z + 0.5f, cameraX.y/cameraX.z + 0.5f);
if(depth[px] == 0)
continue;
const float diff = (depth[px] - cameraX.z) * sqrt(1+sq(pos.x/pos.z) + sq(pos.y/pos.z));
if(diff > -mu){
const float sdf = fminf(1.f, diff/mu);
float2 data = vol[pix];
data.x = clamp((data.y*data.x + sdf)/(data.y + 1), -1.f, 1.f);
data.y = fminf(data.y+1, maxweight);
vol.set(pix, data);
}
}
}
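// Editor's note: the update in integrate() above is the standard weighted running average
// used for TSDF fusion -- with stored value D and weight W, a new truncated sample sdf is
// folded in as D' = clamp((W*D + sdf) / (W + 1), -1, 1) and W' = min(W + 1, maxweight),
// so measurements are averaged over time and the weight saturates at maxweight.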
__global__ void depth2vertex( Image<float3> vertex, const Image<float> depth, const Matrix4 invK ){
const uint2 pixel = thr2pos2();
if(pixel.x >= depth.size.x || pixel.y >= depth.size.y )
return;
if(depth[pixel] > 0){
vertex[pixel] = depth[pixel] * (rotate(invK, make_float3(pixel.x, pixel.y, 1.f)));
} else {
vertex[pixel] = make_float3(0);
}
}
__global__ void vertex2normal( Image<float3> normal, const Image<float3> vertex ){
const uint2 pixel = thr2pos2();
if(pixel.x >= vertex.size.x || pixel.y >= vertex.size.y )
return;
const float3 left = vertex[make_uint2(max(int(pixel.x)-1,0), pixel.y)];
const float3 right = vertex[make_uint2(min(pixel.x+1,vertex.size.x-1), pixel.y)];
const float3 up = vertex[make_uint2(pixel.x, max(int(pixel.y)-1,0))];
const float3 down = vertex[make_uint2(pixel.x, min(pixel.y+1,vertex.size.y-1))];
if(left.z == 0 || right.z == 0 || up.z == 0 || down.z == 0) {
normal[pixel].x = INVALID;
return;
}
const float3 dxv = right - left;
const float3 dyv = down - up;
normal[pixel] = normalize(cross(dyv, dxv)); // switched dx and dy to get factor -1
}
template <int HALFSAMPLE>
__global__ void mm2meters( Image<float> depth, const Image<ushort> in ){
const uint2 pixel = thr2pos2();
depth[pixel] = in[pixel * (HALFSAMPLE+1)] / 1000.0f;
}
//column pass using coalesced global memory reads
__global__ void bilateral_filter(Image<float> out, const Image<float> in, const Image<float> gaussian, const float e_d, const int r) {
const uint2 pos = thr2pos2();
if(in[pos] == 0){
out[pos] = 0;
return;
}
float sum = 0.0f;
float t = 0.0f;
const float center = in[pos];
for(int i = -r; i <= r; ++i) {
for(int j = -r; j <= r; ++j) {
const float curPix = in[make_uint2(clamp(pos.x + i, 0u, in.size.x-1), clamp(pos.y + j, 0u, in.size.y-1))];
if(curPix > 0){
const float mod = sq(curPix - center);
const float factor = gaussian[make_uint2(i + r, 0)] * gaussian[make_uint2(j + r, 0)] * __expf(-mod / (2 * e_d * e_d));
t += factor * curPix;
sum += factor;
}
}
}
out[pos] = t / sum;
}
// filter and halfsample
__global__ void halfSampleRobust( Image<float> out, const Image<float> in, const float e_d, const int r){
const uint2 pixel = thr2pos2();
const uint2 centerPixel = 2 * pixel;
if(pixel.x >= out.size.x || pixel.y >= out.size.y )
return;
float sum = 0.0f;
float t = 0.0f;
const float center = in[centerPixel];
for(int i = -r + 1; i <= r; ++i){
for(int j = -r + 1; j <= r; ++j){
float current = in[make_uint2(clamp(make_int2(centerPixel.x + j, centerPixel.y + i), make_int2(0), make_int2(in.size.x - 1, in.size.y - 1)))]; // TODO simplify this!
if(fabsf(current - center) < e_d){
sum += 1.0f;
t += current;
}
}
}
out[pixel] = t / sum;
}
__global__ void generate_gaussian(Image<float> out, float delta, int radius) {
int x = threadIdx.x - radius;
out[make_uint2(threadIdx.x,0)] = __expf(-(x * x) / (2 * delta * delta));
}
__global__ void track( Image<TrackData> output, const Image<float3> inVertex, const Image<float3> inNormal , const Image<float3> refVertex, const Image<float3> refNormal, const Matrix4 Ttrack, const Matrix4 view, const float dist_threshold, const float normal_threshold ) {
const uint2 pixel = thr2pos2();
if(pixel.x >= inVertex.size.x || pixel.y >= inVertex.size.y )
return;
TrackData & row = output[pixel];
if(inNormal[pixel].x == INVALID ){
row.result = -1;
return;
}
const float3 projectedVertex = Ttrack * inVertex[pixel];
const float3 projectedPos = view * projectedVertex;
const float2 projPixel = make_float2( projectedPos.x / projectedPos.z + 0.5f, projectedPos.y / projectedPos.z + 0.5f);
if(projPixel.x < 0 || projPixel.x > refVertex.size.x-1 || projPixel.y < 0 || projPixel.y > refVertex.size.y-1 ){
row.result = -2;
return;
}
const uint2 refPixel = make_uint2(projPixel.x, projPixel.y);
const float3 referenceNormal = refNormal[refPixel];
if(referenceNormal.x == INVALID){
row.result = -3;
return;
}
const float3 diff = refVertex[refPixel] - projectedVertex;
const float3 projectedNormal = rotate(Ttrack, inNormal[pixel]);
if(length(diff) > dist_threshold ){
row.result = -4;
return;
}
if(dot(projectedNormal, referenceNormal) < normal_threshold){
row.result = -5;
return;
}
row.result = 1;
row.error = dot(referenceNormal, diff);
((float3 *)row.J)[0] = referenceNormal;
((float3 *)row.J)[1] = cross(projectedVertex, referenceNormal);
}
__global__ void reduce( float * out, const Image<TrackData> J, const uint2 size){
__shared__ float S[112][32]; // this is for the final accumulation
const uint sline = threadIdx.x;
float sums[32];
float * jtj = sums + 7;
float * info = sums + 28;
for(uint i = 0; i < 32; ++i)
sums[i] = 0;
for(uint y = blockIdx.x; y < size.y; y += gridDim.x){
for(uint x = sline; x < size.x; x += blockDim.x ){
const TrackData & row = J[make_uint2(x, y)];
if(row.result < 1){
info[1] += row.result == -4 ? 1 : 0;
info[2] += row.result == -5 ? 1 : 0;
info[3] += row.result > -4 ? 1 : 0;
continue;
}
// Error part
sums[0] += row.error * row.error;
// JTe part
for(int i = 0; i < 6; ++i)
sums[i+1] += row.error * row.J[i];
// JTJ part, unfortunately the double loop is not unrolled well...
jtj[0] += row.J[0] * row.J[0];
jtj[1] += row.J[0] * row.J[1];
jtj[2] += row.J[0] * row.J[2];
jtj[3] += row.J[0] * row.J[3];
jtj[4] += row.J[0] * row.J[4];
jtj[5] += row.J[0] * row.J[5];
jtj[6] += row.J[1] * row.J[1];
jtj[7] += row.J[1] * row.J[2];
jtj[8] += row.J[1] * row.J[3];
jtj[9] += row.J[1] * row.J[4];
jtj[10] += row.J[1] * row.J[5];
jtj[11] += row.J[2] * row.J[2];
jtj[12] += row.J[2] * row.J[3];
jtj[13] += row.J[2] * row.J[4];
jtj[14] += row.J[2] * row.J[5];
jtj[15] += row.J[3] * row.J[3];
jtj[16] += row.J[3] * row.J[4];
jtj[17] += row.J[3] * row.J[5];
jtj[18] += row.J[4] * row.J[4];
jtj[19] += row.J[4] * row.J[5];
jtj[20] += row.J[5] * row.J[5];
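// Editor's note: the 21 statements above accumulate the upper triangle of J^T J in row-major
// order; a rolled (but less well optimised) equivalent would be the sketch below.
// int k = 0;
// for(int r = 0; r < 6; ++r)
// for(int c = r; c < 6; ++c)
// jtj[k++] += row.J[r] * row.J[c];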
// extra info here
info[0] += 1;
}
}
for(int i = 0; i < 32; ++i) // copy over to shared memory
S[sline][i] = sums[i];
__syncthreads(); // wait for everyone to finish
if(sline < 32){ // sum up columns and copy to global memory in the final 32 threads
for(unsigned i = 1; i < blockDim.x; ++i)
S[0][sline] += S[i][sline];
out[sline+blockIdx.x*32] = S[0][sline];
}
}
__global__ void trackAndReduce( float * out, const Image<float3> inVertex, const Image<float3> inNormal , const Image<float3> refVertex, const Image<float3> refNormal, const Matrix4 Ttrack, const Matrix4 view, const float dist_threshold, const float normal_threshold ){
__shared__ float S[112][32]; // this is for the final accumulation
const uint sline = threadIdx.x;
float sums[32];
float * jtj = sums + 7;
float * info = sums + 28;
for(uint i = 0; i < 32; ++i)
sums[i] = 0;
float J[6];
for(uint y = blockIdx.x; y < inVertex.size.y; y += gridDim.x){
for(uint x = sline; x < inVertex.size.x; x += blockDim.x ){
const uint2 pixel = make_uint2(x,y);
if(inNormal[pixel].x == INVALID){
continue;
}
const float3 projectedVertex = Ttrack * inVertex[pixel];
const float3 projectedPos = view * projectedVertex;
const float2 projPixel = make_float2( projectedPos.x / projectedPos.z + 0.5f, projectedPos.y / projectedPos.z + 0.5f);
if(projPixel.x < 0 || projPixel.x > refVertex.size.x-1 || projPixel.y < 0 || projPixel.y > refVertex.size.y-1 ){
info[3] += 1;
continue;
}
const uint2 refPixel = make_uint2(projPixel.x, projPixel.y);
if(refNormal[refPixel].x == INVALID){
info[3] += 1;
continue;
}
const float3 referenceNormal = refNormal[refPixel];
const float3 diff = refVertex[refPixel] - projectedVertex;
const float3 projectedNormal = rotate(Ttrack, inNormal[pixel]);
if(length(diff) > dist_threshold ){
info[1] += 1;
continue;
}
if(dot(projectedNormal, referenceNormal) < normal_threshold){
info[2] += 1;
continue;
}
const float error = dot(referenceNormal, diff);
((float3 *)J)[0] = referenceNormal;
((float3 *)J)[1] = cross(projectedVertex, referenceNormal);
// Error part
sums[0] += error * error;
// JTe part
for(int i = 0; i < 6; ++i)
sums[i+1] += error * J[i];
// JTJ part
jtj[0] += J[0] * J[0];
jtj[1] += J[0] * J[1];
jtj[2] += J[0] * J[2];
jtj[3] += J[0] * J[3];
jtj[4] += J[0] * J[4];
jtj[5] += J[0] * J[5];
jtj[6] += J[1] * J[1];
jtj[7] += J[1] * J[2];
jtj[8] += J[1] * J[3];
jtj[9] += J[1] * J[4];
jtj[10] += J[1] * J[5];
jtj[11] += J[2] * J[2];
jtj[12] += J[2] * J[3];
jtj[13] += J[2] * J[4];
jtj[14] += J[2] * J[5];
jtj[15] += J[3] * J[3];
jtj[16] += J[3] * J[4];
jtj[17] += J[3] * J[5];
jtj[18] += J[4] * J[4];
jtj[19] += J[4] * J[5];
jtj[20] += J[5] * J[5];
// extra info here
info[0] += 1;
}
}
for(int i = 0; i < 32; ++i) // copy over to shared memory
S[sline][i] = sums[i];
__syncthreads(); // wait for everyone to finish
if(sline < 32){ // sum up columns and copy to global memory in the final 32 threads
for(unsigned i = 1; i < blockDim.x; ++i)
S[0][sline] += S[i][sline];
out[sline+blockIdx.x*32] = S[0][sline];
}
}
void KFusion::Init( const KFusionConfig & config ) {
configuration = config;
hipSetDeviceFlags(hipDeviceMapHost);
integration.init(config.volumeSize, config.volumeDimensions);
reduction.alloc(config.inputSize);
vertex.alloc(config.inputSize);
normal.alloc(config.inputSize);
rawDepth.alloc(config.inputSize);
inputDepth.resize(config.iterations.size());
inputVertex.resize(config.iterations.size());
inputNormal.resize(config.iterations.size());
for(int i = 0; i < config.iterations.size(); ++i){
inputDepth[i].alloc(config.inputSize >> i);
inputVertex[i].alloc(config.inputSize >> i);
inputNormal[i].alloc(config.inputSize >> i);
}
gaussian.alloc(make_uint2(config.radius * 2 + 1, 1));
output.alloc(make_uint2(32,8));
//generate gaussian array
hipLaunchKernelGGL(( generate_gaussian), dim3(1), dim3(gaussian.size.x), 0, 0, gaussian, config.delta, config.radius);
Reset();
}
void KFusion::Reset(){
dim3 block(32,16);
dim3 grid = divup(dim3(integration.size.x, integration.size.y), block);
hipLaunchKernelGGL(( initVolume), dim3(grid), dim3(block), 0, 0, integration, make_float2(1.0f, 0.0f));
}
void KFusion::Clear(){
integration.release();
}
void KFusion::setPose( const Matrix4 & p ){
pose = p;
}
void KFusion::setKinectDeviceDepth( const Image<uint16_t> & in){
if(configuration.inputSize.x == in.size.x)
hipLaunchKernelGGL(( mm2meters<0>), dim3(divup(rawDepth.size, configuration.imageBlock)), dim3(configuration.imageBlock), 0, 0, rawDepth, in);
else if(configuration.inputSize.x == in.size.x / 2 )
hipLaunchKernelGGL(( mm2meters<1>), dim3(divup(rawDepth.size, configuration.imageBlock)), dim3(configuration.imageBlock), 0, 0, rawDepth, in);
else
assert(false);
}
Matrix4 operator*( const Matrix4 & A, const Matrix4 & B){
Matrix4 R;
TooN::wrapMatrix<4,4>(&R.data[0].x) = TooN::wrapMatrix<4,4>(&A.data[0].x) * TooN::wrapMatrix<4,4>(&B.data[0].x);
return R;
}
template<typename P>
inline Matrix4 toMatrix4( const TooN::SE3<P> & p){
const TooN::Matrix<4, 4, float> I = TooN::Identity;
Matrix4 R;
TooN::wrapMatrix<4,4>(&R.data[0].x) = p * I;
return R;
}
Matrix4 inverse( const Matrix4 & A ){
static TooN::Matrix<4, 4, float> I = TooN::Identity;
TooN::Matrix<4,4,float> temp = TooN::wrapMatrix<4,4>(&A.data[0].x);
Matrix4 R;
TooN::wrapMatrix<4,4>(&R.data[0].x) = TooN::gaussian_elimination(temp , I );
return R;
}
std::ostream & operator<<( std::ostream & out, const Matrix4 & m ){
for(unsigned i = 0; i < 4; ++i)
out << m.data[i].x << " " << m.data[i].y << " " << m.data[i].z << " " << m.data[i].w << "\n";
return out;
}
template <typename P, typename A>
TooN::Matrix<6> makeJTJ( const TooN::Vector<21, P, A> & v ){
TooN::Matrix<6> C = TooN::Zeros;
C[0] = v.template slice<0,6>();
C[1].template slice<1,5>() = v.template slice<6,5>();
C[2].template slice<2,4>() = v.template slice<11,4>();
C[3].template slice<3,3>() = v.template slice<15,3>();
C[4].template slice<4,2>() = v.template slice<18,2>();
C[5][5] = v[20];
for(int r = 1; r < 6; ++r)
for(int c = 0; c < r; ++c)
C[r][c] = C[c][r];
return C;
}
template <typename T, typename A>
TooN::Vector<6> solve( const TooN::Vector<27, T, A> & vals ){
const TooN::Vector<6> b = vals.template slice<0,6>();
const TooN::Matrix<6> C = makeJTJ(vals.template slice<6,21>());
TooN::GR_SVD<6,6> svd(C);
return svd.backsub(b, 1e6);
}
void KFusion::Raycast(){
// raycast integration volume into the depth, vertex, normal buffers
raycastPose = pose;
hipLaunchKernelGGL(( raycast), dim3(divup(configuration.inputSize, configuration.raycastBlock)), dim3(configuration.raycastBlock), 0, 0, vertex, normal, integration, raycastPose * getInverseCameraMatrix(configuration.camera), configuration.nearPlane, configuration.farPlane, configuration.stepSize(), 0.75 * configuration.mu);
}
bool KFusion::Track() {
const Matrix4 invK = getInverseCameraMatrix(configuration.camera);
vector<dim3> grids;
for(int i = 0; i < configuration.iterations.size(); ++i)
grids.push_back(divup(configuration.inputSize >> i, configuration.imageBlock));
// filter the input depth map
hipLaunchKernelGGL(( bilateral_filter), dim3(grids[0]), dim3(configuration.imageBlock), 0, 0, inputDepth[0], rawDepth, gaussian, configuration.e_delta, configuration.radius);
// half sample the input depth maps into the pyramid levels
for(int i = 1; i < configuration.iterations.size(); ++i)
hipLaunchKernelGGL(( halfSampleRobust), dim3(grids[i]), dim3(configuration.imageBlock), 0, 0, inputDepth[i], inputDepth[i-1], configuration.e_delta * 3, 1);
// prepare the 3D information from the input depth maps
for(int i = 0; i < configuration.iterations.size(); ++i){
hipLaunchKernelGGL(( depth2vertex), dim3(grids[i]), dim3(configuration.imageBlock), 0, 0, inputVertex[i], inputDepth[i], getInverseCameraMatrix(configuration.camera / (1 << i))); // inverse camera matrix depends on level
hipLaunchKernelGGL(( vertex2normal), dim3(grids[i]), dim3(configuration.imageBlock), 0, 0, inputNormal[i], inputVertex[i] );
}
const Matrix4 oldPose = pose;
const Matrix4 projectReference = getCameraMatrix(configuration.camera) * inverse(raycastPose);
TooN::Matrix<8, 32, float, TooN::Reference::RowMajor> values(output.data());
for(int level = configuration.iterations.size()-1; level >= 0; --level){
for(int i = 0; i < configuration.iterations[level]; ++i){
if(configuration.combinedTrackAndReduce){
hipLaunchKernelGGL(( trackAndReduce), dim3(8), dim3(112), 0, 0, output.getDeviceImage().data(), inputVertex[level], inputNormal[level], vertex, normal, pose, projectReference, configuration.dist_threshold, configuration.normal_threshold );
} else {
hipLaunchKernelGGL(( track), dim3(grids[level]), dim3(configuration.imageBlock), 0, 0, reduction, inputVertex[level], inputNormal[level], vertex, normal, pose, projectReference, configuration.dist_threshold, configuration.normal_threshold);
hipLaunchKernelGGL(( reduce), dim3(8), dim3(112), 0, 0, output.getDeviceImage().data(), reduction, inputVertex[level].size ); // compute the linear system to solve
}
hipDeviceSynchronize(); // important due to async nature of kernel call
for(int j = 1; j < 8; ++j)
values[0] += values[j];
TooN::Vector<6> x = solve(values[0].slice<1,27>());
TooN::SE3<> delta(x);
pose = toMatrix4( delta ) * pose;
if(norm(x) < 1e-5)
break;
}
}
// test on both RMSE per pixel and percent of pixels tracked
if((sqrt(values(0,0) / values(0,28)) > 2e-2) || (values(0,28) / (rawDepth.size.x * rawDepth.size.y) < configuration.track_threshold) ){
pose = oldPose;
return false;
}
return true;
}
void KFusion::Integrate() {
hipLaunchKernelGGL(( integrate), dim3(divup(dim3(integration.size.x, integration.size.y), configuration.imageBlock)), dim3(configuration.imageBlock), 0, 0, integration, rawDepth, inverse(pose), getCameraMatrix(configuration.camera), configuration.mu, configuration.maxweight );
}
int printCUDAError() {
hipError_t error = hipGetLastError();
if(error)
std::cout << hipGetErrorString(error) << std::endl;
return error;
}
|
3551924d515377b873f91a370d259641b2653e42.cu
|
#include "kfusion.h"
#undef isnan
#undef isfinite
#include <iostream>
#include <TooN/TooN.h>
#include <TooN/se3.h>
#include <TooN/GR_SVD.h>
static const float INVALID = -2; // this is used to mark invalid entries in normal or vertex maps
using namespace std;
__global__ void initVolume( Volume volume, const float2 val ){
uint3 pos = make_uint3(thr2pos2());
for(pos.z = 0; pos.z < volume.size.z; ++pos.z)
volume.set(pos, val);
}
__global__ void raycast( Image<float3> pos3D, Image<float3> normal, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep){
const uint2 pos = thr2pos2();
const float4 hit = raycast( volume, pos, view, nearPlane, farPlane, step, largestep );
if(hit.w > 0){
pos3D[pos] = make_float3(hit);
float3 surfNorm = volume.grad(make_float3(hit));
if(length(surfNorm) == 0){
normal[pos].x = INVALID;
} else {
normal[pos] = normalize(surfNorm);
}
} else {
pos3D[pos] = make_float3(0);
normal[pos] = make_float3(INVALID, 0, 0);
}
}
__forceinline__ __device__ float sq( const float x ){
return x*x;
}
__global__ void integrate( Volume vol, const Image<float> depth, const Matrix4 invTrack, const Matrix4 K, const float mu, const float maxweight){
uint3 pix = make_uint3(thr2pos2());
float3 pos = invTrack * vol.pos(pix);
float3 cameraX = K * pos;
const float3 delta = rotate(invTrack, make_float3(0,0, vol.dim.z / vol.size.z));
const float3 cameraDelta = rotate(K, delta);
for(pix.z = 0; pix.z < vol.size.z; ++pix.z, pos += delta, cameraX += cameraDelta){
if(pos.z < 0.0001f) // some near plane constraint
continue;
const float2 pixel = make_float2(cameraX.x/cameraX.z + 0.5f, cameraX.y/cameraX.z + 0.5f);
if(pixel.x < 0 || pixel.x > depth.size.x-1 || pixel.y < 0 || pixel.y > depth.size.y-1)
continue;
const uint2 px = make_uint2(cameraX.x/cameraX.z + 0.5f, cameraX.y/cameraX.z + 0.5f);
if(depth[px] == 0)
continue;
const float diff = (depth[px] - cameraX.z) * sqrt(1+sq(pos.x/pos.z) + sq(pos.y/pos.z));
if(diff > -mu){
const float sdf = fminf(1.f, diff/mu);
float2 data = vol[pix];
data.x = clamp((data.y*data.x + sdf)/(data.y + 1), -1.f, 1.f);
data.y = fminf(data.y+1, maxweight);
vol.set(pix, data);
}
}
}
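// Editor's note (summary of the update rule above): each voxel stores (tsdf, weight) as
// (data.x, data.y). For a measurement with diff > -mu the truncated SDF is
// sdf = min(1, diff / mu) and the voxel is updated as a clamped running average:
//   tsdf   <- clamp((weight * tsdf + sdf) / (weight + 1), -1, 1)
//   weight <- min(weight + 1, maxweight)
// Voxels more than mu behind the measured surface are left unchanged.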
__global__ void depth2vertex( Image<float3> vertex, const Image<float> depth, const Matrix4 invK ){
const uint2 pixel = thr2pos2();
if(pixel.x >= depth.size.x || pixel.y >= depth.size.y )
return;
if(depth[pixel] > 0){
vertex[pixel] = depth[pixel] * (rotate(invK, make_float3(pixel.x, pixel.y, 1.f)));
} else {
vertex[pixel] = make_float3(0);
}
}
__global__ void vertex2normal( Image<float3> normal, const Image<float3> vertex ){
const uint2 pixel = thr2pos2();
if(pixel.x >= vertex.size.x || pixel.y >= vertex.size.y )
return;
const float3 left = vertex[make_uint2(max(int(pixel.x)-1,0), pixel.y)];
const float3 right = vertex[make_uint2(min(pixel.x+1,vertex.size.x-1), pixel.y)];
const float3 up = vertex[make_uint2(pixel.x, max(int(pixel.y)-1,0))];
const float3 down = vertex[make_uint2(pixel.x, min(pixel.y+1,vertex.size.y-1))];
if(left.z == 0 || right.z == 0 || up.z == 0 || down.z == 0) {
normal[pixel].x = INVALID;
return;
}
const float3 dxv = right - left;
const float3 dyv = down - up;
normal[pixel] = normalize(cross(dyv, dxv)); // switched dx and dy to get factor -1
}
template <int HALFSAMPLE>
__global__ void mm2meters( Image<float> depth, const Image<ushort> in ){
const uint2 pixel = thr2pos2();
depth[pixel] = in[pixel * (HALFSAMPLE+1)] / 1000.0f;
}
// bilateral filter over a (2r+1)x(2r+1) window using coalesced global memory reads
__global__ void bilateral_filter(Image<float> out, const Image<float> in, const Image<float> gaussian, const float e_d, const int r) {
const uint2 pos = thr2pos2();
if(in[pos] == 0){
out[pos] = 0;
return;
}
float sum = 0.0f;
float t = 0.0f;
const float center = in[pos];
for(int i = -r; i <= r; ++i) {
for(int j = -r; j <= r; ++j) {
const float curPix = in[make_uint2(clamp(pos.x + i, 0u, in.size.x-1), clamp(pos.y + j, 0u, in.size.y-1))];
if(curPix > 0){
const float mod = sq(curPix - center);
const float factor = gaussian[make_uint2(i + r, 0)] * gaussian[make_uint2(j + r, 0)] * __expf(-mod / (2 * e_d * e_d));
t += factor * curPix;
sum += factor;
}
}
}
out[pos] = t / sum;
}
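// Editor's note (weight summary, derived from the loop above): each neighbour at offset
// (i, j) contributes
//   w = gaussian[i + r] * gaussian[j + r] * exp(-(d_ij - d_center)^2 / (2 * e_d^2)),
// i.e. the precomputed spatial Gaussian times a range Gaussian on the depth difference;
// zero-depth neighbours are skipped and the output is normalised by the sum of weights.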
// filter and halfsample
__global__ void halfSampleRobust( Image<float> out, const Image<float> in, const float e_d, const int r){
const uint2 pixel = thr2pos2();
const uint2 centerPixel = 2 * pixel;
if(pixel.x >= out.size.x || pixel.y >= out.size.y )
return;
float sum = 0.0f;
float t = 0.0f;
const float center = in[centerPixel];
for(int i = -r + 1; i <= r; ++i){
for(int j = -r + 1; j <= r; ++j){
float current = in[make_uint2(clamp(make_int2(centerPixel.x + j, centerPixel.y + i), make_int2(0), make_int2(in.size.x - 1, in.size.y - 1)))]; // TODO simplify this!
if(fabsf(current - center) < e_d){
sum += 1.0f;
t += current;
}
}
}
out[pixel] = t / sum;
}
__global__ void generate_gaussian(Image<float> out, float delta, int radius) {
int x = threadIdx.x - radius;
out[make_uint2(threadIdx.x,0)] = __expf(-(x * x) / (2 * delta * delta));
}
__global__ void track( Image<TrackData> output, const Image<float3> inVertex, const Image<float3> inNormal , const Image<float3> refVertex, const Image<float3> refNormal, const Matrix4 Ttrack, const Matrix4 view, const float dist_threshold, const float normal_threshold ) {
const uint2 pixel = thr2pos2();
if(pixel.x >= inVertex.size.x || pixel.y >= inVertex.size.y )
return;
TrackData & row = output[pixel];
if(inNormal[pixel].x == INVALID ){
row.result = -1;
return;
}
const float3 projectedVertex = Ttrack * inVertex[pixel];
const float3 projectedPos = view * projectedVertex;
const float2 projPixel = make_float2( projectedPos.x / projectedPos.z + 0.5f, projectedPos.y / projectedPos.z + 0.5f);
if(projPixel.x < 0 || projPixel.x > refVertex.size.x-1 || projPixel.y < 0 || projPixel.y > refVertex.size.y-1 ){
row.result = -2;
return;
}
const uint2 refPixel = make_uint2(projPixel.x, projPixel.y);
const float3 referenceNormal = refNormal[refPixel];
if(referenceNormal.x == INVALID){
row.result = -3;
return;
}
const float3 diff = refVertex[refPixel] - projectedVertex;
const float3 projectedNormal = rotate(Ttrack, inNormal[pixel]);
if(length(diff) > dist_threshold ){
row.result = -4;
return;
}
if(dot(projectedNormal, referenceNormal) < normal_threshold){
row.result = -5;
return;
}
row.result = 1;
row.error = dot(referenceNormal, diff);
((float3 *)row.J)[0] = referenceNormal;
((float3 *)row.J)[1] = cross(projectedVertex, referenceNormal);
}
__global__ void reduce( float * out, const Image<TrackData> J, const uint2 size){
__shared__ float S[112][32]; // this is for the final accumulation
const uint sline = threadIdx.x;
float sums[32];
float * jtj = sums + 7;
float * info = sums + 28;
for(uint i = 0; i < 32; ++i)
sums[i] = 0;
for(uint y = blockIdx.x; y < size.y; y += gridDim.x){
for(uint x = sline; x < size.x; x += blockDim.x ){
const TrackData & row = J[make_uint2(x, y)];
if(row.result < 1){
info[1] += row.result == -4 ? 1 : 0;
info[2] += row.result == -5 ? 1 : 0;
info[3] += row.result > -4 ? 1 : 0;
continue;
}
// Error part
sums[0] += row.error * row.error;
// JTe part
for(int i = 0; i < 6; ++i)
sums[i+1] += row.error * row.J[i];
// JTJ part, unfortunately the double loop is not unrolled well...
jtj[0] += row.J[0] * row.J[0];
jtj[1] += row.J[0] * row.J[1];
jtj[2] += row.J[0] * row.J[2];
jtj[3] += row.J[0] * row.J[3];
jtj[4] += row.J[0] * row.J[4];
jtj[5] += row.J[0] * row.J[5];
jtj[6] += row.J[1] * row.J[1];
jtj[7] += row.J[1] * row.J[2];
jtj[8] += row.J[1] * row.J[3];
jtj[9] += row.J[1] * row.J[4];
jtj[10] += row.J[1] * row.J[5];
jtj[11] += row.J[2] * row.J[2];
jtj[12] += row.J[2] * row.J[3];
jtj[13] += row.J[2] * row.J[4];
jtj[14] += row.J[2] * row.J[5];
jtj[15] += row.J[3] * row.J[3];
jtj[16] += row.J[3] * row.J[4];
jtj[17] += row.J[3] * row.J[5];
jtj[18] += row.J[4] * row.J[4];
jtj[19] += row.J[4] * row.J[5];
jtj[20] += row.J[5] * row.J[5];
// extra info here
info[0] += 1;
}
}
for(int i = 0; i < 32; ++i) // copy over to shared memory
S[sline][i] = sums[i];
__syncthreads(); // wait for everyone to finish
if(sline < 32){ // sum up columns and copy to global memory in the final 32 threads
for(unsigned i = 1; i < blockDim.x; ++i)
S[0][sline] += S[i][sline];
out[sline+blockIdx.x*32] = S[0][sline];
}
}
__global__ void trackAndReduce( float * out, const Image<float3> inVertex, const Image<float3> inNormal , const Image<float3> refVertex, const Image<float3> refNormal, const Matrix4 Ttrack, const Matrix4 view, const float dist_threshold, const float normal_threshold ){
__shared__ float S[112][32]; // this is for the final accumulation
const uint sline = threadIdx.x;
float sums[32];
float * jtj = sums + 7;
float * info = sums + 28;
for(uint i = 0; i < 32; ++i)
sums[i] = 0;
float J[6];
for(uint y = blockIdx.x; y < inVertex.size.y; y += gridDim.x){
for(uint x = sline; x < inVertex.size.x; x += blockDim.x ){
const uint2 pixel = make_uint2(x,y);
if(inNormal[pixel].x == INVALID){
continue;
}
const float3 projectedVertex = Ttrack * inVertex[pixel];
const float3 projectedPos = view * projectedVertex;
const float2 projPixel = make_float2( projectedPos.x / projectedPos.z + 0.5f, projectedPos.y / projectedPos.z + 0.5f);
if(projPixel.x < 0 || projPixel.x > refVertex.size.x-1 || projPixel.y < 0 || projPixel.y > refVertex.size.y-1 ){
info[3] += 1;
continue;
}
const uint2 refPixel = make_uint2(projPixel.x, projPixel.y);
if(refNormal[refPixel].x == INVALID){
info[3] += 1;
continue;
}
const float3 referenceNormal = refNormal[refPixel];
const float3 diff = refVertex[refPixel] - projectedVertex;
const float3 projectedNormal = rotate(Ttrack, inNormal[pixel]);
if(length(diff) > dist_threshold ){
info[1] += 1;
continue;
}
if(dot(projectedNormal, referenceNormal) < normal_threshold){
info[2] += 1;
continue;
}
const float error = dot(referenceNormal, diff);
((float3 *)J)[0] = referenceNormal;
((float3 *)J)[1] = cross(projectedVertex, referenceNormal);
// Error part
sums[0] += error * error;
// JTe part
for(int i = 0; i < 6; ++i)
sums[i+1] += error * J[i];
// JTJ part
jtj[0] += J[0] * J[0];
jtj[1] += J[0] * J[1];
jtj[2] += J[0] * J[2];
jtj[3] += J[0] * J[3];
jtj[4] += J[0] * J[4];
jtj[5] += J[0] * J[5];
jtj[6] += J[1] * J[1];
jtj[7] += J[1] * J[2];
jtj[8] += J[1] * J[3];
jtj[9] += J[1] * J[4];
jtj[10] += J[1] * J[5];
jtj[11] += J[2] * J[2];
jtj[12] += J[2] * J[3];
jtj[13] += J[2] * J[4];
jtj[14] += J[2] * J[5];
jtj[15] += J[3] * J[3];
jtj[16] += J[3] * J[4];
jtj[17] += J[3] * J[5];
jtj[18] += J[4] * J[4];
jtj[19] += J[4] * J[5];
jtj[20] += J[5] * J[5];
// extra info here
info[0] += 1;
}
}
for(int i = 0; i < 32; ++i) // copy over to shared memory
S[sline][i] = sums[i];
__syncthreads(); // wait for everyone to finish
if(sline < 32){ // sum up columns and copy to global memory in the final 32 threads
for(unsigned i = 1; i < blockDim.x; ++i)
S[0][sline] += S[i][sline];
out[sline+blockIdx.x*32] = S[0][sline];
}
}
void KFusion::Init( const KFusionConfig & config ) {
configuration = config;
cudaSetDeviceFlags(cudaDeviceMapHost);
integration.init(config.volumeSize, config.volumeDimensions);
reduction.alloc(config.inputSize);
vertex.alloc(config.inputSize);
normal.alloc(config.inputSize);
rawDepth.alloc(config.inputSize);
inputDepth.resize(config.iterations.size());
inputVertex.resize(config.iterations.size());
inputNormal.resize(config.iterations.size());
for(int i = 0; i < config.iterations.size(); ++i){
inputDepth[i].alloc(config.inputSize >> i);
inputVertex[i].alloc(config.inputSize >> i);
inputNormal[i].alloc(config.inputSize >> i);
}
gaussian.alloc(make_uint2(config.radius * 2 + 1, 1));
output.alloc(make_uint2(32,8));
//generate gaussian array
generate_gaussian<<< 1, gaussian.size.x>>>(gaussian, config.delta, config.radius);
Reset();
}
void KFusion::Reset(){
dim3 block(32,16);
dim3 grid = divup(dim3(integration.size.x, integration.size.y), block);
initVolume<<<grid, block>>>(integration, make_float2(1.0f, 0.0f));
}
void KFusion::Clear(){
integration.release();
}
void KFusion::setPose( const Matrix4 & p ){
pose = p;
}
void KFusion::setKinectDeviceDepth( const Image<uint16_t> & in){
if(configuration.inputSize.x == in.size.x)
mm2meters<0><<<divup(rawDepth.size, configuration.imageBlock), configuration.imageBlock>>>(rawDepth, in);
else if(configuration.inputSize.x == in.size.x / 2 )
mm2meters<1><<<divup(rawDepth.size, configuration.imageBlock), configuration.imageBlock>>>(rawDepth, in);
else
assert(false);
}
Matrix4 operator*( const Matrix4 & A, const Matrix4 & B){
Matrix4 R;
TooN::wrapMatrix<4,4>(&R.data[0].x) = TooN::wrapMatrix<4,4>(&A.data[0].x) * TooN::wrapMatrix<4,4>(&B.data[0].x);
return R;
}
template<typename P>
inline Matrix4 toMatrix4( const TooN::SE3<P> & p){
const TooN::Matrix<4, 4, float> I = TooN::Identity;
Matrix4 R;
TooN::wrapMatrix<4,4>(&R.data[0].x) = p * I;
return R;
}
Matrix4 inverse( const Matrix4 & A ){
static TooN::Matrix<4, 4, float> I = TooN::Identity;
TooN::Matrix<4,4,float> temp = TooN::wrapMatrix<4,4>(&A.data[0].x);
Matrix4 R;
TooN::wrapMatrix<4,4>(&R.data[0].x) = TooN::gaussian_elimination(temp , I );
return R;
}
std::ostream & operator<<( std::ostream & out, const Matrix4 & m ){
for(unsigned i = 0; i < 4; ++i)
out << m.data[i].x << " " << m.data[i].y << " " << m.data[i].z << " " << m.data[i].w << "\n";
return out;
}
template <typename P, typename A>
TooN::Matrix<6> makeJTJ( const TooN::Vector<21, P, A> & v ){
TooN::Matrix<6> C = TooN::Zeros;
C[0] = v.template slice<0,6>();
C[1].template slice<1,5>() = v.template slice<6,5>();
C[2].template slice<2,4>() = v.template slice<11,4>();
C[3].template slice<3,3>() = v.template slice<15,3>();
C[4].template slice<4,2>() = v.template slice<18,2>();
C[5][5] = v[20];
for(int r = 1; r < 6; ++r)
for(int c = 0; c < r; ++c)
C[r][c] = C[c][r];
return C;
}
template <typename T, typename A>
TooN::Vector<6> solve( const TooN::Vector<27, T, A> & vals ){
const TooN::Vector<6> b = vals.template slice<0,6>();
const TooN::Matrix<6> C = makeJTJ(vals.template slice<6,21>());
TooN::GR_SVD<6,6> svd(C);
return svd.backsub(b, 1e6);
}
void KFusion::Raycast(){
// raycast integration volume into the depth, vertex, normal buffers
raycastPose = pose;
raycast<<<divup(configuration.inputSize, configuration.raycastBlock), configuration.raycastBlock>>>(vertex, normal, integration, raycastPose * getInverseCameraMatrix(configuration.camera), configuration.nearPlane, configuration.farPlane, configuration.stepSize(), 0.75 * configuration.mu);
}
bool KFusion::Track() {
const Matrix4 invK = getInverseCameraMatrix(configuration.camera);
vector<dim3> grids;
for(int i = 0; i < configuration.iterations.size(); ++i)
grids.push_back(divup(configuration.inputSize >> i, configuration.imageBlock));
// filter the input depth map
bilateral_filter<<<grids[0], configuration.imageBlock>>>(inputDepth[0], rawDepth, gaussian, configuration.e_delta, configuration.radius);
// half sample the input depth maps into the pyramid levels
for(int i = 1; i < configuration.iterations.size(); ++i)
halfSampleRobust<<<grids[i], configuration.imageBlock>>>(inputDepth[i], inputDepth[i-1], configuration.e_delta * 3, 1);
// prepare the 3D information from the input depth maps
for(int i = 0; i < configuration.iterations.size(); ++i){
depth2vertex<<<grids[i], configuration.imageBlock>>>( inputVertex[i], inputDepth[i], getInverseCameraMatrix(configuration.camera / (1 << i))); // inverse camera matrix depends on level
vertex2normal<<<grids[i], configuration.imageBlock>>>( inputNormal[i], inputVertex[i] );
}
const Matrix4 oldPose = pose;
const Matrix4 projectReference = getCameraMatrix(configuration.camera) * inverse(raycastPose);
TooN::Matrix<8, 32, float, TooN::Reference::RowMajor> values(output.data());
for(int level = configuration.iterations.size()-1; level >= 0; --level){
for(int i = 0; i < configuration.iterations[level]; ++i){
if(configuration.combinedTrackAndReduce){
trackAndReduce<<<8, 112>>>( output.getDeviceImage().data(), inputVertex[level], inputNormal[level], vertex, normal, pose, projectReference, configuration.dist_threshold, configuration.normal_threshold );
} else {
track<<<grids[level], configuration.imageBlock>>>( reduction, inputVertex[level], inputNormal[level], vertex, normal, pose, projectReference, configuration.dist_threshold, configuration.normal_threshold);
reduce<<<8, 112>>>( output.getDeviceImage().data(), reduction, inputVertex[level].size ); // compute the linear system to solve
}
cudaDeviceSynchronize(); // important due to async nature of kernel call
for(int j = 1; j < 8; ++j)
values[0] += values[j];
TooN::Vector<6> x = solve(values[0].slice<1,27>());
TooN::SE3<> delta(x);
pose = toMatrix4( delta ) * pose;
if(norm(x) < 1e-5)
break;
}
}
// test on both RMSE per pixel and percent of pixels tracked
if((sqrt(values(0,0) / values(0,28)) > 2e-2) || (values(0,28) / (rawDepth.size.x * rawDepth.size.y) < configuration.track_threshold) ){
pose = oldPose;
return false;
}
return true;
}
void KFusion::Integrate() {
integrate<<<divup(dim3(integration.size.x, integration.size.y), configuration.imageBlock), configuration.imageBlock>>>( integration, rawDepth, inverse(pose), getCameraMatrix(configuration.camera), configuration.mu, configuration.maxweight );
}
int printCUDAError() {
cudaError_t error = cudaGetLastError();
if(error)
std::cout << cudaGetErrorString(error) << std::endl;
return error;
}
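// Editor's note: a minimal, hypothetical usage sketch (not part of the original file).
// Calling printCUDAError() right after a launch surfaces asynchronous kernel errors,
// e.g. inside KFusion::Reset():
//
//   initVolume<<<grid, block>>>(integration, make_float2(1.0f, 0.0f));
//   if (printCUDAError())
//       std::cout << "initVolume launch failed" << std::endl;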
|
a96181635dba5fdeeb08cb1c6c74f5942ef15f59.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
MIT License
Copyright (c) 2019 Michael Kösel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "kernel/particle_to_grid.h"
#include "cuda_utils.h"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
__device__ bool is_first_particle(Particle* particle_array, int i)
{
return i == 0 || particle_array[i].grid_cell_idx != particle_array[i - 1].grid_cell_idx;
}
__device__ bool is_last_particle(Particle* particle_array, int particle_count, int i)
{
return i == particle_count - 1 || particle_array[i].grid_cell_idx != particle_array[i + 1].grid_cell_idx;
}
__global__ void particleToGridKernel(Particle* particle_array, GridCell* grid_cell_array, float* weight_array, int particle_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
{
int j = particle_array[i].grid_cell_idx;
if (is_first_particle(particle_array, i))
{
grid_cell_array[j].start_idx = i;
}
if (is_last_particle(particle_array, particle_count, i))
{
grid_cell_array[j].end_idx = i;
}
//printf("Cell: %d, Start idx: %d, End idx: %d\n", j, grid_cell_array[j].start_idx, grid_cell_array[j].end_idx);
weight_array[i] = particle_array[i].weight;
}
}
|
a96181635dba5fdeeb08cb1c6c74f5942ef15f59.cu
|
/*
MIT License
Copyright (c) 2019 Michael Kösel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "kernel/particle_to_grid.h"
#include "cuda_utils.h"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
__device__ bool is_first_particle(Particle* particle_array, int i)
{
return i == 0 || particle_array[i].grid_cell_idx != particle_array[i - 1].grid_cell_idx;
}
__device__ bool is_last_particle(Particle* particle_array, int particle_count, int i)
{
return i == particle_count - 1 || particle_array[i].grid_cell_idx != particle_array[i + 1].grid_cell_idx;
}
__global__ void particleToGridKernel(Particle* particle_array, GridCell* grid_cell_array, float* weight_array, int particle_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
{
int j = particle_array[i].grid_cell_idx;
if (is_first_particle(particle_array, i))
{
grid_cell_array[j].start_idx = i;
}
if (is_last_particle(particle_array, particle_count, i))
{
grid_cell_array[j].end_idx = i;
}
//printf("Cell: %d, Start idx: %d, End idx: %d\n", j, grid_cell_array[j].start_idx, grid_cell_array[j].end_idx);
weight_array[i] = particle_array[i].weight;
}
}
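// Editor's note: grid_cell_array start/end indices are only meaningful if particle_array
// is sorted by grid_cell_idx before this kernel runs (the thrust includes above suggest
// the caller does this). A minimal host-side sketch; ParticleCellCompare and the launch
// configuration are assumptions, not part of this file:
//
//   thrust::device_ptr<Particle> p(particle_array);
//   thrust::sort(p, p + particle_count, ParticleCellCompare()); // orders by grid_cell_idx
//   particleToGridKernel<<<(particle_count + 255) / 256, 256>>>(
//       particle_array, grid_cell_array, weight_array, particle_count);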
|
50982ae8fdd23fa1c5fdc3cbb842fb0493c7d645.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_2_back;
int xdim0_update_halo_kernel4_plus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_2_back;
int ydim0_update_halo_kernel4_plus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_2_back;
int xdim1_update_halo_kernel4_plus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_2_back;
int ydim1_update_halo_kernel4_plus_2_back_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_2_back*(y)+xdim0_update_halo_kernel4_plus_2_back*ydim0_update_halo_kernel4_plus_2_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_2_back*(y)+xdim1_update_halo_kernel4_plus_2_back*ydim1_update_halo_kernel4_plus_2_back*(z))
//user function
__device__
inline void update_halo_kernel4_plus_2_back(double *vol_flux_y, double *mass_flux_y, const int* fields) {
if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(0,0,2)];
if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(0,0,2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel4_plus_2_back + idx_z * 1 * xdim0_update_halo_kernel4_plus_2_back * ydim0_update_halo_kernel4_plus_2_back;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel4_plus_2_back + idx_z * 1 * xdim1_update_halo_kernel4_plus_2_back * ydim1_update_halo_kernel4_plus_2_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_2_back(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel4_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_arg args[3] = { arg0, arg1, arg2};
ops_timing_realloc(110,"update_halo_kernel4_plus_2_back");
OPS_kernels[110].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_update_halo_kernel4_plus_2_back_h || ydim0 != ydim0_update_halo_kernel4_plus_2_back_h || xdim1 != xdim1_update_halo_kernel4_plus_2_back_h || ydim1 != ydim1_update_halo_kernel4_plus_2_back_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel4_plus_2_back, &xdim0, sizeof(int) );
xdim0_update_halo_kernel4_plus_2_back_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel4_plus_2_back, &ydim0, sizeof(int) );
ydim0_update_halo_kernel4_plus_2_back_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel4_plus_2_back, &xdim1, sizeof(int) );
xdim1_update_halo_kernel4_plus_2_back_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel4_plus_2_back, &ydim1, sizeof(int) );
ydim1_update_halo_kernel4_plus_2_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
ops_timers_core(&c1,&t1);
OPS_kernels[110].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_2_back), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[110].time += t2-t1;
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[110].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[110].transfer += ops_compute_transfer(dim, range, &arg1);
}
|
50982ae8fdd23fa1c5fdc3cbb842fb0493c7d645.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_2_back;
int xdim0_update_halo_kernel4_plus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_2_back;
int ydim0_update_halo_kernel4_plus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_2_back;
int xdim1_update_halo_kernel4_plus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_2_back;
int ydim1_update_halo_kernel4_plus_2_back_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_2_back*(y)+xdim0_update_halo_kernel4_plus_2_back*ydim0_update_halo_kernel4_plus_2_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_2_back*(y)+xdim1_update_halo_kernel4_plus_2_back*ydim1_update_halo_kernel4_plus_2_back*(z))
//user function
__device__
inline void update_halo_kernel4_plus_2_back(double *vol_flux_y, double *mass_flux_y, const int* fields) {
if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(0,0,2)];
if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(0,0,2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel4_plus_2_back + idx_z * 1 * xdim0_update_halo_kernel4_plus_2_back * ydim0_update_halo_kernel4_plus_2_back;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel4_plus_2_back + idx_z * 1 * xdim1_update_halo_kernel4_plus_2_back * ydim1_update_halo_kernel4_plus_2_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_2_back(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel4_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_arg args[3] = { arg0, arg1, arg2};
ops_timing_realloc(110,"update_halo_kernel4_plus_2_back");
OPS_kernels[110].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_update_halo_kernel4_plus_2_back_h || ydim0 != ydim0_update_halo_kernel4_plus_2_back_h || xdim1 != xdim1_update_halo_kernel4_plus_2_back_h || ydim1 != ydim1_update_halo_kernel4_plus_2_back_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel4_plus_2_back, &xdim0, sizeof(int) );
xdim0_update_halo_kernel4_plus_2_back_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel4_plus_2_back, &ydim0, sizeof(int) );
ydim0_update_halo_kernel4_plus_2_back_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel4_plus_2_back, &xdim1, sizeof(int) );
xdim1_update_halo_kernel4_plus_2_back_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel4_plus_2_back, &ydim1, sizeof(int) );
ydim1_update_halo_kernel4_plus_2_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
ops_timers_core(&c1,&t1);
OPS_kernels[110].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel4_plus_2_back<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[110].time += t2-t1;
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[110].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[110].transfer += ops_compute_transfer(dim, range, &arg1);
}
|
99eb79756d507643d924399ae920b42e4e35db0a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int num = prob_.num();
const int dim = prob_.count() / num;
const int spatial_dim = prob_.height() * prob_.width();
const int nthreads = num * spatial_dim;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
num, dim, spatial_dim, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
loss /= count;
} else {
loss /= num;
}
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int num = prob_.num();
const int dim = prob_.count() / num;
const int spatial_dim = prob_.height() * prob_.width();
const int nthreads = num * spatial_dim;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
num, dim, spatial_dim, has_ignore_label_, ignore_label_, counts);
const Dtype loss_weight = top[0]->cpu_diff()[0];
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
} else {
caffe_gpu_scal(prob_.count(), loss_weight / num, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
|
99eb79756d507643d924399ae920b42e4e35db0a.cu
|
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int num = prob_.num();
const int dim = prob_.count() / num;
const int spatial_dim = prob_.height() * prob_.width();
const int nthreads = num * spatial_dim;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
num, dim, spatial_dim, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
loss /= count;
} else {
loss /= num;
}
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
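// Editor's note (summary of the forward pass above): each of the num * spatial_dim
// positions contributes -log(max(p[label], FLT_MIN)) unless its label is ignored, in
// which case it contributes zero; caffe_gpu_asum sums these per-pixel losses and the
// total is divided by the number of counted pixels when normalize_ is set, otherwise
// by num.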
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int num = prob_.num();
const int dim = prob_.count() / num;
const int spatial_dim = prob_.height() * prob_.width();
const int nthreads = num * spatial_dim;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
num, dim, spatial_dim, has_ignore_label_, ignore_label_, counts);
const Dtype loss_weight = top[0]->cpu_diff()[0];
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
} else {
caffe_gpu_scal(prob_.count(), loss_weight / num, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
|
e5e2781fa7a51c1804aa5e24dd496160aabce302.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 05-Jan-2012 10:49:03
//
// user function
__device__
#include "update.h"
// CUDA kernel function
__global__ void op_cuda_update(
double *arg0,
double *arg1,
double *arg2,
double *arg3,
double *arg4,
int offset_s,
int set_size ) {
double arg0_l[4];
double arg1_l[4];
double arg2_l[4];
double arg4_l[1];
for (int d=0; d<1; d++) arg4_l[d]=ZERO_double;
int tid = threadIdx.x%OP_WARPSIZE;
extern __shared__ char shared[];
char *arg_s = shared + offset_s*(threadIdx.x/OP_WARPSIZE);
// process set elements
for (int n=threadIdx.x+blockIdx.x*blockDim.x;
n<set_size; n+=blockDim.x*gridDim.x) {
int offset = n - tid;
int nelems = MIN(OP_WARPSIZE,set_size-offset);
// copy data into shared memory, then into local
for (int m=0; m<4; m++)
((double *)arg_s)[tid+m*nelems] = arg0[tid+m*nelems+offset*4];
for (int m=0; m<4; m++)
arg0_l[m] = ((double *)arg_s)[m+tid*4];
for (int m=0; m<4; m++)
((double *)arg_s)[tid+m*nelems] = arg2[tid+m*nelems+offset*4];
for (int m=0; m<4; m++)
arg2_l[m] = ((double *)arg_s)[m+tid*4];
// user-supplied kernel call
update( arg0_l, arg1_l, arg2_l, arg3+n, arg4_l );
// copy back into shared memory, then to device
for (int m=0; m<4; m++)
((double *)arg_s)[m+tid*4] = arg1_l[m];
for (int m=0; m<4; m++)
arg1[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems];
for (int m=0; m<4; m++)
((double *)arg_s)[m+tid*4] = arg2_l[m];
for (int m=0; m<4; m++)
arg2[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems];
}
// global reductions
for(int d=0; d<1; d++)
op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
// host stub function
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4 ){
double *arg4h = (double *)arg4.data;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update \n");
}
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
int nthread = OP_BLOCK_SIZE_4;
#else
// int nthread = OP_block_size;
int nthread = 128;
#endif
int nblocks = 200;
// transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
((double *)arg4.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
// work out shared memory requirements per element
int nshared = 0;
nshared = MAX(nshared,sizeof(double)*4);
nshared = MAX(nshared,sizeof(double)*4);
nshared = MAX(nshared,sizeof(double)*4);
// execute plan
int offset_s = nshared*OP_WARPSIZE;
nshared = MAX(nshared*nthread,reduct_size*nthread);
hipLaunchKernelGGL(( op_cuda_update), dim3(nblocks),dim3(nthread),nshared, 0, (double *) arg0.data_d,
(double *) arg1.data_d,
(double *) arg2.data_d,
(double *) arg3.data_d,
(double *) arg4.data_d,
offset_s,
set->size );
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_update execution failed\n");
// transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
arg4h[d] = arg4h[d] + ((double *)arg4.data)[d+b*1];
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(4);
OP_kernels[4].name = name;
OP_kernels[4].count += 1;
OP_kernels[4].time += wall_t2 - wall_t1;
OP_kernels[4].transfer += (float)set->size * arg0.size;
OP_kernels[4].transfer += (float)set->size * arg1.size;
OP_kernels[4].transfer += (float)set->size * arg2.size * 2.0f;
OP_kernels[4].transfer += (float)set->size * arg3.size;
}
|
e5e2781fa7a51c1804aa5e24dd496160aabce302.cu
|
//
// auto-generated by op2.m on 05-Jan-2012 10:49:03
//
// user function
__device__
#include "update.h"
// CUDA kernel function
__global__ void op_cuda_update(
double *arg0,
double *arg1,
double *arg2,
double *arg3,
double *arg4,
int offset_s,
int set_size ) {
double arg0_l[4];
double arg1_l[4];
double arg2_l[4];
double arg4_l[1];
for (int d=0; d<1; d++) arg4_l[d]=ZERO_double;
int tid = threadIdx.x%OP_WARPSIZE;
extern __shared__ char shared[];
char *arg_s = shared + offset_s*(threadIdx.x/OP_WARPSIZE);
// process set elements
for (int n=threadIdx.x+blockIdx.x*blockDim.x;
n<set_size; n+=blockDim.x*gridDim.x) {
int offset = n - tid;
int nelems = MIN(OP_WARPSIZE,set_size-offset);
// copy data into shared memory, then into local
for (int m=0; m<4; m++)
((double *)arg_s)[tid+m*nelems] = arg0[tid+m*nelems+offset*4];
for (int m=0; m<4; m++)
arg0_l[m] = ((double *)arg_s)[m+tid*4];
for (int m=0; m<4; m++)
((double *)arg_s)[tid+m*nelems] = arg2[tid+m*nelems+offset*4];
for (int m=0; m<4; m++)
arg2_l[m] = ((double *)arg_s)[m+tid*4];
// user-supplied kernel call
update( arg0_l, arg1_l, arg2_l, arg3+n, arg4_l );
// copy back into shared memory, then to device
for (int m=0; m<4; m++)
((double *)arg_s)[m+tid*4] = arg1_l[m];
for (int m=0; m<4; m++)
arg1[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems];
for (int m=0; m<4; m++)
((double *)arg_s)[m+tid*4] = arg2_l[m];
for (int m=0; m<4; m++)
arg2[tid+m*nelems+offset*4] = ((double *)arg_s)[tid+m*nelems];
}
// global reductions
for(int d=0; d<1; d++)
op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
// host stub function
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4 ){
double *arg4h = (double *)arg4.data;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update \n");
}
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
int nthread = OP_BLOCK_SIZE_4;
#else
// int nthread = OP_block_size;
int nthread = 128;
#endif
int nblocks = 200;
// transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
((double *)arg4.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
// work out shared memory requirements per element
int nshared = 0;
nshared = MAX(nshared,sizeof(double)*4);
nshared = MAX(nshared,sizeof(double)*4);
nshared = MAX(nshared,sizeof(double)*4);
// execute plan
int offset_s = nshared*OP_WARPSIZE;
nshared = MAX(nshared*nthread,reduct_size*nthread);
op_cuda_update<<<nblocks,nthread,nshared>>>( (double *) arg0.data_d,
(double *) arg1.data_d,
(double *) arg2.data_d,
(double *) arg3.data_d,
(double *) arg4.data_d,
offset_s,
set->size );
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("op_cuda_update execution failed\n");
// transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
arg4h[d] = arg4h[d] + ((double *)arg4.data)[d+b*1];
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(4);
OP_kernels[4].name = name;
OP_kernels[4].count += 1;
OP_kernels[4].time += wall_t2 - wall_t1;
OP_kernels[4].transfer += (float)set->size * arg0.size;
OP_kernels[4].transfer += (float)set->size * arg1.size;
OP_kernels[4].transfer += (float)set->size * arg2.size * 2.0f;
OP_kernels[4].transfer += (float)set->size * arg3.size;
}
|
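A note on the reduction pattern in the op_cuda_update pair above: each thread keeps a private partial in arg4_l, op_reduction folds it into one value per block, and the host stub finishes the sum over maxblocks partials after mvReductArraysToHost. Below is a minimal standalone sketch of that block-partials-plus-host-finish pattern, written without OP2; the kernel name, buffer names and sizes are illustrative only.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
// One partial sum per block, finished on the host. blockDim.x is assumed
// to be a power of two so the shared-memory tree reduction is exact.
__global__ void partial_sums(const double *in, double *block_out, int n) {
  extern __shared__ double sdata[];
  double acc = 0.0;
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x)
    acc += in[i];
  sdata[threadIdx.x] = acc;
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (threadIdx.x < s) sdata[threadIdx.x] += sdata[threadIdx.x + s];
    __syncthreads();
  }
  if (threadIdx.x == 0) block_out[blockIdx.x] = sdata[0];
}
int main() {
  const int n = 1 << 20, nthread = 128, nblocks = 200;
  std::vector<double> h_in(n, 1.0), h_part(nblocks);
  double *d_in, *d_part;
  cudaMalloc((void **) &d_in, n * sizeof(double));
  cudaMalloc((void **) &d_part, nblocks * sizeof(double));
  cudaMemcpy(d_in, h_in.data(), n * sizeof(double), cudaMemcpyHostToDevice);
  partial_sums<<<nblocks, nthread, nthread * sizeof(double)>>>(d_in, d_part, n);
  cudaMemcpy(h_part.data(), d_part, nblocks * sizeof(double), cudaMemcpyDeviceToHost);
  double total = 0.0;
  for (int b = 0; b < nblocks; b++) total += h_part[b];   // final sum on the CPU, as in the host stub
  printf("sum = %.0f (expected %d)\n", total, n);
  cudaFree(d_in); cudaFree(d_part);
  return 0;
}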
0359604a68a82e3d99417df956049a1bea460b41.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ************************************************
// cuda_rng_options.cu
// authors: Lee Howes and David B. Thomas
//
// Main source file for cuda versions of code.
// Built to an object, hence the extern "C"
// exports for calling from the surrounding
// framework code.
// Contains a function to call the Wallace
// random number generator outputting to
// a pool, one to call the Tausworthe
// generator (these are used by the
// statistical tests) and a speed test
// function which calls versions which output
// timing results.
// ************************************************
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
#include "rand_helpers.h"
#include "constants.h"
unsigned int timer_pre_computation_init_wallace = 0;
unsigned int timer_pre_computation_init_tw = 0;
unsigned int *tauswortheSeeds = 0;
unsigned int *deviceTauswortheSeeds = 0;
float *hostPool = 0;
float *devPool = 0;
// includes, kernels
#include <wallace_kernel.cu>
#include <tausworthe_kernel.cu>
#include <rng_kernel.cu>
#include <lookback_option_kernel.cu>
#include <asian_option_kernel.cu>
void montecarloHost();
////////////////////////////////////////////////////////////////////////////////
//! Wallace generator for random number quality tests
////////////////////////////////////////////////////////////////////////////////
extern "C" void cuda_wallace_two_block(unsigned int seed, float *chi2Corrections, float *hostPool, float *poolOut, float *output)
{
CUT_CHECK_DEVICE();
initRand();
// Host mallocs
// allocate device memory for pool and wallace output
// allocate a pool and fill with normal random numbers
float *devPool, *deviceGeneratedRandomNumberPool, *deviceChi2Corrections;
float *refOutput = (float *) malloc(4 * WALLACE_OUTPUT_SIZE);
// just to keep call happy
float *devicePrices, *deviceStrike, *deviceYears;
CUDA_SAFE_CALL(hipMalloc((void **) &devicePrices, 4));
CUDA_SAFE_CALL(hipMalloc((void **) &deviceStrike, 4));
CUDA_SAFE_CALL(hipMalloc((void **) &deviceYears, 4));
CUDA_SAFE_CALL(hipMalloc((void **) &deviceChi2Corrections, 4 * WALLACE_NUM_BLOCKS * WALLACE_NUM_OUTPUTS_PER_RUN));
CUDA_SAFE_CALL(hipMalloc((void **) &devPool, 4 * WALLACE_TOTAL_POOL_SIZE));
CUDA_SAFE_CALL(hipMalloc((void **) &deviceGeneratedRandomNumberPool, 4 * WALLACE_OUTPUT_SIZE));
// Perform copies to GPU
// copy the start pool in
CUDA_SAFE_CALL(hipMemcpy(devPool, hostPool, 4 * WALLACE_TOTAL_POOL_SIZE, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(deviceChi2Corrections, chi2Corrections, 4 * WALLACE_NUM_BLOCKS * WALLACE_NUM_OUTPUTS_PER_RUN, hipMemcpyHostToDevice));
// setup execution parameters and execute
dim3 wallace_grid(WALLACE_NUM_BLOCKS, 1, 1);
dim3 wallace_threads(WALLACE_NUM_THREADS, 1, 1);
hipLaunchKernelGGL(( rng_wallace) , dim3(wallace_grid), dim3(wallace_threads), WALLACE_POOL_SIZE * 4 , 0, seed, devPool, deviceGeneratedRandomNumberPool, deviceChi2Corrections);
CUT_CHECK_ERROR("Kernel execution failed: Wallace");
// get the transformed pool and the output back
CUDA_SAFE_CALL(hipMemcpy(poolOut, devPool, 4 * WALLACE_TOTAL_POOL_SIZE, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(output, deviceGeneratedRandomNumberPool, 4 * WALLACE_OUTPUT_SIZE, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(deviceChi2Corrections));
CUDA_SAFE_CALL(hipFree(devPool));
CUDA_SAFE_CALL(hipFree(deviceGeneratedRandomNumberPool));
}
////////////////////////////////////////////////////////////////////////////////
//! Tausworthe generator for random number quality tests
////////////////////////////////////////////////////////////////////////////////
extern "C" void cuda_tausworthe(unsigned *pool, float *output)
{
CUT_CHECK_DEVICE();
initRand();
// Host mallocs
// allocate device memory for pool and wallace output
// allocate a pool and fill with normal random numbers
unsigned *devPool;
float *deviceGeneratedRandomNumberPool;
CUDA_SAFE_CALL(hipMalloc((void **) &devPool, 4 * TAUSWORTHE_TOTAL_SEED_SIZE));
CUDA_SAFE_CALL(hipMalloc((void **) &deviceGeneratedRandomNumberPool, 4 * TAUSWORTHE_OUTPUT_SIZE));
// Perform copies to GPU
// copy the start pool in
CUDA_SAFE_CALL(hipMemcpy(devPool, pool, 4 * TAUSWORTHE_TOTAL_SEED_SIZE, hipMemcpyHostToDevice));
// setup execution parameters and execute
dim3 tausworthe_grid(TAUSWORTHE_NUM_BLOCKS, 1, 1);
dim3 tausworthe_threads(TAUSWORTHE_NUM_THREADS, 1, 1);
hipLaunchKernelGGL(( rng_tausworthe) , dim3(tausworthe_grid), dim3(tausworthe_threads), 0 , 0, devPool, deviceGeneratedRandomNumberPool);
CUT_CHECK_ERROR("Kernel execution failed: Wallace or montecarlo");
// get the transformed pool and the output back
CUDA_SAFE_CALL(hipMemcpy(pool, devPool, 4 * TAUSWORTHE_TOTAL_SEED_SIZE, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(output, deviceGeneratedRandomNumberPool, 4 * TAUSWORTHE_OUTPUT_SIZE, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(devPool));
CUDA_SAFE_CALL(hipFree(deviceGeneratedRandomNumberPool));
}
////////////////////////////////////////////////////////////////////////////////
//! Perform montecarlo speed tests
////////////////////////////////////////////////////////////////////////////////
void speedTests()
{
CUT_CHECK_DEVICE();
CUT_SAFE_CALL(cutCreateTimer(&timer_pre_computation_init_wallace));
CUT_SAFE_CALL(cutCreateTimer(&timer_pre_computation_init_tw));
// allocate device memory for pool and wallace output
// allocate a pool and fill with normal random numbers
hostPool = (float *) malloc(4 * WALLACE_TOTAL_POOL_SIZE);
tauswortheSeeds = (unsigned int *) malloc(4 * TAUSWORTHE_NUM_SEEDS);
CUDA_SAFE_CALL(hipMalloc((void **) &devPool, 4 * WALLACE_TOTAL_POOL_SIZE));
CUDA_SAFE_CALL(hipMalloc((void **) &deviceTauswortheSeeds, 4 * TAUSWORTHE_NUM_SEEDS));
CUT_SAFE_CALL(cutStartTimer(timer_pre_computation_init_wallace));
// Fill wallace initialisation pool with random numbers
double sumSquares = 0;
for (unsigned i = 0; i < WALLACE_TOTAL_POOL_SIZE; i++)
{
float x = RandN();
sumSquares += x * x;
hostPool[i] = x;
}
double scale = sqrt(WALLACE_TOTAL_POOL_SIZE / sumSquares);
for (unsigned i = 0; i < WALLACE_TOTAL_POOL_SIZE; i++)
{
float x = hostPool[i];
hostPool[i] = x * scale;
}
CUT_SAFE_CALL(cutStopTimer(timer_pre_computation_init_wallace));
CUT_SAFE_CALL(cutStartTimer(timer_pre_computation_init_tw));
// Prepare Tausworthe seeds
for (unsigned i = 0; i < TAUSWORTHE_NUM_SEEDS; i++)
{
tauswortheSeeds[i] = (unsigned int) Rand();
}
CUT_SAFE_CALL(cutStopTimer(timer_pre_computation_init_tw));
// Upload tausworthe seeds
CUDA_SAFE_CALL(hipMemcpy(deviceTauswortheSeeds, tauswortheSeeds, 4 * TAUSWORTHE_NUM_SEEDS, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(devPool, hostPool, 4 * WALLACE_TOTAL_POOL_SIZE, hipMemcpyHostToDevice));
// Execute the speed tests
// RNG only
computeRNG();
// Asian options
computeAsianOptions();
// Lookback options
computeLookbackOptions();
free(hostPool);
free(tauswortheSeeds);
printf("\n\n");
printf("Initialisation time for options: %f (ms)\n", cutGetTimerValue(timer_pre_computation_init_tw));
printf("Initialisation time for wallace: %f (ms)\n", cutGetTimerValue(timer_pre_computation_init_wallace));
CUT_SAFE_CALL(cutDeleteTimer(timer_pre_computation_init_tw));
CUT_SAFE_CALL(cutDeleteTimer(timer_pre_computation_init_wallace));
CUDA_SAFE_CALL(hipFree(devPool));
CUDA_SAFE_CALL(hipFree(deviceTauswortheSeeds));
}
|
0359604a68a82e3d99417df956049a1bea460b41.cu
|
// ************************************************
// cuda_rng_options.cu
// authors: Lee Howes and David B. Thomas
//
// Main source file for cuda versions of code.
// Built to an object, hence the extern "C"
// exports for calling from the surrounding
// framework code.
// Contains a function to call the Wallace
// random number generator outputting to
// a pool, one to call the Tausworthe
// generator (these are used by the
// statistical tests) and a speed test
// function which calls versions which output
// timing results.
// ************************************************
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
#include "rand_helpers.h"
#include "constants.h"
unsigned int timer_pre_computation_init_wallace = 0;
unsigned int timer_pre_computation_init_tw = 0;
unsigned int *tauswortheSeeds = 0;
unsigned int *deviceTauswortheSeeds = 0;
float *hostPool = 0;
float *devPool = 0;
// includes, kernels
#include <wallace_kernel.cu>
#include <tausworthe_kernel.cu>
#include <rng_kernel.cu>
#include <lookback_option_kernel.cu>
#include <asian_option_kernel.cu>
void montecarloHost();
////////////////////////////////////////////////////////////////////////////////
//! Wallace generator for random number quality tests
////////////////////////////////////////////////////////////////////////////////
extern "C" void cuda_wallace_two_block(unsigned int seed, float *chi2Corrections, float *hostPool, float *poolOut, float *output)
{
CUT_CHECK_DEVICE();
initRand();
// Host mallocs
// allocate device memory for pool and wallace output
// allocate a pool and fill with normal random numbers
float *devPool, *deviceGeneratedRandomNumberPool, *deviceChi2Corrections;
float *refOutput = (float *) malloc(4 * WALLACE_OUTPUT_SIZE);
// just to keep call happy
float *devicePrices, *deviceStrike, *deviceYears;
CUDA_SAFE_CALL(cudaMalloc((void **) &devicePrices, 4));
CUDA_SAFE_CALL(cudaMalloc((void **) &deviceStrike, 4));
CUDA_SAFE_CALL(cudaMalloc((void **) &deviceYears, 4));
CUDA_SAFE_CALL(cudaMalloc((void **) &deviceChi2Corrections, 4 * WALLACE_NUM_BLOCKS * WALLACE_NUM_OUTPUTS_PER_RUN));
CUDA_SAFE_CALL(cudaMalloc((void **) &devPool, 4 * WALLACE_TOTAL_POOL_SIZE));
CUDA_SAFE_CALL(cudaMalloc((void **) &deviceGeneratedRandomNumberPool, 4 * WALLACE_OUTPUT_SIZE));
// Perform copies to GPU
// copy the start pool in
CUDA_SAFE_CALL(cudaMemcpy(devPool, hostPool, 4 * WALLACE_TOTAL_POOL_SIZE, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(deviceChi2Corrections, chi2Corrections, 4 * WALLACE_NUM_BLOCKS * WALLACE_NUM_OUTPUTS_PER_RUN, cudaMemcpyHostToDevice));
// setup execution parameters and execute
dim3 wallace_grid(WALLACE_NUM_BLOCKS, 1, 1);
dim3 wallace_threads(WALLACE_NUM_THREADS, 1, 1);
rng_wallace <<< wallace_grid, wallace_threads, WALLACE_POOL_SIZE * 4 >>> (seed, devPool, deviceGeneratedRandomNumberPool, deviceChi2Corrections);
CUT_CHECK_ERROR("Kernel execution failed: Wallace");
// get the transformed pool and the output back
CUDA_SAFE_CALL(cudaMemcpy(poolOut, devPool, 4 * WALLACE_TOTAL_POOL_SIZE, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(output, deviceGeneratedRandomNumberPool, 4 * WALLACE_OUTPUT_SIZE, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(deviceChi2Corrections));
CUDA_SAFE_CALL(cudaFree(devPool));
CUDA_SAFE_CALL(cudaFree(deviceGeneratedRandomNumberPool));
}
////////////////////////////////////////////////////////////////////////////////
//! Tausworthe generator for random number quality tests
////////////////////////////////////////////////////////////////////////////////
extern "C" void cuda_tausworthe(unsigned *pool, float *output)
{
CUT_CHECK_DEVICE();
initRand();
// Host mallocs
// allocate device memory for pool and wallace output
// allocate a pool and fill with normal random numbers
unsigned *devPool;
float *deviceGeneratedRandomNumberPool;
CUDA_SAFE_CALL(cudaMalloc((void **) &devPool, 4 * TAUSWORTHE_TOTAL_SEED_SIZE));
CUDA_SAFE_CALL(cudaMalloc((void **) &deviceGeneratedRandomNumberPool, 4 * TAUSWORTHE_OUTPUT_SIZE));
// Perform copies to GPU
// copy the start pool in
CUDA_SAFE_CALL(cudaMemcpy(devPool, pool, 4 * TAUSWORTHE_TOTAL_SEED_SIZE, cudaMemcpyHostToDevice));
// setup execution parameters and execute
dim3 tausworthe_grid(TAUSWORTHE_NUM_BLOCKS, 1, 1);
dim3 tausworthe_threads(TAUSWORTHE_NUM_THREADS, 1, 1);
rng_tausworthe <<< tausworthe_grid, tausworthe_threads, 0 >>> (devPool, deviceGeneratedRandomNumberPool);
CUT_CHECK_ERROR("Kernel execution failed: Wallace or montecarlo");
// get the transformed pool and the output back
CUDA_SAFE_CALL(cudaMemcpy(pool, devPool, 4 * TAUSWORTHE_TOTAL_SEED_SIZE, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(output, deviceGeneratedRandomNumberPool, 4 * TAUSWORTHE_OUTPUT_SIZE, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(devPool));
CUDA_SAFE_CALL(cudaFree(deviceGeneratedRandomNumberPool));
}
////////////////////////////////////////////////////////////////////////////////
//! Perform montecarlo speed tests
////////////////////////////////////////////////////////////////////////////////
void speedTests()
{
CUT_CHECK_DEVICE();
CUT_SAFE_CALL(cutCreateTimer(&timer_pre_computation_init_wallace));
CUT_SAFE_CALL(cutCreateTimer(&timer_pre_computation_init_tw));
// allocate device memory for pool and wallace output
// allocate a pool and fill with normal random numbers
hostPool = (float *) malloc(4 * WALLACE_TOTAL_POOL_SIZE);
tauswortheSeeds = (unsigned int *) malloc(4 * TAUSWORTHE_NUM_SEEDS);
CUDA_SAFE_CALL(cudaMalloc((void **) &devPool, 4 * WALLACE_TOTAL_POOL_SIZE));
CUDA_SAFE_CALL(cudaMalloc((void **) &deviceTauswortheSeeds, 4 * TAUSWORTHE_NUM_SEEDS));
CUT_SAFE_CALL(cutStartTimer(timer_pre_computation_init_wallace));
// Fill wallace initialisation pool with random numbers
double sumSquares = 0;
for (unsigned i = 0; i < WALLACE_TOTAL_POOL_SIZE; i++)
{
float x = RandN();
sumSquares += x * x;
hostPool[i] = x;
}
double scale = sqrt(WALLACE_TOTAL_POOL_SIZE / sumSquares);
for (unsigned i = 0; i < WALLACE_TOTAL_POOL_SIZE; i++)
{
float x = hostPool[i];
hostPool[i] = x * scale;
}
CUT_SAFE_CALL(cutStopTimer(timer_pre_computation_init_wallace));
CUT_SAFE_CALL(cutStartTimer(timer_pre_computation_init_tw));
// Prepare Tausworthe seeds
for (unsigned i = 0; i < TAUSWORTHE_NUM_SEEDS; i++)
{
tauswortheSeeds[i] = (unsigned int) Rand();
}
CUT_SAFE_CALL(cutStopTimer(timer_pre_computation_init_tw));
// Upload tausworthe seeds
CUDA_SAFE_CALL(cudaMemcpy(deviceTauswortheSeeds, tauswortheSeeds, 4 * TAUSWORTHE_NUM_SEEDS, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(devPool, hostPool, 4 * WALLACE_TOTAL_POOL_SIZE, cudaMemcpyHostToDevice));
// Execute the speed tests
// RNG only
computeRNG();
// Asian options
computeAsianOptions();
// Lookback options
computeLookbackOptions();
free(hostPool);
free(tauswortheSeeds);
printf("\n\n");
printf("Initialisation time for options: %f (ms)\n", cutGetTimerValue(timer_pre_computation_init_tw));
printf("Initialisation time for wallace: %f (ms)\n", cutGetTimerValue(timer_pre_computation_init_wallace));
CUT_SAFE_CALL(cutDeleteTimer(timer_pre_computation_init_tw));
CUT_SAFE_CALL(cutDeleteTimer(timer_pre_computation_init_wallace));
CUDA_SAFE_CALL(cudaFree(devPool));
CUDA_SAFE_CALL(cudaFree(deviceTauswortheSeeds));
}
|
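Both copies of speedTests() above fill the Wallace start pool with Gaussian samples and rescale it so the sum of squares equals the pool size (scale = sqrt(N / sum x_i^2)), i.e. the pool is forced to unit RMS before upload. A host-only sketch of that normalisation follows; RandN from rand_helpers.h is replaced here by std::normal_distribution, and the pool size is a stand-in, not the framework's WALLACE_TOTAL_POOL_SIZE.
#include <cmath>
#include <cstdio>
#include <random>
#include <vector>
int main() {
  const size_t pool_size = 1 << 16;                 // stand-in pool size
  std::mt19937 gen(42);
  std::normal_distribution<float> randn(0.0f, 1.0f);
  std::vector<float> pool(pool_size);
  double sum_squares = 0.0;
  for (float &x : pool) { x = randn(gen); sum_squares += double(x) * x; }
  const double scale = std::sqrt(pool_size / sum_squares);   // force unit mean square
  for (float &x : pool) x = float(x * scale);
  double check = 0.0;
  for (float x : pool) check += double(x) * x;
  printf("sum of squares after scaling: %.3f (pool size %zu)\n", check, pool_size);
  return 0;
}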
b3286c983226e1f0f1fdf5efb20cc8067b71fa83.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* PROJECT: Pairwise sequence alignments on GPU
* FILE: psa_swwozniak_gpu
* AUTHOR(S): Alejandro Chacon <[email protected]>
* Jacopo Pantaleoni <[email protected]>
* DESCRIPTION: Device functions for the SW-Gotoh GPU implementation
* using a Wozniak approach, parallelizing by anti-diagonals.
* The implementation is warp-wide using shuffle instructions.
*/
extern "C" {
#include "../../../include/psa_pairwise_gpu.h"
}
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define MATCH_SCORE 2
#define MISMATCH_SCORE -5
#define OPEN_INDEL_SCORE -2
#define EXTEND_INDEL_SCORE -1
#ifndef QUERIES_SIZE
#define QUERIES_SIZE 100
#endif
#ifndef CANDIDATES_SIZE
#define CANDIDATES_SIZE 120
#endif
#define MAX3(a,b,c) ((a >= b) ? ((a >= c) ? a : c) : ((b >= c) ? b : c))
#define MIN3(a,b,c) ((a < b) ? ((a < c) ? a : c) : ((b < c) ? b : c))
#define WARP_SIZE 32
#define MAX_THREADS_PER_SM 128
#define CUDA_NUM_THREADS 128
#define THREADS_PER_SEGMENT 32
#define NUM_SEGMENTS (DIV_CEIL(QUERIES_SIZE, THREADS_PER_SEGMENT))
#define NUM_SW_PER_BLOCK (MAX_THREADS_PER_SM / THREADS_PER_SEGMENT)
#define NUM_WARPS (MAX_THREADS_PER_SM / WARP_SIZE)
inline __device__ int32_t reduce_max(int32_t values, int32_t column)
{
//TODO: Remember the maximal ROW (reduce using KEY and VALUE)
for (int i = 1; i < THREADS_PER_SEGMENT; i *= 2){
//TODO: Pack the 2 values to save 1 SHUFFLE & CMOV ops
int newValue = __shfl_down(values, i, 32);
int newColumn = __shfl_down(column, i, 32);
values = MAX(newValue, values);
column = (newValue >= values) ? newColumn : column;
}
return(values);
}
__global__
void localProcessSWWozniak(ASCIIEntry_t *d_CandidatesASCII, uint32_t *d_CandidatesASCIIposition,
ASCIIEntry_t *d_QueriesASCII, uint32_t *d_QueriesASCIIposition,
alignmentInfo_t *d_AlignmentsInfo, alignmentEntry_t *d_AlignmentsResults,
uint32_t querySize, uint32_t candidateSize, uint32_t candidatesNum)
{
const uint32_t idThread = blockIdx.x * MAX_THREADS_PER_SM + threadIdx.x;
const uint32_t idCandidate = idThread / THREADS_PER_SEGMENT;
const uint32_t idThreadLocalSegment = idThread % THREADS_PER_SEGMENT;
if ((threadIdx.x < MAX_THREADS_PER_SM) && (idCandidate < candidatesNum)){
typedef int32_t score_type;
__shared__ short2 tmpHValues[CANDIDATES_SIZE * NUM_SW_PER_BLOCK];
short2 *temp = tmpHValues + (CANDIDATES_SIZE * (idCandidate % NUM_WARPS));
const char* candidate = d_CandidatesASCII + d_CandidatesASCIIposition[idCandidate];
const char* query = d_QueriesASCII + d_QueriesASCIIposition[d_AlignmentsInfo[idCandidate]];
// local scores
score_type h_top, h_left, h_diag, hi;
score_type e_top, ei;
score_type f_left, fi;
score_type h_maxScore = 0;
uint32_t maxColumn = 0;
// current reference string character
uint8_t r_j;
// per-thread cache for temp values and reference string characters
// each thread loads a different value; cache values are shuffled down the warp at each iteration
score_type temp_cache_h, temp_cache_f;
uint8_t reference_cache;
// width of the current warp-block stripe of the DP matrix (always WARP_SIZE except for the last stripe)
uint32_t warp_block_width;
// compute warp-block horizontal coordinate in DP matrix for this thread
const uint32_t wi = idThreadLocalSegment + 1;
// initialize the leftmost matrix column
for(uint32_t i = idThreadLocalSegment; i < CANDIDATES_SIZE; i += WARP_SIZE)
temp[i] = make_short2(0,0);
for(uint32_t warp_block = 0; warp_block < QUERIES_SIZE; warp_block += WARP_SIZE){
// width of this block
warp_block_width = (warp_block + WARP_SIZE >= QUERIES_SIZE ? QUERIES_SIZE % WARP_SIZE : WARP_SIZE);
// compute the horizontal coordinate of the current thread in the DP matrix (including border column)
const uint32_t i = wi + warp_block;
// set top boundary values
h_top = 0;
e_top = 0;
// initialize diagonal
h_diag = 0;
// load the query string character for the current thread
const uint8_t s_i = (i <= QUERIES_SIZE ? query[i - 1] : 0);
// initialize the best score for this stripe
score_type max_score = 0;
// loop over all DP anti-diagonals, excluding the border row/column
for(uint32_t block_diag = 2; block_diag <= warp_block_width + CANDIDATES_SIZE; block_diag += WARP_SIZE){
// reload caches every WARP_SIZE diagonals
const uint32_t thread_j = (block_diag - 2) + idThreadLocalSegment;
if (thread_j < CANDIDATES_SIZE){
temp_cache_h = temp[thread_j].x;
temp_cache_f = temp[thread_j].y;
reference_cache = candidate[(block_diag - 2) + idThreadLocalSegment];
} else {
temp_cache_h = 0;
temp_cache_f = 0;
reference_cache = 0;
}
for(uint32_t diag = block_diag; diag < block_diag + WARP_SIZE; diag++){
// compute the length of this anti-diagonal (excluding border row/column)
const uint32_t diag_len = MIN3(diag - 1, WARP_SIZE, warp_block_width);
// compute vertical coordinate of the current cell in the DP matrix (including border column)
const uint32_t j = diag - wi;
// is the current cell inside the DP matrix?
if (wi <= diag_len && j <= CANDIDATES_SIZE){
if (wi == 1){
// load new temp and reference values
r_j = reference_cache;
// initialize cell to the left of the current cell
h_left = temp_cache_h;
f_left = temp_cache_f;
}
// compute the match/mismatch score
const score_type S_ij = (r_j == s_i) ? MATCH_SCORE : MISMATCH_SCORE;
ei = MAX(e_top + EXTEND_INDEL_SCORE,
h_top + OPEN_INDEL_SCORE);
fi = MAX(f_left + EXTEND_INDEL_SCORE,
h_left + OPEN_INDEL_SCORE);
hi = MAX3(h_diag + S_ij,
ei,
fi);
// clamp score to zero
hi = MAX(hi, 0);
// save off the last column
if (wi == WARP_SIZE){
temp[j - 1] = make_short2( hi, fi );
// keep track of the best score in this stripe
max_score = MAX( max_score, hi );
}
// save the best score across the entire matrix for local scoring
if (hi >= h_maxScore){
h_maxScore = hi;
maxColumn = j;
//h_alignment = make_uint2(j, i);
}
// current left becomes diagonal for next iteration on this lane
h_diag = h_left;
// current value becomes h_top for next iteration on this lane
h_top = hi;
e_top = ei;
}
// move previous cell reference value across the warp
r_j = __shfl_up(r_j, 1);
// hi becomes h_left on the next lane
h_left = __shfl_up(hi, 1);
f_left = __shfl_up(fi, 1);
// push temp_cache and reference_cache values down the warp
temp_cache_h = __shfl_down(temp_cache_h, 1);
temp_cache_f = __shfl_down(temp_cache_f, 1);
reference_cache = __shfl_down(reference_cache, 1);
}
}
}
h_maxScore = reduce_max(h_maxScore, maxColumn);
if(idThreadLocalSegment == 0){
d_AlignmentsResults[idCandidate].score = h_maxScore;
//d_AlignmentsResults[idCandidate].column = maxColumn;
d_AlignmentsResults[idCandidate].column = 0; //DEBUG
}
}
}
extern "C"
psaError_t localProcessPairwiseStream(sequences_t *candidates, sequences_t *queries, alignments_t *alignments)
{
uint32_t alignmentsPerBlock = DIV_CEIL(MAX_THREADS_PER_SM, THREADS_PER_SEGMENT);
uint32_t blocks = DIV_CEIL(candidates->num, alignmentsPerBlock);
uint32_t threads = CUDA_NUM_THREADS;
uint32_t querySize = queries->h_size[0];
uint32_t candidateSize = candidates->h_size[0];
printf("Grid Size: %d, Block Size: %d, Alignments Per Block: %d, Total alignments: %d\n", blocks, threads, alignmentsPerBlock, candidates->num);
hipLaunchKernelGGL(( localProcessSWWozniak), dim3(blocks), dim3(threads), 0, 0, candidates->d_ASCII, candidates->d_ASCIIposition,
queries->d_ASCII, queries->d_ASCIIposition,
alignments->d_info, alignments->d_results,
querySize, candidateSize, candidates->num);
hipDeviceSynchronize();
return (SUCCESS);
}
|
b3286c983226e1f0f1fdf5efb20cc8067b71fa83.cu
|
/*
* PROJECT: Pairwise sequence alignments on GPU
* FILE: psa_swwozniak_gpu
* AUTHOR(S): Alejandro Chacon <[email protected]>
* Jacopo Pantaleoni <[email protected]>
* DESCRIPTION: Device functions for the SW-Gotoh GPU implementation
* using a Wozniak approach, parallelizing by anti-diagonals.
* The implementation is warp-wide using shuffle instructions.
*/
extern "C" {
#include "../../../include/psa_pairwise_gpu.h"
}
#include <cuda_runtime.h>
#include <cuda.h>
#define MATCH_SCORE 2
#define MISMATCH_SCORE -5
#define OPEN_INDEL_SCORE -2
#define EXTEND_INDEL_SCORE -1
#ifndef QUERIES_SIZE
#define QUERIES_SIZE 100
#endif
#ifndef CANDIDATES_SIZE
#define CANDIDATES_SIZE 120
#endif
#define MAX3(a,b,c) ((a >= b) ? ((a >= c) ? a : c) : ((b >= c) ? b : c))
#define MIN3(a,b,c) ((a < b) ? ((a < c) ? a : c) : ((b < c) ? b : c))
#define WARP_SIZE 32
#define MAX_THREADS_PER_SM 128
#define CUDA_NUM_THREADS 128
#define THREADS_PER_SEGMENT 32
#define NUM_SEGMENTS (DIV_CEIL(QUERIES_SIZE, THREADS_PER_SEGMENT))
#define NUM_SW_PER_BLOCK (MAX_THREADS_PER_SM / THREADS_PER_SEGMENT)
#define NUM_WARPS (MAX_THREADS_PER_SM / WARP_SIZE)
inline __device__ int32_t reduce_max(int32_t values, int32_t column)
{
//TODO: Remember the maximal ROW (reduce using KEY and VALUE)
for (int i = 1; i < THREADS_PER_SEGMENT; i *= 2){
//TODO: Pack the 2 values to save 1 SHUFFLE & CMOV ops
int newValue = __shfl_down(values, i, 32);
int newColumn = __shfl_down(column, i, 32);
values = MAX(newValue, values);
column = (newValue >= values) ? newColumn : column;
}
return(values);
}
__global__
void localProcessSWWozniak(ASCIIEntry_t *d_CandidatesASCII, uint32_t *d_CandidatesASCIIposition,
ASCIIEntry_t *d_QueriesASCII, uint32_t *d_QueriesASCIIposition,
alignmentInfo_t *d_AlignmentsInfo, alignmentEntry_t *d_AlignmentsResults,
uint32_t querySize, uint32_t candidateSize, uint32_t candidatesNum)
{
const uint32_t idThread = blockIdx.x * MAX_THREADS_PER_SM + threadIdx.x;
const uint32_t idCandidate = idThread / THREADS_PER_SEGMENT;
const uint32_t idThreadLocalSegment = idThread % THREADS_PER_SEGMENT;
if ((threadIdx.x < MAX_THREADS_PER_SM) && (idCandidate < candidatesNum)){
typedef int32_t score_type;
__shared__ short2 tmpHValues[CANDIDATES_SIZE * NUM_SW_PER_BLOCK];
short2 *temp = tmpHValues + (CANDIDATES_SIZE * (idCandidate % NUM_WARPS));
const char* candidate = d_CandidatesASCII + d_CandidatesASCIIposition[idCandidate];
const char* query = d_QueriesASCII + d_QueriesASCIIposition[d_AlignmentsInfo[idCandidate]];
// local scores
score_type h_top, h_left, h_diag, hi;
score_type e_top, ei;
score_type f_left, fi;
score_type h_maxScore = 0;
uint32_t maxColumn = 0;
// current reference string character
uint8_t r_j;
// per-thread cache for temp values and reference string characters
// each thread loads a different value; cache values are shuffled down the warp at each iteration
score_type temp_cache_h, temp_cache_f;
uint8_t reference_cache;
// width of the current warp-block stripe of the DP matrix (always WARP_SIZE except for the last stripe)
uint32_t warp_block_width;
// compute warp-block horizontal coordinate in DP matrix for this thread
const uint32_t wi = idThreadLocalSegment + 1;
// initialize the leftmost matrix column
for(uint32_t i = idThreadLocalSegment; i < CANDIDATES_SIZE; i += WARP_SIZE)
temp[i] = make_short2(0,0);
for(uint32_t warp_block = 0; warp_block < QUERIES_SIZE; warp_block += WARP_SIZE){
// width of this block
warp_block_width = (warp_block + WARP_SIZE >= QUERIES_SIZE ? QUERIES_SIZE % WARP_SIZE : WARP_SIZE);
// compute the horizontal coordinate of the current thread in the DP matrix (including border column)
const uint32_t i = wi + warp_block;
// set top boundary values
h_top = 0;
e_top = 0;
// initialize diagonal
h_diag = 0;
// load the query string character for the current thread
const uint8_t s_i = (i <= QUERIES_SIZE ? query[i - 1] : 0);
// initialize the best score for this stripe
score_type max_score = 0;
// loop over all DP anti-diagonals, excluding the border row/column
for(uint32_t block_diag = 2; block_diag <= warp_block_width + CANDIDATES_SIZE; block_diag += WARP_SIZE){
// reload caches every WARP_SIZE diagonals
const uint32_t thread_j = (block_diag - 2) + idThreadLocalSegment;
if (thread_j < CANDIDATES_SIZE){
temp_cache_h = temp[thread_j].x;
temp_cache_f = temp[thread_j].y;
reference_cache = candidate[(block_diag - 2) + idThreadLocalSegment];
} else {
temp_cache_h = 0;
temp_cache_f = 0;
reference_cache = 0;
}
for(uint32_t diag = block_diag; diag < block_diag + WARP_SIZE; diag++){
// compute the length of this anti-diagonal (excluding border row/column)
const uint32_t diag_len = MIN3(diag - 1, WARP_SIZE, warp_block_width);
// compute vertical coordinate of the current cell in the DP matrix (including border column)
const uint32_t j = diag - wi;
// is the current cell inside the DP matrix?
if (wi <= diag_len && j <= CANDIDATES_SIZE){
if (wi == 1){
// load new temp and reference values
r_j = reference_cache;
// initialize cell to the left of the current cell
h_left = temp_cache_h;
f_left = temp_cache_f;
}
// compute the match/mismatch score
const score_type S_ij = (r_j == s_i) ? MATCH_SCORE : MISMATCH_SCORE;
ei = MAX(e_top + EXTEND_INDEL_SCORE,
h_top + OPEN_INDEL_SCORE);
fi = MAX(f_left + EXTEND_INDEL_SCORE,
h_left + OPEN_INDEL_SCORE);
hi = MAX3(h_diag + S_ij,
ei,
fi);
// clamp score to zero
hi = MAX(hi, 0);
// save off the last column
if (wi == WARP_SIZE){
temp[j - 1] = make_short2( hi, fi );
// keep track of the best score in this stripe
max_score = MAX( max_score, hi );
}
// save the best score across the entire matrix for local scoring
if (hi >= h_maxScore){
h_maxScore = hi;
maxColumn = j;
//h_alignment = make_uint2(j, i);
}
// current left becomes diagonal for next iteration on this lane
h_diag = h_left;
// current value becomes h_top for next iteration on this lane
h_top = hi;
e_top = ei;
}
// move previous cell reference value across the warp
r_j = __shfl_up(r_j, 1);
// hi becomes h_left on the next lane
h_left = __shfl_up(hi, 1);
f_left = __shfl_up(fi, 1);
// push temp_cache and reference_cache values down the warp
temp_cache_h = __shfl_down(temp_cache_h, 1);
temp_cache_f = __shfl_down(temp_cache_f, 1);
reference_cache = __shfl_down(reference_cache, 1);
}
}
}
h_maxScore = reduce_max(h_maxScore, maxColumn);
if(idThreadLocalSegment == 0){
d_AlignmentsResults[idCandidate].score = h_maxScore;
//d_AlignmentsResults[idCandidate].column = maxColumn;
d_AlignmentsResults[idCandidate].column = 0; //DEBUG
}
}
}
extern "C"
psaError_t localProcessPairwiseStream(sequences_t *candidates, sequences_t *queries, alignments_t *alignments)
{
uint32_t alignmentsPerBlock = DIV_CEIL(MAX_THREADS_PER_SM, THREADS_PER_SEGMENT);
uint32_t blocks = DIV_CEIL(candidates->num, alignmentsPerBlock);
uint32_t threads = CUDA_NUM_THREADS;
uint32_t querySize = queries->h_size[0];
uint32_t candidateSize = candidates->h_size[0];
printf("Grid Size: %d, Block Size: %d, Alignments Per Block: %d, Total alignments: %d\n", blocks, threads, alignmentsPerBlock, candidates->num);
localProcessSWWozniak<<<blocks, threads>>>(candidates->d_ASCII, candidates->d_ASCIIposition,
queries->d_ASCII, queries->d_ASCIIposition,
alignments->d_info, alignments->d_results,
querySize, candidateSize, candidates->num);
cudaThreadSynchronize();
return (SUCCESS);
}
|
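The reduce_max helper in the pair above folds a (score, column) pair across the warp with __shfl_down so that lane 0 ends up with the best score. A standalone sketch of the same key/value warp reduction follows; it uses the newer __shfl_down_sync intrinsic (the files above use the pre-CUDA-9 un-synced form), and the kernel, names and test values are illustrative only.
#include <cstdio>
#include <cuda_runtime.h>
// Warp-wide argmax: after the loop, lane 0 holds the maximum value
// seen in the warp together with the column it came from.
__device__ void warp_arg_max(int &value, int &column) {
  const unsigned full_mask = 0xffffffffu;
  for (int offset = 16; offset > 0; offset >>= 1) {
    int other_value  = __shfl_down_sync(full_mask, value, offset);
    int other_column = __shfl_down_sync(full_mask, column, offset);
    if (other_value > value) { value = other_value; column = other_column; }
  }
}
__global__ void demo(int *out_value, int *out_column) {
  int value  = (threadIdx.x == 13) ? 1000 : (int)threadIdx.x;  // lane 13 holds the best "score"
  int column = threadIdx.x;
  warp_arg_max(value, column);
  if (threadIdx.x == 0) { *out_value = value; *out_column = column; }
}
int main() {
  int *d_out, h_out[2];
  cudaMalloc((void **) &d_out, 2 * sizeof(int));
  demo<<<1, 32>>>(d_out, d_out + 1);
  cudaMemcpy(h_out, d_out, 2 * sizeof(int), cudaMemcpyDeviceToHost);
  printf("max value %d found at column %d\n", h_out[0], h_out[1]);  // expect 1000 at column 13
  cudaFree(d_out);
  return 0;
}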
f5f168fec9b5137110deb60b324c0ee45a6df868.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::get_forward_results(
size_t memcpy_size, const Tensors2<TypeEmbeddingComp> &embedding_feature_tensors,
Tensor2<TypeEmbeddingComp> &embedding_feature, Tensors2<TypeEmbeddingComp> &temp_tensors,
const ResourceManager &resource_manager) {
size_t total_gpu_count = resource_manager.get_global_gpu_count();
const auto &local_gpu = resource_manager.get_local_gpu(0);
CudaDeviceContext context;
if (total_gpu_count > 1) {
// nccl allGather
all_gather(memcpy_size,
embedding_feature_tensors, // send
temp_tensors, // recv
resource_manager);
// memcpy D2H
context.set_device(local_gpu->get_device_id());
CK_CUDA_THROW_(hipMemcpyAsync(embedding_feature.get_ptr(), temp_tensors[0].get_ptr(),
total_gpu_count * memcpy_size * sizeof(TypeEmbeddingComp),
hipMemcpyDeviceToHost, local_gpu->get_stream()));
CK_CUDA_THROW_(hipStreamSynchronize(local_gpu->get_stream()));
} else {
context.set_device(local_gpu->get_device_id());
CK_CUDA_THROW_(hipMemcpyAsync(
embedding_feature.get_ptr(), embedding_feature_tensors[0].get_ptr(),
memcpy_size * sizeof(TypeEmbeddingComp), hipMemcpyDeviceToHost, local_gpu->get_stream()));
CK_CUDA_THROW_(hipStreamSynchronize(local_gpu->get_stream()));
}
return;
}
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::get_forward_results(size_t memcpy_size,
const Tensors2<TypeEmbeddingComp> &embedding_feature_tensors,
void* const embedding_feature,
Tensors2<TypeEmbeddingComp> &temp_tensors,
const ResourceManager &resource_manager,
const bool on_gpu) {
size_t total_gpu_count = resource_manager.get_global_gpu_count();
const auto &local_gpu = resource_manager.get_local_gpu(0);
hipMemcpyKind direction = (on_gpu ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost);
if (total_gpu_count > 1) {
//if all p2p is enabled, use p2p, otherwise use all_gather
if (resource_manager.all_p2p_enabled()) {
/*p2p copy*/
TypeEmbeddingComp* embedding_feature_ptr = reinterpret_cast<TypeEmbeddingComp*>(embedding_feature);
for (size_t dev_id = 0; dev_id < total_gpu_count; ++dev_id){
CudaDeviceContext context;
const auto& local_gpu = resource_manager.get_local_gpu(dev_id);
context.set_device(local_gpu->get_device_id());
size_t offset = dev_id * embedding_feature_tensors[0].get_num_elements();
CK_CUDA_THROW_(hipMemcpyAsync(embedding_feature_ptr + offset,
embedding_feature_tensors[dev_id].get_ptr(),
embedding_feature_tensors[dev_id].get_size_in_bytes(),
hipMemcpyDeviceToDevice,
local_gpu->get_stream()));
} // for dev_id
} else {
/*nccl all_gather*/
all_gather(memcpy_size,
embedding_feature_tensors, // send
temp_tensors, // recv
resource_manager);
CudaDeviceContext context;
context.set_device(local_gpu->get_device_id());
CK_CUDA_THROW_(hipMemcpyAsync(embedding_feature,
temp_tensors[0].get_ptr(),
total_gpu_count * memcpy_size * sizeof(TypeEmbeddingComp),
direction,
local_gpu->get_stream()));
}
} else {
CudaDeviceContext context;
context.set_device(local_gpu->get_device_id());
CK_CUDA_THROW_(hipMemcpyAsync(
embedding_feature, embedding_feature_tensors[0].get_ptr(),
memcpy_size * sizeof(TypeEmbeddingComp), direction, local_gpu->get_stream()));
}
return;
}
template void SparseEmbeddingFunctors::get_forward_results<float>(
size_t memcpy_size, const Tensors2<float> &embedding_feature_tensors,
Tensor2<float> &embedding_feature, Tensors2<float> &temp_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::get_forward_results<__half>(
size_t memcpy_size, const Tensors2<__half> &embedding_feature_tensors,
Tensor2<__half> &embedding_feature, Tensors2<__half> &temp_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::get_forward_results<float>(
size_t memcpy_size,
const Tensors2<float> &embedding_feature_tensors,
void* const embedding_feature,
Tensors2<float> &temp_tensors,
const ResourceManager &resource_manager,
const bool on_gpu);
template void SparseEmbeddingFunctors::get_forward_results<__half>(
size_t memcpy_size,
const Tensors2<__half> &embedding_feature_tensors,
void* const embedding_feature,
Tensors2<__half> &temp_tensors,
const ResourceManager &resource_manager,
const bool on_gpu);
} // namespace HugeCTR
|
f5f168fec9b5137110deb60b324c0ee45a6df868.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::get_forward_results(
size_t memcpy_size, const Tensors2<TypeEmbeddingComp> &embedding_feature_tensors,
Tensor2<TypeEmbeddingComp> &embedding_feature, Tensors2<TypeEmbeddingComp> &temp_tensors,
const ResourceManager &resource_manager) {
size_t total_gpu_count = resource_manager.get_global_gpu_count();
const auto &local_gpu = resource_manager.get_local_gpu(0);
CudaDeviceContext context;
if (total_gpu_count > 1) {
// nccl allGather
all_gather(memcpy_size,
embedding_feature_tensors, // send
temp_tensors, // recv
resource_manager);
// memcpy D2H
context.set_device(local_gpu->get_device_id());
CK_CUDA_THROW_(cudaMemcpyAsync(embedding_feature.get_ptr(), temp_tensors[0].get_ptr(),
total_gpu_count * memcpy_size * sizeof(TypeEmbeddingComp),
cudaMemcpyDeviceToHost, local_gpu->get_stream()));
CK_CUDA_THROW_(cudaStreamSynchronize(local_gpu->get_stream()));
} else {
context.set_device(local_gpu->get_device_id());
CK_CUDA_THROW_(cudaMemcpyAsync(
embedding_feature.get_ptr(), embedding_feature_tensors[0].get_ptr(),
memcpy_size * sizeof(TypeEmbeddingComp), cudaMemcpyDeviceToHost, local_gpu->get_stream()));
CK_CUDA_THROW_(cudaStreamSynchronize(local_gpu->get_stream()));
}
return;
}
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::get_forward_results(size_t memcpy_size,
const Tensors2<TypeEmbeddingComp> &embedding_feature_tensors,
void* const embedding_feature,
Tensors2<TypeEmbeddingComp> &temp_tensors,
const ResourceManager &resource_manager,
const bool on_gpu) {
size_t total_gpu_count = resource_manager.get_global_gpu_count();
const auto &local_gpu = resource_manager.get_local_gpu(0);
cudaMemcpyKind direction = (on_gpu ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost);
if (total_gpu_count > 1) {
//if all p2p is enabled, use p2p, otherwise use all_gather
if (resource_manager.all_p2p_enabled()) {
/*p2p copy*/
TypeEmbeddingComp* embedding_feature_ptr = reinterpret_cast<TypeEmbeddingComp*>(embedding_feature);
for (size_t dev_id = 0; dev_id < total_gpu_count; ++dev_id){
CudaDeviceContext context;
const auto& local_gpu = resource_manager.get_local_gpu(dev_id);
context.set_device(local_gpu->get_device_id());
size_t offset = dev_id * embedding_feature_tensors[0].get_num_elements();
CK_CUDA_THROW_(cudaMemcpyAsync(embedding_feature_ptr + offset,
embedding_feature_tensors[dev_id].get_ptr(),
embedding_feature_tensors[dev_id].get_size_in_bytes(),
cudaMemcpyDeviceToDevice,
local_gpu->get_stream()));
} // for dev_id
} else {
/*nccl all_gather*/
all_gather(memcpy_size,
embedding_feature_tensors, // send
temp_tensors, // recv
resource_manager);
CudaDeviceContext context;
context.set_device(local_gpu->get_device_id());
CK_CUDA_THROW_(cudaMemcpyAsync(embedding_feature,
temp_tensors[0].get_ptr(),
total_gpu_count * memcpy_size * sizeof(TypeEmbeddingComp),
direction,
local_gpu->get_stream()));
}
} else {
CudaDeviceContext context;
context.set_device(local_gpu->get_device_id());
CK_CUDA_THROW_(cudaMemcpyAsync(
embedding_feature, embedding_feature_tensors[0].get_ptr(),
memcpy_size * sizeof(TypeEmbeddingComp), direction, local_gpu->get_stream()));
}
return;
}
template void SparseEmbeddingFunctors::get_forward_results<float>(
size_t memcpy_size, const Tensors2<float> &embedding_feature_tensors,
Tensor2<float> &embedding_feature, Tensors2<float> &temp_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::get_forward_results<__half>(
size_t memcpy_size, const Tensors2<__half> &embedding_feature_tensors,
Tensor2<__half> &embedding_feature, Tensors2<__half> &temp_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::get_forward_results<float>(
size_t memcpy_size,
const Tensors2<float> &embedding_feature_tensors,
void* const embedding_feature,
Tensors2<float> &temp_tensors,
const ResourceManager &resource_manager,
const bool on_gpu);
template void SparseEmbeddingFunctors::get_forward_results<__half>(
size_t memcpy_size,
const Tensors2<__half> &embedding_feature_tensors,
void* const embedding_feature,
Tensors2<__half> &temp_tensors,
const ResourceManager &resource_manager,
const bool on_gpu);
} // namespace HugeCTR
|
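get_forward_results above picks the copy direction from a flag (cudaMemcpyDeviceToDevice when the destination lives on the GPU, cudaMemcpyDeviceToHost otherwise) and issues the copy asynchronously on the GPU's stream, synchronising before the result is used. A single-GPU sketch of that direction switch follows; the function name and sizes are placeholders, and the multi-GPU all-gather/P2P branches are not reproduced.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
// Copy a device buffer either to another device buffer or back to the host,
// mirroring the on_gpu -> cudaMemcpyKind switch in get_forward_results.
void fetch_result(const float *d_src, void *dst, size_t n, bool on_gpu, cudaStream_t stream) {
  cudaMemcpyKind direction = on_gpu ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost;
  cudaMemcpyAsync(dst, d_src, n * sizeof(float), direction, stream);
  cudaStreamSynchronize(stream);   // the destination is only valid once the stream drains
}
int main() {
  const size_t n = 8;
  std::vector<float> h_src(n, 3.0f), h_dst(n, 0.0f);
  float *d_src, *d_dst;
  cudaMalloc((void **) &d_src, n * sizeof(float));
  cudaMalloc((void **) &d_dst, n * sizeof(float));
  cudaMemcpy(d_src, h_src.data(), n * sizeof(float), cudaMemcpyHostToDevice);
  cudaStream_t stream;
  cudaStreamCreate(&stream);
  fetch_result(d_src, d_dst, n, true, stream);            // device-to-device
  fetch_result(d_src, h_dst.data(), n, false, stream);    // device-to-host
  printf("h_dst[0] = %.1f\n", h_dst[0]);                  // expect 3.0
  cudaStreamDestroy(stream);
  cudaFree(d_src); cudaFree(d_dst);
  return 0;
}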
f15bdfdb234300b7f12a5a42ca16d4b2aaff74f4.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backref_re_hip.cuh"
#include <cudf/strings/detail/utilities.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace strings {
namespace detail {
//
children_pair replace_with_backrefs_medium(column_device_view const& d_strings,
reprog_device& d_prog,
string_view const& d_repl_template,
rmm::device_vector<backref_type>& backrefs,
size_type null_count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return make_strings_children(
backrefs_fn<RX_STACK_MEDIUM>{
d_strings, d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
d_strings.size(),
null_count,
stream,
mr);
}
} // namespace detail
} // namespace strings
} // namespace cudf
|
f15bdfdb234300b7f12a5a42ca16d4b2aaff74f4.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backref_re.cuh"
#include <cudf/strings/detail/utilities.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace strings {
namespace detail {
//
children_pair replace_with_backrefs_medium(column_device_view const& d_strings,
reprog_device& d_prog,
string_view const& d_repl_template,
rmm::device_vector<backref_type>& backrefs,
size_type null_count,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return make_strings_children(
backrefs_fn<RX_STACK_MEDIUM>{
d_strings, d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
d_strings.size(),
null_count,
stream,
mr);
}
} // namespace detail
} // namespace strings
} // namespace cudf
|
b739f5899c06e50f580039c5a252f76de66b54af.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2016, David lu
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "cuda_log.h"
#include "../../utils/data_defination.h"
/*
*channel: channel of input data
*kernel_size: pooling window size
*input_dim: width of input data
*output_dim: width of output data
*/
__global__ void _k_CACU_CROSS_ENTROPY_GPU(mycnn::float_t *x, int num, int length, unsigned int *label_, mycnn::float_t *loss_) {
int tid = threadIdx.x;
extern __shared__ mycnn::float_t shared_data[];
mycnn::float_t *xp;
shared_data[tid] = 0.0;
for (int i = tid; i < num; i+=THREADNUM)
{
xp = x + i * length;
shared_data[tid] -= log(xp[label_[i]]);
}
__syncthreads();
int acc_length = THREADNUM / 2;
while(acc_length > 0){
if(tid < acc_length)
shared_data[tid] += shared_data[tid + acc_length];
acc_length /= 2;
__syncthreads();
}
if(tid == 0)
loss_[0] += shared_data[0];
}
extern "C" void cacu_cross_entropy_gpu(mycnn::float_t *x, int num, int length, unsigned int *label_, mycnn::float_t *loss_){
hipLaunchKernelGGL(( _k_CACU_CROSS_ENTROPY_GPU), dim3(1), dim3(THREADNUM), THREADNUM * sizeof(mycnn::float_t), 0, x, num, length, label_,loss_);
CUDA_CHECK(hipDeviceSynchronize());
}
|
b739f5899c06e50f580039c5a252f76de66b54af.cu
|
/*
Copyright (c) 2016, David lu
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "cuda_log.h"
#include "../../utils/data_defination.h"
/*
*channel: channel of input data
*kernel_size: pooling window size
*input_dim: width of input data
*output_dim: width of output data
*/
__global__ void _k_CACU_CROSS_ENTROPY_GPU(mycnn::float_t *x, int num, int length, unsigned int *label_, mycnn::float_t *loss_) {
int tid = threadIdx.x;
extern __shared__ mycnn::float_t shared_data[];
mycnn::float_t *xp;
shared_data[tid] = 0.0;
for (int i = tid; i < num; i+=THREADNUM)
{
xp = x + i * length;
shared_data[tid] -= log(xp[label_[i]]);
}
__syncthreads();
int acc_length = THREADNUM / 2;
while(acc_length > 0){
if(tid < acc_length)
shared_data[tid] += shared_data[tid + acc_length];
acc_length /= 2;
__syncthreads();
}
if(tid == 0)
loss_[0] += shared_data[0];
}
extern "C" void cacu_cross_entropy_gpu(mycnn::float_t *x, int num, int length, unsigned int *label_, mycnn::float_t *loss_){
_k_CACU_CROSS_ENTROPY_GPU<<<1, THREADNUM, THREADNUM * sizeof(mycnn::float_t)>>>(x, num, length, label_,loss_);
CUDA_CHECK(cudaThreadSynchronize());
}
|
4e4412b3d00e1e477c3f7853caf349db59be1aec.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "test_num_vgpr_num_sgpr.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
test_num_vgpr_num_sgpr), dim3(gridBlock),dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
test_num_vgpr_num_sgpr), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
test_num_vgpr_num_sgpr), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
4e4412b3d00e1e477c3f7853caf349db59be1aec.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "test_num_vgpr_num_sgpr.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
test_num_vgpr_num_sgpr<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
test_num_vgpr_num_sgpr<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
test_num_vgpr_num_sgpr<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
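The harness in the last pair warms the kernel up with 10 launches, then times 1000 launches with std::chrono::steady_clock; because it never synchronises inside or after the timed loop, the measurement may cover little more than launch enqueue cost. Below is a minimal sketch of the same pattern with an explicit synchronise before the clock is stopped; the empty kernel and iteration counts are placeholders.
#include <chrono>
#include <cstdio>
#include <cuda_runtime.h>
__global__ void empty_kernel() {}
int main() {
  const int warmup = 10, iters = 1000;
  empty_kernel<<<1, 32>>>();
  cudaDeviceSynchronize();                 // first launch also pays context setup
  for (int i = 0; i < warmup; i++) empty_kernel<<<1, 32>>>();
  cudaDeviceSynchronize();
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < iters; i++) empty_kernel<<<1, 32>>>();
  cudaDeviceSynchronize();                 // include execution, not just enqueue
  auto end = std::chrono::steady_clock::now();
  double usecs = std::chrono::duration<double, std::micro>(end - start).count();
  printf("%.2f us per launch+run\n", usecs / iters);
  return 0;
}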