hip_filename (string, 5 to 84 chars) | hip_content (string, 79 to 9.69M chars) | cuda_filename (string, 4 to 83 chars) | cuda_content (string, 19 to 9.69M chars)
---|---|---|---|
cf770eaf926d14f5063e798c2ec5391f55d94909.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <utilities/cuda_utils.hpp>
#include <type_traits>
#include <utility/utility.hpp>
#include <utility/trajectory_thrust.cuh>
#include <cuspatial/trajectory.hpp>
namespace{
/**
* @brief CUDA kernel for computing spatial bounding boxes of trajectories
*
*/
template <typename T>
__global__ void sbbox_kernel(gdf_size_type num_traj,
const T* const __restrict__ x,
const T* const __restrict__ y,
const uint32_t * const __restrict__ len,
const uint32_t * const __restrict__ pos,
T* const __restrict__ bbox_x1,
T* const __restrict__ bbox_y1,
T* const __restrict__ bbox_x2,
T* const __restrict__ bbox_y2)
{
int pid=blockIdx.x*blockDim.x+threadIdx.x;
if(pid>=num_traj) return;
int bp=(pid==0)?0:pos[pid-1];
int ep=pos[pid];
bbox_x2[pid]=bbox_x1[pid]=x[bp];
bbox_y2[pid]=bbox_y1[pid]=y[bp];
for(int i=bp+1;i<ep;i++)
{
if(bbox_x1[pid]>x[i]) bbox_x1[pid]=x[i];
if(bbox_x2[pid]<x[i]) bbox_x2[pid]=x[i];
if(bbox_y1[pid]>y[i]) bbox_y1[pid]=y[i];
if(bbox_y2[pid]<y[i]) bbox_y2[pid]=y[i];
}
}
struct sbbox_functor {
template <typename T>
static constexpr bool is_supported()
{
return std::is_floating_point<T>::value;
}
template <typename T, std::enable_if_t< is_supported<T>() >* = nullptr>
void operator()(const gdf_column& x, const gdf_column& y,
const gdf_column& length, const gdf_column& offset,
gdf_column& bbox_x1, gdf_column& bbox_y1,
gdf_column& bbox_x2, gdf_column& bbox_y2)
{
T* temp{nullptr};
RMM_TRY( RMM_ALLOC(&temp, length.size * sizeof(T), 0) );
gdf_column_view_augmented(&bbox_x1, temp, nullptr, length.size, x.dtype,
0, gdf_dtype_extra_info{TIME_UNIT_NONE},
"bbox_x1");
RMM_TRY( RMM_ALLOC(&temp, length.size * sizeof(T), 0) );
gdf_column_view_augmented(&bbox_x2, temp, nullptr, length.size, x.dtype,
0, gdf_dtype_extra_info{TIME_UNIT_NONE},
"bbox_x2");
RMM_TRY( RMM_ALLOC(&temp, length.size * sizeof(T), 0) );
gdf_column_view_augmented(&bbox_y1, temp, nullptr, length.size, x.dtype,
0, gdf_dtype_extra_info{TIME_UNIT_NONE},
"bbox_y1");
RMM_TRY( RMM_ALLOC(&temp, length.size * sizeof(T), 0) );
gdf_column_view_augmented(&bbox_y2, temp, nullptr, length.size, x.dtype,
0, gdf_dtype_extra_info{TIME_UNIT_NONE},
"bbox_y2");
gdf_size_type min_grid_size = 0, block_size = 0;
CUDA_TRY( hipOccupancyMaxPotentialBlockSize(&min_grid_size,
&block_size,
sbbox_kernel<T>) );
cudf::util::cuda::grid_config_1d grid{x.size, block_size, 1};
hipLaunchKernelGGL(( sbbox_kernel<T>), dim3(grid.num_blocks), dim3(block_size) , 0, 0,
length.size, static_cast<T*>(x.data), static_cast<T*>(y.data),
static_cast<uint32_t*>(length.data),
static_cast<uint32_t*>(offset.data),
static_cast<T*>(bbox_x1.data),
static_cast<T*>(bbox_y1.data),
static_cast<T*>(bbox_x2.data),
static_cast<T*>(bbox_y2.data) );
CUDA_TRY( hipDeviceSynchronize() );
}
template <typename T, std::enable_if_t<!is_supported<T>()>* = nullptr>
void operator()(const gdf_column& x, const gdf_column& y,
const gdf_column& length, const gdf_column& offset,
gdf_column& bbox_x1, gdf_column& bbox_y1,
gdf_column& bbox_x2, gdf_column& bbox_y2)
{
CUDF_FAIL("Non-floating point operation is not supported");
}
};
} // namespace anonymous
namespace cuspatial {
/**
* @brief computing spatial bounding boxes of trajectories
*
* see trajectory.hpp
*/
void trajectory_spatial_bounds(const gdf_column& x, const gdf_column& y,
const gdf_column& length,
const gdf_column& offset,
gdf_column& bbox_x1, gdf_column& bbox_y1,
gdf_column& bbox_x2, gdf_column& bbox_y2)
{
CUDF_EXPECTS(x.data != nullptr && y.data != nullptr &&
length.data != nullptr && offset.data != nullptr,
"Null data pointer");
CUDF_EXPECTS(x.size == y.size && length.size == offset.size,
"Data size mismatch");
// future versions might allow x/y/pos/len to have null_count > 0, which could
// be useful for taking query results as inputs
CUDF_EXPECTS(x.null_count == 0 && y.null_count == 0 &&
length.null_count==0 && offset.null_count==0,
"Null data support not implemented");
CUDF_EXPECTS(x.size >= offset.size,
"one trajectory must have at least one point");
cudf::type_dispatcher(x.dtype, sbbox_functor(), x, y, length, offset,
bbox_x1, bbox_y1, bbox_x2, bbox_y2);
// TODO: handle null_count if needed
}
}// namespace cuspatial
|
cf770eaf926d14f5063e798c2ec5391f55d94909.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <utilities/cuda_utils.hpp>
#include <type_traits>
#include <utility/utility.hpp>
#include <utility/trajectory_thrust.cuh>
#include <cuspatial/trajectory.hpp>
namespace{
/**
* @brief CUDA kernel for computing spatial bounding boxes of trajectories
*
*/
template <typename T>
__global__ void sbbox_kernel(gdf_size_type num_traj,
const T* const __restrict__ x,
const T* const __restrict__ y,
const uint32_t * const __restrict__ len,
const uint32_t * const __restrict__ pos,
T* const __restrict__ bbox_x1,
T* const __restrict__ bbox_y1,
T* const __restrict__ bbox_x2,
T* const __restrict__ bbox_y2)
{
int pid=blockIdx.x*blockDim.x+threadIdx.x;
if(pid>=num_traj) return;
int bp=(pid==0)?0:pos[pid-1];
int ep=pos[pid];
bbox_x2[pid]=bbox_x1[pid]=x[bp];
bbox_y2[pid]=bbox_y1[pid]=y[bp];
for(int i=bp+1;i<ep;i++)
{
if(bbox_x1[pid]>x[i]) bbox_x1[pid]=x[i];
if(bbox_x2[pid]<x[i]) bbox_x2[pid]=x[i];
if(bbox_y1[pid]>y[i]) bbox_y1[pid]=y[i];
if(bbox_y2[pid]<y[i]) bbox_y2[pid]=y[i];
}
}
struct sbbox_functor {
template <typename T>
static constexpr bool is_supported()
{
return std::is_floating_point<T>::value;
}
template <typename T, std::enable_if_t< is_supported<T>() >* = nullptr>
void operator()(const gdf_column& x, const gdf_column& y,
const gdf_column& length, const gdf_column& offset,
gdf_column& bbox_x1, gdf_column& bbox_y1,
gdf_column& bbox_x2, gdf_column& bbox_y2)
{
T* temp{nullptr};
RMM_TRY( RMM_ALLOC(&temp, length.size * sizeof(T), 0) );
gdf_column_view_augmented(&bbox_x1, temp, nullptr, length.size, x.dtype,
0, gdf_dtype_extra_info{TIME_UNIT_NONE},
"bbox_x1");
RMM_TRY( RMM_ALLOC(&temp, length.size * sizeof(T), 0) );
gdf_column_view_augmented(&bbox_x2, temp, nullptr, length.size, x.dtype,
0, gdf_dtype_extra_info{TIME_UNIT_NONE},
"bbox_x2");
RMM_TRY( RMM_ALLOC(&temp, length.size * sizeof(T), 0) );
gdf_column_view_augmented(&bbox_y1, temp, nullptr, length.size, x.dtype,
0, gdf_dtype_extra_info{TIME_UNIT_NONE},
"bbox_y1");
RMM_TRY( RMM_ALLOC(&temp, length.size * sizeof(T), 0) );
gdf_column_view_augmented(&bbox_y2, temp, nullptr, length.size, x.dtype,
0, gdf_dtype_extra_info{TIME_UNIT_NONE},
"bbox_y2");
gdf_size_type min_grid_size = 0, block_size = 0;
CUDA_TRY( cudaOccupancyMaxPotentialBlockSize(&min_grid_size,
&block_size,
sbbox_kernel<T>) );
cudf::util::cuda::grid_config_1d grid{x.size, block_size, 1};
sbbox_kernel<T><<< grid.num_blocks, block_size >>>(
length.size, static_cast<T*>(x.data), static_cast<T*>(y.data),
static_cast<uint32_t*>(length.data),
static_cast<uint32_t*>(offset.data),
static_cast<T*>(bbox_x1.data),
static_cast<T*>(bbox_y1.data),
static_cast<T*>(bbox_x2.data),
static_cast<T*>(bbox_y2.data) );
CUDA_TRY( cudaDeviceSynchronize() );
}
template <typename T, std::enable_if_t<!is_supported<T>()>* = nullptr>
void operator()(const gdf_column& x, const gdf_column& y,
const gdf_column& length, const gdf_column& offset,
gdf_column& bbox_x1, gdf_column& bbox_y1,
gdf_column& bbox_x2, gdf_column& bbox_y2)
{
CUDF_FAIL("Non-floating point operation is not supported");
}
};
} // namespace anonymous
namespace cuspatial {
/**
* @brief computing spatial bounding boxes of trajectories
*
* see trajectory.hpp
*/
void trajectory_spatial_bounds(const gdf_column& x, const gdf_column& y,
const gdf_column& length,
const gdf_column& offset,
gdf_column& bbox_x1, gdf_column& bbox_y1,
gdf_column& bbox_x2, gdf_column& bbox_y2)
{
CUDF_EXPECTS(x.data != nullptr && y.data != nullptr &&
length.data != nullptr && offset.data != nullptr,
"Null data pointer");
CUDF_EXPECTS(x.size == y.size && length.size == offset.size,
"Data size mismatch");
// future versions might allow x/y/pos/len to have null_count > 0, which could
// be useful for taking query results as inputs
CUDF_EXPECTS(x.null_count == 0 && y.null_count == 0 &&
length.null_count==0 && offset.null_count==0,
"Null data support not implemented");
CUDF_EXPECTS(x.size >= offset.size,
"one trajectory must have at least one point");
cudf::type_dispatcher(x.dtype, sbbox_functor(), x, y, length, offset,
bbox_x1, bbox_y1, bbox_x2, bbox_y2);
// TODO: handle null_count if needed
}
}// namespace cuspatial
|
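The row above shows the main rewrite hipify applies to this cuspatial source: runtime calls are renamed one-for-one (cudaOccupancyMaxPotentialBlockSize becomes hipOccupancyMaxPotentialBlockSize, cudaDeviceSynchronize becomes hipDeviceSynchronize) and the triple-chevron kernel launch becomes a hipLaunchKernelGGL macro call. The sketch below only illustrates that mapping; the kernel `scale_kernel` and helper `launch_scale` are hypothetical and not part of the dataset.

```cpp
#include <cuda_runtime.h>

// Hypothetical kernel, used only to show the launch-syntax rewrite.
__global__ void scale_kernel(int n, float *x, float factor) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= factor;
}

void launch_scale(int n, float *d_x) {
  dim3 grid((n + 255) / 256), block(256);

  // CUDA form, as it appears in the .cu column:
  scale_kernel<<<grid, block>>>(n, d_x, 2.0f);
  cudaDeviceSynchronize();

  // After hipify (.hip column) the same launch becomes:
  //   hipLaunchKernelGGL((scale_kernel), dim3(grid), dim3(block), 0, 0,
  //                      n, d_x, 2.0f);
  //   hipDeviceSynchronize();
}
```

Note that hipLaunchKernelGGL takes the grid, block, dynamic shared-memory size and stream before the kernel arguments, which is why the generated .hip column carries the extra `0, 0` arguments.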
01b8f04a87d5e07852658be669580c1af8267d87.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* (C) Copyright 2020 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "cuda_math_util.h"
#include "cuda_util.h"
#include "utility_functions.h"
#include <chrono>
#include <iostream>
#include <memory>
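// IDX2F: 1-based, column-major matrix indexing (i = row, j = column, ld = leading dimension)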
#define IDX2F(i, j, ld) ((((j)-1) * (ld)) + ((i)-1))
// This should not be necessary, because the device id is set individually
// per thread. However, if one wants to use 2 GPUs within one
// thread, it is needed.
#define RPU_EXPLICIT_ENFORCE_DEVICE_ID
namespace RPU {
__global__ void kernelCurandSetup(unsigned long long rseed, hiprandState_t *state, int n) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
/* Each thread gets same seed, a different sequence
number, no offset */
if (id < n) {
hiprand_init(rseed, id, 0, &state[id]);
}
}
__global__ void kernelCurandSetupSameSeed(unsigned long long rseed, hiprandState_t *state, int n) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n) {
hiprand_init(rseed, 0, 0, &state[id]);
}
}
void curandSetup(CudaArray<hiprandState_t> &dev_states, unsigned long long rseed, bool same_seed) {
unsigned long long seed = rseed;
if (rseed == 0) {
seed = (unsigned long long)std::chrono::high_resolution_clock::now().time_since_epoch().count();
} else {
seed = rseed;
}
CudaContext *c = dev_states.getContext();
int m = dev_states.getSize();
int nthreads = c->getNThreads();
int nblocks = c->getNBlocks(m, nthreads);
if (same_seed) {
hipLaunchKernelGGL(( kernelCurandSetupSameSeed), dim3(nblocks), dim3(nthreads), 0, c->getStream(),
seed, dev_states.getData(), m);
} else {
hipLaunchKernelGGL(( kernelCurandSetup), dim3(nblocks), dim3(nthreads), 0, c->getStream(), seed, dev_states.getData(), m);
}
c->synchronize();
}
void curandSetup(
CudaContext *c,
std::unique_ptr<CudaArray<hiprandState_t>> &dev_states,
int n,
unsigned long long rseed,
bool same_seed) {
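  // round the requested count up to the next multiple of 32 (the warp size)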
int m = (n + 31) / 32 * 32;
dev_states = std::unique_ptr<CudaArray<hiprandState_t>>(new CudaArray<hiprandState_t>(c, m));
curandSetup(*dev_states, rseed, same_seed);
}
CublasEnvironment::~CublasEnvironment() {
DEBUG_OUT("Destroy BLAS env.");
// DEBUG_OUT("handle : " <<this->handle_);
// destroy device
// destroy host
if (handle_ != nullptr) {
hipblasDestroy(handle_);
DEBUG_OUT("CUBLAS destroyed");
}
#ifdef RPU_WITH_CUBLAS_DEVICE
if (device_handle_created_) {
DEBUG_OUT("destroy device handle");
hipLaunchKernelGGL(( kernelCublasDestroy), dim3(1), dim3(1), 0, 0, device_handle_);
hipDeviceSynchronize();
hipFree(device_handle_);
DEBUG_OUT("CUBLAS device destroyed");
}
#endif
// hipDeviceReset();
}
CublasEnvironment::CublasEnvironment(int gpu_id) {
DEBUG_OUT("GET BLAS env.");
if (gpu_id >= 0)
CUDA_CALL(hipSetDevice(gpu_id));
// create host
hipblasStatus_t stat = hipblasCreate(&handle_);
CUDA_CALL(hipDeviceSynchronize());
// DEBUG_CALL(this->test(););
// DEBUG_OUT("handle : " <<handle_);
if (stat != HIPBLAS_STATUS_SUCCESS) {
RPU_FATAL("CUBLAS initialization failed");
} else
DEBUG_OUT("CUBLAS Host initialized.");
#ifdef RPU_WITH_CUBLAS_DEVICE
device_handle_created_ = false;
#endif
}
void CublasEnvironment::test() {
this->runTest();
#ifdef RPU_WITH_CUBLAS_DEVICE
if (device_handle_created_) {
this->runTestDevice();
}
#endif
}
static __inline__ void
modifyS(hipblasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta) {
hipblasSscal(handle, n - p + 1, &alpha, &m[IDX2F(p, q, ldm)], ldm);
hipblasSscal(handle, ldm - p + 1, &beta, &m[IDX2F(p, q, ldm)], 1);
}
int CublasEnvironment::runTest() {
// make a test run
hipblasStatus_t stat;
int i, j;
int M = 5;
int N = 6;
float *devPtrA;
float *a = 0;
a = (float *)malloc(M * N * sizeof(*a));
if (!a) {
std::cout << "CUBLAS test run failed (malloc)\n";
return 1;
}
for (j = 1; j <= N; j++) {
for (i = 1; i <= M; i++) {
a[IDX2F(i, j, M)] = (float)((i - 1) * M + j);
}
}
if (hipMalloc((void **)&devPtrA, M * N * sizeof(*a)) != hipSuccess) {
std::cerr << "CUBLAS test run failed (hipMalloc)\n";
free(a);
return 1;
}
modifyS(handle_, devPtrA, M, N, 2, 3, 16.0f, 12.0f);
stat = hipblasSetMatrix(M, N, sizeof(*a), a, M, devPtrA, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
std::cerr << "CUBLAS test run failed (data download)\n";
hipFree(devPtrA);
free(a);
return 1;
}
stat = hipblasGetMatrix(M, N, sizeof(*a), devPtrA, M, a, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
std::cerr << "CUBLAS test run failed (data upload)\n";
hipFree(devPtrA);
free(a);
return 1;
}
hipFree(devPtrA);
for (j = 1; j <= N; j++) {
for (i = 1; i <= M; i++) {
std::cout << a[IDX2F(i, j, M)] << ",";
}
std::cout << std::endl;
}
free(a);
std::cout << "CUBLAS test run successful.\n";
return 0;
}
#ifdef RPU_WITH_CUBLAS_DEVICE
__global__ void kernelCublasDestroy(hipblasHandle_t *device_handle) {
hipblasStatus_t status = hipblasDestroy(*device_handle);
hipDeviceSynchronize();
if (status != HIPBLAS_STATUS_SUCCESS) {
printf("ERROR in destroying cublas device!\n");
}
}
__global__ void kernelCublasCreateDevice(hipblasHandle_t *device_handle) {
hipblasStatus_t status = hipblasCreate(device_handle);
hipDeviceSynchronize();
if (status != HIPBLAS_STATUS_SUCCESS) {
printf("ERROR in creating cublas device!\n");
return;
}
}
void CublasEnvironment::createDeviceHandle() {
if (device_handle_created_)
return;
CUDA_CALL(hipMalloc(&device_handle_, sizeof(hipblasHandle_t)));
CUDA_CALL(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernelCublasCreateDevice), dim3(1), dim3(1), 0, 0, device_handle_);
CUDA_CALL(hipDeviceSynchronize());
DEBUG_OUT("Created device handle");
device_handle_created_ = true;
}
hipblasHandle_t *CublasEnvironment::getDeviceHandle() {
if (!device_handle_created_) {
this->createDeviceHandle();
}
return device_handle_;
}
__global__ void kernelCublasTest(hipblasHandle_t *device_handle, float *source, float *dest) {
hipblasStatus_t status = hipblasScopy(*device_handle, 1, source, 1, dest, 1);
hipDeviceSynchronize();
if ((status != HIPBLAS_STATUS_SUCCESS)) {
printf("Some problems with the CuBLAS device test.\n");
}
}
int CublasEnvironment::runTestDevice() {
float one = 1;
float zero = 0;
float *a;
float *b;
CUDA_CALL(hipMalloc(&a, sizeof(float)));
CUDA_CALL(hipMalloc(&b, sizeof(float)));
CUDA_CALL(hipMemcpy(a, &one, sizeof(float), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(b, &zero, sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernelCublasTest), dim3(1), dim3(1), 0, 0, device_handle_, a, b);
CUDA_CALL(hipDeviceSynchronize());
CUDA_CALL(hipMemcpy(&zero, b, sizeof(float), hipMemcpyDeviceToHost));
CUDA_CALL(hipFree(a));
CUDA_CALL(hipFree(b));
if (zero == 1) {
std::cout << "CuBLAS device test succeded\n";
return 0;
} else {
std::cerr << "ERROR in CuBLAS device test\n";
return 1;
}
}
#endif
//**********************************************************************//
void CudaContext::init() {
DEBUG_OUT("Init context...");
if (gpu_id_ >= 0) {
CUDA_CALL(hipSetDevice(gpu_id_));
} else {
CUDA_CALL(hipGetDevice(&gpu_id_));
}
// int gpu_count = 0;
// hipGetDeviceCount(&gpu_count);
// std::cout << "GPU devices " << gpu_count << ", using ID " << gpu_id_ << std::endl;
env_ = new CublasEnvironment(gpu_id_);
stream_id_ = 0;
rng_created_ = false;
shared_ = false;
non_blocking_ = true;
CUDA_CALL(hipEventCreate(&event_));
prop_ = new hipDeviceProp_t();
CUDA_CALL(hipGetDeviceProperties(prop_, gpu_id_));
}
CudaContext::CudaContext(int gpu_id, bool non_blocking)
: gpu_id_(gpu_id), non_blocking_(non_blocking) {
DEBUG_OUT("Create context on GPU " << gpu_id);
this->init();
this->getStream(0);
}
CudaContext::CudaContext(hipStream_t shared_stream, int gpu_id) : gpu_id_(gpu_id) {
DEBUG_OUT("Create context on GPU " << gpu_id << " with shared stream (on id 0)\n");
this->init();
// ignore the test for shared stream 0. PyTorch seems to like 0
// if (!shared_stream) {
// RPU_FATAL("Shared stream should not be NULL!");
//} else {
shared_ = true;
streams_.push_back(shared_stream);
//}
}
CudaContext::~CudaContext() {
DEBUG_OUT("Destroy Cuda Context...");
enforceDeviceId();
if (env_ != nullptr) {
int i_start = shared_ ? 1 : 0;
for (int i = i_start; i < streams_.size(); i++) {
hipStreamSynchronize(streams_[i]);
hipStreamDestroy(streams_[i]);
}
}
if (event_ != nullptr) {
hipEventDestroy(event_);
event_ = nullptr;
}
if (env_ != nullptr) {
delete env_;
env_ = nullptr;
}
if (rng_created_) {
hiprandDestroyGenerator(rng_);
}
if (prop_ != nullptr) {
delete prop_;
prop_ = nullptr;
}
DEBUG_OUT("Destroyed.");
}
// copy constructor
CudaContext::CudaContext(const CudaContext &other) {
// only stream idx 0 is ever shared !
// copy construction will share the stream.
// random generator etc are NOT shared !
gpu_id_ = other.gpu_id_;
this->init();
shared_ = true;
non_blocking_ = other.non_blocking_;
// only stream 0 is ever shared !!
if (other.streams_.size() > 0) {
streams_.push_back(other.streams_[0]);
}
for (int i = 1; i < other.streams_.size(); i++) {
// rest are new streams!!
this->getStream(i);
}
stream_id_ = other.stream_id_;
if (other.rng_created_) {
this->createRandomGenerator();
}
// random states won't be copied. They will be created anew
DEBUG_OUT("CudaContext copy constructed [but only first stream shared. New streams and event!].");
}
// copy assignment
CudaContext &CudaContext::operator=(const CudaContext &other) {
DEBUG_OUT("Copy assignment ");
CudaContext tmp(other);
swap(*this, tmp);
return *this;
}
// move constructor
CudaContext::CudaContext(CudaContext &&other) {
*this = std::move(other);
DEBUG_OUT("Move constructor ");
}
// move assignment
CudaContext &CudaContext::operator=(CudaContext &&other) {
gpu_id_ = other.gpu_id_;
stream_id_ = other.stream_id_;
shared_ = other.shared_;
non_blocking_ = other.non_blocking_;
prop_ = other.prop_;
other.prop_ = nullptr;
streams_ = std::move(other.streams_);
env_ = other.env_;
other.env_ = nullptr;
rng_ = other.rng_;
other.rng_ = nullptr;
rng_created_ = other.rng_created_;
event_ = other.event_;
other.event_ = nullptr;
shared_random_states_ = std::move(other.shared_random_states_);
DEBUG_OUT("Move assignment ");
return *this;
}
void CudaContext::synchronizeContext() const {
enforceDeviceId();
for (int i = 0; i < streams_.size(); i++) {
CUDA_CALL(hipStreamSynchronize(streams_[i]));
}
}
void CudaContext::enforceDeviceId() const {
#ifdef RPU_EXPLICIT_ENFORCE_DEVICE_ID
int gpu_id;
CUDA_CALL(hipGetDevice(&gpu_id));
if (gpu_id != gpu_id_) {
std::cout << "WARNING wrong device detected!" << std::endl;
CUDA_CALL(hipSetDevice(gpu_id_));
}
#endif
}
void CudaContext::synchronizeDevice() const {
enforceDeviceId();
CUDA_CALL(hipDeviceSynchronize());
}
void CudaContext::synchronizeWith(CudaContext *c) const {
if (this->getStream() == c->getStream()) {
// do nothing since work on the same stream
} else {
this->synchronize();
c->synchronize();
}
}
void CudaContext::synchronizeWith(CudaContext *ca, CudaContext *cb) const {
if (ca->getStream() != cb->getStream()) {
ca->synchronizeWith(cb);
}
if (ca->getStream() != this->getStream()) {
this->synchronize();
}
}
void CudaContext::synchronizeStream(int idx) const {
DEBUG_OUT("Synchronize stream idx " << idx);
enforceDeviceId();
if ((idx >= 0) && (idx < streams_.size())) {
CUDA_CALL(hipStreamSynchronize(streams_[idx]));
}
}
void CudaContext::synchronizeStream() const {
DEBUG_OUT("Synchronize stream id " << stream_id_);
enforceDeviceId();
CUDA_CALL(hipStreamSynchronize(streams_[stream_id_]));
}
int CudaContext::getNBlocks(int size, int nthreads) const {
DEBUG_OUT("get NBlocks for size " << size);
return (size + nthreads - 1) / nthreads;
}
int CudaContext::getNStrideBlocks(int size, int nthreads) const {
DEBUG_OUT("get N Stride Blocks for size " << size);
int max_blocks = getSMCount() * maxThreadsPerBlock() / nthreads;
return MIN(getNBlocks(size, nthreads), max_blocks);
}
hipStream_t CudaContext::getStream(int idx) {
enforceDeviceId();
DEBUG_OUT("Try to get streams " << idx);
if ((idx >= 0) && (idx < streams_.size())) {
if (stream_id_ != idx) {
stream_id_ = idx;
CUBLAS_CALL(hipblasSetStream(this->getBlasHandle(), streams_[idx]));
}
return streams_[idx];
} else if (streams_.size() == idx) {
hipStream_t s;
if (non_blocking_) {
CUDA_CALL(hipStreamCreateWithFlags(&s, hipStreamNonBlocking));
} else {
CUDA_CALL(hipStreamCreate(&s));
}
streams_.push_back(s);
stream_id_ = idx;
CUBLAS_CALL(hipblasSetStream(this->getBlasHandle(), streams_[idx]));
DEBUG_OUT("Created stream id " << idx << " at : " << streams_[idx] << " ( s: " << s << ")");
return streams_[idx];
} else {
RPU_FATAL("Requested stream size mismatch.");
}
}
void CudaContext::setStream(hipStream_t s) {
if (shared_) {
enforceDeviceId();
if (s != streams_[stream_id_]) {
if (stream_id_ != 0) {
this->synchronizeDevice();
} else {
this->synchronizeStream();
}
}
streams_[0] = s;
stream_id_ = 0;
} else {
RPU_FATAL("setStream: must be shared context.");
}
}
void CudaContext::createRandomGenerator() {
if (!rng_created_) {
enforceDeviceId();
CURAND_CALL(hiprandCreateGenerator(&rng_, HIPRAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(hiprandSetStream(rng_, this->getStream()));
rng_created_ = true;
}
}
void CudaContext::setRandomSeed(unsigned long long rseed) {
enforceDeviceId();
if (!rng_created_) {
this->createRandomGenerator();
}
unsigned long long seed = rseed;
if (rseed == 0) {
seed = (unsigned long long)std::chrono::high_resolution_clock::now().time_since_epoch().count();
} else {
seed = rseed;
}
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(rng_, seed));
this->synchronizeStream();
}
void CudaContext::randNormal(float *dev_array, int size, float mean, float stddev) {
if (!rng_created_) {
setRandomSeed(0); // will create random generator on the fly
}
if (stddev > 0) {
CURAND_CALL(hiprandGenerateNormal(rng_, dev_array, size, mean, stddev));
} else {
RPU::math::elemconst(this, dev_array, size, mean);
}
}
void CudaContext::randUniform(float *dev_array, int size) {
if (!rng_created_) {
setRandomSeed(0);
}
CURAND_CALL(hiprandGenerateUniform(rng_, dev_array, size));
}
hiprandState_t *CudaContext::getRandomStates(int size) {
int n = size;
if (n <= 0) {
n = getSMCount() * maxThreadsPerBlock();
}
if (shared_random_states_.size() <= stream_id_) {
shared_random_states_.resize(stream_id_ + 1);
}
if (!shared_random_states_[stream_id_] || (n > shared_random_states_[stream_id_]->getSize())) {
curandSetup(this, shared_random_states_[stream_id_], n, 0, false);
}
return shared_random_states_[stream_id_]->getData();
}
void CudaContext::recordWaitEvent(CudaContext *wait_on_context) {
this->recordWaitEvent(wait_on_context->getStream(), wait_on_context->getEvent());
}
void CudaContext::recordEvent() { CUDA_CALL(hipEventRecord(event_, streams_[stream_id_])); }
void CudaContext::waitEvent(hipEvent_t wait_on_event) {
CUDA_CALL(hipStreamWaitEvent(streams_[stream_id_], wait_on_event, 0));
}
void CudaContext::waitEvent(CudaContext *wait_on_context) {
waitEvent(wait_on_context->getEvent());
}
void CudaContext::recordWaitEvent(hipStream_t s) { this->recordWaitEvent(s, event_); }
void CudaContext::recordWaitEvent(hipStream_t s, hipEvent_t e) {
if (streams_[stream_id_] != s) {
CUDA_CALL(hipEventRecord(e, s));
CUDA_CALL(hipStreamWaitEvent(streams_[stream_id_], e, 0));
}
}
//**********************************************************************//
template <typename T>
CudaArray<T>::CudaArray(CudaContext *c) : size_(0), width_(0), height_(1), pitch_(0), context_(c) {}
template <typename T> CudaArray<T>::CudaArray(CudaContext *c, int n) : CudaArray(c) {
size_ = n;
width_ = n;
height_ = 1; // this needs to be one! No height>1 supported yet
if (n > 0) {
context_->enforceDeviceId();
CUDA_CALL(hipMallocPitch(&values_, &pitch_, n * sizeof(T), height_));
}
}
template <typename T>
CudaArray<T>::CudaArray(CudaContext *c, int n, const T *host_array) : CudaArray(c, n) {
if (n > 0) {
this->assign(host_array);
context_->synchronize(); // better to synchronize; constructing is considered slow anyway
}
}
template <typename T> CudaArray<T>::~CudaArray() {
// no sync because no ownership of context !! (might be already destructed)
if ((size_ > 0) && (values_ != nullptr) && (!shared_if_)) {
context_->enforceDeviceId();
hipFree(values_);
values_ = nullptr;
}
}
// copy constructor
template <typename T> CudaArray<T>::CudaArray(const CudaArray<T> &other) {
size_ = other.size_;
width_ = other.width_;
height_ = other.height_;
pitch_ = other.pitch_;
context_ = other.context_;
values_ = nullptr;
if (size_ > 0) {
context_->enforceDeviceId();
CUDA_CALL(hipMallocPitch(&values_, &pitch_, size_ * sizeof(T), height_));
this->assign(other);
context_->synchronize(); // better to synchronize; constructing is slow anyway
}
if (other.shared_if_) {
this->setShared(other.values_);
}
DEBUG_OUT("CudaArray copy constructed.");
}
// copy assignment
template <typename T> CudaArray<T> &CudaArray<T>::operator=(const CudaArray<T> &other) {
context_->enforceDeviceId();
CudaArray<T> tmp(other); // seems a bit inefficient...
swap(*this, tmp);
context_->synchronize(); // need sync because of tmp
return *this;
}
// move constructor
template <typename T> CudaArray<T>::CudaArray(CudaArray<T> &&other) {
context_->enforceDeviceId();
*this = std::move(other);
}
// move assignment
template <typename T> CudaArray<T> &CudaArray<T>::operator=(CudaArray<T> &&other) {
size_ = other.size_;
other.size_ = 0;
width_ = other.width_;
other.width_ = 0;
height_ = other.height_;
other.height_ = 0;
pitch_ = other.pitch_;
other.pitch_ = 0;
context_ = other.context_;
other.context_ = nullptr;
values_ = other.values_;
other.values_ = nullptr;
shared_if_ = other.shared_if_;
return *this;
}
template <typename T> void CudaArray<T>::setConst(T set_value) {
DEBUG_OUT(
"Set (hsize,P,W,H): " << size_ << ", " << pitch_ << ", " << width_ * sizeof(T) << ", "
<< height_);
if (size_ > 0) {
context_->enforceDeviceId();
if (set_value != 0) {
RPU::math::elemconst(context_, values_, size_, set_value);
} else {
CUDA_CALL(hipMemset2DAsync(
values_, pitch_, 0, this->getWidthBytes(), height_, context_->getStream()));
}
}
}
template <> void CudaArray<curandStateXORWOW>::setConst(curandStateXORWOW set_value) {
RPU_FATAL("Cannot set curandstates to some values.");
}
template <> void CudaArray<double *>::setConst(double *set_value) {
RPU_FATAL("Cannot set pointer types to some values.");
}
template <> void CudaArray<float *>::setConst(float *set_value) {
RPU_FATAL("Cannot set pointer types to some values.");
}
template <typename T> void CudaArray<T>::printValues(int nmax) const {
T *values = new T[size_];
this->copyTo(values); // will synchronize
int n = nmax > 0 ? MIN(nmax, size_) : size_;
for (int i = 0; i < n; ++i) {
std::cout << "[" << i << "]:" << values[i] << ", ";
}
if (n < size_) {
std::cout << "...";
}
std::cout << std::endl;
delete[] values;
}
template <> void CudaArray<curandStateXORWOW>::printValues(int nmax) const {
RPU_FATAL("Cannot print curandstates.");
}
template <typename T> void CudaArray<T>::assign(const T *host_array) {
int sz = size_ * sizeof(T);
DEBUG_OUT(
"Assign host (hsize,P,W,H): " << sz << ", " << pitch_ << ", " << width_ * sizeof(T) << ", "
<< height_);
context_->enforceDeviceId();
context_->synchronize();
CUDA_CALL(hipMemcpy2DAsync(
values_, pitch_, host_array, sz, sz, 1, hipMemcpyHostToDevice, context_->getStream()));
}
template <typename T>
void CudaArray<T>::assignTranspose(const T *host_array, const int m, const int n) {
// col major to row major
if (m * n != size_) {
RPU_FATAL("Size mismatch");
}
T *transposed_array = new T[size_];
for (int i = 0; i < size_; i++) {
int i_col = (i % n);
int i_row = (i / n);
transposed_array[i_col * m + i_row] = host_array[i];
}
context_->enforceDeviceId();
int sz = size_ * sizeof(T);
DEBUG_OUT(
"Assign host (hsize,P,W,H): " << sz << ", " << pitch_ << ", " << width_ * sizeof(T) << ", "
<< height_);
context_->synchronize();
CUDA_CALL(hipMemcpy2D(
values_, pitch_, transposed_array, sz, sz, 1, hipMemcpyHostToDevice)); // no async
delete[] transposed_array;
}
template <typename T> void CudaArray<T>::assign(const CudaArray<T> &source) {
DEBUG_OUT(
"Assign device (P,W,H): "
<< ", " << pitch_ << ", " << width_ * sizeof(T) << ", " << height_);
if (source.getSize() != size_) {
RPU_FATAL("Assignment of Cuda Array failed. Size mismatch.");
}
if ((size_ > 0) && (source.getSize() > 0)) {
hipStream_t s = context_->getStream();
context_->synchronizeWith(source.getContext());
CUDA_CALL(hipMemcpy2DAsync(
values_, pitch_, source.getDataConst(), source.getPitch(), source.getWidthBytes(), 1,
hipMemcpyDeviceToDevice, s));
}
}
template <typename T> void CudaArray<T>::assignFromDevice(const T *device_array) {
DEBUG_OUT(
"Assign device (P,W,H): "
<< ", " << pitch_ << ", " << width_ * sizeof(T) << ", " << height_);
if ((size_ > 0)) {
int sz = size_ * sizeof(T);
hipStream_t s = context_->getStream();
context_->synchronizeDevice(); // better do device-wide. Not clear where the device array lives
CUDA_CALL(
hipMemcpy2DAsync(values_, pitch_, device_array, sz, sz, 1, hipMemcpyDeviceToDevice, s));
}
}
template <typename T> void CudaArray<T>::setShared(T *device_array) {
// destruct
if ((size_ > 0) && (values_ != nullptr) && (!shared_if_)) {
context_->enforceDeviceId();
CUDA_CALL(hipFree(values_));
values_ = nullptr;
}
shared_if_ = true;
values_ = device_array; // assign memory shared (memory is governed from outside)
// Caution: does not CHECK THE SIZE OF THE GIVEN ARRAY!
}
template <typename T> void CudaArray<T>::copyTo(T *host_array) const {
int sz = size_ * sizeof(T);
DEBUG_OUT(
"Copy to host (hsize,P,W,H): " << sz << ", " << pitch_ << ", " << width_ * sizeof(T) << ", "
<< height_);
if (size_ > 0) {
context_->enforceDeviceId();
CUDA_CALL(hipMemcpy2DAsync(
host_array, sz, values_, pitch_, this->getWidthBytes(), height_, hipMemcpyDeviceToHost,
context_->getStream()));
context_->synchronizeStream();
}
}
template <typename T> T *CudaArray<T>::getDataSafe(CudaContext *c) {
context_->synchronizeWith(c);
return values_;
}
#ifdef RPU_USE_DOUBLE
template class CudaArray<double>;
template class CudaArray<double *>;
#endif
template class CudaArray<float>;
template class CudaArray<float *>;
template class CudaArray<int>;
template class CudaArray<char>;
template class CudaArray<uint32_t>;
template class CudaArray<uint64_t>;
template class CudaArray<curandStateXORWOW>;
// reset
void resetCuda(int gpu_id) {
if (gpu_id >= 0) {
CUDA_CALL(hipSetDevice(gpu_id));
}
CUDA_CALL(hipDeviceReset());
CUDA_CALL(hipFree(0));
CUDA_CALL(hipDeviceSynchronize());
}
} // namespace RPU
|
01b8f04a87d5e07852658be669580c1af8267d87.cu
|
/**
* (C) Copyright 2020 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "cuda_math_util.h"
#include "cuda_util.h"
#include "utility_functions.h"
#include <chrono>
#include <iostream>
#include <memory>
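// IDX2F: 1-based, column-major matrix indexing (i = row, j = column, ld = leading dimension)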
#define IDX2F(i, j, ld) ((((j)-1) * (ld)) + ((i)-1))
// This should not be necessary, because the device id is set individually
// per thread. However, if one wants to use 2 GPUs within one
// thread, it is needed.
#define RPU_EXPLICIT_ENFORCE_DEVICE_ID
namespace RPU {
__global__ void kernelCurandSetup(unsigned long long rseed, curandState_t *state, int n) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
/* Each thread gets same seed, a different sequence
number, no offset */
if (id < n) {
curand_init(rseed, id, 0, &state[id]);
}
}
__global__ void kernelCurandSetupSameSeed(unsigned long long rseed, curandState_t *state, int n) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n) {
curand_init(rseed, 0, 0, &state[id]);
}
}
void curandSetup(CudaArray<curandState_t> &dev_states, unsigned long long rseed, bool same_seed) {
unsigned long long seed = rseed;
if (rseed == 0) {
seed = (unsigned long long)std::chrono::high_resolution_clock::now().time_since_epoch().count();
} else {
seed = rseed;
}
CudaContext *c = dev_states.getContext();
int m = dev_states.getSize();
int nthreads = c->getNThreads();
int nblocks = c->getNBlocks(m, nthreads);
if (same_seed) {
kernelCurandSetupSameSeed<<<nblocks, nthreads, 0, c->getStream()>>>(
seed, dev_states.getData(), m);
} else {
kernelCurandSetup<<<nblocks, nthreads, 0, c->getStream()>>>(seed, dev_states.getData(), m);
}
c->synchronize();
}
void curandSetup(
CudaContext *c,
std::unique_ptr<CudaArray<curandState_t>> &dev_states,
int n,
unsigned long long rseed,
bool same_seed) {
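  // round the requested count up to the next multiple of 32 (the warp size)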
int m = (n + 31) / 32 * 32;
dev_states = std::unique_ptr<CudaArray<curandState_t>>(new CudaArray<curandState_t>(c, m));
curandSetup(*dev_states, rseed, same_seed);
}
CublasEnvironment::~CublasEnvironment() {
DEBUG_OUT("Destroy BLAS env.");
// DEBUG_OUT("handle : " <<this->handle_);
// destroy device
// destroy host
if (handle_ != nullptr) {
cublasDestroy(handle_);
DEBUG_OUT("CUBLAS destroyed");
}
#ifdef RPU_WITH_CUBLAS_DEVICE
if (device_handle_created_) {
DEBUG_OUT("destroy device handle");
kernelCublasDestroy<<<1, 1>>>(device_handle_);
cudaDeviceSynchronize();
cudaFree(device_handle_);
DEBUG_OUT("CUBLAS device destroyed");
}
#endif
// cudaDeviceReset();
}
CublasEnvironment::CublasEnvironment(int gpu_id) {
DEBUG_OUT("GET BLAS env.");
if (gpu_id >= 0)
CUDA_CALL(cudaSetDevice(gpu_id));
// create host
cublasStatus_t stat = cublasCreate(&handle_);
CUDA_CALL(cudaDeviceSynchronize());
// DEBUG_CALL(this->test(););
// DEBUG_OUT("handle : " <<handle_);
if (stat != CUBLAS_STATUS_SUCCESS) {
RPU_FATAL("CUBLAS initialization failed");
} else
DEBUG_OUT("CUBLAS Host initialized.");
#ifdef RPU_WITH_CUBLAS_DEVICE
device_handle_created_ = false;
#endif
}
void CublasEnvironment::test() {
this->runTest();
#ifdef RPU_WITH_CUBLAS_DEVICE
if (device_handle_created_) {
this->runTestDevice();
}
#endif
}
static __inline__ void
modifyS(cublasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta) {
cublasSscal(handle, n - p + 1, &alpha, &m[IDX2F(p, q, ldm)], ldm);
cublasSscal(handle, ldm - p + 1, &beta, &m[IDX2F(p, q, ldm)], 1);
}
int CublasEnvironment::runTest() {
// make a test run
cublasStatus_t stat;
int i, j;
int M = 5;
int N = 6;
float *devPtrA;
float *a = 0;
a = (float *)malloc(M * N * sizeof(*a));
if (!a) {
std::cout << "CUBLAS test run failed (malloc)\n";
return 1;
}
for (j = 1; j <= N; j++) {
for (i = 1; i <= M; i++) {
a[IDX2F(i, j, M)] = (float)((i - 1) * M + j);
}
}
if (cudaMalloc((void **)&devPtrA, M * N * sizeof(*a)) != cudaSuccess) {
std::cerr << "CUBLAS test run failed (cudaMalloc)\n";
free(a);
return 1;
}
modifyS(handle_, devPtrA, M, N, 2, 3, 16.0f, 12.0f);
stat = cublasSetMatrix(M, N, sizeof(*a), a, M, devPtrA, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
std::cerr << "CUBLAS test run failed (data download)\n";
cudaFree(devPtrA);
free(a);
return 1;
}
stat = cublasGetMatrix(M, N, sizeof(*a), devPtrA, M, a, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
std::cerr << "CUBLAS test run failed (data upload)\n";
cudaFree(devPtrA);
free(a);
return 1;
}
cudaFree(devPtrA);
for (j = 1; j <= N; j++) {
for (i = 1; i <= M; i++) {
std::cout << a[IDX2F(i, j, M)] << ",";
}
std::cout << std::endl;
}
free(a);
std::cout << "CUBLAS test run successful.\n";
return 0;
}
#ifdef RPU_WITH_CUBLAS_DEVICE
__global__ void kernelCublasDestroy(cublasHandle_t *device_handle) {
cublasStatus_t status = cublasDestroy(*device_handle);
cudaDeviceSynchronize();
if (status != CUBLAS_STATUS_SUCCESS) {
printf("ERROR in destroying cublas device!\n");
}
}
__global__ void kernelCublasCreateDevice(cublasHandle_t *device_handle) {
cublasStatus_t status = cublasCreate(device_handle);
cudaDeviceSynchronize();
if (status != CUBLAS_STATUS_SUCCESS) {
printf("ERROR in creating cublas device!\n");
return;
}
}
void CublasEnvironment::createDeviceHandle() {
if (device_handle_created_)
return;
CUDA_CALL(cudaMalloc(&device_handle_, sizeof(cublasHandle_t)));
CUDA_CALL(cudaDeviceSynchronize());
kernelCublasCreateDevice<<<1, 1>>>(device_handle_);
CUDA_CALL(cudaDeviceSynchronize());
DEBUG_OUT("Created device handle");
device_handle_created_ = true;
}
cublasHandle_t *CublasEnvironment::getDeviceHandle() {
if (!device_handle_created_) {
this->createDeviceHandle();
}
return device_handle_;
}
__global__ void kernelCublasTest(cublasHandle_t *device_handle, float *source, float *dest) {
cublasStatus_t status = cublasScopy(*device_handle, 1, source, 1, dest, 1);
cudaDeviceSynchronize();
if ((status != CUBLAS_STATUS_SUCCESS)) {
printf("Some problems with the CuBLAS device test.\n");
}
}
int CublasEnvironment::runTestDevice() {
float one = 1;
float zero = 0;
float *a;
float *b;
CUDA_CALL(cudaMalloc(&a, sizeof(float)));
CUDA_CALL(cudaMalloc(&b, sizeof(float)));
CUDA_CALL(cudaMemcpy(a, &one, sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(b, &zero, sizeof(float), cudaMemcpyHostToDevice));
kernelCublasTest<<<1, 1>>>(device_handle_, a, b);
CUDA_CALL(cudaDeviceSynchronize());
CUDA_CALL(cudaMemcpy(&zero, b, sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaFree(a));
CUDA_CALL(cudaFree(b));
if (zero == 1) {
std::cout << "CuBLAS device test succeded\n";
return 0;
} else {
std::cerr << "ERROR in CuBLAS device test\n";
return 1;
}
}
#endif
//**********************************************************************//
void CudaContext::init() {
DEBUG_OUT("Init context...");
if (gpu_id_ >= 0) {
CUDA_CALL(cudaSetDevice(gpu_id_));
} else {
CUDA_CALL(cudaGetDevice(&gpu_id_));
}
// int gpu_count = 0;
// cuDeviceGetCount(&gpu_count);
// std::cout << "GPU devices " << gpu_count << ", using ID " << gpu_id_ << std::endl;
env_ = new CublasEnvironment(gpu_id_);
stream_id_ = 0;
rng_created_ = false;
shared_ = false;
non_blocking_ = true;
CUDA_CALL(cudaEventCreate(&event_));
prop_ = new cudaDeviceProp();
CUDA_CALL(cudaGetDeviceProperties(prop_, gpu_id_));
}
CudaContext::CudaContext(int gpu_id, bool non_blocking)
: gpu_id_(gpu_id), non_blocking_(non_blocking) {
DEBUG_OUT("Create context on GPU " << gpu_id);
this->init();
this->getStream(0);
}
CudaContext::CudaContext(cudaStream_t shared_stream, int gpu_id) : gpu_id_(gpu_id) {
DEBUG_OUT("Create context on GPU " << gpu_id << " with shared stream (on id 0)\n");
this->init();
// ignore the test for shared stream 0. PyTorch seems to like 0
// if (!shared_stream) {
// RPU_FATAL("Shared stream should not be NULL!");
//} else {
shared_ = true;
streams_.push_back(shared_stream);
//}
}
CudaContext::~CudaContext() {
DEBUG_OUT("Destroy Cuda Context...");
enforceDeviceId();
if (env_ != nullptr) {
int i_start = shared_ ? 1 : 0;
for (int i = i_start; i < streams_.size(); i++) {
cudaStreamSynchronize(streams_[i]);
cudaStreamDestroy(streams_[i]);
}
}
if (event_ != nullptr) {
cudaEventDestroy(event_);
event_ = nullptr;
}
if (env_ != nullptr) {
delete env_;
env_ = nullptr;
}
if (rng_created_) {
curandDestroyGenerator(rng_);
}
if (prop_ != nullptr) {
delete prop_;
prop_ = nullptr;
}
DEBUG_OUT("Destroyed.");
}
// copy constructor
CudaContext::CudaContext(const CudaContext &other) {
// only stream idx 0 is ever shared !
// copy construction will share the stream.
// random generator etc are NOT shared !
gpu_id_ = other.gpu_id_;
this->init();
shared_ = true;
non_blocking_ = other.non_blocking_;
// only stream 0 is ever shared !!
if (other.streams_.size() > 0) {
streams_.push_back(other.streams_[0]);
}
for (int i = 1; i < other.streams_.size(); i++) {
// rest are new streams!!
this->getStream(i);
}
stream_id_ = other.stream_id_;
if (other.rng_created_) {
this->createRandomGenerator();
}
// random states won't be copied. They will be created anew
DEBUG_OUT("CudaContext copy constructed [but only first stream shared. New streams and event!].");
}
// copy assignment
CudaContext &CudaContext::operator=(const CudaContext &other) {
DEBUG_OUT("Copy assignment ");
CudaContext tmp(other);
swap(*this, tmp);
return *this;
}
// move constructor
CudaContext::CudaContext(CudaContext &&other) {
*this = std::move(other);
DEBUG_OUT("Move constructor ");
}
// move assignment
CudaContext &CudaContext::operator=(CudaContext &&other) {
gpu_id_ = other.gpu_id_;
stream_id_ = other.stream_id_;
shared_ = other.shared_;
non_blocking_ = other.non_blocking_;
prop_ = other.prop_;
other.prop_ = nullptr;
streams_ = std::move(other.streams_);
env_ = other.env_;
other.env_ = nullptr;
rng_ = other.rng_;
other.rng_ = nullptr;
rng_created_ = other.rng_created_;
event_ = other.event_;
other.event_ = nullptr;
shared_random_states_ = std::move(other.shared_random_states_);
DEBUG_OUT("Move assignment ");
return *this;
}
void CudaContext::synchronizeContext() const {
enforceDeviceId();
for (int i = 0; i < streams_.size(); i++) {
CUDA_CALL(cudaStreamSynchronize(streams_[i]));
}
}
void CudaContext::enforceDeviceId() const {
#ifdef RPU_EXPLICIT_ENFORCE_DEVICE_ID
int gpu_id;
CUDA_CALL(cudaGetDevice(&gpu_id));
if (gpu_id != gpu_id_) {
std::cout << "WARNING wrong device detected!" << std::endl;
CUDA_CALL(cudaSetDevice(gpu_id_));
}
#endif
}
void CudaContext::synchronizeDevice() const {
enforceDeviceId();
CUDA_CALL(cudaDeviceSynchronize());
}
void CudaContext::synchronizeWith(CudaContext *c) const {
if (this->getStream() == c->getStream()) {
// do nothing since work on the same stream
} else {
this->synchronize();
c->synchronize();
}
}
void CudaContext::synchronizeWith(CudaContext *ca, CudaContext *cb) const {
if (ca->getStream() != cb->getStream()) {
ca->synchronizeWith(cb);
}
if (ca->getStream() != this->getStream()) {
this->synchronize();
}
}
void CudaContext::synchronizeStream(int idx) const {
DEBUG_OUT("Synchronize stream idx " << idx);
enforceDeviceId();
if ((idx >= 0) && (idx < streams_.size())) {
CUDA_CALL(cudaStreamSynchronize(streams_[idx]));
}
}
void CudaContext::synchronizeStream() const {
DEBUG_OUT("Synchronize stream id " << stream_id_);
enforceDeviceId();
CUDA_CALL(cudaStreamSynchronize(streams_[stream_id_]));
}
int CudaContext::getNBlocks(int size, int nthreads) const {
DEBUG_OUT("get NBlocks for size " << size);
return (size + nthreads - 1) / nthreads;
}
int CudaContext::getNStrideBlocks(int size, int nthreads) const {
DEBUG_OUT("get N Stride Blocks for size " << size);
int max_blocks = getSMCount() * maxThreadsPerBlock() / nthreads;
return MIN(getNBlocks(size, nthreads), max_blocks);
}
cudaStream_t CudaContext::getStream(int idx) {
enforceDeviceId();
DEBUG_OUT("Try to get streams " << idx);
if ((idx >= 0) && (idx < streams_.size())) {
if (stream_id_ != idx) {
stream_id_ = idx;
CUBLAS_CALL(cublasSetStream(this->getBlasHandle(), streams_[idx]));
}
return streams_[idx];
} else if (streams_.size() == idx) {
cudaStream_t s;
if (non_blocking_) {
CUDA_CALL(cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking));
} else {
CUDA_CALL(cudaStreamCreate(&s));
}
streams_.push_back(s);
stream_id_ = idx;
CUBLAS_CALL(cublasSetStream(this->getBlasHandle(), streams_[idx]));
DEBUG_OUT("Created stream id " << idx << " at : " << streams_[idx] << " ( s: " << s << ")");
return streams_[idx];
} else {
RPU_FATAL("Requested stream size mismatch.");
}
}
void CudaContext::setStream(cudaStream_t s) {
if (shared_) {
enforceDeviceId();
if (s != streams_[stream_id_]) {
if (stream_id_ != 0) {
this->synchronizeDevice();
} else {
this->synchronizeStream();
}
}
streams_[0] = s;
stream_id_ = 0;
} else {
RPU_FATAL("setStream: must be shared context.");
}
}
void CudaContext::createRandomGenerator() {
if (!rng_created_) {
enforceDeviceId();
CURAND_CALL(curandCreateGenerator(&rng_, CURAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(curandSetStream(rng_, this->getStream()));
rng_created_ = true;
}
}
void CudaContext::setRandomSeed(unsigned long long rseed) {
enforceDeviceId();
if (!rng_created_) {
this->createRandomGenerator();
}
unsigned long long seed = rseed;
if (rseed == 0) {
seed = (unsigned long long)std::chrono::high_resolution_clock::now().time_since_epoch().count();
} else {
seed = rseed;
}
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(rng_, seed));
this->synchronizeStream();
}
void CudaContext::randNormal(float *dev_array, int size, float mean, float stddev) {
if (!rng_created_) {
setRandomSeed(0); // will create random generator on the fly
}
if (stddev > 0) {
CURAND_CALL(curandGenerateNormal(rng_, dev_array, size, mean, stddev));
} else {
RPU::math::elemconst(this, dev_array, size, mean);
}
}
void CudaContext::randUniform(float *dev_array, int size) {
if (!rng_created_) {
setRandomSeed(0);
}
CURAND_CALL(curandGenerateUniform(rng_, dev_array, size));
}
curandState_t *CudaContext::getRandomStates(int size) {
int n = size;
if (n <= 0) {
n = getSMCount() * maxThreadsPerBlock();
}
if (shared_random_states_.size() <= stream_id_) {
shared_random_states_.resize(stream_id_ + 1);
}
if (!shared_random_states_[stream_id_] || (n > shared_random_states_[stream_id_]->getSize())) {
curandSetup(this, shared_random_states_[stream_id_], n, 0, false);
}
return shared_random_states_[stream_id_]->getData();
}
void CudaContext::recordWaitEvent(CudaContext *wait_on_context) {
this->recordWaitEvent(wait_on_context->getStream(), wait_on_context->getEvent());
}
void CudaContext::recordEvent() { CUDA_CALL(cudaEventRecord(event_, streams_[stream_id_])); }
void CudaContext::waitEvent(cudaEvent_t wait_on_event) {
CUDA_CALL(cudaStreamWaitEvent(streams_[stream_id_], wait_on_event, 0));
}
void CudaContext::waitEvent(CudaContext *wait_on_context) {
waitEvent(wait_on_context->getEvent());
}
void CudaContext::recordWaitEvent(cudaStream_t s) { this->recordWaitEvent(s, event_); }
void CudaContext::recordWaitEvent(cudaStream_t s, cudaEvent_t e) {
if (streams_[stream_id_] != s) {
CUDA_CALL(cudaEventRecord(e, s));
CUDA_CALL(cudaStreamWaitEvent(streams_[stream_id_], e, 0));
}
}
//**********************************************************************//
template <typename T>
CudaArray<T>::CudaArray(CudaContext *c) : size_(0), width_(0), height_(1), pitch_(0), context_(c) {}
template <typename T> CudaArray<T>::CudaArray(CudaContext *c, int n) : CudaArray(c) {
size_ = n;
width_ = n;
height_ = 1; // this needs to be one! No height>1 supported yet
if (n > 0) {
context_->enforceDeviceId();
CUDA_CALL(cudaMallocPitch(&values_, &pitch_, n * sizeof(T), height_));
}
}
template <typename T>
CudaArray<T>::CudaArray(CudaContext *c, int n, const T *host_array) : CudaArray(c, n) {
if (n > 0) {
this->assign(host_array);
context_->synchronize(); // better to synchronize; constructing is considered slow anyway
}
}
template <typename T> CudaArray<T>::~CudaArray() {
// no sync because no ownership of context !! (might be already destructed)
if ((size_ > 0) && (values_ != nullptr) && (!shared_if_)) {
context_->enforceDeviceId();
cudaFree(values_);
values_ = nullptr;
}
}
// copy constructor
template <typename T> CudaArray<T>::CudaArray(const CudaArray<T> &other) {
size_ = other.size_;
width_ = other.width_;
height_ = other.height_;
pitch_ = other.pitch_;
context_ = other.context_;
values_ = nullptr;
if (size_ > 0) {
context_->enforceDeviceId();
CUDA_CALL(cudaMallocPitch(&values_, &pitch_, size_ * sizeof(T), height_));
this->assign(other);
context_->synchronize(); // better to synchronize; constructing is slow anyway
}
if (other.shared_if_) {
this->setShared(other.values_);
}
DEBUG_OUT("CudaArray copy constructed.");
}
// copy assignment
template <typename T> CudaArray<T> &CudaArray<T>::operator=(const CudaArray<T> &other) {
context_->enforceDeviceId();
CudaArray<T> tmp(other); // seems a bit inefficient...
swap(*this, tmp);
context_->synchronize(); // need sync because of tmp
return *this;
}
// move constructor
template <typename T> CudaArray<T>::CudaArray(CudaArray<T> &&other) {
context_->enforceDeviceId();
*this = std::move(other);
}
// move assignment
template <typename T> CudaArray<T> &CudaArray<T>::operator=(CudaArray<T> &&other) {
size_ = other.size_;
other.size_ = 0;
width_ = other.width_;
other.width_ = 0;
height_ = other.height_;
other.height_ = 0;
pitch_ = other.pitch_;
other.pitch_ = 0;
context_ = other.context_;
other.context_ = nullptr;
values_ = other.values_;
other.values_ = nullptr;
shared_if_ = other.shared_if_;
return *this;
}
template <typename T> void CudaArray<T>::setConst(T set_value) {
DEBUG_OUT(
"Set (hsize,P,W,H): " << size_ << ", " << pitch_ << ", " << width_ * sizeof(T) << ", "
<< height_);
if (size_ > 0) {
context_->enforceDeviceId();
if (set_value != 0) {
RPU::math::elemconst(context_, values_, size_, set_value);
} else {
CUDA_CALL(cudaMemset2DAsync(
values_, pitch_, 0, this->getWidthBytes(), height_, context_->getStream()));
}
}
}
template <> void CudaArray<curandStateXORWOW>::setConst(curandStateXORWOW set_value) {
RPU_FATAL("Cannot set curandstates to some values.");
}
template <> void CudaArray<double *>::setConst(double *set_value) {
RPU_FATAL("Cannot set pointer types to some values.");
}
template <> void CudaArray<float *>::setConst(float *set_value) {
RPU_FATAL("Cannot set pointer types to some values.");
}
template <typename T> void CudaArray<T>::printValues(int nmax) const {
T *values = new T[size_];
this->copyTo(values); // will synchronize
int n = nmax > 0 ? MIN(nmax, size_) : size_;
for (int i = 0; i < n; ++i) {
std::cout << "[" << i << "]:" << values[i] << ", ";
}
if (n < size_) {
std::cout << "...";
}
std::cout << std::endl;
delete[] values;
}
template <> void CudaArray<curandStateXORWOW>::printValues(int nmax) const {
RPU_FATAL("Cannot print curandstates.");
}
template <typename T> void CudaArray<T>::assign(const T *host_array) {
int sz = size_ * sizeof(T);
DEBUG_OUT(
"Assign host (hsize,P,W,H): " << sz << ", " << pitch_ << ", " << width_ * sizeof(T) << ", "
<< height_);
context_->enforceDeviceId();
context_->synchronize();
CUDA_CALL(cudaMemcpy2DAsync(
values_, pitch_, host_array, sz, sz, 1, cudaMemcpyHostToDevice, context_->getStream()));
}
template <typename T>
void CudaArray<T>::assignTranspose(const T *host_array, const int m, const int n) {
// col major to row major
if (m * n != size_) {
RPU_FATAL("Size mismatch");
}
T *transposed_array = new T[size_];
for (int i = 0; i < size_; i++) {
int i_col = (i % n);
int i_row = (i / n);
transposed_array[i_col * m + i_row] = host_array[i];
}
context_->enforceDeviceId();
int sz = size_ * sizeof(T);
DEBUG_OUT(
"Assign host (hsize,P,W,H): " << sz << ", " << pitch_ << ", " << width_ * sizeof(T) << ", "
<< height_);
context_->synchronize();
CUDA_CALL(cudaMemcpy2D(
values_, pitch_, transposed_array, sz, sz, 1, cudaMemcpyHostToDevice)); // no async
delete[] transposed_array;
}
template <typename T> void CudaArray<T>::assign(const CudaArray<T> &source) {
DEBUG_OUT(
"Assign device (P,W,H): "
<< ", " << pitch_ << ", " << width_ * sizeof(T) << ", " << height_);
if (source.getSize() != size_) {
RPU_FATAL("Assignment of Cuda Array failed. Size mismatch.");
}
if ((size_ > 0) && (source.getSize() > 0)) {
cudaStream_t s = context_->getStream();
context_->synchronizeWith(source.getContext());
CUDA_CALL(cudaMemcpy2DAsync(
values_, pitch_, source.getDataConst(), source.getPitch(), source.getWidthBytes(), 1,
cudaMemcpyDeviceToDevice, s));
}
}
template <typename T> void CudaArray<T>::assignFromDevice(const T *device_array) {
DEBUG_OUT(
"Assign device (P,W,H): "
<< ", " << pitch_ << ", " << width_ * sizeof(T) << ", " << height_);
if ((size_ > 0)) {
int sz = size_ * sizeof(T);
cudaStream_t s = context_->getStream();
context_->synchronizeDevice(); // better do device-wide. Not clear where the device array lives
CUDA_CALL(
cudaMemcpy2DAsync(values_, pitch_, device_array, sz, sz, 1, cudaMemcpyDeviceToDevice, s));
}
}
template <typename T> void CudaArray<T>::setShared(T *device_array) {
// destruct
if ((size_ > 0) && (values_ != nullptr) && (!shared_if_)) {
context_->enforceDeviceId();
CUDA_CALL(cudaFree(values_));
values_ = nullptr;
}
shared_if_ = true;
values_ = device_array; // assign memory shared (memory is governed from outside)
// Caution: does not CHECK THE SIZE OF THE GIVEN ARRAY!
}
template <typename T> void CudaArray<T>::copyTo(T *host_array) const {
int sz = size_ * sizeof(T);
DEBUG_OUT(
"Copy to host (hsize,P,W,H): " << sz << ", " << pitch_ << ", " << width_ * sizeof(T) << ", "
<< height_);
if (size_ > 0) {
context_->enforceDeviceId();
CUDA_CALL(cudaMemcpy2DAsync(
host_array, sz, values_, pitch_, this->getWidthBytes(), height_, cudaMemcpyDeviceToHost,
context_->getStream()));
context_->synchronizeStream();
}
}
template <typename T> T *CudaArray<T>::getDataSafe(CudaContext *c) {
context_->synchronizeWith(c);
return values_;
}
#ifdef RPU_USE_DOUBLE
template class CudaArray<double>;
template class CudaArray<double *>;
#endif
template class CudaArray<float>;
template class CudaArray<float *>;
template class CudaArray<int>;
template class CudaArray<char>;
template class CudaArray<uint32_t>;
template class CudaArray<uint64_t>;
template class CudaArray<curandStateXORWOW>;
// reset
void resetCuda(int gpu_id) {
if (gpu_id >= 0) {
CUDA_CALL(cudaSetDevice(gpu_id));
}
CUDA_CALL(cudaDeviceReset());
CUDA_CALL(cudaFree(0));
CUDA_CALL(cudaDeviceSynchronize());
}
} // namespace RPU
|
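The second row above is dominated by library-level renames rather than launch-syntax changes: cuRAND host calls map to hipRAND (curandCreateGenerator to hiprandCreateGenerator, CURAND_RNG_PSEUDO_DEFAULT to HIPRAND_RNG_PSEUDO_DEFAULT, curandGenerateUniform to hiprandGenerateUniform) and cuBLAS maps to hipBLAS (cublasCreate to hipblasCreate, CUBLAS_STATUS_SUCCESS to HIPBLAS_STATUS_SUCCESS). Below is a minimal, standalone sketch of the cuRAND side with the hipified spellings noted in comments; the helper `fill_uniform` is illustrative and not taken from the dataset, and status checks are omitted for brevity.

```cpp
#include <cuda_runtime.h>
#include <curand.h>

// Fill a device array with uniform random floats using the cuRAND host API.
// Each call maps one-for-one to the hipRAND spelling shown in the comments.
void fill_uniform(float *d_array, size_t n, unsigned long long seed) {
  curandGenerator_t gen;                                  // hiprandGenerator_t
  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT); // hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT)
  curandSetPseudoRandomGeneratorSeed(gen, seed);          // hiprandSetPseudoRandomGeneratorSeed
  curandGenerateUniform(gen, d_array, n);                 // hiprandGenerateUniform
  cudaDeviceSynchronize();                                // hipDeviceSynchronize
  curandDestroyGenerator(gen);                            // hiprandDestroyGenerator
}
```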
a1862a755e712ad7a3cce1d5b8fa95132cdb10dd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* simulate.cu
*
 * Created on: 09/Dec/2014
* Author: Edoardo Mondoni
*/
#include "simulate.h"
#include "samples.h"
#include "sim_parameters.h"
#include "cuda_settings.h"
#include "rng.h"
#include <math.h>
#include <hiprand/hiprand_kernel.h>
/*** CONSTANT MEMORY DEFINITIONS ***/
__constant__ double d_sigma_wn;
/*** FUNCTION IMPLEMENTATIONS ***/
void perform_simulation(double * d_alpha, double * d_theta, double * d_time, double * d_matrix,
double * d_periods, unsigned int noisy_flag, hiprandState_t * d_states) {
double *d_temp_vj;
dim3 s_block(MAX_BLOCK_SIZE);
dim3 s_grid((h_n_osc-1) / MAX_BLOCK_SIZE + 1);
hipMalloc(&d_temp_vj, sizeof(double) * s_grid.x * s_block.x); //every thread gets a cell in the array
if (!noisy_flag) {
hipLaunchKernelGGL(( simulate), dim3(s_grid), dim3(s_block), 0, 0, d_alpha, d_theta, d_time, d_periods, d_matrix, d_temp_vj);
} else {
double seq_w = pow(1e06 * h_period, 2) * pow(10.0, NOISE_ELL / 10.0);
double h_sigma_wn = sqrt(seq_w / (2 * h_tstep));
hipMemcpyToSymbol(d_sigma_wn, &h_sigma_wn, sizeof(double));
hipLaunchKernelGGL(( simulate_noisy), dim3(s_grid), dim3(s_block), 0, 0, d_alpha, d_theta, d_time, d_periods, d_matrix, d_temp_vj, d_states);
}
//Freeing resources
hipFree(d_temp_vj);
}
__global__ void simulate(double * d_alpha, double * d_theta, double * d_time,
double * d_periods, double * d_matrix, double * d_temp_vj) {
const unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
double omega = 2 * PI / d_periods[idx < d_n_osc ? idx : 0];
double alpha_prev = d_alpha[idx < d_n_osc ? idx : 0];
double temp_gamma, current;
/*
* The most immediate way to exclude out-of-bounds threads (in the last block), i.e. an if-guard
* enclosing the entire body (idx < d_n_osc), is not adequate here because the body contains a
* __syncthreads() instruction. The behavior of __syncthreads() is undefined (and very likely to
* lead to infinite wait times) if it is enclosed in an if-clause which is not guaranteed to
* evaluate to the same truth value for ALL threads in a block. While that is true for every block
* up to the (n-1)-th, the last block likely contains some threads which do not correspond to an
* oscillator and which should remain idle.
* To solve this issue, we extended the d_temp_vj array to contain a cell for out-of-bounds threads
* as well. Those threads compute meaningless garbage values, reach __syncthreads() and are then
* left out of the rest of the code: the last part of the for-loop, in fact, CAN be enclosed in
* an if-clause because it doesn't contain synchronization primitives.
*/
for (unsigned int t = 1; t < d_n_steps; t++) {
d_temp_vj[idx] = vo_lut(d_time[t] + alpha_prev, d_periods[idx < d_n_osc ? idx : 0]);
__syncthreads(); //wait for the temporary values to be available
if(idx < d_n_osc) {
temp_gamma = gamma_lut(d_time[t] + alpha_prev, d_periods[idx < d_n_osc ? idx : 0]);
current = 0.0;
//This for loop could become another kernel if we could use Dynamic Parallelism
//(CUDA >= 5.0 and Compute Capability >= 3.5 required) --> ROOM FOR ENHANCEMENTS
for (unsigned int k = 0; k < d_n_osc; k++)
current = current + d_matrix[idx + k * d_n_osc] * d_temp_vj[k];
alpha_prev = alpha_prev + d_tstep * (temp_gamma * current); //calculation of alpha
d_alpha[idx + t * d_n_osc] = alpha_prev; //storing alpha in the result matrix
d_theta[idx + t * d_n_osc] = omega * (d_time[t] + alpha_prev); //calculating theta
}
}
}
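/*
 * Reference sketch (illustrative helper, not used by the kernels): a scalar restatement of one
 * iteration of the loop above for a single oscillator i. The LUT value and the coupling inputs
 * are passed in explicitly so the helper stays self-contained; in the kernel, matrix entries are
 * read as d_matrix[i + k * n_osc] and v[k] holds vo_lut(time[t] + alpha_k, T_k).
 */
static inline double simulate_step_reference(double alpha_prev,          // alpha_i at step t-1
                                             double gamma_i,             // gamma_lut(time[t] + alpha_prev, T_i)
                                             const double *matrix_row_i, // row i of the coupling matrix
                                             const double *v,            // per-oscillator LUT outputs (d_temp_vj)
                                             unsigned int n_osc,
                                             double tstep) {
    double current = 0.0;
    for (unsigned int k = 0; k < n_osc; k++)
        current += matrix_row_i[k] * v[k];         // same coupling sum as in the kernel
    return alpha_prev + tstep * gamma_i * current; // alpha_i(t) = alpha_i(t-1) + dt * Gamma_i * I_i
}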
__global__ void simulate_noisy(double * d_alpha, double * d_theta, double * d_time,
double * d_periods, double * d_matrix, double * d_temp_vj, hiprandState_t * d_states) {
const unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
double omega = 2 * PI / d_periods[idx < d_n_osc ? idx : 0];
double alpha_prev = d_alpha[idx < d_n_osc ? idx : 0];
double temp_gamma, current;
//See comments to the non-noisy version of the kernel
for (unsigned int t = 1; t < d_n_steps; t++) {
d_temp_vj[idx] = vo_lut(d_time[t] + alpha_prev, d_periods[idx < d_n_osc ? idx : 0]);
__syncthreads(); //wait for the temporary values to be available
if(idx < d_n_osc) {
temp_gamma = gamma_lut(d_time[t] + alpha_prev, d_periods[idx < d_n_osc ? idx : 0]);
current = 0.0;
for (unsigned int k = 0; k < d_n_osc; k++)
current = current + d_matrix[idx + k * d_n_osc] * d_temp_vj[k];
alpha_prev = alpha_prev + d_tstep * (temp_gamma * current + d_sigma_wn * hiprand_normal_double(&d_states[idx])); //calculation of alpha
d_alpha[idx + t * d_n_osc] = alpha_prev; //storing alpha in the result matrix
d_theta[idx + t * d_n_osc] = omega * (d_time[t] + alpha_prev); //calculating theta
}
}
}
__device__ double gamma_lut(double instant, double wave_period) {
double tau = fmod(instant, wave_period);
double index = N_SAMPLES * tau / wave_period;
unsigned int int_index = (unsigned int) index;
return d_gamma[int_index] + (index - int_index) * (d_gamma[int_index < N_SAMPLES ? int_index + 1 : N_SAMPLES] - d_gamma[int_index]);
}
__device__ double vo_lut(double instant, double wave_period) {
double tau = fmod(instant, wave_period);
double index = N_SAMPLES * tau / wave_period;
unsigned int int_index = (unsigned int) index;
return d_vo[int_index] + (index - int_index) * (d_vo[int_index < N_SAMPLES ? int_index + 1 : N_SAMPLES] - d_vo[int_index]);
}
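/*
 * Reference sketch (illustrative helper over a host-side table): the lookup-and-interpolate
 * pattern shared by gamma_lut() and vo_lut() above. Like the device code, it assumes the table
 * provides a valid sample at index int_index + 1 whenever int_index < N_SAMPLES.
 */
static inline double lut_interp_reference(const double *table, double instant, double wave_period) {
    double tau = fmod(instant, wave_period);      // position within one period
    double index = N_SAMPLES * tau / wave_period; // fractional sample index
    unsigned int int_index = (unsigned int)index;
    unsigned int next = (int_index < N_SAMPLES) ? int_index + 1 : N_SAMPLES;
    return table[int_index] + (index - int_index) * (table[next] - table[int_index]); // linear interpolation
}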
|
a1862a755e712ad7a3cce1d5b8fa95132cdb10dd.cu
|
/*
* simulate.cu
*
* Created on: 09/Dec/2014
* Author: Edoardo Mondoni
*/
#include "simulate.h"
#include "samples.h"
#include "sim_parameters.h"
#include "cuda_settings.h"
#include "rng.h"
#include <math.h>
#include <curand_kernel.h>
/*** CONSTANT MEMORY DEFINITIONS ***/
__constant__ double d_sigma_wn;
/*** FUNCTION IMPLEMENTATIONS ***/
void perform_simulation(double * d_alpha, double * d_theta, double * d_time, double * d_matrix,
double * d_periods, unsigned int noisy_flag, curandState * d_states) {
double *d_temp_vj;
dim3 s_block(MAX_BLOCK_SIZE);
dim3 s_grid((h_n_osc-1) / MAX_BLOCK_SIZE + 1);
cudaMalloc(&d_temp_vj, sizeof(double) * s_grid.x * s_block.x); //every thread gets a cell in the array
if (!noisy_flag) {
simulate<<<s_grid, s_block>>>(d_alpha, d_theta, d_time, d_periods, d_matrix, d_temp_vj);
} else {
double seq_w = pow(1e06 * h_period, 2) * pow(10.0, NOISE_ELL / 10.0);
double h_sigma_wn = sqrt(seq_w / (2 * h_tstep));
cudaMemcpyToSymbol(d_sigma_wn, &h_sigma_wn, sizeof(double));
simulate_noisy<<<s_grid, s_block>>>(d_alpha, d_theta, d_time, d_periods, d_matrix, d_temp_vj, d_states);
}
//Freeing resources
cudaFree(d_temp_vj);
}
__global__ void simulate(double * d_alpha, double * d_theta, double * d_time,
double * d_periods, double * d_matrix, double * d_temp_vj) {
const unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
double omega = 2 * PI / d_periods[idx < d_n_osc ? idx : 0];
double alpha_prev = d_alpha[idx < d_n_osc ? idx : 0];
double temp_gamma, current;
/*
* The most immediate way to exclude out-of-bounds threads (in the last block), i.e. an if-guard
* enclosing the entire body (idx < d_n_osc), is not adequate here because the body contains a
* __syncthreads() instruction. The behavior of __syncthreads() is undefined (and very likely to
* lead to infinite wait times) if it is enclosed in an if-clause which is not guaranteed to
* evaluate to the same truth value for ALL threads in a block. While that is true for every block
* up to the (n-1)-th, the last block likely contains some threads which do not correspond to an
* oscillator and which should remain idle.
* To solve this issue, we extended the d_temp_vj array to contain a cell for out-of-bounds threads
* as well. Those threads compute meaningless garbage values, reach __syncthreads() and are then
* left out of the rest of the code: the last part of the for-loop, in fact, CAN be enclosed in
* an if-clause because it doesn't contain synchronization primitives.
*/
for (unsigned int t = 1; t < d_n_steps; t++) {
d_temp_vj[idx] = vo_lut(d_time[t] + alpha_prev, d_periods[idx < d_n_osc ? idx : 0]);
__syncthreads(); //wait for the temporary values to be available
if(idx < d_n_osc) {
temp_gamma = gamma_lut(d_time[t] + alpha_prev, d_periods[idx < d_n_osc ? idx : 0]);
current = 0.0;
//This for loop could become another kernel if we could use Dynamic Parallelism
//(CUDA >= 5.0 and Compute Capability >= 3.5 required) --> ROOM FOR ENHANCEMENTS
for (unsigned int k = 0; k < d_n_osc; k++)
current = current + d_matrix[idx + k * d_n_osc] * d_temp_vj[k];
alpha_prev = alpha_prev + d_tstep * (temp_gamma * current); //calculation of alpha
d_alpha[idx + t * d_n_osc] = alpha_prev; //storing alpha in the result matrix
d_theta[idx + t * d_n_osc] = omega * (d_time[t] + alpha_prev); //calculating theta
}
}
}
__global__ void simulate_noisy(double * d_alpha, double * d_theta, double * d_time,
double * d_periods, double * d_matrix, double * d_temp_vj, curandState * d_states) {
const unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
double omega = 2 * PI / d_periods[idx < d_n_osc ? idx : 0];
double alpha_prev = d_alpha[idx < d_n_osc ? idx : 0];
double temp_gamma, current;
//See comments to the non-noisy version of the kernel
for (unsigned int t = 1; t < d_n_steps; t++) {
d_temp_vj[idx] = vo_lut(d_time[t] + alpha_prev, d_periods[idx < d_n_osc ? idx : 0]);
__syncthreads(); //wait for the temporary values to be available
if(idx < d_n_osc) {
temp_gamma = gamma_lut(d_time[t] + alpha_prev, d_periods[idx < d_n_osc ? idx : 0]);
current = 0.0;
for (unsigned int k = 0; k < d_n_osc; k++)
current = current + d_matrix[idx + k * d_n_osc] * d_temp_vj[k];
alpha_prev = alpha_prev + d_tstep * (temp_gamma * current + d_sigma_wn * curand_normal_double(&d_states[idx])); //calculation of alpha
d_alpha[idx + t * d_n_osc] = alpha_prev; //storing alpha in the result matrix
d_theta[idx + t * d_n_osc] = omega * (d_time[t] + alpha_prev); //calculating theta
}
}
}
__device__ double gamma_lut(double instant, double wave_period) {
double tau = fmod(instant, wave_period);
double index = N_SAMPLES * tau / wave_period;
unsigned int int_index = (unsigned int) index;
return d_gamma[int_index] + (index - int_index) * (d_gamma[int_index < N_SAMPLES ? int_index + 1 : N_SAMPLES] - d_gamma[int_index]);
}
__device__ double vo_lut(double instant, double wave_period) {
double tau = fmod(instant, wave_period);
double index = N_SAMPLES * tau / wave_period;
unsigned int int_index = (unsigned int) index;
return d_vo[int_index] + (index - int_index) * (d_vo[int_index < N_SAMPLES ? int_index + 1 : N_SAMPLES] - d_vo[int_index]);
}
|
ad94f775854e9fd89ebe17b29779325daf8375e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "int8_utils.cuh"
#include "src/fastertransformer/kernels/reduce_kernel_utils.cuh"
#include "src/fastertransformer/kernels/softmax_int8_kernels.h"
#include "src/fastertransformer/utils/cuda_utils.h"
namespace fastertransformer {
// the input is a series of sub-matrices of m = seq_len, n = seq_len, CUBLASLT_ORDER_COL32
// grid = (seq_len, batch_size, head_num)
// block.x = max(32, (seq_len/4 + 31)/32*32)
// for int32_t I; int8 O;
template<typename T>
__global__ void softmax_COL32(int8_t* output,
const int32_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* scalar1c,
const float* amax_ptr,
const int head_num_x_seq_len,
const int seq_len_x_seq_len)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b) * __ldg(scalar1c);
int mask_id;
int threadIdx4 = threadIdx.x << 2;
char4* buf4Ptr = (char4*)output;
bool qual = threadIdx4 < seq_len;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
char4 tmp4 = {0, 0, 0, 0};
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len) + (threadIdx4 & 0xffffffe0) * seq_len
+ (seq_id << 5) + (threadIdx4 & 31);
// set softmax of padding word to 0
float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual) {
buf4Ptr[inIdx >> 2] = tmp4;
}
continue;
}
float4 floatTmp4 = {0.0f, 0.0f, 0.0f, 0.0f};
if (qual) {
floatTmp4.x = static_cast<float>(__ldg(input + inIdx)) * scalar1;
floatTmp4.y = static_cast<float>(__ldg(input + inIdx + 1)) * scalar1;
floatTmp4.z = static_cast<float>(__ldg(input + inIdx + 2)) * scalar1;
floatTmp4.w = static_cast<float>(__ldg(input + inIdx + 3)) * scalar1;
}
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual) {
mask_id = threadIdx4 + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
// for x
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f;
floatTmp4.x = floatTmp4.x + mask_val;
max_val = fmaxf(max_val, floatTmp4.x);
// for y
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 1))) * -10000.0f;
floatTmp4.y = floatTmp4.y + mask_val;
max_val = fmaxf(max_val, floatTmp4.y);
// for z
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 2))) * -10000.0f;
floatTmp4.z = floatTmp4.z + mask_val;
max_val = fmaxf(max_val, floatTmp4.z);
// for w
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 3))) * -10000.0f;
floatTmp4.w = floatTmp4.w + mask_val;
max_val = fmaxf(max_val, floatTmp4.w);
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
if (qual) {
floatTmp4.x = __expf(floatTmp4.x - s_max);
sum_val += floatTmp4.x;
floatTmp4.y = __expf(floatTmp4.y - s_max);
sum_val += floatTmp4.y;
floatTmp4.z = __expf(floatTmp4.z - s_max);
sum_val += floatTmp4.z;
floatTmp4.w = __expf(floatTmp4.w - s_max);
sum_val += floatTmp4.w;
}
sum_val = blockDim.x <= 32 ? warpReduceSum(sum_val) : blockReduceSum<float>(sum_val);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual) {
tmp4.x = float_to_int8_rn(floatTmp4.x * s_sum);
tmp4.y = float_to_int8_rn(floatTmp4.y * s_sum);
tmp4.z = float_to_int8_rn(floatTmp4.z * s_sum);
tmp4.w = float_to_int8_rn(floatTmp4.w * s_sum);
buf4Ptr[inIdx >> 2] = tmp4;
}
}
}
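/*
 * Reference sketch (host-side restatement, not used by the kernels): the COL32 index arithmetic
 * used for inIdx above. For an m-row x n-column sub-matrix stored with CUBLASLT_ORDER_COL32, the
 * columns are grouped into slabs of 32; inside a slab each row occupies 32 consecutive elements.
 * This only mirrors the device expression (col & 0xffffffe0) * m + (row << 5) + (col & 31).
 */
static inline int col32_index_reference(int row, int col, int m /* rows, here seq_len */)
{
    return (col & 0xffffffe0) * m // start of the 32-column slab containing col
           + (row << 5)           // 32 elements per row inside the slab
           + (col & 31);          // position of col within the slab
}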
// the input is a series of sub-matrices of m = seq_len, n = seq_len_padded, CUBLASLT_ORDER_COL32
// seq_len_padded = (seq_len+31)/32*32
// grid = (seq_len, batch_size, head_num)
// block.x = max(32, (seq_len_padded/4 + 31)/32*32)
// for int8_t IO;
template<typename T>
__global__ void softmax_COL32_varlen(int8_t* output,
const int8_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const int seq_len_padded,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
const int seq_len_x_seq_len,
const int seq_len_x_seq_len_padded)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b);
int mask_id;
int threadIdx4 = threadIdx.x << 2;
char4* buf4Ptr = (char4*)output;
const char4* inBuf4Ptr = (const char4*)input;
const bool qual = threadIdx4 < seq_len;
const bool qual_padded = threadIdx4 < seq_len_padded;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
char4 tmp4 = {0, 0, 0, 0};
int inIdx = ((blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len_padded)
+ (threadIdx4 & 0xffffffe0) * seq_len + (seq_id << 5) + (threadIdx4 & 31))
>> 2;
// set softmax of padding word in rows to 0
const float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual_padded) {
buf4Ptr[inIdx] = tmp4;
}
continue;
}
// set softmax of padding word in cols to 0
float4 floatTmp4 = {0.0f, 0.0f, 0.0f, 0.0f};
if (qual) {
tmp4 = __ldg(inBuf4Ptr + inIdx);
floatTmp4.x = static_cast<float>(tmp4.x) * scalar1;
floatTmp4.y = static_cast<float>(tmp4.y) * scalar1;
floatTmp4.z = static_cast<float>(tmp4.z) * scalar1;
floatTmp4.w = static_cast<float>(tmp4.w) * scalar1;
}
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual) {
mask_id = threadIdx4 + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
// for x
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f;
floatTmp4.x = floatTmp4.x + mask_val;
max_val = fmaxf(max_val, floatTmp4.x);
// for y
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 1))) * -10000.0f;
floatTmp4.y = floatTmp4.y + mask_val;
max_val = fmaxf(max_val, floatTmp4.y);
// for z
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 2))) * -10000.0f;
floatTmp4.z = floatTmp4.z + mask_val;
max_val = fmaxf(max_val, floatTmp4.z);
// for w
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 3))) * -10000.0f;
floatTmp4.w = floatTmp4.w + mask_val;
max_val = fmaxf(max_val, floatTmp4.w);
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
if (qual) {
floatTmp4.x = __expf(floatTmp4.x - s_max);
sum_val += floatTmp4.x;
floatTmp4.y = __expf(floatTmp4.y - s_max);
sum_val += floatTmp4.y;
floatTmp4.z = __expf(floatTmp4.z - s_max);
sum_val += floatTmp4.z;
floatTmp4.w = __expf(floatTmp4.w - s_max);
sum_val += floatTmp4.w;
}
sum_val = blockDim.x <= 32 ? warpReduceSum(sum_val) : blockReduceSum<float>(sum_val);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual_padded) {
tmp4.x = qual ? float_to_int8_rn(floatTmp4.x * s_sum) : static_cast<int8_t>(0);
tmp4.y = qual ? float_to_int8_rn(floatTmp4.y * s_sum) : static_cast<int8_t>(0);
tmp4.z = qual ? float_to_int8_rn(floatTmp4.z * s_sum) : static_cast<int8_t>(0);
tmp4.w = qual ? float_to_int8_rn(floatTmp4.w * s_sum) : static_cast<int8_t>(0);
buf4Ptr[inIdx] = tmp4;
}
}
}
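/*
 * Reference sketch (illustrative helper, assuming prob >= 0 and amax > 0): in the kernels above,
 * s_sum = 127 / (sum_val + 1e-6) / amax folds the softmax normalization and the int8 output
 * quantization into one factor, so float_to_int8_rn(expf(x - s_max) * s_sum) amounts to rounding
 * softmax(x) * 127 / amax to int8, i.e. amax / 127 acts as the output dequantization scale.
 */
static inline int8_t quantize_prob_reference(float prob, float amax)
{
    float q = prob * 127.f / amax; // scale the probability into the int8 range
    if (q > 127.f) {
        q = 127.f;                 // saturate, matching the intent of float_to_int8_rn
    }
    return static_cast<int8_t>(q + 0.5f); // round half up; prob is non-negative here
}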
// the input is a series of sub-matrices of m = seq_len, n = seq_len_padded, CUBLASLT_ORDER_COL32
// seq_len_padded = (seq_len+31)/32*32
// grid = (seq_len, batch_size, head_num)
// block.x = max(32, (seq_len_padded + 31)/32*32)
// for int8_t IO, processed one element per thread;
template<typename T>
__global__ void softmax_COL32_perElement_varlen(int8_t* output,
const int8_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const int seq_len_padded,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
const int seq_len_x_seq_len,
const int seq_len_x_seq_len_padded)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b);
int mask_id;
const int tidx = threadIdx.x;
const bool qual = tidx < seq_len;
const bool qual_padded = tidx < seq_len_padded;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
int8_t tmp = 0;
int inIdx = ((blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len_padded) + (tidx & 0xffffffe0) * seq_len
+ (seq_id << 5) + (tidx & 31));
// set softmax of padding word in rows to 0
const float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual_padded) {
output[inIdx] = tmp;
}
continue;
}
// set softmax of padding word in cols to 0
float floatTmp = qual ? (static_cast<float>(__ldg(input + inIdx)) * scalar1) : 0.0f;
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual) {
mask_id = tidx + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f;
floatTmp = floatTmp + mask_val;
}
max_val = blockDim.x <= 32 ? warpReduceMax(floatTmp) : blockReduceMax<float>(floatTmp);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
floatTmp = qual ? __expf(floatTmp - s_max) : floatTmp;
sum_val = blockDim.x <= 32 ? warpReduceSum(floatTmp) : blockReduceSum<float>(floatTmp);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual_padded) {
tmp = qual ? float_to_int8_rn(floatTmp * s_sum) : static_cast<int8_t>(0);
output[inIdx] = tmp;
}
}
}
// the input is a series of sub-matrices of m = seq_len, n = seq_len, CUBLASLT_ORDER_COL32
// grid = (seq_len, batch_size, head_num)
// block.x = (seq_len + 31)/32*32
// for int32_t I; int8 O;
// for seq_len <= 32
template<typename T>
__global__ void softmax_COL32_LE32(int8_t* output,
const int32_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* scalar1c,
const float* amax_ptr,
const int head_num_x_seq_len,
const int seq_len_x_seq_len)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b) * __ldg(scalar1c);
int mask_id;
int threadIdxx = threadIdx.x;
bool qual = threadIdxx < seq_len;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len) + (threadIdxx & 0xffffffe0) * seq_len
+ (seq_id << 5) + (threadIdxx & 31);
// set softmax of padding word to 0
float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual) {
output[inIdx] = 0;
}
continue;
}
float floatTmp = qual ? static_cast<float>(__ldg(input + inIdx)) * scalar1 : 0.0f;
float mask_val, max_val;
__shared__ float s_max, s_sum;
mask_id = qual ? threadIdxx + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len : 0;
mask_val = qual ? (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f : 0.0f;
floatTmp = qual ? floatTmp + mask_val : 0.0f;
max_val = qual ? floatTmp : -1e20f;
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
floatTmp = qual ? __expf(floatTmp - s_max) : 0.0f;
float sum_val = blockDim.x <= 32 ? warpReduceSum(floatTmp) : blockReduceSum<float>(floatTmp);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual) {
output[inIdx] = float_to_int8_rn(floatTmp * s_sum);
}
}
}
// the input is a series of sub-matrices of m = seq_len, n = seq_len_padded, CUBLASLT_ORDER_COL32
// seq_len_padded = (seq_len+31)/32*32
// attr_mask is [batch_size, seq_len, seq_len]
// grid = (seq_len, batch_size, head_num)
// block.x = seq_len_padded
// for int8_t IO;
// for seq_len_padded == 32
template<typename T>
__global__ void softmax_COL32_LE32_varlen(int8_t* output,
const int8_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const int seq_len_padded,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
const int seq_len_x_seq_len,
const int seq_len_x_seq_len_padded)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b);
int mask_id;
int threadIdxx = threadIdx.x;
const bool qual = threadIdxx < seq_len;
const bool qual_padded = threadIdxx < seq_len_padded;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len_padded)
+ (threadIdxx & 0xffffffe0) * seq_len + (seq_id << 5) + (threadIdxx & 31);
// set softmax of padding word in rows to 0
float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual_padded) {
output[inIdx] = 0;
}
continue;
}
float mask_val, max_val;
__shared__ float s_max, s_sum;
// set softmax of padding word in cols to 0
float floatTmp = qual ? static_cast<float>(__ldg(input + inIdx)) * scalar1 : 0.0f;
mask_id = qual ? threadIdxx + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len : 0;
mask_val = qual ? (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f : 0.0f;
floatTmp = qual ? floatTmp + mask_val : 0.0f;
max_val = qual ? floatTmp : -1e20f;
max_val = warpReduceMax(max_val);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
floatTmp = qual ? __expf(floatTmp - s_max) : 0.0f;
float sum_val = blockDim.x <= 32 ? warpReduceSum(floatTmp) : blockReduceSum<float>(floatTmp);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual_padded) {
output[inIdx] = qual ? float_to_int8_rn(floatTmp * s_sum) : static_cast<int8_t>(0);
}
}
}
// the input is a series of sub-matrices of m = seq_len, n = seq_len, CUBLASLT_ORDER_COL32
// grid = (seq_len, batch_size, head_num)
// block.x = max(32, (seq_len/2 + 31)/32*32)
// for int32_t I; int8 O;
// for seq_len in (32, 64]
template<typename T>
__global__ void softmax_COL32_LE64(int8_t* output,
const int32_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* scalar1c,
const float* amax_ptr,
const int head_num_x_seq_len,
const int seq_len_x_seq_len)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b) * __ldg(scalar1c);
int mask_id;
int threadIdx2 = threadIdx.x << 1;
char2* buf2Ptr = (char2*)output;
bool qual = threadIdx2 < seq_len;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
char2 tmp2 = {0, 0};
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len) + (threadIdx2 & 0xffffffe0) * seq_len
+ (seq_id << 5) + (threadIdx2 & 31);
// set softmax of padding word to 0
float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual) {
buf2Ptr[inIdx >> 1] = tmp2;
}
continue;
}
float2 floatTmp2 = {0.0f, 0.0f};
if (qual) {
floatTmp2.x = static_cast<float>(__ldg(input + inIdx)) * scalar1;
floatTmp2.y = static_cast<float>(__ldg(input + inIdx + 1)) * scalar1;
}
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual) {
mask_id = threadIdx2 + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
// for x
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f;
floatTmp2.x = floatTmp2.x + mask_val;
// for y
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 1))) * -10000.0f;
floatTmp2.y = floatTmp2.y + mask_val;
max_val = fmaxf(floatTmp2.x, floatTmp2.y);
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
if (qual) {
floatTmp2.x = __expf(floatTmp2.x - s_max);
sum_val += floatTmp2.x;
floatTmp2.y = __expf(floatTmp2.y - s_max);
sum_val += floatTmp2.y;
}
sum_val = blockDim.x <= 32 ? warpReduceSum(sum_val) : blockReduceSum<float>(sum_val);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual) {
tmp2.x = float_to_int8_rn(floatTmp2.x * s_sum);
tmp2.y = float_to_int8_rn(floatTmp2.y * s_sum);
buf2Ptr[inIdx >> 1] = tmp2;
}
}
}
// the input is a series of sub-matrices of m = seq_len, n = seq_len_padded, CUBLASLT_ORDER_COL32
// seq_len_padded = (seq_len+31)/32*32
// grid = (seq_len, batch_size, head_num)
// block.x = 32
// for int8_t IO
// for seq_len in (32, 64]
template<typename T>
__global__ void softmax_COL32_LE64_varlen(int8_t* output,
const int8_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const int seq_len_padded,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
const int seq_len_x_seq_len,
const int seq_len_x_seq_len_padded)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b);
int mask_id;
int threadIdx2 = threadIdx.x << 1;
char2* buf2Ptr = (char2*)output;
const char2* inBuf2Ptr = (const char2*)input;
const bool qual = threadIdx2 < seq_len;
const bool qual_padded = threadIdx2 < seq_len_padded;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
char2 tmp2 = {0, 0};
int inIdx = ((blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len_padded)
+ (threadIdx2 & 0xffffffe0) * seq_len + (seq_id << 5) + (threadIdx2 & 31))
>> 1;
// set softmax of padding word in rows to 0
float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual_padded) {
buf2Ptr[inIdx] = tmp2;
}
continue;
}
// set softmax of padding word in cols to 0
float2 floatTmp2 = {0.0f, 0.0f};
if (qual) {
tmp2 = __ldg(inBuf2Ptr + inIdx);
floatTmp2.x = static_cast<float>(tmp2.x) * scalar1;
floatTmp2.y = static_cast<float>(tmp2.y) * scalar1;
}
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual) {
mask_id = threadIdx2 + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
// for x
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f;
floatTmp2.x = floatTmp2.x + mask_val;
// for y
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 1))) * -10000.0f;
floatTmp2.y = floatTmp2.y + mask_val;
max_val = fmaxf(floatTmp2.x, floatTmp2.y);
}
max_val = warpReduceMax(max_val);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
if (qual) {
floatTmp2.x = __expf(floatTmp2.x - s_max);
sum_val += floatTmp2.x;
floatTmp2.y = __expf(floatTmp2.y - s_max);
sum_val += floatTmp2.y;
}
sum_val = warpReduceSum(sum_val);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual_padded) {
tmp2.x = qual ? float_to_int8_rn(floatTmp2.x * s_sum) : static_cast<int8_t>(0);
tmp2.y = qual ? float_to_int8_rn(floatTmp2.y * s_sum) : static_cast<int8_t>(0);
buf2Ptr[inIdx] = tmp2;
}
}
}
template<typename T>
void invokeSoftmaxCOL32(int8_t* output,
const int32_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* scalar1c,
const float* amax_ptr,
hipStream_t stream)
{
dim3 grid, block;
grid.x = seq_len;
grid.y = batch_size;
grid.z = head_num;
if (seq_len <= 32) {
if (batch_size * head_num > 960) {
grid.x = ceil(float(seq_len) / 32.0f);
}
block.x = (seq_len + 31) / 32 * 32;
hipLaunchKernelGGL(( softmax_COL32_LE32), dim3(grid), dim3(block), 0, stream, output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
scalar1a,
scalar1b,
scalar1c,
amax_ptr,
seq_len * head_num,
seq_len * seq_len);
}
else if (seq_len <= 64) {
assert(seq_len % 2 == 0);
block.x = (seq_len / 2 + 31) / 32 * 32;
if (batch_size * head_num > 960) {
grid.x = ceil(float(seq_len) / 32.0f);
}
hipLaunchKernelGGL(( softmax_COL32_LE64), dim3(grid), dim3(block), 0, stream, output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
scalar1a,
scalar1b,
scalar1c,
amax_ptr,
seq_len * head_num,
seq_len * seq_len);
}
else {
assert(seq_len % 4 == 0);
block.x = (seq_len / 4 + 31) / 32 * 32;
hipLaunchKernelGGL(( softmax_COL32), dim3(grid), dim3(block), 0, stream, output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
scalar1a,
scalar1b,
scalar1c,
amax_ptr,
seq_len * head_num,
seq_len * seq_len);
}
}
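/*
 * Usage sketch (illustrative values and buffer names; all pointers are assumed to be valid device
 * buffers laid out as described in the kernel comments above):
 */
static inline void example_invoke_softmax_col32(int8_t*        d_out,
                                                const int32_t* d_qk,
                                                const float*   d_attr_mask,
                                                const float*   d_scalar1b,
                                                const float*   d_scalar1c,
                                                const float*   d_amax,
                                                hipStream_t    stream)
{
    const int   batch_size = 8, head_num = 12, seq_len = 128; // illustrative dimensions
    const float scalar1a = 0.125f;                            // e.g. 1/sqrt(size_per_head) for 64
    // seq_len = 128 is > 64 and divisible by 4, so the launcher above dispatches to softmax_COL32
    // with block.x = (seq_len / 4 + 31) / 32 * 32 = 32, i.e. four elements per thread.
    invokeSoftmaxCOL32(d_out, d_qk, d_attr_mask, batch_size, head_num, seq_len,
                       scalar1a, d_scalar1b, d_scalar1c, d_amax, stream);
}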
template void invokeSoftmaxCOL32(int8_t* output,
const int32_t* input,
const float* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* scalar1c,
const float* amax_ptr,
hipStream_t stream);
template void invokeSoftmaxCOL32(int8_t* output,
const int32_t* input,
const half* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* scalar1c,
const float* amax_ptr,
hipStream_t stream);
template<typename T>
void invokeSoftmaxCOL32(int8_t* output,
const int8_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
hipStream_t stream)
{
dim3 grid, block;
grid.x = seq_len;
grid.y = batch_size;
grid.z = head_num;
const int seq_len_padded = (seq_len + 31) / 32 * 32;
if (seq_len <= 32) {
if (batch_size * head_num > 960) {
grid.x = ceil(float(seq_len) / 32.0f);
}
block.x = seq_len_padded;
hipLaunchKernelGGL(( softmax_COL32_LE32_varlen), dim3(grid), dim3(block), 0, stream, output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
seq_len_padded,
scalar1a,
scalar1b,
amax_ptr,
seq_len * seq_len,
seq_len * seq_len_padded);
}
else if (seq_len <= 64 && (seq_len % 2 == 0)) {
block.x = 32;
if (batch_size * head_num > 960) {
grid.x = ceil(float(seq_len) / 32.0f);
}
hipLaunchKernelGGL(( softmax_COL32_LE64_varlen), dim3(grid), dim3(block), 0, stream, output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
seq_len_padded,
scalar1a,
scalar1b,
amax_ptr,
seq_len * seq_len,
seq_len * seq_len_padded);
}
else if (seq_len > 64 && (seq_len % 4 == 0)) {
block.x = (seq_len_padded / 4 + 31) / 32 * 32;
hipLaunchKernelGGL(( softmax_COL32_varlen), dim3(grid), dim3(block), 0, stream, output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
seq_len_padded,
scalar1a,
scalar1b,
amax_ptr,
seq_len * seq_len,
seq_len * seq_len_padded);
}
else {
block.x = (seq_len_padded + 31) / 32 * 32;
hipLaunchKernelGGL(( softmax_COL32_perElement_varlen), dim3(grid), dim3(block), 0, stream, output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
seq_len_padded,
scalar1a,
scalar1b,
amax_ptr,
seq_len * seq_len,
seq_len * seq_len_padded);
}
}
template void invokeSoftmaxCOL32(int8_t* output,
const int8_t* input,
const float* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
hipStream_t stream);
template void invokeSoftmaxCOL32(int8_t* output,
const int8_t* input,
const half* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
hipStream_t stream);
/******************* invokeSoftmaxCOL32 ***********************/
// grid = (window_len/word_per_thread, window_num*num_head, batch_size)
// block.x = max(32, (window_len + 31)/32*32)
// qk_buf is [batch, window_num, num_head, window_len, window_len]
// attn_mask is [window_num, window_len, window_len] + row-major
// relative_pos_bias is [num_head, window_len, window_len] + row-major
template<typename T>
__global__ void softmax_INT8IO_kernel_COL32(int8_t* a_buf,
int8_t* qk_buf_int8,
const T* attn_mask,
const T* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const int window_len_x_window_len,
const float scalar,
const float* deQ_scale_ptr,
const float* out_scale_ptr)
{
bool qual = threadIdx.x < window_len;
const int padded_winlen = (window_len + 31) / 32 * 32;
for (int window_id = blockIdx.x; window_id < window_len; window_id += gridDim.x) {
float tmp = -1e20f;
__shared__ float s_mean, s_max;
int qk_offset = (blockIdx.z * gridDim.y + blockIdx.y) * window_len * padded_winlen
+ ((threadIdx.x >> 5) << 5) * window_len + (window_id << 5) + (threadIdx.x & 31);
if (qual) {
const int offset_in_window = window_id * window_len + threadIdx.x;
const int relative_pos_bias_offset = (blockIdx.y % num_head) * window_len_x_window_len + offset_in_window;
float mask_val =
(attn_mask == nullptr) ?
0.0f :
static_cast<float>(
__ldg(attn_mask + ((blockIdx.y / num_head) * window_len_x_window_len + offset_in_window)));
tmp = scalar * static_cast<float>(qk_buf_int8[qk_offset]) * __ldg(deQ_scale_ptr) + mask_val
+ static_cast<float>(__ldg(relative_pos_bias + relative_pos_bias_offset));
}
float max_val = blockReduceMax<float>(tmp);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float qk_tmp = qual ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if (threadIdx.x == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
a_buf[qk_offset] = qual ? float_to_int8_rn(qk_tmp * s_mean * __ldg(out_scale_ptr)) : 0;
}
}
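/*
 * Reference sketch (illustrative helper, not called by the kernels): per element, the kernel above
 * dequantizes, biases, normalizes and requantizes as
 *     x   = scalar * int8_qk * deQ_scale + attn_mask + relative_pos_bias
 *     p   = exp(x - max_x) / (sum exp(x - max_x) + 1e-6)
 *     out = float_to_int8_rn(p * out_scale)
 * The helper below restates only the first (dequantize + bias) step.
 */
static inline float dequantized_logit_reference(int8_t qk, float scalar, float deQ_scale,
                                                float mask, float rel_pos_bias)
{
    // same per-element dequantization and bias addition as in the kernel above
    return scalar * static_cast<float>(qk) * deQ_scale + mask + rel_pos_bias;
}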
// grid = (window_len/word_per_thread, window_num*num_head, batch_size)
// block.x = (window_len / 4 + 31)/32*32
// a_buf/qk_buf is [batch, window_num, num_head, window_len, window_len] + COL32
// attn_mask is [window_num, window_len, window_len] + row-major
// relative_pos_bias is [num_head, window_len, window_len] + row-major
template<typename T4, typename T>
__global__ void softmax_INT8IO_kernel_COL32_element4(int8_t* a_buf,
int8_t* qk_buf_int8,
const T4* attn_mask,
const T4* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const int window_len_x_window_len,
const float scalar,
const float* deQ_scale_ptr,
const float* out_scale_ptr)
{
const int padded_winlen = (window_len + 31) / 32 * 32;
const int col_id = threadIdx.x << 2;
bool qual = col_id < window_len;
const T4 zero = {T(0.0f), T(0.0f), T(0.0f), T(0.0f)};
const float deQ_scale = __ldg(deQ_scale_ptr);
const float out_scale = __ldg(out_scale_ptr);
char4* inputPtr = (char4*)qk_buf_int8;
char4* outputPtr = (char4*)a_buf;
for (int window_id = blockIdx.x; window_id < window_len; window_id += gridDim.x) {
float tmp = -1e20f;
char4 qk_val;
float4 local_qk_val;
__shared__ float s_mean, s_max;
int qk_offset = (blockIdx.z * gridDim.y + blockIdx.y) * window_len * padded_winlen
+ ((col_id >> 5) << 5) * window_len + (window_id << 5) + (col_id & 31);
if (qual) {
const int offset_in_window = window_id * window_len + col_id;
const int relative_pos_bias_offset = (blockIdx.y % num_head) * window_len_x_window_len + offset_in_window;
const int attn_mask_offset = (blockIdx.y / num_head) * window_len_x_window_len + offset_in_window;
const T4 bias_val = relative_pos_bias[relative_pos_bias_offset >> 2];
const T4 mask_val = (attn_mask == nullptr) ? zero : attn_mask[attn_mask_offset >> 2];
qk_val = inputPtr[qk_offset >> 2];
local_qk_val.x = static_cast<float>(qk_val.x);
local_qk_val.y = static_cast<float>(qk_val.y);
local_qk_val.z = static_cast<float>(qk_val.z);
local_qk_val.w = static_cast<float>(qk_val.w);
local_qk_val.x =
scalar * local_qk_val.x * deQ_scale + static_cast<float>(mask_val.x) + static_cast<float>(bias_val.x);
local_qk_val.y =
scalar * local_qk_val.y * deQ_scale + static_cast<float>(mask_val.y) + static_cast<float>(bias_val.y);
local_qk_val.z =
scalar * local_qk_val.z * deQ_scale + static_cast<float>(mask_val.z) + static_cast<float>(bias_val.z);
local_qk_val.w =
scalar * local_qk_val.w * deQ_scale + static_cast<float>(mask_val.w) + static_cast<float>(bias_val.w);
tmp = local_qk_val.x > local_qk_val.y ? local_qk_val.x : local_qk_val.y;
tmp = tmp > local_qk_val.z ? tmp : local_qk_val.z;
tmp = tmp > local_qk_val.w ? tmp : local_qk_val.w;
}
float max_val = blockDim.x <= 32 ? warpReduceMax<float>(tmp) : blockReduceMax<float>(tmp);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
local_qk_val.x = qual ? __expf(local_qk_val.x - s_max) : 0.0f;
local_qk_val.y = qual ? __expf(local_qk_val.y - s_max) : 0.0f;
local_qk_val.z = qual ? __expf(local_qk_val.z - s_max) : 0.0f;
local_qk_val.w = qual ? __expf(local_qk_val.w - s_max) : 0.0f;
float sum_val = blockDim.x <= 32 ?
warpReduceSum<float>(local_qk_val.x + local_qk_val.y + local_qk_val.z + local_qk_val.w) :
blockReduceSum<float>(local_qk_val.x + local_qk_val.y + local_qk_val.z + local_qk_val.w);
if (threadIdx.x == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if (qual) {
char4 outTmp;
outTmp.x = float_to_int8_rn(local_qk_val.x * s_mean * out_scale);
outTmp.y = float_to_int8_rn(local_qk_val.y * s_mean * out_scale);
outTmp.z = float_to_int8_rn(local_qk_val.z * s_mean * out_scale);
outTmp.w = float_to_int8_rn(local_qk_val.w * s_mean * out_scale);
outputPtr[qk_offset >> 2] = outTmp;
}
}
}
template<typename T>
void invokeSoftmaxWithRelPosBiasCOL32(int8_t* a_buf,
int8_t* qk_buf_int8,
const T* attn_mask,
const T* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const float scalar,
const float* deQ_scale_ptr,
const float* out_scale_ptr,
hipStream_t stream)
{
dim3 grid(window_len, window_num * num_head, batch_size);
if (window_len % 4 == 0 && window_len / 4 >= 32) {
dim3 block((window_len / 4 + 31) / 32 * 32);
if (batch_size * window_num * num_head > 960) {
grid.x = ceil(float(window_len) / 32.0f);
}
if (std::is_same<T, float>::value) {
hipLaunchKernelGGL(( softmax_INT8IO_kernel_COL32_element4<float4, float>)
, dim3(grid), dim3(block), 0, stream, a_buf,
qk_buf_int8,
(const float4*)attn_mask,
(const float4*)relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
scalar,
deQ_scale_ptr,
out_scale_ptr);
}
else if (std::is_same<T, half>::value) {
hipLaunchKernelGGL(( softmax_INT8IO_kernel_COL32_element4<half4, half>)
, dim3(grid), dim3(block), 0, stream, a_buf,
qk_buf_int8,
(const half4*)attn_mask,
(const half4*)relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
scalar,
deQ_scale_ptr,
out_scale_ptr);
}
}
else {
dim3 block((window_len + 31) / 32 * 32);
hipLaunchKernelGGL(( softmax_INT8IO_kernel_COL32), dim3(grid), dim3(block), 0, stream, a_buf,
qk_buf_int8,
attn_mask,
relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
scalar,
deQ_scale_ptr,
out_scale_ptr);
}
}
template void invokeSoftmaxWithRelPosBiasCOL32(int8_t* a_buf,
int8_t* qk_buf_int8,
const float* attn_mask,
const float* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const float scalar,
const float* deQ_scale_ptr,
const float* output_scale_ptr,
hipStream_t stream);
template void invokeSoftmaxWithRelPosBiasCOL32(int8_t* a_buf,
int8_t* qk_buf_int8,
const half* attn_mask,
const half* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const float scalar,
const float* deQ_scale_ptr,
const float* output_scale_ptr,
hipStream_t stream);
} // namespace fastertransformer
|
ad94f775854e9fd89ebe17b29779325daf8375e5.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "int8_utils.cuh"
#include "src/fastertransformer/kernels/reduce_kernel_utils.cuh"
#include "src/fastertransformer/kernels/softmax_int8_kernels.h"
#include "src/fastertransformer/utils/cuda_utils.h"
namespace fastertransformer {
// the input is a series of sub-matrices of m = seq_len, n = seq_len, CUBLASLT_ORDER_COL32
// grid = (seq_len, batch_size, head_num)
// block.x = max(32, (seq_len/4 + 31)/32*32)
// for int32_t I; int8 O;
template<typename T>
__global__ void softmax_COL32(int8_t* output,
const int32_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* scalar1c,
const float* amax_ptr,
const int head_num_x_seq_len,
const int seq_len_x_seq_len)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b) * __ldg(scalar1c);
int mask_id;
int threadIdx4 = threadIdx.x << 2;
char4* buf4Ptr = (char4*)output;
bool qual = threadIdx4 < seq_len;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
char4 tmp4 = {0, 0, 0, 0};
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len) + (threadIdx4 & 0xffffffe0) * seq_len
+ (seq_id << 5) + (threadIdx4 & 31);
// set softmax of padding word to 0
float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual) {
buf4Ptr[inIdx >> 2] = tmp4;
}
continue;
}
float4 floatTmp4 = {0.0f, 0.0f, 0.0f, 0.0f};
if (qual) {
floatTmp4.x = static_cast<float>(__ldg(input + inIdx)) * scalar1;
floatTmp4.y = static_cast<float>(__ldg(input + inIdx + 1)) * scalar1;
floatTmp4.z = static_cast<float>(__ldg(input + inIdx + 2)) * scalar1;
floatTmp4.w = static_cast<float>(__ldg(input + inIdx + 3)) * scalar1;
}
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual) {
mask_id = threadIdx4 + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
// for x
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f;
floatTmp4.x = floatTmp4.x + mask_val;
max_val = fmaxf(max_val, floatTmp4.x);
// for y
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 1))) * -10000.0f;
floatTmp4.y = floatTmp4.y + mask_val;
max_val = fmaxf(max_val, floatTmp4.y);
// for z
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 2))) * -10000.0f;
floatTmp4.z = floatTmp4.z + mask_val;
max_val = fmaxf(max_val, floatTmp4.z);
// for w
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 3))) * -10000.0f;
floatTmp4.w = floatTmp4.w + mask_val;
max_val = fmaxf(max_val, floatTmp4.w);
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
if (qual) {
floatTmp4.x = __expf(floatTmp4.x - s_max);
sum_val += floatTmp4.x;
floatTmp4.y = __expf(floatTmp4.y - s_max);
sum_val += floatTmp4.y;
floatTmp4.z = __expf(floatTmp4.z - s_max);
sum_val += floatTmp4.z;
floatTmp4.w = __expf(floatTmp4.w - s_max);
sum_val += floatTmp4.w;
}
sum_val = blockDim.x <= 32 ? warpReduceSum(sum_val) : blockReduceSum<float>(sum_val);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual) {
tmp4.x = float_to_int8_rn(floatTmp4.x * s_sum);
tmp4.y = float_to_int8_rn(floatTmp4.y * s_sum);
tmp4.z = float_to_int8_rn(floatTmp4.z * s_sum);
tmp4.w = float_to_int8_rn(floatTmp4.w * s_sum);
buf4Ptr[inIdx >> 2] = tmp4;
}
}
}
// the input is a series of sub-matrices of m = seq_len, n = seq_len_padded, CUBLASLT_ORDER_COL32
// seq_len_padded = (seq_len+31)/32*32
// grid = (seq_len, batch_size, head_num)
// block.x = max(32, (seq_len_padded/4 + 31)/32*32)
// for int8_t IO;
template<typename T>
__global__ void softmax_COL32_varlen(int8_t* output,
const int8_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const int seq_len_padded,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
const int seq_len_x_seq_len,
const int seq_len_x_seq_len_padded)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b);
int mask_id;
int threadIdx4 = threadIdx.x << 2;
char4* buf4Ptr = (char4*)output;
const char4* inBuf4Ptr = (const char4*)input;
const bool qual = threadIdx4 < seq_len;
const bool qual_padded = threadIdx4 < seq_len_padded;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
char4 tmp4 = {0, 0, 0, 0};
int inIdx = ((blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len_padded)
+ (threadIdx4 & 0xffffffe0) * seq_len + (seq_id << 5) + (threadIdx4 & 31))
>> 2;
// set softmax of padding word in rows to 0
const float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual_padded) {
buf4Ptr[inIdx] = tmp4;
}
continue;
}
// set softmax of padding word in cols to 0
float4 floatTmp4 = {0.0f, 0.0f, 0.0f, 0.0f};
if (qual) {
tmp4 = __ldg(inBuf4Ptr + inIdx);
floatTmp4.x = static_cast<float>(tmp4.x) * scalar1;
floatTmp4.y = static_cast<float>(tmp4.y) * scalar1;
floatTmp4.z = static_cast<float>(tmp4.z) * scalar1;
floatTmp4.w = static_cast<float>(tmp4.w) * scalar1;
}
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual) {
mask_id = threadIdx4 + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
// for x
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f;
floatTmp4.x = floatTmp4.x + mask_val;
max_val = fmaxf(max_val, floatTmp4.x);
// for y
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 1))) * -10000.0f;
floatTmp4.y = floatTmp4.y + mask_val;
max_val = fmaxf(max_val, floatTmp4.y);
// for z
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 2))) * -10000.0f;
floatTmp4.z = floatTmp4.z + mask_val;
max_val = fmaxf(max_val, floatTmp4.z);
// for w
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 3))) * -10000.0f;
floatTmp4.w = floatTmp4.w + mask_val;
max_val = fmaxf(max_val, floatTmp4.w);
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
if (qual) {
floatTmp4.x = __expf(floatTmp4.x - s_max);
sum_val += floatTmp4.x;
floatTmp4.y = __expf(floatTmp4.y - s_max);
sum_val += floatTmp4.y;
floatTmp4.z = __expf(floatTmp4.z - s_max);
sum_val += floatTmp4.z;
floatTmp4.w = __expf(floatTmp4.w - s_max);
sum_val += floatTmp4.w;
}
sum_val = blockDim.x <= 32 ? warpReduceSum(sum_val) : blockReduceSum<float>(sum_val);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual_padded) {
tmp4.x = qual ? float_to_int8_rn(floatTmp4.x * s_sum) : static_cast<int8_t>(0);
tmp4.y = qual ? float_to_int8_rn(floatTmp4.y * s_sum) : static_cast<int8_t>(0);
tmp4.z = qual ? float_to_int8_rn(floatTmp4.z * s_sum) : static_cast<int8_t>(0);
tmp4.w = qual ? float_to_int8_rn(floatTmp4.w * s_sum) : static_cast<int8_t>(0);
buf4Ptr[inIdx] = tmp4;
}
}
}
// the input is a series of sub-matrices of m = seq_len, n = seq_len_padded, CUBLASLT_ORDER_COL32
// seq_len_padded = (seq_len+31)/32*32
// grid = (seq_len, batch_size, head_num)
// block.x = max(32, (seq_len_padded + 31)/32*32)
// for int8_t IO, processed one element per thread;
template<typename T>
__global__ void softmax_COL32_perElement_varlen(int8_t* output,
const int8_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const int seq_len_padded,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
const int seq_len_x_seq_len,
const int seq_len_x_seq_len_padded)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b);
int mask_id;
const int tidx = threadIdx.x;
const bool qual = tidx < seq_len;
const bool qual_padded = tidx < seq_len_padded;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
int8_t tmp = 0;
int inIdx = ((blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len_padded) + (tidx & 0xffffffe0) * seq_len
+ (seq_id << 5) + (tidx & 31));
// set softmax of padding word in rows to 0
const float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual_padded) {
output[inIdx] = tmp;
}
continue;
}
// set softmax of padding word in cols to 0
float floatTmp = qual ? (static_cast<float>(__ldg(input + inIdx)) * scalar1) : 0.0f;
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual) {
mask_id = tidx + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f;
floatTmp = floatTmp + mask_val;
}
max_val = blockDim.x <= 32 ? warpReduceMax(floatTmp) : blockReduceMax<float>(floatTmp);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
floatTmp = qual ? __expf(floatTmp - s_max) : floatTmp;
sum_val = blockDim.x <= 32 ? warpReduceSum(floatTmp) : blockReduceSum<float>(floatTmp);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual_padded) {
tmp = qual ? float_to_int8_rn(floatTmp * s_sum) : static_cast<int8_t>(0);
output[inIdx] = tmp;
}
}
}
// the input is a series of sub-matrices of m = seq_len, n = seq_len, CUBLASLT_ORDER_COL32
// grid = (seq_len, batch_size, head_num)
// block.x = (seq_len + 31)/32*32
// for int32_t I; int8 O;
// for seq_len <= 32
template<typename T>
__global__ void softmax_COL32_LE32(int8_t* output,
const int32_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* scalar1c,
const float* amax_ptr,
const int head_num_x_seq_len,
const int seq_len_x_seq_len)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b) * __ldg(scalar1c);
int mask_id;
int threadIdxx = threadIdx.x;
bool qual = threadIdxx < seq_len;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len) + (threadIdxx & 0xffffffe0) * seq_len
+ (seq_id << 5) + (threadIdxx & 31);
// set softmax of padding word to 0
float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual) {
output[inIdx] = 0;
}
continue;
}
float floatTmp = qual ? static_cast<float>(__ldg(input + inIdx)) * scalar1 : 0.0f;
float mask_val, max_val;
__shared__ float s_max, s_sum;
mask_id = qual ? threadIdxx + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len : 0;
mask_val = qual ? (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f : 0.0f;
floatTmp = qual ? floatTmp + mask_val : 0.0f;
max_val = qual ? floatTmp : -1e20f;
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
floatTmp = qual ? __expf(floatTmp - s_max) : 0.0f;
float sum_val = blockDim.x <= 32 ? warpReduceSum(floatTmp) : blockReduceSum<float>(floatTmp);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual) {
output[inIdx] = float_to_int8_rn(floatTmp * s_sum);
}
}
}
// the input is a series of sub-matrices of m = seq_len, n = seq_len_padded, CUBLASLT_ORDER_COL32
// seq_len_padded = (seq_len+31)/32*32
// attr_mask is [batch_size, seq_len, seq_len]
// grid = (seq_len, batch_size, head_num)
// block.x = seq_len_padded
// for int8_t IO;
// for seq_len_padded == 32
template<typename T>
__global__ void softmax_COL32_LE32_varlen(int8_t* output,
const int8_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const int seq_len_padded,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
const int seq_len_x_seq_len,
const int seq_len_x_seq_len_padded)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b);
int mask_id;
int threadIdxx = threadIdx.x;
const bool qual = threadIdxx < seq_len;
const bool qual_padded = threadIdxx < seq_len_padded;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len_padded)
+ (threadIdxx & 0xffffffe0) * seq_len + (seq_id << 5) + (threadIdxx & 31);
// set softmax of padding word in rows to 0
float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual_padded) {
output[inIdx] = 0;
}
continue;
}
float mask_val, max_val;
__shared__ float s_max, s_sum;
// set softmax of padding word in cols to 0
float floatTmp = qual ? static_cast<float>(__ldg(input + inIdx)) * scalar1 : 0.0f;
mask_id = qual ? threadIdxx + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len : 0;
mask_val = qual ? (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f : 0.0f;
floatTmp = qual ? floatTmp + mask_val : 0.0f;
max_val = qual ? floatTmp : -1e20f;
max_val = warpReduceMax(max_val);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
floatTmp = qual ? __expf(floatTmp - s_max) : 0.0f;
float sum_val = blockDim.x <= 32 ? warpReduceSum(floatTmp) : blockReduceSum<float>(floatTmp);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual_padded) {
output[inIdx] = qual ? float_to_int8_rn(floatTmp * s_sum) : static_cast<int8_t>(0);
}
}
}
// the input is a series of sub-matrices of m = seq_len, n = seq_len, CUBLASLT_ORDER_COL32
// grid = (seq_len, batch_size, head_num)
// block.x = max(32, (seq_len/2 + 31)/32*32)
// for int32_t I; int8 O;
// for seq_len in (32, 64]
template<typename T>
__global__ void softmax_COL32_LE64(int8_t* output,
const int32_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* scalar1c,
const float* amax_ptr,
const int head_num_x_seq_len,
const int seq_len_x_seq_len)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b) * __ldg(scalar1c);
int mask_id;
int threadIdx2 = threadIdx.x << 1;
char2* buf2Ptr = (char2*)output;
bool qual = threadIdx2 < seq_len;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
char2 tmp2 = {0, 0};
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len) + (threadIdx2 & 0xffffffe0) * seq_len
+ (seq_id << 5) + (threadIdx2 & 31);
// set softmax of padding word to 0
float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual) {
buf2Ptr[inIdx >> 1] = tmp2;
}
continue;
}
float2 floatTmp2 = {0.0f, 0.0f};
if (qual) {
floatTmp2.x = static_cast<float>(__ldg(input + inIdx)) * scalar1;
floatTmp2.y = static_cast<float>(__ldg(input + inIdx + 1)) * scalar1;
}
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual) {
mask_id = threadIdx2 + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
// for x
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f;
floatTmp2.x = floatTmp2.x + mask_val;
// for y
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 1))) * -10000.0f;
floatTmp2.y = floatTmp2.y + mask_val;
max_val = fmaxf(floatTmp2.x, floatTmp2.y);
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
if (qual) {
floatTmp2.x = __expf(floatTmp2.x - s_max);
sum_val += floatTmp2.x;
floatTmp2.y = __expf(floatTmp2.y - s_max);
sum_val += floatTmp2.y;
}
sum_val = blockDim.x <= 32 ? warpReduceSum(sum_val) : blockReduceSum<float>(sum_val);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual) {
tmp2.x = float_to_int8_rn(floatTmp2.x * s_sum);
tmp2.y = float_to_int8_rn(floatTmp2.y * s_sum);
buf2Ptr[inIdx >> 1] = tmp2;
}
}
}
// input are a series of sub-matrixes of m = seq_len, n = seq_len_padded, CUBLASLT_ORDER_COL32
// seq_len_padded = (seq_len+31)/32*32
// grid = (seq_len, batch_size, head_num)
// block.x = 32
// for int8_t IO
// for seq_len in (32, 64]
template<typename T>
__global__ void softmax_COL32_LE64_varlen(int8_t* output,
const int8_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const int seq_len_padded,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
const int seq_len_x_seq_len,
const int seq_len_x_seq_len_padded)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b);
int mask_id;
int threadIdx2 = threadIdx.x << 1;
char2* buf2Ptr = (char2*)output;
const char2* inBuf2Ptr = (const char2*)input;
const bool qual = threadIdx2 < seq_len;
const bool qual_padded = threadIdx2 < seq_len_padded;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x) {
char2 tmp2 = {0, 0};
int inIdx = ((blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len_padded)
+ (threadIdx2 & 0xffffffe0) * seq_len + (seq_id << 5) + (threadIdx2 & 31))
>> 1;
// set softmax of padding word in rows to 0
float mask_in_seq = static_cast<float>(__ldg(attr_mask + (blockIdx.y * seq_len_x_seq_len + seq_id)));
if (mask_in_seq < 0.1f) {
if (qual_padded) {
buf2Ptr[inIdx] = tmp2;
}
continue;
}
// set softmax of padding word in cols to 0
float2 floatTmp2 = {0.0f, 0.0f};
if (qual) {
tmp2 = __ldg(inBuf2Ptr + inIdx);
floatTmp2.x = static_cast<float>(tmp2.x) * scalar1;
floatTmp2.y = static_cast<float>(tmp2.y) * scalar1;
}
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual) {
mask_id = threadIdx2 + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
// for x
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id))) * -10000.0f;
floatTmp2.x = floatTmp2.x + mask_val;
// for y
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask + mask_id + 1))) * -10000.0f;
floatTmp2.y = floatTmp2.y + mask_val;
max_val = fmaxf(floatTmp2.x, floatTmp2.y);
}
max_val = warpReduceMax(max_val);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
if (qual) {
floatTmp2.x = __expf(floatTmp2.x - s_max);
sum_val += floatTmp2.x;
floatTmp2.y = __expf(floatTmp2.y - s_max);
sum_val += floatTmp2.y;
}
sum_val = warpReduceSum(sum_val);
if (threadIdx.x == 0) {
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual_padded) {
tmp2.x = qual ? float_to_int8_rn(floatTmp2.x * s_sum) : static_cast<int8_t>(0);
tmp2.y = qual ? float_to_int8_rn(floatTmp2.y * s_sum) : static_cast<int8_t>(0);
buf2Ptr[inIdx] = tmp2;
}
}
}
template<typename T>
void invokeSoftmaxCOL32(int8_t* output,
const int32_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* scalar1c,
const float* amax_ptr,
cudaStream_t stream)
{
dim3 grid, block;
grid.x = seq_len;
grid.y = batch_size;
grid.z = head_num;
if (seq_len <= 32) {
if (batch_size * head_num > 960) {
grid.x = ceil(float(seq_len) / 32.0f);
}
block.x = (seq_len + 31) / 32 * 32;
softmax_COL32_LE32<<<grid, block, 0, stream>>>(output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
scalar1a,
scalar1b,
scalar1c,
amax_ptr,
seq_len * head_num,
seq_len * seq_len);
}
else if (seq_len <= 64) {
assert(seq_len % 2 == 0);
block.x = (seq_len / 2 + 31) / 32 * 32;
if (batch_size * head_num > 960) {
grid.x = ceil(float(seq_len) / 32.0f);
}
softmax_COL32_LE64<<<grid, block, 0, stream>>>(output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
scalar1a,
scalar1b,
scalar1c,
amax_ptr,
seq_len * head_num,
seq_len * seq_len);
}
else {
assert(seq_len % 4 == 0);
block.x = (seq_len / 4 + 31) / 32 * 32;
softmax_COL32<<<grid, block, 0, stream>>>(output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
scalar1a,
scalar1b,
scalar1c,
amax_ptr,
seq_len * head_num,
seq_len * seq_len);
}
}
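// Illustrative wrapper (a sketch only; size_per_head and the scale-pointer names are assumptions,
// not part of this file): scalar1a typically carries the 1/sqrt(size_per_head) attention scaling,
// scalar1b/scalar1c dequantize the int32 Q*K^T GEMM output, and amax_ptr requantizes the softmax
// probabilities to int8.
template<typename T>
static void invokeSoftmaxCOL32Scaled(int8_t* qk_int8_out,
                                     const int32_t* qk_int32_in,
                                     const T* attn_mask,
                                     const int batch_size,
                                     const int head_num,
                                     const int seq_len,
                                     const int size_per_head,
                                     const float* q_deQ_scale_ptr,
                                     const float* k_deQ_scale_ptr,
                                     const float* softmax_amax_ptr,
                                     cudaStream_t stream)
{
    invokeSoftmaxCOL32(qk_int8_out,
                       qk_int32_in,
                       attn_mask,
                       batch_size,
                       head_num,
                       seq_len,
                       1.0f / sqrtf(float(size_per_head)),
                       q_deQ_scale_ptr,
                       k_deQ_scale_ptr,
                       softmax_amax_ptr,
                       stream);
}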
template void invokeSoftmaxCOL32(int8_t* output,
const int32_t* input,
const float* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* scalar1c,
const float* amax_ptr,
cudaStream_t stream);
template void invokeSoftmaxCOL32(int8_t* output,
const int32_t* input,
const half* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* scalar1c,
const float* amax_ptr,
cudaStream_t stream);
template<typename T>
void invokeSoftmaxCOL32(int8_t* output,
const int8_t* input,
const T* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
cudaStream_t stream)
{
dim3 grid, block;
grid.x = seq_len;
grid.y = batch_size;
grid.z = head_num;
const int seq_len_padded = (seq_len + 31) / 32 * 32;
if (seq_len <= 32) {
if (batch_size * head_num > 960) {
grid.x = ceil(float(seq_len) / 32.0f);
}
block.x = seq_len_padded;
softmax_COL32_LE32_varlen<<<grid, block, 0, stream>>>(output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
seq_len_padded,
scalar1a,
scalar1b,
amax_ptr,
seq_len * seq_len,
seq_len * seq_len_padded);
}
else if (seq_len <= 64 && (seq_len % 2 == 0)) {
block.x = 32;
if (batch_size * head_num > 960) {
grid.x = ceil(float(seq_len) / 32.0f);
}
softmax_COL32_LE64_varlen<<<grid, block, 0, stream>>>(output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
seq_len_padded,
scalar1a,
scalar1b,
amax_ptr,
seq_len * seq_len,
seq_len * seq_len_padded);
}
else if (seq_len > 64 && (seq_len % 4 == 0)) {
block.x = (seq_len_padded / 4 + 31) / 32 * 32;
softmax_COL32_varlen<<<grid, block, 0, stream>>>(output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
seq_len_padded,
scalar1a,
scalar1b,
amax_ptr,
seq_len * seq_len,
seq_len * seq_len_padded);
}
else {
block.x = (seq_len_padded + 31) / 32 * 32;
softmax_COL32_perElement_varlen<<<grid, block, 0, stream>>>(output,
input,
attr_mask,
batch_size,
head_num,
seq_len,
seq_len_padded,
scalar1a,
scalar1b,
amax_ptr,
seq_len * seq_len,
seq_len * seq_len_padded);
}
}
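// Dispatch summary for the int8-in/int8-out path above (a restatement of the branches, for reference):
//   seq_len <= 32                          -> softmax_COL32_LE32_varlen,       block.x = seq_len_padded
//   32 < seq_len <= 64 && seq_len % 2 == 0 -> softmax_COL32_LE64_varlen,       block.x = 32 (char2 loads)
//   seq_len > 64 && seq_len % 4 == 0       -> softmax_COL32_varlen,            block.x = (seq_len_padded/4 + 31)/32*32
//   any other seq_len                      -> softmax_COL32_perElement_varlen, block.x = (seq_len_padded + 31)/32*32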
template void invokeSoftmaxCOL32(int8_t* output,
const int8_t* input,
const float* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
cudaStream_t stream);
template void invokeSoftmaxCOL32(int8_t* output,
const int8_t* input,
const half* attr_mask,
const int batch_size,
const int head_num,
const int seq_len,
const float scalar1a,
const float* scalar1b,
const float* amax_ptr,
cudaStream_t stream);
/******************* invokeSoftmaxCOL32 ***********************/
// grid = (window_len/word_per_thread, window_num*num_head, batch_size)
// block.x = max(32, (window_len + 31)/32*32)
// qk_buf is [batch, window_num, num_head, window_len, window_len]
// attn_mask is [window_num, window_len, window_len] + row-major
// relative_pos_bias is [num_head, window_len, window_len] + row-major
template<typename T>
__global__ void softmax_INT8IO_kernel_COL32(int8_t* a_buf,
int8_t* qk_buf_int8,
const T* attn_mask,
const T* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const int window_len_x_window_len,
const float scalar,
const float* deQ_scale_ptr,
const float* out_scale_ptr)
{
bool qual = threadIdx.x < window_len;
const int padded_winlen = (window_len + 31) / 32 * 32;
for (int window_id = blockIdx.x; window_id < window_len; window_id += gridDim.x) {
float tmp = -1e20f;
__shared__ float s_mean, s_max;
int qk_offset = (blockIdx.z * gridDim.y + blockIdx.y) * window_len * padded_winlen
+ ((threadIdx.x >> 5) << 5) * window_len + (window_id << 5) + (threadIdx.x & 31);
if (qual) {
const int offset_in_window = window_id * window_len + threadIdx.x;
const int relative_pos_bias_offset = (blockIdx.y % num_head) * window_len_x_window_len + offset_in_window;
float mask_val =
(attn_mask == nullptr) ?
0.0f :
static_cast<float>(
__ldg(attn_mask + ((blockIdx.y / num_head) * window_len_x_window_len + offset_in_window)));
tmp = scalar * static_cast<float>(qk_buf_int8[qk_offset]) * __ldg(deQ_scale_ptr) + mask_val
+ static_cast<float>(__ldg(relative_pos_bias + relative_pos_bias_offset));
}
float max_val = blockReduceMax<float>(tmp);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
float qk_tmp = qual ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if (threadIdx.x == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
a_buf[qk_offset] = qual ? float_to_int8_rn(qk_tmp * s_mean * __ldg(out_scale_ptr)) : 0;
}
}
// grid = (window_len/word_per_thread, window_num*num_head, batch_size)
// block.x = (window_len / 4 + 31)/32*32
// a_buf/qk_buf is [batch, window_num, num_head, window_len, window_len] + COL32
// attn_mask is [window_num, window_len, window_len] + row-major
// relative_pos_bias is [num_head, window_len, window_len] + row-major
template<typename T4, typename T>
__global__ void softmax_INT8IO_kernel_COL32_element4(int8_t* a_buf,
int8_t* qk_buf_int8,
const T4* attn_mask,
const T4* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const int window_len_x_window_len,
const float scalar,
const float* deQ_scale_ptr,
const float* out_scale_ptr)
{
const int padded_winlen = (window_len + 31) / 32 * 32;
const int col_id = threadIdx.x << 2;
bool qual = col_id < window_len;
const T4 zero = {T(0.0f), T(0.0f), T(0.0f), T(0.0f)};
const float deQ_scale = __ldg(deQ_scale_ptr);
const float out_scale = __ldg(out_scale_ptr);
char4* inputPtr = (char4*)qk_buf_int8;
char4* outputPtr = (char4*)a_buf;
for (int window_id = blockIdx.x; window_id < window_len; window_id += gridDim.x) {
float tmp = -1e20f;
char4 qk_val;
float4 local_qk_val;
__shared__ float s_mean, s_max;
int qk_offset = (blockIdx.z * gridDim.y + blockIdx.y) * window_len * padded_winlen
+ ((col_id >> 5) << 5) * window_len + (window_id << 5) + (col_id & 31);
if (qual) {
const int offset_in_window = window_id * window_len + col_id;
const int relative_pos_bias_offset = (blockIdx.y % num_head) * window_len_x_window_len + offset_in_window;
const int attn_mask_offset = (blockIdx.y / num_head) * window_len_x_window_len + offset_in_window;
const T4 bias_val = relative_pos_bias[relative_pos_bias_offset >> 2];
const T4 mask_val = (attn_mask == nullptr) ? zero : attn_mask[attn_mask_offset >> 2];
qk_val = inputPtr[qk_offset >> 2];
local_qk_val.x = static_cast<float>(qk_val.x);
local_qk_val.y = static_cast<float>(qk_val.y);
local_qk_val.z = static_cast<float>(qk_val.z);
local_qk_val.w = static_cast<float>(qk_val.w);
local_qk_val.x =
scalar * local_qk_val.x * deQ_scale + static_cast<float>(mask_val.x) + static_cast<float>(bias_val.x);
local_qk_val.y =
scalar * local_qk_val.y * deQ_scale + static_cast<float>(mask_val.y) + static_cast<float>(bias_val.y);
local_qk_val.z =
scalar * local_qk_val.z * deQ_scale + static_cast<float>(mask_val.z) + static_cast<float>(bias_val.z);
local_qk_val.w =
scalar * local_qk_val.w * deQ_scale + static_cast<float>(mask_val.w) + static_cast<float>(bias_val.w);
tmp = local_qk_val.x > local_qk_val.y ? local_qk_val.x : local_qk_val.y;
tmp = tmp > local_qk_val.z ? tmp : local_qk_val.z;
tmp = tmp > local_qk_val.w ? tmp : local_qk_val.w;
}
float max_val = blockDim.x <= 32 ? warpReduceMax<float>(tmp) : blockReduceMax<float>(tmp);
if (threadIdx.x == 0) {
s_max = max_val;
}
__syncthreads();
local_qk_val.x = qual ? __expf(local_qk_val.x - s_max) : 0.0f;
local_qk_val.y = qual ? __expf(local_qk_val.y - s_max) : 0.0f;
local_qk_val.z = qual ? __expf(local_qk_val.z - s_max) : 0.0f;
local_qk_val.w = qual ? __expf(local_qk_val.w - s_max) : 0.0f;
float sum_val = blockDim.x <= 32 ?
warpReduceSum<float>(local_qk_val.x + local_qk_val.y + local_qk_val.z + local_qk_val.w) :
blockReduceSum<float>(local_qk_val.x + local_qk_val.y + local_qk_val.z + local_qk_val.w);
if (threadIdx.x == 0) {
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if (qual) {
char4 outTmp;
outTmp.x = float_to_int8_rn(local_qk_val.x * s_mean * out_scale);
outTmp.y = float_to_int8_rn(local_qk_val.y * s_mean * out_scale);
outTmp.z = float_to_int8_rn(local_qk_val.z * s_mean * out_scale);
outTmp.w = float_to_int8_rn(local_qk_val.w * s_mean * out_scale);
outputPtr[qk_offset >> 2] = outTmp;
}
}
}
template<typename T>
void invokeSoftmaxWithRelPosBiasCOL32(int8_t* a_buf,
int8_t* qk_buf_int8,
const T* attn_mask,
const T* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const float scalar,
const float* deQ_scale_ptr,
const float* out_scale_ptr,
cudaStream_t stream)
{
dim3 grid(window_len, window_num * num_head, batch_size);
if (window_len % 4 == 0 && window_len / 4 >= 32) {
dim3 block((window_len / 4 + 31) / 32 * 32);
if (batch_size * window_num * num_head > 960) {
grid.x = ceil(float(window_len) / 32.0f);
}
if (std::is_same<T, float>::value) {
softmax_INT8IO_kernel_COL32_element4<float4, float>
<<<grid, block, 0, stream>>>(a_buf,
qk_buf_int8,
(const float4*)attn_mask,
(const float4*)relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
scalar,
deQ_scale_ptr,
out_scale_ptr);
}
else if (std::is_same<T, half>::value) {
softmax_INT8IO_kernel_COL32_element4<half4, half>
<<<grid, block, 0, stream>>>(a_buf,
qk_buf_int8,
(const half4*)attn_mask,
(const half4*)relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
scalar,
deQ_scale_ptr,
out_scale_ptr);
}
}
else {
dim3 block((window_len + 31) / 32 * 32);
softmax_INT8IO_kernel_COL32<<<grid, block, 0, stream>>>(a_buf,
qk_buf_int8,
attn_mask,
relative_pos_bias,
batch_size,
num_head,
window_num,
window_len,
window_len * window_len,
scalar,
deQ_scale_ptr,
out_scale_ptr);
}
}
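// Usage sketch (illustrative; buffer names and the 1/sqrt scaling are assumptions): for Swin-style
// windowed attention the call looks like
//   invokeSoftmaxWithRelPosBiasCOL32(attn_probs_int8, qk_scores_int8, attn_mask, rel_pos_bias,
//                                    batch_size, num_head, window_num, window_len,
//                                    1.0f / sqrtf(float(size_per_head)),
//                                    qk_deQ_scale_ptr, softmax_out_scale_ptr, stream);
// deQ_scale_ptr dequantizes the int8 Q*K^T scores before the mask and relative position bias are
// added; out_scale_ptr requantizes the normalized probabilities back to int8.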
template void invokeSoftmaxWithRelPosBiasCOL32(int8_t* a_buf,
int8_t* qk_buf_int8,
const float* attn_mask,
const float* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const float scalar,
const float* deQ_scale_ptr,
const float* output_scale_ptr,
cudaStream_t stream);
template void invokeSoftmaxWithRelPosBiasCOL32(int8_t* a_buf,
int8_t* qk_buf_int8,
const half* attn_mask,
const half* relative_pos_bias,
const int batch_size,
const int num_head,
const int window_num,
const int window_len,
const float scalar,
const float* deQ_scale_ptr,
const float* output_scale_ptr,
cudaStream_t stream);
} // namespace fastertransformer
|
8d1b3ca77a4d4d1c25c8c8bd7db468aecf6cfd5d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void
add_bias(float *a, float *bias, float *out,
int size_x, int size_y, int size_z)
{
const int i = blockDim.y * blockIdx.y + threadIdx.y,
j = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size_x && j < size_y)
{
int k = (i * size_y + j) * size_z;
for (int c = 0; c < size_z; c++)
out[k+c] = a[k+c] + bias[c];
}
}
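// Minimal host-side launch sketch (an illustration only; the 16x16 block shape and buffer names are
// assumptions): threadIdx.y/blockIdx.y walk size_x and threadIdx.x/blockIdx.x walk size_y, matching
// the index computation inside add_bias.
void launch_add_bias(float *d_a, float *d_bias, float *d_out,
                     int size_x, int size_y, int size_z, hipStream_t stream)
{
    dim3 block(16, 16);
    dim3 grid((size_y + block.x - 1) / block.x, (size_x + block.y - 1) / block.y);
    hipLaunchKernelGGL(add_bias, grid, block, 0, stream,
                       d_a, d_bias, d_out, size_x, size_y, size_z);
}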
|
8d1b3ca77a4d4d1c25c8c8bd7db468aecf6cfd5d.cu
|
__global__ void
add_bias(float *a, float *bias, float *out,
int size_x, int size_y, int size_z)
{
const int i = blockDim.y * blockIdx.y + threadIdx.y,
j = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size_x && j < size_y)
{
int k = (i * size_y + j) * size_z;
for (int c = 0; c < size_z; c++)
out[k+c] = a[k+c] + bias[c];
}
}
|
ccbea60ee8b66c98ef45723b3cae72f88f5c26ed.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*************************************************************************/
/** File: kmeans_clustering.c **/
/** Description: Implementation of find cluster for k-means **/
/** clustering algorithm **/
/*************************************************************************/
#include "kmeans.h"
#ifndef FLT_MAX
#define FLT_MAX 3.40282347e+38
#endif
#ifdef ACCESS_CENT_COALESCED
#define EXPRESSION(i) { \
dist+=(point[i]-dCentroid[centIndx])*(point[i]-dCentroid[centIndx]); \
centIndx += k; \
}
#else
#define EXPRESSION(i) { \
dist+=(point[i]-dCentroid[centIndx+i])*(point[i]-dCentroid[centIndx+i]); \
}
#endif
#ifdef ACCESS_DATA_COALESCED
#define INIT_VARS(i) { \
point[i]=Data[0]; \
Data = (float *)((char*)Data + pitchData); \
}
#else
#define INIT_VARS(i) { \
point[i]=Data[i]; \
}
#endif
__constant__ float dCentroid[12000]; // The max possible size is 65536 bytes. We are leaving some empty space for compiler to use.
template <int dimensions>
__global__ void assign_labels( int n, int k, float *dData, int *Index, size_t pitchData)
{
float point[dimensions];
const int pt_i = blockIdx.x * blockDim.x + threadIdx.x;
if(pt_i < n) {
float* Data = dData;
#ifdef ACCESS_DATA_COALESCED
Data = Data + pt_i; // coalesced access from global memory
#else
Data = (float*)((char*)Data + pt_i*pitchData); // non-coalesced access from global memory
#endif
INIT_VARS(0);
if(dimensions>1)
INIT_VARS(1)
if(dimensions>2)
INIT_VARS(2)
if(dimensions>3)
INIT_VARS(3)
if(dimensions>4)
INIT_VARS(4)
if(dimensions>5)
INIT_VARS(5)
if(dimensions>6)
INIT_VARS(6)
if(dimensions>7)
INIT_VARS(7)
if(dimensions>8)
INIT_VARS(8)
if(dimensions>9)
INIT_VARS(9)
if(dimensions>10)
INIT_VARS(10)
if(dimensions>11)
INIT_VARS(11)
if(dimensions>12)
INIT_VARS(12)
if(dimensions>13)
INIT_VARS(13)
if(dimensions>14)
INIT_VARS(14)
if(dimensions>15)
INIT_VARS(15)
if(dimensions>16)
INIT_VARS(16)
if(dimensions>17)
INIT_VARS(17)
if(dimensions>18)
INIT_VARS(18)
if(dimensions>19)
INIT_VARS(19)
if(dimensions>20)
INIT_VARS(20)
if(dimensions>21)
INIT_VARS(21)
int centIndx = 0;
float min_dist = FLT_MAX;
int closest = 0;
#pragma unroll 2
for(int cent_i=0; cent_i< k; cent_i++)
{
float dist=0;
EXPRESSION(0)
if(dimensions>1)
EXPRESSION(1)
if(dimensions>2)
EXPRESSION(2)
if(dimensions>3)
EXPRESSION(3)
if(dimensions>4)
EXPRESSION(4)
if(dimensions>5)
EXPRESSION(5)
if(dimensions>6)
EXPRESSION(6)
if(dimensions>7)
EXPRESSION(7)
if(dimensions>8)
EXPRESSION(8)
if(dimensions>9)
EXPRESSION(9)
if(dimensions>10)
EXPRESSION(10)
if(dimensions>11)
EXPRESSION(11)
if(dimensions>12)
EXPRESSION(12)
if(dimensions>13)
EXPRESSION(13)
if(dimensions>14)
EXPRESSION(14)
if(dimensions>15)
EXPRESSION(15)
if(dimensions>16)
EXPRESSION(16)
if(dimensions>17)
EXPRESSION(17)
if(dimensions>18)
EXPRESSION(18)
if(dimensions>19)
EXPRESSION(19)
if(dimensions>20)
EXPRESSION(20)
if(dimensions>21)
EXPRESSION(21)
if(dist < min_dist) {
min_dist = dist;
closest = cent_i;
}
#ifdef ACCESS_CENT_COALESCED
centIndx = cent_i+1;
#else
//Centroid = Centroid + dimensions;
centIndx = centIndx + dimensions;
#endif
}
Index[pt_i] = closest;
}
}
#define INVOKE_ASSIGN_LABELS_DEV(i) \
hipLaunchKernelGGL(( assign_labels<i>), dim3(dim3(blockDim)),dim3(dim3(threadDim)), 0, 0, n, k, dData, Index, pitchData); \
break;
void callAssignLabels(int n, int k, float *dData, int *Index, size_t pitchData, int d, int blockDim,
int threadDim) {
switch(d)
{
case 1: INVOKE_ASSIGN_LABELS_DEV(1)
case 2: INVOKE_ASSIGN_LABELS_DEV(2)
case 3: INVOKE_ASSIGN_LABELS_DEV(3)
case 4: INVOKE_ASSIGN_LABELS_DEV(4)
case 5: INVOKE_ASSIGN_LABELS_DEV(5)
case 6: INVOKE_ASSIGN_LABELS_DEV(6)
case 7: INVOKE_ASSIGN_LABELS_DEV(7)
case 8: INVOKE_ASSIGN_LABELS_DEV(8)
case 9: INVOKE_ASSIGN_LABELS_DEV(9)
case 10: INVOKE_ASSIGN_LABELS_DEV(10)
case 11: INVOKE_ASSIGN_LABELS_DEV(11)
case 12: INVOKE_ASSIGN_LABELS_DEV(12)
case 13: INVOKE_ASSIGN_LABELS_DEV(13)
case 14: INVOKE_ASSIGN_LABELS_DEV(14)
case 15: INVOKE_ASSIGN_LABELS_DEV(15)
case 16: INVOKE_ASSIGN_LABELS_DEV(16)
case 17: INVOKE_ASSIGN_LABELS_DEV(17)
case 18: INVOKE_ASSIGN_LABELS_DEV(18)
case 19: INVOKE_ASSIGN_LABELS_DEV(19)
case 20: INVOKE_ASSIGN_LABELS_DEV(20)
case 21: INVOKE_ASSIGN_LABELS_DEV(21)
case 22: INVOKE_ASSIGN_LABELS_DEV(22)
default: printf("Only data points with dimension ranging from 1 to 22 are supported.\n");
exit(EXIT_FAILURE);
}
}
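// Illustrative host-side sketch (assumes ACCESS_DATA_COALESCED is defined, i.e. the point data is
// stored dimension-major as d pitched rows of n floats; hData/hCentroid are hypothetical host buffers):
//   float *dData; size_t pitchData;
//   hipMallocPitch((void**)&dData, &pitchData, n * sizeof(float), d);
//   hipMemcpy2D(dData, pitchData, hData, n * sizeof(float), n * sizeof(float), d, hipMemcpyHostToDevice);
//   hipMemcpyToSymbol(HIP_SYMBOL(dCentroid), hCentroid, k * d * sizeof(float));
//   int *dIndex; hipMalloc((void**)&dIndex, n * sizeof(int));
//   callAssignLabels(n, k, dData, dIndex, pitchData, d, (n + 255) / 256, 256);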
|
ccbea60ee8b66c98ef45723b3cae72f88f5c26ed.cu
|
/*************************************************************************/
/** File: kmeans_clustering.c **/
/** Description: Implementation of find cluster for k-means **/
/** clustering algorithm **/
/*************************************************************************/
#include "kmeans.h"
#ifndef FLT_MAX
#define FLT_MAX 3.40282347e+38
#endif
#ifdef ACCESS_CENT_COALESCED
#define EXPRESSION(i) { \
dist+=(point[i]-dCentroid[centIndx])*(point[i]-dCentroid[centIndx]); \
centIndx += k; \
}
#else
#define EXPRESSION(i) { \
dist+=(point[i]-dCentroid[centIndx+i])*(point[i]-dCentroid[centIndx+i]); \
}
#endif
#ifdef ACCESS_DATA_COALESCED
#define INIT_VARS(i) { \
point[i]=Data[0]; \
Data = (float *)((char*)Data + pitchData); \
}
#else
#define INIT_VARS(i) { \
point[i]=Data[i]; \
}
#endif
__constant__ float dCentroid[12000]; // The max possible size is 65536 bytes. We are leaving some empty space for compiler to use.
template <int dimensions>
__global__ void assign_labels( int n, int k, float *dData, int *Index, size_t pitchData)
{
float point[dimensions];
const int pt_i = blockIdx.x * blockDim.x + threadIdx.x;
if(pt_i < n) {
float* Data = dData;
#ifdef ACCESS_DATA_COALESCED
Data = Data + pt_i; // coalesced access from global memory
#else
Data = (float*)((char*)Data + pt_i*pitchData); // non-coalesced access from global memory
#endif
INIT_VARS(0);
if(dimensions>1)
INIT_VARS(1)
if(dimensions>2)
INIT_VARS(2)
if(dimensions>3)
INIT_VARS(3)
if(dimensions>4)
INIT_VARS(4)
if(dimensions>5)
INIT_VARS(5)
if(dimensions>6)
INIT_VARS(6)
if(dimensions>7)
INIT_VARS(7)
if(dimensions>8)
INIT_VARS(8)
if(dimensions>9)
INIT_VARS(9)
if(dimensions>10)
INIT_VARS(10)
if(dimensions>11)
INIT_VARS(11)
if(dimensions>12)
INIT_VARS(12)
if(dimensions>13)
INIT_VARS(13)
if(dimensions>14)
INIT_VARS(14)
if(dimensions>15)
INIT_VARS(15)
if(dimensions>16)
INIT_VARS(16)
if(dimensions>17)
INIT_VARS(17)
if(dimensions>18)
INIT_VARS(18)
if(dimensions>19)
INIT_VARS(19)
if(dimensions>20)
INIT_VARS(20)
if(dimensions>21)
INIT_VARS(21)
int centIndx = 0;
float min_dist = FLT_MAX;
int closest = 0;
#pragma unroll 2
for(int cent_i=0; cent_i< k; cent_i++)
{
float dist=0;
EXPRESSION(0)
if(dimensions>1)
EXPRESSION(1)
if(dimensions>2)
EXPRESSION(2)
if(dimensions>3)
EXPRESSION(3)
if(dimensions>4)
EXPRESSION(4)
if(dimensions>5)
EXPRESSION(5)
if(dimensions>6)
EXPRESSION(6)
if(dimensions>7)
EXPRESSION(7)
if(dimensions>8)
EXPRESSION(8)
if(dimensions>9)
EXPRESSION(9)
if(dimensions>10)
EXPRESSION(10)
if(dimensions>11)
EXPRESSION(11)
if(dimensions>12)
EXPRESSION(12)
if(dimensions>13)
EXPRESSION(13)
if(dimensions>14)
EXPRESSION(14)
if(dimensions>15)
EXPRESSION(15)
if(dimensions>16)
EXPRESSION(16)
if(dimensions>17)
EXPRESSION(17)
if(dimensions>18)
EXPRESSION(18)
if(dimensions>19)
EXPRESSION(19)
if(dimensions>20)
EXPRESSION(20)
if(dimensions>21)
EXPRESSION(21)
if(dist < min_dist) {
min_dist = dist;
closest = cent_i;
}
#ifdef ACCESS_CENT_COALESCED
centIndx = cent_i+1;
#else
//Centroid = Centroid + dimensions;
centIndx = centIndx + dimensions;
#endif
}
Index[pt_i] = closest;
}
}
#define INVOKE_ASSIGN_LABELS_DEV(i) \
assign_labels<i><<<dim3(blockDim),dim3(threadDim)>>>( n, k, dData, Index, pitchData); \
break;
void callAssignLabels(int n, int k, float *dData, int *Index, size_t pitchData, int d, int blockDim,
int threadDim) {
switch(d)
{
case 1: INVOKE_ASSIGN_LABELS_DEV(1)
case 2: INVOKE_ASSIGN_LABELS_DEV(2)
case 3: INVOKE_ASSIGN_LABELS_DEV(3)
case 4: INVOKE_ASSIGN_LABELS_DEV(4)
case 5: INVOKE_ASSIGN_LABELS_DEV(5)
case 6: INVOKE_ASSIGN_LABELS_DEV(6)
case 7: INVOKE_ASSIGN_LABELS_DEV(7)
case 8: INVOKE_ASSIGN_LABELS_DEV(8)
case 9: INVOKE_ASSIGN_LABELS_DEV(9)
case 10: INVOKE_ASSIGN_LABELS_DEV(10)
case 11: INVOKE_ASSIGN_LABELS_DEV(11)
case 12: INVOKE_ASSIGN_LABELS_DEV(12)
case 13: INVOKE_ASSIGN_LABELS_DEV(13)
case 14: INVOKE_ASSIGN_LABELS_DEV(14)
case 15: INVOKE_ASSIGN_LABELS_DEV(15)
case 16: INVOKE_ASSIGN_LABELS_DEV(16)
case 17: INVOKE_ASSIGN_LABELS_DEV(17)
case 18: INVOKE_ASSIGN_LABELS_DEV(18)
case 19: INVOKE_ASSIGN_LABELS_DEV(19)
case 20: INVOKE_ASSIGN_LABELS_DEV(20)
case 21: INVOKE_ASSIGN_LABELS_DEV(21)
case 22: INVOKE_ASSIGN_LABELS_DEV(22)
default: printf("Only data points with dimension ranging from 1 to 22 are supported.\n");
exit(EXIT_FAILURE);
}
}
|
82a5d38a099c4cbb1c1f220a95e51018524820b7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void MatrixMulKernelTiles(float* d_M, float* d_N, float* d_P, int Width){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the d_M and d_N tiles required to compute d_P element
for (int m = 0; m < Width/TILE_WIDTH; ++m) {
        // Collaborative loading of d_M and d_N tiles into shared memory
Mds[ty][tx] = d_M[Row*Width + m*TILE_WIDTH + tx];
Nds[ty][tx] = d_N[(m*TILE_WIDTH + ty)*Width + Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
d_P[Row*Width + Col] = Pvalue;
}
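// Illustrative launch sketch (assumes Width is an exact multiple of TILE_WIDTH, which the tiling loop
// above already requires, and that d_M/d_N/d_P are device buffers of Width*Width floats):
//   dim3 block(TILE_WIDTH, TILE_WIDTH);
//   dim3 grid(Width / TILE_WIDTH, Width / TILE_WIDTH);
//   hipLaunchKernelGGL(MatrixMulKernelTiles, grid, block, 0, 0, d_M, d_N, d_P, Width);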
|
82a5d38a099c4cbb1c1f220a95e51018524820b7.cu
|
#include "includes.h"
__global__ void MatrixMulKernelTiles(float* d_M, float* d_N, float* d_P, int Width){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the d_M and d_N tiles required to compute d_P element
for (int m = 0; m < Width/TILE_WIDTH; ++m) {
        // Collaborative loading of d_M and d_N tiles into shared memory
Mds[ty][tx] = d_M[Row*Width + m*TILE_WIDTH + tx];
Nds[ty][tx] = d_N[(m*TILE_WIDTH + ty)*Width + Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
d_P[Row*Width + Col] = Pvalue;
}
|
d5aabc865623ae2e2775807220a4615d9f27a9d7.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_svd/thrust_wrappers.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Wajih Halim Boukaram
* @date 2018-11-14
**/
#include <thrust/execution_policy.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform_scan.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/random.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/extrema.h>
#include <thrust/fill.h>
#include <iostream>
#include "thrust_wrappers.h"
#include "kblas_gpu_util.ch"
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Some array utility functions
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void exclusiveScan(int* array, int num_entries, int* result, int init, hipStream_t stream)
{
thrust::exclusive_scan(
thrust::hip::par.on(stream),
array, array + num_entries, result, init
);
}
void inclusiveScan(int* array, int num_entries, int* result, hipStream_t stream)
{
thrust::inclusive_scan(
thrust::hip::par.on(stream),
array, array + num_entries, result
);
}
template<class T>
T getMaxElementT(T* a, int elements, hipStream_t stream)
{
thrust::device_ptr<T> dev_a(a);
return *(thrust::max_element(
thrust::hip::par.on(stream),
dev_a, dev_a + elements
));
}
int getMaxElement(int* a, int elements, hipStream_t stream)
{
return getMaxElementT<int>(a, elements, stream);
}
float getMaxElement(float* a, int elements, hipStream_t stream)
{
return getMaxElementT<float>(a, elements, stream);
}
double getMaxElement(double* a, int elements, hipStream_t stream)
{
return getMaxElementT<double>(a, elements, stream);
}
template<class T>
T reduceSumT(T* a, int elements, hipStream_t stream)
{
thrust::device_ptr<T> dev_a(a);
return thrust::reduce(
thrust::hip::par.on(stream),
dev_a, dev_a + elements
);
}
double reduceSum(double* a, int elements, hipStream_t stream)
{
return reduceSumT<double>(a, elements, stream);
}
float reduceSum(float* a, int elements, hipStream_t stream)
{
return reduceSumT<float>(a, elements, stream);
}
template<class Real>
void fillArrayT(Real* array, int num_entries, Real val, hipStream_t stream)
{
thrust::device_ptr<Real> dev_start(array);
thrust::device_ptr<Real> dev_end(array + num_entries);
thrust::fill(
thrust::hip::par.on(stream),
dev_start, dev_end, val)
;
}
void fillArray(float* array, int num_entries, float val, hipStream_t stream)
{
fillArrayT<float>(array, num_entries, val, stream);
}
void fillArray(double* array, int num_entries, double val, hipStream_t stream)
{
fillArrayT<double>(array, num_entries, val, stream);
}
void fillArray(int* array, int num_entries, int val, hipStream_t stream)
{
fillArrayT<int>(array, num_entries, val, stream);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Generating array of pointers from either a strided array or another array of pointers
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<class T, class T_ptr>
struct UnaryAoAAssign : public thrust::unary_function<int, T*>
{
T_ptr original_array;
int stride, offset;
UnaryAoAAssign(T_ptr original_array, int stride, int offset)
{
this->original_array = original_array;
this->stride = stride;
this->offset = offset;
}
__host__ __device__
T* operator()(const unsigned int& thread_id) const
{
return getOperationPtr<T>(original_array, thread_id, stride) + offset;
}
};
template<class T, class T_ptr>
void generateArrayOfPointersT(T_ptr original_array, T** array_of_arrays, int stride, int offset, int num_arrays, hipStream_t stream)
{
thrust::device_ptr<T*> dev_data(array_of_arrays);
thrust::transform(
thrust::hip::par.on(stream),
thrust::counting_iterator<int>(0),
thrust::counting_iterator<int>(num_arrays),
dev_data,
UnaryAoAAssign<T, T_ptr>(original_array, stride, offset)
);
}
void generateArrayOfPointers(double* original_array, double** array_of_arrays, int stride, int offset, int num_arrays, hipStream_t stream)
{ generateArrayOfPointersT<double, double*>(original_array, array_of_arrays, stride, offset, num_arrays, stream); }
void generateArrayOfPointers(double* original_array, double** array_of_arrays, int stride, int num_arrays, hipStream_t stream)
{ generateArrayOfPointersT<double, double*>(original_array, array_of_arrays, stride, 0, num_arrays, stream); }
void generateArrayOfPointers(double** original_array, double** array_of_arrays, int stride, int offset, int num_arrays, hipStream_t stream)
{ generateArrayOfPointersT<double, double**>(original_array, array_of_arrays, stride, offset, num_arrays, stream); }
void generateArrayOfPointers(double** original_array, double** array_of_arrays, int stride, int num_arrays, hipStream_t stream)
{ generateArrayOfPointersT<double, double**>(original_array, array_of_arrays, stride, 0, num_arrays, stream); }
void generateArrayOfPointers(float* original_array, float** array_of_arrays, int stride, int offset, int num_arrays, hipStream_t stream)
{ generateArrayOfPointersT<float, float*>(original_array, array_of_arrays, stride, offset, num_arrays, stream); }
void generateArrayOfPointers(float* original_array, float** array_of_arrays, int stride, int num_arrays, hipStream_t stream)
{ generateArrayOfPointersT<float, float*>(original_array, array_of_arrays, stride, 0, num_arrays, stream); }
void generateArrayOfPointers(float** original_array, float** array_of_arrays, int stride, int offset, int num_arrays, hipStream_t stream)
{ generateArrayOfPointersT<float, float**>(original_array, array_of_arrays, stride, offset, num_arrays, stream); }
void generateArrayOfPointers(float** original_array, float** array_of_arrays, int stride, int num_arrays, hipStream_t stream)
{ generateArrayOfPointersT<float, float**>(original_array, array_of_arrays, stride, 0, num_arrays, stream); }
void generateArrayOfPointers(int* original_array, int** array_of_arrays, int stride, int offset, int num_arrays, hipStream_t stream)
{ generateArrayOfPointersT<int, int*>(original_array, array_of_arrays, stride, offset, num_arrays, stream); }
void generateArrayOfPointers(int* original_array, int** array_of_arrays, int stride, int num_arrays, hipStream_t stream)
{ generateArrayOfPointersT<int, int*>(original_array, array_of_arrays, stride, 0, num_arrays, stream); }
void generateArrayOfPointers(int** original_array, int** array_of_arrays, int stride, int offset, int num_arrays, hipStream_t stream)
{ generateArrayOfPointersT<int, int**>(original_array, array_of_arrays, stride, offset, num_arrays, stream); }
void generateArrayOfPointers(int** original_array, int** array_of_arrays, int stride, int num_arrays, hipStream_t stream)
{ generateArrayOfPointersT<int, int**>(original_array, array_of_arrays, stride, 0, num_arrays, stream); }
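// Illustrative usage sketch (names are assumptions): turning one strided batch buffer into the
// pointer array expected by batched BLAS/LAPACK routines.
//   double *d_batch; // num_ops matrices of lda * n doubles stored back to back
//   double **d_ptrs; // one device pointer per matrix
//   generateArrayOfPointers(d_batch, d_ptrs, lda * n, num_ops, stream);
// The offset overloads shift every generated pointer by a fixed element count, e.g. to address a
// trailing sub-block of each matrix without allocating a second pointer array.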
|
d5aabc865623ae2e2775807220a4615d9f27a9d7.cu
|
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_svd/thrust_wrappers.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Wajih Halim Boukaram
* @date 2018-11-14
**/
#include <thrust/execution_policy.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform_scan.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/random.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/extrema.h>
#include <thrust/fill.h>
#include <iostream>
#include "thrust_wrappers.h"
#include "kblas_gpu_util.ch"
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Some array utility functions
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void exclusiveScan(int* array, int num_entries, int* result, int init, cudaStream_t stream)
{
thrust::exclusive_scan(
thrust::cuda::par.on(stream),
array, array + num_entries, result, init
);
}
void inclusiveScan(int* array, int num_entries, int* result, cudaStream_t stream)
{
thrust::inclusive_scan(
thrust::cuda::par.on(stream),
array, array + num_entries, result
);
}
template<class T>
T getMaxElementT(T* a, int elements, cudaStream_t stream)
{
thrust::device_ptr<T> dev_a(a);
return *(thrust::max_element(
thrust::cuda::par.on(stream),
dev_a, dev_a + elements
));
}
int getMaxElement(int* a, int elements, cudaStream_t stream)
{
return getMaxElementT<int>(a, elements, stream);
}
float getMaxElement(float* a, int elements, cudaStream_t stream)
{
return getMaxElementT<float>(a, elements, stream);
}
double getMaxElement(double* a, int elements, cudaStream_t stream)
{
return getMaxElementT<double>(a, elements, stream);
}
template<class T>
T reduceSumT(T* a, int elements, cudaStream_t stream)
{
thrust::device_ptr<T> dev_a(a);
return thrust::reduce(
thrust::cuda::par.on(stream),
dev_a, dev_a + elements
);
}
double reduceSum(double* a, int elements, cudaStream_t stream)
{
return reduceSumT<double>(a, elements, stream);
}
float reduceSum(float* a, int elements, cudaStream_t stream)
{
return reduceSumT<float>(a, elements, stream);
}
template<class Real>
void fillArrayT(Real* array, int num_entries, Real val, cudaStream_t stream)
{
thrust::device_ptr<Real> dev_start(array);
thrust::device_ptr<Real> dev_end(array + num_entries);
thrust::fill(
thrust::cuda::par.on(stream),
dev_start, dev_end, val)
;
}
void fillArray(float* array, int num_entries, float val, cudaStream_t stream)
{
fillArrayT<float>(array, num_entries, val, stream);
}
void fillArray(double* array, int num_entries, double val, cudaStream_t stream)
{
fillArrayT<double>(array, num_entries, val, stream);
}
void fillArray(int* array, int num_entries, int val, cudaStream_t stream)
{
fillArrayT<int>(array, num_entries, val, stream);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Generating array of pointers from either a strided array or another array of pointers
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<class T, class T_ptr>
struct UnaryAoAAssign : public thrust::unary_function<int, T*>
{
T_ptr original_array;
int stride, offset;
UnaryAoAAssign(T_ptr original_array, int stride, int offset)
{
this->original_array = original_array;
this->stride = stride;
this->offset = offset;
}
__host__ __device__
T* operator()(const unsigned int& thread_id) const
{
return getOperationPtr<T>(original_array, thread_id, stride) + offset;
}
};
template<class T, class T_ptr>
void generateArrayOfPointersT(T_ptr original_array, T** array_of_arrays, int stride, int offset, int num_arrays, cudaStream_t stream)
{
thrust::device_ptr<T*> dev_data(array_of_arrays);
thrust::transform(
thrust::cuda::par.on(stream),
thrust::counting_iterator<int>(0),
thrust::counting_iterator<int>(num_arrays),
dev_data,
UnaryAoAAssign<T, T_ptr>(original_array, stride, offset)
);
}
void generateArrayOfPointers(double* original_array, double** array_of_arrays, int stride, int offset, int num_arrays, cudaStream_t stream)
{ generateArrayOfPointersT<double, double*>(original_array, array_of_arrays, stride, offset, num_arrays, stream); }
void generateArrayOfPointers(double* original_array, double** array_of_arrays, int stride, int num_arrays, cudaStream_t stream)
{ generateArrayOfPointersT<double, double*>(original_array, array_of_arrays, stride, 0, num_arrays, stream); }
void generateArrayOfPointers(double** original_array, double** array_of_arrays, int stride, int offset, int num_arrays, cudaStream_t stream)
{ generateArrayOfPointersT<double, double**>(original_array, array_of_arrays, stride, offset, num_arrays, stream); }
void generateArrayOfPointers(double** original_array, double** array_of_arrays, int stride, int num_arrays, cudaStream_t stream)
{ generateArrayOfPointersT<double, double**>(original_array, array_of_arrays, stride, 0, num_arrays, stream); }
void generateArrayOfPointers(float* original_array, float** array_of_arrays, int stride, int offset, int num_arrays, cudaStream_t stream)
{ generateArrayOfPointersT<float, float*>(original_array, array_of_arrays, stride, offset, num_arrays, stream); }
void generateArrayOfPointers(float* original_array, float** array_of_arrays, int stride, int num_arrays, cudaStream_t stream)
{ generateArrayOfPointersT<float, float*>(original_array, array_of_arrays, stride, 0, num_arrays, stream); }
void generateArrayOfPointers(float** original_array, float** array_of_arrays, int stride, int offset, int num_arrays, cudaStream_t stream)
{ generateArrayOfPointersT<float, float**>(original_array, array_of_arrays, stride, offset, num_arrays, stream); }
void generateArrayOfPointers(float** original_array, float** array_of_arrays, int stride, int num_arrays, cudaStream_t stream)
{ generateArrayOfPointersT<float, float**>(original_array, array_of_arrays, stride, 0, num_arrays, stream); }
void generateArrayOfPointers(int* original_array, int** array_of_arrays, int stride, int offset, int num_arrays, cudaStream_t stream)
{ generateArrayOfPointersT<int, int*>(original_array, array_of_arrays, stride, offset, num_arrays, stream); }
void generateArrayOfPointers(int* original_array, int** array_of_arrays, int stride, int num_arrays, cudaStream_t stream)
{ generateArrayOfPointersT<int, int*>(original_array, array_of_arrays, stride, 0, num_arrays, stream); }
void generateArrayOfPointers(int** original_array, int** array_of_arrays, int stride, int offset, int num_arrays, cudaStream_t stream)
{ generateArrayOfPointersT<int, int**>(original_array, array_of_arrays, stride, offset, num_arrays, stream); }
void generateArrayOfPointers(int** original_array, int** array_of_arrays, int stride, int num_arrays, cudaStream_t stream)
{ generateArrayOfPointersT<int, int**>(original_array, array_of_arrays, stride, 0, num_arrays, stream); }
|
a02d0d434e044bdb50ce09e883bb93a2091a3af2.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Based off earlier start from:
* https://github.com/Robadob/SP-Bench/commit/35dcbb81cc0b73cdb6b08fb622f13e688a878133
*/
#define _CRT_SECURE_NO_WARNINGS
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime_api.h"
#include <stdio.h>
#include <cmath>
#include <glm/glm.hpp>
#include <glm/gtx/component_wise.hpp>
#include <glm/gtc/constants.hpp>
#include <hiprand/hiprand_kernel.h>
#include <texture_fetch_functions.h>
#include <hipcub/hipcub.hpp>
#include <glm/gtc/epsilon.hpp>
#define EPSILON 0.0001f
//#define CIRCLES
//Cuda call
static void HandleCUDAError(const char *file,
int line,
hipError_t status = hipGetLastError()) {
#ifdef _DEBUG
hipDeviceSynchronize();
#endif
if (status != hipError_t::hipSuccess || (status = hipGetLastError()) != hipError_t::hipSuccess)
{
printf("%s(%i) CUDA Error Occurred;\n%s\n", file, line, hipGetErrorString(status));
#ifdef _DEBUG
getchar();
#endif
exit(1);
}
}
#define CUDA_CALL( err ) (HandleCUDAError(__FILE__, __LINE__ , err))
#define CUDA_CHECK() (HandleCUDAError(__FILE__, __LINE__))
//Logging (found in log.cpp)
#include <fstream>
void createLog(std::ofstream &f);
void log(std::ofstream &f,
const unsigned int &estRadialNeighbours,
const unsigned int &agentCount,
const unsigned int &envWidth,
const float &PBM_control,
const float &kernel_control,
const float &PBM,
const float &kernel,
const unsigned int &fails
);
__device__ __constant__ unsigned int d_agentCount;
__device__ __constant__ float d_environmentWidth_float;
__device__ __constant__ unsigned int d_gridDim;
glm::uvec3 GRID_DIMS;
__device__ __constant__ float d_gridDim_float;
__device__ __constant__ float d_RADIUS;
__device__ __constant__ float d_R_SIN_45;
__device__ __constant__ float d_binWidth;
//For thread block max bin check
unsigned int *d_PBM_max_count;
unsigned int PBM_max_count = 0;
unsigned int PBM_max_Moore_count = 0;//This is unused; it could be used if we wished to load the entire Moore neighbourhood into shared memory at once, but instead we load one bin at a time
texture<float4> d_texMessages;
texture<unsigned int> d_texPBM;
__global__ void init_curand(hiprandState_t *state, unsigned long long seed) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < d_agentCount)
hiprand_init(seed, id, 0, &state[id]);
}
__global__ void init_agents(hiprandState_t *state, glm::vec4 *locationMessages) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= d_agentCount)
return;
    //curand_uniform returns 0<x<=1.0, not much we can really do about 0 exclusive
//negate and + 1.0, to make 0<=x<1.0
locationMessages[id].x = (-hiprand_uniform(&state[id]) + 1.0f)*d_environmentWidth_float;
locationMessages[id].y = (-hiprand_uniform(&state[id]) + 1.0f)*d_environmentWidth_float;
locationMessages[id].z = (-hiprand_uniform(&state[id]) + 1.0f)*d_environmentWidth_float;
}
__device__ __forceinline__ glm::ivec3 getGridPosition(glm::vec3 worldPos)
{
//Clamp each grid coord to 0<=x<dim
return clamp(floor((worldPos / d_environmentWidth_float)*d_gridDim_float), glm::vec3(0), glm::vec3((float)d_gridDim - 1));
}
__device__ __forceinline__ unsigned int getHash(glm::ivec3 gridPos)
{
//Bound gridPos to gridDimensions
gridPos = clamp(gridPos, glm::ivec3(0), glm::ivec3(d_gridDim - 1));
//Compute hash (effectivley an index for to a bin within the partitioning grid in this case)
return (unsigned int)(
(gridPos.z * d_gridDim * d_gridDim) + //z
(gridPos.y * d_gridDim) + //y
gridPos.x); //x
}
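// Worked example: with d_gridDim == 8, gridPos (2, 3, 4) hashes to 4*8*8 + 3*8 + 2 = 282,
// i.e. bins are laid out x-fastest, then y, then z.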
__global__ void atomicHistogram(unsigned int* bin_index, unsigned int* bin_sub_index, unsigned int *pbm_counts, glm::vec4 *messageBuffer)
{
unsigned int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//Kill excess threads
if (index >= d_agentCount) return;
glm::ivec3 gridPos = getGridPosition(messageBuffer[index]);
unsigned int hash = getHash(gridPos);
bin_index[index] = hash;
unsigned int bin_idx = atomicInc((unsigned int*)&pbm_counts[hash], 0xFFFFFFFF);
bin_sub_index[index] = bin_idx;
}
__global__ void reorderLocationMessages(
unsigned int* bin_index,
unsigned int* bin_sub_index,
unsigned int *pbm,
glm::vec4 *unordered_messages,
glm::vec4 *ordered_messages
)
{
unsigned int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//Kill excess threads
if (index >= d_agentCount) return;
unsigned int i = bin_index[index];
unsigned int sorted_index = pbm[i] + bin_sub_index[index];
//Order messages into swap space
ordered_messages[sorted_index] = unordered_messages[index];
}
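// The PBM built below follows the usual three-step pattern:
//   1. atomicHistogram counts agents per bin and records each agent's (bin, slot-within-bin) pair,
//   2. an exclusive scan over the counts turns them into per-bin start offsets,
//   3. reorderLocationMessages scatters each message to pbm[bin] + slot, giving bin-contiguous storage.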
int requiredSM(int blockSize)
{
hipDeviceProp_t dp;
int device;
hipGetDevice(&device);
    memset(&dp, 0, sizeof(hipDeviceProp_t));
hipGetDeviceProperties(&dp, device);
//We could use dp.sharedMemPerBlock/N to improve occupancy
return (int)min(PBM_max_count * sizeof(float3), dp.sharedMemPerBlock);//Need to limit this to the max SM
}
/**
 * Control kernel: launched with one thread per agent (occupancy-derived 1D grid).
 * Each thread walks the Moore neighbourhood of its own bin, reading messages directly from texture memory.
 */
__global__ void __launch_bounds__(64) neighbourSearch_control(const glm::vec4 *agents, glm::vec4 *out)
{
#define STRIPS
unsigned int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//Kill excess threads
if (index >= d_agentCount) return;
glm::vec3 pos = *(glm::vec3*)&agents[index];
glm::ivec3 gridPos = getGridPosition(pos);
glm::ivec3 gridPosRelative;
unsigned int count = 0;
glm::vec3 average = glm::vec3(0);
for (gridPosRelative.z = -1; gridPosRelative.z <= 1; gridPosRelative.z++)
{//zmin to zmax
int currentBinZ = gridPos.z + gridPosRelative.z;
if (currentBinZ >= 0 && currentBinZ < d_gridDim)
{
for (gridPosRelative.y = -1; gridPosRelative.y <= 1; gridPosRelative.y++)
{//ymin to ymax
int currentBinY = gridPos.y + gridPosRelative.y;
if (currentBinY >= 0 && currentBinY < d_gridDim)
{
#ifndef STRIPS
for (gridPosRelative.x = -1; gridPosRelative.x <= 1; gridPosRelative.x++)
{//xmin to xmax
int currentBinX = gridPos.x + gridPosRelative.x;
//Find bin start and end
unsigned int binHash = getHash(glm::ivec3(currentBinX, currentBinY, currentBinZ));
//if (binHash>d_gridDim*d_gridDim)
//{
// printf("Hash: %d, gridDim: %d, pos: (%d, %d)\n", binHash, d_gridDim, tGridPos.x, tGridPos.y);
//}
unsigned int binStart = tex1Dfetch(d_texPBM, binHash);
unsigned int binEnd = tex1Dfetch(d_texPBM, binHash + 1);
#else
int currentBinX = gridPos.x - 1;
currentBinX = currentBinX >= 0 ? currentBinX : 0;
unsigned int binHash = getHash(glm::ivec3(currentBinX, currentBinY, currentBinZ));
unsigned int binStart = tex1Dfetch(d_texPBM, binHash);
currentBinX = gridPos.x + 1;
currentBinX = currentBinX < d_gridDim ? currentBinX : d_gridDim - 1;
binHash = getHash(glm::ivec3(currentBinX, currentBinY, currentBinZ));
unsigned int binEnd = tex1Dfetch(d_texPBM, binHash + 1);
#endif
//Iterate messages in range
for (unsigned int i = binStart; i < binEnd; ++i)
{
if (i != index)//Ignore self
{
float4 message = tex1Dfetch(d_texMessages, i);
#ifndef CIRCLES
if (distance(*(glm::vec3*)&message, pos) < d_RADIUS)
{
//message.z = pow(sqrt(sin(distance(message, pos))),3.1f);//Bonus compute
average += *(glm::vec3*)&message;
count++;
}
#else
glm::vec3 toLoc = (*(glm::vec3*)&message) - pos;//Difference
float separation = length(toLoc);
if (separation < d_RADIUS && separation > 0)
{
const float REPULSE_FACTOR = 0.05f;
float k = sinf((separation / d_RADIUS)*3.141*-2)*REPULSE_FACTOR;
                        toLoc /= separation;//Normalize (without recalculating separation)
average += k * toLoc;
count++;
}
#endif
}
}
}
#ifndef STRIPS
}
#endif
}
}
}
average /= count>0 ? count : 1;
#ifndef CIRCLES
out[index].x = average.x;
out[index].y = average.y;
out[index].z = average.z;
#else
out[index].x = pos.x + average.x;
out[index].y = pos.y + average.y;
out[index].z = pos.z + average.z;
#endif
}
/**
* Kernel must be launched 1 block per bin
 * This removes the necessity of __launch_bounds__(64) as all threads in a block are touching the same messages
 * However we end up with a lot of (mostly) idle threads if one bin is dense and the others are empty.
*/
__global__ void neighbourSearch(const glm::vec4 *agents, glm::vec4 *out)
{
extern __shared__ float3 sm_messages[];
//My data
glm::ivec3 myBin = glm::ivec3(blockIdx.x / d_gridDim, blockIdx.x % d_gridDim, blockIdx.y);
unsigned int index = UINT_MAX;
glm::vec3 pos;
{
unsigned int binHash = getHash(myBin);
unsigned int binStart = tex1Dfetch(d_texPBM, binHash);
unsigned int binEnd = tex1Dfetch(d_texPBM, binHash + 1);
unsigned int binCount = binEnd - binStart;
if (threadIdx.x < binCount)
{
index = binStart + threadIdx.x;
pos = *(glm::vec3*)&agents[index];
}
}
//Model data
unsigned int count = 0;
glm::vec3 average = glm::vec3(0);
//Iterate each bin in the Moore neighbourhood
glm::ivec3 currentBin;
for(int _x = -1;_x<=1;++_x)
{
currentBin.x = myBin.x + _x;
if (currentBin.x >= 0 && currentBin.x < d_gridDim)
{
for (int _y = -1; _y <= 1; ++_y)
{
currentBin.y = myBin.y + _y;
if (currentBin.y >= 0 && currentBin.y < d_gridDim)
{
for (int _z = -1; _z <= 1; ++_z)
{
currentBin.z = myBin.z + _z;
if (currentBin.z >= 0 && currentBin.z < d_gridDim)
{
//Now we must load all messages from currentBin into shared memory
                    //WARNING: There is an unhandled edge case whereby we don't have enough shared memory and must segment the load
unsigned int binHash = getHash(currentBin);
unsigned int binStart = tex1Dfetch(d_texPBM, binHash);
unsigned int binEnd = tex1Dfetch(d_texPBM, binHash + 1);
unsigned int binCount = binEnd - binStart;
//If this bin has a message for us to load
if (threadIdx.x < binCount)
{
//Load the message into shared memory
float4 message = tex1Dfetch(d_texMessages, binStart + threadIdx.x);
sm_messages[threadIdx.x] = *(float3*)&message;
}
//Wait for all loading to be completed
__syncthreads();
//If we host a valid message...
if (index != UINT_MAX)
{
//Iterate the loaded messages
for (unsigned int i = 0; i < binCount; ++i)
{
//Skip our own loaded message
if (_x == 0 && _y == 0 && _z == 0 && i == threadIdx.x)
continue;
float3 message = sm_messages[i];
#ifndef CIRCLES
if (distance(*(glm::vec3*)&message, pos) < d_RADIUS)
{
//message.z = pow(sqrt(sin(distance(message, pos))),3.1f);//Bonus compute
average += *(glm::vec3*)&message;
count++;
}
#else
glm::vec3 toLoc = (*(glm::vec3*)&message) - pos;//Difference
float separation = length(toLoc);
if (separation < d_RADIUS && separation > 0)
{
const float REPULSE_FACTOR = 0.05f;
float k = sinf((separation / d_RADIUS)*3.141*-2)*REPULSE_FACTOR;
                        toLoc /= separation;//Normalize (without recalculating separation)
average += k * toLoc;
count++;
}
#endif
}
}
//Wait for all processing to be completed, so that we can proceed to next bin
__syncthreads();
}
}
}
//Could optimise here, by handling binHash outside the loop
//For this would need to iterate _y on the outside, so hashes are contiguous
}
}
}
//If we have a valid message...
if(index != UINT_MAX)
{
average /= count>0 ? count : 1;
#ifndef CIRCLES
out[index].x = average.x;
out[index].y = average.y;
out[index].z = average.z;
#else
out[index].x = pos.x + average.x;
out[index].y = pos.y + average.y;
out[index].z = pos.z + average.z;
#endif
}
}
__global__ void unsortMessages(
unsigned int* bin_index,
unsigned int* bin_sub_index,
unsigned int *pbm,
glm::vec4 *ordered_messages,
glm::vec4 *unordered_messages
)
{
unsigned int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//Kill excess threads
if (index >= d_agentCount) return;
unsigned int i = bin_index[index];
unsigned int sorted_index = pbm[i] + bin_sub_index[index];
//Order messages into swap space
unordered_messages[index] = ordered_messages[sorted_index];
}
/**
* This program is to act as a test rig to demonstrate the raw impact of raw message handling
*/
void run(std::ofstream &f, const unsigned int ENV_WIDTH, const unsigned int AGENT_COUNT = 1000000)
{
void *d_CUB_temp_storage = nullptr;
size_t d_CUB_temp_storage_bytes = 0;
//Spatial partitioning mock
    //Fixed 3D environment of ENV_WIDTH x ENV_WIDTH x ENV_WIDTH
//Filled with 1,000,000 randomly distributed agents
//const unsigned int ENV_WIDTH = 250;
float ENV_WIDTH_float = (float)ENV_WIDTH;
const unsigned int RNG_SEED = 12;
const unsigned int ENV_VOLUME = ENV_WIDTH * ENV_WIDTH * ENV_WIDTH;
CUDA_CALL(hipMemcpyToSymbol(d_agentCount, &AGENT_COUNT, sizeof(unsigned int)));
CUDA_CALL(hipMemcpyToSymbol(d_environmentWidth_float, &ENV_WIDTH_float, sizeof(float)));
    //vec4 used instead of vec3 because of texture memory requirements
glm::vec4 *d_agents_init = nullptr, *d_agents = nullptr, *d_out = nullptr;
unsigned int *d_keys = nullptr, *d_vals = nullptr;
CUDA_CALL(hipMalloc(&d_agents_init, sizeof(glm::vec4) * AGENT_COUNT));
CUDA_CALL(hipMalloc(&d_agents, sizeof(glm::vec4) * AGENT_COUNT));
CUDA_CALL(hipMalloc(&d_out, sizeof(glm::vec4) * AGENT_COUNT));
glm::vec4 *h_out = (glm::vec4*)malloc(sizeof(glm::vec4) * AGENT_COUNT);
glm::vec4 *h_out_control = (glm::vec4*)malloc(sizeof(glm::vec4) * AGENT_COUNT);
//Init agents
{
//Generate hiprand
hiprandState_t *d_rng;
CUDA_CALL(hipMalloc(&d_rng, AGENT_COUNT * sizeof(hiprandState_t)));
//Arbitrary thread block sizes (speed not too important during one off initialisation)
unsigned int initThreads = 512;
unsigned int initBlocks = (AGENT_COUNT / initThreads) + 1;
init_curand << <initBlocks, initThreads >> >(d_rng, RNG_SEED);//Defined in CircleKernels.cuh
CUDA_CALL(hipDeviceSynchronize());
    hipProfilerStart();//Start here because init_curand is super slow for large agent counts.
init_agents << <initBlocks, initThreads >> >(d_rng, d_agents_init);
//Free hiprand
CUDA_CALL(hipFree(d_rng));
CUDA_CALL(hipMalloc(&d_keys, sizeof(unsigned int)*AGENT_COUNT));
CUDA_CALL(hipMalloc(&d_vals, sizeof(unsigned int)*AGENT_COUNT));
}
//Decide interaction radius
//for a range of bin widths
const float RADIUS = 1.0f;//
const float RADIAL_VOLUME = glm::pi<float>()*RADIUS*RADIUS*RADIUS*(4.0f/3.0f);
const unsigned int AVERAGE_NEIGHBOURS = (unsigned int)(AGENT_COUNT*RADIAL_VOLUME / ENV_VOLUME);
printf("Agents: %d, RVol: %.2f, Average Neighbours: %d\n", AGENT_COUNT, RADIAL_VOLUME, AVERAGE_NEIGHBOURS);
//{
// hipFree(d_agents_init);
// hipFree(d_agents);
// hipFree(d_out);
// return;
//}
const float rSin45 = (float)(RADIUS*sin(glm::radians(45)));
CUDA_CALL(hipMemcpyToSymbol(d_RADIUS, &RADIUS, sizeof(float)));
CUDA_CALL(hipMemcpyToSymbol(d_R_SIN_45, &rSin45, sizeof(float)));
{
{
//Copy init state to d_out
CUDA_CALL(hipMemcpy(d_out, d_agents_init, sizeof(glm::vec4)*AGENT_COUNT, hipMemcpyDeviceToDevice));
}
//Decide bin width (as a ratio to radius)
const float BIN_WIDTH = RADIUS;
float GRID_DIMS_float = ENV_WIDTH / BIN_WIDTH;
GRID_DIMS = glm::uvec3((unsigned int)ceil(GRID_DIMS_float));
CUDA_CALL(hipMemcpyToSymbol(d_binWidth, &BIN_WIDTH, sizeof(float)));
CUDA_CALL(hipMemcpyToSymbol(d_gridDim, &GRID_DIMS.x, sizeof(unsigned int)));
CUDA_CALL(hipMemcpyToSymbol(d_gridDim_float, &GRID_DIMS_float, sizeof(float)));
const unsigned int BIN_COUNT = glm::compMul(GRID_DIMS);
hipEvent_t start_PBM, end_PBM, start_kernel, end_kernel;
hipEventCreate(&start_PBM);
hipEventCreate(&end_PBM);
hipEventCreate(&start_kernel);
hipEventCreate(&end_kernel);
//BuildPBM
unsigned int *d_PBM_counts = nullptr;
unsigned int *d_PBM = nullptr;
CUDA_CALL(hipMalloc(&d_PBM_counts, (BIN_COUNT + 1) * sizeof(unsigned int)));
CUDA_CALL(hipMalloc(&d_PBM, (BIN_COUNT + 1) * sizeof(unsigned int)));
//Prep for threadblocks
CUDA_CALL(hipMalloc(&d_PBM_max_count, sizeof(unsigned int)));
CUDA_CALL(hipMemset(d_PBM_max_count, 0, sizeof(unsigned int)));
{//Resize cub temp if required
size_t bytesCheck, bytesCheck2;
hipcub::DeviceScan::ExclusiveSum(nullptr, bytesCheck, d_PBM, d_PBM_counts, BIN_COUNT + 1);
hipcub::DeviceReduce::Max(nullptr, bytesCheck2, d_PBM_counts, d_PBM_max_count, BIN_COUNT);
bytesCheck = glm::max(bytesCheck, bytesCheck2);
if (bytesCheck > d_CUB_temp_storage_bytes)
{
if (d_CUB_temp_storage)
{
CUDA_CALL(hipFree(d_CUB_temp_storage));
}
d_CUB_temp_storage_bytes = bytesCheck;
CUDA_CALL(hipMalloc(&d_CUB_temp_storage, d_CUB_temp_storage_bytes));
}
}
float pbmMillis_control = 0, kernelMillis_control = 0;
float pbmMillis = 0, kernelMillis = 0;
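    //_j counts 1 then 0 (control pass, then thread-block pass); the unsigned decrement then wraps to UINT_MAX, ending the loop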
for (unsigned int _j = 1; _j < UINT_MAX; --_j)
{
//1 = control
//0 = threadblock
bool isControl = _j != 0;
      //Average over ITERATIONS runs of the model (currently 1)
const unsigned int ITERATIONS = 1;
for (unsigned int i = 0; i < ITERATIONS; ++i)
{
//Reset each run of average model
#ifndef CIRCLES
CUDA_CALL(hipMemcpy(d_out, d_agents_init, sizeof(glm::vec4)*AGENT_COUNT, hipMemcpyDeviceToDevice));
#endif
hipEventRecord(start_PBM);
{//Build atomic histogram
CUDA_CALL(hipMemset(d_PBM_counts, 0x00000000, (BIN_COUNT + 1) * sizeof(unsigned int)));
        int blockSize; // Receives the max active blocks per SM for a 32-thread block (occupancy), reused below as the launch block size
CUDA_CALL(hipOccupancyMaxActiveBlocksPerMultiprocessor(&blockSize, atomicHistogram, 32, 0));//Randomly 32
// Round up according to array size
int gridSize = (AGENT_COUNT + blockSize - 1) / blockSize;
atomicHistogram << <gridSize, blockSize >> > (d_keys, d_vals, d_PBM_counts, d_out);
CUDA_CALL(hipDeviceSynchronize());
}
{//Scan (sum), to finalise PBM
hipcub::DeviceScan::ExclusiveSum(d_CUB_temp_storage, d_CUB_temp_storage_bytes, d_PBM_counts, d_PBM, BIN_COUNT + 1);
}
{//Reorder messages
        int blockSize; // Receives the max active blocks per SM for a 32-thread block (occupancy), reused below as the launch block size
CUDA_CALL(hipOccupancyMaxActiveBlocksPerMultiprocessor(&blockSize, reorderLocationMessages, 32, 0));//Randomly 32
// Round up according to array size
int gridSize = (AGENT_COUNT + blockSize - 1) / blockSize;
        //Copy messages from d_out to d_agents, in hash order
reorderLocationMessages << <gridSize, blockSize >> > (d_keys, d_vals, d_PBM, d_out, d_agents);
CUDA_CHECK();
}
if (!isControl)
{//Calc max bin size (for threadblocks)
hipcub::DeviceReduce::Max(d_CUB_temp_storage, d_CUB_temp_storage_bytes, d_PBM_counts, d_PBM_max_count, BIN_COUNT);
CUDA_CALL(hipGetLastError());
CUDA_CALL(hipMemcpy(&PBM_max_count, d_PBM_max_count, sizeof(unsigned int), hipMemcpyDeviceToHost));
//Calc moore size (bin size^dims?)
//PBM_max_Moore_count = (unsigned int)pow(PBM_max_count, 3);//2==2D//Unused, requires 9x shared mem in 2D, 27x in 3D
}
{//Fill PBM and Message Texture Buffers
CUDA_CALL(hipDeviceSynchronize());//Wait for return
CUDA_CALL(hipBindTexture(nullptr, d_texMessages, d_agents, sizeof(glm::vec4) * AGENT_COUNT));
CUDA_CALL(hipBindTexture(nullptr, d_texPBM, d_PBM, sizeof(unsigned int) * (BIN_COUNT + 1)));
}
hipEventRecord(end_PBM);
hipEventRecord(start_kernel);
if (isControl)
{
//Each message samples radial neighbours (static model)
        int blockSize; // Receives the max active blocks per SM for a 32-thread block (occupancy), reused below as the launch block size
CUDA_CALL(hipOccupancyMaxActiveBlocksPerMultiprocessor(&blockSize, reorderLocationMessages, 32, 0));//Randomly 32
// Round up according to array size
int gridSize = (AGENT_COUNT + blockSize - 1) / blockSize;
        //Neighbour search: read the sorted agents from d_agents, write each agent's result to d_out
neighbourSearch_control << <gridSize, blockSize >> > (d_agents, d_out);
CUDA_CHECK();
}
else
{
//Each message samples radial neighbours (static model)
int blockSize = PBM_max_count; //blockSize == largest bin size
assert(PBM_max_count > 0);
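        //One block per bin: the x-y plane of bins is flattened into gridDim.x and z into gridDim.y (decoded again inside neighbourSearch)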
dim3 gridSize;
gridSize.x = GRID_DIMS.x*GRID_DIMS.y;
gridSize.y = GRID_DIMS.z;
gridSize.z = 1;
        //Neighbour search: read the sorted agents from d_agents, write each agent's result to d_out
neighbourSearch << <gridSize, blockSize, requiredSM(blockSize) >> > (d_agents, d_out);
CUDA_CHECK();
}
CUDA_CALL(hipDeviceSynchronize());
hipEventRecord(end_kernel);
hipEventSynchronize(end_kernel);
float _pbmMillis = 0, _kernelMillis = 0;
hipEventElapsedTime(&_pbmMillis, start_PBM, end_PBM);
hipEventElapsedTime(&_kernelMillis, start_kernel, end_kernel);
if (isControl)
{
pbmMillis_control += _pbmMillis;
kernelMillis_control += _kernelMillis;
}
else
{
pbmMillis += _pbmMillis;
kernelMillis += _kernelMillis;
}
}//for(ITERATIONS)
pbmMillis_control /= ITERATIONS;
kernelMillis_control /= ITERATIONS;
pbmMillis /= ITERATIONS;
kernelMillis /= ITERATIONS;
{//Unorder messages
      int blockSize; // Receives the max active blocks per SM for a 32-thread block (occupancy), reused below as the launch block size
CUDA_CALL(hipOccupancyMaxActiveBlocksPerMultiprocessor(&blockSize, reorderLocationMessages, 32, 0));//Randomly 32
// Round up according to array size
int gridSize = (AGENT_COUNT + blockSize - 1) / blockSize;
      //Copy messages from d_out back to d_agents, restoring the original (pre-sort) agent order
unsortMessages << <gridSize, blockSize >> > (d_keys, d_vals, d_PBM, d_out, d_agents);
CUDA_CHECK();
//Swap d_out and d_agents
{
glm::vec4 *t = d_out;
d_out = d_agents;
d_agents = t;
}
//Wait for return
CUDA_CALL(hipDeviceSynchronize());
//Copy back to relative host array (for validation)
CUDA_CALL(hipMemcpy(isControl ? h_out_control : h_out, d_out, sizeof(glm::vec4)*AGENT_COUNT, hipMemcpyDeviceToHost));
CUDA_CALL(hipDeviceSynchronize());
}
}//for(MODE)
CUDA_CALL(hipUnbindTexture(d_texPBM));
CUDA_CALL(hipUnbindTexture(d_texMessages));
CUDA_CALL(hipFree(d_PBM_counts));
CUDA_CALL(hipFree(d_PBM));
//log();
printf("Control: PBM: %.2fms, Kernel: %.2fms\n", pbmMillis_control, kernelMillis_control);
printf("ThreadBlock: PBM: %.2fms, Kernel: %.2fms\n", pbmMillis, kernelMillis);
unsigned int fails = 0;
hipProfilerStop();
#ifndef CIRCLES
{//Validation
//Validate results for average model
//thrust::sort(thrust::hip::par, d_out, d_out + AGENT_COUNT, vec2Compare());
//CUDA_CALL(hipMemcpy(isControl ? h_out_control : h_out, d_out, sizeof(glm::vec2)*AGENT_COUNT, hipMemcpyDeviceToHost));
for (unsigned int i = 0; i < AGENT_COUNT; ++i)
{
assert(!(isnan(h_out[i].x) || isnan(h_out[i].y) || isnan(h_out[i].z)));
if (isnan(h_out[i].x) || isnan(h_out[i].y) || isnan(h_out[i].z))
printf("err nan\n");
auto ret = glm::epsilonEqual(glm::vec3(h_out[i]), glm::vec3(h_out_control[i]), EPSILON);
if (!(ret.x&&ret.y&&ret.z))
{
if (fails == 0)
printf("(%.5f, %.5f, %.5f) vs (%.5f, %.5f, %.5f)\n", h_out_control[i].x, h_out_control[i].y, h_out_control[i].z, h_out[i].x, h_out[i].y, h_out[i].z);
fails++;
}
}
if (fails > 0)
printf("%d/%d (%.1f%%) Failed.\n", fails, AGENT_COUNT, 100 * (fails / (float)AGENT_COUNT));
else
printf("Validation passed %d/%d\n", AGENT_COUNT, AGENT_COUNT);
}
#endif
log(f, AVERAGE_NEIGHBOURS, AGENT_COUNT, ENV_WIDTH, pbmMillis_control, kernelMillis_control, pbmMillis, kernelMillis, fails);
}
CUDA_CALL(hipUnbindTexture(d_texMessages));
CUDA_CALL(hipFree(d_vals));
CUDA_CALL(hipFree(d_keys));
CUDA_CALL(hipFree(d_agents));
CUDA_CALL(hipFree(d_agents_init));
CUDA_CALL(hipFree(d_out));
free(h_out);
free(h_out_control);
hipDeviceReset();
}
void runAgents(std::ofstream &f, const unsigned int AGENT_COUNT, const float DENSITY)
{
//density refers to approximate number of neighbours
run(f, (unsigned int)cbrt(AGENT_COUNT / (DENSITY*6.45 / 27)), AGENT_COUNT);
}
int main()
{
{
std::ofstream f;
createLog(f);
assert(f.is_open());
//for (unsigned int i = 20000; i <= 3000000; i += 20000)
for (unsigned int i = 100000; i <= 100000; i += 20000)
{
//Run i agents in a density with roughly 60 radial neighbours, and log
//Within this, it is tested over a range of proportional bin widths
runAgents(f, i, 70);
}
}
printf("fin\n");
//getchar();
return 0;
}
|
a02d0d434e044bdb50ce09e883bb93a2091a3af2.cu
|
/**
* Based off earlier start from:
* https://github.com/Robadob/SP-Bench/commit/35dcbb81cc0b73cdb6b08fb622f13e688a878133
*/
#define _CRT_SECURE_NO_WARNINGS
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_profiler_api.h"
#include <stdio.h>
#include <cmath>
#include <glm/glm.hpp>
#include <glm/gtx/component_wise.hpp>
#include <glm/gtc/constants.hpp>
#include <curand_kernel.h>
#include <texture_fetch_functions.h>
#include <cub/cub.cuh>
#include <glm/gtc/epsilon.hpp>
#define EPSILON 0.0001f
//#define CIRCLES
//Cuda call
static void HandleCUDAError(const char *file,
int line,
cudaError_t status = cudaGetLastError()) {
#ifdef _DEBUG
cudaDeviceSynchronize();
#endif
if (status != cudaError::cudaSuccess || (status = cudaGetLastError()) != cudaError::cudaSuccess)
{
printf("%s(%i) CUDA Error Occurred;\n%s\n", file, line, cudaGetErrorString(status));
#ifdef _DEBUG
getchar();
#endif
exit(1);
}
}
#define CUDA_CALL( err ) (HandleCUDAError(__FILE__, __LINE__ , err))
#define CUDA_CHECK() (HandleCUDAError(__FILE__, __LINE__))
//Logging (found in log.cpp)
#include <fstream>
void createLog(std::ofstream &f);
void log(std::ofstream &f,
const unsigned int &estRadialNeighbours,
const unsigned int &agentCount,
const unsigned int &envWidth,
const float &PBM_control,
const float &kernel_control,
const float &PBM,
const float &kernel,
const unsigned int &fails
);
__device__ __constant__ unsigned int d_agentCount;
__device__ __constant__ float d_environmentWidth_float;
__device__ __constant__ unsigned int d_gridDim;
glm::uvec3 GRID_DIMS;
__device__ __constant__ float d_gridDim_float;
__device__ __constant__ float d_RADIUS;
__device__ __constant__ float d_R_SIN_45;
__device__ __constant__ float d_binWidth;
//For thread block max bin check
unsigned int *d_PBM_max_count;
unsigned int PBM_max_count = 0;
unsigned int PBM_max_Moore_count = 0;//This is unused, it could be used if we wished to load entire Moore neighbourhood at once to shared mem, instead we load a bin at a time
texture<float4> d_texMessages;
texture<unsigned int> d_texPBM;
__global__ void init_curand(curandState *state, unsigned long long seed) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < d_agentCount)
curand_init(seed, id, 0, &state[id]);
}
__global__ void init_agents(curandState *state, glm::vec4 *locationMessages) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= d_agentCount)
return;
  //curand_uniform returns 0<x<=1.0; not much we can really do about 0 being exclusive
//negate and + 1.0, to make 0<=x<1.0
locationMessages[id].x = (-curand_uniform(&state[id]) + 1.0f)*d_environmentWidth_float;
locationMessages[id].y = (-curand_uniform(&state[id]) + 1.0f)*d_environmentWidth_float;
locationMessages[id].z = (-curand_uniform(&state[id]) + 1.0f)*d_environmentWidth_float;
}
__device__ __forceinline__ glm::ivec3 getGridPosition(glm::vec3 worldPos)
{
//Clamp each grid coord to 0<=x<dim
return clamp(floor((worldPos / d_environmentWidth_float)*d_gridDim_float), glm::vec3(0), glm::vec3((float)d_gridDim - 1));
}
__device__ __forceinline__ unsigned int getHash(glm::ivec3 gridPos)
{
//Bound gridPos to gridDimensions
gridPos = clamp(gridPos, glm::ivec3(0), glm::ivec3(d_gridDim - 1));
  //Compute hash (effectively an index for a bin within the partitioning grid in this case)
return (unsigned int)(
(gridPos.z * d_gridDim * d_gridDim) + //z
(gridPos.y * d_gridDim) + //y
gridPos.x); //x
}
__global__ void atomicHistogram(unsigned int* bin_index, unsigned int* bin_sub_index, unsigned int *pbm_counts, glm::vec4 *messageBuffer)
{
unsigned int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//Kill excess threads
if (index >= d_agentCount) return;
glm::ivec3 gridPos = getGridPosition(messageBuffer[index]);
unsigned int hash = getHash(gridPos);
bin_index[index] = hash;
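  //atomicInc returns the bin's previous count, which becomes this agent's sub-index within its bin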
unsigned int bin_idx = atomicInc((unsigned int*)&pbm_counts[hash], 0xFFFFFFFF);
bin_sub_index[index] = bin_idx;
}
__global__ void reorderLocationMessages(
unsigned int* bin_index,
unsigned int* bin_sub_index,
unsigned int *pbm,
glm::vec4 *unordered_messages,
glm::vec4 *ordered_messages
)
{
unsigned int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//Kill excess threads
if (index >= d_agentCount) return;
unsigned int i = bin_index[index];
unsigned int sorted_index = pbm[i] + bin_sub_index[index];
//Order messages into swap space
ordered_messages[sorted_index] = unordered_messages[index];
}
int requiredSM(int blockSize)
{
cudaDeviceProp dp;
int device;
cudaGetDevice(&device);
  memset(&dp, 0, sizeof(cudaDeviceProp));//memset expects (dst, value, size)
cudaGetDeviceProperties(&dp, device);
//We could use dp.sharedMemPerBlock/N to improve occupancy
return (int)min(PBM_max_count * sizeof(float3), dp.sharedMemPerBlock);//Need to limit this to the max SM
}
/**
 * Control kernel: launched with one thread per agent
 * Each thread reads its full Moore neighbourhood of bins directly from texture memory
 */
__global__ void __launch_bounds__(64) neighbourSearch_control(const glm::vec4 *agents, glm::vec4 *out)
{
#define STRIPS
unsigned int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//Kill excess threads
if (index >= d_agentCount) return;
glm::vec3 pos = *(glm::vec3*)&agents[index];
glm::ivec3 gridPos = getGridPosition(pos);
glm::ivec3 gridPosRelative;
unsigned int count = 0;
glm::vec3 average = glm::vec3(0);
for (gridPosRelative.z = -1; gridPosRelative.z <= 1; gridPosRelative.z++)
{//zmin to zmax
int currentBinZ = gridPos.z + gridPosRelative.z;
if (currentBinZ >= 0 && currentBinZ < d_gridDim)
{
for (gridPosRelative.y = -1; gridPosRelative.y <= 1; gridPosRelative.y++)
{//ymin to ymax
int currentBinY = gridPos.y + gridPosRelative.y;
if (currentBinY >= 0 && currentBinY < d_gridDim)
{
#ifndef STRIPS
for (gridPosRelative.x = -1; gridPosRelative.x <= 1; gridPosRelative.x++)
{//xmin to xmax
int currentBinX = gridPos.x + gridPosRelative.x;
//Find bin start and end
unsigned int binHash = getHash(glm::ivec3(currentBinX, currentBinY, currentBinZ));
//if (binHash>d_gridDim*d_gridDim)
//{
// printf("Hash: %d, gridDim: %d, pos: (%d, %d)\n", binHash, d_gridDim, tGridPos.x, tGridPos.y);
//}
unsigned int binStart = tex1Dfetch(d_texPBM, binHash);
unsigned int binEnd = tex1Dfetch(d_texPBM, binHash + 1);
#else
int currentBinX = gridPos.x - 1;
currentBinX = currentBinX >= 0 ? currentBinX : 0;
unsigned int binHash = getHash(glm::ivec3(currentBinX, currentBinY, currentBinZ));
unsigned int binStart = tex1Dfetch(d_texPBM, binHash);
currentBinX = gridPos.x + 1;
currentBinX = currentBinX < d_gridDim ? currentBinX : d_gridDim - 1;
binHash = getHash(glm::ivec3(currentBinX, currentBinY, currentBinZ));
unsigned int binEnd = tex1Dfetch(d_texPBM, binHash + 1);
#endif
//Iterate messages in range
for (unsigned int i = binStart; i < binEnd; ++i)
{
if (i != index)//Ignore self
{
float4 message = tex1Dfetch(d_texMessages, i);
#ifndef CIRCLES
if (distance(*(glm::vec3*)&message, pos) < d_RADIUS)
{
//message.z = pow(sqrt(sin(distance(message, pos))),3.1f);//Bonus compute
average += *(glm::vec3*)&message;
count++;
}
#else
glm::vec3 toLoc = (*(glm::vec3*)&message) - pos;//Difference
float separation = length(toLoc);
if (separation < d_RADIUS && separation > 0)
{
const float REPULSE_FACTOR = 0.05f;
float k = sinf((separation / d_RADIUS)*3.141*-2)*REPULSE_FACTOR;
            toLoc /= separation;//Normalize (without recalculating separation)
average += k * toLoc;
count++;
}
#endif
}
}
}
#ifndef STRIPS
}
#endif
}
}
}
average /= count>0 ? count : 1;
#ifndef CIRCLES
out[index].x = average.x;
out[index].y = average.y;
out[index].z = average.z;
#else
out[index].x = pos.x + average.x;
out[index].y = pos.y + average.y;
out[index].z = pos.z + average.z;
#endif
}
/**
 * Kernel must be launched 1 block per bin
 * This removes the necessity of __launch_bounds__(64) as all threads in the block are touching the same messages
 * However we end up with a lot of (mostly) idle threads if one bin is dense and others are empty.
 */
__global__ void neighbourSearch(const glm::vec4 *agents, glm::vec4 *out)
{
extern __shared__ float3 sm_messages[];
//My data
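  //Recover this block's 3D bin coordinate from the flattened launch grid (gridDim.x spans the x-y bins, gridDim.y spans z)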
glm::ivec3 myBin = glm::ivec3(blockIdx.x / d_gridDim, blockIdx.x % d_gridDim, blockIdx.y);
unsigned int index = UINT_MAX;
glm::vec3 pos;
{
unsigned int binHash = getHash(myBin);
unsigned int binStart = tex1Dfetch(d_texPBM, binHash);
unsigned int binEnd = tex1Dfetch(d_texPBM, binHash + 1);
unsigned int binCount = binEnd - binStart;
if (threadIdx.x < binCount)
{
index = binStart + threadIdx.x;
pos = *(glm::vec3*)&agents[index];
}
}
//Model data
unsigned int count = 0;
glm::vec3 average = glm::vec3(0);
//Iterate each bin in the Moore neighbourhood
glm::ivec3 currentBin;
for(int _x = -1;_x<=1;++_x)
{
currentBin.x = myBin.x + _x;
if (currentBin.x >= 0 && currentBin.x < d_gridDim)
{
for (int _y = -1; _y <= 1; ++_y)
{
currentBin.y = myBin.y + _y;
if (currentBin.y >= 0 && currentBin.y < d_gridDim)
{
for (int _z = -1; _z <= 1; ++_z)
{
currentBin.z = myBin.z + _z;
if (currentBin.z >= 0 && currentBin.z < d_gridDim)
{
//Now we must load all messages from currentBin into shared memory
//WARNING: There is an unhandled edge case whereby we dont have enough shared memory, and must segment the load
unsigned int binHash = getHash(currentBin);
unsigned int binStart = tex1Dfetch(d_texPBM, binHash);
unsigned int binEnd = tex1Dfetch(d_texPBM, binHash + 1);
unsigned int binCount = binEnd - binStart;
//If this bin has a message for us to load
if (threadIdx.x < binCount)
{
//Load the message into shared memory
float4 message = tex1Dfetch(d_texMessages, binStart + threadIdx.x);
sm_messages[threadIdx.x] = *(float3*)&message;
}
//Wait for all loading to be completed
__syncthreads();
//If we host a valid message...
if (index != UINT_MAX)
{
//Iterate the loaded messages
for (unsigned int i = 0; i < binCount; ++i)
{
//Skip our own loaded message
if (_x == 0 && _y == 0 && _z == 0 && i == threadIdx.x)
continue;
float3 message = sm_messages[i];
#ifndef CIRCLES
if (distance(*(glm::vec3*)&message, pos) < d_RADIUS)
{
//message.z = pow(sqrt(sin(distance(message, pos))),3.1f);//Bonus compute
average += *(glm::vec3*)&message;
count++;
}
#else
glm::vec3 toLoc = (*(glm::vec3*)&message) - pos;//Difference
float separation = length(toLoc);
if (separation < d_RADIUS && separation > 0)
{
const float REPULSE_FACTOR = 0.05f;
float k = sinf((separation / d_RADIUS)*3.141*-2)*REPULSE_FACTOR;
                toLoc /= separation;//Normalize (without recalculating separation)
average += k * toLoc;
count++;
}
#endif
}
}
//Wait for all processing to be completed, so that we can proceed to next bin
__syncthreads();
}
}
}
//Could optimise here, by handling binHash outside the loop
//For this would need to iterate _y on the outside, so hashes are contiguous
}
}
}
//If we have a valid message...
if(index != UINT_MAX)
{
average /= count>0 ? count : 1;
#ifndef CIRCLES
out[index].x = average.x;
out[index].y = average.y;
out[index].z = average.z;
#else
out[index].x = pos.x + average.x;
out[index].y = pos.y + average.y;
out[index].z = pos.z + average.z;
#endif
}
}
__global__ void unsortMessages(
unsigned int* bin_index,
unsigned int* bin_sub_index,
unsigned int *pbm,
glm::vec4 *ordered_messages,
glm::vec4 *unordered_messages
)
{
unsigned int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//Kill excess threads
if (index >= d_agentCount) return;
unsigned int i = bin_index[index];
unsigned int sorted_index = pbm[i] + bin_sub_index[index];
//Order messages into swap space
unordered_messages[index] = ordered_messages[sorted_index];
}
/**
 * This program acts as a test rig to demonstrate the raw impact of message handling
*/
void run(std::ofstream &f, const unsigned int ENV_WIDTH, const unsigned int AGENT_COUNT = 1000000)
{
void *d_CUB_temp_storage = nullptr;
size_t d_CUB_temp_storage_bytes = 0;
//Spatial partitioning mock
  //Cubic environment of ENV_WIDTH^3
  //Filled with AGENT_COUNT randomly distributed agents
//const unsigned int ENV_WIDTH = 250;
float ENV_WIDTH_float = (float)ENV_WIDTH;
const unsigned int RNG_SEED = 12;
const unsigned int ENV_VOLUME = ENV_WIDTH * ENV_WIDTH * ENV_WIDTH;
CUDA_CALL(cudaMemcpyToSymbol(d_agentCount, &AGENT_COUNT, sizeof(unsigned int)));
CUDA_CALL(cudaMemcpyToSymbol(d_environmentWidth_float, &ENV_WIDTH_float, sizeof(float)));
//vec4 used instead of vec3 bc texture memory reqs
glm::vec4 *d_agents_init = nullptr, *d_agents = nullptr, *d_out = nullptr;
unsigned int *d_keys = nullptr, *d_vals = nullptr;
CUDA_CALL(cudaMalloc(&d_agents_init, sizeof(glm::vec4) * AGENT_COUNT));
CUDA_CALL(cudaMalloc(&d_agents, sizeof(glm::vec4) * AGENT_COUNT));
CUDA_CALL(cudaMalloc(&d_out, sizeof(glm::vec4) * AGENT_COUNT));
glm::vec4 *h_out = (glm::vec4*)malloc(sizeof(glm::vec4) * AGENT_COUNT);
glm::vec4 *h_out_control = (glm::vec4*)malloc(sizeof(glm::vec4) * AGENT_COUNT);
//Init agents
{
//Generate curand
curandState *d_rng;
CUDA_CALL(cudaMalloc(&d_rng, AGENT_COUNT * sizeof(curandState)));
//Arbitrary thread block sizes (speed not too important during one off initialisation)
unsigned int initThreads = 512;
unsigned int initBlocks = (AGENT_COUNT / initThreads) + 1;
    init_curand << <initBlocks, initThreads >> >(d_rng, RNG_SEED);//Defined above in this file
    CUDA_CALL(cudaDeviceSynchronize());
    cudaProfilerStart();//Start here because init_curand is super slow for large agent counts.
init_agents << <initBlocks, initThreads >> >(d_rng, d_agents_init);
//Free curand
CUDA_CALL(cudaFree(d_rng));
CUDA_CALL(cudaMalloc(&d_keys, sizeof(unsigned int)*AGENT_COUNT));
CUDA_CALL(cudaMalloc(&d_vals, sizeof(unsigned int)*AGENT_COUNT));
}
//Decide interaction radius
//for a range of bin widths
const float RADIUS = 1.0f;//
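  //Volume of the interaction sphere, (4/3)*pi*r^3, used below to estimate the average neighbour count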
const float RADIAL_VOLUME = glm::pi<float>()*RADIUS*RADIUS*RADIUS*(4.0f/3.0f);
const unsigned int AVERAGE_NEIGHBOURS = (unsigned int)(AGENT_COUNT*RADIAL_VOLUME / ENV_VOLUME);
printf("Agents: %d, RVol: %.2f, Average Neighbours: %d\n", AGENT_COUNT, RADIAL_VOLUME, AVERAGE_NEIGHBOURS);
//{
// cudaFree(d_agents_init);
// cudaFree(d_agents);
// cudaFree(d_out);
// return;
//}
  const float rSin45 = (float)(RADIUS*sin(glm::radians(45.0f)));//float literal so glm::radians performs floating point maths
CUDA_CALL(cudaMemcpyToSymbol(d_RADIUS, &RADIUS, sizeof(float)));
CUDA_CALL(cudaMemcpyToSymbol(d_R_SIN_45, &rSin45, sizeof(float)));
{
{
//Copy init state to d_out
CUDA_CALL(cudaMemcpy(d_out, d_agents_init, sizeof(glm::vec4)*AGENT_COUNT, cudaMemcpyDeviceToDevice));
}
//Decide bin width (as a ratio to radius)
const float BIN_WIDTH = RADIUS;
float GRID_DIMS_float = ENV_WIDTH / BIN_WIDTH;
GRID_DIMS = glm::uvec3((unsigned int)ceil(GRID_DIMS_float));
CUDA_CALL(cudaMemcpyToSymbol(d_binWidth, &BIN_WIDTH, sizeof(float)));
CUDA_CALL(cudaMemcpyToSymbol(d_gridDim, &GRID_DIMS.x, sizeof(unsigned int)));
CUDA_CALL(cudaMemcpyToSymbol(d_gridDim_float, &GRID_DIMS_float, sizeof(float)));
const unsigned int BIN_COUNT = glm::compMul(GRID_DIMS);
cudaEvent_t start_PBM, end_PBM, start_kernel, end_kernel;
cudaEventCreate(&start_PBM);
cudaEventCreate(&end_PBM);
cudaEventCreate(&start_kernel);
cudaEventCreate(&end_kernel);
//BuildPBM
unsigned int *d_PBM_counts = nullptr;
unsigned int *d_PBM = nullptr;
CUDA_CALL(cudaMalloc(&d_PBM_counts, (BIN_COUNT + 1) * sizeof(unsigned int)));
CUDA_CALL(cudaMalloc(&d_PBM, (BIN_COUNT + 1) * sizeof(unsigned int)));
//Prep for threadblocks
CUDA_CALL(cudaMalloc(&d_PBM_max_count, sizeof(unsigned int)));
CUDA_CALL(cudaMemset(d_PBM_max_count, 0, sizeof(unsigned int)));
{//Resize cub temp if required
size_t bytesCheck, bytesCheck2;
cub::DeviceScan::ExclusiveSum(nullptr, bytesCheck, d_PBM, d_PBM_counts, BIN_COUNT + 1);
cub::DeviceReduce::Max(nullptr, bytesCheck2, d_PBM_counts, d_PBM_max_count, BIN_COUNT);
bytesCheck = glm::max(bytesCheck, bytesCheck2);
if (bytesCheck > d_CUB_temp_storage_bytes)
{
if (d_CUB_temp_storage)
{
CUDA_CALL(cudaFree(d_CUB_temp_storage));
}
d_CUB_temp_storage_bytes = bytesCheck;
CUDA_CALL(cudaMalloc(&d_CUB_temp_storage, d_CUB_temp_storage_bytes));
}
}
float pbmMillis_control = 0, kernelMillis_control = 0;
float pbmMillis = 0, kernelMillis = 0;
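    //_j counts 1 then 0 (control pass, then thread-block pass); the unsigned decrement then wraps to UINT_MAX, ending the loop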
for (unsigned int _j = 1; _j < UINT_MAX; --_j)
{
//1 = control
//0 = threadblock
bool isControl = _j != 0;
      //Average over ITERATIONS runs of the model (currently 1)
const unsigned int ITERATIONS = 1;
for (unsigned int i = 0; i < ITERATIONS; ++i)
{
//Reset each run of average model
#ifndef CIRCLES
CUDA_CALL(cudaMemcpy(d_out, d_agents_init, sizeof(glm::vec4)*AGENT_COUNT, cudaMemcpyDeviceToDevice));
#endif
cudaEventRecord(start_PBM);
{//Build atomic histogram
CUDA_CALL(cudaMemset(d_PBM_counts, 0x00000000, (BIN_COUNT + 1) * sizeof(unsigned int)));
        int blockSize; // Receives the max active blocks per SM for a 32-thread block (occupancy), reused below as the launch block size
CUDA_CALL(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blockSize, atomicHistogram, 32, 0));//Randomly 32
// Round up according to array size
int gridSize = (AGENT_COUNT + blockSize - 1) / blockSize;
atomicHistogram << <gridSize, blockSize >> > (d_keys, d_vals, d_PBM_counts, d_out);
CUDA_CALL(cudaDeviceSynchronize());
}
{//Scan (sum), to finalise PBM
cub::DeviceScan::ExclusiveSum(d_CUB_temp_storage, d_CUB_temp_storage_bytes, d_PBM_counts, d_PBM, BIN_COUNT + 1);
}
{//Reorder messages
        int blockSize; // Receives the max active blocks per SM for a 32-thread block (occupancy), reused below as the launch block size
CUDA_CALL(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blockSize, reorderLocationMessages, 32, 0));//Randomly 32
// Round up according to array size
int gridSize = (AGENT_COUNT + blockSize - 1) / blockSize;
        //Copy messages from d_out to d_agents, in hash order
reorderLocationMessages << <gridSize, blockSize >> > (d_keys, d_vals, d_PBM, d_out, d_agents);
CUDA_CHECK();
}
if (!isControl)
{//Calc max bin size (for threadblocks)
cub::DeviceReduce::Max(d_CUB_temp_storage, d_CUB_temp_storage_bytes, d_PBM_counts, d_PBM_max_count, BIN_COUNT);
CUDA_CALL(cudaGetLastError());
CUDA_CALL(cudaMemcpy(&PBM_max_count, d_PBM_max_count, sizeof(unsigned int), cudaMemcpyDeviceToHost));
//Calc moore size (bin size^dims?)
//PBM_max_Moore_count = (unsigned int)pow(PBM_max_count, 3);//2==2D//Unused, requires 9x shared mem in 2D, 27x in 3D
}
{//Fill PBM and Message Texture Buffers
CUDA_CALL(cudaDeviceSynchronize());//Wait for return
CUDA_CALL(cudaBindTexture(nullptr, d_texMessages, d_agents, sizeof(glm::vec4) * AGENT_COUNT));
CUDA_CALL(cudaBindTexture(nullptr, d_texPBM, d_PBM, sizeof(unsigned int) * (BIN_COUNT + 1)));
}
cudaEventRecord(end_PBM);
cudaEventRecord(start_kernel);
if (isControl)
{
//Each message samples radial neighbours (static model)
        int blockSize; // Receives the max active blocks per SM for a 32-thread block (occupancy), reused below as the launch block size
CUDA_CALL(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blockSize, reorderLocationMessages, 32, 0));//Randomly 32
// Round up according to array size
int gridSize = (AGENT_COUNT + blockSize - 1) / blockSize;
        //Neighbour search: read the sorted agents from d_agents, write each agent's result to d_out
neighbourSearch_control << <gridSize, blockSize >> > (d_agents, d_out);
CUDA_CHECK();
}
else
{
//Each message samples radial neighbours (static model)
int blockSize = PBM_max_count; //blockSize == largest bin size
assert(PBM_max_count > 0);
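        //One block per bin: the x-y plane of bins is flattened into gridDim.x and z into gridDim.y (decoded again inside neighbourSearch)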
dim3 gridSize;
gridSize.x = GRID_DIMS.x*GRID_DIMS.y;
gridSize.y = GRID_DIMS.z;
gridSize.z = 1;
        //Neighbour search: read the sorted agents from d_agents, write each agent's result to d_out
neighbourSearch << <gridSize, blockSize, requiredSM(blockSize) >> > (d_agents, d_out);
CUDA_CHECK();
}
CUDA_CALL(cudaDeviceSynchronize());
cudaEventRecord(end_kernel);
cudaEventSynchronize(end_kernel);
float _pbmMillis = 0, _kernelMillis = 0;
cudaEventElapsedTime(&_pbmMillis, start_PBM, end_PBM);
cudaEventElapsedTime(&_kernelMillis, start_kernel, end_kernel);
if (isControl)
{
pbmMillis_control += _pbmMillis;
kernelMillis_control += _kernelMillis;
}
else
{
pbmMillis += _pbmMillis;
kernelMillis += _kernelMillis;
}
}//for(ITERATIONS)
pbmMillis_control /= ITERATIONS;
kernelMillis_control /= ITERATIONS;
pbmMillis /= ITERATIONS;
kernelMillis /= ITERATIONS;
{//Unorder messages
      int blockSize; // Receives the max active blocks per SM for a 32-thread block (occupancy), reused below as the launch block size
CUDA_CALL(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blockSize, reorderLocationMessages, 32, 0));//Randomly 32
// Round up according to array size
int gridSize = (AGENT_COUNT + blockSize - 1) / blockSize;
      //Copy messages from d_out back to d_agents, restoring the original (pre-sort) agent order
unsortMessages << <gridSize, blockSize >> > (d_keys, d_vals, d_PBM, d_out, d_agents);
CUDA_CHECK();
//Swap d_out and d_agents
{
glm::vec4 *t = d_out;
d_out = d_agents;
d_agents = t;
}
//Wait for return
CUDA_CALL(cudaDeviceSynchronize());
//Copy back to relative host array (for validation)
CUDA_CALL(cudaMemcpy(isControl ? h_out_control : h_out, d_out, sizeof(glm::vec4)*AGENT_COUNT, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaDeviceSynchronize());
}
}//for(MODE)
CUDA_CALL(cudaUnbindTexture(d_texPBM));
CUDA_CALL(cudaUnbindTexture(d_texMessages));
CUDA_CALL(cudaFree(d_PBM_counts));
CUDA_CALL(cudaFree(d_PBM));
//log();
printf("Control: PBM: %.2fms, Kernel: %.2fms\n", pbmMillis_control, kernelMillis_control);
printf("ThreadBlock: PBM: %.2fms, Kernel: %.2fms\n", pbmMillis, kernelMillis);
unsigned int fails = 0;
cudaProfilerStop();
#ifndef CIRCLES
{//Validation
//Validate results for average model
//thrust::sort(thrust::cuda::par, d_out, d_out + AGENT_COUNT, vec2Compare());
//CUDA_CALL(cudaMemcpy(isControl ? h_out_control : h_out, d_out, sizeof(glm::vec2)*AGENT_COUNT, cudaMemcpyDeviceToHost));
for (unsigned int i = 0; i < AGENT_COUNT; ++i)
{
assert(!(isnan(h_out[i].x) || isnan(h_out[i].y) || isnan(h_out[i].z)));
if (isnan(h_out[i].x) || isnan(h_out[i].y) || isnan(h_out[i].z))
printf("err nan\n");
auto ret = glm::epsilonEqual(glm::vec3(h_out[i]), glm::vec3(h_out_control[i]), EPSILON);
if (!(ret.x&&ret.y&&ret.z))
{
if (fails == 0)
printf("(%.5f, %.5f, %.5f) vs (%.5f, %.5f, %.5f)\n", h_out_control[i].x, h_out_control[i].y, h_out_control[i].z, h_out[i].x, h_out[i].y, h_out[i].z);
fails++;
}
}
if (fails > 0)
printf("%d/%d (%.1f%%) Failed.\n", fails, AGENT_COUNT, 100 * (fails / (float)AGENT_COUNT));
else
printf("Validation passed %d/%d\n", AGENT_COUNT, AGENT_COUNT);
}
#endif
log(f, AVERAGE_NEIGHBOURS, AGENT_COUNT, ENV_WIDTH, pbmMillis_control, kernelMillis_control, pbmMillis, kernelMillis, fails);
}
CUDA_CALL(cudaUnbindTexture(d_texMessages));
CUDA_CALL(cudaFree(d_vals));
CUDA_CALL(cudaFree(d_keys));
CUDA_CALL(cudaFree(d_agents));
CUDA_CALL(cudaFree(d_agents_init));
CUDA_CALL(cudaFree(d_out));
free(h_out);
free(h_out_control);
cudaDeviceReset();
}
void runAgents(std::ofstream &f, const unsigned int AGENT_COUNT, const float DENSITY)
{
//density refers to approximate number of neighbours
run(f, (unsigned int)cbrt(AGENT_COUNT / (DENSITY*6.45 / 27)), AGENT_COUNT);
}
int main()
{
{
std::ofstream f;
createLog(f);
assert(f.is_open());
//for (unsigned int i = 20000; i <= 3000000; i += 20000)
for (unsigned int i = 100000; i <= 100000; i += 20000)
{
//Run i agents in a density with roughly 60 radial neighbours, and log
//Within this, it is tested over a range of proportional bin widths
runAgents(f, i, 70);
}
}
printf("fin\n");
//getchar();
return 0;
}
|
4b98d24392fb16a00c5bc5bcc428b3fafe3ecf6c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdio.h>
#define N 32
#define THREADS_PER_BLOCK 32
__global__ void dotproduct(float* x, float* y, float* result) {
// Compute the index this thread should use to access elements
size_t index = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
// Create space for a shared array that all threads in this block will
// use to store pairwise products
__shared__ float temp[THREADS_PER_BLOCK];
// Compute pairwise products
temp[threadIdx.x] = x[index] * y[index];
__syncthreads();
// The thread with index zero will sum up the values in temp
if(threadIdx.x == 0) {
float sum = 0;
int i;
    for(i=0; i<THREADS_PER_BLOCK; i++) {
      sum += temp[i]; // only thread 0 runs this loop, so no atomic is needed on the local sum
    }
    // Add this block's sum to the result atomically, so multiple blocks cannot race
    atomicAdd(result, sum);
}
}
int main() {
// Allocate arrays for X and Y on the CPU
float* cpu_x = (float*)malloc(sizeof(float) * N);
float* cpu_y = (float*)malloc(sizeof(float) * N);
// Initialize X and Y
int i;
for(i=0; i<N; i++) {
cpu_x[i] = (float)i;
cpu_y[i] = (float)i;
}
// Allocate space for X and Y on the GPU
float* gpu_x;
float* gpu_y;
  float* gpu_result; // device-side accumulator for the dot product result
  if(hipMalloc(&gpu_result, sizeof(float)) != hipSuccess) {
    fprintf(stderr, "Failed to allocate result on GPU\n");
    exit(2);
  }
  // Zero the accumulator before the kernel adds block sums into it
  hipMemset(gpu_result, 0, sizeof(float));
if(hipMalloc(&gpu_x, sizeof(float) * N) != hipSuccess) {
fprintf(stderr, "Failed to allocate X array on GPU\n");
exit(2);
}
if(hipMalloc(&gpu_y, sizeof(float) * N) != hipSuccess) {
fprintf(stderr, "Failed to allocate Y array on GPU\n");
exit(2);
}
// Copy the host X and Y arrays to the device X and Y
// arrays
if(hipMemcpy(gpu_x, cpu_x, sizeof(float) * N, hipMemcpyHostToDevice) !=
hipSuccess) {
fprintf(stderr, "Failed to copy X to the GPU\n");
}
if(hipMemcpy(gpu_y, cpu_y, sizeof(float) * N,
hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "Failed to copy Y to the GPU\n");
}
// How many blocks should be run, rounding up to
// include all threads?
size_t blocks = (N + THREADS_PER_BLOCK - 1) /
THREADS_PER_BLOCK;
  // Run the dot product kernel (result must be a device pointer, not a host address)
  hipLaunchKernelGGL(( dotproduct), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, gpu_x, gpu_y, gpu_result);
  // Wait for the kernel to finish
  hipDeviceSynchronize();
  // Copy the result back to the host and print it
  float cpu_result = 0.0f;
  hipMemcpy(&cpu_result, gpu_result, sizeof(float), hipMemcpyDeviceToHost);
  printf("%f\n", cpu_result);
  hipFree(gpu_result);
hipFree(gpu_x);
hipFree(gpu_y);
free(cpu_x);
free(cpu_y);
return 0;
}
|
4b98d24392fb16a00c5bc5bcc428b3fafe3ecf6c.cu
|
#include <stdint.h>
#include <stdio.h>
#define N 32
#define THREADS_PER_BLOCK 32
__global__ void dotproduct(float* x, float* y, float* result) {
// Compute the index this thread should use to access elements
size_t index = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
// Create space for a shared array that all threads in this block will
// use to store pairwise products
__shared__ float temp[THREADS_PER_BLOCK];
// Compute pairwise products
temp[threadIdx.x] = x[index] * y[index];
__syncthreads();
// The thread with index zero will sum up the values in temp
if(threadIdx.x == 0) {
float sum = 0;
int i;
    for(i=0; i<THREADS_PER_BLOCK; i++) {
      sum += temp[i]; // only thread 0 runs this loop, so no atomic is needed on the local sum
    }
    // Add this block's sum to the result atomically, so multiple blocks cannot race
    atomicAdd(result, sum);
}
}
int main() {
// Allocate arrays for X and Y on the CPU
float* cpu_x = (float*)malloc(sizeof(float) * N);
float* cpu_y = (float*)malloc(sizeof(float) * N);
// Initialize X and Y
int i;
for(i=0; i<N; i++) {
cpu_x[i] = (float)i;
cpu_y[i] = (float)i;
}
// Allocate space for X and Y on the GPU
float* gpu_x;
float* gpu_y;
  float* gpu_result; // device-side accumulator for the dot product result
  if(cudaMalloc(&gpu_result, sizeof(float)) != cudaSuccess) {
    fprintf(stderr, "Failed to allocate result on GPU\n");
    exit(2);
  }
  // Zero the accumulator before the kernel adds block sums into it
  cudaMemset(gpu_result, 0, sizeof(float));
if(cudaMalloc(&gpu_x, sizeof(float) * N) != cudaSuccess) {
fprintf(stderr, "Failed to allocate X array on GPU\n");
exit(2);
}
if(cudaMalloc(&gpu_y, sizeof(float) * N) != cudaSuccess) {
fprintf(stderr, "Failed to allocate Y array on GPU\n");
exit(2);
}
// Copy the host X and Y arrays to the device X and Y
// arrays
if(cudaMemcpy(gpu_x, cpu_x, sizeof(float) * N, cudaMemcpyHostToDevice) !=
cudaSuccess) {
fprintf(stderr, "Failed to copy X to the GPU\n");
}
if(cudaMemcpy(gpu_y, cpu_y, sizeof(float) * N,
cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Failed to copy Y to the GPU\n");
}
// How many blocks should be run, rounding up to
// include all threads?
size_t blocks = (N + THREADS_PER_BLOCK - 1) /
THREADS_PER_BLOCK;
  // Run the dot product kernel (result must be a device pointer, not a host address)
  dotproduct<<<blocks, THREADS_PER_BLOCK>>>(gpu_x, gpu_y, gpu_result);
  // Wait for the kernel to finish
  cudaDeviceSynchronize();
  // Copy the result back to the host and print it
  float cpu_result = 0.0f;
  cudaMemcpy(&cpu_result, gpu_result, sizeof(float), cudaMemcpyDeviceToHost);
  printf("%f\n", cpu_result);
  cudaFree(gpu_result);
cudaFree(gpu_x);
cudaFree(gpu_y);
free(cpu_x);
free(cpu_y);
return 0;
}
|
4c4ccebcf537f65b7753c7e181fb0953a188724b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void FCNMaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,
const int stride_h, const int stride_w, const int kstride_h, const int kstride_w,
const int pad_h, const int pad_w, Dtype* top_data,
int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + ext_kernel_h, height);
int wend = min(wstart + ext_kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; h += kstride_h) {
for (int w = wstart; w < wend; w += kstride_w) {
if (bottom_data[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_data[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data,
int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_data[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_data[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_data[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* rand_idx, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h;
int hend = min(hstart + kernel_h, height);
int wstart = pw * stride_w;
int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
}
}
float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_data[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h;
int hend = min(hstart + kernel_h, height);
int wstart = pw * stride_w;
int wend = min(wstart + kernel_w, width);
    // We set cumsum to FLT_MIN (rather than 0) to avoid divide-by-zero problems
Dtype cumsum = FLT_MIN;
Dtype cumvalues = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w];
}
}
top_data[index] = cumvalues / cumsum;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
if ((kstride_h_ != 1) || (kstride_w_ != 1)) {
// we have validated the pooling type is MAX for FCN
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( FCNMaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, ext_kernel_h_, ext_kernel_w_,
stride_h_, stride_w_, kstride_h_, kstride_w_,
pad_h_, pad_w_, top_data,
mask, top_mask);
return;
}
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
const int* mask, const Dtype* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
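    // Determine the range of pooled outputs whose pooling windows could include this input element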
int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
int phend = min((h + pad_h) / stride_h + 1, pooled_height);
int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
if (mask) {
mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
} else {
top_mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, pooled_height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* rand_idx, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, pooled_height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
rand_idx += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
CHECK((kstride_h_ == 1) && (kstride_w_ == 1)) << "Backward_gpu is not implemented for FCN pooling";
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
|
4c4ccebcf537f65b7753c7e181fb0953a188724b.cu
|
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void FCNMaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int ext_kernel_h, const int ext_kernel_w,
const int stride_h, const int stride_w, const int kstride_h, const int kstride_w,
const int pad_h, const int pad_w, Dtype* top_data,
int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + ext_kernel_h, height);
int wend = min(wstart + ext_kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; h += kstride_h) {
for (int w = wstart; w < wend; w += kstride_w) {
if (bottom_data[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_data[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data,
int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_data[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_data[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_data[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* rand_idx, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h;
int hend = min(hstart + kernel_h, height);
int wstart = pw * stride_w;
int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
}
}
float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_data[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h;
int hend = min(hstart + kernel_h, height);
int wstart = pw * stride_w;
int wend = min(wstart + kernel_w, width);
    // We set cumsum to FLT_MIN (rather than 0) to avoid divide-by-zero problems
Dtype cumsum = FLT_MIN;
Dtype cumvalues = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w];
}
}
top_data[index] = cumvalues / cumsum;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
if ((kstride_h_ != 1) || (kstride_w_ != 1)) {
// we have validated the pooling type is MAX for FCN
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
FCNMaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, ext_kernel_h_, ext_kernel_w_,
stride_h_, stride_w_, kstride_h_, kstride_w_,
pad_h_, pad_w_, top_data,
mask, top_mask);
return;
}
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
const int* mask, const Dtype* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
int phend = min((h + pad_h) / stride_h + 1, pooled_height);
int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
if (mask) {
mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
} else {
top_mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
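// Added note (not in the original source): the phstart/phend and pwstart/pwend
// bounds in MaxPoolBackward come from inverting the forward window. Pooled row
// ph covers input rows h with
//   ph*stride_h - pad_h <= h < ph*stride_h - pad_h + kernel_h,
// so the pooled rows that can contain input row h satisfy
//   (h + pad_h - kernel_h)/stride_h < ph <= (h + pad_h)/stride_h,
// which with integer floor division gives
//   phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h)/stride_h + 1,
//   phend   = min((h + pad_h)/stride_h + 1, pooled_height),
// and analogously for pw; the gradient is then routed only from pooled cells
// whose recorded argmax equals this bottom index.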
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, pooled_height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* rand_idx, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, pooled_height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
rand_idx += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
CHECK((kstride_h_ == 1) && (kstride_w_ == 1)) << "Backward_gpu is not implemented for FCN pooling";
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
|
1fe1eb28f33809e652b34accdc8f2034562d89db.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "datadef.h"
__global__ void find_E_grid_index_kernel(unsigned N, unsigned N_energies, unsigned* active, float * main_E_grid, float* E , unsigned * index, unsigned* done){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid >= N){return;}
// remap to active
//tid=active[tid];
if(done[tid]){return;}
// load data
float value = E[tid];
unsigned donesearching = 0;
unsigned cnt = 1;
unsigned powtwo = 2;
unsigned olddex=index[tid];
int dex = (N_energies-1) / 2; // N_energies is a count (starts at 1), so start the search at the middle index
//printf("%p %d %10.4E\n",main_E_grid,dex,value);
//int k;
while(!donesearching){
powtwo = powtwo * 2;
if ( main_E_grid[dex] <= value &&
main_E_grid[dex+1] > value ) { donesearching = 1; }
else if ( main_E_grid[dex] > value ) { dex = dex - ((N_energies / powtwo) + 1) ; cnt++; } // +1's are to do a ceiling instead of a floor on integer division
else if ( main_E_grid[dex] < value ) { dex = dex + ((N_energies / powtwo) + 1) ; cnt++; }
if(cnt>30){
donesearching=1;
printf("binary search iteration overflow! %p %d % 10.8f tid=%u\n",main_E_grid,N_energies,value,tid);
dex=0;
}
// edge checks... fix later???
if(dex<0){
//printf("binary search error! dex=%d, (ptr,N_energies,value) %p %d % 10.8f\n",dex,main_E_grid,N_energies,value);
//for(k=0;k<N_energies;k++){printf("%10.8E\n",main_E_grid[k]);}
dex=0;
//donesearching=1;
}
if(dex>=N_energies){
//printf("binary search error! dex=%d, (ptr,N_energies,value) %p %d % 10.8f\n",dex,main_E_grid,N_energies,value);
//for(k=0;k<N_energies;k++){printf("%10.8E\n",main_E_grid[k]);}
dex=N_energies-1;
//donesearching=1;
}
}
//write output index
index[tid]=dex;
//if(olddex!=dex){printf("E_i %6.4E E %6.4E E_i+1 %6.4E, dex %u quaddex %u\n",main_E_grid[dex],value,main_E_grid[dex+1],dex,olddex);}
}
void find_E_grid_index(unsigned NUM_THREADS, unsigned N, unsigned N_energies,unsigned* active, float * main_E_grid, float* E , unsigned * index , unsigned* done){
unsigned blks = ( N + NUM_THREADS - 1 ) / NUM_THREADS;
hipLaunchKernelGGL(( find_E_grid_index_kernel) , dim3(blks), dim3(NUM_THREADS) , 0, 0, N, N_energies, active, main_E_grid, E , index , done);
hipDeviceSynchronize();
}
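// ---------------------------------------------------------------------------
// Illustrative host-side driver (added commentary, not part of the original
// file). All names and sizes below are made up; the only real requirements
// are that main_E_grid is sorted in ascending order (the kernel does a binary
// search on it) and that index/done are zero-initialized device arrays.
void example_find_E_grid_index(){
	const unsigned N = 1024, N_energies = 4096, NUM_THREADS = 256;
	float* h_grid = new float[N_energies];
	float* h_E    = new float[N];
	for(unsigned k = 0; k < N_energies; k++){ h_grid[k] = 1.0e-11f * (k + 1); } // ascending energy grid
	for(unsigned k = 0; k < N;          k++){ h_E[k]    = 5.0e-9f; }            // energies to locate
	float *d_grid, *d_E;
	unsigned *d_index, *d_done;
	hipMalloc((void**) &d_grid,  N_energies*sizeof(float));
	hipMalloc((void**) &d_E,     N*sizeof(float));
	hipMalloc((void**) &d_index, N*sizeof(unsigned));
	hipMalloc((void**) &d_done,  N*sizeof(unsigned));
	hipMemcpy(d_grid, h_grid, N_energies*sizeof(float), hipMemcpyHostToDevice);
	hipMemcpy(d_E,    h_E,    N*sizeof(float),          hipMemcpyHostToDevice);
	hipMemset(d_index, 0, N*sizeof(unsigned));
	hipMemset(d_done,  0, N*sizeof(unsigned));
	// 'active' is not used by the kernel (the remap line is commented out), so NULL is fine here
	find_E_grid_index(NUM_THREADS, N, N_energies, NULL, d_grid, d_E, d_index, d_done);
	// ... copy d_index back with hipMemcpy(..., hipMemcpyDeviceToHost) and consume it ...
	hipFree(d_grid); hipFree(d_E); hipFree(d_index); hipFree(d_done);
	delete[] h_grid; delete[] h_E;
}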
|
1fe1eb28f33809e652b34accdc8f2034562d89db.cu
|
#include <cuda.h>
#include <stdio.h>
#include "datadef.h"
__global__ void find_E_grid_index_kernel(unsigned N, unsigned N_energies, unsigned* active, float * main_E_grid, float* E , unsigned * index, unsigned* done){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid >= N){return;}
// remap to active
//tid=active[tid];
if(done[tid]){return;}
// load data
float value = E[tid];
unsigned donesearching = 0;
unsigned cnt = 1;
unsigned powtwo = 2;
unsigned olddex=index[tid];
int dex = (N_energies-1) / 2; // N_energies is a count (starts at 1), so start the search at the middle index
//printf("%p %d %10.4E\n",main_E_grid,dex,value);
//int k;
while(!donesearching){
powtwo = powtwo * 2;
if ( main_E_grid[dex] <= value &&
main_E_grid[dex+1] > value ) { donesearching = 1; }
else if ( main_E_grid[dex] > value ) { dex = dex - ((N_energies / powtwo) + 1) ; cnt++; } // +1's are to do a ceiling instead of a floor on integer division
else if ( main_E_grid[dex] < value ) { dex = dex + ((N_energies / powtwo) + 1) ; cnt++; }
if(cnt>30){
donesearching=1;
printf("binary search iteration overflow! %p %d % 10.8f tid=%u\n",main_E_grid,N_energies,value,tid);
dex=0;
}
// edge checks... fix later???
if(dex<0){
//printf("binary search error! dex=%d, (ptr,N_energies,value) %p %d % 10.8f\n",dex,main_E_grid,N_energies,value);
//for(k=0;k<N_energies;k++){printf("%10.8E\n",main_E_grid[k]);}
dex=0;
//donesearching=1;
}
if(dex>=N_energies){
//printf("binary search error! dex=%d, (ptr,N_energies,value) %p %d % 10.8f\n",dex,main_E_grid,N_energies,value);
//for(k=0;k<N_energies;k++){printf("%10.8E\n",main_E_grid[k]);}
dex=N_energies-1;
//donesearching=1;
}
}
//write output index
index[tid]=dex;
//if(olddex!=dex){printf("E_i %6.4E E %6.4E E_i+1 %6.4E, dex %u quaddex %u\n",main_E_grid[dex],value,main_E_grid[dex+1],dex,olddex);}
}
void find_E_grid_index(unsigned NUM_THREADS, unsigned N, unsigned N_energies,unsigned* active, float * main_E_grid, float* E , unsigned * index , unsigned* done){
unsigned blks = ( N + NUM_THREADS - 1 ) / NUM_THREADS;
find_E_grid_index_kernel <<< blks, NUM_THREADS >>> ( N, N_energies, active, main_E_grid, E , index , done);
cudaThreadSynchronize();
}
|
476aa98ac3101fe08b7eac12b57cde2630cce55a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "wave_2d.h"
#include <time.h>
#define BLOCK_NUM 32
#define THREAD_NUM 512
extern "C"{
__global__ void kernel_cuda_update(double *olddata, double *data, double *newdata, double C, double K, double dt, int step){
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int x, i, j, y;
int add_i, add_j, sub_i, sub_j;
for(x = tid + bid*THREAD_NUM; x < ARR_SZ; x += THREAD_NUM*BLOCK_NUM){
i = x / GRID_SZ;
j = x % GRID_SZ;
add_i = i+1 >= GRID_SZ ? i : i+1;
add_j = j+1 >= GRID_SZ ? j : j+1;
sub_i = i-1 < 0 ? 0 : i - 1;
sub_j = j-1 < 0 ? 0 : j - 1;
double pot = data[add_i * GRID_SZ + j] +
data[sub_i * GRID_SZ + j] +
data[add_j + i * GRID_SZ] +
data[sub_j + i * GRID_SZ] -
4 * data[i * GRID_SZ + j] ;
double tmp = C * dt;
newdata[x] = ( tmp*tmp * pot * 2 + 4 * data[x] - olddata[x] *(2 - K * dt)) / (2 + K*dt);
}
}
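// Added derivation sketch (not part of the original file): the update above is
// one way to discretize the damped wave equation
//   u_tt + K*u_t = C^2 * laplacian(u)        (grid spacing taken as 1)
// with central differences in time:
//   (u_new - 2*u + u_old)/dt^2 + K*(u_new - u_old)/(2*dt) = C^2 * pot.
// Multiplying by 2*dt^2 and solving for u_new gives
//   u_new = (2*(C*dt)^2 * pot + 4*u - u_old*(2 - K*dt)) / (2 + K*dt),
// which is exactly the newdata[x] expression, with pot the 5-point Laplacian
// built from the edge-clamped neighbour indices add_i/sub_i/add_j/sub_j.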
void cuda_update(double* olddata, double* data, double* newdata,double C,double K, double dt, int step){
double *gpu_data, *gpu_old, *gpu_new, *tmp;
hipMalloc((void**) &gpu_data, sizeof(double)*ARR_SZ);
hipMalloc((void**) &gpu_old, sizeof(double)*ARR_SZ);
hipMalloc((void**) &gpu_new, sizeof(double)*ARR_SZ);
hipMemcpy(gpu_data, data, sizeof(double)*ARR_SZ, hipMemcpyHostToDevice);
hipMemcpy(gpu_old, olddata, sizeof(double)*ARR_SZ, hipMemcpyHostToDevice);
hipMemcpy(gpu_new, newdata, sizeof(double)*ARR_SZ, hipMemcpyHostToDevice);
int i;
for(i = 1;i <= step; ++i){
hipLaunchKernelGGL(( kernel_cuda_update), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, gpu_old, gpu_data, gpu_new,C, K, dt, step);
tmp = gpu_old;
gpu_old = gpu_data;
gpu_data = gpu_new;
gpu_new = tmp;
}
hipMemcpy(data, gpu_data, sizeof(double)*ARR_SZ, hipMemcpyDeviceToHost);
hipFree(gpu_data);
hipFree(gpu_old);
hipFree(gpu_new);
}
}
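// Illustrative host driver (added commentary, not part of the original file):
// cuda_update() takes *host* buffers of ARR_SZ doubles and does the device
// copies itself. The initial bump and the C/K/dt/step values below are
// arbitrary example choices; GRID_SZ and ARR_SZ come from wave_2d.h.
int example_wave_driver(){
	double* olddata = new double[ARR_SZ];
	double* data    = new double[ARR_SZ];
	double* newdata = new double[ARR_SZ];
	for(int x = 0; x < ARR_SZ; ++x){
		olddata[x] = data[x] = newdata[x] = 0.0;
	}
	int centre = (GRID_SZ/2)*GRID_SZ + GRID_SZ/2;       // middle of the square grid
	data[centre] = olddata[centre] = 1.0;               // small initial displacement
	cuda_update(olddata, data, newdata, /*C=*/1.0, /*K=*/0.1, /*dt=*/0.1, /*step=*/100);
	// 'data' now holds the field after 100 time steps
	delete[] olddata; delete[] data; delete[] newdata;
	return 0;
}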
|
476aa98ac3101fe08b7eac12b57cde2630cce55a.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "wave_2d.h"
#include <time.h>
#define BLOCK_NUM 32
#define THREAD_NUM 512
extern "C"{
__global__ void kernel_cuda_update(double *olddata, double *data, double *newdata, double C, double K, double dt, int step){
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int x, i, j, y;
int add_i, add_j, sub_i, sub_j;
for(x = tid + bid*THREAD_NUM; x < ARR_SZ; x += THREAD_NUM*BLOCK_NUM){
i = x / GRID_SZ;
j = x % GRID_SZ;
add_i = i+1 >= GRID_SZ ? i : i+1;
add_j = j+1 >= GRID_SZ ? j : j+1;
sub_i = i-1 < 0 ? 0 : i - 1;
sub_j = j-1 < 0 ? 0 : j - 1;
double pot = data[add_i * GRID_SZ + j] +
data[sub_i * GRID_SZ + j] +
data[add_j + i * GRID_SZ] +
data[sub_j + i * GRID_SZ] -
4 * data[i * GRID_SZ + j] ;
double tmp = C * dt;
newdata[x] = ( tmp*tmp * pot * 2 + 4 * data[x] - olddata[x] *(2 - K * dt)) / (2 + K*dt);
}
}
void cuda_update(double* olddata, double* data, double* newdata,double C,double K, double dt, int step){
double *gpu_data, *gpu_old, *gpu_new, *tmp;
cudaMalloc((void**) &gpu_data, sizeof(double)*ARR_SZ);
cudaMalloc((void**) &gpu_old, sizeof(double)*ARR_SZ);
cudaMalloc((void**) &gpu_new, sizeof(double)*ARR_SZ);
cudaMemcpy(gpu_data, data, sizeof(double)*ARR_SZ, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_old, olddata, sizeof(double)*ARR_SZ, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_new, newdata, sizeof(double)*ARR_SZ, cudaMemcpyHostToDevice);
int i;
for(i = 1;i <= step; ++i){
kernel_cuda_update<<< BLOCK_NUM, THREAD_NUM>>>(gpu_old, gpu_data, gpu_new,C, K, dt, step);
tmp = gpu_old;
gpu_old = gpu_data;
gpu_data = gpu_new;
gpu_new = tmp;
}
cudaMemcpy(data, gpu_data, sizeof(double)*ARR_SZ, cudaMemcpyDeviceToHost);
cudaFree(gpu_data);
cudaFree(gpu_old);
cudaFree(gpu_new);
}
}
|
5d7122a3a084a27e5527e519f460fb00ae4b2b74.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Hartwig Anzt
@author Goran Flegar
@generated from sparse/blas/zgeisai_batched32.cu, normal z -> d, Sun Nov 20 20:20:42 2016
*/
#include "magmasparse_internal.h"
#include <hip/hip_runtime_api.h>
#define PRECISION_d
#define REAL
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#if (TORCH_HIP_VERSION >= 7000) // only for CUDA >= 7.0
const int MaxBlockSize = 32;
template <int block_size>
__device__ void
magma_dlowerisai_regs_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row+1 ];
double rB; // registers for trsv
double dA[ block_size ]; // registers for trisystem
double rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_D_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t ];
int alim = Arow[ t+1 ];
int l = mstart;
int idx = 0;
while( k < alim && l < mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( acol < mcol ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == 0 ) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k % block_size == tid)
rB /= rA;
double top = __shfl(rB, k % block_size);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_dlowerisai_regs_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
if (N == block_size) {
magma_dlowerisai_regs_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_dlowerisai_regs_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
/*
template <int block_size, template <int> class func>
class Switcher {
public:
static __device__ void
switch_func(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
if (N == block_size) {
func<block_size>(num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
Switcher<block_size-1,func>::switch_func(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
};
template<template <int> class func>
class Switcher<0, func> {
public:
static __device__ void
switch_func(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
};
*/
template <>
__device__ __forceinline__ void
magma_dlowerisai_regs_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_dlowerisai_regs_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
//Switcher<MaxBlockSize, magma_dlowerisai_regs_kernel>::switch_func(
magma_dlowerisai_regs_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_dupperisai_regs_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row+1 ];
double rB; // registers for trsv
double dA[ block_size ]; // registers for trisystem
double rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_D_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t ];
int alim = Arow[ t+1 ];
int l = mstart;
int idx = 0;
while( k < alim && l < mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( acol < mcol ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == block_size-1 ) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
double bottom = __shfl(rB, k%block_size);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_dupperisai_regs_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
if (N == block_size) {
magma_dupperisai_regs_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_dupperisai_regs_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_dupperisai_regs_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_dupperisai_regs_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_dupperisai_regs_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_dlowerisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
double rB; // registers for trsv
double dA[ block_size ]; // registers for trisystem
double rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_D_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == 0 ) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
double top = __shfl(rB, k%block_size);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_dlowerisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
if (N == block_size) {
magma_dlowerisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_dlowerisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_dlowerisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_dlowerisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_dlowerisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_dupperisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
double rB; // registers for trsv
double dA[ block_size ]; // registers for trisystem
double rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_D_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == block_size-1 ) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
double bottom = __shfl(rB, k%block_size);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_dupperisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
if (N == block_size) {
magma_dupperisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_dupperisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_dupperisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_dupperisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_dupperisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
#endif
/**
Purpose
-------
This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_d_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_d_matrix*
SPAI preconditioner CSR col-major
@param[out]
sizes magma_int_t*
Number of Elements that are replaced.
@param[out]
locations magma_int_t*
Array indicating the locations.
@param[out]
trisystems double*
trisystems
@param[out]
rhs double*
right-hand sides
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_disai_generator_regs(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_d_matrix L,
magma_d_matrix *M,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (TORCH_HIP_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
// routine 1
// int r1bs1 = 32;
// int r1bs2 = 1;
// int r1dg1 = min( int( sqrt( double( M->num_rows ))), 65535 );
// int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535);
// int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 );
// //printf(" grid: %d x %d x %d\n", r1dg1, r1dg2, r1dg3 );
// dim3 r1block( r1bs1, r1bs2, 1 );
// dim3 r1grid( r1dg1, r1dg2, r1dg3 );
int r2bs1 = 32;
int r2bs2 = 4;
int necessary_blocks = magma_ceildiv(L.num_rows, r2bs2);
int r2dg1 = min( int( sqrt( double( necessary_blocks ))), 65535 );
int r2dg2 = min(magma_ceildiv( necessary_blocks, r2dg1 ), 65535);
int r2dg3 = magma_ceildiv( necessary_blocks, r2dg1*r2dg2 );
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
// int r2bs1 = 32;
// int r2bs2 = 1;
// int r2dg1 = min( int( sqrt( double( magma_ceildiv( M->num_rows, r2bs2 )))), 65535);
// int r2dg2 = min(magma_ceildiv( M->num_rows, r2dg1 ), 65535);
// int r2dg3 = magma_ceildiv( M->num_rows, r2dg1*r2dg2 );
// dim3 r2block( r2bs1, r2bs2, 1 );
// dim3 r2grid( r2dg1, r2dg2, r2dg3 );
if (arch >= 300) {
if (uplotype == MagmaLower) { //printf("in here lower new kernel\n");
hipLaunchKernelGGL(( magma_dlowerisai_regs_inv_switch), dim3(r2grid), dim3(r2block), 0, queue->cuda_stream() ,
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
else { // printf("in here upper new kernel\n");
hipLaunchKernelGGL(( magma_dupperisai_regs_inv_switch), dim3(r2grid), dim3(r2block), 0, queue->cuda_stream() ,
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
}
else {
printf( "%% error: ISAI preconditioner requires CUDA ARCHITECTURE >= 300.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA >= 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
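// Added illustration (not part of MAGMA): the *_select chains above exist to
// turn the runtime row size N into a compile-time constant, so that the
// per-thread array dA[block_size] can be kept in registers. A stripped-down
// sketch of the same recursive-dispatch pattern (the demo_* names are made up):
template <int block_size>
__device__ void demo_body(double *out)
{
    double regs[block_size];                  // statically sized -> registers
    for (int j = 0; j < block_size; ++j)
        regs[j] = (double) j;
    out[0] = regs[block_size - 1];
}

template <int block_size>
__device__ __forceinline__ void demo_select(int N, double *out)
{
    if (N == block_size)
        demo_body<block_size>(out);           // exact match: run this size
    else
        demo_select<block_size - 1>(N, out);  // otherwise try the next smaller size
}

template <>
__device__ __forceinline__ void demo_select<0>(int N, double *out)
{
    // N was outside the supported range; mirror the kernels above and just report it
    printf("%% demo: size out of range: %d\n", N);
}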
|
5d7122a3a084a27e5527e519f460fb00ae4b2b74.cu
|
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Hartwig Anzt
@author Goran Flegar
@generated from sparse/blas/zgeisai_batched32.cu, normal z -> d, Sun Nov 20 20:20:42 2016
*/
#include "magmasparse_internal.h"
#include <cuda_profiler_api.h>
#define PRECISION_d
#define REAL
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <cuda.h> // for CUDA_VERSION
#if (CUDA_VERSION >= 7000) // only for CUDA >= 7.0
const int MaxBlockSize = 32;
template <int block_size>
__device__ void
magma_dlowerisai_regs_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row+1 ];
double rB; // registers for trsv
double dA[ block_size ]; // registers for trisystem
double rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_D_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t ];
int alim = Arow[ t+1 ];
int l = mstart;
int idx = 0;
while( k < alim && l < mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( acol < mcol ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == 0 ) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k % block_size == tid)
rB /= rA;
double top = __shfl(rB, k % block_size);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_dlowerisai_regs_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
if (N == block_size) {
magma_dlowerisai_regs_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_dlowerisai_regs_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
/*
template <int block_size, template <int> class func>
class Switcher {
public:
static __device__ void
switch_func(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
if (N == block_size) {
func<block_size>(num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
Switcher<block_size-1,func>::switch_func(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
};
template<template <int> class func>
class Switcher<0, func> {
public:
static __device__ void
switch_func(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
};
*/
template <>
__device__ __forceinline__ void
magma_dlowerisai_regs_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_dlowerisai_regs_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
//Switcher<MaxBlockSize, magma_dlowerisai_regs_kernel>::switch_func(
magma_dlowerisai_regs_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_dupperisai_regs_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row+1 ];
double rB; // registers for trsv
double dA[ block_size ]; // registers for trisystem
double rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_D_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t ];
int alim = Arow[ t+1 ];
int l = mstart;
int idx = 0;
while( k < alim && l < mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k++;
l++;
idx++;
} else if( acol < mcol ){// need to check next element
k++;
} else { // element does not exist, i.e. l < LC.col[k]
l++; // check next element in the sparsity pattern
idx++; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == block_size-1 ) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
double bottom = __shfl(rB, k%block_size);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_dupperisai_regs_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
if (N == block_size) {
magma_dupperisai_regs_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_dupperisai_regs_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_dupperisai_regs_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_dupperisai_regs_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_dupperisai_regs_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_dlowerisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
double rB; // registers for trsv
double dA[ block_size ]; // registers for trisystem
double rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_D_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == 0 ) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
double top = __shfl(rB, k%block_size);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_dlowerisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
if (N == block_size) {
magma_dlowerisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_dlowerisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_dlowerisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_dlowerisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_dlowerisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_dupperisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
double rB; // registers for trsv
double dA[ block_size ]; // registers for trisystem
double rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_D_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
// we know how RHS looks like
rB = ( tid == block_size-1 ) ? MAGMA_D_ONE : MAGMA_D_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
double bottom = __shfl(rB, k%block_size);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_dupperisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
if (N == block_size) {
magma_dupperisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_dupperisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_dupperisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
// TODO(Hartwig): Are you sure we want to have printfs called from the
// device?
printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_dupperisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const double * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
double *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_dupperisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
#endif
/**
Purpose
-------
This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_d_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_d_matrix*
SPAI preconditioner CSR col-major
@param[out]
sizes magma_int_t*
Number of Elements that are replaced.
@param[out]
locations magma_int_t*
Array indicating the locations.
@param[out]
trisystems double*
trisystems
@param[out]
rhs double*
right-hand sides
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_disai_generator_regs(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_d_matrix L,
magma_d_matrix *M,
magma_index_t *sizes,
magma_index_t *locations,
double *trisystems,
double *rhs,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (CUDA_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
// routine 1
// int r1bs1 = 32;
// int r1bs2 = 1;
// int r1dg1 = min( int( sqrt( double( M->num_rows ))), 65535 );
// int r1dg2 = min(magma_ceildiv( M->num_rows, r1dg1 ), 65535);
// int r1dg3 = magma_ceildiv( M->num_rows, r1dg1*r1dg2 );
// //printf(" grid: %d x %d x %d\n", r1dg1, r1dg2, r1dg3 );
// dim3 r1block( r1bs1, r1bs2, 1 );
// dim3 r1grid( r1dg1, r1dg2, r1dg3 );
int r2bs1 = 32;
int r2bs2 = 4;
int necessary_blocks = magma_ceildiv(L.num_rows, r2bs2);
int r2dg1 = min( int( sqrt( double( necessary_blocks ))), 65535 );
int r2dg2 = min(magma_ceildiv( necessary_blocks, r2dg1 ), 65535);
int r2dg3 = magma_ceildiv( necessary_blocks, r2dg1*r2dg2 );
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
// int r2bs1 = 32;
// int r2bs2 = 1;
// int r2dg1 = min( int( sqrt( double( magma_ceildiv( M->num_rows, r2bs2 )))), 65535);
// int r2dg2 = min(magma_ceildiv( M->num_rows, r2dg1 ), 65535);
// int r2dg3 = magma_ceildiv( M->num_rows, r2dg1*r2dg2 );
// dim3 r2block( r2bs1, r2bs2, 1 );
// dim3 r2grid( r2dg1, r2dg2, r2dg3 );
if (arch >= 300) {
if (uplotype == MagmaLower) { //printf("in here lower new kernel\n");
magma_dlowerisai_regs_inv_switch<<< r2grid, r2block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
else { // printf("in here upper new kernel\n");
magma_dupperisai_regs_inv_switch<<< r2grid, r2block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
}
else {
printf( "%% error: ISAI preconditioner requires CUDA ARCHITECTURE >= 300.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA >= 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
|
77b066940d5af1ef6e75ccc17f512c583dc31f56.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include "dock.h"
#include "gpu.cuh"
*/
/*
#define expf(a) (a)
#define powf(a,b) (a+b)
#define logf(a) (a)
#define sqrtf(a) (a)
*/
__device__ void
CalcEnergy_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
// reduce all points on the X-Y plate
__shared__ float evdw[TperB]; // e[0]
__shared__ float eele[TperB]; // e[1]
__shared__ float epmf[TperB]; // e[2]
__shared__ float epsp[TperB]; // e[3]
__shared__ float ehdb[TperB]; // e[4]
// reduce through only x axis
__shared__ float a_val[BDy][BDx]; // reused by hpc, kde, lhm ???????
__shared__ float a_sz[BDy][BDx]; // ???????
__shared__ float ehpc[BDy]; // e[5]
__shared__ float ekde[BDy]; // e[6]
__shared__ float elhm[BDy]; // e[7]
evdw[bidx] = 0.0f;
eele[bidx] = 0.0f;
epmf[bidx] = 0.0f;
epsp[bidx] = 0.0f;
ehdb[bidx] = 0.0f;
if (bidx < BDy) {
ehpc[bidx] = 0.0f;
ekde[bidx] = 0.0f;
elhm[bidx] = 0.0f;
}
__syncthreads ();
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
const int l = i + threadIdx.y;
if (l < lna_dc) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int j = 0; j < pnp_dc; j += blockDim.x) {
const int p = j + threadIdx.x;
if (p < pnp_dc) {
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst_pow2 = dx * dx + dy * dy + dz * dz;
const float dst_pow4 = dst_pow2 * dst_pow2;
const float dst = sqrtf (dst_pow2);
/* hydrophobic potential */
if (myprt->c0_and_d12_or_c2[p] == 1 && dst_pow2 <= 81.0f) {
a_val[threadIdx.y][threadIdx.x] += myprt->hpp[p] *
(1.0f - (3.5f / 81.0f * dst_pow2 -
4.5f / 81.0f / 81.0f * dst_pow4 +
2.5f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow2 -
0.5f / 81.0f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow4));
}
/* L-J potential */
const float p1 = enepara_dc->p1a[lig_t][prt_t] / (dst_pow4 * dst_pow4 * dst);
const float p2 = enepara_dc->p2a[lig_t][prt_t] / (dst_pow4 * dst_pow2);
const float p4 = p1 * enepara_lj0_dc * (1.0f + enepara_lj1_dc * dst_pow2) + 1.0f;
evdw[bidx] += (p1 - p2) / p4;
/* electrostatic potential */
const float s1 = enepara_el1_dc * dst;
float g1;
if (s1 < 1)
g1 = enepara_el0_dc + enepara_a1_dc * s1 * s1 + enepara_b1_dc * s1 * s1 * s1;
else
g1 = 1.0f / s1;
eele[bidx] += mylig->c[l] * myprt->ele[p] * g1;
/* contact potential */
const float dst_minus_pmf0 = dst - enepara_dc->pmf0[lig_t][prt_t];
epmf[bidx] +=
enepara_dc->pmf1[lig_t][prt_t] /
(1.0f + expf ((-0.5f * dst + 6.0f) * dst_minus_pmf0));
/* pocket-specific potential */
// the semantics do not match the original program:
// if (found psp[][])
// accumulate to epsp;
// else
// do nothing
if (myprt->c[p] == 2 && dst_minus_pmf0 <= 0) {
const int i1 = myprt->seq3r[p];
epsp[bidx] += psp_dc->psp[lig_t][i1]; // sparse matrix
}
/* hydrogen bond potential */
const float hdb0 = enepara_dc->hdb0[lig_t][prt_t];
if (hdb0 > 0.1f) {
const float hdb1 = enepara_dc->hdb1[lig_t][prt_t];
const float hdb3 = (dst - hdb0) * hdb1;
ehdb[bidx] += hdb1 * expf (-0.5f * hdb3 * hdb3);
}
} // if (p < pnp_dc)
} // prt loop
} // if (l < lna_dc)
/* hydrophobic restraints */
SumReduction2D_d (a_val);
// transpose may help improve the performance
if (threadIdx.x == 0 && l < lna_dc) {
const int lig_t = mylig->t[l];
const float hpc2 = (a_val[threadIdx.y][0] - enepara_dc->hpl0[lig_t]) / enepara_dc->hpl1[lig_t];
ehpc[threadIdx.y] += 0.5f * hpc2 * hpc2 - enepara_dc->hpl2[lig_t];
}
} // lig loop
SumReduction1D_5_d (bidx, evdw, eele, epmf, epsp, ehdb);
if (bidx == 0) {
float eehpc = 0.0f;
for (int i = 0; i < BDy; ++i)
eehpc += ehpc[i];
ehpc[0] = eehpc;
}
#if 1
/* kde potential */
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
a_sz[threadIdx.y][threadIdx.x] = 0.0f;
const int l = i + threadIdx.y;
if (l < lna_dc) {
// kde loop, ~400
for (int j = 0; j < pnk_dc; j += blockDim.x) {
const int k = j + threadIdx.x;
if (k < pnk_dc) {
if (mylig->t[l] == kde_dc->t[k]) {
const float dx = mylig->coord_new.x[l] - kde_dc->x[k];
const float dy = mylig->coord_new.y[l] - kde_dc->y[k];
const float dz = mylig->coord_new.z[l] - kde_dc->z[k];
const float kde_dst_pow2 = dx * dx + dy * dy + dz * dz;
a_val[threadIdx.y][threadIdx.x] += expf (enepara_kde2_dc * kde_dst_pow2);
a_sz[threadIdx.y][threadIdx.x] += 1.0f;
}
} // if (k < pnk_dc)
} // kde loop
} // if (l < lna_dc)
SumReduction2D_2_d (a_val, a_sz);
if (threadIdx.x == 0 && l < lna_dc && a_sz[threadIdx.y][0] != 0.0f)
ekde[threadIdx.y] += (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]);
} // lig loop
__syncthreads ();
if (bidx == 0) {
float eekde = 0.0f;
for (int i = 0; i < BDy; ++i)
eekde += ekde[i];
eekde = eekde / enepara_kde3_dc;
ekde[0] = eekde;
}
__syncthreads ();
#endif
#if 1
/* position restraints */
// lhm loop, ~11
for (int i = 0; i < pos_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
a_sz[threadIdx.y][threadIdx.x] = 0.0f;
const int m = i + threadIdx.y;
if (m < pos_dc) {
// lig loop, ~30
for (int j = 0; j < lna_dc; j += blockDim.x) {
const int l = j + threadIdx.x;
if (l < lna_dc) {
const int lig_n = mylig->n[l] + 1;
if (mcs_dc[m].x[lig_n] != MCS_INVALID_COORD) {
const float dx = mylig->coord_new.x[l] - mcs_dc[m].x[lig_n];
const float dy = mylig->coord_new.y[l] - mcs_dc[m].y[lig_n];
const float dz = mylig->coord_new.z[l] - mcs_dc[m].z[lig_n];
a_val[threadIdx.y][threadIdx.x] += dx * dx + dy * dy + dz * dz;
a_sz[threadIdx.y][threadIdx.x] += 1.0f;
}
} // if (l < lna_dc)
} // lig loop
} // if (m < pos_dc)
SumReduction2D_2_d (a_val, a_sz);
if (threadIdx.x == 0 && m < pos_dc) {
elhm[threadIdx.y] +=
mcs_dc[m].tcc *
sqrtf (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]);
}
} // lhm loop
__syncthreads ();
if (bidx == 0) {
float eelhm = 0.0f;
for (int i = 0; i < BDy; ++i)
eelhm += elhm[i];
// dropped the protection (if pos_dc != 0)
eelhm = logf (eelhm / pos_dc);
elhm[0] = eelhm;
}
__syncthreads ();
#endif
// energy edst e[8]
__shared__ float edst;
if (bidx == 0) {
const float dx = mylig->coord_new.center[0] - myprt->pocket_center[0];
const float dy = mylig->coord_new.center[1] - myprt->pocket_center[1];
const float dz = mylig->coord_new.center[2] - myprt->pocket_center[2];
edst = sqrtf (dx * dx + dy * dy + dz * dz);
}
__syncthreads ();
if (bidx == 0) {
evdw[0] = evdw[0] / lna_dc;
eele[0] = eele[0] / lna_dc;
epmf[0] = epmf[0] / lna_dc;
epsp[0] = epsp[0] / lna_dc;
ehdb[0] = ehdb[0] / lna_dc / sqrtf (2.0f * PI) * -1.0f;
// ehdb[0] = ehdb[0] / lna_dc; // using hdb2 is faster
ehpc[0] = ehpc[0] / lna_dc;
ekde[0] = ekde[0] / lna_dc;
#if IS_NORM == 1
// calculate normalized energy
evdw[0] = enepara_dc->a_para[0] * evdw[0] + enepara_dc->b_para[0];
eele[0] = enepara_dc->a_para[1] * eele[0] + enepara_dc->b_para[1];
epmf[0] = enepara_dc->a_para[2] * epmf[0] + enepara_dc->b_para[2];
ehpc[0] = enepara_dc->a_para[3] * ehpc[0] + enepara_dc->b_para[3];
ehdb[0] = enepara_dc->a_para[4] * ehdb[0] + enepara_dc->b_para[4];
edst = enepara_dc->a_para[5] * edst + enepara_dc->b_para[5];
epsp[0] = enepara_dc->a_para[6] * epsp[0] + enepara_dc->b_para[6];
ekde[0] = enepara_dc->a_para[7] * ekde[0] + enepara_dc->b_para[7];
elhm[0] = enepara_dc->a_para[8] * elhm[0] + enepara_dc->b_para[8];
#endif
#if IS_BAYE == 1
// calculate conditional prob belonging to high decoy
const float evdw_h = NormPdf(evdw[0], VDW_NORM_HIGH_LOC, VDW_NORM_HIGH_SCALE);
const float evdw_l = NormPdf(evdw[0], VDW_NORM_LOW_LOC, VDW_NORM_LOW_SCALE);
const float eele_h = CauchyPdf(eele[0], ELE_CAUCHY_HIGH_LOC, ELE_CAUCHY_HIGH_SCALE);
const float eele_l = CauchyPdf(eele[0], ELE_CAUCHY_LOW_LOC, ELE_CAUCHY_LOW_SCALE);
const float epmf_h = LogisticPdf(epmf[0], PMF_LOGISTIC_HIGH_LOC, PMF_LOGISTIC_HIGH_SCALE);
const float epmf_l = LogisticPdf(epmf[0], PMF_LOGISTIC_LOW_LOC, PMF_LOGISTIC_LOW_SCALE);
const float ehpc_h = WaldPdf(ehpc[0], HPC_WALD_HIGH_LOC, HPC_WALD_HIGH_SCALE);
const float ehpc_l = WaldPdf(ehpc[0], HPC_WALD_LOW_LOC, HPC_WALD_LOW_SCALE);
const float ehdb_h = NormPdf(ehdb[0], HDB_NORM_HIGH_LOC, HDB_NORM_HIGH_SCALE);
const float ehdb_l = NormPdf(ehdb[0], HDB_LOGISTIC_LOW_LOC, HDB_LOGISTIC_LOW_SCALE);
const float edst_h = LogisticPdf(edst, DST_LOGISTIC_HIGH_LOC, DST_LOGISTIC_HIGH_SCALE);
const float edst_l = LogisticPdf(edst, DST_LOGISTIC_LOW_LOC, DST_LOGISTIC_LOW_SCALE);
const float epsp_h = LogisticPdf(epsp[0], PSP_LOGISTIC_HIGH_LOC, PSP_LOGISTIC_HIGH_SCALE);
const float epsp_l = LogisticPdf(epsp[0], PSP_LAPLACE_LOW_LOC, PSP_LAPLACE_LOW_SCALE);
const float ekde_h = WaldPdf(ekde[0], KDE_WALD_HIGH_LOC, KDE_WALD_HIGH_SCALE);
const float ekde_l = WaldPdf(ekde[0], KDE_WALD_LOW_LOC, KDE_WALD_LOW_SCALE);
const float elhm_h = LogisticPdf(elhm[0], LHM_LOGISTIC_HIGH_LOC, LHM_LOGISTIC_HIGH_SCALE);
const float elhm_l = LogisticPdf(elhm[0], LHM_LOGISTIC_LOW_LOC, LHM_LOGISTIC_LOW_SCALE);
// calculate conditional prob
const float prob_h = log10f(evdw_h) + log10f(eele_h) + log10f(epmf_h) + log10f(ehpc_h) + log10f(ehdb_h)
+ log10f(edst_h) + log10f(epsp_h) + log10f(ekde_h) + log10f(elhm_h);
const float prob_l = log10f(evdw_l) + log10f(eele_l) + log10f(epmf_l) + log10f(ehpc_l) + log10f(ehdb_l)
+ log10f(edst_l) + log10f(epsp_l) + log10f(ekde_l) + log10f(elhm_l);
const float etotal = prob_l - prob_h;
#elif IS_BAYE == 0
#if IS_OPT == 1
const float etotal =
enepara_dc->w[0] * evdw[0] +
enepara_dc->w[1] * eele[0] +
enepara_dc->w[2] * epmf[0] +
enepara_dc->w[3] * epsp[0] +
enepara_dc->w[4] * ehdb[0] +
enepara_dc->w[5] * ehpc[0] +
enepara_dc->w[6] * ekde[0] +
enepara_dc->w[7] * elhm[0] +
enepara_dc->w[8] * edst;
#elif IS_OPT == 0
const float etotal = evdw[0] + edst;
#endif
#endif
float * e = &mylig->energy_new.e[0];
e[0] = evdw[0];
e[1] = eele[0];
e[2] = epmf[0];
e[3] = epsp[0];
e[4] = ehdb[0];
e[5] = ehpc[0];
e[6] = ekde[0];
e[7] = elhm[0];
e[8] = edst;
e[9] = etotal;
// e[9] = edst;
}
}
|
77b066940d5af1ef6e75ccc17f512c583dc31f56.cu
|
/*
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include "dock.h"
#include "gpu.cuh"
*/
/*
#define expf(a) (a)
#define powf(a,b) (a+b)
#define logf(a) (a)
#define sqrtf(a) (a)
*/
__device__ void
CalcEnergy_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
// reduce all points on the X-Y plate
__shared__ float evdw[TperB]; // e[0]
__shared__ float eele[TperB]; // e[1]
__shared__ float epmf[TperB]; // e[2]
__shared__ float epsp[TperB]; // e[3]
__shared__ float ehdb[TperB]; // e[4]
// reduce through only x axis
__shared__ float a_val[BDy][BDx]; // reused by hpc, kde, lhm ???????
__shared__ float a_sz[BDy][BDx]; // ???????
__shared__ float ehpc[BDy]; // e[5]
__shared__ float ekde[BDy]; // e[6]
__shared__ float elhm[BDy]; // e[7]
evdw[bidx] = 0.0f;
eele[bidx] = 0.0f;
epmf[bidx] = 0.0f;
epsp[bidx] = 0.0f;
ehdb[bidx] = 0.0f;
if (bidx < BDy) {
ehpc[bidx] = 0.0f;
ekde[bidx] = 0.0f;
elhm[bidx] = 0.0f;
}
__syncthreads ();
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
const int l = i + threadIdx.y;
if (l < lna_dc) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int j = 0; j < pnp_dc; j += blockDim.x) {
const int p = j + threadIdx.x;
if (p < pnp_dc) {
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst_pow2 = dx * dx + dy * dy + dz * dz;
const float dst_pow4 = dst_pow2 * dst_pow2;
const float dst = sqrtf (dst_pow2);
/* hydrophobic potential */
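// 81.0f is the squared cutoff distance (9^2); the polynomial below evaluates to zero
// (with zero slope) at dst_pow2 == 81, so the hydrophobic term switches off smoothly at the cutoff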
if (myprt->c0_and_d12_or_c2[p] == 1 && dst_pow2 <= 81.0f) {
a_val[threadIdx.y][threadIdx.x] += myprt->hpp[p] *
(1.0f - (3.5f / 81.0f * dst_pow2 -
4.5f / 81.0f / 81.0f * dst_pow4 +
2.5f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow2 -
0.5f / 81.0f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow4));
}
/* L-J potential */
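// p1 and p2 are r^-9 and r^-6 terms of the pair potential; p4 is a
// distance-dependent damping factor applied as the denominator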
const float p1 = enepara_dc->p1a[lig_t][prt_t] / (dst_pow4 * dst_pow4 * dst);
const float p2 = enepara_dc->p2a[lig_t][prt_t] / (dst_pow4 * dst_pow2);
const float p4 = p1 * enepara_lj0_dc * (1.0f + enepara_lj1_dc * dst_pow2) + 1.0f;
evdw[bidx] += (p1 - p2) / p4;
/* electrostatic potential */
const float s1 = enepara_el1_dc * dst;
float g1;
if (s1 < 1)
g1 = enepara_el0_dc + enepara_a1_dc * s1 * s1 + enepara_b1_dc * s1 * s1 * s1;
else
g1 = 1.0f / s1;
eele[bidx] += mylig->c[l] * myprt->ele[p] * g1;
/* contact potential */
const float dst_minus_pmf0 = dst - enepara_dc->pmf0[lig_t][prt_t];
epmf[bidx] +=
enepara_dc->pmf1[lig_t][prt_t] /
(1.0f + expf ((-0.5f * dst + 6.0f) * dst_minus_pmf0));
/* pocket-specific potential */
// the semantics do not match the original program:
// if (found psp[][])
// accumulate to epsp;
// else
// do nothing
if (myprt->c[p] == 2 && dst_minus_pmf0 <= 0) {
const int i1 = myprt->seq3r[p];
epsp[bidx] += psp_dc->psp[lig_t][i1]; // sparse matrix
}
/* hydrogen bond potential */
const float hdb0 = enepara_dc->hdb0[lig_t][prt_t];
if (hdb0 > 0.1f) {
const float hdb1 = enepara_dc->hdb1[lig_t][prt_t];
const float hdb3 = (dst - hdb0) * hdb1;
ehdb[bidx] += hdb1 * expf (-0.5f * hdb3 * hdb3);
}
} // if (p < pnp_dc)
} // prt loop
} // if (l < lna_dc)
/* hydrophobic restraints */
SumReduction2D_d (a_val);
// transpose may help improve the performance
if (threadIdx.x == 0 && l < lna_dc) {
const int lig_t = mylig->t[l];
const float hpc2 = (a_val[threadIdx.y][0] - enepara_dc->hpl0[lig_t]) / enepara_dc->hpl1[lig_t];
ehpc[threadIdx.y] += 0.5f * hpc2 * hpc2 - enepara_dc->hpl2[lig_t];
}
} // lig loop
SumReduction1D_5_d (bidx, evdw, eele, epmf, epsp, ehdb);
if (bidx == 0) {
float eehpc = 0.0f;
for (int i = 0; i < BDy; ++i)
eehpc += ehpc[i];
ehpc[0] = eehpc;
}
#if 1
/* kde potential */
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
a_sz[threadIdx.y][threadIdx.x] = 0.0f;
const int l = i + threadIdx.y;
if (l < lna_dc) {
// kde loop, ~400
for (int j = 0; j < pnk_dc; j += blockDim.x) {
const int k = j + threadIdx.x;
if (k < pnk_dc) {
if (mylig->t[l] == kde_dc->t[k]) {
const float dx = mylig->coord_new.x[l] - kde_dc->x[k];
const float dy = mylig->coord_new.y[l] - kde_dc->y[k];
const float dz = mylig->coord_new.z[l] - kde_dc->z[k];
const float kde_dst_pow2 = dx * dx + dy * dy + dz * dz;
a_val[threadIdx.y][threadIdx.x] += expf (enepara_kde2_dc * kde_dst_pow2);
a_sz[threadIdx.y][threadIdx.x] += 1.0f;
}
} // if (k < pnk_dc)
} // kde loop
} // if (l < lna_dc)
SumReduction2D_2_d (a_val, a_sz);
if (threadIdx.x == 0 && l < lna_dc && a_sz[threadIdx.y][0] != 0.0f)
ekde[threadIdx.y] += (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]);
} // lig loop
__syncthreads ();
if (bidx == 0) {
float eekde = 0.0f;
for (int i = 0; i < BDy; ++i)
eekde += ekde[i];
eekde = eekde / enepara_kde3_dc;
ekde[0] = eekde;
}
__syncthreads ();
#endif
#if 1
/* position restraints */
// lhm loop, ~11
for (int i = 0; i < pos_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
a_sz[threadIdx.y][threadIdx.x] = 0.0f;
const int m = i + threadIdx.y;
if (m < pos_dc) {
// lig loop, ~30
for (int j = 0; j < lna_dc; j += blockDim.x) {
const int l = j + threadIdx.x;
if (l < lna_dc) {
const int lig_n = mylig->n[l] + 1;
if (mcs_dc[m].x[lig_n] != MCS_INVALID_COORD) {
const float dx = mylig->coord_new.x[l] - mcs_dc[m].x[lig_n];
const float dy = mylig->coord_new.y[l] - mcs_dc[m].y[lig_n];
const float dz = mylig->coord_new.z[l] - mcs_dc[m].z[lig_n];
a_val[threadIdx.y][threadIdx.x] += dx * dx + dy * dy + dz * dz;
a_sz[threadIdx.y][threadIdx.x] += 1.0f;
}
} // if (l < lna_dc)
} // lig loop
} // if (m < pos_dc)
SumReduction2D_2_d (a_val, a_sz);
if (threadIdx.x == 0 && m < pos_dc) {
elhm[threadIdx.y] +=
mcs_dc[m].tcc *
sqrtf (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]);
}
} // lhm loop
__syncthreads ();
if (bidx == 0) {
float eelhm = 0.0f;
for (int i = 0; i < BDy; ++i)
eelhm += elhm[i];
// dropped the protection (if pos_dc != 0)
eelhm = logf (eelhm / pos_dc);
elhm[0] = eelhm;
}
__syncthreads ();
#endif
// energy edst e[8]
__shared__ float edst;
if (bidx == 0) {
const float dx = mylig->coord_new.center[0] - myprt->pocket_center[0];
const float dy = mylig->coord_new.center[1] - myprt->pocket_center[1];
const float dz = mylig->coord_new.center[2] - myprt->pocket_center[2];
edst = sqrtf (dx * dx + dy * dy + dz * dz);
}
__syncthreads ();
if (bidx == 0) {
evdw[0] = evdw[0] / lna_dc;
eele[0] = eele[0] / lna_dc;
epmf[0] = epmf[0] / lna_dc;
epsp[0] = epsp[0] / lna_dc;
ehdb[0] = ehdb[0] / lna_dc / sqrtf (2.0f * PI) * -1.0f;
// ehdb[0] = ehdb[0] / lna_dc; // using hdb2 is faster
ehpc[0] = ehpc[0] / lna_dc;
ekde[0] = ekde[0] / lna_dc;
#if IS_NORM == 1
// calculate normalized energy
evdw[0] = enepara_dc->a_para[0] * evdw[0] + enepara_dc->b_para[0];
eele[0] = enepara_dc->a_para[1] * eele[0] + enepara_dc->b_para[1];
epmf[0] = enepara_dc->a_para[2] * epmf[0] + enepara_dc->b_para[2];
ehpc[0] = enepara_dc->a_para[3] * ehpc[0] + enepara_dc->b_para[3];
ehdb[0] = enepara_dc->a_para[4] * ehdb[0] + enepara_dc->b_para[4];
edst = enepara_dc->a_para[5] * edst + enepara_dc->b_para[5];
epsp[0] = enepara_dc->a_para[6] * epsp[0] + enepara_dc->b_para[6];
ekde[0] = enepara_dc->a_para[7] * ekde[0] + enepara_dc->b_para[7];
elhm[0] = enepara_dc->a_para[8] * elhm[0] + enepara_dc->b_para[8];
#endif
#if IS_BAYE == 1
// calculate conditional prob belonging to high decoy
const float evdw_h = NormPdf(evdw[0], VDW_NORM_HIGH_LOC, VDW_NORM_HIGH_SCALE);
const float evdw_l = NormPdf(evdw[0], VDW_NORM_LOW_LOC, VDW_NORM_LOW_SCALE);
const float eele_h = CauchyPdf(eele[0], ELE_CAUCHY_HIGH_LOC, ELE_CAUCHY_HIGH_SCALE);
const float eele_l = CauchyPdf(eele[0], ELE_CAUCHY_LOW_LOC, ELE_CAUCHY_LOW_SCALE);
const float epmf_h = LogisticPdf(epmf[0], PMF_LOGISTIC_HIGH_LOC, PMF_LOGISTIC_HIGH_SCALE);
const float epmf_l = LogisticPdf(epmf[0], PMF_LOGISTIC_LOW_LOC, PMF_LOGISTIC_LOW_SCALE);
const float ehpc_h = WaldPdf(ehpc[0], HPC_WALD_HIGH_LOC, HPC_WALD_HIGH_SCALE);
const float ehpc_l = WaldPdf(ehpc[0], HPC_WALD_LOW_LOC, HPC_WALD_LOW_SCALE);
const float ehdb_h = NormPdf(ehdb[0], HDB_NORM_HIGH_LOC, HDB_NORM_HIGH_SCALE);
const float ehdb_l = NormPdf(ehdb[0], HDB_LOGISTIC_LOW_LOC, HDB_LOGISTIC_LOW_SCALE);
const float edst_h = LogisticPdf(edst, DST_LOGISTIC_HIGH_LOC, DST_LOGISTIC_HIGH_SCALE);
const float edst_l = LogisticPdf(edst, DST_LOGISTIC_LOW_LOC, DST_LOGISTIC_LOW_SCALE);
const float epsp_h = LogisticPdf(epsp[0], PSP_LOGISTIC_HIGH_LOC, PSP_LOGISTIC_HIGH_SCALE);
const float epsp_l = LogisticPdf(epsp[0], PSP_LAPLACE_LOW_LOC, PSP_LAPLACE_LOW_SCALE);
const float ekde_h = WaldPdf(ekde[0], KDE_WALD_HIGH_LOC, KDE_WALD_HIGH_SCALE);
const float ekde_l = WaldPdf(ekde[0], KDE_WALD_LOW_LOC, KDE_WALD_LOW_SCALE);
const float elhm_h = LogisticPdf(elhm[0], LHM_LOGISTIC_HIGH_LOC, LHM_LOGISTIC_HIGH_SCALE);
const float elhm_l = LogisticPdf(elhm[0], LHM_LOGISTIC_LOW_LOC, LHM_LOGISTIC_LOW_SCALE);
// calculate conditional prob
const float prob_h = log10f(evdw_h) + log10f(eele_h) + log10f(epmf_h) + log10f(ehpc_h) + log10f(ehdb_h)
+ log10f(edst_h) + log10f(epsp_h) + log10f(ekde_h) + log10f(elhm_h);
const float prob_l = log10f(evdw_l) + log10f(eele_l) + log10f(epmf_l) + log10f(ehpc_l) + log10f(ehdb_l)
+ log10f(edst_l) + log10f(epsp_l) + log10f(ekde_l) + log10f(elhm_l);
const float etotal = prob_l - prob_h;
#elif IS_BAYE == 0
#if IS_OPT == 1
const float etotal =
enepara_dc->w[0] * evdw[0] +
enepara_dc->w[1] * eele[0] +
enepara_dc->w[2] * epmf[0] +
enepara_dc->w[3] * epsp[0] +
enepara_dc->w[4] * ehdb[0] +
enepara_dc->w[5] * ehpc[0] +
enepara_dc->w[6] * ekde[0] +
enepara_dc->w[7] * elhm[0] +
enepara_dc->w[8] * edst;
#elif IS_OPT == 0
const float etotal = evdw[0] + edst;
#endif
#endif
float * e = &mylig->energy_new.e[0];
e[0] = evdw[0];
e[1] = eele[0];
e[2] = epmf[0];
e[3] = epsp[0];
e[4] = ehdb[0];
e[5] = ehpc[0];
e[6] = ekde[0];
e[7] = elhm[0];
e[8] = edst;
e[9] = etotal;
// e[9] = edst;
}
}
|
fbdbed2b8fde81152c0980e581f15d05054895ae.hip
|
// !!! This is a file automatically generated by hipify!!!
// system libraries
// use nvcc -o (output name) -Wno-deprecated-gpu-targets -std=c++11 -Xcompiler -fopenmp file_name.cu
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <chrono>
// size definition. modify as needed
#define N 2000
#define T_SIZE 32
using namespace std;
// safe call definition
static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number){
if(err!=hipSuccess){
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// safe call definition
#define SAFE_CALL(call,msg) _safe_cuda_call(call,msg,__FILE__,__LINE__)
// initialize row-major matrix
void initializeMatrix(float *ip, const int nxy){
srand (static_cast <unsigned> (time(0)));
float random;
for(int i = 0; i < nxy; i++){
random = 1.0 + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(10.0-1.0)));
ip[i] = random;
}
return;
}
// utility function to check result
void checkResult(float *hostRef, float *gpuRef, const int nxy){
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < nxy; i++){
if (abs(hostRef[i] - gpuRef[i]) > epsilon){
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// multiply matrix on host
void multiplyMatrixOnHost(float *A, float *B, float *C, const int nx){
for(int i = 0; i < nx; i++) {
for(int j = 0; j < nx; j++) {
for(int k = 0; k < nx; k++) {
C[i * nx + j] += A[i * nx + k] * B[j + k * nx];
}
}
}
return;
}
// function to multiply matrix on host with threads
void multiplyMatrixOnHostThreads(float *A, float *B, float *C, const int nx){
int i = 0;
// use the pragma directive to automatically parallelize
#pragma omp parallel for private(i) shared(A, B, C)
for(i = 0; i < nx; i++) {
for(int j = 0; j < nx; j++) {
for(int k = 0; k < nx; k++) {
C[i * nx + j] += A[i * nx + k] * B[j + k * nx];
}
}
}
return;
}
// kernel to multiply matrix on gpu
__global__ void multiplyMatrixOnGPU(float *A, float *B, float *C, const int nx){
// get ix and iy from cuda defined variables
unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0.0;
if (ix < nx && iy < nx){
for(int i = 0; i < nx ; i++)
sum += A[iy * nx + i] * B[i * nx + ix];
C[iy * nx + ix] = sum;
}
}
// Kernel GPU Tiles
__global__ void multiplyMatrixOnGPUTiles(float *A, float *B, float *C, const int nx){
// Create the shared memory space as tiles
__shared__ float tileOne[T_SIZE][T_SIZE], tileTwo[T_SIZE][T_SIZE];
// Get the ix and iy indexes
unsigned int ix = T_SIZE * blockIdx.x + threadIdx.x;
unsigned int iy = T_SIZE * blockIdx.y + threadIdx.y;
// Number of tile phases needed to cover the full matrix width
int limit = ((T_SIZE + nx)/T_SIZE);
// Partial sum accumulator
float partialSum = 0.0;
int i = 0;
while(i < limit){
// Fetch values for each value of the tiles with restriction
if ((iy < nx) && ((i * T_SIZE + threadIdx.x) < nx)){
int id = (iy * nx) + (i * T_SIZE) + threadIdx.x;
tileOne[threadIdx.y][threadIdx.x] = A[id];
}else{
tileOne[threadIdx.y][threadIdx.x] = 0.0;
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
// cuPrintf(""); <--- deprecated
// printf("Improper Tile Size in X domain, zeroing\n");
}
// Wait for threads to finish
__syncthreads();
// Fetch values for each value of the tiles with restriction
if ((ix < nx) && ((i * T_SIZE + threadIdx.y) < nx)){
int id = (i * T_SIZE + threadIdx.y) * nx + ix;
tileTwo[threadIdx.y][threadIdx.x] = B[id];
}else{
tileTwo[threadIdx.y][threadIdx.x] = 0.0;
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
// printf("Improper Tile Size in Y domain, zeroing\n");
}
// Wait for threads to finish
__syncthreads();
//Perform partial sum on tile
#pragma unroll // T_SIZE is constant
for (int j = 0; j < T_SIZE; j++){
partialSum += tileOne[threadIdx.y][j] * tileTwo[j][threadIdx.x];
}
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
//printf("Partial Sum fetched with value %f\n", partialSum);
// Wait for threads to finish
__syncthreads();
i++;
}
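// Write back the accumulated dot product; the flattened index equals iy * nx + ix
// because blockDim matches T_SIZE in both dimensions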
if (ix < nx && iy < nx)
C[((blockIdx.y * blockDim.y + threadIdx.y) * nx) + (blockIdx.x * blockDim.x) + threadIdx.x] = partialSum;
}
int main(int argc, char* argv[]) {
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(hipSetDevice(dev), "Error setting device");
int nx = N;
int ny = N;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float); // bytes per matrix (element size, not pointer size)
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A = (float *)malloc(nBytes);
float *h_B = (float *)malloc(nBytes);
float *hostRef = (float *)malloc(nBytes);
float *hostRefThreads = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
float *gpuRefTiles = (float *)malloc(nBytes);
// initialize matrix
initializeMatrix(h_A, nxy);
initializeMatrix(h_B, nxy);
// initialize to 0
memset(hostRef, 0, nBytes);
memset(hostRefThreads, 0, nBytes);
memset(gpuRef, 0, nBytes);
memset(gpuRefTiles, 0, nBytes);
// multiply matrix on host
auto start_cpu = std::chrono::high_resolution_clock::now();
multiplyMatrixOnHost(h_A, h_B, hostRef, nx);
auto end_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("multiplyMatrixOnHost elapsed %f ms\n", duration_ms.count());
// multiply matrix on host with threads
start_cpu = std::chrono::high_resolution_clock::now();
multiplyMatrixOnHostThreads(h_A, h_B, hostRefThreads, nx);
end_cpu = std::chrono::high_resolution_clock::now();
duration_ms = end_cpu - start_cpu;
printf("multiplyMatrixOnHostThreads elapsed %f ms\n", duration_ms.count());
// check results
checkResult(hostRef, hostRefThreads, nxy);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC, *d_MatD;
SAFE_CALL(hipMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA");
SAFE_CALL(hipMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB");
SAFE_CALL(hipMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC");
SAFE_CALL(hipMalloc((void **)&d_MatD, nBytes), "Error allocating d_MatD");
// transfer data from host to device
SAFE_CALL(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice), "Error copying d_MatA");
SAFE_CALL(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice), "Error copying d_MatB");
SAFE_CALL(hipMemset(d_MatC, 0, nBytes), "Error setting d_MatC");
SAFE_CALL(hipMemset(d_MatD, 0, nBytes), "Error setting d_MatD");
// kernel definition and launch
dim3 block(T_SIZE, T_SIZE);
dim3 grid((int)ceil((float)nx / T_SIZE), (int)ceil((float)nx / T_SIZE));
// launch
start_cpu = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( multiplyMatrixOnGPU), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx);
SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel");
end_cpu = std::chrono::high_resolution_clock::now();
// measure total time
duration_ms = end_cpu - start_cpu;
printf("multiplyMatrixOnGPU elapsed %f ms\n", duration_ms.count());
// SAFE_CALL kernel error
SAFE_CALL(hipGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost), "Error copying d_MatC");
// check device results
checkResult(hostRef, gpuRef, nxy);
// GPU TILE VERSION AND COMPARISON
// launch
start_cpu = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( multiplyMatrixOnGPUTiles), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatD, nx);
SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel");
end_cpu = std::chrono::high_resolution_clock::now();
// measure total time
duration_ms = end_cpu - start_cpu;
printf("multiplyMatrixOnGPUTiles elapsed %f ms\n", duration_ms.count());
// SAFE_CALL kernel error
SAFE_CALL(hipGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(hipMemcpy(gpuRefTiles, d_MatD, nBytes, hipMemcpyDeviceToHost), "Error copying d_MatD");
// check device results
checkResult(gpuRef, gpuRefTiles, nxy);
// END GPU TILE VERSION AND COMPARISON
// free device global memory
SAFE_CALL(hipFree(d_MatA), "Error freeing memory");
SAFE_CALL(hipFree(d_MatB), "Error freeing memory");
SAFE_CALL(hipFree(d_MatC), "Error freeing memory");
SAFE_CALL(hipFree(d_MatD), "Error freeing memory");
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(hostRefThreads);
free(gpuRef);
free(gpuRefTiles);
// reset device
SAFE_CALL(hipDeviceReset(), "Error reseting");
return (0);
}
|
fbdbed2b8fde81152c0980e581f15d05054895ae.cu
|
// system libraries
// use nvcc -o (output name) -Wno-deprecated-gpu-targets -std=c++11 -Xcompiler -fopenmp file_name.cu
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <chrono>
// size definition. modify as needed
#define N 2000
#define T_SIZE 32
using namespace std;
// safe call definition
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number){
if(err!=cudaSuccess){
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// safe call definition
#define SAFE_CALL(call,msg) _safe_cuda_call(call,msg,__FILE__,__LINE__)
// initialize row-major matrix
void initializeMatrix(float *ip, const int nxy){
srand (static_cast <unsigned> (time(0)));
float random;
for(int i = 0; i < nxy; i++){
random = 1.0 + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(10.0-1.0)));
ip[i] = random;
}
return;
}
// utility function to check result
void checkResult(float *hostRef, float *gpuRef, const int nxy){
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < nxy; i++){
if (abs(hostRef[i] - gpuRef[i]) > epsilon){
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// multiply matrix on host
void multiplyMatrixOnHost(float *A, float *B, float *C, const int nx){
for(int i = 0; i < nx; i++) {
for(int j = 0; j < nx; j++) {
for(int k = 0; k < nx; k++) {
C[i * nx + j] += A[i * nx + k] * B[j + k * nx];
}
}
}
return;
}
// function to multiply matrix on host with threads
void multiplyMatrixOnHostThreads(float *A, float *B, float *C, const int nx){
int i = 0;
// use the pragma directive to automatically parallelize
#pragma omp parallel for private(i) shared(A, B, C)
for(i = 0; i < nx; i++) {
for(int j = 0; j < nx; j++) {
for(int k = 0; k < nx; k++) {
C[i * nx + j] += A[i * nx + k] * B[j + k * nx];
}
}
}
return;
}
// kernel to multiply matrix on gpu
__global__ void multiplyMatrixOnGPU(float *A, float *B, float *C, const int nx){
// get ix and iy from cuda defined variables
unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0.0;
if (ix < nx && iy < nx){
for(int i = 0; i < nx ; i++)
sum += A[iy * nx + i] * B[i * nx + ix];
C[iy * nx + ix] = sum;
}
}
// Kernel GPU Tiles
__global__ void multiplyMatrixOnGPUTiles(float *A, float *B, float *C, const int nx){
// Create the shared memory space as tiles
__shared__ float tileOne[T_SIZE][T_SIZE], tileTwo[T_SIZE][T_SIZE];
// Get the ix and iy indexes
unsigned int ix = T_SIZE * blockIdx.x + threadIdx.x;
unsigned int iy = T_SIZE * blockIdx.y + threadIdx.y;
// Number of tile phases needed to cover the full matrix width
int limit = ((T_SIZE + nx)/T_SIZE);
// Partial sum accumulator
float partialSum = 0.0;
int i = 0;
while(i < limit){
// Fetch values for each value of the tiles with restriction
if ((iy < nx) && ((i * T_SIZE + threadIdx.x) < nx)){
int id = (iy * nx) + (i * T_SIZE) + threadIdx.x;
tileOne[threadIdx.y][threadIdx.x] = A[id];
}else{
tileOne[threadIdx.y][threadIdx.x] = 0.0;
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
// cuPrintf(""); <--- deprecated
// printf("Improper Tile Size in X domain, zeroing\n");
}
// Wait for threads to finish
__syncthreads();
// Fetch values for each value of the tiles with restriction
if ((ix < nx) && ((i * T_SIZE + threadIdx.y) < nx)){
int id = (i * T_SIZE + threadIdx.y) * nx + ix;
tileTwo[threadIdx.y][threadIdx.x] = B[id];
}else{
tileTwo[threadIdx.y][threadIdx.x] = 0.0;
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
// printf("Improper Tile Size in Y domain, zeroing\n");
}
// Wait for threads to finish
__syncthreads();
//Perform partial sum on tile
#pragma unroll // T_SIZE is constant
for (int j = 0; j < T_SIZE; j++){
partialSum += tileOne[threadIdx.y][j] * tileTwo[j][threadIdx.x];
}
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
//printf("Partial Sum fetched with value %f\n", partialSum);
// Wait for threads to finish
__syncthreads();
i++;
}
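// Write back the accumulated dot product; the flattened index equals iy * nx + ix
// because blockDim matches T_SIZE in both dimensions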
if (ix < nx && iy < nx)
C[((blockIdx.y * blockDim.y + threadIdx.y) * nx) + (blockIdx.x * blockDim.x) + threadIdx.x] = partialSum;
}
int main(int argc, char* argv[]) {
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(cudaSetDevice(dev), "Error setting device");
int nx = N;
int ny = N;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float); // bytes per matrix (element size, not pointer size)
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A = (float *)malloc(nBytes);
float *h_B = (float *)malloc(nBytes);
float *hostRef = (float *)malloc(nBytes);
float *hostRefThreads = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
float *gpuRefTiles = (float *)malloc(nBytes);
// initialize matrix
initializeMatrix(h_A, nxy);
initializeMatrix(h_B, nxy);
// initialize to 0
memset(hostRef, 0, nBytes);
memset(hostRefThreads, 0, nBytes);
memset(gpuRef, 0, nBytes);
memset(gpuRefTiles, 0, nBytes);
// multiply matrix on host
auto start_cpu = std::chrono::high_resolution_clock::now();
multiplyMatrixOnHost(h_A, h_B, hostRef, nx);
auto end_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("multiplyMatrixOnHost elapsed %f ms\n", duration_ms.count());
// multiply matrix on host with threads
start_cpu = std::chrono::high_resolution_clock::now();
multiplyMatrixOnHostThreads(h_A, h_B, hostRefThreads, nx);
end_cpu = std::chrono::high_resolution_clock::now();
duration_ms = end_cpu - start_cpu;
printf("multiplyMatrixOnHostThreads elapsed %f ms\n", duration_ms.count());
// check results
checkResult(hostRef, hostRefThreads, nxy);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC, *d_MatD;
SAFE_CALL(cudaMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA");
SAFE_CALL(cudaMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB");
SAFE_CALL(cudaMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC");
SAFE_CALL(cudaMalloc((void **)&d_MatD, nBytes), "Error allocating d_MatD");
// transfer data from host to device
SAFE_CALL(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatA");
SAFE_CALL(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatB");
SAFE_CALL(cudaMemset(d_MatC, 0, nBytes), "Error setting d_MatC");
SAFE_CALL(cudaMemset(d_MatD, 0, nBytes), "Error setting d_MatD");
// kernel definition and launch
dim3 block(T_SIZE, T_SIZE);
dim3 grid((int)ceil((float)nx / T_SIZE), (int)ceil((float)nx / T_SIZE));
// launch
start_cpu = std::chrono::high_resolution_clock::now();
multiplyMatrixOnGPU<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx);
SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel");
end_cpu = std::chrono::high_resolution_clock::now();
// measure total time
duration_ms = end_cpu - start_cpu;
printf("multiplyMatrixOnGPU elapsed %f ms\n", duration_ms.count());
// SAFE_CALL kernel error
SAFE_CALL(cudaGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatC");
// check device results
checkResult(hostRef, gpuRef, nxy);
// GPU TILE VERSION AND COMPARISON
// launch
start_cpu = std::chrono::high_resolution_clock::now();
multiplyMatrixOnGPUTiles<<<grid, block>>>(d_MatA, d_MatB, d_MatD, nx);
SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel");
end_cpu = std::chrono::high_resolution_clock::now();
// measure total time
duration_ms = end_cpu - start_cpu;
printf("multiplyMatrixOnGPUTiles elapsed %f ms\n", duration_ms.count());
// SAFE_CALL kernel error
SAFE_CALL(cudaGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(cudaMemcpy(gpuRefTiles, d_MatD, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatD");
// check device results
checkResult(gpuRef, gpuRefTiles, nxy);
// END GPU TILE VERSION AND COMPARISON
// free device global memory
SAFE_CALL(cudaFree(d_MatA), "Error freeing memory");
SAFE_CALL(cudaFree(d_MatB), "Error freeing memory");
SAFE_CALL(cudaFree(d_MatC), "Error freeing memory");
SAFE_CALL(cudaFree(d_MatD), "Error freeing memory");
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(hostRefThreads);
free(gpuRef);
free(gpuRefTiles);
// reset device
SAFE_CALL(cudaDeviceReset(), "Error reseting");
return (0);
}
|
5fc1f82ae43cd42a84c159e28c884b105029523c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ static void findNew(double* cCurr, double* cBar, double* cHalf, int nx)
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Set index being computed
int index = globalIdy * nx + globalIdx;
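// Note: there is no bounds check here, so the launch grid must not extend past the matrix extent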
// Recover the new data
cCurr[index] = cBar[index] + cHalf[index];
}
|
5fc1f82ae43cd42a84c159e28c884b105029523c.cu
|
#include "includes.h"
__global__ static void findNew(double* cCurr, double* cBar, double* cHalf, int nx)
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Set index being computed
int index = globalIdy * nx + globalIdx;
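// Note: there is no bounds check here, so the launch grid must not extend past the matrix extent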
// Recover the new data
cCurr[index] = cBar[index] + cHalf[index];
}
|
b3b4d4a41774cc900a0e757a8b1dfc777647efb9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/thrust_rmm_allocator.h>
#include <cudf/copying.hpp>
#include <cudf/detail/merge.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/strings/detail/merge.cuh>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/merge.h>
#include <thrust/tuple.h>
#include <queue>
#include <vector>
namespace { // anonym.
using namespace cudf;
using detail::side;
using index_type = detail::index_type;
/**
* @brief Merges the bits of two validity bitmasks.
*
* Merges the bits from two column_device_views into the destination column_device_view
* according to `merged_indices` map such that bit `i` in `out_col`
* will be equal to bit `thrust::get<1>(merged_indices[i])` from `left_dcol`
* if `thrust::get<0>(merged_indices[i])` equals `side::LEFT`; otherwise,
* from `right_dcol`.
*
* `left_dcol`, `right_dcol` and `out_dcol` must not
* overlap.
*
* @tparam left_have_valids Indicates whether left_dcol mask is unallocated (hence, ALL_VALID)
* @tparam right_have_valids Indicates whether right_dcol mask is unallocated (hence ALL_VALID)
* @param[in] left_dcol The left column_device_view whose bits will be merged
* @param[in] right_dcol The right column_device_view whose bits will be merged
* @param[out] out_dcol The output mutable_column_device_view after merging the left and right
* @param[in] num_destination_rows The number of rows in the out_dcol
* @param[in] merged_indices The map that indicates the source of the input and index
* to be copied to the output. Length must be equal to `num_destination_rows`
*/
template <bool left_have_valids, bool right_have_valids>
__global__ void materialize_merged_bitmask_kernel(
column_device_view left_dcol,
column_device_view right_dcol,
mutable_column_device_view out_dcol,
size_type const num_destination_rows,
index_type const* const __restrict__ merged_indices)
{
size_type destination_row = threadIdx.x + blockIdx.x * blockDim.x;
auto active_threads = __ballot_sync(0xffffffff, destination_row < num_destination_rows);
while (destination_row < num_destination_rows) {
index_type const& merged_idx = merged_indices[destination_row];
side const src_side = thrust::get<0>(merged_idx);
size_type const src_row = thrust::get<1>(merged_idx);
bool const from_left{src_side == side::LEFT};
bool source_bit_is_valid{true};
if (left_have_valids && from_left) {
source_bit_is_valid = left_dcol.is_valid_nocheck(src_row);
} else if (right_have_valids && !from_left) {
source_bit_is_valid = right_dcol.is_valid_nocheck(src_row);
}
// Use ballot to find all valid bits in this warp and create the output
// bitmask element
bitmask_type const result_mask{__ballot_sync(active_threads, source_bit_is_valid)};
size_type const output_element = word_index(destination_row);
// Only one thread writes output
if (0 == threadIdx.x % warpSize) { out_dcol.set_mask_word(output_element, result_mask); }
destination_row += blockDim.x * gridDim.x;
active_threads = __ballot_sync(active_threads, destination_row < num_destination_rows);
}
}
void materialize_bitmask(column_view const& left_col,
column_view const& right_col,
mutable_column_view& out_col,
index_type const* merged_indices,
hipStream_t stream)
{
constexpr size_type BLOCK_SIZE{256};
detail::grid_1d grid_config{out_col.size(), BLOCK_SIZE};
auto p_left_dcol = column_device_view::create(left_col);
auto p_right_dcol = column_device_view::create(right_col);
auto p_out_dcol = mutable_column_device_view::create(out_col);
auto left_valid = *p_left_dcol;
auto right_valid = *p_right_dcol;
auto out_valid = *p_out_dcol;
if (left_col.has_nulls()) {
if (right_col.has_nulls()) {
hipLaunchKernelGGL(( materialize_merged_bitmask_kernel<true, true>)
, dim3(grid_config.num_blocks), dim3(grid_config.num_threads_per_block), 0, stream,
left_valid, right_valid, out_valid, out_col.size(), merged_indices);
} else {
hipLaunchKernelGGL(( materialize_merged_bitmask_kernel<true, false>)
, dim3(grid_config.num_blocks), dim3(grid_config.num_threads_per_block), 0, stream,
left_valid, right_valid, out_valid, out_col.size(), merged_indices);
}
} else {
if (right_col.has_nulls()) {
hipLaunchKernelGGL(( materialize_merged_bitmask_kernel<false, true>)
, dim3(grid_config.num_blocks), dim3(grid_config.num_threads_per_block), 0, stream,
left_valid, right_valid, out_valid, out_col.size(), merged_indices);
} else {
CUDF_FAIL("materialize_merged_bitmask_kernel<false, false>() should never be called.");
}
}
CHECK_CUDA(stream);
}
/**
* @brief Generates the row indices and source side (left or right) in accordance with the index
* columns.
*
*
* @tparam index_type Indicates the type to be used to collect index and side information;
* @param[in] left_table The left table_view to be merged
* @param[in] right_table The right table_view to be merged
* @param[in] column_order Sort order types of index columns
* @param[in] null_precedence Array indicating the order of nulls with respect to non-nulls for the
* index columns
* @param[in] nullable Flag indicating if at least one of the table_view arguments has nulls
* (defaults to true)
* @param[in] stream CUDA stream (defaults to nullptr)
*
* @return A vector of merged indices
*/
rmm::device_vector<index_type> generate_merged_indices(
table_view const& left_table,
table_view const& right_table,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
bool nullable = true,
hipStream_t stream = nullptr)
{
const size_type left_size = left_table.num_rows();
const size_type right_size = right_table.num_rows();
const size_type total_size = left_size + right_size;
thrust::constant_iterator<side> left_side(side::LEFT);
thrust::constant_iterator<side> right_side(side::RIGHT);
auto left_indices = thrust::make_counting_iterator(static_cast<size_type>(0));
auto right_indices = thrust::make_counting_iterator(static_cast<size_type>(0));
auto left_begin_zip_iterator =
thrust::make_zip_iterator(thrust::make_tuple(left_side, left_indices));
auto right_begin_zip_iterator =
thrust::make_zip_iterator(thrust::make_tuple(right_side, right_indices));
auto left_end_zip_iterator =
thrust::make_zip_iterator(thrust::make_tuple(left_side + left_size, left_indices + left_size));
auto right_end_zip_iterator = thrust::make_zip_iterator(
thrust::make_tuple(right_side + right_size, right_indices + right_size));
rmm::device_vector<index_type> merged_indices(total_size);
auto lhs_device_view = table_device_view::create(left_table, stream);
auto rhs_device_view = table_device_view::create(right_table, stream);
rmm::device_vector<order> d_column_order(column_order);
auto exec_pol = rmm::exec_policy(stream);
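// thrust::merge interleaves the tagged (side, row-index) sequences into a single
// sorted order using the row comparator, producing the merged row map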
if (nullable) {
rmm::device_vector<null_order> d_null_precedence(null_precedence);
auto ineq_op =
detail::row_lexicographic_tagged_comparator<true>(*lhs_device_view,
*rhs_device_view,
d_column_order.data().get(),
d_null_precedence.data().get());
thrust::merge(exec_pol->on(stream),
left_begin_zip_iterator,
left_end_zip_iterator,
right_begin_zip_iterator,
right_end_zip_iterator,
merged_indices.begin(),
ineq_op);
} else {
auto ineq_op = detail::row_lexicographic_tagged_comparator<false>(
*lhs_device_view, *rhs_device_view, d_column_order.data().get());
thrust::merge(exec_pol->on(stream),
left_begin_zip_iterator,
left_end_zip_iterator,
right_begin_zip_iterator,
right_end_zip_iterator,
merged_indices.begin(),
ineq_op);
}
CHECK_CUDA(stream);
return merged_indices;
}
} // namespace
namespace cudf {
namespace detail {
// generate merged column
// given row order of merged tables
//(ordered according to indices of key_cols)
// and the 2 columns to merge
//
struct column_merger {
using index_vector = rmm::device_vector<index_type>;
explicit column_merger(index_vector const& row_order,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = nullptr)
: dv_row_order_(row_order), mr_(mr), stream_(stream)
{
}
// column merger operator;
//
template <typename Element> // required: column type
std::unique_ptr<column> operator()(column_view const& lcol, column_view const& rcol) const
{
auto lsz = lcol.size();
auto merged_size = lsz + rcol.size();
auto type = lcol.type();
std::unique_ptr<cudf::column> p_merged_col{nullptr};
if (lcol.has_nulls())
p_merged_col = cudf::allocate_like(lcol, merged_size);
else
p_merged_col = cudf::allocate_like(rcol, merged_size);
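// Allocating like whichever input has nulls ensures the merged column is created
// with a null mask whenever either side contains nulls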
//"gather" data from lcol, rcol according to dv_row_order_ "map"
//(directly calling gather() won't work because
// lcol, rcol indices overlap!)
//
cudf::mutable_column_view merged_view = p_merged_col->mutable_view();
// initialize null_mask to all valid:
//
// Note: this initialization in conjunction with _conditionally_
// calling materialize_bitmask() below covers the case
// materialize_merged_bitmask_kernel<false, false>()
// which won't be called anymore (because of the _condition_ below)
//
cudf::set_null_mask(merged_view.null_mask(), 0, merged_view.size(), true, stream_);
// set the null count:
//
p_merged_col->set_null_count(lcol.null_count() + rcol.null_count());
// to resolve view.data()'s types use: Element
//
Element const* p_d_lcol = lcol.data<Element>();
Element const* p_d_rcol = rcol.data<Element>();
auto exe_pol = rmm::exec_policy(stream_);
// capture lcol, rcol
// and "gather" into merged_view.data()[indx_merged]
// from lcol or rcol, depending on side;
//
thrust::transform(exe_pol->on(stream_),
dv_row_order_.begin(),
dv_row_order_.end(),
merged_view.begin<Element>(),
[p_d_lcol, p_d_rcol] __device__(index_type const& index_pair) {
auto side = thrust::get<0>(index_pair);
auto index = thrust::get<1>(index_pair);
Element val = (side == side::LEFT ? p_d_lcol[index] : p_d_rcol[index]);
return val;
});
// CAVEAT: conditional call below is erroneous without
// set_null_mask() call (see note above):
//
if (lcol.has_nulls() || rcol.has_nulls()) {
// resolve null mask:
//
materialize_bitmask(lcol, rcol, merged_view, dv_row_order_.data().get(), stream_);
}
return p_merged_col;
}
private:
index_vector const& dv_row_order_;
rmm::mr::device_memory_resource* mr_;
hipStream_t stream_;
};
// specialization for strings
template <>
std::unique_ptr<column> column_merger::operator()<cudf::string_view>(column_view const& lcol,
column_view const& rcol) const
{
auto column = strings::detail::merge<index_type>(strings_column_view(lcol),
strings_column_view(rcol),
dv_row_order_.begin(),
dv_row_order_.end(),
mr_,
stream_);
if (lcol.has_nulls() || rcol.has_nulls()) {
auto merged_view = column->mutable_view();
materialize_bitmask(lcol, rcol, merged_view, dv_row_order_.data().get(), stream_);
}
return column;
}
// specialization for dictionary
template <>
std::unique_ptr<column> column_merger::operator()<cudf::dictionary32>(column_view const& lcol,
column_view const& rcol) const
{
CUDF_FAIL("dictionary not supported yet");
}
using table_ptr_type = std::unique_ptr<cudf::table>;
namespace {
table_ptr_type merge(cudf::table_view const& left_table,
cudf::table_view const& right_table,
std::vector<cudf::size_type> const& key_cols,
std::vector<cudf::order> const& column_order,
std::vector<cudf::null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
hipStream_t stream = 0)
{
// collect index columns for lhs, rhs, resp.
//
cudf::table_view index_left_view{left_table.select(key_cols)};
cudf::table_view index_right_view{right_table.select(key_cols)};
bool const nullable = cudf::has_nulls(index_left_view) || cudf::has_nulls(index_right_view);
// extract merged row order according to indices:
//
rmm::device_vector<index_type> merged_indices = generate_merged_indices(
index_left_view, index_right_view, column_order, null_precedence, nullable);
// create merged table:
//
auto const n_cols = left_table.num_columns();
std::vector<std::unique_ptr<column>> merged_cols;
merged_cols.reserve(n_cols);
column_merger merger{merged_indices, mr, stream};
transform(left_table.begin(),
left_table.end(),
right_table.begin(),
std::back_inserter(merged_cols),
[&](auto const& left_col, auto const& right_col) {
return cudf::type_dispatcher(left_col.type(), merger, left_col, right_col);
});
return std::make_unique<cudf::table>(std::move(merged_cols));
}
struct merge_queue_item {
table_view view;
table_ptr_type table;
// Priority is a separate member to ensure that moving from an object
// does not change its priority (which would ruin the queue invariant)
cudf::size_type priority = 0;
merge_queue_item(table_view const& view, table_ptr_type&& table)
: view{view}, table{std::move(table)}, priority{-view.num_rows()}
{
}
bool operator<(merge_queue_item const& other) const { return priority < other.priority; }
};
// Helper function to ensure that moving out of the priority_queue is "atomic"
template <typename T>
T top_and_pop(std::priority_queue<T>& q)
{
auto moved = std::move(const_cast<T&>(q.top()));
q.pop();
return moved;
}
} // namespace
table_ptr_type merge(std::vector<table_view> const& tables_to_merge,
std::vector<cudf::size_type> const& key_cols,
std::vector<cudf::order> const& column_order,
std::vector<cudf::null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
hipStream_t stream = 0)
{
if (tables_to_merge.empty()) { return std::make_unique<cudf::table>(); }
auto const& first_table = tables_to_merge.front();
auto const n_cols = first_table.num_columns();
CUDF_EXPECTS(std::all_of(tables_to_merge.cbegin(),
tables_to_merge.cend(),
[n_cols](auto const& tbl) { return n_cols == tbl.num_columns(); }),
"Mismatched number of columns");
CUDF_EXPECTS(
std::all_of(tables_to_merge.cbegin(),
tables_to_merge.cend(),
[&](auto const& tbl) { return cudf::have_same_types(first_table, tbl); }),
"Mismatched column types");
CUDF_EXPECTS(!key_cols.empty(), "Empty key_cols");
CUDF_EXPECTS(key_cols.size() <= static_cast<size_t>(n_cols), "Too many values in key_cols");
CUDF_EXPECTS(key_cols.size() == column_order.size(),
"Mismatched size between key_cols and column_order");
// A queue of (table view, table) pairs
std::priority_queue<merge_queue_item> merge_queue;
// The table pointer is null if we do not own the table (input tables)
std::for_each(tables_to_merge.begin(), tables_to_merge.end(), [&](auto const& table) {
if (table.num_rows() > 0) merge_queue.emplace(table, table_ptr_type());
});
// If there is only one non-empty table_view, return its copy
if (merge_queue.size() == 1) { return std::make_unique<cudf::table>(merge_queue.top().view); }
// No inputs have rows, return a table with same columns as the first one
if (merge_queue.empty()) { return empty_like(first_table); }
// Pick the two smallest tables and merge them
// Until there is only one table left in the queue
while (merge_queue.size() > 1) {
// To delete the intermediate table at the end of the block
auto const left_table = top_and_pop(merge_queue);
// Deallocated at the end of the block
auto const right_table = top_and_pop(merge_queue);
// Only use mr for the output table
auto const& new_tbl_rm = merge_queue.empty() ? mr : rmm::mr::get_default_resource();
auto merged_table = merge(left_table.view,
right_table.view,
key_cols,
column_order,
null_precedence,
new_tbl_rm,
stream);
auto const merged_table_view = merged_table->view();
merge_queue.emplace(merged_table_view, std::move(merged_table));
}
return std::move(top_and_pop(merge_queue).table);
}
} // namespace detail
std::unique_ptr<cudf::table> merge(std::vector<table_view> const& tables_to_merge,
std::vector<cudf::size_type> const& key_cols,
std::vector<cudf::order> const& column_order,
std::vector<cudf::null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::merge(tables_to_merge, key_cols, column_order, null_precedence, mr);
}
} // namespace cudf
|
b3b4d4a41774cc900a0e757a8b1dfc777647efb9.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/thrust_rmm_allocator.h>
#include <cudf/copying.hpp>
#include <cudf/detail/merge.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/strings/detail/merge.cuh>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/merge.h>
#include <thrust/tuple.h>
#include <queue>
#include <vector>
namespace { // anonym.
using namespace cudf;
using detail::side;
using index_type = detail::index_type;
/**
* @brief Merges the bits of two validity bitmasks.
*
* Merges the bits from two column_device_views into the destination column_device_view
* according to `merged_indices` map such that bit `i` in `out_col`
* will be equal to bit `thrust::get<1>(merged_indices[i])` from `left_dcol`
* if `thrust::get<0>(merged_indices[i])` equals `side::LEFT`; otherwise,
* from `right_dcol`.
*
* `left_dcol`, `right_dcol` and `out_dcol` must not
* overlap.
*
* @tparam left_have_valids Indicates whether left_dcol mask is unallocated (hence, ALL_VALID)
* @tparam right_have_valids Indicates whether right_dcol mask is unallocated (hence ALL_VALID)
* @param[in] left_dcol The left column_device_view whose bits will be merged
* @param[in] right_dcol The right column_device_view whose bits will be merged
* @param[out] out_dcol The output mutable_column_device_view after merging the left and right
* @param[in] num_destination_rows The number of rows in the out_dcol
* @param[in] merged_indices The map that indicates the source of the input and index
* to be copied to the output. Length must be equal to `num_destination_rows`
*/
template <bool left_have_valids, bool right_have_valids>
__global__ void materialize_merged_bitmask_kernel(
column_device_view left_dcol,
column_device_view right_dcol,
mutable_column_device_view out_dcol,
size_type const num_destination_rows,
index_type const* const __restrict__ merged_indices)
{
size_type destination_row = threadIdx.x + blockIdx.x * blockDim.x;
auto active_threads = __ballot_sync(0xffffffff, destination_row < num_destination_rows);
while (destination_row < num_destination_rows) {
index_type const& merged_idx = merged_indices[destination_row];
side const src_side = thrust::get<0>(merged_idx);
size_type const src_row = thrust::get<1>(merged_idx);
bool const from_left{src_side == side::LEFT};
bool source_bit_is_valid{true};
if (left_have_valids && from_left) {
source_bit_is_valid = left_dcol.is_valid_nocheck(src_row);
} else if (right_have_valids && !from_left) {
source_bit_is_valid = right_dcol.is_valid_nocheck(src_row);
}
// Use ballot to find all valid bits in this warp and create the output
// bitmask element
bitmask_type const result_mask{__ballot_sync(active_threads, source_bit_is_valid)};
size_type const output_element = word_index(destination_row);
// Only one thread writes output
if (0 == threadIdx.x % warpSize) { out_dcol.set_mask_word(output_element, result_mask); }
destination_row += blockDim.x * gridDim.x;
active_threads = __ballot_sync(active_threads, destination_row < num_destination_rows);
}
}
void materialize_bitmask(column_view const& left_col,
column_view const& right_col,
mutable_column_view& out_col,
index_type const* merged_indices,
cudaStream_t stream)
{
constexpr size_type BLOCK_SIZE{256};
detail::grid_1d grid_config{out_col.size(), BLOCK_SIZE};
auto p_left_dcol = column_device_view::create(left_col);
auto p_right_dcol = column_device_view::create(right_col);
auto p_out_dcol = mutable_column_device_view::create(out_col);
auto left_valid = *p_left_dcol;
auto right_valid = *p_right_dcol;
auto out_valid = *p_out_dcol;
if (left_col.has_nulls()) {
if (right_col.has_nulls()) {
materialize_merged_bitmask_kernel<true, true>
<<<grid_config.num_blocks, grid_config.num_threads_per_block, 0, stream>>>(
left_valid, right_valid, out_valid, out_col.size(), merged_indices);
} else {
materialize_merged_bitmask_kernel<true, false>
<<<grid_config.num_blocks, grid_config.num_threads_per_block, 0, stream>>>(
left_valid, right_valid, out_valid, out_col.size(), merged_indices);
}
} else {
if (right_col.has_nulls()) {
materialize_merged_bitmask_kernel<false, true>
<<<grid_config.num_blocks, grid_config.num_threads_per_block, 0, stream>>>(
left_valid, right_valid, out_valid, out_col.size(), merged_indices);
} else {
CUDF_FAIL("materialize_merged_bitmask_kernel<false, false>() should never be called.");
}
}
CHECK_CUDA(stream);
}
/**
* @brief Generates the row indices and source side (left or right) in accordance with the index
* columns.
*
*
* @tparam index_type Indicates the type to be used to collect index and side information;
* @param[in] left_table The left table_view to be merged
* @param[in] right_table The right table_view to be merged
* @param[in] column_order Sort order types of index columns
* @param[in] null_precedence Array indicating the order of nulls with respect to non-nulls for the
* index columns
* @param[in] nullable Flag indicating if at least one of the table_view arguments has nulls
* (defaults to true)
* @param[in] stream CUDA stream (defaults to nullptr)
*
* @return A vector of merged indices
*/
rmm::device_vector<index_type> generate_merged_indices(
table_view const& left_table,
table_view const& right_table,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
bool nullable = true,
cudaStream_t stream = nullptr)
{
const size_type left_size = left_table.num_rows();
const size_type right_size = right_table.num_rows();
const size_type total_size = left_size + right_size;
thrust::constant_iterator<side> left_side(side::LEFT);
thrust::constant_iterator<side> right_side(side::RIGHT);
auto left_indices = thrust::make_counting_iterator(static_cast<size_type>(0));
auto right_indices = thrust::make_counting_iterator(static_cast<size_type>(0));
auto left_begin_zip_iterator =
thrust::make_zip_iterator(thrust::make_tuple(left_side, left_indices));
auto right_begin_zip_iterator =
thrust::make_zip_iterator(thrust::make_tuple(right_side, right_indices));
auto left_end_zip_iterator =
thrust::make_zip_iterator(thrust::make_tuple(left_side + left_size, left_indices + left_size));
auto right_end_zip_iterator = thrust::make_zip_iterator(
thrust::make_tuple(right_side + right_size, right_indices + right_size));
rmm::device_vector<index_type> merged_indices(total_size);
auto lhs_device_view = table_device_view::create(left_table, stream);
auto rhs_device_view = table_device_view::create(right_table, stream);
rmm::device_vector<order> d_column_order(column_order);
auto exec_pol = rmm::exec_policy(stream);
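// thrust::merge interleaves the tagged (side, row-index) sequences into a single
// sorted order using the row comparator, producing the merged row map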
if (nullable) {
rmm::device_vector<null_order> d_null_precedence(null_precedence);
auto ineq_op =
detail::row_lexicographic_tagged_comparator<true>(*lhs_device_view,
*rhs_device_view,
d_column_order.data().get(),
d_null_precedence.data().get());
thrust::merge(exec_pol->on(stream),
left_begin_zip_iterator,
left_end_zip_iterator,
right_begin_zip_iterator,
right_end_zip_iterator,
merged_indices.begin(),
ineq_op);
} else {
auto ineq_op = detail::row_lexicographic_tagged_comparator<false>(
*lhs_device_view, *rhs_device_view, d_column_order.data().get());
thrust::merge(exec_pol->on(stream),
left_begin_zip_iterator,
left_end_zip_iterator,
right_begin_zip_iterator,
right_end_zip_iterator,
merged_indices.begin(),
ineq_op);
}
CHECK_CUDA(stream);
return merged_indices;
}
} // namespace
namespace cudf {
namespace detail {
// generate merged column
// given row order of merged tables
//(ordered according to indices of key_cols)
// and the 2 columns to merge
//
struct column_merger {
using index_vector = rmm::device_vector<index_type>;
explicit column_merger(index_vector const& row_order,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = nullptr)
: dv_row_order_(row_order), mr_(mr), stream_(stream)
{
}
// column merger operator;
//
template <typename Element> // required: column type
std::unique_ptr<column> operator()(column_view const& lcol, column_view const& rcol) const
{
auto lsz = lcol.size();
auto merged_size = lsz + rcol.size();
auto type = lcol.type();
std::unique_ptr<cudf::column> p_merged_col{nullptr};
if (lcol.has_nulls())
p_merged_col = cudf::allocate_like(lcol, merged_size);
else
p_merged_col = cudf::allocate_like(rcol, merged_size);
//"gather" data from lcol, rcol according to dv_row_order_ "map"
//(directly calling gather() won't work because
// lcol, rcol indices overlap!)
//
cudf::mutable_column_view merged_view = p_merged_col->mutable_view();
// initialize null_mask to all valid:
//
// Note: this initialization in conjunction with _conditionally_
// calling materialize_bitmask() below covers the case
// materialize_merged_bitmask_kernel<false, false>()
// which won't be called anymore (because of the _condition_ below)
//
cudf::set_null_mask(merged_view.null_mask(), 0, merged_view.size(), true, stream_);
// set the null count:
//
p_merged_col->set_null_count(lcol.null_count() + rcol.null_count());
// use Element to resolve the type returned by view.data<>():
//
Element const* p_d_lcol = lcol.data<Element>();
Element const* p_d_rcol = rcol.data<Element>();
auto exe_pol = rmm::exec_policy(stream_);
// capture lcol, rcol
// and "gather" into merged_view.data()[indx_merged]
// from lcol or rcol, depending on side;
//
thrust::transform(exe_pol->on(stream_),
dv_row_order_.begin(),
dv_row_order_.end(),
merged_view.begin<Element>(),
[p_d_lcol, p_d_rcol] __device__(index_type const& index_pair) {
auto side = thrust::get<0>(index_pair);
auto index = thrust::get<1>(index_pair);
Element val = (side == side::LEFT ? p_d_lcol[index] : p_d_rcol[index]);
return val;
});
// CAVEAT: the conditional call below is erroneous without the
// set_null_mask() call (see the Note above):
//
if (lcol.has_nulls() || rcol.has_nulls()) {
// resolve null mask:
//
materialize_bitmask(lcol, rcol, merged_view, dv_row_order_.data().get(), stream_);
}
return p_merged_col;
}
private:
index_vector const& dv_row_order_;
rmm::mr::device_memory_resource* mr_;
cudaStream_t stream_;
};
// specialization for strings
template <>
std::unique_ptr<column> column_merger::operator()<cudf::string_view>(column_view const& lcol,
column_view const& rcol) const
{
auto column = strings::detail::merge<index_type>(strings_column_view(lcol),
strings_column_view(rcol),
dv_row_order_.begin(),
dv_row_order_.end(),
mr_,
stream_);
if (lcol.has_nulls() || rcol.has_nulls()) {
auto merged_view = column->mutable_view();
materialize_bitmask(lcol, rcol, merged_view, dv_row_order_.data().get(), stream_);
}
return column;
}
// specialization for dictionary
template <>
std::unique_ptr<column> column_merger::operator()<cudf::dictionary32>(column_view const& lcol,
column_view const& rcol) const
{
CUDF_FAIL("dictionary not supported yet");
}
using table_ptr_type = std::unique_ptr<cudf::table>;
namespace {
table_ptr_type merge(cudf::table_view const& left_table,
cudf::table_view const& right_table,
std::vector<cudf::size_type> const& key_cols,
std::vector<cudf::order> const& column_order,
std::vector<cudf::null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream = 0)
{
// collect index columns for lhs, rhs, resp.
//
cudf::table_view index_left_view{left_table.select(key_cols)};
cudf::table_view index_right_view{right_table.select(key_cols)};
bool const nullable = cudf::has_nulls(index_left_view) || cudf::has_nulls(index_right_view);
// extract merged row order according to indices:
//
rmm::device_vector<index_type> merged_indices = generate_merged_indices(
index_left_view, index_right_view, column_order, null_precedence, nullable);
// create merged table:
//
auto const n_cols = left_table.num_columns();
std::vector<std::unique_ptr<column>> merged_cols;
merged_cols.reserve(n_cols);
column_merger merger{merged_indices, mr, stream};
transform(left_table.begin(),
left_table.end(),
right_table.begin(),
std::back_inserter(merged_cols),
[&](auto const& left_col, auto const& right_col) {
return cudf::type_dispatcher(left_col.type(), merger, left_col, right_col);
});
return std::make_unique<cudf::table>(std::move(merged_cols));
}
struct merge_queue_item {
table_view view;
table_ptr_type table;
// Priority is a separate member to ensure that moving from an object
// does not change its priority (which would ruin the queue invariant)
cudf::size_type priority = 0;
merge_queue_item(table_view const& view, table_ptr_type&& table)
: view{view}, table{std::move(table)}, priority{-view.num_rows()}
{
}
bool operator<(merge_queue_item const& other) const { return priority < other.priority; }
};
// Helper function to ensure that moving out of the priority_queue is "atomic"
template <typename T>
T top_and_pop(std::priority_queue<T>& q)
{
auto moved = std::move(const_cast<T&>(q.top()));
q.pop();
return moved;
}
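// Usage sketch (illustrative only): std::priority_queue::top() returns a
// const reference, so the const_cast above is what lets the queued table be
// moved out rather than copied before pop() destroys the stored element:
//
//   std::priority_queue<merge_queue_item> q;
//   ...                                // populate q
//   auto item = top_and_pop(q);        // item.table now owns its table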
} // namespace
table_ptr_type merge(std::vector<table_view> const& tables_to_merge,
std::vector<cudf::size_type> const& key_cols,
std::vector<cudf::order> const& column_order,
std::vector<cudf::null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream = 0)
{
if (tables_to_merge.empty()) { return std::make_unique<cudf::table>(); }
auto const& first_table = tables_to_merge.front();
auto const n_cols = first_table.num_columns();
CUDF_EXPECTS(std::all_of(tables_to_merge.cbegin(),
tables_to_merge.cend(),
[n_cols](auto const& tbl) { return n_cols == tbl.num_columns(); }),
"Mismatched number of columns");
CUDF_EXPECTS(
std::all_of(tables_to_merge.cbegin(),
tables_to_merge.cend(),
[&](auto const& tbl) { return cudf::have_same_types(first_table, tbl); }),
"Mismatched column types");
CUDF_EXPECTS(!key_cols.empty(), "Empty key_cols");
CUDF_EXPECTS(key_cols.size() <= static_cast<size_t>(n_cols), "Too many values in key_cols");
CUDF_EXPECTS(key_cols.size() == column_order.size(),
"Mismatched size between key_cols and column_order");
// A queue of (table view, table) pairs
std::priority_queue<merge_queue_item> merge_queue;
// The table pointer is null if we do not own the table (input tables)
std::for_each(tables_to_merge.begin(), tables_to_merge.end(), [&](auto const& table) {
if (table.num_rows() > 0) merge_queue.emplace(table, table_ptr_type());
});
// If there is only one non-empty table_view, return its copy
if (merge_queue.size() == 1) { return std::make_unique<cudf::table>(merge_queue.top().view); }
// No inputs have rows; return a table with the same columns as the first one
if (merge_queue.empty()) { return empty_like(first_table); }
// Repeatedly pick the two smallest tables and merge them until only one
// table remains in the queue (a standalone sketch of this strategy follows
// this function)
while (merge_queue.size() > 1) {
// To delete the intermediate table at the end of the block
auto const left_table = top_and_pop(merge_queue);
// Deallocated at the end of the block
auto const right_table = top_and_pop(merge_queue);
// Only use mr for the output table
auto const& new_tbl_rm = merge_queue.empty() ? mr : rmm::mr::get_default_resource();
auto merged_table = merge(left_table.view,
right_table.view,
key_cols,
column_order,
null_precedence,
new_tbl_rm,
stream);
auto const merged_table_view = merged_table->view();
merge_queue.emplace(merged_table_view, std::move(merged_table));
}
return std::move(top_and_pop(merge_queue).table);
}
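// ---------------------------------------------------------------------------
// Illustrative sketch (not used by the library): the same "merge the two
// smallest runs first" strategy as detail::merge above, shown on plain
// std::vector<int> runs so the priority-queue control flow is easy to follow.
// It assumes every input run is already sorted ascending and relies only on
// headers this file already pulls in (<vector>, <queue>, <algorithm>,
// <iterator>); the name merge_sorted_runs_sketch is made up for this example.
// Unlike the library code, the runs are copied out of the queue for brevity
// instead of being moved via top_and_pop.
// ---------------------------------------------------------------------------
inline std::vector<int> merge_sorted_runs_sketch(std::vector<std::vector<int>> runs)
{
  // Order runs by size, smallest on top (mirrors merge_queue_item's priority).
  auto const by_size = [](std::vector<int> const& a, std::vector<int> const& b) {
    return a.size() > b.size();
  };
  std::priority_queue<std::vector<int>, std::vector<std::vector<int>>, decltype(by_size)> queue(
    by_size);
  for (auto& run : runs) {
    if (!run.empty()) queue.push(std::move(run));
  }
  if (queue.empty()) { return {}; }
  // Repeatedly merge the two smallest runs until a single run remains.
  while (queue.size() > 1) {
    auto left = queue.top();
    queue.pop();
    auto right = queue.top();
    queue.pop();
    std::vector<int> merged;
    merged.reserve(left.size() + right.size());
    std::merge(left.begin(), left.end(), right.begin(), right.end(), std::back_inserter(merged));
    queue.push(std::move(merged));
  }
  return queue.top();
}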
} // namespace detail
std::unique_ptr<cudf::table> merge(std::vector<table_view> const& tables_to_merge,
std::vector<cudf::size_type> const& key_cols,
std::vector<cudf::order> const& column_order,
std::vector<cudf::null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::merge(tables_to_merge, key_cols, column_order, null_precedence, mr);
}
} // namespace cudf
|
75824ac4ae98bc1e674740307d52444b431fe6ff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "lab2.h"
#include <cmath>
#include <algorithm>
static const unsigned W = 640;
static const unsigned H = 480;
static const unsigned NFRAME = 240;
int p[] = {151,160,137,91,90,15,
131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9,
129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180};
// To remove the need for index wrapping, the permutation table p is repeated into perm (filled with p[i & 255] in Impl::init())
int *perm = new int[W*H/2];
//for(int i=0; i<512; i++) perm[i]=p[i & 255];
struct Lab2VideoGenerator::Impl {
int t = 0;
void init() {
for(int i=0; i<W*H/2; i++) perm[i]=p[i & 255];
}
};
Lab2VideoGenerator::Lab2VideoGenerator(): impl(new Impl) {
}
Lab2VideoGenerator::~Lab2VideoGenerator() {}
void Lab2VideoGenerator::get_info(Lab2VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 24/1 = 24
info.fps_n = 24;
info.fps_d = 1;
};
void Lab2VideoGenerator::Generate(uint8_t *yuv) {
hipMemset(yuv, 0, W*H);
std::random_shuffle(p, p+256);
impl->init();
for(int i = 0; i < W*H/2; ++i) {
hipMemset(yuv+W*H+i, perm[i], 1);
}
++(impl->t);
}
|
75824ac4ae98bc1e674740307d52444b431fe6ff.cu
|
#include "lab2.h"
#include <cmath>
#include <algorithm>
static const unsigned W = 640;
static const unsigned H = 480;
static const unsigned NFRAME = 240;
int p[] = {151,160,137,91,90,15,
131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9,
129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180};
// To remove the need for index wrapping, the permutation table p is repeated into perm (filled with p[i & 255] in Impl::init())
int *perm = new int[W*H/2];
//for(int i=0; i<512; i++) perm[i]=p[i & 255];
struct Lab2VideoGenerator::Impl {
int t = 0;
void init() {
for(int i=0; i<W*H/2; i++) perm[i]=p[i & 255];
}
};
Lab2VideoGenerator::Lab2VideoGenerator(): impl(new Impl) {
}
Lab2VideoGenerator::~Lab2VideoGenerator() {}
void Lab2VideoGenerator::get_info(Lab2VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 24/1 = 24
info.fps_n = 24;
info.fps_d = 1;
};
void Lab2VideoGenerator::Generate(uint8_t *yuv) {
cudaMemset(yuv, 0, W*H);
std::random_shuffle(p, p+256);
impl->init();
for(int i = 0; i < W*H/2; ++i) {
cudaMemset(yuv+W*H+i, perm[i], 1);
}
++(impl->t);
}
|
10fef2d040662de983429b1b3de38a3624ff09dc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void sub_scalar_double(int n,int idx, double dx,double *dy,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = dy[i] - dx;
}
}
|
10fef2d040662de983429b1b3de38a3624ff09dc.cu
|
extern "C"
__global__ void sub_scalar_double(int n,int idx, double dx,double *dy,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = dy[i] - dx;
}
}
|
a9336f79c85d412c5ebed22597fa5fdfd8990407.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "HZ.hpp"
#include "HZ_L.hpp"
#include "HZ_L2.hpp"
#include "device_code.hpp"
#include "cuda_memory_helper.hpp"
int // 0 if OK, < 0 if invalid argument, > 0 if error
HZ_L2_gpu
(const unsigned routine, // IN, routine ID, <= 15, (B_N_)_2,
// B: block-oriented (else, full-block), N: no sort;
const unsigned ncol, // IN, number of columns, <= min(nrowF, nrowG), == 0 (mod 32);
#ifdef ANIMATE
const unsigned nrowF, // IN, number of rows of F, == 0 (mod 64);
const unsigned nrowG, // IN, number of rows of G, == 0 (mod 64);
cuD *const hFD, // INOUT, ldhF x ncol host array in Fortran order;
cuJ *const hFJ, // INOUT, ldhF x ncol host array in Fortran order;
const unsigned ldhF, // IN, leading dimension of hF, >= nrowF;
cuD *const dFD, // INOUT, lddF x ncol device array in Fortran order;
cuJ *const dFJ, // INOUT, lddF x ncol device array in Fortran order;
const unsigned lddF, // IN, leading dimension of dF, >= nrowF;
cuD *const hGD, // INOUT, ldhG x ncol host array in Fortran order;
cuJ *const hGJ, // INOUT, ldhG x ncol host array in Fortran order;
const unsigned ldhG, // IN, leading dimension of hG, >= nrowG;
cuD *const dGD, // INOUT, lddG x ncol device array in Fortran order;
cuJ *const dGJ, // INOUT, lddG x ncol device array in Fortran order;
const unsigned lddG, // IN, leading dimension of dG, >= nrowG;
#endif /* ANIMATE */
unsigned long long *const hC, // OUT, convergence vector
unsigned long long *const dC, // OUT, convergence vector
unsigned &glbSwp, // OUT, number of sweeps at the outermost level;
unsigned long long &glb_s, // OUT, number of rotations;
unsigned long long &glb_b // OUT, number of ``big'' rotations;
#ifdef ANIMATE
, vn_cmplxvis_ctx *const ctx
, std::complex<double> *const hDJ
, const size_t nrow
#endif /* ANIMATE */
, const hipStream_t s
) throw()
{
void (*const HZ_L1)(const unsigned, const hipStream_t) = ((routine & 2u) ? HZ_L1_v : HZ_L1_sv);
const unsigned swp = ((routine & HZ_BO_2) ? 1u : HZ_NSWEEP);
// stats count
const unsigned sc = STRAT1_PAIRS * C_ELEMS_PER_BLOCK;
// stats len
const size_t sl = sc * sizeof(unsigned long long);
glb_s = 0ull;
glb_b = 0ull;
#if (defined(PROFILE) && (PROFILE == 0))
unsigned long long CLK_1 = 0ull;
unsigned long long CLK_2 = 0ull;
unsigned long long CLK_3 = 0ull;
unsigned long long CLK_4 = 0ull;
#endif /* ?PROFILE */
#ifndef USE_MPI
long long swp_tim = 0ll;
stopwatch_reset(swp_tim);
#endif /* !USE_MPI */
unsigned blk_swp = 0u;
while (blk_swp < swp) {
CUDA_CALL(hipMemsetAsync(dC, 0, sl, s));
CUDA_CALL(hipStreamSynchronize(s));
for (unsigned blk_stp = 0u; blk_stp < STRAT1_STEPS; ++blk_stp) {
if (blk_stp)
CUDA_CALL(hipStreamSynchronize(s));
HZ_L1(blk_stp, s);
#ifdef ANIMATE
if (ctx) {
CUDA_CALL(hipStreamSynchronize(s));
CUDA_CALL(hipMemcpy2DAsync(hFD, ldhF * sizeof(double), dFD, lddF * sizeof(cuD), nrowF * sizeof(cuD), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hFJ, ldhF * sizeof(double), dFJ, lddF * sizeof(cuJ), nrowF * sizeof(cuJ), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hGD, ldhG * sizeof(double), dGD, lddG * sizeof(cuD), nrowG * sizeof(cuD), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hGJ, ldhG * sizeof(double), dGJ, lddG * sizeof(cuJ), nrowG * sizeof(cuJ), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipStreamSynchronize(s));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhF = ldhF * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhF = offhF + i;
hDJ[ixDJ].real(hFD[ixhF]);
hDJ[ixDJ].imag(hFJ[ixhF]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhG = ldhG * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhG = offhG + i;
hDJ[ixDJ].real(hGD[ixhG]);
hDJ[ixDJ].imag(hGJ[ixhG]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
}
#endif /* ANIMATE */
}
CUDA_CALL(hipStreamSynchronize(s));
CUDA_CALL(hipMemcpyAsync(hC, dC, sl, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipStreamSynchronize(s));
unsigned long long cvg_s = 0ull;
unsigned long long cvg_b = 0ull;
#if (defined(PROFILE) && (PROFILE == 0))
unsigned long long clk_1 = 0ull;
unsigned long long clk_2 = 0ull;
unsigned long long clk_3 = 0ull;
unsigned long long clk_4 = 0ull;
#endif /* ?PROFILE */
for (unsigned i = 0u; i < sc; i += C_ELEMS_PER_BLOCK) {
cvg_s += hC[i + C_SMALL];
cvg_b += hC[i + C_BIG];
#if (defined(PROFILE) && (PROFILE == 0))
if (clk_1 < hC[i + C_SUBPHASE_1])
clk_1 = hC[i + C_SUBPHASE_1];
if (clk_2 < hC[i + C_SUBPHASE_2])
clk_2 = hC[i + C_SUBPHASE_2];
if (clk_3 < hC[i + C_SUBPHASE_3])
clk_3 = hC[i + C_SUBPHASE_3];
if (clk_4 < hC[i + C_SUBPHASE_4])
clk_4 = hC[i + C_SUBPHASE_4];
#endif /* ?PROFILE */
}
glb_s += cvg_s;
glb_b += cvg_b;
#if (defined(PROFILE) && (PROFILE == 0))
CLK_1 += clk_1;
CLK_2 += clk_2;
CLK_3 += clk_3;
CLK_4 += clk_4;
#endif /* ?PROFILE */
#ifndef USE_MPI
const double tim_s = stopwatch_lap(swp_tim) * TS2S;
(void)fprintf(stdout, "BLK_SWP(%2u), ROT_S(%13llu), ROT_B(%13llu), TIME(%#14.6f s)", blk_swp, cvg_s, cvg_b, tim_s);
#if (defined(PROFILE) && (PROFILE == 0))
(void)fprintf(stdout, ", clk_1(%11llu), clk_2(%11llu), clk_3(%11llu), clk_4(%11llu)", clk_1, clk_2, clk_3, clk_4);
#endif /* ?PROFILE */
(void)fprintf(stdout, "\n");
(void)fflush(stdout);
#endif /* !USE_MPI */
if (!cvg_b)
break;
++blk_swp;
initS(0, ncol, s);
CUDA_CALL(hipStreamSynchronize(s));
#ifdef ANIMATE
if (ctx) {
CUDA_CALL(hipMemcpy2DAsync(hFD, ldhF * sizeof(double), dFD, lddF * sizeof(cuD), nrowF * sizeof(cuD), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hFJ, ldhF * sizeof(double), dFJ, lddF * sizeof(cuJ), nrowF * sizeof(cuJ), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hGD, ldhG * sizeof(double), dGD, lddG * sizeof(cuD), nrowG * sizeof(cuD), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hGJ, ldhG * sizeof(double), dGJ, lddG * sizeof(cuJ), nrowG * sizeof(cuJ), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipStreamSynchronize(s));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhF = ldhF * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhF = offhF + i;
hDJ[ixDJ].real(hFD[ixhF]);
hDJ[ixDJ].imag(hFJ[ixhF]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhG = ldhG * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhG = offhG + i;
hDJ[ixDJ].real(hGD[ixhG]);
hDJ[ixDJ].imag(hGJ[ixhG]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
}
#endif /* ANIMATE */
}
if (blk_swp < swp)
glbSwp = (blk_swp + 1u);
else
glbSwp = blk_swp;
#ifdef USE_MPI
if (blk_swp < swp)
initS(0, ncol, s);
#else /* !USE_MPI */
initS(1, ncol, s);
#endif /* ?USE_MPI */
CUDA_CALL(hipStreamSynchronize(s));
#if (defined(PROFILE) && (PROFILE == 0))
(void)fprintf(stdout, "CLK_1(%13llu), CLK_2(%13llu), CLK_3(%13llu), CLK_4(%13llu)\n", CLK_1, CLK_2, CLK_3, CLK_4);
(void)fflush(stdout);
#endif /* ?PROFILE */
return 0;
}
int // 0 if OK, < 0 if invalid argument, > 0 if error
HZ_L2
(const unsigned routine, // IN, routine ID, <= 15, (B_N_)_2,
// B: block-oriented (else, full-block), N: no sort;
const unsigned nrowF, // IN, number of rows of F, == 0 (mod 64);
const unsigned nrowG, // IN, number of rows of G, == 0 (mod 64);
const unsigned ncol, // IN, number of columns, <= min(nrowF, nrowG), == 0 (mod 32);
cuD *const hFD, // INOUT, ldhF x ncol host array in Fortran order;
cuJ *const hFJ, // INOUT, ldhF x ncol host array in Fortran order;
const unsigned ldhF, // IN, leading dimension of F, >= nrowF;
cuD *const hGD, // INOUT, ldhG x ncol host array in Fortran order;
cuJ *const hGJ, // INOUT, ldhG x ncol host array in Fortran order;
const unsigned ldhG, // IN, leading dimension of G, >= nrowG;
cuD *const hVD, // INOUT, ldhV x ncol host array in Fortran order;
cuJ *const hVJ, // INOUT, ldhV x ncol host array in Fortran order;
const unsigned ldhV, // IN, leading dimension of V, >= ncol;
double *const hS, // OUT, the generalized singular values, optionally sorted in descending order;
double *const hH, // ||F_i||_F/sqrt(||F_i||_F^2 + ||G_i||_F^2);
double *const hK, // ||G_i||_F/sqrt(||F_i||_F^2 + ||G_i||_F^2);
unsigned &glbSwp, // OUT, number of sweeps at the outermost level;
unsigned long long &glb_s, // OUT, number of rotations;
unsigned long long &glb_b, // OUT, number of ``big'' rotations;
double *const timing, // OUT, optional, in seconds, double[4] ==
// WALL, SETUP & HOST ==> GPUs, COMPUTATION, CLEANUP & GPUs ==> HOST;
const hipStream_t s
) throw()
{
long long timers[4] = { 0ll };
stopwatch_reset(timers[0]);
if (routine >= 16u)
return -1;
if (!nrowF || (nrowF % 64u))
return -2;
if (!nrowG || (nrowG % 64u))
return -3;
if (!ncol || (ncol > nrowF) || (ncol > nrowG) || (ncol % 32u))
return -4;
if (!hFD)
return -5;
if (!hFJ)
return -6;
if (ldhF < nrowF)
return -7;
if (!hGD)
return -8;
if (!hGJ)
return -9;
if (ldhG < nrowG)
return -10;
if (!hVD)
return -11;
if (!hVJ)
return -12;
if (ldhV < ncol)
return -13;
if (!hS)
return -14;
if (!hH)
return -15;
if (!hK)
return -16;
stopwatch_reset(timers[3]);
size_t lddF = static_cast<size_t>(nrowF);
cuD *const dFD = allocDeviceMtx<cuD>(lddF, static_cast<size_t>(nrowF), static_cast<size_t>(ncol), true, s);
cuJ *const dFJ = allocDeviceMtx<cuJ>(lddF, static_cast<size_t>(nrowF), static_cast<size_t>(ncol), true, s);
size_t lddG = static_cast<size_t>(nrowG);
cuD *const dGD = allocDeviceMtx<cuD>(lddG, static_cast<size_t>(nrowG), static_cast<size_t>(ncol), true, s);
cuJ *const dGJ = allocDeviceMtx<cuJ>(lddG, static_cast<size_t>(nrowG), static_cast<size_t>(ncol), true, s);
size_t lddV = static_cast<size_t>(ncol);
cuD *const dVD = allocDeviceMtx<cuD>(lddV, static_cast<size_t>(ncol), static_cast<size_t>(ncol), true, s);
cuJ *const dVJ = allocDeviceMtx<cuJ>(lddV, static_cast<size_t>(ncol), static_cast<size_t>(ncol), true, s);
double *const dS = allocDeviceVec<double>(static_cast<size_t>(ncol), s);
double *const dH = allocDeviceVec<double>(static_cast<size_t>(ncol), s);
double *const dK = allocDeviceVec<double>(static_cast<size_t>(ncol), s);
unsigned long long *const dC = allocDeviceVec<unsigned long long>((static_cast<size_t>(STRAT1_PAIRS) * C_ELEMS_PER_BLOCK), s);
unsigned long long *const hC = allocHostVec<unsigned long long>(static_cast<size_t>(STRAT1_PAIRS) * C_ELEMS_PER_BLOCK);
initSymbols(dFD,dFJ, dGD,dGJ, dVD,dVJ, dS,dH,dK, dC, nrowF,nrowG,ncol,ncol, lddF,lddG,lddV, ((routine & HZ_BO_1) ? 1u : HZ_NSWEEP), s);
CUDA_CALL(hipMemcpy2DAsync(dFD, lddF * sizeof(cuD), hFD, ldhF * sizeof(double), nrowF * sizeof(cuD), ncol, hipMemcpyHostToDevice, s));
CUDA_CALL(hipMemcpy2DAsync(dFJ, lddF * sizeof(cuJ), hFJ, ldhF * sizeof(double), nrowF * sizeof(cuJ), ncol, hipMemcpyHostToDevice, s));
CUDA_CALL(hipMemcpy2DAsync(dGD, lddG * sizeof(cuD), hGD, ldhG * sizeof(double), nrowG * sizeof(cuD), ncol, hipMemcpyHostToDevice, s));
CUDA_CALL(hipMemcpy2DAsync(dGJ, lddG * sizeof(cuJ), hGJ, ldhG * sizeof(double), nrowG * sizeof(cuJ), ncol, hipMemcpyHostToDevice, s));
CUDA_CALL(hipMemcpy2DAsync(dVD, lddV * sizeof(cuD), hVD, ldhV * sizeof(double), ncol * sizeof(cuD), ncol, hipMemcpyHostToDevice, s));
CUDA_CALL(hipMemcpy2DAsync(dVJ, lddV * sizeof(cuJ), hVJ, ldhV * sizeof(double), ncol * sizeof(cuJ), ncol, hipMemcpyHostToDevice, s));
CUDA_CALL(hipStreamSynchronize(s));
#ifndef USE_MPI
cuda_prof_start();
#endif /* !USE_MPI */
#ifdef USE_MPI
const unsigned ifc0 = 0u;
const unsigned ifc1 = (ncol >> 1u);
initV(((CVG == 0) || (CVG == 1) || (CVG == 4) || (CVG == 5)), ncol, ifc0, ifc1, s);
#else /* !USE_MPI */
initV(((CVG == 0) || (CVG == 1) || (CVG == 4) || (CVG == 5)), ncol, s);
#endif /* ?USE_MPI */
CUDA_CALL(hipStreamSynchronize(s));
#ifdef ANIMATE
vn_cmplxvis_ctx *ctx = static_cast<vn_cmplxvis_ctx*>(NULL);
std::complex<double> *hDJ = static_cast<std::complex<double>*>(NULL);
size_t nrow = 0u;
// it is meant to work only for nrowF == nrowG
if (nrowF == nrowG) {
nrow = nrowF;
hDJ = allocHostMtx<std::complex<double>>(nrow, nrow, static_cast<size_t>(ncol), true);
}
if (ncol < 10000u) {
char fname[8] = { '\0' };
(void)sprintf(fname, "FG%x%04u", routine, ncol);
if (hDJ)
SYSI_CALL(vn_cmplxvis_start(&ctx, fname, (VN_CMPLXVIS_OP_AhA | VN_CMPLXVIS_FN_Lg), ncol, ncol, 1, 1, 7));
if (ctx) {
CUDA_CALL(hipMemcpy2DAsync(hFD, ldhF * sizeof(double), dFD, lddF * sizeof(cuD), nrowF * sizeof(cuD), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hFJ, ldhF * sizeof(double), dFJ, lddF * sizeof(cuJ), nrowF * sizeof(cuJ), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hGD, ldhG * sizeof(double), dGD, lddG * sizeof(cuD), nrowG * sizeof(cuD), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hGJ, ldhG * sizeof(double), dGJ, lddG * sizeof(cuJ), nrowG * sizeof(cuJ), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipStreamSynchronize(s));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhF = ldhF * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhF = offhF + i;
hDJ[ixDJ].real(hFD[ixhF]);
hDJ[ixDJ].imag(hFJ[ixhF]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhG = ldhG * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhG = offhG + i;
hDJ[ixDJ].real(hGD[ixhG]);
hDJ[ixDJ].imag(hGJ[ixhG]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
}
}
#endif /* ANIMATE */
timers[1] = stopwatch_lap(timers[3]);
const int ret = HZ_L2_gpu
(routine,ncol
#ifdef ANIMATE
, nrowF,nrowG, hFD,hFJ,ldhF, dFD,dFJ,lddF, hGD,hGJ,ldhG, dGD,dGJ,lddG,
#endif /* ANIMATE */
, hC,dC, glbSwp,glb_s,glb_b
#ifdef ANIMATE
, ctx,hDJ,nrow
#endif /* ANIMATE */
, s
);
timers[2] = stopwatch_lap(timers[3]);
#ifndef USE_MPI
cuda_prof_stop();
#endif /* !USE_MPI */
CUDA_CALL(hipMemcpy2DAsync(hFD, ldhF * sizeof(double), dFD, lddF * sizeof(cuD), nrowF * sizeof(cuD), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hFJ, ldhF * sizeof(double), dFJ, lddF * sizeof(cuJ), nrowF * sizeof(cuJ), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hGD, ldhG * sizeof(double), dGD, lddG * sizeof(cuD), nrowG * sizeof(cuD), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hGJ, ldhG * sizeof(double), dGJ, lddG * sizeof(cuJ), nrowG * sizeof(cuJ), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hVD, ldhV * sizeof(double), dVD, lddV * sizeof(cuD), ncol * sizeof(cuD), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpy2DAsync(hVJ, ldhV * sizeof(double), dVJ, lddV * sizeof(cuJ), ncol * sizeof(cuJ), ncol, hipMemcpyDeviceToHost, s));
CUDA_CALL(hipMemcpyAsync(hS, dS, ncol * sizeof(double), hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpyAsync(hH, dH, ncol * sizeof(double), hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpyAsync(hK, dK, ncol * sizeof(double), hipMemcpyDeviceToHost));
CUDA_CALL(hipStreamSynchronize(s));
#ifdef ANIMATE
if (ctx) {
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhF = ldhF * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhF = offhF + i;
hDJ[ixDJ].real(hFD[ixhF]);
hDJ[ixDJ].imag(hFJ[ixhF]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhG = ldhG * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhG = offhG + i;
hDJ[ixDJ].real(hGD[ixhG]);
hDJ[ixDJ].imag(hGJ[ixhG]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
SYSI_CALL(vn_cmplxvis_stop(ctx));
CUDA_CALL(hipHostFree((void*)hDJ));
}
#endif /* ANIMATE */
CUDA_CALL(hipHostFree(hC));
CUDA_CALL(hipFree(dC));
CUDA_CALL(hipFree(dK));
CUDA_CALL(hipFree(dH));
CUDA_CALL(hipFree(dS));
CUDA_CALL(hipFree(dVJ));
CUDA_CALL(hipFree(dVD));
CUDA_CALL(hipFree(dGJ));
CUDA_CALL(hipFree(dGD));
CUDA_CALL(hipFree(dFJ));
CUDA_CALL(hipFree(dFD));
timers[3] = stopwatch_lap(timers[3]);
timers[0] = stopwatch_lap(timers[0]);
if (timing)
for (unsigned i = 0u; i < 4u; ++i)
timing[i] = timers[i] * TS2S;
return ret;
}
|
a9336f79c85d412c5ebed22597fa5fdfd8990407.cu
|
#include "HZ.hpp"
#include "HZ_L.hpp"
#include "HZ_L2.hpp"
#include "device_code.hpp"
#include "cuda_memory_helper.hpp"
int // 0 if OK, < 0 if invalid argument, > 0 if error
HZ_L2_gpu
(const unsigned routine, // IN, routine ID, <= 15, (B_N_)_2,
// B: block-oriented (else, full-block), N: no sort;
const unsigned ncol, // IN, number of columns, <= min(nrowF, nrowG), == 0 (mod 32);
#ifdef ANIMATE
const unsigned nrowF, // IN, number of rows of F, == 0 (mod 64);
const unsigned nrowG, // IN, number of rows of G, == 0 (mod 64);
cuD *const hFD, // INOUT, ldhF x ncol host array in Fortran order;
cuJ *const hFJ, // INOUT, ldhF x ncol host array in Fortran order;
const unsigned ldhF, // IN, leading dimension of hF, >= nrowF;
cuD *const dFD, // INOUT, lddF x ncol device array in Fortran order;
cuJ *const dFJ, // INOUT, lddF x ncol device array in Fortran order;
const unsigned lddF, // IN, leading dimension of dF, >= nrowF;
cuD *const hGD, // INOUT, ldhG x ncol host array in Fortran order;
cuJ *const hGJ, // INOUT, ldhG x ncol host array in Fortran order;
const unsigned ldhG, // IN, leading dimension of hG, >= nrowG;
cuD *const dGD, // INOUT, lddG x ncol device array in Fortran order;
cuJ *const dGJ, // INOUT, lddG x ncol device array in Fortran order;
const unsigned lddG, // IN, leading dimension of dG, >= nrowG;
#endif /* ANIMATE */
unsigned long long *const hC, // OUT, convergence vector
unsigned long long *const dC, // OUT, convergence vector
unsigned &glbSwp, // OUT, number of sweeps at the outermost level;
unsigned long long &glb_s, // OUT, number of rotations;
unsigned long long &glb_b // OUT, number of ``big'' rotations;
#ifdef ANIMATE
, vn_cmplxvis_ctx *const ctx
, std::complex<double> *const hDJ
, const size_t nrow
#endif /* ANIMATE */
, const cudaStream_t s
) throw()
{
void (*const HZ_L1)(const unsigned, const cudaStream_t) = ((routine & 2u) ? HZ_L1_v : HZ_L1_sv);
const unsigned swp = ((routine & HZ_BO_2) ? 1u : HZ_NSWEEP);
// stats count
const unsigned sc = STRAT1_PAIRS * C_ELEMS_PER_BLOCK;
// stats len
const size_t sl = sc * sizeof(unsigned long long);
glb_s = 0ull;
glb_b = 0ull;
#if (defined(PROFILE) && (PROFILE == 0))
unsigned long long CLK_1 = 0ull;
unsigned long long CLK_2 = 0ull;
unsigned long long CLK_3 = 0ull;
unsigned long long CLK_4 = 0ull;
#endif /* ?PROFILE */
#ifndef USE_MPI
long long swp_tim = 0ll;
stopwatch_reset(swp_tim);
#endif /* !USE_MPI */
unsigned blk_swp = 0u;
while (blk_swp < swp) {
CUDA_CALL(cudaMemsetAsync(dC, 0, sl, s));
CUDA_CALL(cudaStreamSynchronize(s));
for (unsigned blk_stp = 0u; blk_stp < STRAT1_STEPS; ++blk_stp) {
if (blk_stp)
CUDA_CALL(cudaStreamSynchronize(s));
HZ_L1(blk_stp, s);
#ifdef ANIMATE
if (ctx) {
CUDA_CALL(cudaStreamSynchronize(s));
CUDA_CALL(cudaMemcpy2DAsync(hFD, ldhF * sizeof(double), dFD, lddF * sizeof(cuD), nrowF * sizeof(cuD), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hFJ, ldhF * sizeof(double), dFJ, lddF * sizeof(cuJ), nrowF * sizeof(cuJ), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hGD, ldhG * sizeof(double), dGD, lddG * sizeof(cuD), nrowG * sizeof(cuD), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hGJ, ldhG * sizeof(double), dGJ, lddG * sizeof(cuJ), nrowG * sizeof(cuJ), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaStreamSynchronize(s));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhF = ldhF * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhF = offhF + i;
hDJ[ixDJ].real(hFD[ixhF]);
hDJ[ixDJ].imag(hFJ[ixhF]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhG = ldhG * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhG = offhG + i;
hDJ[ixDJ].real(hGD[ixhG]);
hDJ[ixDJ].imag(hGJ[ixhG]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
}
#endif /* ANIMATE */
}
CUDA_CALL(cudaStreamSynchronize(s));
CUDA_CALL(cudaMemcpyAsync(hC, dC, sl, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaStreamSynchronize(s));
unsigned long long cvg_s = 0ull;
unsigned long long cvg_b = 0ull;
#if (defined(PROFILE) && (PROFILE == 0))
unsigned long long clk_1 = 0ull;
unsigned long long clk_2 = 0ull;
unsigned long long clk_3 = 0ull;
unsigned long long clk_4 = 0ull;
#endif /* ?PROFILE */
for (unsigned i = 0u; i < sc; i += C_ELEMS_PER_BLOCK) {
cvg_s += hC[i + C_SMALL];
cvg_b += hC[i + C_BIG];
#if (defined(PROFILE) && (PROFILE == 0))
if (clk_1 < hC[i + C_SUBPHASE_1])
clk_1 = hC[i + C_SUBPHASE_1];
if (clk_2 < hC[i + C_SUBPHASE_2])
clk_2 = hC[i + C_SUBPHASE_2];
if (clk_3 < hC[i + C_SUBPHASE_3])
clk_3 = hC[i + C_SUBPHASE_3];
if (clk_4 < hC[i + C_SUBPHASE_4])
clk_4 = hC[i + C_SUBPHASE_4];
#endif /* ?PROFILE */
}
glb_s += cvg_s;
glb_b += cvg_b;
#if (defined(PROFILE) && (PROFILE == 0))
CLK_1 += clk_1;
CLK_2 += clk_2;
CLK_3 += clk_3;
CLK_4 += clk_4;
#endif /* ?PROFILE */
#ifndef USE_MPI
const double tim_s = stopwatch_lap(swp_tim) * TS2S;
(void)fprintf(stdout, "BLK_SWP(%2u), ROT_S(%13llu), ROT_B(%13llu), TIME(%#14.6f s)", blk_swp, cvg_s, cvg_b, tim_s);
#if (defined(PROFILE) && (PROFILE == 0))
(void)fprintf(stdout, ", clk_1(%11llu), clk_2(%11llu), clk_3(%11llu), clk_4(%11llu)", clk_1, clk_2, clk_3, clk_4);
#endif /* ?PROFILE */
(void)fprintf(stdout, "\n");
(void)fflush(stdout);
#endif /* !USE_MPI */
if (!cvg_b)
break;
++blk_swp;
initS(0, ncol, s);
CUDA_CALL(cudaStreamSynchronize(s));
#ifdef ANIMATE
if (ctx) {
CUDA_CALL(cudaMemcpy2DAsync(hFD, ldhF * sizeof(double), dFD, lddF * sizeof(cuD), nrowF * sizeof(cuD), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hFJ, ldhF * sizeof(double), dFJ, lddF * sizeof(cuJ), nrowF * sizeof(cuJ), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hGD, ldhG * sizeof(double), dGD, lddG * sizeof(cuD), nrowG * sizeof(cuD), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hGJ, ldhG * sizeof(double), dGJ, lddG * sizeof(cuJ), nrowG * sizeof(cuJ), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaStreamSynchronize(s));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhF = ldhF * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhF = offhF + i;
hDJ[ixDJ].real(hFD[ixhF]);
hDJ[ixDJ].imag(hFJ[ixhF]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhG = ldhG * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhG = offhG + i;
hDJ[ixDJ].real(hGD[ixhG]);
hDJ[ixDJ].imag(hGJ[ixhG]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
}
#endif /* ANIMATE */
}
if (blk_swp < swp)
glbSwp = (blk_swp + 1u);
else
glbSwp = blk_swp;
#ifdef USE_MPI
if (blk_swp < swp)
initS(0, ncol, s);
#else /* !USE_MPI */
initS(1, ncol, s);
#endif /* ?USE_MPI */
CUDA_CALL(cudaStreamSynchronize(s));
#if (defined(PROFILE) && (PROFILE == 0))
(void)fprintf(stdout, "CLK_1(%13llu), CLK_2(%13llu), CLK_3(%13llu), CLK_4(%13llu)\n", CLK_1, CLK_2, CLK_3, CLK_4);
(void)fflush(stdout);
#endif /* ?PROFILE */
return 0;
}
int // 0 if OK, < 0 if invalid argument, > 0 if error
HZ_L2
(const unsigned routine, // IN, routine ID, <= 15, (B_N_)_2,
// B: block-oriented (else, full-block), N: no sort;
const unsigned nrowF, // IN, number of rows of F, == 0 (mod 64);
const unsigned nrowG, // IN, number of rows of G, == 0 (mod 64);
const unsigned ncol, // IN, number of columns, <= min(nrowF, nrowG), == 0 (mod 32);
cuD *const hFD, // INOUT, ldhF x ncol host array in Fortran order;
cuJ *const hFJ, // INOUT, ldhF x ncol host array in Fortran order;
const unsigned ldhF, // IN, leading dimension of F, >= nrowF;
cuD *const hGD, // INOUT, ldhG x ncol host array in Fortran order;
cuJ *const hGJ, // INOUT, ldhG x ncol host array in Fortran order;
const unsigned ldhG, // IN, leading dimension of G, >= nrowG;
cuD *const hVD, // INOUT, ldhV x ncol host array in Fortran order;
cuJ *const hVJ, // INOUT, ldhV x ncol host array in Fortran order;
const unsigned ldhV, // IN, leading dimension of V, >= ncol;
double *const hS, // OUT, the generalized singular values, optionally sorted in descending order;
double *const hH, // ||F_i||_F/sqrt(||F_i||_F^2 + ||G_i||_F^2);
double *const hK, // ||G_i||_F/sqrt(||F_i||_F^2 + ||G_i||_F^2);
unsigned &glbSwp, // OUT, number of sweeps at the outermost level;
unsigned long long &glb_s, // OUT, number of rotations;
unsigned long long &glb_b, // OUT, number of ``big'' rotations;
double *const timing, // OUT, optional, in seconds, double[4] ==
// WALL, SETUP & HOST ==> GPUs, COMPUTATION, CLEANUP & GPUs ==> HOST;
const cudaStream_t s
) throw()
{
long long timers[4] = { 0ll };
stopwatch_reset(timers[0]);
if (routine >= 16u)
return -1;
if (!nrowF || (nrowF % 64u))
return -2;
if (!nrowG || (nrowG % 64u))
return -3;
if (!ncol || (ncol > nrowF) || (ncol > nrowG) || (ncol % 32u))
return -4;
if (!hFD)
return -5;
if (!hFJ)
return -6;
if (ldhF < nrowF)
return -7;
if (!hGD)
return -8;
if (!hGJ)
return -9;
if (ldhG < nrowG)
return -10;
if (!hVD)
return -11;
if (!hVJ)
return -12;
if (ldhV < ncol)
return -13;
if (!hS)
return -14;
if (!hH)
return -15;
if (!hK)
return -16;
stopwatch_reset(timers[3]);
size_t lddF = static_cast<size_t>(nrowF);
cuD *const dFD = allocDeviceMtx<cuD>(lddF, static_cast<size_t>(nrowF), static_cast<size_t>(ncol), true, s);
cuJ *const dFJ = allocDeviceMtx<cuJ>(lddF, static_cast<size_t>(nrowF), static_cast<size_t>(ncol), true, s);
size_t lddG = static_cast<size_t>(nrowG);
cuD *const dGD = allocDeviceMtx<cuD>(lddG, static_cast<size_t>(nrowG), static_cast<size_t>(ncol), true, s);
cuJ *const dGJ = allocDeviceMtx<cuJ>(lddG, static_cast<size_t>(nrowG), static_cast<size_t>(ncol), true, s);
size_t lddV = static_cast<size_t>(ncol);
cuD *const dVD = allocDeviceMtx<cuD>(lddV, static_cast<size_t>(ncol), static_cast<size_t>(ncol), true, s);
cuJ *const dVJ = allocDeviceMtx<cuJ>(lddV, static_cast<size_t>(ncol), static_cast<size_t>(ncol), true, s);
double *const dS = allocDeviceVec<double>(static_cast<size_t>(ncol), s);
double *const dH = allocDeviceVec<double>(static_cast<size_t>(ncol), s);
double *const dK = allocDeviceVec<double>(static_cast<size_t>(ncol), s);
unsigned long long *const dC = allocDeviceVec<unsigned long long>((static_cast<size_t>(STRAT1_PAIRS) * C_ELEMS_PER_BLOCK), s);
unsigned long long *const hC = allocHostVec<unsigned long long>(static_cast<size_t>(STRAT1_PAIRS) * C_ELEMS_PER_BLOCK);
initSymbols(dFD,dFJ, dGD,dGJ, dVD,dVJ, dS,dH,dK, dC, nrowF,nrowG,ncol,ncol, lddF,lddG,lddV, ((routine & HZ_BO_1) ? 1u : HZ_NSWEEP), s);
CUDA_CALL(cudaMemcpy2DAsync(dFD, lddF * sizeof(cuD), hFD, ldhF * sizeof(double), nrowF * sizeof(cuD), ncol, cudaMemcpyHostToDevice, s));
CUDA_CALL(cudaMemcpy2DAsync(dFJ, lddF * sizeof(cuJ), hFJ, ldhF * sizeof(double), nrowF * sizeof(cuJ), ncol, cudaMemcpyHostToDevice, s));
CUDA_CALL(cudaMemcpy2DAsync(dGD, lddG * sizeof(cuD), hGD, ldhG * sizeof(double), nrowG * sizeof(cuD), ncol, cudaMemcpyHostToDevice, s));
CUDA_CALL(cudaMemcpy2DAsync(dGJ, lddG * sizeof(cuJ), hGJ, ldhG * sizeof(double), nrowG * sizeof(cuJ), ncol, cudaMemcpyHostToDevice, s));
CUDA_CALL(cudaMemcpy2DAsync(dVD, lddV * sizeof(cuD), hVD, ldhV * sizeof(double), ncol * sizeof(cuD), ncol, cudaMemcpyHostToDevice, s));
CUDA_CALL(cudaMemcpy2DAsync(dVJ, lddV * sizeof(cuJ), hVJ, ldhV * sizeof(double), ncol * sizeof(cuJ), ncol, cudaMemcpyHostToDevice, s));
CUDA_CALL(cudaStreamSynchronize(s));
#ifndef USE_MPI
cuda_prof_start();
#endif /* !USE_MPI */
#ifdef USE_MPI
const unsigned ifc0 = 0u;
const unsigned ifc1 = (ncol >> 1u);
initV(((CVG == 0) || (CVG == 1) || (CVG == 4) || (CVG == 5)), ncol, ifc0, ifc1, s);
#else /* !USE_MPI */
initV(((CVG == 0) || (CVG == 1) || (CVG == 4) || (CVG == 5)), ncol, s);
#endif /* ?USE_MPI */
CUDA_CALL(cudaStreamSynchronize(s));
#ifdef ANIMATE
vn_cmplxvis_ctx *ctx = static_cast<vn_cmplxvis_ctx*>(NULL);
std::complex<double> *hDJ = static_cast<std::complex<double>*>(NULL);
size_t nrow = 0u;
// it is meant to work only for nrowF == nrowG
if (nrowF == nrowG) {
nrow = nrowF;
hDJ = allocHostMtx<std::complex<double>>(nrow, nrow, static_cast<size_t>(ncol), true);
}
if (ncol < 10000u) {
char fname[8] = { '\0' };
(void)sprintf(fname, "FG%x%04u", routine, ncol);
if (hDJ)
SYSI_CALL(vn_cmplxvis_start(&ctx, fname, (VN_CMPLXVIS_OP_AhA | VN_CMPLXVIS_FN_Lg), ncol, ncol, 1, 1, 7));
if (ctx) {
CUDA_CALL(cudaMemcpy2DAsync(hFD, ldhF * sizeof(double), dFD, lddF * sizeof(cuD), nrowF * sizeof(cuD), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hFJ, ldhF * sizeof(double), dFJ, lddF * sizeof(cuJ), nrowF * sizeof(cuJ), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hGD, ldhG * sizeof(double), dGD, lddG * sizeof(cuD), nrowG * sizeof(cuD), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hGJ, ldhG * sizeof(double), dGJ, lddG * sizeof(cuJ), nrowG * sizeof(cuJ), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaStreamSynchronize(s));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhF = ldhF * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhF = offhF + i;
hDJ[ixDJ].real(hFD[ixhF]);
hDJ[ixDJ].imag(hFJ[ixhF]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhG = ldhG * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhG = offhG + i;
hDJ[ixDJ].real(hGD[ixhG]);
hDJ[ixDJ].imag(hGJ[ixhG]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
}
}
#endif /* ANIMATE */
timers[1] = stopwatch_lap(timers[3]);
const int ret = HZ_L2_gpu
(routine,ncol
#ifdef ANIMATE
, nrowF,nrowG, hFD,hFJ,ldhF, dFD,dFJ,lddF, hGD,hGJ,ldhG, dGD,dGJ,lddG,
#endif /* ANIMATE */
, hC,dC, glbSwp,glb_s,glb_b
#ifdef ANIMATE
, ctx,hDJ,nrow
#endif /* ANIMATE */
, s
);
timers[2] = stopwatch_lap(timers[3]);
#ifndef USE_MPI
cuda_prof_stop();
#endif /* !USE_MPI */
CUDA_CALL(cudaMemcpy2DAsync(hFD, ldhF * sizeof(double), dFD, lddF * sizeof(cuD), nrowF * sizeof(cuD), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hFJ, ldhF * sizeof(double), dFJ, lddF * sizeof(cuJ), nrowF * sizeof(cuJ), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hGD, ldhG * sizeof(double), dGD, lddG * sizeof(cuD), nrowG * sizeof(cuD), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hGJ, ldhG * sizeof(double), dGJ, lddG * sizeof(cuJ), nrowG * sizeof(cuJ), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hVD, ldhV * sizeof(double), dVD, lddV * sizeof(cuD), ncol * sizeof(cuD), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpy2DAsync(hVJ, ldhV * sizeof(double), dVJ, lddV * sizeof(cuJ), ncol * sizeof(cuJ), ncol, cudaMemcpyDeviceToHost, s));
CUDA_CALL(cudaMemcpyAsync(hS, dS, ncol * sizeof(double), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpyAsync(hH, dH, ncol * sizeof(double), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpyAsync(hK, dK, ncol * sizeof(double), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaStreamSynchronize(s));
#ifdef ANIMATE
if (ctx) {
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhF = ldhF * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhF = offhF + i;
hDJ[ixDJ].real(hFD[ixhF]);
hDJ[ixDJ].imag(hFJ[ixhF]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
for (unsigned j = 0u; j < ncol; ++j) {
const size_t offDJ = ldhDJ * j;
const size_t offhG = ldhG * j;
for (unsigned i = 0u; i < nrow; ++i) {
const size_t ixDJ = offDJ + i;
const size_t ixhG = offhG + i;
hDJ[ixDJ].real(hGD[ixhG]);
hDJ[ixDJ].imag(hGJ[ixhG]);
}
}
SYSI_CALL(vn_cmplxvis_frame(ctx, (const vn_complex*)hDJ, nrow));
SYSI_CALL(vn_cmplxvis_stop(ctx));
CUDA_CALL(cudaFreeHost((void*)hDJ));
}
#endif /* ANIMATE */
CUDA_CALL(cudaFreeHost(hC));
CUDA_CALL(cudaFree(dC));
CUDA_CALL(cudaFree(dK));
CUDA_CALL(cudaFree(dH));
CUDA_CALL(cudaFree(dS));
CUDA_CALL(cudaFree(dVJ));
CUDA_CALL(cudaFree(dVD));
CUDA_CALL(cudaFree(dGJ));
CUDA_CALL(cudaFree(dGD));
CUDA_CALL(cudaFree(dFJ));
CUDA_CALL(cudaFree(dFD));
timers[3] = stopwatch_lap(timers[3]);
timers[0] = stopwatch_lap(timers[0]);
if (timing)
for (unsigned i = 0u; i < 4u; ++i)
timing[i] = timers[i] * TS2S;
return ret;
}
|
954377c5b9a2ae78ff75160865a623c47e58cc80.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void set_kernel(const int n, const float alpha, float *y) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);
i += blockDim.x * gridDim.x) {
y[i] = alpha;
}
}
|
954377c5b9a2ae78ff75160865a623c47e58cc80.cu
|
#include "includes.h"
__global__ void set_kernel(const int n, const float alpha, float *y) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);
i += blockDim.x * gridDim.x) {
y[i] = alpha;
}
}
|
bf03ca1e386fde3a253ea0fd789005b8562f33f8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
//#include "thrust/device_vector.h"
//#include "device_atomic_functions.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/custom_layers.hpp"
namespace caffe {
template <typename Dtype>
void GramLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int num = bottom[0]->shape(0);
int channel = bottom[0]->shape(1);
int spatial_dim = bottom[0]->shape(2) * bottom[0]->shape(3);
for (int n=0; n < num; n++){
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, channel, channel, spatial_dim,
1 / (Dtype)spatial_dim / (Dtype)channel, bottom_data + n * spatial_dim * channel, bottom_data + n * spatial_dim * channel, Dtype(0), top_data + n * channel * channel);
}
}
template <typename Dtype>
__global__ void FixDiagDiff(const int num, Dtype* in_out,int channel) {
CUDA_KERNEL_LOOP(index, num*channel) {
int n = index / channel;
int s = index % channel;
in_out[n*channel*channel + s*channel + s] *= 2;
}
}
template <typename Dtype>
void GramLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_diff = top[0]->mutable_gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int num = bottom[0]->shape(0);
int channel = bottom[0]->shape(1);
int spatial_dim = bottom[0]->shape(2) * bottom[0]->shape(3);
FixDiagDiff<Dtype> << <CAFFE_GET_BLOCKS(num*channel), CAFFE_CUDA_NUM_THREADS >> >(
num, top_diff, channel);
for (int n=0; n < num; n++){
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channel, spatial_dim, channel,
1 / (Dtype)spatial_dim / (Dtype)channel, top_diff + n * channel * channel, bottom_data + n * spatial_dim * channel, Dtype(0), bottom_diff + n * spatial_dim * channel);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(GramLayer);
} // namespace caffe
|
bf03ca1e386fde3a253ea0fd789005b8562f33f8.cu
|
#include <algorithm>
#include <cfloat>
#include <vector>
//#include "thrust/device_vector.h"
//#include "device_atomic_functions.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/custom_layers.hpp"
namespace caffe {
template <typename Dtype>
void GramLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int num = bottom[0]->shape(0);
int channel = bottom[0]->shape(1);
int spatial_dim = bottom[0]->shape(2) * bottom[0]->shape(3);
for (int n=0; n < num; n++){
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, channel, channel, spatial_dim,
1 / (Dtype)spatial_dim / (Dtype)channel, bottom_data + n * spatial_dim * channel, bottom_data + n * spatial_dim * channel, Dtype(0), top_data + n * channel * channel);
}
}
template <typename Dtype>
__global__ void FixDiagDiff(const int num, Dtype* in_out,int channel) {
CUDA_KERNEL_LOOP(index, num*channel) {
int n = index / channel;
int s = index % channel;
in_out[n*channel*channel + s*channel + s] *= 2;
}
}
template <typename Dtype>
void GramLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_diff = top[0]->mutable_gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int num = bottom[0]->shape(0);
int channel = bottom[0]->shape(1);
int spatial_dim = bottom[0]->shape(2) * bottom[0]->shape(3);
FixDiagDiff<Dtype> << <CAFFE_GET_BLOCKS(num*channel), CAFFE_CUDA_NUM_THREADS >> >(
num, top_diff, channel);
for (int n=0; n < num; n++){
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channel, spatial_dim, channel,
1 / (Dtype)spatial_dim / (Dtype)channel, top_diff + n * channel * channel, bottom_data + n * spatial_dim * channel, Dtype(0), bottom_diff + n * spatial_dim * channel);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(GramLayer);
} // namespace caffe
|
ffd2a15aee3d749cceaf6ffc117b76ff4fa5f5b3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Maximo( double *input, double *results, int n ) {
extern __shared__ double sdata[];
int idx = blockIdx.x * blockDim.x + threadIdx.x, tx = threadIdx.x;
double x = 0.;
if( idx < n ) {
x = input[ idx ];
}
sdata[ tx ] = x;
__syncthreads( );
for( int offset = blockDim.x / 2; offset > 0; offset >>= 1 ) {
if( tx < offset ) {
if( sdata[ tx ] < sdata[ tx + offset ] ) {
sdata[ tx ] = sdata[ tx + offset ];
}
}
__syncthreads( );
}
if( threadIdx.x == 0 ) {
results[ blockIdx.x ] = sdata[ 0 ];
}
}
|
ffd2a15aee3d749cceaf6ffc117b76ff4fa5f5b3.cu
|
#include "includes.h"
__global__ void Maximo( double *input, double *results, int n ) {
extern __shared__ double sdata[];
int idx = blockIdx.x * blockDim.x + threadIdx.x, tx = threadIdx.x;
double x = 0.;
if( idx < n ) {
x = input[ idx ];
}
sdata[ tx ] = x;
__syncthreads( );
for( int offset = blockDim.x / 2; offset > 0; offset >>= 1 ) {
if( tx < offset ) {
if( sdata[ tx ] < sdata[ tx + offset ] ) {
sdata[ tx ] = sdata[ tx + offset ];
}
}
__syncthreads( );
}
if( threadIdx.x == 0 ) {
results[ blockIdx.x ] = sdata[ 0 ];
}
}
|
da25c117e15ed8ebae87fbf30b5930dd1d98a028.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
// modified from
// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
// Original license: Apache 2.0
// clang-format off
// modify from
// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
/*!
******************* BEGIN Caffe Copyright Notice and Disclaimer *****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
*FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
*DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
*SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
*CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
*OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
*OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer *********************
*
* Copyright (c) 2018 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file modulated_deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
*/
#include <ATen/ATen.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <THH/THHAtomics.cuh>
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
namespace {
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N) {
return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
}
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(
const scalar_t* bottom_data,
const int data_width,
const int height,
const int width,
scalar_t h,
scalar_t w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t get_gradient_weight(
scalar_t argmax_h,
scalar_t argmax_w,
const int h,
const int w,
const int height,
const int width) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 ||
argmax_w >= width) {
// empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t get_coordinate_weight(
scalar_t argmax_h,
scalar_t argmax_w,
const int height,
const int width,
const scalar_t* im_data,
const int data_width,
const int bp_dir) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 ||
argmax_w >= width) {
// empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) *
im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
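// im2col for deformable convolution: each thread handles one
// (channel, batch, h_col, w_col) output location, reads the per-group offsets,
// bilinearly samples the input at the displaced positions, and writes the
// kernel_h * kernel_w samples into the column buffer data_col, laid out as
// (c_im * kernel_h * kernel_w + k, batch, h_col, w_col).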
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(
const int n,
const scalar_t* data_im,
const scalar_t* data_offset,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int num_channels,
const int deformable_group,
const int height_col,
const int width_col,
scalar_t* data_col) {
CUDA_KERNEL_LOOP(index, n) {
// index: linear index into the output column matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t* data_col_ptr = data_col +
((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
// const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) *
// height + h_in) * width + w_in;
const scalar_t* data_im_ptr =
data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t* data_offset_ptr = data_offset +
(b_col * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
// const scalar_t map_h = i * dilation_h + offset_h;
// const scalar_t map_w = j * dilation_w + offset_w;
// const int cur_height = height - h_in;
// const int cur_width = width - w_in;
// val = deformable_im2col_bilinear(data_im_ptr, width, cur_height,
// cur_width, map_h, map_w);
val = deformable_im2col_bilinear(
data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
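// col2im for deformable convolution: each thread takes one column-buffer
// entry, recomputes its fractional sampling location from the offsets, and
// atomically scatters its gradient onto grad_im. The +/-2 window below
// conservatively covers the (at most four) integer neighbors within distance 1.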
template <typename scalar_t>
__global__ void deformable_col2im_gpu_kernel(
const int n,
const scalar_t* data_col,
const scalar_t* data_offset,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int deformable_group,
const int height_col,
const int width_col,
scalar_t* grad_im) {
CUDA_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i =
(index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c =
index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t* data_offset_ptr = data_offset +
(b * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 &&
cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1) {
int cur_bottom_grad_pos =
((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = get_gradient_weight(
cur_inv_h_data,
cur_inv_w_data,
cur_h + dy,
cur_w + dx,
height,
width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
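// Gradient of the column buffer with respect to the offsets: each thread owns
// one offset component (h or w, selected by bp_dir) and accumulates
// d(sample)/d(offset) * upstream gradient over every input channel of its
// deformable group at this kernel position (they all share the same offset).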
template <typename scalar_t>
__global__ void deformable_col2im_coord_gpu_kernel(
const int n,
const scalar_t* data_col,
const scalar_t* data_im,
const scalar_t* data_offset,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int offset_channels,
const int deformable_group,
const int height_col,
const int width_col,
scalar_t* grad_offset) {
CUDA_KERNEL_LOOP(index, n) {
scalar_t val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t* data_col_ptr = data_col +
deformable_group_index * channel_per_deformable_group * batch_size *
width_col * height_col;
const scalar_t* data_im_ptr = data_im +
(b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t* data_offset_ptr = data_offset +
(b * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group;
col_c += col_step) {
const int col_pos =
(((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i =
(col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr =
(((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr =
(((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col +
w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
}
const scalar_t weight = get_coordinate_weight(
inv_h,
inv_w,
height,
width,
data_im_ptr + cnt * height * width,
width,
bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
namespace detectron2 {
void deformable_im2col(
const at::Tensor data_im,
const at::Tensor data_offset,
const int channels,
const int height,
const int width,
const int ksize_h,
const int ksize_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int parallel_imgs,
const int deformable_group,
at::Tensor data_col) {
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
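// Standard convolution output size:
// out = (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1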
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_im.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "deformable_im2col_gpu", ([&] {
const scalar_t* data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t* data_col_ = data_col.data_ptr<scalar_t>();
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel),
dim3(GET_BLOCKS(num_kernels)),
dim3(CUDA_NUM_THREADS),
0,
stream,
num_kernels,
data_im_,
data_offset_,
height,
width,
ksize_h,
ksize_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
channel_per_deformable_group,
parallel_imgs,
channels,
deformable_group,
height_col,
width_col,
data_col_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in deformable_im2col: %s\n", hipGetErrorString(err));
}
}
void deformable_col2im(
const at::Tensor data_col,
const at::Tensor data_offset,
const int channels,
const int height,
const int width,
const int ksize_h,
const int ksize_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int parallel_imgs,
const int deformable_group,
at::Tensor grad_im) {
// todo: make sure parallel_imgs is passed in correctly
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels =
channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_gpu", ([&] {
const scalar_t* data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>();
hipLaunchKernelGGL(( deformable_col2im_gpu_kernel),
dim3(GET_BLOCKS(num_kernels)),
dim3(CUDA_NUM_THREADS),
0,
stream,
num_kernels,
data_col_,
data_offset_,
channels,
height,
width,
ksize_h,
ksize_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
channel_per_deformable_group,
parallel_imgs,
deformable_group,
height_col,
width_col,
grad_im_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in deformable_col2im: %s\n", hipGetErrorString(err));
}
}
void deformable_col2im_coord(
const at::Tensor data_col,
const at::Tensor data_im,
const at::Tensor data_offset,
const int channels,
const int height,
const int width,
const int ksize_h,
const int ksize_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int parallel_imgs,
const int deformable_group,
at::Tensor grad_offset) {
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w *
deformable_group * parallel_imgs;
int channel_per_deformable_group =
channels * ksize_h * ksize_w / deformable_group;
at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] {
const scalar_t* data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t* data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>();
hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel),
dim3(GET_BLOCKS(num_kernels)),
dim3(CUDA_NUM_THREADS),
0,
stream,
num_kernels,
data_col_,
data_im_,
data_offset_,
channels,
height,
width,
ksize_h,
ksize_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
channel_per_deformable_group,
parallel_imgs,
2 * ksize_h * ksize_w * deformable_group,
deformable_group,
height_col,
width_col,
grad_offset_);
}));
}
} // namespace detectron2
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(
const scalar_t* bottom_data,
const int data_width,
const int height,
const int width,
scalar_t h,
scalar_t w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_gradient_weight(
scalar_t argmax_h,
scalar_t argmax_w,
const int h,
const int w,
const int height,
const int width) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 ||
argmax_w >= width) {
// empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_coordinate_weight(
scalar_t argmax_h,
scalar_t argmax_w,
const int height,
const int width,
const scalar_t* im_data,
const int data_width,
const int bp_dir) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 ||
argmax_w >= width) {
// empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) *
im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
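// Modulated (DCNv2) variant of the im2col kernel: identical to
// deformable_im2col_gpu_kernel above, except that each bilinearly sampled
// value is additionally scaled by a learned per-location modulation mask.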
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(
const int n,
const scalar_t* data_im,
const scalar_t* data_offset,
const scalar_t* data_mask,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int num_channels,
const int deformable_group,
const int height_col,
const int width_col,
scalar_t* data_col) {
CUDA_KERNEL_LOOP(index, n) {
// index: linear index into the output column matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t* data_col_ptr = data_col +
((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
// const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) *
// height + h_in) * width + w_in;
const scalar_t* data_im_ptr =
data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t* data_offset_ptr = data_offset +
(b_col * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
const scalar_t* data_mask_ptr = data_mask +
(b_col * deformable_group + deformable_group_index) * kernel_h *
kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
const int data_mask_hw_ptr =
((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
// if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
// const float map_h = i * dilation_h + offset_h;
// const float map_w = j * dilation_w + offset_w;
// const int cur_height = height - h_in;
// const int cur_width = width - w_in;
// val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height,
// cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(
data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
// data_col_ptr += height_col * width_col;
}
}
}
}
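// Modulated (DCNv2) col2im: the upstream column gradient is scaled by the mask
// before being scattered back onto grad_im with atomicAdd.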
template <typename scalar_t>
__global__ void modulated_deformable_col2im_gpu_kernel(
const int n,
const scalar_t* data_col,
const scalar_t* data_offset,
const scalar_t* data_mask,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int deformable_group,
const int height_col,
const int width_col,
scalar_t* grad_im) {
CUDA_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i =
(index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c =
index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t* data_offset_ptr = data_offset +
(b * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
const scalar_t* data_mask_ptr = data_mask +
(b * deformable_group + deformable_group_index) * kernel_h * kernel_w *
height_col * width_col;
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr =
((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 &&
cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1) {
int cur_bottom_grad_pos =
((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = dmcn_get_gradient_weight(
cur_inv_h_data,
cur_inv_w_data,
cur_h + dy,
cur_w + dx,
height,
width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
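// Modulated (DCNv2) coordinate/mask backward: `val` accumulates the offset
// gradient (as in the plain deformable case, but scaled by the mask), while
// `mval` accumulates the bilinear sample times the upstream gradient, i.e. the
// gradient of the modulation mask.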
template <typename scalar_t>
__global__ void modulated_deformable_col2im_coord_gpu_kernel(
const int n,
const scalar_t* data_col,
const scalar_t* data_im,
const scalar_t* data_offset,
const scalar_t* data_mask,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int offset_channels,
const int deformable_group,
const int height_col,
const int width_col,
scalar_t* grad_offset,
scalar_t* grad_mask) {
CUDA_KERNEL_LOOP(index, n) {
scalar_t val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t* data_col_ptr = data_col +
deformable_group_index * channel_per_deformable_group * batch_size *
width_col * height_col;
const scalar_t* data_im_ptr = data_im +
(b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t* data_offset_ptr = data_offset +
(b * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
const scalar_t* data_mask_ptr = data_mask +
(b * deformable_group + deformable_group_index) * kernel_h * kernel_w *
height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group;
col_c += col_step) {
const int col_pos =
(((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i =
(col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr =
(((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr =
(((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col +
w_out);
const int data_mask_hw_ptr =
(((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
} else {
mval += data_col_ptr[col_pos] *
dmcn_im2col_bilinear(
data_im_ptr + cnt * height * width,
width,
height,
width,
inv_h,
inv_w);
}
const scalar_t weight = dmcn_get_coordinate_weight(
inv_h,
inv_w,
height,
width,
data_im_ptr + cnt * height * width,
width,
bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
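// Each kernel position has a single mask value shared by its (h, w) offset
// pair, so the mask gradient is written only from the even (h) offset channel.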
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group +
// deformable_group_index) * kernel_h * kernel_w + offset_c / 2) *
// height_col + h) * width_col + w], mask_req, mval);
grad_mask
[(((b * deformable_group + deformable_group_index) * kernel_h *
kernel_w +
offset_c / 2) *
height_col +
h) *
width_col +
w] = mval;
}
}
namespace detectron2 {
void modulated_deformable_im2col_cuda(
const at::Tensor data_im,
const at::Tensor data_offset,
const at::Tensor data_mask,
const int batch_size,
const int channels,
const int height_im,
const int width_im,
const int height_col,
const int width_col,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int deformable_group,
at::Tensor data_col) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_im.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t* data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t* data_col_ = data_col.data_ptr<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel),
dim3(GET_BLOCKS(num_kernels)),
dim3(CUDA_NUM_THREADS),
0,
stream,
num_kernels,
data_im_,
data_offset_,
data_mask_,
height_im,
width_im,
kernel_h,
kernel_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
channel_per_deformable_group,
batch_size,
channels,
deformable_group,
height_col,
width_col,
data_col_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf(
"error in modulated_deformable_im2col_cuda: %s\n",
hipGetErrorString(err));
}
}
void modulated_deformable_col2im_cuda(
const at::Tensor data_col,
const at::Tensor data_offset,
const at::Tensor data_mask,
const int batch_size,
const int channels,
const int height_im,
const int width_im,
const int height_col,
const int width_col,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int deformable_group,
at::Tensor grad_im) {
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels =
channels * kernel_h * kernel_w * batch_size * height_col * width_col;
at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t* data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel),
dim3(GET_BLOCKS(num_kernels)),
dim3(CUDA_NUM_THREADS),
0,
stream,
num_kernels,
data_col_,
data_offset_,
data_mask_,
channels,
height_im,
width_im,
kernel_h,
kernel_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
channel_per_deformable_group,
batch_size,
deformable_group,
height_col,
width_col,
grad_im_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf(
"error in modulated_deformable_col2im_cuda: %s\n",
hipGetErrorString(err));
}
}
void modulated_deformable_col2im_coord_cuda(
const at::Tensor data_col,
const at::Tensor data_im,
const at::Tensor data_offset,
const at::Tensor data_mask,
const int batch_size,
const int channels,
const int height_im,
const int width_im,
const int height_col,
const int width_col,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int deformable_group,
at::Tensor grad_offset,
at::Tensor grad_mask) {
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h *
kernel_w * deformable_group;
const int channel_per_deformable_group =
channels * kernel_h * kernel_w / deformable_group;
at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t* data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t* data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>();
scalar_t* grad_mask_ = grad_mask.data_ptr<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel),
dim3(GET_BLOCKS(num_kernels)),
dim3(CUDA_NUM_THREADS),
0,
stream,
num_kernels,
data_col_,
data_im_,
data_offset_,
data_mask_,
channels,
height_im,
width_im,
kernel_h,
kernel_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
channel_per_deformable_group,
batch_size,
2 * kernel_h * kernel_w * deformable_group,
deformable_group,
height_col,
width_col,
grad_offset_,
grad_mask_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf(
"error in modulated_deformable_col2im_coord_cuda: %s\n",
hipGetErrorString(err));
}
}
} // namespace detectron2
|
da25c117e15ed8ebae87fbf30b5930dd1d98a028.cu
|
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
// modified from
// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
// Original license: Apache 2.0
// clang-format off
// modify from
// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
/*!
******************* BEGIN Caffe Copyright Notice and Disclaimer *****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
*FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
*DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
*SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
*CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
*OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
*OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer *********************
*
* Copyright (c) 2018 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file modulated_deformable_im2col.cuh
* \brief Function definitions for converting an image to a
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
*/
#include <ATen/ATen.h>
#include <c10/cuda/CUDAGuard.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <THC/THCAtomics.cuh>
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
namespace {
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N) {
return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
}
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(
const scalar_t* bottom_data,
const int data_width,
const int height,
const int width,
scalar_t h,
scalar_t w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t get_gradient_weight(
scalar_t argmax_h,
scalar_t argmax_w,
const int h,
const int w,
const int height,
const int width) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 ||
argmax_w >= width) {
// empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t get_coordinate_weight(
scalar_t argmax_h,
scalar_t argmax_w,
const int height,
const int width,
const scalar_t* im_data,
const int data_width,
const int bp_dir) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 ||
argmax_w >= width) {
// empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) *
im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(
const int n,
const scalar_t* data_im,
const scalar_t* data_offset,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int num_channels,
const int deformable_group,
const int height_col,
const int width_col,
scalar_t* data_col) {
CUDA_KERNEL_LOOP(index, n) {
// index: linear index into the output column matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t* data_col_ptr = data_col +
((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
// const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) *
// height + h_in) * width + w_in;
const scalar_t* data_im_ptr =
data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t* data_offset_ptr = data_offset +
(b_col * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
// const scalar_t map_h = i * dilation_h + offset_h;
// const scalar_t map_w = j * dilation_w + offset_w;
// const int cur_height = height - h_in;
// const int cur_width = width - w_in;
// val = deformable_im2col_bilinear(data_im_ptr, width, cur_height,
// cur_width, map_h, map_w);
val = deformable_im2col_bilinear(
data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
template <typename scalar_t>
__global__ void deformable_col2im_gpu_kernel(
const int n,
const scalar_t* data_col,
const scalar_t* data_offset,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int deformable_group,
const int height_col,
const int width_col,
scalar_t* grad_im) {
CUDA_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i =
(index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c =
index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t* data_offset_ptr = data_offset +
(b * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 &&
cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1) {
int cur_bottom_grad_pos =
((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = get_gradient_weight(
cur_inv_h_data,
cur_inv_w_data,
cur_h + dy,
cur_w + dx,
height,
width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
template <typename scalar_t>
__global__ void deformable_col2im_coord_gpu_kernel(
const int n,
const scalar_t* data_col,
const scalar_t* data_im,
const scalar_t* data_offset,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int offset_channels,
const int deformable_group,
const int height_col,
const int width_col,
scalar_t* grad_offset) {
CUDA_KERNEL_LOOP(index, n) {
scalar_t val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t* data_col_ptr = data_col +
deformable_group_index * channel_per_deformable_group * batch_size *
width_col * height_col;
const scalar_t* data_im_ptr = data_im +
(b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t* data_offset_ptr = data_offset +
(b * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group;
col_c += col_step) {
const int col_pos =
(((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i =
(col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr =
(((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr =
(((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col +
w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
}
const scalar_t weight = get_coordinate_weight(
inv_h,
inv_w,
height,
width,
data_im_ptr + cnt * height * width,
width,
bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
namespace detectron2 {
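// Host-side wrappers: compute the column-buffer dimensions, pick up the
// current CUDA device and stream, and launch the corresponding kernel over
// parallel_imgs images at a time.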
void deformable_im2col(
const at::Tensor data_im,
const at::Tensor data_offset,
const int channels,
const int height,
const int width,
const int ksize_h,
const int ksize_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int parallel_imgs,
const int deformable_group,
at::Tensor data_col) {
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
at::cuda::CUDAGuard device_guard(data_im.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
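// Instantiate and launch the kernel for the tensor's scalar type
// (float, double, or half).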
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "deformable_im2col_gpu", ([&] {
const scalar_t* data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t* data_col_ = data_col.data_ptr<scalar_t>();
deformable_im2col_gpu_kernel<<<
GET_BLOCKS(num_kernels),
CUDA_NUM_THREADS,
0,
stream>>>(
num_kernels,
data_im_,
data_offset_,
height,
width,
ksize_h,
ksize_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
channel_per_deformable_group,
parallel_imgs,
channels,
deformable_group,
height_col,
width_col,
data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
}
}
void deformable_col2im(
const at::Tensor data_col,
const at::Tensor data_offset,
const int channels,
const int height,
const int width,
const int ksize_h,
const int ksize_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int parallel_imgs,
const int deformable_group,
at::Tensor grad_im) {
// todo: make sure parallel_imgs is passed in correctly
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels =
channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
at::cuda::CUDAGuard device_guard(data_col.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_gpu", ([&] {
const scalar_t* data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>();
deformable_col2im_gpu_kernel<<<
GET_BLOCKS(num_kernels),
CUDA_NUM_THREADS,
0,
stream>>>(
num_kernels,
data_col_,
data_offset_,
channels,
height,
width,
ksize_h,
ksize_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
channel_per_deformable_group,
parallel_imgs,
deformable_group,
height_col,
width_col,
grad_im_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in deformable_col2im: %s\n", cudaGetErrorString(err));
}
}
void deformable_col2im_coord(
const at::Tensor data_col,
const at::Tensor data_im,
const at::Tensor data_offset,
const int channels,
const int height,
const int width,
const int ksize_h,
const int ksize_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int parallel_imgs,
const int deformable_group,
at::Tensor grad_offset) {
int height_col =
(height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col =
(width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w *
deformable_group * parallel_imgs;
int channel_per_deformable_group =
channels * ksize_h * ksize_w / deformable_group;
at::cuda::CUDAGuard device_guard(data_col.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] {
const scalar_t* data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t* data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>();
deformable_col2im_coord_gpu_kernel<<<
GET_BLOCKS(num_kernels),
CUDA_NUM_THREADS,
0,
stream>>>(
num_kernels,
data_col_,
data_im_,
data_offset_,
channels,
height,
width,
ksize_h,
ksize_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
channel_per_deformable_group,
parallel_imgs,
2 * ksize_h * ksize_w * deformable_group,
deformable_group,
height_col,
width_col,
grad_offset_);
}));
}
} // namespace detectron2
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(
const scalar_t* bottom_data,
const int data_width,
const int height,
const int width,
scalar_t h,
scalar_t w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_gradient_weight(
scalar_t argmax_h,
scalar_t argmax_w,
const int h,
const int w,
const int height,
const int width) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 ||
argmax_w >= width) {
// empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_coordinate_weight(
scalar_t argmax_h,
scalar_t argmax_w,
const int height,
const int width,
const scalar_t* im_data,
const int data_width,
const int bp_dir) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 ||
argmax_w >= width) {
// empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) *
im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) *
im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) *
im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) *
im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(
const int n,
const scalar_t* data_im,
const scalar_t* data_offset,
const scalar_t* data_mask,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int num_channels,
const int deformable_group,
const int height_col,
const int width_col,
scalar_t* data_col) {
CUDA_KERNEL_LOOP(index, n) {
// index: linear index into the output column matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t* data_col_ptr = data_col +
((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
// const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) *
// height + h_in) * width + w_in;
const scalar_t* data_im_ptr =
data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t* data_offset_ptr = data_offset +
(b_col * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
const scalar_t* data_mask_ptr = data_mask +
(b_col * deformable_group + deformable_group_index) * kernel_h *
kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
const int data_mask_hw_ptr =
((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
// if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
// const float map_h = i * dilation_h + offset_h;
// const float map_w = j * dilation_w + offset_w;
// const int cur_height = height - h_in;
// const int cur_width = width - w_in;
// val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height,
// cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(
data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
// data_col_ptr += height_col * width_col;
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_gpu_kernel(
const int n,
const scalar_t* data_col,
const scalar_t* data_offset,
const scalar_t* data_mask,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int deformable_group,
const int height_col,
const int width_col,
scalar_t* grad_im) {
CUDA_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i =
(index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c =
index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t* data_offset_ptr = data_offset +
(b * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
const scalar_t* data_mask_ptr = data_mask +
(b * deformable_group + deformable_group_index) * kernel_h * kernel_w *
height_col * width_col;
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr =
((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 &&
cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1) {
int cur_bottom_grad_pos =
((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = dmcn_get_gradient_weight(
cur_inv_h_data,
cur_inv_w_data,
cur_h + dy,
cur_w + dx,
height,
width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_coord_gpu_kernel(
const int n,
const scalar_t* data_col,
const scalar_t* data_im,
const scalar_t* data_offset,
const scalar_t* data_mask,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int offset_channels,
const int deformable_group,
const int height_col,
const int width_col,
scalar_t* grad_offset,
scalar_t* grad_mask) {
CUDA_KERNEL_LOOP(index, n) {
scalar_t val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t* data_col_ptr = data_col +
deformable_group_index * channel_per_deformable_group * batch_size *
width_col * height_col;
const scalar_t* data_im_ptr = data_im +
(b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t* data_offset_ptr = data_offset +
(b * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
const scalar_t* data_mask_ptr = data_mask +
(b * deformable_group + deformable_group_index) * kernel_h * kernel_w *
height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group;
col_c += col_step) {
const int col_pos =
(((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i =
(col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr =
(((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr =
(((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col +
w_out);
const int data_mask_hw_ptr =
(((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
} else {
mval += data_col_ptr[col_pos] *
dmcn_im2col_bilinear(
data_im_ptr + cnt * height * width,
width,
height,
width,
inv_h,
inv_w);
}
const scalar_t weight = dmcn_get_coordinate_weight(
inv_h,
inv_w,
height,
width,
data_im_ptr + cnt * height * width,
width,
bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group +
// deformable_group_index) * kernel_h * kernel_w + offset_c / 2) *
// height_col + h) * width_col + w], mask_req, mval);
grad_mask
[(((b * deformable_group + deformable_group_index) * kernel_h *
kernel_w +
offset_c / 2) *
height_col +
h) *
width_col +
w] = mval;
}
}
namespace detectron2 {
void modulated_deformable_im2col_cuda(
const at::Tensor data_im,
const at::Tensor data_offset,
const at::Tensor data_mask,
const int batch_size,
const int channels,
const int height_im,
const int width_im,
const int height_col,
const int width_col,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int deformable_group,
at::Tensor data_col) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
at::cuda::CUDAGuard device_guard(data_im.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t* data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t* data_col_ = data_col.data_ptr<scalar_t>();
modulated_deformable_im2col_gpu_kernel<<<
GET_BLOCKS(num_kernels),
CUDA_NUM_THREADS,
0,
stream>>>(
num_kernels,
data_im_,
data_offset_,
data_mask_,
height_im,
width_im,
kernel_h,
kernel_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
channel_per_deformable_group,
batch_size,
channels,
deformable_group,
height_col,
width_col,
data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf(
"error in modulated_deformable_im2col_cuda: %s\n",
cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_cuda(
const at::Tensor data_col,
const at::Tensor data_offset,
const at::Tensor data_mask,
const int batch_size,
const int channels,
const int height_im,
const int width_im,
const int height_col,
const int width_col,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int deformable_group,
at::Tensor grad_im) {
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels =
channels * kernel_h * kernel_w * batch_size * height_col * width_col;
at::cuda::CUDAGuard device_guard(data_col.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t* data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>();
modulated_deformable_col2im_gpu_kernel<<<
GET_BLOCKS(num_kernels),
CUDA_NUM_THREADS,
0,
stream>>>(
num_kernels,
data_col_,
data_offset_,
data_mask_,
channels,
height_im,
width_im,
kernel_h,
kernel_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
channel_per_deformable_group,
batch_size,
deformable_group,
height_col,
width_col,
grad_im_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf(
"error in modulated_deformable_col2im_cuda: %s\n",
cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_coord_cuda(
const at::Tensor data_col,
const at::Tensor data_im,
const at::Tensor data_offset,
const at::Tensor data_mask,
const int batch_size,
const int channels,
const int height_im,
const int width_im,
const int height_col,
const int width_col,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int deformable_group,
at::Tensor grad_offset,
at::Tensor grad_mask) {
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h *
kernel_w * deformable_group;
const int channel_per_deformable_group =
channels * kernel_h * kernel_w / deformable_group;
at::cuda::CUDAGuard device_guard(data_col.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t* data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t* data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>();
scalar_t* grad_mask_ = grad_mask.data_ptr<scalar_t>();
modulated_deformable_col2im_coord_gpu_kernel<<<
GET_BLOCKS(num_kernels),
CUDA_NUM_THREADS,
0,
stream>>>(
num_kernels,
data_col_,
data_im_,
data_offset_,
data_mask_,
channels,
height_im,
width_im,
kernel_h,
kernel_w,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
channel_per_deformable_group,
batch_size,
2 * kernel_h * kernel_w * deformable_group,
deformable_group,
height_col,
width_col,
grad_offset_,
grad_mask_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf(
"error in modulated_deformable_col2im_coord_cuda: %s\n",
cudaGetErrorString(err));
}
}
} // namespace detectron2
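// Illustrative usage sketch (an assumption-based example, not taken from the
// original detectron2 sources): how the three wrappers above are typically
// combined for a modulated deformable convolution forward pass. The tensor
// names (input, offset, mask, columns) and the `columns` shape are inferred
// from the parameter lists above.
//
//   at::Tensor columns = at::zeros(
//       {channels * kernel_h * kernel_w, batch_size * height_col * width_col},
//       input.options());
//   detectron2::modulated_deformable_im2col_cuda(
//       input, offset, mask,
//       batch_size, channels, height_im, width_im, height_col, width_col,
//       kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
//       dilation_h, dilation_w, deformable_group, columns);
//   // The convolution itself is then a GEMM between the weight tensor
//   // (viewed as [out_channels, channels * kernel_h * kernel_w]) and columns.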
|
e3871777e711cd209178cbbf5d3620994c215e55.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
 * @desc Implementation of the functions declared in image_process.hpp
 * @author 杨丰拓
* @date 2019-04-16
* @email [email protected]
*/
#include <hip/hip_runtime.h>
#include <cstdio>
#include <iostream>
#include "MATH/Function/function.hpp"
#include "cuda_include/sharemem.cuh"
#include "hip/hip_runtime.h"
#include <vector>
/***********************************************************************************/
__global__ void kernelDesaturateAlpha(float *p_out,float const *p_in, const int kSize,const int kType)
{
extern __shared__ float s[];
int in_idx = threadIdx.x + blockIdx.x * blockDim.x * 8 ;
int out_idx = threadIdx.x + blockIdx.x * blockDim.x * 4 ;
int tid=threadIdx.x;
int stride=tid*4;
int stride1=stride+blockDim.x*4;
if (in_idx< kSize * 4)
{
s[tid] =p_in[in_idx];
s[tid+blockDim.x] =p_in[in_idx+blockDim.x];
s[tid+blockDim.x*2]=p_in[in_idx+blockDim.x*2];
s[tid+blockDim.x*3]=p_in[in_idx+blockDim.x*3];
s[tid+blockDim.x*4]=p_in[in_idx+blockDim.x*4];
s[tid+blockDim.x*5]=p_in[in_idx+blockDim.x*5];
s[tid+blockDim.x*6]=p_in[in_idx+blockDim.x*6];
s[tid+blockDim.x*7]=p_in[in_idx+blockDim.x*7];
}
__syncthreads();
if(kType==0)
{
p_out[out_idx] =max(s[stride+0],max(s[stride+1],s[stride+2]));
p_out[out_idx+blockDim.x*2]=max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
}
if(kType==1)
{
float const max_v = max(s[stride+0],max(s[stride+1],s[stride+2]));
float const min_v = min(s[stride+0],min(s[stride+1],s[stride+2]));
p_out[out_idx]=0.5f*(max_v+min_v);
float const max_s = max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
float const min_s = min(s[stride1+0],min(s[stride1+1],s[stride1+2]));
p_out[out_idx+blockDim.x*2]=0.5f*(max_s+min_s);
}
if(kType==2)
{
p_out[out_idx] =0.21f * s[stride+0] + 0.72f * s[stride+1] + 0.07f * s[stride+2];
p_out[out_idx+blockDim.x*2]=0.21f * s[stride1+0] + 0.72f * s[stride1+1] + 0.07f * s[stride1+2];
}
if(kType==3)
{
p_out[out_idx] =0.30f * s[stride+0] + 0.59f * s[stride+1] + 0.11f * s[stride+2];
p_out[out_idx+blockDim.x*2]=0.30f * s[stride1+0] + 0.59f * s[stride1+1] + 0.11f * s[stride1+2];
}
if(kType==4)
{
p_out[out_idx] =((float)(s[stride+0] + s[stride+1] + s[stride+2])) / 3.0f;
p_out[out_idx+blockDim.x*2]=((float)(s[stride1+0] + s[stride1+1] + s[stride1+2])) / 3.0f;
}
p_out[out_idx+tid+1] =s[stride+3];
p_out[out_idx+blockDim.x*2+tid+1]=s[stride1+3];
}
__global__ void kernelDesaturate(float *p_out,float const *p_in, const int kSize,const int kType)
{
extern __shared__ float s[];
int in_idx = threadIdx.x + blockIdx.x * blockDim.x * 6 ;
int out_idx = threadIdx.x + blockIdx.x * blockDim.x * 2 ;
int tid=threadIdx.x;
int stride=tid*3;
int stride1=stride+blockDim.x*3;
if (in_idx< kSize * 3)
{
s[tid] =p_in[in_idx];
s[tid+blockDim.x] =p_in[in_idx+blockDim.x];
s[tid+blockDim.x*2]=p_in[in_idx+blockDim.x*2];
s[tid+blockDim.x*3]=p_in[in_idx+blockDim.x*3];
s[tid+blockDim.x*4]=p_in[in_idx+blockDim.x*4];
s[tid+blockDim.x*5]=p_in[in_idx+blockDim.x*5];
}
__syncthreads();
if(kType==0)
{
p_out[out_idx] =max(s[stride+0],max(s[stride+1],s[stride+2]));
p_out[out_idx+blockDim.x]=max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
}
if(kType==1)
{
float const max_v = max(s[stride+0],max(s[stride+1],s[stride+2]));
float const min_v = min(s[stride+0],min(s[stride+1],s[stride+2]));
p_out[out_idx]=0.5f*(max_v+min_v);
float const max_s = max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
float const min_s = min(s[stride1+0],min(s[stride1+1],s[stride1+2]));
p_out[out_idx+blockDim.x]=0.5f*(max_s+min_s);
}
if(kType==2)
{
p_out[out_idx] =0.21f * s[stride+0] + 0.72f * s[stride+1] + 0.07f * s[stride+2];
p_out[out_idx+blockDim.x]=0.21f * s[stride1+0] + 0.72f * s[stride1+1] + 0.07f * s[stride1+2];
}
if(kType==3)
{
p_out[out_idx] =0.30f * s[stride+0] + 0.59f * s[stride+1] + 0.11f * s[stride+2];
p_out[out_idx+blockDim.x]=0.30f * s[stride1+0] + 0.59f * s[stride1+1] + 0.11f * s[stride1+2];
}
if(kType==4)
{
p_out[out_idx] =((float)(s[stride+0] + s[stride+1] + s[stride+2])) / 3.0f;
p_out[out_idx+blockDim.x]=((float)(s[stride1+0] + s[stride1+1] + s[stride1+2])) / 3.0f;
}
}
/******************************************************************************************/
/// Image doubling (2x upsampling) kernels
/*
* kernelDoubleSize 3.678ms [32,4,1]
* kernelDoubleSize1 3.67ms [32,4,1]
* kernelDoubleSize2 3.532ms [32,4,1]**
* kernelDoubleSizeByShare 5.265ms [32,8,1]
* kernelDoubleSizeByShare1 4.737ms [64,8,1]
* kernelDoubleSizeByShare2 3.98ms [32,8,1]
*/
/******************************************************************************************/
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x)/x,(oh-1+y)/y,1);
* kernelDoublesize<<<grid,block>>>(d_out,d_in,ow,oh,width,channels);
*/
__global__ void kernelDoubleSize(float *p_out,float *p_in,int const kImage_x,int const kImage_y,int const kIn_width,int const kIn_Channels)
{
int out_x = threadIdx.x + blockIdx.x * blockDim.x * kIn_Channels;
int out_y = threadIdx.y + blockIdx.y * blockDim.y;
for (int c = 0; c <kIn_Channels ; ++c)
{
int fact_x = out_x + blockDim.x * c;
if(out_y<kImage_y && fact_x < kImage_x*kIn_Channels)
{
int idx =fact_x + out_y * kImage_x * kIn_Channels;
bool nexty =(out_y+1)<kImage_y;
bool nextx =(fact_x+kIn_Channels)<(kImage_x*kIn_Channels);
int yoff[2]={kIn_Channels*kIn_width*(out_y>>1),
kIn_Channels*kIn_width*((out_y+nexty)>>1)};
int xoff[2]={((fact_x / kIn_Channels) >>1)* kIn_Channels + fact_x % kIn_Channels,
(((fact_x/kIn_Channels)+nextx)>>1)*kIn_Channels+fact_x%kIn_Channels};
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
p_out[idx]=0.25f*(p_in[index[0]]+p_in[index[1]]+p_in[index[2]]+p_in[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*2)/(x*2),(oh-1+y)/y,1);
* kernel_doublesize1<<<grid,block>>>(d_out,d_in,ow,oh,width,channels);
*/
__global__ void kernelDoubleSize1(float *p_out,float *p_in,int const kImage_x,int const kImage_y,int const kIn_width,int const kIn_Channels)
{
int out_x = threadIdx.x + blockIdx.x * blockDim.x * kIn_Channels*2;
int out_y = threadIdx.y + blockIdx.y * blockDim.y;
for (int c = 0; c <kIn_Channels*2 ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kImage_y&&fact_x<kImage_x*kIn_Channels)
{
int idx=fact_x+out_y*kImage_x*kIn_Channels;
bool nexty=(out_y+1)<kImage_y;
bool nextx=(fact_x+kIn_Channels)<(kImage_x*kIn_Channels);
int yoff[2]={kIn_Channels*kIn_width*(out_y>>1),
kIn_Channels*kIn_width*((out_y+nexty)>>1)};
int xoff[2]={((fact_x/kIn_Channels)>>1)*kIn_Channels+fact_x%kIn_Channels,
(((fact_x/kIn_Channels)+nextx)>>1)*kIn_Channels+fact_x%kIn_Channels};
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
p_out[idx]=0.25f*(p_in[index[0]]+p_in[index[1]]+p_in[index[2]]+p_in[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*3)/(x*3),(oh-1+y)/y,1);
* kernel_doublesize2<<<grid,block>>>(d_out,d_in,ow,oh,width,channels);
*/
__global__ void kernelDoubleSize2(float *p_out,float *p_in,int const kImage_x,int const kImage_y,int const kIn_width,int const kIn_Channels)
{
int out_x = threadIdx.x + blockIdx.x * blockDim.x * kIn_Channels*3;
int out_y = threadIdx.y + blockIdx.y * blockDim.y;
for (int c = 0; c <kIn_Channels*3 ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kImage_y&&fact_x<kImage_x*kIn_Channels)
{
int idx=fact_x+out_y*kImage_x*kIn_Channels;
bool nexty=(out_y+1)<kImage_y;
bool nextx=(fact_x+kIn_Channels)<(kImage_x*kIn_Channels);
int yoff[2]={kIn_Channels*kIn_width*(out_y>>1),
kIn_Channels*kIn_width*((out_y+nexty)>>1)};
int xoff[2]={((fact_x/kIn_Channels)>>1)*kIn_Channels+fact_x%kIn_Channels,
(((fact_x/kIn_Channels)+nextx)>>1)*kIn_Channels+fact_x%kIn_Channels};
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
p_out[idx]=0.25f*(p_in[index[0]]+p_in[index[1]]+p_in[index[2]]+p_in[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x)/x,(oh-1+y)/y,1);
* kernel_doublesizebyshare<<<grid,block,share_x*share_y*channels*sizeof(float)>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernelDoubleSizeByShare(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int share_x=(blockDim.x>>1)+1;//xkIn_Channels
int share_y=(blockDim.y>>1)+1;//y
int share_fact_x=share_x*kIn_Channels;
int share_idx_x;
int share_idx_y= threadIdx.y;//y
int in_x0 = ((blockIdx.x * blockDim.x) >> 1) * kIn_Channels;
int in_y0 = (blockIdx.y * blockDim.y) >> 1;
int x,y,c,fact_x;
for ( c = 0; c <kIn_Channels ; ++c)
{
share_idx_x = threadIdx.x + blockDim.x * c;//x
if (share_idx_x < share_fact_x && share_idx_y < share_y)
{
x = min(in_x0 + share_idx_x, kIn_width * kIn_Channels - kIn_Channels + share_idx_x % kIn_Channels);
y = min(in_y0 + share_idx_y, kIn_height - 1);
data[share_idx_y * share_fact_x + share_idx_x] = p_in[y * kIn_width * kIn_Channels + x];
}
}
__syncthreads();
for ( c = 0; c <kIn_Channels ; ++c)
{
fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height && fact_x<kOut_width*kIn_Channels)
{
share_idx_x = threadIdx.x + blockDim.x * c;
int yoff[2]={(share_idx_y>>1)*share_fact_x,((share_idx_y+1)>>1)*share_fact_x};
int xoff[2]={(share_idx_x/kIn_Channels>>1)*kIn_Channels+share_idx_x%kIn_Channels,
((share_idx_x/kIn_Channels+1)>>1)*kIn_Channels+share_idx_x%kIn_Channels};
int out_idx=out_y*kOut_width*kIn_Channels+fact_x;
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
p_out[out_idx]=0.25f*(data[index[0]]+data[index[1]]+data[index[2]]+data[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x*2)/(x*2),(oh-1+y)/y,1);
* kernel_doublesizebyshare1<<<grid,block,share_x*share_y*2*channels*sizeof(float)>>>(d_out,d_in,kOut_width,oh,width,height,channels);
*/
__global__ void kernelDoubleSizeByShare1(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels*2;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int share_x=(blockDim.x>>1)+1;//xkIn_Channels
int share_y=(blockDim.y>>1)+1;//y
int share_fact_x=share_x*kIn_Channels*2;
int share_idx_x;
int share_idx_y= threadIdx.y;//y
int in_x0 = ((blockIdx.x * blockDim.x*2) >> 1) * kIn_Channels;
int in_y0 = (blockIdx.y * blockDim.y) >> 1;
int x,y,c,fact_x;
for ( c = 0; c <kIn_Channels*2 ; ++c)
{
share_idx_x = threadIdx.x + blockDim.x * c;//x
if (share_idx_x < share_fact_x && share_idx_y < share_y)
{
x = min(in_x0 + share_idx_x, kIn_width * kIn_Channels - kIn_Channels + share_idx_x % kIn_Channels);
y = min(in_y0 + share_idx_y, kIn_height - 1);
data[share_idx_y * share_fact_x + share_idx_x] = p_in[y * kIn_width * kIn_Channels + x];
}
}
__syncthreads();
for ( c = 0; c <kIn_Channels*2 ; ++c)
{
fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels)
{
share_idx_x = threadIdx.x + blockDim.x * c;
int yoff[2]={(share_idx_y>>1)*share_fact_x,((share_idx_y+1)>>1)*share_fact_x};
int xoff[2]={(share_idx_x/kIn_Channels>>1)*kIn_Channels+share_idx_x%kIn_Channels,
((share_idx_x/kIn_Channels+1)>>1)*kIn_Channels+share_idx_x%kIn_Channels};
int out_idx=out_y*kOut_width*kIn_Channels+fact_x;
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
p_out[out_idx]=0.25f*(data[index[0]]+data[index[1]]+data[index[2]]+data[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x*3)/(x*3),(kOut_height-1+y)/y,1);
* kernel_doublesizebyshare2<<<grid,block,share_x*share_y*3*channels*sizeof(float)>>>(d_out,d_in,kOut_width,kOut_height,width,height,channels);
*/
__global__ void kernelDoubleSizeByShare2(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels*3;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int share_x=(blockDim.x>>1)+1;//xkIn_Channels
int share_y=(blockDim.y>>1)+1;//y
int share_fact_x=share_x*kIn_Channels*3;
int share_idx_x;
int share_idx_y = threadIdx.y;//y
int in_x0 = ((blockIdx.x * blockDim.x*3) >> 1) * kIn_Channels;
int in_y0 = (blockIdx.y * blockDim.y) >> 1;
int x,y,c,fact_x;
for ( c = 0; c <kIn_Channels*3 ; ++c)
{
share_idx_x = threadIdx.x + blockDim.x * c;//x
if (share_idx_x < share_fact_x && share_idx_y < share_y)
{
x = min(in_x0 + share_idx_x, kIn_width * kIn_Channels - kIn_Channels + share_idx_x % kIn_Channels);
y = min(in_y0 + share_idx_y, kIn_height - 1);
data[share_idx_y * share_fact_x + share_idx_x] = p_in[y * kIn_width * kIn_Channels + x];
}
}
__syncthreads();
for ( c = 0; c <kIn_Channels*3 ; ++c)
{
fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels)
{
share_idx_x = threadIdx.x + blockDim.x * c;
int yoff[2]={(share_idx_y>>1)*share_fact_x,((share_idx_y+1)>>1)*share_fact_x};
int xoff[2]={(share_idx_x/kIn_Channels>>1)*kIn_Channels+share_idx_x%kIn_Channels,
((share_idx_x/kIn_Channels+1)>>1)*kIn_Channels+share_idx_x%kIn_Channels};
int out_idx=out_y*kOut_width*kIn_Channels+fact_x;
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
p_out[out_idx]=0.25f*(data[index[0]]+data[index[1]]+data[index[2]]+data[index[3]]);
}
}
}
/******************************************************************************************/
/// Image halving (2x downsampling) kernels
/*
*kernelHalfSize 636.275us [32,8,1]
*kernelHalfSize1 634.383us [32,8,1]**
*kernelHalfSize2 641.6us [32,8,1]
*kernelHalfSizeByShare 643.698us [32,4,1]
*kernelHalfSizeByShare1 671.245us [32,4,1]
*/
/******************************************************************************************/
/*
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x)/x,(kOut_height-1+y)/y,1);
* kernel_halfsize<<<grid,block>>>(d_out,d_in,kOut_width,kOut_height,width,height,channels);
*/
__global__ void kernelHalfSize(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int stride=kIn_width*kIn_Channels;
for(int c=0;c<kIn_Channels;c++)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels) {
int irow1 = out_y * 2 * stride;
int irow2 = irow1 + stride * (out_y * 2 + 1 < kIn_height);
int icol1 = (fact_x / kIn_Channels) * 2 * kIn_Channels + fact_x % kIn_Channels;
int icol2 = min((icol1 + kIn_Channels), (kIn_width * kIn_Channels - kIn_Channels + fact_x % kIn_Channels));
int index[4] = {irow1 + icol1,
irow1 + icol2,
irow2 + icol1,
irow2 + icol2};
int out_idx = out_y * kOut_width*kIn_Channels + fact_x;
p_out[out_idx] = 0.25f * (p_in[index[0]] + p_in[index[1]] + p_in[index[2]] + p_in[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x*2)/(x*2),(kOut_height-1+y)/y,1);
* kernel_halfsize1<<<grid,block>>>(d_out,d_in,kOut_width,kOut_height,width,height,channels);
*/
__global__ void kernelHalfSize1(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels*2;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int stride=kIn_width*kIn_Channels;
for(int c=0;c<kIn_Channels*2;c++)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels) {
int irow1 = out_y * 2 * stride;
int irow2 = irow1 + stride * (out_y * 2 + 1 < kIn_height);
int icol1 = (fact_x / kIn_Channels) * 2 * kIn_Channels + fact_x % kIn_Channels;
int icol2 = min((icol1 + kIn_Channels), (kIn_width * kIn_Channels - kIn_Channels + fact_x % kIn_Channels));
int index[4] = {irow1 + icol1,
irow1 + icol2,
irow2 + icol1,
irow2 + icol2};
int out_idx = out_y * kOut_width*kIn_Channels + fact_x;
p_out[out_idx] = 0.25f * (p_in[index[0]] + p_in[index[1]] + p_in[index[2]] + p_in[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x*3)/(x*3),(kOut_height-1+y)/y,1);
* kernel_halfsize2<<<grid,block>>>(d_out,d_in,kOut_width,kOut_height,width,height,channels);
*/
__global__ void kernelHalfSize2(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels*3;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int stride=kIn_width*kIn_Channels;
for(int c=0;c<kIn_Channels*3;c++)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height && fact_x < kOut_width*kIn_Channels) {
int irow1 = out_y * 2 * stride;
int irow2 = irow1 + stride * (out_y * 2 + 1 < kIn_height);
int icol1 = (fact_x / kIn_Channels) * 2 * kIn_Channels + fact_x % kIn_Channels;
int icol2 = min((icol1 + kIn_Channels), (kIn_width * kIn_Channels - kIn_Channels + fact_x % kIn_Channels));
int index[4] = {irow1 + icol1,
irow1 + icol2,
irow2 + icol1,
irow2 + icol2};
int out_idx = out_y * kOut_width*kIn_Channels + fact_x;
p_out[out_idx] = 0.25f * (p_in[index[0]] + p_in[index[1]] + p_in[index[2]] + p_in[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x)/x,(kOut_height-1+y)/y,1);
* kernel_halfsizebyshare<<<grid,block,share_x*share_y*channels* sizeof(float)>>>(d_out,d_in,kOut_width,kOut_height,width,height,channels);
*/
__global__ void kernelHalfSizeByShare(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
extern __shared__ float data[];
int block_stride=blockDim.x*kIn_Channels;//x
int out_x=threadIdx.x+blockIdx.x*block_stride;//x
int out_y=threadIdx.y+blockIdx.y*blockDim.y;//y
int stride=kIn_width*kIn_Channels;//
int in_x0=blockIdx.x*block_stride*2;//x
int in_y0=blockIdx.y*blockDim.y*2;//y
int in_x1=in_x0+block_stride;
int in_y1=in_y0+blockDim.y;
int share_x=blockDim.x*2*kIn_Channels;//x
for (int c = 0; c < kIn_Channels; ++c)
{
int fact_x_s=threadIdx.x+blockDim.x*c;
int channel=fact_x_s%kIn_Channels;//
int x_s = fact_x_s + block_stride;
int y_s0=threadIdx.y*share_x;
int y_s1=y_s0+blockDim.y*share_x;
int fact_iw=channel+stride-kIn_Channels;
int x0=min(in_x0+fact_x_s,fact_iw);
int x1=min(in_x1+fact_x_s,fact_iw);
int y0=min(in_y0+threadIdx.y,kIn_height-1)*stride;
int y1=min(in_y1+threadIdx.y,kIn_height-1)*stride;
int deta=((fact_x_s/kIn_Channels)%2)*block_stride;//x
int x_fs0=(fact_x_s/kIn_Channels>>1)*kIn_Channels+channel+deta;//x
int x_fs1=(x_s/kIn_Channels>>1)*kIn_Channels+channel+deta;//x
data[y_s0+x_fs0]=p_in[y0+x0];
data[y_s0+x_fs1]=p_in[y0+x1];
data[y_s1+x_fs0]=p_in[y1+x0];
data[y_s1+x_fs1]=p_in[y1+x1];
}
__syncthreads();
for (int c = 0; c <kIn_Channels ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels)
{
int srow1=threadIdx.y*2*share_x;
int srow2=srow1+share_x;
int scol1=threadIdx.x+blockDim.x*c;
int scol2=scol1+block_stride;
int index[4] = {srow1 + scol1,
srow1 + scol2,
srow2 + scol1,
srow2 + scol2};
int out_idx = out_y * kOut_width*kIn_Channels + fact_x;
p_out[out_idx] = 0.25f * (data[index[0]] + data[index[1]] + data[index[2]] + data[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x*2)/(x*2),(kOut_height-1+y)/y,1);
* kernel_halfsizebyshare1<<<grid,block,share_x*share_y*channels* sizeof(float)>>>(d_out,d_in,kOut_width,kOut_height,width,height,channels);
*/
__global__ void kernelHalfSizeByShare1(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
extern __shared__ float data[];
int block_stride=blockDim.x*kIn_Channels*2;//x
int out_x=threadIdx.x+blockIdx.x*block_stride;//x
int out_y=threadIdx.y+blockIdx.y*blockDim.y;//y
int stride=kIn_width*kIn_Channels;//
int in_x0=blockIdx.x*block_stride*2;//x
int in_y0=blockIdx.y*blockDim.y*2;//y
int in_x1=in_x0+block_stride;
int in_y1=in_y0+blockDim.y;
int share_x=blockDim.x*4*kIn_Channels;//x
for (int c = 0; c < kIn_Channels*2; ++c)
{
int fact_x_s=threadIdx.x+blockDim.x*c;
int channel=fact_x_s%kIn_Channels;//
int x_s=fact_x_s+block_stride;
int y_s0=threadIdx.y*share_x;
int y_s1=y_s0+blockDim.y*share_x;
int fact_iw=channel+stride-kIn_Channels;
int x0=min(in_x0+fact_x_s,fact_iw);
int x1=min(in_x1+fact_x_s,fact_iw);
int y0=min(in_y0+threadIdx.y,kIn_height-1)*stride;
int y1=min(in_y1+threadIdx.y,kIn_height-1)*stride;
int deta=((fact_x_s/kIn_Channels)%2)*block_stride;//x
int x_fs0=(fact_x_s/kIn_Channels>>1)*kIn_Channels+channel+deta;//x
int x_fs1=(x_s/kIn_Channels>>1)*kIn_Channels+channel+deta;//x
data[y_s0+x_fs0]=p_in[y0+x0];
data[y_s0+x_fs1]=p_in[y0+x1];
data[y_s1+x_fs0]=p_in[y1+x0];
data[y_s1+x_fs1]=p_in[y1+x1];
}
__syncthreads();
for (int c = 0; c <kIn_Channels*2 ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels)
{
int srow1=threadIdx.y*2*share_x;
int srow2=srow1+share_x;
int scol1=threadIdx.x+blockDim.x*c;
int scol2=scol1+block_stride;
int index[4] = {srow1 + scol1,
srow1 + scol2,
srow2 + scol1,
srow2 + scol2};
int out_idx = out_y * kOut_width*kIn_Channels + fact_x;
p_out[out_idx] = 0.25f * (data[index[0]] + data[index[1]] + data[index[2]] + data[index[3]]);
}
}
}
/******************************************************************************************/
/// Gaussian-weighted image halving kernels
/*
* kernel_halfsize_gauss 1.856ms [32,8,1]
* kernel_halfsize_gauss1 936.937us [32,4,1]
*/
/******************************************************************************************/
/*
* dim3 block(x, y, 1);
* dim3 grid((ow - 1 + x) / (x), (kOut_height - 1 + y) / y, 1);
* kernel_halfsize_guass << < grid, block >> > (d_out, d_in, ow, oh, width, height, channels, d_w);
*
__global__ void kernel_halfsize_guass(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic,float const *w)
{
//printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[2]] * dw[0]);in
//printfkernel_halfsize_guass1
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int istride=iw*ic;
float dw[3];
dw[0]=w[0];
dw[1]=w[1];
dw[2]=w[2];
for (int c = 0; c <ic ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
int out_idx = out_y * ow * ic + fact_x;
int channels = fact_x % ic;//
int out_xf = fact_x / ic;//x
int ix = out_xf << 1;
int iy = out_y << 1;
int row[4], col[4];
row[0] = max(0, iy - 1) * istride;
row[1] = iy * istride;
row[2] = min(iy + 1, (int)ih - 1) * istride;
row[3] = min(iy + 2, (int)ih - 2) * istride;
col[0] = max(0, ix - 1) * ic + channels;
col[1] = ix * ic + channels;
col[2] = min(ix + 1, (int)iw - 1) * ic + channels;
col[3] = min(ix + 2, (int)iw - 1) * ic + channels;
float sum = 0.0f;
int t=6;
if(out_idx==t);//printf("idx:%d\n",t);
sum += in[row[0] + col[0]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[0]] * dw[2]);
sum += in[row[0] + col[1]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[1]] * dw[1]);
sum += in[row[0] + col[2]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[2]] * dw[1]);
sum += in[row[0] + col[3]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[3]] * dw[2]);
sum += in[row[1] + col[0]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[0]] * dw[1]);
sum += in[row[1] + col[1]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[1]] * dw[0]);
sum += in[row[1] + col[2]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[2]] * dw[0]);
sum += in[row[1] + col[3]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[3]] * dw[1]);
sum += in[row[2] + col[0]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[0]] * dw[1]);
sum += in[row[2] + col[1]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[1]] * dw[0]);
sum += in[row[2] + col[2]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[2]] * dw[0]);
sum += in[row[2] + col[3]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[3]] * dw[1]);
sum += in[row[3] + col[0]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[0]] * dw[2]);
sum += in[row[3] + col[1]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[1]] * dw[1]);
sum += in[row[3] + col[2]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[2]] * dw[1]);
sum += in[row[3] + col[3]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[3]] * dw[2]);
out[out_idx] = sum / (float)(4 * dw[2] + 8 * dw[1] + 4 * dw[0]);
}
}
}
*/
/*
* dim3 block(x, y, 1);
* dim3 grid((kOut_width - 1 + x) / (x), (oh - 1 + y) / y, 1);
* kernel_halfsize_gauss1 << < grid, block >> > (d_out, d_in, kOut_width, kOut_height, width, height, channels, d_w);
*/
__global__ void kernelHalfSizeGauss1(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels,float const *p_w)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int istride=kIn_width*kIn_Channels;
float dw[3];
dw[0]=p_w[0];
dw[1]=p_w[1];
dw[2]=p_w[2];
for (int c = 0; c <kIn_Channels ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels)
{
int out_idx = out_y * kOut_width * kIn_Channels + fact_x;
int channels = fact_x % kIn_Channels;//
int out_xf = fact_x / kIn_Channels;//x
int ix = out_xf << 1;
int iy = out_y << 1;
int row[4], col[4];
row[0] = max(0, iy - 1) * istride;
row[1] = iy * istride;
row[2] = min(iy + 1, (int)kIn_height - 1) * istride;
row[3] = min(iy + 2, (int)kIn_height - 2) * istride;
col[0] = max(0, ix - 1) * kIn_Channels + channels;
col[1] = ix * kIn_Channels + channels;
col[2] = min(ix + 1, (int)kIn_width - 1) * kIn_Channels + channels;
col[3] = min(ix + 2, (int)kIn_width - 1) * kIn_Channels + channels;
float sum = 0.0f;
sum+=p_in[row[0] + col[0]] * dw[2];
sum+=p_in[row[0] + col[1]] * dw[1];
sum+=p_in[row[0] + col[2]] * dw[1];
sum+=p_in[row[0] + col[3]] * dw[2];
sum+=p_in[row[1] + col[0]] * dw[1];
sum+=p_in[row[1] + col[1]] * dw[0];
sum+=p_in[row[1] + col[2]] * dw[0];
sum+=p_in[row[1] + col[3]] * dw[1];
sum+=p_in[row[2] + col[0]] * dw[1];
sum+=p_in[row[2] + col[1]] * dw[0];
sum+=p_in[row[2] + col[2]] * dw[0];
sum+=p_in[row[2] + col[3]] * dw[1];
sum+=p_in[row[3] + col[0]] * dw[2];
sum+=p_in[row[3] + col[1]] * dw[1];
sum+=p_in[row[3] + col[2]] * dw[1];
sum+=p_in[row[3] + col[3]] * dw[2];
p_out[out_idx] = sum / (float)(4 * dw[2] + 8 * dw[1] + 4 * dw[0]);
}
}
}
/******************************************************************************************/
/// Gaussian blur along the x direction
/*
* kernelGaussBlurX 2.561ms [32,4,1]
* kernelGaussBlurX1 2.025ms [32,4,1]**
* kernelGaussBlurX2 2.148ms [32,4,1]
*/
/******************************************************************************************/
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/y,1);
* kernel_gaussBlur_x<<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,w,h,c,ks,weight);
*/
__global__ void kernelGaussBlurX(float *const p_out,float const *const p_in,float const * const p_blur,int const kWidth,int const kHeight,int const kChannels,int const kSize,float const kWeight)
{
extern __shared__ float data[];
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(kSize+1))
{
data[share_idx]=p_blur[share_idx];
}
__syncthreads();
int fact_x=x/kChannels;
int channels=x%kChannels;
int max_x=y*kWidth*kChannels;
int out_idx=max_x+x;
if(fact_x<kWidth&&y<kHeight)
{
float accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
int idx =max(0,min(fact_x+i,kWidth-1));//xks
accum +=p_in[max_x+idx*kChannels+channels]* data[abs(i)];
}
p_out[out_idx]=accum / kWeight;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((kWidth*c-1+x*2)/(x*2),(h-1+y)/y,1);
* kernel_gaussBlur_x1<<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,kWidth,h,c,ks,weight);
*/
__global__ void kernelGaussBlurX1(float *const p_out,float const *const p_in,float const * const p_blur,int const kWidth,int const kHeight,int const kChannels,int const kSize,float const kWeight)
{
extern __shared__ float data[];
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(kSize+1))
{
data[share_idx]=p_blur[share_idx];
}
__syncthreads();
int fact_x=x/kChannels;
int channels=x%kChannels;
int max_x=y*kWidth*kChannels;
int out_idx=max_x+x;
if(fact_x<kWidth&&y<kHeight)
{
float accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
int idx =max(0,min(fact_x+i,kWidth-1));//xks
accum +=p_in[max_x+idx*kChannels+channels]* data[abs(i)];
//if(out_idx==10)printf("%f\t%f\n",accum,in[max_x+idx*c+channels]* data[abs(i)]);
}
p_out[out_idx]=accum / kWeight;
}
//
int fact_x1=(x+blockDim.x)/kChannels;
int channels1=(x+blockDim.x)%kChannels;
int out_idx1=max_x+x+blockDim.x;
if(fact_x1<kWidth&&y<kHeight)
{
float accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
int idx =max(0,min(fact_x1+i,kWidth-1));//xks
accum +=p_in[max_x+idx*kChannels+channels1]* data[abs(i)];
}
p_out[out_idx1]=accum / kWeight;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((kWidth*c-1+x*3)/(x*3),(h-1+y)/y,1);
* kernel_gaussBlur_x2<<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,kWidth,h,c,ks,weight);
*/
__global__ void kernelGaussBlurX2(float *const p_out,float const *const p_in,float const * const p_blur,int const kWidth,int const kHeight,int const kChannels,int const kSize,float const kWeight)
{
extern __shared__ float data[];
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(kSize+1))
{
data[share_idx]=p_blur[share_idx];
}
__syncthreads();
int fact_x=x/kChannels;
int channels=x%kChannels;
int max_x=y*kWidth*kChannels;
int out_idx=max_x+x;
if(fact_x<kWidth&&y<kHeight)
{
float accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
int idx =max(0,min(fact_x+i,kWidth-1));//xks
accum +=p_in[max_x+idx*kChannels+channels]* data[abs(i)];
//if(out_idx==10)printf("%f\t%f\n",accum,in[max_x+idx*c+channels]* data[abs(i)]);
}
p_out[out_idx]=accum / kWeight;
}
//
int fact_x1=(x+blockDim.x)/kChannels;
int channels1=(x+blockDim.x)%kChannels;
int out_idx1=max_x+x+blockDim.x;
if(fact_x1<kWidth&&y<kHeight)
{
float accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
int idx =max(0,min(fact_x1+i,kWidth-1));//xks
accum +=p_in[max_x+idx*kChannels+channels1]* data[abs(i)];
}
p_out[out_idx1]=accum / kWeight;
}
//
int fact_x2=(x+blockDim.x*2)/kChannels;
int channels2=(x+blockDim.x*2)%kChannels;
int out_idx2=max_x+x+blockDim.x*2;
if(fact_x2<kWidth&&y<kHeight)
{
float accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
int idx =max(0,min(fact_x2+i,kWidth-1));//xks
accum +=p_in[max_x+idx*kChannels+channels2]* data[abs(i)];
}
p_out[out_idx2]=accum / kWeight;
}
}
/******************************************************************************************/
/// Gaussian blur along the y direction
/*
* kernelGaussBlurY 2.358ms [32,4,1]
* kernelGaussBlurY1 1.875ms [32,4,1]
* kernelGaussBlurY2 1.811ms [32,8,1]**
*/
/******************************************************************************************/
/*
* dim3 block(x,y,1);
* dim3 grid((kWidth*c-1+x)/(x),(h-1+y)/y,1);
* kernel_gaussBlur_y<float><<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,fact_W,h,ks,weight);
*/
template <typename T>
__global__ void kernelGaussBlurY(T *const p_out,T const *const p_in,T const * const p_blur,int const kFact_width,int const kHeight,int const kSize,T const kWeight)
{
//extern __shared__ float data[];
sharedMemory<T> smem;
T* data = smem.p_getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(kSize+1))
{
data[share_idx]=p_blur[share_idx];
}
__syncthreads();
//
int out_idx=y*kFact_width+x;
if(x<kFact_width&&y<kHeight)
{
T accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
int idx =max(0,min(y+i,kHeight-1));//yks
accum +=p_in[idx*kFact_width+x]* data[abs(i)];
}
p_out[out_idx]=accum / kWeight;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/y,1);
* kernel_gaussBlur_y1<float><<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,kFact_width,h,ks,weight);
*/
template <typename T>
__global__ void kernelGaussBlurY1(T *const p_out,T const *const p_in,T const * const p_blur,int const kFact_width,int const kHeight,int const kSize,T const kWeight)
{
//extern __shared__ float data[];
sharedMemory<T> smem;
T* data = smem.p_getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(kSize+1))
{
data[share_idx]=p_blur[share_idx];
}
__syncthreads();
//
int out_idx=y*kFact_width+x;
if(x<kFact_width&&y<kHeight)
{
T accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
int idx =max(0,min(y+i,kHeight-1));//yks
accum +=p_in[idx*kFact_width+x]* data[abs(i)];
}
p_out[out_idx]=accum / kWeight;
}
//
int x1=x+blockDim.x;
int out_idx1=y*kFact_width+x1;
if(x1<kFact_width&&y<kHeight)
{
T accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
int idx =max(0,min(y+i,kHeight-1));//yks
accum +=p_in[idx*kFact_width+x1]* data[abs(i)];
}
p_out[out_idx1]=accum / kWeight;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/y,1);
* kernel_gaussBlur_y2<float><<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,kFact_width,h,ks,weight);
*/
template <typename T>
__global__ void kernelGaussBlurY2(T *const p_out,T const *const p_in,T const * const p_blur,int const kFact_width,int const kHeight,int const kSize,T const kWeight)
{
//extern __shared__ float data[];
sharedMemory<T> smem;
T* data = smem.p_getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(kSize+1))
{
data[share_idx]=p_blur[share_idx];
}
__syncthreads();
//
int out_idx=y*kFact_width+x;
if(x<kFact_width&&y<kHeight)
{
T accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
int idx =max(0,min(y+i,kHeight-1));//yks
accum +=p_in[idx*kFact_width+x]* data[abs(i)];
}
p_out[out_idx]=accum / kWeight;
}
//
int x1=x+blockDim.x;
int out_idx1=y*kFact_width+x1;
if(x1<kFact_width&&y<kHeight)
{
T accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
int idx =max(0,min(y+i,kHeight-1));//yks
accum +=p_in[idx*kFact_width+x1]* data[abs(i)];
}
p_out[out_idx1]=accum / kWeight;
}
//
int x2=x1+blockDim.x;
int out_idx2=y*kFact_width+x2;
if(x2<kFact_width&&y<kHeight)
{
T accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
int idx =max(0,min(y+i,kHeight-1));//yks
accum +=p_in[idx*kFact_width+x2]* data[abs(i)];
}
p_out[out_idx2]=accum / kWeight;
}
}
/******************************************************************************************/
/// Element-wise subtraction kernels
/*
* kernelSubtract 1.554ms [32,4,1]
* kernelSubtract1 1.541ms [32,8,1]
* kernelSubtract2 1.537ms [32,4,1]
*/
/******************************************************************************************/
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/(y),1);
* kernel_subtract<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernelSubtract(float *const p_out,float const * const p_in1,float const * const p_in2,int const kWidth_channels,int const kHeight)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*kWidth_channels+x;
float a = 0.0f;
if(x<kWidth_channels&&y<kHeight) {
a = p_in1[idx];
a -= p_in2[idx];
p_out[idx] = a;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/(y),1);
* kernel_subtract1<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernelSubtract1(float *const p_out,float const * const p_in1,float const * const p_in2,int const kWidth_channels,int const kHeight)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
float diff=0.0f;
int idx;
for (int i = 0; i < 2; ++i) {
idx = y * kWidth_channels + x + blockDim.x * i;
if (idx < kHeight * kWidth_channels) {
diff = p_in1[idx];
diff -= p_in2[idx];
p_out[idx] = diff;
}
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/(y),1);
* kernel_subtract2<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernelSubtract2(float *const p_out,float const * const p_in1,float const * const p_in2,int const kWidth_channels,int const kHeight)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
float diff=0.0f;
int idx;
for (int i = 0; i < 3; ++i) {
idx = y * kWidth_channels + x + blockDim.x * i;
if (idx < kHeight * kWidth_channels) {
diff = p_in1[idx];
diff -=p_in2[idx];
p_out[idx] = diff;
}
}
}
/******************************************************************************************/
/// Element-wise absolute-difference kernels
/*
* kernelDifference 1.601ms [32,16,1]
* kernelDifference1 1.538ms [32,8,1]
* kernelDifference2 1.534ms [32,4,1]**
*/
/******************************************************************************************/
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/(y),1);
* kernel_difference<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernelDifference(float *const p_out,float const * const p_in1,float const * const p_in2,int const kWidth_channels,int const kHeight)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*kWidth_channels+x;
float diff = 0.0f;
if(x<kWidth_channels&&y<kHeight) {
diff = p_in1[idx];
diff -= p_in2[idx];
p_out[idx] = fabsf(diff);
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/(y),1);
* kernel_difference1<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
template <class T>
__global__ void kernelDifference1(T *const p_out,T const * const p_in1,T const * const p_in2,int const kWidth_channels,int const kHeight)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
T diff=0.0f;
int idx;
for (int i = 0; i < 2; ++i) {
idx = y * kWidth_channels + x + blockDim.x * i;
if (idx < kHeight * kWidth_channels) {
diff = p_in1[idx];
diff -= p_in2[idx];
p_out[idx] = fabsf(diff);
}
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/(y),1);
* kernel_difference2<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
template <class T>
__global__ void kernelDifference2(T *const p_out,T const * const p_in1,T const * const p_in2,int const kWidth_channels,int const kHeight)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
T diff=0.0f;
int idx;
for (int i = 0; i < 3; ++i) {
idx = y * kWidth_channels + x + blockDim.x * i;
if (idx < kHeight * kWidth_channels) {
diff = p_in1[idx];
diff -= p_in2[idx];
p_out[idx] = fabsf(diff);
}
}
}
/******************************************************************************************/
/// Host-side wrapper functions
/******************************************************************************************/
void desaturateByCuda(float * const p_out_image,float const *p_in_image,const int kPixel_amount, const int kType,const bool kAlpha)
{
float *p_d_in=NULL;
float *p_d_out=NULL;
const size_t kBytes_in=kPixel_amount*(3+kAlpha)*sizeof(float);
const size_t kBytes_out=kPixel_amount*(1+kAlpha)* sizeof(float);
const int kBlocksize=256;
dim3 block(kBlocksize,1,1);
dim3 grid((kPixel_amount-1+kBlocksize*2)/(kBlocksize*2),1,1);
hipMalloc(&p_d_in ,kBytes_in);
hipMalloc(&p_d_out,kBytes_out);
hipMemcpy(p_d_in,p_in_image,kBytes_in,hipMemcpyHostToDevice);
if(kAlpha)
{
hipLaunchKernelGGL(( kernelDesaturateAlpha), dim3(grid),dim3(block),kBlocksize*8* sizeof(float), 0, p_d_out,p_d_in,kPixel_amount,kType);
}
else
{
hipLaunchKernelGGL(( kernelDesaturate), dim3(grid),dim3(block),kBlocksize*6* sizeof(float), 0, p_d_out,p_d_in,kPixel_amount,kType);
}
hipMemcpy(p_out_image,p_d_out,kBytes_out,hipMemcpyDeviceToHost);
hipFree(p_d_in);
hipFree(p_d_out);
}
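/* Illustrative call (a sketch, assuming an interleaved RGB float buffer with
 * no alpha channel): convert to luminance with kType==2, i.e. the
 * 0.21/0.72/0.07 weighting implemented in kernelDesaturate.
 *
 *   std::vector<float> rgb(width * height * 3);   // filled elsewhere
 *   std::vector<float> gray(width * height);
 *   desaturateByCuda(gray.data(), rgb.data(), width * height, 2, false);
 */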
void doubleSizeByCuda(float * const p_out_image,float const * const p_in_image,int const kWidth,int const kHeight,int const kChannels)
{
int const kOut_width=kWidth<<1;
int const kOut_height=kHeight<<1;
int const kSize_in=kWidth*kHeight;
int const kSize_out=kOut_width*kOut_height;
size_t const kBytes_in =kSize_in *kChannels* sizeof(float);
size_t const kBytes_out=kSize_out*kChannels* sizeof(float);
float *p_d_in=NULL;
float *p_d_out=NULL;
hipMalloc((void**)&p_d_in ,kBytes_in);
hipMalloc((void**)&p_d_out,kBytes_out);
hipMemcpy(p_d_in,p_in_image,kBytes_in,hipMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block2 (x,y,1);
dim3 grid2 ((kOut_width-1+x*3)/(x*3),(kOut_height-1+y)/y,1);
hipLaunchKernelGGL(( kernelDoubleSize2), dim3(grid2),dim3(block2), 0, 0, p_d_out,p_d_in,kOut_width,kOut_height,kWidth,kChannels);
hipMemcpy(p_out_image,p_d_out,kBytes_out,hipMemcpyDeviceToHost);
// free device memory
hipFree(p_d_in);
hipFree(p_d_out);
}
void halfSizeByCuda(float * const p_out_image,float const * const p_in_image,int const kWidth,int const kHeight,int const kChannels)
{
int kOut_width=(kWidth+1)>>1;
int kOut_height=(kHeight+1)>>1;
int const kSize_in=kWidth*kHeight;
int const kSize_out=kOut_width*kOut_height;
size_t const kBytes_in =kSize_in *kChannels* sizeof(float);
size_t const kBytes_out=kSize_out*kChannels* sizeof(float);
float *p_d_in=NULL;
float *p_d_out=NULL;
hipMalloc((void**)&p_d_out,kBytes_out);
hipMalloc((void**)&p_d_in, kBytes_in);
hipMemcpy(p_d_in,p_in_image,kBytes_in,hipMemcpyHostToDevice);
int const x=32;
int const y=8;
dim3 block (x,y,1);
dim3 grid ((kOut_width-1+x*2)/(x*2),(kOut_height-1+y)/y,1);
hipLaunchKernelGGL(( kernelHalfSize1), dim3(grid),dim3(block), 0, 0, p_d_out,p_d_in,kOut_width,kOut_height,kWidth,kHeight,kChannels);
hipMemcpy(p_out_image,p_d_out,kBytes_out,hipMemcpyDeviceToHost);
hipFree(p_d_in);
hipFree(p_d_out);
}
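/* Illustrative pyramid step (a sketch; level0/level1 are assumed host buffers):
 * repeated halving follows the (dim+1)>>1 rounding used above.
 *
 *   int w1 = (kW + 1) >> 1, h1 = (kH + 1) >> 1;
 *   std::vector<float> level1(w1 * h1 * kC);
 *   halfSizeByCuda(level1.data(), level0.data(), kW, kH, kC);
 */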
void halfSizeGaussianByCuda(float * const p_out_image,float const * const p_in_image, int const kWidth,int const kHeight,int const kChannels,float sigma2)
{
int kOut_width=(kWidth+1)>>1;
int kOut_height=(kHeight+1)>>1;
int const kSize_in=kWidth*kHeight;
int const kSize_out=kOut_width*kOut_height;
// input/output buffer sizes in bytes
size_t const kBytes_in =kSize_in *kChannels* sizeof(float);
size_t const kBytes_out=kSize_out*kChannels* sizeof(float);
float h_w[3];
// device pointers
float *p_d_w=NULL;
float *p_d_in=NULL;
float *p_d_out=NULL;
// Gaussian tap weights
h_w[0] = ::exp(-0.5f / (2.0f * sigma2));
h_w[1] = ::exp(-2.5f / (2.0f * sigma2));
h_w[2] = ::exp(-4.5f / (2.0f * sigma2));
// allocate device memory
hipMalloc((void**)&p_d_w,3* sizeof(float));
hipMalloc((void**)&p_d_in ,kBytes_in);
hipMalloc((void**)&p_d_out,kBytes_out);
// copy inputs host -> device
hipMemcpy(p_d_in,p_in_image,kBytes_in,hipMemcpyHostToDevice);
hipMemcpy(p_d_w,h_w,3* sizeof(float),hipMemcpyHostToDevice);
int x=32;
int y=4;
// grid/block configuration
dim3 block(x, y, 1);
dim3 grid((kOut_width - 1 + x) / (x), (kOut_height - 1 + y) / y, 1);
hipLaunchKernelGGL(( kernelHalfSizeGauss1), dim3(grid), dim3(block) , 0, 0, p_d_out, p_d_in, kOut_width, kOut_height, kWidth, kHeight, kChannels, p_d_w);
// copy result device -> host
hipMemcpy(p_out_image, p_d_out, kBytes_out, hipMemcpyDeviceToHost);
// free device memory
hipFree(p_d_w);
hipFree(p_d_in);
hipFree(p_d_out);
}
int blurGaussianByCuda(float * const p_out_image,float const * const p_in_image, int const kWidth,int const kHeight,int const kChannels,float sigma)
{
// flattened row width and buffer size
int const kFact_width=kWidth*kChannels;
int const kSize_image=kWidth*kHeight;
size_t const kBytes=kSize_image*kChannels* sizeof(float);
int const kSize = ::ceil(sigma * 2.884f); // kernel radius; full kernel width is kSize*2+1
std::vector<float> v_kernel(kSize + 1); // one-sided Gaussian weights (index = distance from center)
float weight = 0;
for (int i = 0; i < kSize + 1; ++i)
{
v_kernel[i] = math::func::gaussian((float)i, sigma);//kernel[0]=1,kernel[i]=wi;
weight += v_kernel[i]*2;
}
weight-=v_kernel[0];
int const kBytes_blur=(kSize+1)*sizeof(float);
// device pointers
float *p_d_in=NULL;
float *p_d_out=NULL;
float *p_d_tmp=NULL;
float *p_d_blur=NULL;
// allocate device memory
hipMalloc((void**)&p_d_in ,kBytes);
hipMalloc((void**)&p_d_tmp ,kBytes);
hipMalloc((void**)&p_d_out ,kBytes);
hipMalloc((void**)&p_d_blur,kBytes_blur);
// copy inputs host -> device
hipMemcpy(p_d_in ,p_in_image,kBytes,hipMemcpyHostToDevice);
hipMemcpy(p_d_blur,&v_kernel[0],kBytes_blur,hipMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kFact_width-1+x*2)/(x*2),(kHeight-1+y)/y,1);
// blur along x
hipLaunchKernelGGL(( kernelGaussBlurX1), dim3(grid),dim3(block),(kSize+1)* sizeof(float), 0, p_d_tmp,p_d_in,p_d_blur, kWidth, kHeight,kChannels,kSize,weight);
    //Gaussian blur along y
x=32;
y=8;
dim3 block1(x,y,1);
dim3 grid1((kFact_width-1+x*3)/(x*3),(kHeight-1+y)/y,1);
hipLaunchKernelGGL(( kernelGaussBlurY2<float>), dim3(grid1),dim3(block1),(kSize+1)* sizeof(float), 0, p_d_out,p_d_tmp,p_d_blur,kFact_width,kHeight,kSize,weight);
    //copy data from GPU back to CPU
hipMemcpy(p_out_image,p_d_out,kBytes,hipMemcpyDeviceToHost);
//
hipFree(p_d_in);
hipFree(p_d_tmp);
hipFree(p_d_out);
hipFree(p_d_blur);
return 0;
}
int blurGaussian2ByCuda(float * const p_out_image,float const * const p_in_image, int const kWidth,int const kHeight,int const kChannels,float sigma2)
{
float sigma = sqrt(sigma2);
blurGaussianByCuda(p_out_image,p_in_image,kWidth,kHeight,kChannels,sigma);
return 0;
}
int subtractByCuda(float * const p_out_image,float const * const p_in_image1,float const * const p_in_image2,int const kWidth,int const kHeight,int const kChannels)
{
int const kSize=kWidth*kHeight;
size_t const kBytes=kSize*kChannels*sizeof(float);
//
float *p_d_in1=NULL;
float *p_d_in2=NULL;
float *p_d_out=NULL;
//
hipMalloc((void**)&p_d_in1,kBytes);
hipMalloc((void**)&p_d_in2,kBytes);
hipMalloc((void**)&p_d_out,kBytes);
    //copy data (cpu -> gpu)
hipMemcpy(p_d_in1,p_in_image1,kBytes,hipMemcpyHostToDevice);
hipMemcpy(p_d_in2,p_in_image2,kBytes,hipMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kWidth*kChannels-1+x*3)/(x*3),(kHeight-1+y)/(y),1);
hipLaunchKernelGGL(( kernelSubtract2), dim3(grid),dim3(block), 0, 0, p_d_out,p_d_in1,p_d_in2,kWidth*kChannels,kHeight);
    //copy data (gpu -> cpu)
hipMemcpy(p_out_image,p_d_out,kBytes,hipMemcpyDeviceToHost);
//
hipFree(p_d_in1);
hipFree(p_d_in2);
hipFree(p_d_out);
return 0;
}
template <class T>
int differenceByCu(T * const p_out_image,T const * const p_in_image1,T const * const p_in_image2,int const kWidth,int const kHeight,int const kChannels)
{
int const kSize=kWidth*kHeight;
size_t const kBytes=kSize*kChannels*sizeof(T);
//
T *p_d_in1=NULL;
T *p_d_in2=NULL;
T *p_d_out=NULL;
//
hipMalloc((void**)&p_d_in1,kBytes);
hipMalloc((void**)&p_d_in2,kBytes);
hipMalloc((void**)&p_d_out,kBytes);
    //copy data (cpu -> gpu)
hipMemcpy(p_d_in1,p_in_image1,kBytes,hipMemcpyHostToDevice);
hipMemcpy(p_d_in2,p_in_image2,kBytes,hipMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kWidth*kChannels-1+x*3)/(x*3),(kHeight-1+y)/(y),1);
hipLaunchKernelGGL(( kernelDifference2<T>), dim3(grid),dim3(block), 0, 0, p_d_out,p_d_in1,p_d_in2,kWidth*kChannels,kHeight);
    //copy data (gpu -> cpu)
hipMemcpy(p_out_image,p_d_out,kBytes,hipMemcpyDeviceToHost);
//
hipFree(p_d_in1);
hipFree(p_d_in2);
hipFree(p_d_out);
return 0;
}
template <typename T>
int differenceByCuda(T * const p_out_image,T const * const p_in_image1,T const * const p_in_image2,int const kWidth,int const kHeight,int const kChannels)
{
return 0;
}
template<>
int differenceByCuda<float>(float * const p_out_image,float const * const p_in_image1,float const * const p_in_image2,int const kWidth,int const kHeight,int const kChannels)
{
differenceByCu<float>(p_out_image,p_in_image1,p_in_image2,kWidth,kHeight,kChannels);
return 0;
}
template<>
int differenceByCuda<char>(char * const p_out_image,char const * const p_in_image1,char const * const p_in_image2,int const kWidth,int const kHeight,int const kChannels)
{
differenceByCu<char>(p_out_image,p_in_image1,p_in_image2,kWidth,kHeight,kChannels);
return 0;
}
|
e3871777e711cd209178cbbf5d3620994c215e55.cu
|
/**
* @desc   CUDA implementations of the functions declared in image_process.hpp
* @author 杨丰拓
* @date 2019-04-16
* @email [email protected]
*/
#include <cuda_runtime.h>
#include <cstdio>
#include <iostream>
#include "MATH/Function/function.hpp"
#include "cuda_include/sharemem.cuh"
#include "cuda_runtime.h"
#include <vector>
/***********************************************************************************/
__global__ void kernelDesaturateAlpha(float *p_out,float const *p_in, const int kSize,const int kType)
{
extern __shared__ float s[];
int in_idx = threadIdx.x + blockIdx.x * blockDim.x * 8 ;
int out_idx = threadIdx.x + blockIdx.x * blockDim.x * 4 ;
int tid=threadIdx.x;
int stride=tid*4;
int stride1=stride+blockDim.x*4;
if (in_idx< kSize * 4)
{
s[tid] =p_in[in_idx];
s[tid+blockDim.x] =p_in[in_idx+blockDim.x];
s[tid+blockDim.x*2]=p_in[in_idx+blockDim.x*2];
s[tid+blockDim.x*3]=p_in[in_idx+blockDim.x*3];
s[tid+blockDim.x*4]=p_in[in_idx+blockDim.x*4];
s[tid+blockDim.x*5]=p_in[in_idx+blockDim.x*5];
s[tid+blockDim.x*6]=p_in[in_idx+blockDim.x*6];
s[tid+blockDim.x*7]=p_in[in_idx+blockDim.x*7];
}
__syncthreads();
if(kType==0)
{
p_out[out_idx] =max(s[stride+0],max(s[stride+1],s[stride+2]));
p_out[out_idx+blockDim.x*2]=max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
}
if(kType==1)
{
float const max_v = max(s[stride+0],max(s[stride+1],s[stride+2]));
float const min_v = min(s[stride+0],min(s[stride+1],s[stride+2]));
p_out[out_idx]=0.5f*(max_v+min_v);
float const max_s = max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
float const min_s = min(s[stride1+0],min(s[stride1+1],s[stride1+2]));
p_out[out_idx+blockDim.x*2]=0.5f*(max_s+min_s);
}
if(kType==2)
{
p_out[out_idx] =0.21f * s[stride+0] + 0.72f * s[stride+1] + 0.07f * s[stride+2];
p_out[out_idx+blockDim.x*2]=0.21f * s[stride1+0] + 0.72f * s[stride1+1] + 0.07f * s[stride1+2];
}
if(kType==3)
{
p_out[out_idx] =0.30f * s[stride+0] + 0.59f * s[stride+1] + 0.11f * s[stride+2];
p_out[out_idx+blockDim.x*2]=0.30f * s[stride1+0] + 0.59f * s[stride1+1] + 0.11f * s[stride1+2];
}
if(kType==4)
{
p_out[out_idx] =((float)(s[stride+0] + s[stride+1] + s[stride+2])) / 3.0f;
p_out[out_idx+blockDim.x*2]=((float)(s[stride1+0] + s[stride1+1] + s[stride1+2])) / 3.0f;
}
p_out[out_idx+tid+1] =s[stride+3];
p_out[out_idx+blockDim.x*2+tid+1]=s[stride1+3];
}
__global__ void kernelDesaturate(float *p_out,float const *p_in, const int kSize,const int kType)
{
extern __shared__ float s[];
int in_idx = threadIdx.x + blockIdx.x * blockDim.x * 6 ;
int out_idx = threadIdx.x + blockIdx.x * blockDim.x * 2 ;
int tid=threadIdx.x;
int stride=tid*3;
int stride1=stride+blockDim.x*3;
if (in_idx< kSize * 3)
{
s[tid] =p_in[in_idx];
s[tid+blockDim.x] =p_in[in_idx+blockDim.x];
s[tid+blockDim.x*2]=p_in[in_idx+blockDim.x*2];
s[tid+blockDim.x*3]=p_in[in_idx+blockDim.x*3];
s[tid+blockDim.x*4]=p_in[in_idx+blockDim.x*4];
s[tid+blockDim.x*5]=p_in[in_idx+blockDim.x*5];
}
__syncthreads();
if(kType==0)
{
p_out[out_idx] =max(s[stride+0],max(s[stride+1],s[stride+2]));
p_out[out_idx+blockDim.x]=max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
}
if(kType==1)
{
float const max_v = max(s[stride+0],max(s[stride+1],s[stride+2]));
float const min_v = min(s[stride+0],min(s[stride+1],s[stride+2]));
p_out[out_idx]=0.5f*(max_v+min_v);
float const max_s = max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
float const min_s = min(s[stride1+0],min(s[stride1+1],s[stride1+2]));
p_out[out_idx+blockDim.x]=0.5f*(max_s+min_s);
}
if(kType==2)
{
p_out[out_idx] =0.21f * s[stride+0] + 0.72f * s[stride+1] + 0.07f * s[stride+2];
p_out[out_idx+blockDim.x]=0.21f * s[stride1+0] + 0.72f * s[stride1+1] + 0.07f * s[stride1+2];
}
if(kType==3)
{
p_out[out_idx] =0.30f * s[stride+0] + 0.59f * s[stride+1] + 0.11f * s[stride+2];
p_out[out_idx+blockDim.x]=0.30f * s[stride1+0] + 0.59f * s[stride1+1] + 0.11f * s[stride1+2];
}
if(kType==4)
{
p_out[out_idx] =((float)(s[stride+0] + s[stride+1] + s[stride+2])) / 3.0f;
p_out[out_idx+blockDim.x]=((float)(s[stride1+0] + s[stride1+1] + s[stride1+2])) / 3.0f;
}
}
/******************************************************************************************/
///Function: upscale the image to twice its size
/* kernel name                   elapsed time   block size
* kernelDoubleSize 3.678ms [32,4,1]
* kernelDoubleSize1 3.67ms [32,4,1]
* kernelDoubleSize2 3.532ms [32,4,1]**
* kernelDoubleSizeByShare 5.265ms [32,8,1]
* kernelDoubleSizeByShare1 4.737ms [64,8,1]
* kernelDoubleSizeByShare2 3.98ms [32,8,1]
*/
/******************************************************************************************/
/* Usage example
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x)/x,(oh-1+y)/y,1);
* kernelDoublesize<<<grid,block>>>(d_out,d_in,ow,oh,width,channels);
*/
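/* Index-mapping sketch (informal note, derived from the kernels below; not part of the
 * original sources): images are stored channel-interleaved, so one row holds
 * width*channels floats. For output pixel (ox, oy) the kernels average the four input
 * samples at rows oy>>1 and (oy+nexty)>>1 and columns ox>>1 and (ox+nextx)>>1, where
 * nexty/nextx clamp the second sample at the last row/column, each with weight 0.25.
 * The *1 / *2 variants only differ in how many output elements one thread produces
 * (the channel loop is unrolled 2x / 3x).
 */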
__global__ void kernelDoubleSize(float *p_out,float *p_in,int const kImage_x,int const kImage_y,int const kIn_width,int const kIn_Channels)
{
int out_x = threadIdx.x + blockIdx.x * blockDim.x * kIn_Channels;
int out_y = threadIdx.y + blockIdx.y * blockDim.y;
for (int c = 0; c <kIn_Channels ; ++c)
{
int fact_x = out_x + blockDim.x * c;
if(out_y<kImage_y && fact_x < kImage_x*kIn_Channels)
{
int idx =fact_x + out_y * kImage_x * kIn_Channels;
bool nexty =(out_y+1)<kImage_y;
bool nextx =(fact_x+kIn_Channels)<(kImage_x*kIn_Channels);
int yoff[2]={kIn_Channels*kIn_width*(out_y>>1),
kIn_Channels*kIn_width*((out_y+nexty)>>1)};
int xoff[2]={((fact_x / kIn_Channels) >>1)* kIn_Channels + fact_x % kIn_Channels,
(((fact_x/kIn_Channels)+nextx)>>1)*kIn_Channels+fact_x%kIn_Channels};
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
p_out[idx]=0.25f*(p_in[index[0]]+p_in[index[1]]+p_in[index[2]]+p_in[index[3]]);
}
}
}
/* Usage example
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*2)/(x*2),(oh-1+y)/y,1);
* kernel_doublesize1<<<grid,block>>>(d_out,d_in,ow,oh,width,channels);
*/
__global__ void kernelDoubleSize1(float *p_out,float *p_in,int const kImage_x,int const kImage_y,int const kIn_width,int const kIn_Channels)
{
int out_x = threadIdx.x + blockIdx.x * blockDim.x * kIn_Channels*2;
int out_y = threadIdx.y + blockIdx.y * blockDim.y;
for (int c = 0; c <kIn_Channels*2 ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kImage_y&&fact_x<kImage_x*kIn_Channels)
{
int idx=fact_x+out_y*kImage_x*kIn_Channels;
bool nexty=(out_y+1)<kImage_y;
bool nextx=(fact_x+kIn_Channels)<(kImage_x*kIn_Channels);
int yoff[2]={kIn_Channels*kIn_width*(out_y>>1),
kIn_Channels*kIn_width*((out_y+nexty)>>1)};
int xoff[2]={((fact_x/kIn_Channels)>>1)*kIn_Channels+fact_x%kIn_Channels,
(((fact_x/kIn_Channels)+nextx)>>1)*kIn_Channels+fact_x%kIn_Channels};
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
p_out[idx]=0.25f*(p_in[index[0]]+p_in[index[1]]+p_in[index[2]]+p_in[index[3]]);
}
}
}
/* Usage example
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*3)/(x*3),(oh-1+y)/y,1);
* kernel_doublesize2<<<grid,block>>>(d_out,d_in,ow,oh,width,channels);
*/
__global__ void kernelDoubleSize2(float *p_out,float *p_in,int const kImage_x,int const kImage_y,int const kIn_width,int const kIn_Channels)
{
int out_x = threadIdx.x + blockIdx.x * blockDim.x * kIn_Channels*3;
int out_y = threadIdx.y + blockIdx.y * blockDim.y;
for (int c = 0; c <kIn_Channels*3 ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kImage_y&&fact_x<kImage_x*kIn_Channels)
{
int idx=fact_x+out_y*kImage_x*kIn_Channels;
bool nexty=(out_y+1)<kImage_y;
bool nextx=(fact_x+kIn_Channels)<(kImage_x*kIn_Channels);
int yoff[2]={kIn_Channels*kIn_width*(out_y>>1),
kIn_Channels*kIn_width*((out_y+nexty)>>1)};
int xoff[2]={((fact_x/kIn_Channels)>>1)*kIn_Channels+fact_x%kIn_Channels,
(((fact_x/kIn_Channels)+nextx)>>1)*kIn_Channels+fact_x%kIn_Channels};
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
p_out[idx]=0.25f*(p_in[index[0]]+p_in[index[1]]+p_in[index[2]]+p_in[index[3]]);
}
}
}
/* Usage example
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x)/x,(oh-1+y)/y,1);
* kernel_doublesizebyshare<<<grid,block,share_x*share_y*channels*sizeof(float)>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernelDoubleSizeByShare(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
    int share_x=(blockDim.x>>1)+1;//x extent of the shared-memory tile (to be multiplied by kIn_Channels)
    int share_y=(blockDim.y>>1)+1;//y extent of the shared-memory tile
    int share_fact_x=share_x*kIn_Channels;
    int share_idx_x;
    int share_idx_y= threadIdx.y;//y index inside the shared-memory tile
int in_x0 = ((blockIdx.x * blockDim.x) >> 1) * kIn_Channels;
int in_y0 = (blockIdx.y * blockDim.y) >> 1;
int x,y,c,fact_x;
for ( c = 0; c <kIn_Channels ; ++c)
{
        share_idx_x = threadIdx.x + blockDim.x * c;//x index inside the shared-memory tile
if (share_idx_x < share_fact_x && share_idx_y < share_y)
{
x = min(in_x0 + share_idx_x, kIn_width * kIn_Channels - kIn_Channels + share_idx_x % kIn_Channels);
y = min(in_y0 + share_idx_y, kIn_height - 1);
data[share_idx_y * share_fact_x + share_idx_x] = p_in[y * kIn_width * kIn_Channels + x];
}
}
__syncthreads();
for ( c = 0; c <kIn_Channels ; ++c)
{
fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height && fact_x<kOut_width*kIn_Channels)
{
share_idx_x = threadIdx.x + blockDim.x * c;
int yoff[2]={(share_idx_y>>1)*share_fact_x,((share_idx_y+1)>>1)*share_fact_x};
int xoff[2]={(share_idx_x/kIn_Channels>>1)*kIn_Channels+share_idx_x%kIn_Channels,
((share_idx_x/kIn_Channels+1)>>1)*kIn_Channels+share_idx_x%kIn_Channels};
int out_idx=out_y*kOut_width*kIn_Channels+fact_x;
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
p_out[out_idx]=0.25f*(data[index[0]]+data[index[1]]+data[index[2]]+data[index[3]]);
}
}
}
/* Usage example
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x*2)/(x*2),(oh-1+y)/y,1);
* kernel_doublesizebyshare1<<<grid,block,share_x*share_y*2*channels*sizeof(float)>>>(d_out,d_in,kOut_width,oh,width,height,channels);
*/
__global__ void kernelDoubleSizeByShare1(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels*2;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
    int share_x=(blockDim.x>>1)+1;//x extent of the shared-memory tile (to be multiplied by kIn_Channels)
    int share_y=(blockDim.y>>1)+1;//y extent of the shared-memory tile
    int share_fact_x=share_x*kIn_Channels*2;
    int share_idx_x;
    int share_idx_y= threadIdx.y;//y index inside the shared-memory tile
int in_x0 = ((blockIdx.x * blockDim.x*2) >> 1) * kIn_Channels;
int in_y0 = (blockIdx.y * blockDim.y) >> 1;
int x,y,c,fact_x;
for ( c = 0; c <kIn_Channels*2 ; ++c)
{
        share_idx_x = threadIdx.x + blockDim.x * c;//x index inside the shared-memory tile
if (share_idx_x < share_fact_x && share_idx_y < share_y)
{
x = min(in_x0 + share_idx_x, kIn_width * kIn_Channels - kIn_Channels + share_idx_x % kIn_Channels);
y = min(in_y0 + share_idx_y, kIn_height - 1);
data[share_idx_y * share_fact_x + share_idx_x] = p_in[y * kIn_width * kIn_Channels + x];
}
}
__syncthreads();
for ( c = 0; c <kIn_Channels*2 ; ++c)
{
fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels)
{
share_idx_x = threadIdx.x + blockDim.x * c;
int yoff[2]={(share_idx_y>>1)*share_fact_x,((share_idx_y+1)>>1)*share_fact_x};
int xoff[2]={(share_idx_x/kIn_Channels>>1)*kIn_Channels+share_idx_x%kIn_Channels,
((share_idx_x/kIn_Channels+1)>>1)*kIn_Channels+share_idx_x%kIn_Channels};
int out_idx=out_y*kOut_width*kIn_Channels+fact_x;
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
p_out[out_idx]=0.25f*(data[index[0]]+data[index[1]]+data[index[2]]+data[index[3]]);
}
}
}
/* Usage example
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x*3)/(x*3),(kOut_height-1+y)/y,1);
* kernel_doublesizebyshare2<<<grid,block,share_x*share_y*3*channels*sizeof(float)>>>(d_out,d_in,kOut_width,kOut_height,width,height,channels);
*/
__global__ void kernelDoubleSizeByShare2(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels*3;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
    int share_x=(blockDim.x>>1)+1;//x extent of the shared-memory tile (to be multiplied by kIn_Channels)
    int share_y=(blockDim.y>>1)+1;//y extent of the shared-memory tile
    int share_fact_x=share_x*kIn_Channels*3;
    int share_idx_x;
    int share_idx_y = threadIdx.y;//y index inside the shared-memory tile
int in_x0 = ((blockIdx.x * blockDim.x*3) >> 1) * kIn_Channels;
int in_y0 = (blockIdx.y * blockDim.y) >> 1;
int x,y,c,fact_x;
for ( c = 0; c <kIn_Channels*3 ; ++c)
{
        share_idx_x = threadIdx.x + blockDim.x * c;//x index inside the shared-memory tile
if (share_idx_x < share_fact_x && share_idx_y < share_y)
{
x = min(in_x0 + share_idx_x, kIn_width * kIn_Channels - kIn_Channels + share_idx_x % kIn_Channels);
y = min(in_y0 + share_idx_y, kIn_height - 1);
data[share_idx_y * share_fact_x + share_idx_x] = p_in[y * kIn_width * kIn_Channels + x];
}
}
__syncthreads();
for ( c = 0; c <kIn_Channels*3 ; ++c)
{
fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels)
{
share_idx_x = threadIdx.x + blockDim.x * c;
int yoff[2]={(share_idx_y>>1)*share_fact_x,((share_idx_y+1)>>1)*share_fact_x};
int xoff[2]={(share_idx_x/kIn_Channels>>1)*kIn_Channels+share_idx_x%kIn_Channels,
((share_idx_x/kIn_Channels+1)>>1)*kIn_Channels+share_idx_x%kIn_Channels};
int out_idx=out_y*kOut_width*kIn_Channels+fact_x;
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
p_out[out_idx]=0.25f*(data[index[0]]+data[index[1]]+data[index[2]]+data[index[3]]);
}
}
}
/******************************************************************************************/
///Function: shrink the image to half its size
/* kernel name                   elapsed time   block size
*kernelHalfSize 636.275us [32,8,1]
*kernelHalfSize1 634.383us [32,8,1]**
*kernelHalfSize2 641.6us [32,8,1]
*kernelHalfSizeByShare 643.698us [32,4,1]
*kernelHalfSizeByShare1 671.245us [32,4,1]
*/
/******************************************************************************************/
/* Usage example
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x)/x,(kOut_height-1+y)/y,1);
* kernel_halfsize<<<grid,block>>>(d_out,d_in,kOut_width,kOut_height,width,height,channels);
*/
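/* Downsampling sketch (informal note, derived from the kernels below; not part of the
 * original sources): output pixel (ox, oy) is the 0.25-weighted average of the 2x2
 * input block starting at (2*ox, 2*oy). The second row/column is clamped so that
 * images with an odd width or height reuse the last valid row/column instead of
 * reading out of bounds. The *1 / *2 variants unroll the per-thread channel loop
 * 2x / 3x; the ByShare variants stage the 2x2 blocks through shared memory first.
 */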
__global__ void kernelHalfSize(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int stride=kIn_width*kIn_Channels;
for(int c=0;c<kIn_Channels;c++)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels) {
int irow1 = out_y * 2 * stride;
int irow2 = irow1 + stride * (out_y * 2 + 1 < kIn_height);
int icol1 = (fact_x / kIn_Channels) * 2 * kIn_Channels + fact_x % kIn_Channels;
int icol2 = min((icol1 + kIn_Channels), (kIn_width * kIn_Channels - kIn_Channels + fact_x % kIn_Channels));
int index[4] = {irow1 + icol1,
irow1 + icol2,
irow2 + icol1,
irow2 + icol2};
int out_idx = out_y * kOut_width*kIn_Channels + fact_x;
p_out[out_idx] = 0.25f * (p_in[index[0]] + p_in[index[1]] + p_in[index[2]] + p_in[index[3]]);
}
}
}
/* Usage example
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x*2)/(x*2),(kOut_height-1+y)/y,1);
* kernel_halfsize1<<<grid,block>>>(d_out,d_in,kOut_width,kOut_height,width,height,channels);
*/
__global__ void kernelHalfSize1(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels*2;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int stride=kIn_width*kIn_Channels;
for(int c=0;c<kIn_Channels*2;c++)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels) {
int irow1 = out_y * 2 * stride;
int irow2 = irow1 + stride * (out_y * 2 + 1 < kIn_height);
int icol1 = (fact_x / kIn_Channels) * 2 * kIn_Channels + fact_x % kIn_Channels;
int icol2 = min((icol1 + kIn_Channels), (kIn_width * kIn_Channels - kIn_Channels + fact_x % kIn_Channels));
int index[4] = {irow1 + icol1,
irow1 + icol2,
irow2 + icol1,
irow2 + icol2};
int out_idx = out_y * kOut_width*kIn_Channels + fact_x;
p_out[out_idx] = 0.25f * (p_in[index[0]] + p_in[index[1]] + p_in[index[2]] + p_in[index[3]]);
}
}
}
/* Usage example
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x*3)/(x*3),(kOut_height-1+y)/y,1);
* kernel_halfsize2<<<grid,block>>>(d_out,d_in,kOut_width,kOut_height,width,height,channels);
*/
__global__ void kernelHalfSize2(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels*3;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int stride=kIn_width*kIn_Channels;
for(int c=0;c<kIn_Channels*3;c++)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height && fact_x < kOut_width*kIn_Channels) {
int irow1 = out_y * 2 * stride;
int irow2 = irow1 + stride * (out_y * 2 + 1 < kIn_height);
int icol1 = (fact_x / kIn_Channels) * 2 * kIn_Channels + fact_x % kIn_Channels;
int icol2 = min((icol1 + kIn_Channels), (kIn_width * kIn_Channels - kIn_Channels + fact_x % kIn_Channels));
int index[4] = {irow1 + icol1,
irow1 + icol2,
irow2 + icol1,
irow2 + icol2};
int out_idx = out_y * kOut_width*kIn_Channels + fact_x;
p_out[out_idx] = 0.25f * (p_in[index[0]] + p_in[index[1]] + p_in[index[2]] + p_in[index[3]]);
}
}
}
/* Usage example
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x)/x,(kOut_height-1+y)/y,1);
* kernel_halfsizebyshare<<<grid,block,share_x*share_y*channels* sizeof(float)>>>(d_out,d_in,kOut_width,kOut_height,width,height,channels);
*/
__global__ void kernelHalfSizeByShare(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
extern __shared__ float data[];
    int block_stride=blockDim.x*kIn_Channels;//x stride of one thread block
    int out_x=threadIdx.x+blockIdx.x*block_stride;//starting x index in the output
    int out_y=threadIdx.y+blockIdx.y*blockDim.y;//y index in the output
    int stride=kIn_width*kIn_Channels;//row stride of the input image
    int in_x0=blockIdx.x*block_stride*2;//starting x of this block in the input image
    int in_y0=blockIdx.y*blockDim.y*2;//starting y of this block in the input image
    int in_x1=in_x0+block_stride;
    int in_y1=in_y0+blockDim.y;
    int share_x=blockDim.x*2*kIn_Channels;//maximum number of elements along x in the shared tile
for (int c = 0; c < kIn_Channels; ++c)
{
int fact_x_s=threadIdx.x+blockDim.x*c;
        int channel=fact_x_s%kIn_Channels;//which colour channel
int x_s = fact_x_s + block_stride;
int y_s0=threadIdx.y*share_x;
int y_s1=y_s0+blockDim.y*share_x;
int fact_iw=channel+stride-kIn_Channels;
int x0=min(in_x0+fact_x_s,fact_iw);
int x1=min(in_x1+fact_x_s,fact_iw);
int y0=min(in_y0+threadIdx.y,kIn_height-1)*stride;
int y1=min(in_y1+threadIdx.y,kIn_height-1)*stride;
        int deta=((fact_x_s/kIn_Channels)%2)*block_stride;//is the x coordinate of this pixel odd?
        int x_fs0=(fact_x_s/kIn_Channels>>1)*kIn_Channels+channel+deta;//first x position stored in shared memory
        int x_fs1=(x_s/kIn_Channels>>1)*kIn_Channels+channel+deta;//second x position stored in shared memory
data[y_s0+x_fs0]=p_in[y0+x0];
data[y_s0+x_fs1]=p_in[y0+x1];
data[y_s1+x_fs0]=p_in[y1+x0];
data[y_s1+x_fs1]=p_in[y1+x1];;
}
__syncthreads();
for (int c = 0; c <kIn_Channels ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels)
{
int srow1=threadIdx.y*2*share_x;
int srow2=srow1+share_x;
int scol1=threadIdx.x+blockDim.x*c;
int scol2=scol1+block_stride;
int index[4] = {srow1 + scol1,
srow1 + scol2,
srow2 + scol1,
srow2 + scol2};
int out_idx = out_y * kOut_width*kIn_Channels + fact_x;
p_out[out_idx] = 0.25f * (data[index[0]] + data[index[1]] + data[index[2]] + data[index[3]]);
}
}
}
/* Usage example
* dim3 block (x,y,1);
* dim3 grid ((kOut_width-1+x*2)/(x*2),(kOut_height-1+y)/y,1);
* kernel_halfsizebyshare1<<<grid,block,share_x*share_y*channels* sizeof(float)>>>(d_out,d_in,kOut_width,kOut_height,width,height,channels);
*/
__global__ void kernelHalfSizeByShare1(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels)
{
extern __shared__ float data[];
    int block_stride=blockDim.x*kIn_Channels*2;//x stride of one thread block
    int out_x=threadIdx.x+blockIdx.x*block_stride;//starting x index in the output
    int out_y=threadIdx.y+blockIdx.y*blockDim.y;//y index in the output
    int stride=kIn_width*kIn_Channels;//row stride of the input image
    int in_x0=blockIdx.x*block_stride*2;//starting x of this block in the input image
    int in_y0=blockIdx.y*blockDim.y*2;//starting y of this block in the input image
    int in_x1=in_x0+block_stride;
    int in_y1=in_y0+blockDim.y;
    int share_x=blockDim.x*4*kIn_Channels;//maximum number of elements along x in the shared tile
for (int c = 0; c < kIn_Channels*2; ++c)
{
int fact_x_s=threadIdx.x+blockDim.x*c;
        int channel=fact_x_s%kIn_Channels;//which colour channel
int x_s=fact_x_s+block_stride;
int y_s0=threadIdx.y*share_x;
int y_s1=y_s0+blockDim.y*share_x;
int fact_iw=channel+stride-kIn_Channels;
int x0=min(in_x0+fact_x_s,fact_iw);
int x1=min(in_x1+fact_x_s,fact_iw);
int y0=min(in_y0+threadIdx.y,kIn_height-1)*stride;
int y1=min(in_y1+threadIdx.y,kIn_height-1)*stride;
        int deta=((fact_x_s/kIn_Channels)%2)*block_stride;//is the x coordinate of this pixel odd?
        int x_fs0=(fact_x_s/kIn_Channels>>1)*kIn_Channels+channel+deta;//first x position stored in shared memory
        int x_fs1=(x_s/kIn_Channels>>1)*kIn_Channels+channel+deta;//second x position stored in shared memory
data[y_s0+x_fs0]=p_in[y0+x0];
data[y_s0+x_fs1]=p_in[y0+x1];
data[y_s1+x_fs0]=p_in[y1+x0];
data[y_s1+x_fs1]=p_in[y1+x1];;
}
__syncthreads();
for (int c = 0; c <kIn_Channels*2 ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels)
{
int srow1=threadIdx.y*2*share_x;
int srow2=srow1+share_x;
int scol1=threadIdx.x+blockDim.x*c;
int scol2=scol1+block_stride;
int index[4] = {srow1 + scol1,
srow1 + scol2,
srow2 + scol1,
srow2 + scol2};
int out_idx = out_y * kOut_width*kIn_Channels + fact_x;
p_out[out_idx] = 0.25f * (data[index[0]] + data[index[1]] + data[index[2]] + data[index[3]]);
}
}
}
/******************************************************************************************/
///Function: Gaussian-weighted downsampling to half size
/* kernel name                   elapsed time   block size
* kernel_halfsize_gauss 1.856ms [32,8,1]
* kernel_halfsize_gauss1 936.937us [32,4,1]
*/
/******************************************************************************************/
/* Usage example
* dim3 block(x, y, 1);
* dim3 grid((ow - 1 + x) / (x), (kOut_height - 1 + y) / y, 1);
* kernel_halfsize_guass << < grid, block >> > (d_out, d_in, ow, oh, width, height, channels, d_w);
*
__global__ void kernel_halfsize_guass(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic,float const *w)
{
    //the extra runtime comes from the printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[2]] * dw[0]); calls, which read the in array again
    //with the printf calls commented out the timing is close to kernel_halfsize_guass1
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int istride=iw*ic;
float dw[3];
dw[0]=w[0];
dw[1]=w[1];
dw[2]=w[2];
for (int c = 0; c <ic ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
int out_idx = out_y * ow * ic + fact_x;
            int channels = fact_x % ic;//colour channel
            int out_xf = fact_x / ic;//x coordinate of the output pixel
int ix = out_xf << 1;
int iy = out_y << 1;
int row[4], col[4];
row[0] = max(0, iy - 1) * istride;
row[1] = iy * istride;
row[2] = min(iy + 1, (int)ih - 1) * istride;
row[3] = min(iy + 2, (int)ih - 2) * istride;
col[0] = max(0, ix - 1) * ic + channels;
col[1] = ix * ic + channels;
col[2] = min(ix + 1, (int)iw - 1) * ic + channels;
col[3] = min(ix + 2, (int)iw - 1) * ic + channels;
float sum = 0.0f;
int t=6;
if(out_idx==t);//printf("idx:%d\n",t);
sum += in[row[0] + col[0]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[0]] * dw[2]);
sum += in[row[0] + col[1]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[1]] * dw[1]);
sum += in[row[0] + col[2]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[2]] * dw[1]);
sum += in[row[0] + col[3]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[3]] * dw[2]);
sum += in[row[1] + col[0]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[0]] * dw[1]);
sum += in[row[1] + col[1]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[1]] * dw[0]);
sum += in[row[1] + col[2]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[2]] * dw[0]);
sum += in[row[1] + col[3]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[3]] * dw[1]);
sum += in[row[2] + col[0]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[0]] * dw[1]);
sum += in[row[2] + col[1]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[1]] * dw[0]);
sum += in[row[2] + col[2]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[2]] * dw[0]);
sum += in[row[2] + col[3]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[3]] * dw[1]);
sum += in[row[3] + col[0]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[0]] * dw[2]);
sum += in[row[3] + col[1]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[1]] * dw[1]);
sum += in[row[3] + col[2]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[2]] * dw[1]);
sum += in[row[3] + col[3]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[3]] * dw[2]);
out[out_idx] = sum / (float)(4 * dw[2] + 8 * dw[1] + 4 * dw[0]);
}
}
}
*/
/* Usage example
* dim3 block(x, y, 1);
* dim3 grid((kOut_width - 1 + x) / (x), (oh - 1 + y) / y, 1);
* kernel_halfsize_gauss1 << < grid, block >> > (d_out, d_in, kOut_width, kOut_height, width, height, channels, d_w);
*/
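/* Weight-layout sketch (informal note, derived from kernelHalfSizeGauss1 below; not part
 * of the original sources): each output pixel gathers a 4x4 block of input pixels centred
 * between the two middle samples. With w0/w1/w2 computed on the host from sigma2, the taps are
 *      w2 w1 w1 w2
 *      w1 w0 w0 w1
 *      w1 w0 w0 w1
 *      w2 w1 w1 w2
 * and the sum is normalised by 4*w2 + 8*w1 + 4*w0, the same divisor used in the kernel.
 * The constants 0.5 / 2.5 / 4.5 in the host-side exp() calls are the squared distances of
 * the centre / edge / corner taps from the output sample position.
 */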
__global__ void kernelHalfSizeGauss1(float *p_out,float *p_in,int const kOut_width,int const kOut_height,int const kIn_width,int const kIn_height,int const kIn_Channels,float const *p_w)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*kIn_Channels;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int istride=kIn_width*kIn_Channels;
float dw[3];
dw[0]=p_w[0];
dw[1]=p_w[1];
dw[2]=p_w[2];
for (int c = 0; c <kIn_Channels ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<kOut_height&&fact_x<kOut_width*kIn_Channels)
{
int out_idx = out_y * kOut_width * kIn_Channels + fact_x;
            int channels = fact_x % kIn_Channels;//colour channel
            int out_xf = fact_x / kIn_Channels;//x coordinate of the output pixel
int ix = out_xf << 1;
int iy = out_y << 1;
int row[4], col[4];
row[0] = max(0, iy - 1) * istride;
row[1] = iy * istride;
row[2] = min(iy + 1, (int)kIn_height - 1) * istride;
row[3] = min(iy + 2, (int)kIn_height - 2) * istride;
col[0] = max(0, ix - 1) * kIn_Channels + channels;
col[1] = ix * kIn_Channels + channels;
col[2] = min(ix + 1, (int)kIn_width - 1) * kIn_Channels + channels;
col[3] = min(ix + 2, (int)kIn_width - 1) * kIn_Channels + channels;
float sum = 0.0f;
sum+=p_in[row[0] + col[0]] * dw[2];
sum+=p_in[row[0] + col[1]] * dw[1];
sum+=p_in[row[0] + col[2]] * dw[1];
sum+=p_in[row[0] + col[3]] * dw[2];
sum+=p_in[row[1] + col[0]] * dw[1];
sum+=p_in[row[1] + col[1]] * dw[0];
sum+=p_in[row[1] + col[2]] * dw[0];
sum+=p_in[row[1] + col[3]] * dw[1];
sum+=p_in[row[2] + col[0]] * dw[1];
sum+=p_in[row[2] + col[1]] * dw[0];
sum+=p_in[row[2] + col[2]] * dw[0];
sum+=p_in[row[2] + col[3]] * dw[1];
sum+=p_in[row[3] + col[0]] * dw[2];
sum+=p_in[row[3] + col[1]] * dw[1];
sum+=p_in[row[3] + col[2]] * dw[1];
sum+=p_in[row[3] + col[3]] * dw[2];
p_out[out_idx] = sum / (float)(4 * dw[2] + 8 * dw[1] + 4 * dw[0]);
}
}
}
/******************************************************************************************/
///Function: Gaussian blur along the x dimension
/* kernel name                   elapsed time   block size
* kernelGaussBlurX 2.561ms [32,4,1]
* kernelGaussBlurX1 2.025ms [32,4,1]**
* kernelGaussBlurX2 2.148ms [32,4,1]
*/
/******************************************************************************************/
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/y,1);
* kernel_gaussBlur_x<<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,w,h,c,ks,weight);
*/
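/* Separable-blur sketch (informal note, not part of the original sources): only the
 * non-negative half of the symmetric 1-D Gaussian kernel is stored (kSize+1 coefficients).
 * Each block first copies it into shared memory; every output element then accumulates
 * p_in over offsets i = -kSize..kSize using data[abs(i)], clamping the sample coordinate
 * to the image border, and finally divides by the precomputed weight sum kWeight.
 */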
__global__ void kernelGaussBlurX(float *const p_out,float const *const p_in,float const * const p_blur,int const kWidth,int const kHeight,int const kChannels,int const kSize,float const kWeight)
{
extern __shared__ float data[];
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(kSize+1))
{
data[share_idx]=p_blur[share_idx];
}
__syncthreads();
int fact_x=x/kChannels;
int channels=x%kChannels;
int max_x=y*kWidth*kChannels;
int out_idx=max_x+x;
if(fact_x<kWidth&&y<kHeight)
{
float accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
            int idx =max(0,min(fact_x+i,kWidth-1));//x coordinate in the input image of this 1-D Gaussian tap: kSize taps on each side of the centre pixel
accum +=p_in[max_x+idx*kChannels+channels]* data[abs(i)];
}
p_out[out_idx]=accum / kWeight;
}
}
/*Usage example
* dim3 block(x,y,1);
* dim3 grid((kWidth*c-1+x*2)/(x*2),(h-1+y)/y,1);
* kernel_gaussBlur_x1<<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,kWidth,h,c,ks,weight);
*/
__global__ void kernelGaussBlurX1(float *const p_out,float const *const p_in,float const * const p_blur,int const kWidth,int const kHeight,int const kChannels,int const kSize,float const kWeight)
{
extern __shared__ float data[];
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(kSize+1))
{
data[share_idx]=p_blur[share_idx];
}
__syncthreads();
int fact_x=x/kChannels;
int channels=x%kChannels;
int max_x=y*kWidth*kChannels;
int out_idx=max_x+x;
if(fact_x<kWidth&&y<kHeight)
{
float accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
            int idx =max(0,min(fact_x+i,kWidth-1));//x coordinate in the input image of this 1-D Gaussian tap: kSize taps on each side of the centre pixel
accum +=p_in[max_x+idx*kChannels+channels]* data[abs(i)];
//if(out_idx==10)printf("%f\t%f\n",accum,in[max_x+idx*c+channels]* data[abs(i)]);
}
p_out[out_idx]=accum / kWeight;
}
    //second unrolled output element
int fact_x1=(x+blockDim.x)/kChannels;
int channels1=(x+blockDim.x)%kChannels;
int out_idx1=max_x+x+blockDim.x;
if(fact_x1<kWidth&&y<kHeight)
{
float accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
            int idx =max(0,min(fact_x1+i,kWidth-1));//x coordinate in the input image of this 1-D Gaussian tap: kSize taps on each side of the centre pixel
accum +=p_in[max_x+idx*kChannels+channels1]* data[abs(i)];
}
p_out[out_idx1]=accum / kWeight;
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((kWidth*c-1+x*3)/(x*3),(h-1+y)/y,1);
* kernel_gaussBlur_x2<<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,kWidth,h,c,ks,weight);
*/
__global__ void kernelGaussBlurX2(float *const p_out,float const *const p_in,float const * const p_blur,int const kWidth,int const kHeight,int const kChannels,int const kSize,float const kWeight)
{
extern __shared__ float data[];
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(kSize+1))
{
data[share_idx]=p_blur[share_idx];
}
__syncthreads();
int fact_x=x/kChannels;
int channels=x%kChannels;
int max_x=y*kWidth*kChannels;
int out_idx=max_x+x;
if(fact_x<kWidth&&y<kHeight)
{
float accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
            int idx =max(0,min(fact_x+i,kWidth-1));//x coordinate in the input image of this 1-D Gaussian tap: kSize taps on each side of the centre pixel
accum +=p_in[max_x+idx*kChannels+channels]* data[abs(i)];
//if(out_idx==10)printf("%f\t%f\n",accum,in[max_x+idx*c+channels]* data[abs(i)]);
}
p_out[out_idx]=accum / kWeight;
}
    //second unrolled output element
int fact_x1=(x+blockDim.x)/kChannels;
int channels1=(x+blockDim.x)%kChannels;
int out_idx1=max_x+x+blockDim.x;
if(fact_x1<kWidth&&y<kHeight)
{
float accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
            int idx =max(0,min(fact_x1+i,kWidth-1));//x coordinate in the input image of this 1-D Gaussian tap: kSize taps on each side of the centre pixel
accum +=p_in[max_x+idx*kChannels+channels1]* data[abs(i)];
}
p_out[out_idx1]=accum / kWeight;
}
    //third unrolled output element
int fact_x2=(x+blockDim.x*2)/kChannels;
int channels2=(x+blockDim.x*2)%kChannels;
int out_idx2=max_x+x+blockDim.x*2;
if(fact_x2<kWidth&&y<kHeight)
{
float accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
            int idx =max(0,min(fact_x2+i,kWidth-1));//x coordinate in the input image of this 1-D Gaussian tap: kSize taps on each side of the centre pixel
accum +=p_in[max_x+idx*kChannels+channels2]* data[abs(i)];
}
p_out[out_idx2]=accum / kWeight;
}
}
/******************************************************************************************/
///Function: Gaussian blur along the y dimension
/* kernel name                   elapsed time   block size
* kernelGaussBlurY 2.358ms [32,4,1]
* kernelGaussBlurY1 1.875ms [32,4,1]
* kernelGaussBlurY2 1.811ms [32,8,1]**
*/
/******************************************************************************************/
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((kWidth*c-1+x)/(x),(h-1+y)/y,1);
* kernel_gaussBlur_y<float><<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,fact_W,h,ks,weight);
*/
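/* Note on the y pass (informal, not part of the original sources): the image is treated as
 * kFact_width = width*channels columns, so adjacent threads still read adjacent addresses
 * while the blur offsets move along y with stride kFact_width; border rows are clamped the
 * same way the x pass clamps border columns.
 */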
template <typename T>
__global__ void kernelGaussBlurY(T *const p_out,T const *const p_in,T const * const p_blur,int const kFact_width,int const kHeight,int const kSize,T const kWeight)
{
//extern __shared__ float data[];
sharedMemory<T> smem;
T* data = smem.p_getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(kSize+1))
{
data[share_idx]=p_blur[share_idx];
}
__syncthreads();
    //first unrolled output element
int out_idx=y*kFact_width+x;
if(x<kFact_width&&y<kHeight)
{
T accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
            int idx =max(0,min(y+i,kHeight-1));//y coordinate in the input image of this 1-D Gaussian tap: kSize taps above and below the centre pixel
accum +=p_in[idx*kFact_width+x]* data[abs(i)];
}
p_out[out_idx]=accum / kWeight;
}
}
/*Usage example
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/y,1);
* kernel_gaussBlur_y1<float><<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,kFact_width,h,ks,weight);
*/
template <typename T>
__global__ void kernelGaussBlurY1(T *const p_out,T const *const p_in,T const * const p_blur,int const kFact_width,int const kHeight,int const kSize,T const kWeight)
{
//extern __shared__ float data[];
sharedMemory<T> smem;
T* data = smem.p_getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(kSize+1))
{
data[share_idx]=p_blur[share_idx];
}
__syncthreads();
    //first unrolled output element
int out_idx=y*kFact_width+x;
if(x<kFact_width&&y<kHeight)
{
T accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
            int idx =max(0,min(y+i,kHeight-1));//y coordinate in the input image of this 1-D Gaussian tap: kSize taps above and below the centre pixel
accum +=p_in[idx*kFact_width+x]* data[abs(i)];
}
p_out[out_idx]=accum / kWeight;
}
    //second unrolled output element
int x1=x+blockDim.x;
int out_idx1=y*kFact_width+x1;
if(x1<kFact_width&&y<kHeight)
{
T accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
            int idx =max(0,min(y+i,kHeight-1));//y coordinate in the input image of this 1-D Gaussian tap: kSize taps above and below the centre pixel
accum +=p_in[idx*kFact_width+x1]* data[abs(i)];
}
p_out[out_idx1]=accum / kWeight;
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/y,1);
* kernel_gaussBlur_y2<float><<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,kFact_width,h,ks,weight);
*/
template <typename T>
__global__ void kernelGaussBlurY2(T *const p_out,T const *const p_in,T const * const p_blur,int const kFact_width,int const kHeight,int const kSize,T const kWeight)
{
//extern __shared__ float data[];
sharedMemory<T> smem;
T* data = smem.p_getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(kSize+1))
{
data[share_idx]=p_blur[share_idx];
}
__syncthreads();
    //first unrolled output element
int out_idx=y*kFact_width+x;
if(x<kFact_width&&y<kHeight)
{
T accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
            int idx =max(0,min(y+i,kHeight-1));//y coordinate in the input image of this 1-D Gaussian tap: kSize taps above and below the centre pixel
accum +=p_in[idx*kFact_width+x]* data[abs(i)];
}
p_out[out_idx]=accum / kWeight;
}
    //second unrolled output element
int x1=x+blockDim.x;
int out_idx1=y*kFact_width+x1;
if(x1<kFact_width&&y<kHeight)
{
T accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
            int idx =max(0,min(y+i,kHeight-1));//y coordinate in the input image of this 1-D Gaussian tap: kSize taps above and below the centre pixel
accum +=p_in[idx*kFact_width+x1]* data[abs(i)];
}
p_out[out_idx1]=accum / kWeight;
}
    //third unrolled output element
int x2=x1+blockDim.x;
int out_idx2=y*kFact_width+x2;
if(x2<kFact_width&&y<kHeight)
{
T accum=0.0f;
for (int i = -kSize; i <=kSize ; ++i)
{
            int idx =max(0,min(y+i,kHeight-1));//y coordinate in the input image of this 1-D Gaussian tap: kSize taps above and below the centre pixel
accum +=p_in[idx*kFact_width+x2]* data[abs(i)];
}
p_out[out_idx2]=accum / kWeight;
}
}
/******************************************************************************************/
///Function: image subtraction (p_in1 - p_in2)
/* kernel name                   elapsed time   block size
* kernelSubtract 1.554ms [32,4,1]
* kernelSubtract1 1.541ms [32,8,1]
* kernelSubtract2 1.537ms [32,4,1]
*/
/******************************************************************************************/
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/(y),1);
* kernel_subtract<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernelSubtract(float *const p_out,float const * const p_in1,float const * const p_in2,int const kWidth_channels,int const kHeight)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*kWidth_channels+x;
float a = 0.0f;
if(x<kWidth_channels&&y<kHeight) {
a = p_in1[idx];
a -= p_in2[idx];
p_out[idx] = a;
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/(y),1);
* kernel_subtract1<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernelSubtract1(float *const p_out,float const * const p_in1,float const * const p_in2,int const kWidth_channels,int const kHeight)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
float diff=0.0f;
int idx;
for (int i = 0; i < 2; ++i) {
idx = y * kWidth_channels + x + blockDim.x * i;
if (idx <= kHeight * kWidth_channels) {
diff = p_in1[idx];
diff -= p_in2[idx];
p_out[idx] = diff;
}
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/(y),1);
* kernel_subtract2<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernelSubtract2(float *const p_out,float const * const p_in1,float const * const p_in2,int const kWidth_channels,int const kHeight)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
float diff=0.0f;
int idx;
for (int i = 0; i < 3; ++i) {
idx = y * kWidth_channels + x + blockDim.x * i;
if (idx <= kHeight * kWidth_channels) {
diff = p_in1[idx];
diff -=p_in2[idx];
p_out[idx] = diff;
}
}
}
/******************************************************************************************/
///Function: absolute image difference |p_in1 - p_in2|
/* kernel name                   elapsed time   block size
* kernelDifference 1.601ms [32,16,1]
* kernelDifference1 1.538ms [32,8,1]
* kernelDifference2 1.534ms [32,4,1]**
*/
/******************************************************************************************/
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/(y),1);
* kernel_difference<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernelDifference(float *const p_out,float const * const p_in1,float const * const p_in2,int const kWidth_channels,int const kHeight)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*kWidth_channels+x;
float diff = 0.0f;
if(x<kWidth_channels&&y<kHeight) {
diff = p_in1[idx];
diff -= p_in2[idx];
p_out[idx] = fabsf(diff);
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/(y),1);
* kernel_difference1<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
template <class T>
__global__ void kernelDifference1(T *const p_out,T const * const p_in1,T const * const p_in2,int const kWidth_channels,int const kHeight)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
T diff=0.0f;
int idx;
for (int i = 0; i < 2; ++i) {
idx = y * kWidth_channels + x + blockDim.x * i;
if (idx <= kHeight * kWidth_channels) {
diff = p_in1[idx];
diff -= p_in2[idx];
p_out[idx] = fabsf(diff);
}
}
}
/* Usage example
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/(y),1);
* kernel_difference2<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
template <class T>
__global__ void kernelDifference2(T *const p_out,T const * const p_in1,T const * const p_in2,int const kWidth_channels,int const kHeight)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
T diff=0.0f;
int idx;
for (int i = 0; i < 3; ++i) {
idx = y * kWidth_channels + x + blockDim.x * i;
if (idx <= kHeight * kWidth_channels) {
diff = p_in1[idx];
diff -= p_in2[idx];
p_out[idx] = fabsf(diff);
}
}
}
/******************************************************************************************/
///Host-side wrappers that launch the kernels above
/******************************************************************************************/
void desaturateByCuda(float * const p_out_image,float const *p_in_image,const int kPixel_amount, const int kType,const bool kAlpha)
{
float *p_d_in=NULL;
float *p_d_out=NULL;
const size_t kBytes_in=kPixel_amount*(3+kAlpha)*sizeof(float);
const size_t kBytes_out=kPixel_amount*(1+kAlpha)* sizeof(float);
const int kBlocksize=256;
dim3 block(kBlocksize,1,1);
dim3 grid((kPixel_amount-1+kBlocksize*2)/(kBlocksize*2),1,1);
cudaMalloc(&p_d_in ,kBytes_in);
cudaMalloc(&p_d_out,kBytes_out);
cudaMemcpy(p_d_in,p_in_image,kBytes_in,cudaMemcpyHostToDevice);
if(kAlpha)
{
kernelDesaturateAlpha<<<grid,block,kBlocksize*8* sizeof(float)>>>(p_d_out,p_d_in,kPixel_amount,kType);
}
else
{
kernelDesaturate<<<grid,block,kBlocksize*6* sizeof(float)>>>(p_d_out,p_d_in,kPixel_amount,kType);
}
cudaMemcpy(p_out_image,p_d_out,kBytes_out,cudaMemcpyDeviceToHost);
cudaFree(p_d_in);
cudaFree(p_d_out);
}
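/* Usage sketch (hypothetical caller, not part of the original sources; variable names are
 * illustrative only):
 *
 *   std::vector<float> rgb(width * height * 3);   // packed RGB input
 *   std::vector<float> gray(width * height);      // single-channel output
 *   // kType 2 selects the 0.21/0.72/0.07 luminosity weights used in the kernels;
 *   // kAlpha=false means 3 input channels and 1 output channel per pixel.
 *   desaturateByCuda(gray.data(), rgb.data(), width * height, 2, false);
 */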
void doubleSizeByCuda(float * const p_out_image,float const * const p_in_image,int const kWidth,int const kHeight,int const kChannels)
{
int const kOut_width=kWidth<<1;
int const kOut_height=kHeight<<1;
int const kSize_in=kWidth*kHeight;
int const kSize_out=kOut_width*kOut_height;
size_t const kBytes_in =kSize_in *kChannels* sizeof(float);
size_t const kBytes_out=kSize_out*kChannels* sizeof(float);
float *p_d_in=NULL;
float *p_d_out=NULL;
cudaMalloc((void**)&p_d_in ,kBytes_in);
cudaMalloc((void**)&p_d_out,kBytes_out);
cudaMemcpy(p_d_in,p_in_image,kBytes_in,cudaMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block2 (x,y,1);
dim3 grid2 ((kOut_width-1+x*3)/(x*3),(kOut_height-1+y)/y,1);
cudaMalloc((void**)&p_d_out,kBytes_out);
kernelDoubleSize2<<<grid2,block2>>>(p_d_out,p_d_in,kOut_width,kOut_height,kWidth,kChannels);
cudaMemcpy(p_out_image,p_d_out,kBytes_out,cudaMemcpyDeviceToHost);
    //free the allocated device memory
cudaFree(p_d_in);
cudaFree(p_d_out);
}
void halfSizeByCuda(float * const p_out_image,float const * const p_in_image,int const kWidth,int const kHeight,int const kChannels)
{
int kOut_width=(kWidth+1)>>1;
int kOut_height=(kHeight+1)>>1;
int const kSize_in=kWidth*kHeight;
int const kSize_out=kOut_width*kOut_height;
size_t const kBytes_in =kSize_in *kChannels* sizeof(float);
size_t const kBytes_out=kSize_out*kChannels* sizeof(float);
float *p_d_in=NULL;
float *p_d_out=NULL;
cudaMalloc((void**)&p_d_out,kBytes_out);
cudaMalloc((void**)&p_d_in, kBytes_in);
cudaMemcpy(p_d_in,p_in_image,kBytes_in,cudaMemcpyHostToDevice);
int const x=32;
int const y=8;
dim3 block (x,y,1);
dim3 grid ((kOut_width-1+x*2)/(x*2),(kOut_height-1+y)/y,1);
kernelHalfSize1<<<grid,block>>>(p_d_out,p_d_in,kOut_width,kOut_height,kWidth,kHeight,kChannels);
cudaMemcpy(p_out_image,p_d_out,kBytes_out,cudaMemcpyDeviceToHost);
cudaFree(p_d_in);
cudaFree(p_d_out);
}
void halfSizeGaussianByCuda(float * const p_out_image,float const * const p_in_image, int const kWidth,int const kHeight,int const kChannels,float sigma2)
{
int kOut_width=(kWidth+1)>>1;
int kOut_height=(kHeight+1)>>1;
int const kSize_in=kWidth*kHeight;
int const kSize_out=kOut_width*kOut_height;
    //declare + compute input/output image sizes in bytes
size_t const kBytes_in =kSize_in *kChannels* sizeof(float);
size_t const kBytes_out=kSize_out*kChannels* sizeof(float);
float h_w[3];
    //declare device pointers
float *p_d_w=NULL;
float *p_d_in=NULL;
float *p_d_out=NULL;
    //compute the Gaussian weights
h_w[0] = std::exp(-0.5f / (2.0f * sigma2));
h_w[1] = std::exp(-2.5f / (2.0f * sigma2));
h_w[2] = std::exp(-4.5f / (2.0f * sigma2));
    //allocate device memory
cudaMalloc((void**)&p_d_w,3* sizeof(float));
cudaMalloc((void**)&p_d_in ,kBytes_in);
cudaMalloc((void**)&p_d_out,kBytes_out);
    //copy the input image and weights to the device
cudaMemcpy(p_d_in,p_in_image,kBytes_in,cudaMemcpyHostToDevice);
cudaMemcpy(p_d_w,h_w,3* sizeof(float),cudaMemcpyHostToDevice);
int x=32;
int y=4;
    //define grid and block sizes
dim3 block(x, y, 1);
dim3 grid((kOut_width - 1 + x) / (x), (kOut_height - 1 + y) / y, 1);
kernelHalfSizeGauss1<<< grid, block >>> (p_d_out, p_d_in, kOut_width, kOut_height, kWidth, kHeight, kChannels, p_d_w);
    //copy the output image back to the host
cudaMemcpy(p_out_image, p_d_out, kBytes_out, cudaMemcpyDeviceToHost);
    //free the allocated device memory
cudaFree(p_d_w);
cudaFree(p_d_in);
cudaFree(p_d_out);
}
int blurGaussianByCuda(float * const p_out_image,float const * const p_in_image, int const kWidth,int const kHeight,int const kChannels,float sigma)
{
    //declare + compute image size and byte counts
int const kFact_width=kWidth*kChannels;
int const kSize_image=kWidth*kHeight;
size_t const kBytes=kSize_image*kChannels* sizeof(float);
    int const kSize = std::ceil(sigma * 2.884f);//the 1-D Gaussian kernel has length kSize*2+1
    std::vector<float> v_kernel(kSize + 1);//allocate the non-negative half of the symmetric Gaussian kernel
float weight = 0;
for (int i = 0; i < kSize + 1; ++i)
{
v_kernel[i] = math::func::gaussian((float)i, sigma);//kernel[0]=1,kernel[i]=wi;
weight += v_kernel[i]*2;
}
weight-=v_kernel[0];
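    // Normalisation note (informal): the loop above adds every coefficient twice (once for +i
    // and once for -i), so after subtracting the centre tap once,
    // weight == kernel[0] + 2*sum(kernel[1..kSize]) -- the sum of the full symmetric
    // 2*kSize+1 tap kernel that the blur kernels divide by.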
int const kBytes_blur=(kSize+1)*sizeof(float);
    //declare device pointers
float *p_d_in=NULL;
float *p_d_out=NULL;
float *p_d_tmp=NULL;
float *p_d_blur=NULL;
    //allocate device memory
cudaMalloc((void**)&p_d_in ,kBytes);
cudaMalloc((void**)&p_d_tmp ,kBytes);
cudaMalloc((void**)&p_d_out ,kBytes);
cudaMalloc((void**)&p_d_blur,kBytes_blur);
    //copy data from CPU to GPU
cudaMemcpy(p_d_in ,p_in_image,kBytes,cudaMemcpyHostToDevice);
cudaMemcpy(p_d_blur,&v_kernel[0],kBytes_blur,cudaMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kFact_width-1+x*2)/(x*2),(kHeight-1+y)/y,1);
    //Gaussian blur along x
kernelGaussBlurX1<<<grid,block,(kSize+1)* sizeof(float)>>>(p_d_tmp,p_d_in,p_d_blur, kWidth, kHeight,kChannels,kSize,weight);
    //Gaussian blur along y
x=32;
y=8;
dim3 block1(x,y,1);
dim3 grid1((kFact_width-1+x*3)/(x*3),(kHeight-1+y)/y,1);
kernelGaussBlurY2<float><<<grid1,block1,(kSize+1)* sizeof(float)>>>(p_d_out,p_d_tmp,p_d_blur,kFact_width,kHeight,kSize,weight);
    //copy data from GPU back to CPU
cudaMemcpy(p_out_image,p_d_out,kBytes,cudaMemcpyDeviceToHost);
    //free device memory
cudaFree(p_d_in);
cudaFree(p_d_tmp);
cudaFree(p_d_out);
cudaFree(p_d_blur);
return 0;
}
int blurGaussian2ByCuda(float * const p_out_image,float const * const p_in_image, int const kWidth,int const kHeight,int const kChannels,float sigma2)
{
float sigma = sqrt(sigma2);
blurGaussianByCuda(p_out_image,p_in_image,kWidth,kHeight,kChannels,sigma);
return 0;
}
int subtractByCuda(float * const p_out_image,float const * const p_in_image1,float const * const p_in_image2,int const kWidth,int const kHeight,int const kChannels)
{
int const kSize=kWidth*kHeight;
size_t const kBytes=kSize*kChannels*sizeof(float);
    //declare device pointers
float *p_d_in1=NULL;
float *p_d_in2=NULL;
float *p_d_out=NULL;
    //allocate device memory
cudaMalloc((void**)&p_d_in1,kBytes);
cudaMalloc((void**)&p_d_in2,kBytes);
cudaMalloc((void**)&p_d_out,kBytes);
    //copy data (cpu -> gpu)
cudaMemcpy(p_d_in1,p_in_image1,kBytes,cudaMemcpyHostToDevice);
cudaMemcpy(p_d_in2,p_in_image2,kBytes,cudaMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kWidth*kChannels-1+x*3)/(x*3),(kHeight-1+y)/(y),1);
kernelSubtract2<<<grid,block>>>(p_d_out,p_d_in1,p_d_in2,kWidth*kChannels,kHeight);
    //copy data (gpu -> cpu)
cudaMemcpy(p_out_image,p_d_out,kBytes,cudaMemcpyDeviceToHost);
    //free device memory
cudaFree(p_d_in1);
cudaFree(p_d_in2);
cudaFree(p_d_out);
return 0;
}
template <class T>
int differenceByCu(T * const p_out_image,T const * const p_in_image1,T const * const p_in_image2,int const kWidth,int const kHeight,int const kChannels)
{
int const kSize=kWidth*kHeight;
size_t const kBytes=kSize*kChannels*sizeof(T);
    //declare device pointers
T *p_d_in1=NULL;
T *p_d_in2=NULL;
T *p_d_out=NULL;
    //allocate device memory
cudaMalloc((void**)&p_d_in1,kBytes);
cudaMalloc((void**)&p_d_in2,kBytes);
cudaMalloc((void**)&p_d_out,kBytes);
    //copy data (cpu -> gpu)
cudaMemcpy(p_d_in1,p_in_image1,kBytes,cudaMemcpyHostToDevice);
cudaMemcpy(p_d_in2,p_in_image2,kBytes,cudaMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((kWidth*kChannels-1+x*3)/(x*3),(kHeight-1+y)/(y),1);
kernelDifference2<T><<<grid,block>>>(p_d_out,p_d_in1,p_d_in2,kWidth*kChannels,kHeight);
    //copy data (gpu -> cpu)
cudaMemcpy(p_out_image,p_d_out,kBytes,cudaMemcpyDeviceToHost);
    //free device memory
cudaFree(p_d_in1);
cudaFree(p_d_in2);
cudaFree(p_d_out);
return 0;
}
template <typename T>
int differenceByCuda(T * const p_out_image,T const * const p_in_image1,T const * const p_in_image2,int const kWidth,int const kHeight,int const kChannels)
{
return 0;
}
template<>
int differenceByCuda<float>(float * const p_out_image,float const * const p_in_image1,float const * const p_in_image2,int const kWidth,int const kHeight,int const kChannels)
{
differenceByCu<float>(p_out_image,p_in_image1,p_in_image2,kWidth,kHeight,kChannels);
return 0;
}
template<>
int differenceByCuda<char>(char * const p_out_image,char const * const p_in_image1,char const * const p_in_image2,int const kWidth,int const kHeight,int const kChannels)
{
differenceByCu<char>(p_out_image,p_in_image1,p_in_image2,kWidth,kHeight,kChannels);
return 0;
}
|
39bceb8e6baaf0c5a32e2624290a3fcf988633fa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cunn_OneVsAllMultiMarginCriterion_updateOutput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE);
int nframe = 1;
int dim = 2;
int sizeaverage = XSIZE*YSIZE;
float *positiveWeight = NULL;
hipMalloc(&positiveWeight, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cunn_OneVsAllMultiMarginCriterion_updateOutput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input,target,nframe,dim,sizeaverage,positiveWeight);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cunn_OneVsAllMultiMarginCriterion_updateOutput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input,target,nframe,dim,sizeaverage,positiveWeight);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cunn_OneVsAllMultiMarginCriterion_updateOutput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input,target,nframe,dim,sizeaverage,positiveWeight);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
39bceb8e6baaf0c5a32e2624290a3fcf988633fa.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cunn_OneVsAllMultiMarginCriterion_updateOutput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *target = NULL;
cudaMalloc(&target, XSIZE*YSIZE);
int nframe = 1;
int dim = 2;
int sizeaverage = XSIZE*YSIZE;
float *positiveWeight = NULL;
cudaMalloc(&positiveWeight, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cunn_OneVsAllMultiMarginCriterion_updateOutput_kernel<<<gridBlock,threadBlock>>>(output,input,target,nframe,dim,sizeaverage,positiveWeight);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cunn_OneVsAllMultiMarginCriterion_updateOutput_kernel<<<gridBlock,threadBlock>>>(output,input,target,nframe,dim,sizeaverage,positiveWeight);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cunn_OneVsAllMultiMarginCriterion_updateOutput_kernel<<<gridBlock,threadBlock>>>(output,input,target,nframe,dim,sizeaverage,positiveWeight);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
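/* Build/run sketch (assumed invocation, not part of the original source): the file expects
 * cunn_OneVsAllMultiMarginCriterion_updateOutput_kernel.cu next to it, e.g.
 *   nvcc -O2 39bceb8e6baaf0c5a32e2624290a3fcf988633fa.cu -o bench
 *   ./bench 7        # argv[1] = how many of the 7 matrix sizes to sweep
 * Each (block size, matrix size) pair is timed over 1000 launches after a 10-launch warm-up.
 */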
|
2e596004c6dbe8ffc35fe0cb01dcbb4723462e29.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* file name: matrix.cu
*
* matrix.cu contains the code that implements some commonly used matrix operations in CUDA
*
* this is a toy program for learning CUDA; some functions are reusable in other projects
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define N 3
/*
*********************************************************************
function name: gpu_matrix_mult
description: dot product of two matrices (not necessarily square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further speedup can be obtained by using shared memory to reduce the number of global memory accesses
return: none
*********************************************************************
*/
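/*
 Illustrative note (not part of the original code): with the configuration
 described above and, say, m = 1000, k = 500, BLOCK_SIZE = 16, one would get
     dim3 dimGrid((500 + 16 - 1) / 16, (1000 + 16 - 1) / 16);  // 32 x 63 blocks
     dim3 dimBlock(16, 16);                                    // 256 threads per block
 BLOCK_SIZE is assumed to be defined by the caller; the MatAdd kernel below
 instead uses a single m x N thread block (see main()).
*/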
__global__ void MatAdd(int A[][N], int B[][N], int C[][N], int m)
{
int i = threadIdx.x;
int j = threadIdx.y;
printf("%d col : %d row :\n", i, j );
printf("end \n");
/// interior (non-margin) elements
int sum = 0;
if (i != 0 && i != m - 1 && j != 0 && j != N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i - 1][j] + B[i - 1][j];
sum += A[i + 1][j] + B[i + 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j + 1] + B[i][j + 1];
sum += A[i][j - 1] + B[i][j - 1];
}
C[i][j] += sum;
}
////the right margin, without the top row
sum = 0;
if (i != 0 && i != m - 1 && j != 0 && j == N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i - 1][j] + B[i - 1][j];
sum += A[i + 1][j] + B[i + 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j - 1] + B[i][j - 1];
}
C[i][j] += sum;
}
////the right margin + the top corner element
sum = 0;
if (i == 0 && i != m - 1 && j != 0 && j == N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i + 1][j] + B[i + 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j - 1] + B[i][j - 1];
}
C[i][j] += sum;
}
////the right margin + the bottom corner element
sum = 0;
if (i != 0 && i == m - 1 && j != 0 && j == N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i - 1][j] + B[i - 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j - 1] + B[i][j - 1];
}
C[i][j] += sum;
}
////the left margin, only the top corner element
sum = 0;
if (i == 0 && i != m - 1 && j == 0 && j != N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i + 1][j] + B[i + 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j + 1] + B[i][j + 1];
}
C[i][j] += sum;
}
////the left margin, only the bottom corner element
sum = 0;
if (i != 0 && i == m - 1 && j == 0 && j != N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i - 1][j] + B[i - 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j + 1] + B[i][j + 1];
}
C[i][j] += sum;
}
////the left margin, without the top and bottom elements
sum = 0;
if (i != 0 && i != m - 1 && j == 0 && j != N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i - 1][j] + B[i - 1][j];
sum += A[i + 1][j] + B[i + 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j + 1] + B[i][j + 1];
}
C[i][j] += sum;
}
////the top margin
sum = 0;
if (i == 0 && i != m - 1 && j != 0 && j != N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i + 1][j] + B[i + 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j + 1] + B[i][j + 1];
sum += A[i][j - 1] + B[i][j - 1];
}
C[i][j] += sum;
}
////the bottom margin
sum = 0;
if (i != 0 && i == m - 1 && j != 0 && j != N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i - 1][j] + B[i - 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j + 1] + B[i][j + 1];
sum += A[i][j - 1] + B[i][j - 1];
}
C[i][j] += sum;
}
}
/*
*********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
*********************************************************************
*/
int main(int argc, char const *argv[])
{
/* Fixed seed for illustration */
const int m = 6;
float gpu_elapsed_time_ms;
// some events to count the execution time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// start to count execution time of GPU version
hipEventRecord(start, 0);
// Allocate memory space on the device
int A[m][N];
int B[m][N];
int C[m][N];
for (int i = 0; i<m; i++) {
for (int j = 0; j<N; j++) {
A[i][j] = 1;
}
printf("\n");
}
for (int i = 0; i<m; i++) {
for (int j = 0; j<N; j++) {
B[i][j] = 1;
}
printf("\n");
}
for (int i = 0; i<m; i++) {
for (int j = 0; j<N; j++) {
C[i][j] = 0;
}
printf("\n");
}
int(*pA)[N], (*pB)[N], (*pC)[N];
hipMalloc((void**)&pA, (m*N) * sizeof(int));
hipMalloc((void**)&pB, (m*N) * sizeof(int));
hipMalloc((void**)&pC, (m*N) * sizeof(int));
hipMemcpy(pA, A, (m*N) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(pB, B, (m*N) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(pC, C, (m*N) * sizeof(int), hipMemcpyHostToDevice);
// Launch kernel
int numBlocks = 1;
dim3 threadsPerBlock(m, N);
MatAdd << <numBlocks, threadsPerBlock >> >(pA,pB,pC,m);
// Transfer results from device to host
hipMemcpy(C, pC, (m*N) * sizeof(int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// time counting terminate
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// compute time elapse on GPU computing
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix calcul of %f matrix", gpu_elapsed_time_ms);
printf("\n");
printf("A = \n");
for (int i = 0; i<m; i++) {
for (int j = 0; j<N; j++) {
printf("%d ", A[i][j]);
}
printf("\n");
}
printf("B = \n");
for (int i = 0; i<m; i++) {
for (int j = 0; j<N; j++) {
printf("%d ", B[i][j]);
}
printf("\n");
}
int i, j;
printf("C = \n");
for (i = 0; i<m; i++) {
for (j = 0; j<N; j++) {
printf("%d ", C[i][j]);
}
printf("\n");
}
hipFree(pA);
hipFree(pB);
hipFree(pC);
printf("\n");
system("PAUSE");
return 0;
}
|
2e596004c6dbe8ffc35fe0cb01dcbb4723462e29.cu
|
/*
* file name: matrix.cu
*
* matrix.cu contains code that implements some commonly used matrix operations in CUDA
*
* this is a toy program for learning CUDA; some functions are reusable in other projects
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define N 3
/*
*********************************************************************
function name: gpu_matrix_mult
description: dot product of two matrices (not necessarily square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further speedup can be obtained by using shared memory to reduce the number of global memory accesses
return: none
*********************************************************************
*/
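/*
 Illustrative note (not part of the original code): with the configuration
 described above and, say, m = 1000, k = 500, BLOCK_SIZE = 16, one would get
     dim3 dimGrid((500 + 16 - 1) / 16, (1000 + 16 - 1) / 16);  // 32 x 63 blocks
     dim3 dimBlock(16, 16);                                    // 256 threads per block
 BLOCK_SIZE is assumed to be defined by the caller; the MatAdd kernel below
 instead uses a single m x N thread block (see main()).
*/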
__global__ void MatAdd(int A[][N], int B[][N], int C[][N], int m)
{
int i = threadIdx.x;
int j = threadIdx.y;
printf("%d col : %d row :\n", i, j );
printf("end \n");
/// interior (non-margin) elements
int sum = 0;
if (i != 0 && i != m - 1 && j != 0 && j != N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i - 1][j] + B[i - 1][j];
sum += A[i + 1][j] + B[i + 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j + 1] + B[i][j + 1];
sum += A[i][j - 1] + B[i][j - 1];
}
C[i][j] += sum;
}
////the right margin, without the top row
sum = 0;
if (i != 0 && i != m - 1 && j != 0 && j == N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i - 1][j] + B[i - 1][j];
sum += A[i + 1][j] + B[i + 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j - 1] + B[i][j - 1];
}
C[i][j] += sum;
}
////the right margin + the top corner element
sum = 0;
if (i == 0 && i != m - 1 && j != 0 && j == N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i + 1][j] + B[i + 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j - 1] + B[i][j - 1];
}
C[i][j] += sum;
}
////the right margin + the bottom corner element
sum = 0;
if (i != 0 && i == m - 1 && j != 0 && j == N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i - 1][j] + B[i - 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j - 1] + B[i][j - 1];
}
C[i][j] += sum;
}
////the left margin, only the top corner element
sum = 0;
if (i == 0 && i != m - 1 && j == 0 && j != N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i + 1][j] + B[i + 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j + 1] + B[i][j + 1];
}
C[i][j] += sum;
}
////the left margin, only the bottom corner element
sum = 0;
if (i != 0 && i == m - 1 && j == 0 && j != N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i - 1][j] + B[i - 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j + 1] + B[i][j + 1];
}
C[i][j] += sum;
}
////the left margin, without the top and bottom elements
sum = 0;
if (i != 0 && i != m - 1 && j == 0 && j != N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i - 1][j] + B[i - 1][j];
sum += A[i + 1][j] + B[i + 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j + 1] + B[i][j + 1];
}
C[i][j] += sum;
}
////the top margin
sum = 0;
if (i == 0 && i != m - 1 && j != 0 && j != N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i + 1][j] + B[i + 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j + 1] + B[i][j + 1];
sum += A[i][j - 1] + B[i][j - 1];
}
C[i][j] += sum;
}
////the bottom margin
sum = 0;
if (i != 0 && i == m - 1 && j != 0 && j != N - 1) {
for (int j = 0; j < N; j++)
{
sum += A[i - 1][j] + B[i - 1][j];
}
C[i][j] += sum;
sum = 0;
for (int i = 0; i < m; i++)
{
sum += A[i][j + 1] + B[i][j + 1];
sum += A[i][j - 1] + B[i][j - 1];
}
C[i][j] += sum;
}
}
/*
*********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
*********************************************************************
*/
int main(int argc, char const *argv[])
{
/* Fixed seed for illustration */
const int m = 6;
float gpu_elapsed_time_ms;
// some events to count the execution time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// start to count execution time of GPU version
cudaEventRecord(start, 0);
// Allocate memory space on the device
int A[m][N];
int B[m][N];
int C[m][N];
for (int i = 0; i<m; i++) {
for (int j = 0; j<N; j++) {
A[i][j] = 1;
}
printf("\n");
}
for (int i = 0; i<m; i++) {
for (int j = 0; j<N; j++) {
B[i][j] = 1;
}
printf("\n");
}
for (int i = 0; i<m; i++) {
for (int j = 0; j<N; j++) {
C[i][j] = 0;
}
printf("\n");
}
int(*pA)[N], (*pB)[N], (*pC)[N];
cudaMalloc((void**)&pA, (m*N) * sizeof(int));
cudaMalloc((void**)&pB, (m*N) * sizeof(int));
cudaMalloc((void**)&pC, (m*N) * sizeof(int));
cudaMemcpy(pA, A, (m*N) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(pB, B, (m*N) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(pC, C, (m*N) * sizeof(int), cudaMemcpyHostToDevice);
// Launch kernel
int numBlocks = 1;
dim3 threadsPerBlock(m, N);
MatAdd << <numBlocks, threadsPerBlock >> >(pA,pB,pC,m);
// Transfer results from device to host
cudaMemcpy(C, pC, (m*N) * sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// time counting terminate
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// compute time elapse on GPU computing
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix calcul of %f matrix", gpu_elapsed_time_ms);
printf("\n");
printf("A = \n");
for (int i = 0; i<m; i++) {
for (int j = 0; j<N; j++) {
printf("%d ", A[i][j]);
}
printf("\n");
}
printf("B = \n");
for (int i = 0; i<m; i++) {
for (int j = 0; j<N; j++) {
printf("%d ", B[i][j]);
}
printf("\n");
}
int i, j;
printf("C = \n");
for (i = 0; i<m; i++) {
for (j = 0; j<N; j++) {
printf("%d ", C[i][j]);
}
printf("\n");
}
cudaFree(pA);
cudaFree(pB);
cudaFree(pC);
printf("\n");
system("PAUSE");
return 0;
}
|
07552769e5ee7647b0711f26f4a3bf58ccb4c9d7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
LICENCE
*/
//cuda_ker.h
///implementation of cuda kernel functions
#include <cstdio>
#include <cassert>
#include <rocblas.h>
#include "cuda_ker.h"
#include "typedef.h"
#include "common.h"
namespace cudaker
{
inline hipError_t checkCuda(hipError_t result)
{
#ifdef DEBUG
if(result != hipSuccess)
{
fprintf(stderr, "CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
inline hipblasStatus_t checkCublas(hipblasStatus_t result, const char* msg)
{
#ifdef DEBUG
if(result != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "cublas Runtime Error: %s\n", msg);
assert(result == HIPBLAS_STATUS_SUCCESS);
}
#endif
return result;
}
__global__ void kerVecCpy(mytype::real* const des,
const mytype::real* const src,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
des[i] = src[i];
}
}
__global__ void kerAxpy(mytype::real* const z,
const mytype::real* const x,
const mytype::real* const y,
const mytype::real a, const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
z[i] = a * x[i] + y[i];
}
}
__global__ void kerMatVec(mytype::real* const des,
const mytype::real* const mat,
const mytype::real* const vec,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
mytype::real _tmp = 0.0;
if(i < n)
{
for(mytype::integer j=0;j<n;j++)
{
_tmp += mat[i*n+j] * vec[j];
}
des[i] = _tmp;
}
}
__global__ void kerVecVec(mytype::real& des,
const mytype::real* const vec1,
const mytype::real* const vec2,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
mytype::real _tmp = 0.0;
if(i == 0)
{
for(mytype::integer j=0;j<n;j++)
{
_tmp += vec1[j] * vec2[j];
}
des = _tmp;
}
}
void VecCpy(mytype::real* const des,
const mytype::real* const src,
const mytype::integer n)
{
hipLaunchKernelGGL(( kerVecCpy), dim3((n+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, des, src, n);
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("cpy -> Sync kernnel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("cpy -> Async kernnel error: %s\n", hipGetErrorString(errAsync));
#endif
}
void Axpy(mytype::real* const z,
const mytype::real* const x,
const mytype::real* const y,
const mytype::real a, const mytype::integer n)
{
hipLaunchKernelGGL(( kerAxpy), dim3((n+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, z, x, y, a, n);
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("Axpy -> Sync kernnel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("Axpy -> Async kernnel error: %s\n", hipGetErrorString(errAsync));
#endif
}
void MatVec(mytype::real* const des,
const mytype::real* const mat,
const mytype::real* const vec,
const mytype::integer n)
{
hipLaunchKernelGGL(( kerMatVec), dim3((n+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, des, mat, vec, n);
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("MV -> Sync kernnel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("MV -> Async kernnel error: %s\n", hipGetErrorString(errAsync));
#endif
}
void VecVec(mytype::real& des,
const mytype::real* const vec1,
const mytype::real* const vec2,
const mytype::integer n)
{
hipLaunchKernelGGL(( kerVecVec), dim3(1), dim3(NUM_THREADS), 0, 0, des, vec1, vec2, n);
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("VV -> Sync kernnel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("VV -> Async kernnel error: %s\n", hipGetErrorString(errAsync));
#endif
}
void CG(const mytype::real* const A, mytype::real* const x, const mytype::real* const b, const mytype::integer n)
{
const mytype::real _ZERO = 0.0;
const mytype::real _P_ONE = 1.0;
const mytype::real _N_ONE = -1.0;
mytype::integer _num;
mytype::real _rrold;
mytype::real _rrnew;
mytype::real _alpha;
mytype::real _rn_over_ro;
/*-----device memory-----*/
mytype::real* dev_A;
mytype::real* dev_x;
mytype::real* dev_b;
mytype::real* dev_Ap;
mytype::real* dev_p;
mytype::real* dev_r;
#ifdef DEBUG
float time;
hipEvent_t startEvent, stopEvent;
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
checkCuda( hipEventRecord(startEvent, 0) );
#endif
checkCuda( hipMalloc(&dev_A, n*n*sizeof(mytype::real)) );
checkCuda( hipMalloc(&dev_x, n*sizeof(mytype::real)) );
checkCuda( hipMalloc(&dev_b, n*sizeof(mytype::real)) );
checkCuda( hipMalloc(&dev_Ap, n*sizeof(mytype::real)) );
checkCuda( hipMalloc(&dev_p, n*sizeof(mytype::real)) );
checkCuda( hipMalloc(&dev_r, n*sizeof(mytype::real)) );
#ifdef DEBUG
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
printf("time for MemAlloc: %f ms\n",time);
#endif
#ifdef DEBUG
checkCuda( hipEventRecord(startEvent, 0) );
#endif
checkCuda( hipMemcpy(dev_A, A, n*n*sizeof(mytype::real), hipMemcpyHostToDevice) );
checkCuda( hipMemcpy(dev_x, x, n*sizeof(mytype::real), hipMemcpyHostToDevice) );
checkCuda( hipMemcpy(dev_b, b, n*sizeof(mytype::real), hipMemcpyHostToDevice) );
#ifdef DEBUG
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
printf("time for Memcpy: %f ms\n",time);
#endif
/*-----------------------*/
/*-----CG by using cublas-----*/
hipblasHandle_t handle;
checkCublas( hipblasCreate(&handle), "create" );
///r = b - A*x
checkCublas( hipblasDcopy(handle, n, dev_b, 1, dev_r, 1), "Dcopy1" );
checkCublas( hipblasDgemv(handle, HIPBLAS_OP_N, n, n, &_N_ONE, dev_A, n, dev_x, 1, &_P_ONE, dev_r, 1), "Dgemv1" );
///p = r
checkCublas( hipblasDcopy(handle, n, dev_r, 1, dev_p, 1), "Dcopy2" );
///_rrold = r*r
checkCublas( hipblasDdot(handle, n, dev_r, 1, dev_r, 1, &_rrold), "Ddot1" );
_num = 0;
while( _rrold > mytype::EPS_BY_EPS )
{
///Ap = A*p
checkCublas( hipblasDgemv(handle, HIPBLAS_OP_N, n, n, &_P_ONE, dev_A, n, dev_p, 1, &_ZERO, dev_Ap, 1), "Dgemv2" );
///_alpha = _rrold / Ap*p
checkCublas( hipblasDdot(handle, n, dev_Ap, 1, dev_p, 1, &_alpha), "Ddot2" );
_alpha = _rrold / _alpha;
///x = x + _alpha*p
checkCublas( hipblasDaxpy(handle, n, &_alpha, dev_p, 1, dev_x, 1 ), "Daxpy1" );
///r = r - _alpha*Ap
_alpha = -_alpha;
checkCublas( hipblasDaxpy(handle, n, &_alpha, dev_Ap, 1, dev_r, 1 ), "Daxpy2" );
///_rrnew = r*r
checkCublas( hipblasDdot(handle, n, dev_r, 1, dev_r, 1, &_rrnew), "Ddot2" );
///_rn_over_ro = _rrnew / _rrold
_rn_over_ro = _rrnew / _rrold;
///p = _rn_over_ro*p + r
checkCublas( hipblasDscal(handle, n, &_rn_over_ro, dev_p, 1), "Dscal1" );
checkCublas( hipblasDaxpy(handle, n, &_P_ONE, dev_r, 1, dev_p, 1 ), "Daxpy3" );
///_rrold = _rrnew
_rrold = _rrnew;
_num++;
//printf("CONVERGENCE -> RESIDUAL: %.2e\n",_rrnew);
}
checkCuda( hipMemcpy(x, dev_x, n*sizeof(mytype::real), hipMemcpyDeviceToHost) );
checkCublas( hipblasDestroy(handle), "destroy");
/*----------------------------*/
/*-----device memory-----*/
#ifdef DEBUG
checkCuda( hipEventRecord(startEvent, 0) );
#endif
checkCuda( hipFree(dev_A) );
checkCuda( hipFree(dev_x) );
checkCuda( hipFree(dev_b) );
checkCuda( hipFree(dev_Ap) );
checkCuda( hipFree(dev_p) );
checkCuda( hipFree(dev_r) );
#ifdef DEBUG
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
printf("time for freeMem: %f ms\n",time);
#endif
/*-----------------------*/
#ifdef DEBUG
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
#endif
printf(" CG -> times: %d \n", _num);
}
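/*
 Minimal usage sketch (illustrative, not part of the original file). It assumes
 mytype::real is double and mytype::integer is int, as the hipblasD* calls above
 suggest, and that A is symmetric positive definite so that CG converges:

     const mytype::integer n = 2;
     mytype::real A[4] = { 4.0, 1.0,
                           1.0, 3.0 };      // symmetric, so row/column major agree
     mytype::real b[2] = { 1.0, 2.0 };
     mytype::real x[2] = { 0.0, 0.0 };      // initial guess
     cudaker::CG(A, x, b, n);               // x now approximates A^{-1} b
*/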
/*
__global__ void DoCG(mytype::real* dev_A, mytype::real* dev_x, mytype::real* dev_b,
mytype::real* dev_Ap, mytype::real* dev_p, mytype::real* dev_r,
mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
const mytype::integer in = i * n;
mytype::integer _num;
__shared__ mytype::real _alpha;
__shared__ mytype::real _rrold;
__shared__ mytype::real _rrnew;
_num = 0;
mytype::real __tmp = 0.0;
///matVec & axpy
if(i < n)
{
for(mytype::integer j=0;j<n;j++)
{
__tmp += dev_A[in+j] * dev_x[j];
}
dev_p[i] = dev_r[i] = dev_b[i] - __tmp;
}
__syncthreads();
///vecVec
if(threadIdx.x == 0)
{
__tmp = 0.0;
for(mytype::integer j=0;j<n;j++)
{
__tmp += dev_r[j] * dev_r[j];
}
_rrold = __tmp;
}
__syncthreads();
//repeat
while(_rrold > mytype::EPS_BY_EPS)
{
///matVec
__tmp = 0.0;
if(i < n)
{
for(mytype::integer j=0;j<n;j++)
{
__tmp += dev_A[in+j] * dev_p[j];
}
dev_Ap[i] = __tmp;
}
__syncthreads();
///vecVec
__tmp = 0.0;
if(threadIdx.x == 0)
{
for(mytype::integer j=0;j<n;j++)
{
__tmp += dev_Ap[j] * dev_p[j];
}
_alpha = _rrold / __tmp;
}
__syncthreads();
///axpy
if(i < n)
{
dev_r[i] = dev_r[i] - _alpha * dev_Ap[i];
dev_x[i] = dev_x[i] + _alpha * dev_p[i];
}
__syncthreads();
///vecVec
if(threadIdx.x == 0)
{
__tmp = 0.0;
for(mytype::integer j=0;j<n;j++)
{
__tmp += dev_r[j] * dev_r[j];
}
_rrnew = __tmp;
}
__syncthreads();
///axpy
if(i < n)
{
dev_p[i] = _rrnew / _rrold * dev_p[i] + dev_r[i];
}
_rrold = _rrnew;
//printf("CONVERGENCE -> RESIDUAL: %.2e\n",__rrnew);
_num++;
__syncthreads();
}
}
*/
}
|
07552769e5ee7647b0711f26f4a3bf58ccb4c9d7.cu
|
/*
LICENCE
*/
//cuda_ker.h
///implementation of cuda kernel functions
#include <cstdio>
#include <cassert>
#include <cublas_v2.h>
#include "cuda_ker.h"
#include "typedef.h"
#include "common.h"
namespace cudaker
{
inline cudaError_t checkCuda(cudaError_t result)
{
#ifdef DEBUG
if(result != cudaSuccess)
{
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
inline cublasStatus_t checkCublas(cublasStatus_t result, const char* msg)
{
#ifdef DEBUG
if(result != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "cublas Runtime Error: %s\n", msg);
assert(result == CUBLAS_STATUS_SUCCESS);
}
#endif
return result;
}
__global__ void kerVecCpy(mytype::real* const des,
const mytype::real* const src,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
des[i] = src[i];
}
}
__global__ void kerAxpy(mytype::real* const z,
const mytype::real* const x,
const mytype::real* const y,
const mytype::real a, const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
z[i] = a * x[i] + y[i];
}
}
__global__ void kerMatVec(mytype::real* const des,
const mytype::real* const mat,
const mytype::real* const vec,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
mytype::real _tmp = 0.0;
if(i < n)
{
for(mytype::integer j=0;j<n;j++)
{
_tmp += mat[i*n+j] * vec[j];
}
des[i] = _tmp;
}
}
__global__ void kerVecVec(mytype::real& des,
const mytype::real* const vec1,
const mytype::real* const vec2,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
mytype::real _tmp = 0.0;
if(i == 0)
{
for(mytype::integer j=0;j<n;j++)
{
_tmp += vec1[j] * vec2[j];
}
des = _tmp;
}
}
void VecCpy(mytype::real* const des,
const mytype::real* const src,
const mytype::integer n)
{
kerVecCpy<<<(n+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>(des, src, n);
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("cpy -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("cpy -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
void Axpy(mytype::real* const z,
const mytype::real* const x,
const mytype::real* const y,
const mytype::real a, const mytype::integer n)
{
kerAxpy<<<(n+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>(z, x, y, a, n);
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("Axpy -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("Axpy -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
void MatVec(mytype::real* const des,
const mytype::real* const mat,
const mytype::real* const vec,
const mytype::integer n)
{
kerMatVec<<<(n+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>(des, mat, vec, n);
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("MV -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("MV -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
void VecVec(mytype::real& des,
const mytype::real* const vec1,
const mytype::real* const vec2,
const mytype::integer n)
{
kerVecVec<<<1, NUM_THREADS>>>(des, vec1, vec2, n);
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("VV -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("VV -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
void CG(const mytype::real* const A, mytype::real* const x, const mytype::real* const b, const mytype::integer n)
{
const mytype::real _ZERO = 0.0;
const mytype::real _P_ONE = 1.0;
const mytype::real _N_ONE = -1.0;
mytype::integer _num;
mytype::real _rrold;
mytype::real _rrnew;
mytype::real _alpha;
mytype::real _rn_over_ro;
/*-----device memory-----*/
mytype::real* dev_A;
mytype::real* dev_x;
mytype::real* dev_b;
mytype::real* dev_Ap;
mytype::real* dev_p;
mytype::real* dev_r;
#ifdef DEBUG
float time;
cudaEvent_t startEvent, stopEvent;
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
checkCuda( cudaEventRecord(startEvent, 0) );
#endif
checkCuda( cudaMalloc(&dev_A, n*n*sizeof(mytype::real)) );
checkCuda( cudaMalloc(&dev_x, n*sizeof(mytype::real)) );
checkCuda( cudaMalloc(&dev_b, n*sizeof(mytype::real)) );
checkCuda( cudaMalloc(&dev_Ap, n*sizeof(mytype::real)) );
checkCuda( cudaMalloc(&dev_p, n*sizeof(mytype::real)) );
checkCuda( cudaMalloc(&dev_r, n*sizeof(mytype::real)) );
#ifdef DEBUG
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
printf("time for MemAlloc: %f ms\n",time);
#endif
#ifdef DEBUG
checkCuda( cudaEventRecord(startEvent, 0) );
#endif
checkCuda( cudaMemcpy(dev_A, A, n*n*sizeof(mytype::real), cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpy(dev_x, x, n*sizeof(mytype::real), cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpy(dev_b, b, n*sizeof(mytype::real), cudaMemcpyHostToDevice) );
#ifdef DEBUG
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
printf("time for Memcpy: %f ms\n",time);
#endif
/*-----------------------*/
/*-----CG by using cublas-----*/
cublasHandle_t handle;
checkCublas( cublasCreate(&handle), "create" );
///r = b - A*x
checkCublas( cublasDcopy(handle, n, dev_b, 1, dev_r, 1), "Dcopy1" );
checkCublas( cublasDgemv(handle, CUBLAS_OP_N, n, n, &_N_ONE, dev_A, n, dev_x, 1, &_P_ONE, dev_r, 1), "Dgemv1" );
///p = r
checkCublas( cublasDcopy(handle, n, dev_r, 1, dev_p, 1), "Dcopy2" );
///_rrold = r*r
checkCublas( cublasDdot(handle, n, dev_r, 1, dev_r, 1, &_rrold), "Ddot1" );
_num = 0;
while( _rrold > mytype::EPS_BY_EPS )
{
///Ap = A*p
checkCublas( cublasDgemv(handle, CUBLAS_OP_N, n, n, &_P_ONE, dev_A, n, dev_p, 1, &_ZERO, dev_Ap, 1), "Dgemv2" );
///_alpha = _rrold / Ap*p
checkCublas( cublasDdot(handle, n, dev_Ap, 1, dev_p, 1, &_alpha), "Ddot2" );
_alpha = _rrold / _alpha;
///x = x + _alpha*p
checkCublas( cublasDaxpy(handle, n, &_alpha, dev_p, 1, dev_x, 1 ), "Daxpy1" );
///r = r - _alpha*Ap
_alpha = -_alpha;
checkCublas( cublasDaxpy(handle, n, &_alpha, dev_Ap, 1, dev_r, 1 ), "Daxpy2" );
///_rrnew = r*r
checkCublas( cublasDdot(handle, n, dev_r, 1, dev_r, 1, &_rrnew), "Ddot2" );
///_rn_over_ro = _rrnew / _rrold
_rn_over_ro = _rrnew / _rrold;
///p = _rn_over_ro*p + r
checkCublas( cublasDscal(handle, n, &_rn_over_ro, dev_p, 1), "Dscal1" );
checkCublas( cublasDaxpy(handle, n, &_P_ONE, dev_r, 1, dev_p, 1 ), "Daxpy3" );
///_rrold = _rrnew
_rrold = _rrnew;
_num++;
//printf("CONVERGENCE -> RESIDUAL: %.2e\n",_rrnew);
}
checkCuda( cudaMemcpy(x, dev_x, n*sizeof(mytype::real), cudaMemcpyDeviceToHost) );
checkCublas( cublasDestroy(handle), "destroy");
/*----------------------------*/
/*-----device memory-----*/
#ifdef DEBUG
checkCuda( cudaEventRecord(startEvent, 0) );
#endif
checkCuda( cudaFree(dev_A) );
checkCuda( cudaFree(dev_x) );
checkCuda( cudaFree(dev_b) );
checkCuda( cudaFree(dev_Ap) );
checkCuda( cudaFree(dev_p) );
checkCuda( cudaFree(dev_r) );
#ifdef DEBUG
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
printf("time for freeMem: %f ms\n",time);
#endif
/*-----------------------*/
#ifdef DEBUG
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
#endif
printf(" CG -> times: %d \n", _num);
}
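/*
 Minimal usage sketch (illustrative, not part of the original file). It assumes
 mytype::real is double and mytype::integer is int, as the cublasD* calls above
 suggest, and that A is symmetric positive definite so that CG converges:

     const mytype::integer n = 2;
     mytype::real A[4] = { 4.0, 1.0,
                           1.0, 3.0 };      // symmetric, so row/column major agree
     mytype::real b[2] = { 1.0, 2.0 };
     mytype::real x[2] = { 0.0, 0.0 };      // initial guess
     cudaker::CG(A, x, b, n);               // x now approximates A^{-1} b
*/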
/*
__global__ void DoCG(mytype::real* dev_A, mytype::real* dev_x, mytype::real* dev_b,
mytype::real* dev_Ap, mytype::real* dev_p, mytype::real* dev_r,
mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
const mytype::integer in = i * n;
mytype::integer _num;
__shared__ mytype::real _alpha;
__shared__ mytype::real _rrold;
__shared__ mytype::real _rrnew;
_num = 0;
mytype::real __tmp = 0.0;
///matVec & axpy
if(i < n)
{
for(mytype::integer j=0;j<n;j++)
{
__tmp += dev_A[in+j] * dev_x[j];
}
dev_p[i] = dev_r[i] = dev_b[i] - __tmp;
}
__syncthreads();
///vecVec
if(threadIdx.x == 0)
{
__tmp = 0.0;
for(mytype::integer j=0;j<n;j++)
{
__tmp += dev_r[j] * dev_r[j];
}
_rrold = __tmp;
}
__syncthreads();
//repeat
while(_rrold > mytype::EPS_BY_EPS)
{
///matVec
__tmp = 0.0;
if(i < n)
{
for(mytype::integer j=0;j<n;j++)
{
__tmp += dev_A[in+j] * dev_p[j];
}
dev_Ap[i] = __tmp;
}
__syncthreads();
///vecVec
__tmp = 0.0;
if(threadIdx.x == 0)
{
for(mytype::integer j=0;j<n;j++)
{
__tmp += dev_Ap[j] * dev_p[j];
}
_alpha = _rrold / __tmp;
}
__syncthreads();
///axpy
if(i < n)
{
dev_r[i] = dev_r[i] - _alpha * dev_Ap[i];
dev_x[i] = dev_x[i] + _alpha * dev_p[i];
}
__syncthreads();
///vecVec
if(threadIdx.x == 0)
{
__tmp = 0.0;
for(mytype::integer j=0;j<n;j++)
{
__tmp += dev_r[j] * dev_r[j];
}
_rrnew = __tmp;
}
__syncthreads();
///axpy
if(i < n)
{
dev_p[i] = _rrnew / _rrold * dev_p[i] + dev_r[i];
}
_rrold = _rrnew;
//printf("CONVERGENCE -> RESIDUAL: %.2e\n",__rrnew);
_num++;
__syncthreads();
}
}
*/
}
|
5cb6225dbb6a8dd027ccfdd428cd49f664d33f01.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cctype>
#include <cassert>
#include <cstdio>
#include <ctime>
#include <cstdlib>
#define DATA_SIZE 1048576
#define BLOCK_NUM 32
#define THREAD_NUM 256
#ifndef nullptr
#define nullptr 0
#endif
using namespace std;
/////////////////////// kernel that runs on the device ///////////////////////
__global__ static void Kernel_SquareSum( int* pIn, size_t* pDataSize,
int*pOut, clock_t* pTime )
{
// dynamically allocated shared memory
extern __shared__ int sharedData[];
const size_t computeSize =*pDataSize / THREAD_NUM;
const size_t tID = size_t(threadIdx.x );// thread index
const size_t bID = size_t(blockIdx.x );// block index
int offset = 1; // reduction step size, halved each round
// start timing
if ( tID == 0 ) pTime[bID] =clock( );// let one thread per block record the clock
// accumulate this thread's partial sum of squares
sharedData[tID] = 0;
for ( size_t i = bID * THREAD_NUM+ tID;
i < DATA_SIZE;
i += BLOCK_NUM * THREAD_NUM )
{
sharedData[tID] += pIn[i] * pIn[i];
}
// synchronize with the other threads in the block
__syncthreads( );
offset = THREAD_NUM / 2;
while ( offset > 0 )
{
if ( tID < offset )// reduce: the lower half of the threads adds the upper half
{
sharedData[tID] += sharedData[tID + offset];
}
offset >>= 1;
__syncthreads( );
}
if ( tID == 0 )// thread 0 stores the block result and records the clock
{
pOut[bID] = sharedData[0];
pTime[bID + BLOCK_NUM] = clock( );
}
}
bool CUDA_SquareSum( int* pOut,clock_t* pTime,
int* pIn, size_t dataSize )
{
assert( pIn != nullptr );
assert( pOut != nullptr );
int* pDevIn = nullptr;
int* pDevOut = nullptr;
size_t* pDevDataSize = nullptr;
clock_t* pDevTime = nullptr;
// 1. select the device
hipError_t cudaStatus = hipSetDevice( 0 );// succeeds as long as a supported GPU is installed
if ( cudaStatus != hipSuccess )
{
fprintf( stderr, "cudaSetDevice()" );
return false;
}
switch ( true)
{
default:
// 2. allocate device memory
cudaStatus = hipMalloc( (void**)&pDevIn,dataSize * sizeof( int) );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "cudaMalloc()" );
break;
}
cudaStatus = hipMalloc( (void**)&pDevOut,BLOCK_NUM * sizeof( int) );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "cudaMalloc()" );
break;
}
cudaStatus = hipMalloc( (void**)&pDevDataSize,sizeof( size_t ) );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "cudaMalloc()" );
break;
}
cudaStatus = hipMalloc( (void**)&pDevTime,BLOCK_NUM * 2 * sizeof( clock_t ) );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "cudaMalloc()" );
break;
}
// 3. copy host data to device memory
cudaStatus = hipMemcpy( pDevIn, pIn, dataSize * sizeof( int ),hipMemcpyHostToDevice );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "cudaMemcpy()" );
break;
}
cudaStatus = hipMemcpy( pDevDataSize, &dataSize, sizeof( size_t ), hipMemcpyHostToDevice );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "cudaMemcpy()" );
break;
}
// 4. launch the kernel; the host waits for the device to finish
hipLaunchKernelGGL(( Kernel_SquareSum), dim3(BLOCK_NUM), dim3(THREAD_NUM), THREAD_NUM *sizeof( int), 0,
pDevIn, pDevDataSize, pDevOut, pDevTime );
// 5. check whether the kernel launch failed
cudaStatus = hipGetLastError( );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "" );
break;
}
// 6. synchronize with the kernel and wait for completion
cudaStatus = hipDeviceSynchronize( );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "\n" );
break;
}
// 7. copy the results back to the host
cudaStatus = hipMemcpy( pOut, pDevOut, BLOCK_NUM * sizeof( int ),hipMemcpyDeviceToHost );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "" );
break;
}
cudaStatus = hipMemcpy( pTime, pDevTime, BLOCK_NUM * 2 * sizeof( clock_t ), hipMemcpyDeviceToHost );
if ( cudaStatus != hipSuccess)
{
fprintf( stderr, "" );
break;
}
// 8. free device memory
hipFree( pDevIn );
hipFree( pDevOut );
hipFree( pDevDataSize );
hipFree( pDevTime );
return true;
}
// 8. free device memory
hipFree( pDevIn );
hipFree( pDevOut );
hipFree( pDevDataSize );
hipFree( pDevTime );
return false;
}
void GenerateData( int* pData,size_t dataSize )// generate the input data
{
assert( pData != nullptr );
for ( size_t i = 0; i <dataSize; i++ )
{
srand( i + 3 );
pData[i] = rand( ) % 100;
}
}
int main( int argc, char** argv )// program entry point
{
int* pData = nullptr;
int* pResult = nullptr;
clock_t* pTime = nullptr;
// allocate page-locked host memory with the CUDA allocator
hipError_t cudaStatus = hipHostMalloc( &pData, DATA_SIZE * sizeof( int ) );
if ( cudaStatus != hipSuccess )
{
fprintf( stderr, "" );
return 1;
}
cudaStatus = hipHostMalloc( &pResult, BLOCK_NUM * sizeof( int ) );
if ( cudaStatus != hipSuccess )
{
fprintf( stderr, "" );
return 1;
}
cudaStatus = hipHostMalloc( &pTime, BLOCK_NUM * 2 * sizeof( clock_t ) );
if ( cudaStatus != hipSuccess )
{
fprintf( stderr, "" );
return 1;
}
GenerateData( pData, DATA_SIZE );// generate random input data
CUDA_SquareSum( pResult, pTime, pData, DATA_SIZE );// compute the sum of squares
// combine the per-block results on the CPU
int totalResult=0;
for ( int i = 0; i < BLOCK_NUM; ++i )
{
totalResult += pResult[i];
}
// compute the elapsed time
clock_t startTime = pTime[0];
clock_t endTime = pTime[BLOCK_NUM];
for ( int i = 0; i < BLOCK_NUM; ++i )
{
if ( startTime > pTime[i] )startTime = pTime[i];
if ( endTime < pTime[i +BLOCK_NUM] ) endTime = pTime[i + BLOCK_NUM];
}
clock_t elapsed = endTime - startTime;
// check for overflow
const char* pOverFlow = nullptr;
if ( totalResult < 0 )pOverFlow = " (overflow)";
else pOverFlow = "";
// print the benchmark results
printf( "CUDA sum of squares: %d%s\nelapsed clocks: %d\n",
totalResult, pOverFlow, elapsed );
hipDeviceProp_t prop;
if ( hipGetDeviceProperties(&prop, 0 ) == hipSuccess )
{
float actualTime = float( elapsed ) / float(prop.clockRate );
printf( "%.2fms\n", actualTime );
printf( "%.2fMB/s\n",
float( DATA_SIZE * sizeof( int )>> 20 ) * 1000.0f / actualTime );
printf( "GPU%s\n", prop.name );
}
hipHostFree( pData );
hipHostFree( pResult );
hipHostFree( pTime );
return 0;
}
|
5cb6225dbb6a8dd027ccfdd428cd49f664d33f01.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda_runtime.h>
#include <cctype>
#include <cassert>
#include <cstdio>
#include <ctime>
#include <cstdlib>
#define DATA_SIZE 1048576
#define BLOCK_NUM 32
#define THREAD_NUM 256
#ifndef nullptr
#define nullptr 0
#endif
using namespace std;
/////////////////////// kernel that runs on the device ///////////////////////
__global__ static void Kernel_SquareSum( int* pIn, size_t* pDataSize,
int*pOut, clock_t* pTime )
{
// dynamically allocated shared memory
extern __shared__ int sharedData[];
const size_t computeSize =*pDataSize / THREAD_NUM;
const size_t tID = size_t(threadIdx.x );// thread index
const size_t bID = size_t(blockIdx.x );// block index
int offset = 1; // reduction step size, halved each round
// start timing
if ( tID == 0 ) pTime[bID] =clock( );// let one thread per block record the clock
// accumulate this thread's partial sum of squares
sharedData[tID] = 0;
for ( size_t i = bID * THREAD_NUM+ tID;
i < DATA_SIZE;
i += BLOCK_NUM * THREAD_NUM )
{
sharedData[tID] += pIn[i] * pIn[i];
}
// synchronize with the other threads in the block
__syncthreads( );
offset = THREAD_NUM / 2;
while ( offset > 0 )
{
if ( tID < offset )// reduce: the lower half of the threads adds the upper half
{
sharedData[tID] += sharedData[tID + offset];
}
offset >>= 1;
__syncthreads( );
}
if ( tID == 0 )// thread 0 stores the block result and records the clock
{
pOut[bID] = sharedData[0];
pTime[bID + BLOCK_NUM] = clock( );
}
}
bool CUDA_SquareSum( int* pOut,clock_t* pTime,
int* pIn, size_t dataSize )
{
assert( pIn != nullptr );
assert( pOut != nullptr );
int* pDevIn = nullptr;
int* pDevOut = nullptr;
size_t* pDevDataSize = nullptr;
clock_t* pDevTime = nullptr;
// 1. select the device
cudaError_t cudaStatus = cudaSetDevice( 0 );// succeeds as long as an NVIDIA GPU is installed
if ( cudaStatus != cudaSuccess )
{
fprintf( stderr, "调用cudaSetDevice()函数失败!" );
return false;
}
switch ( true)
{
default:
// 2. allocate device memory
cudaStatus = cudaMalloc( (void**)&pDevIn,dataSize * sizeof( int) );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "调用cudaMalloc()函数初始化显卡中数组时失败!" );
break;
}
cudaStatus = cudaMalloc( (void**)&pDevOut,BLOCK_NUM * sizeof( int) );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "调用cudaMalloc()函数初始化显卡中返回值时失败!" );
break;
}
cudaStatus = cudaMalloc( (void**)&pDevDataSize,sizeof( size_t ) );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "调用cudaMalloc()函数初始化显卡中数据大小时失败!" );
break;
}
cudaStatus = cudaMalloc( (void**)&pDevTime,BLOCK_NUM * 2 * sizeof( clock_t ) );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "调用cudaMalloc()函数初始化显卡中耗费用时变量失败!" );
break;
}
// 3. copy host data to device memory
cudaStatus = cudaMemcpy( pDevIn, pIn, dataSize * sizeof( int ),cudaMemcpyHostToDevice );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "调用cudaMemcpy()函数初始化宿主程序数据数组到显卡时失败!" );
break;
}
cudaStatus = cudaMemcpy( pDevDataSize, &dataSize, sizeof( size_t ), cudaMemcpyHostToDevice );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "调用cudaMemcpy()函数初始化宿主程序数据大小到显卡时失败!" );
break;
}
// 4. launch the kernel; the host waits for the device to finish
Kernel_SquareSum<<<BLOCK_NUM, THREAD_NUM, THREAD_NUM *sizeof( int)>>>
( pDevIn, pDevDataSize, pDevOut, pDevTime );
// 5. check whether the kernel launch failed
cudaStatus = cudaGetLastError( );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "显卡执行程序时失败!" );
break;
}
// 6. synchronize with the kernel and wait for completion
cudaStatus = cudaDeviceSynchronize( );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "在与内核同步的过程中发生问题!\n" );
break;
}
// 7. copy the results back to the host
cudaStatus = cudaMemcpy( pOut, pDevOut, BLOCK_NUM * sizeof( int ),cudaMemcpyDeviceToHost );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "在将结果数据从显卡复制到宿主程序中失败!" );
break;
}
cudaStatus = cudaMemcpy( pTime, pDevTime, BLOCK_NUM * 2 * sizeof( clock_t ), cudaMemcpyDeviceToHost );
if ( cudaStatus != cudaSuccess)
{
fprintf( stderr, "在将耗费用时数据从显卡复制到宿主程序中失败!" );
break;
}
// 8. free device memory
cudaFree( pDevIn );
cudaFree( pDevOut );
cudaFree( pDevDataSize );
cudaFree( pDevTime );
return true;
}
// 8. free device memory
cudaFree( pDevIn );
cudaFree( pDevOut );
cudaFree( pDevDataSize );
cudaFree( pDevTime );
return false;
}
void GenerateData( int* pData,size_t dataSize )// generate the input data
{
assert( pData != nullptr );
for ( size_t i = 0; i <dataSize; i++ )
{
srand( i + 3 );
pData[i] = rand( ) % 100;
}
}
int main( int argc, char** argv )// program entry point
{
int* pData = nullptr;
int* pResult = nullptr;
clock_t* pTime = nullptr;
// allocate page-locked host memory with the CUDA allocator
cudaError_t cudaStatus = cudaMallocHost( &pData, DATA_SIZE * sizeof( int ) );
if ( cudaStatus != cudaSuccess )
{
fprintf( stderr, "在主机中分配资源失败!" );
return 1;
}
cudaStatus = cudaMallocHost( &pResult, BLOCK_NUM * sizeof( int ) );
if ( cudaStatus != cudaSuccess )
{
fprintf( stderr, "在主机中分配资源失败!" );
return 1;
}
cudaStatus = cudaMallocHost( &pTime, BLOCK_NUM * 2 * sizeof( clock_t ) );
if ( cudaStatus != cudaSuccess )
{
fprintf( stderr, "在主机中分配资源失败!" );
return 1;
}
GenerateData( pData, DATA_SIZE );// generate random input data
CUDA_SquareSum( pResult, pTime, pData, DATA_SIZE );// compute the sum of squares
// combine the per-block results on the CPU
int totalResult=0;
for ( int i = 0; i < BLOCK_NUM; ++i )
{
totalResult += pResult[i];
}
// compute the elapsed time
clock_t startTime = pTime[0];
clock_t endTime = pTime[BLOCK_NUM];
for ( int i = 0; i < BLOCK_NUM; ++i )
{
if ( startTime > pTime[i] )startTime = pTime[i];
if ( endTime < pTime[i +BLOCK_NUM] ) endTime = pTime[i + BLOCK_NUM];
}
clock_t elapsed = endTime - startTime;
// check for overflow
const char* pOverFlow = nullptr;
if ( totalResult < 0 )pOverFlow = " (overflow)";
else pOverFlow = "";
// print the benchmark results
printf( "CUDA sum of squares: %d%s\nelapsed clocks: %d\n",
totalResult, pOverFlow, elapsed );
cudaDeviceProp prop;
if ( cudaGetDeviceProperties(&prop, 0 ) == cudaSuccess )
{
float actualTime = float( elapsed ) / float(prop.clockRate );
printf( "实际执行时间为:%.2fms\n", actualTime );
printf( "带宽为:%.2fMB/s\n",
float( DATA_SIZE * sizeof( int )>> 20 ) * 1000.0f / actualTime );
printf( "GPU设备型号:%s\n", prop.name );
}
cudaFreeHost( pData );
cudaFreeHost( pResult );
cudaFreeHost( pTime );
return 0;
}
|
eadec97817e1d83857bc44401807791aa799fb20.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHTensor.hpp"
#include "common.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
// Kernel for fast unfold+copy
// Borrowed from Theano
// Authors: Arjun Jain, Frédéric Bastien, Jan Schlüter, Nicolas Ballas
template <typename Dtype>
__global__ void im3d2col_kernel(const int64_t n, const Dtype* data_im,
const int64_t height, const int64_t width, const int64_t depth,
const int64_t kernel_h, const int64_t kernel_w, const int64_t kernel_d,
const int64_t pad_h, const int64_t pad_w, const int64_t pad_d,
const int64_t stride_h, const int64_t stride_w, const int64_t stride_d,
const int64_t height_col, const int64_t width_col, const int64_t depth_col,
Dtype* data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
int64_t d_out = index % depth_col;
int64_t w_index = index / depth_col;
int64_t w_out = w_index % width_col;
int64_t h_index = w_index / width_col;
int64_t h_out = h_index % height_col;
int64_t channel_in = h_index / height_col;
//channel_in = 1;
int64_t channel_out = channel_in * kernel_h * kernel_w * kernel_d;
int64_t h_in = h_out * stride_h - pad_h;
int64_t w_in = w_out * stride_w - pad_w;
int64_t d_in = d_out * stride_d - pad_d;
Dtype* data_col_ptr = data_col;
data_col_ptr += channel_out * (height_col * width_col * depth_col) +
h_out * (width_col * depth_col) + w_out * depth_col + d_out;
const Dtype* data_im_ptr = data_im;
data_im_ptr += channel_in * (height * width * depth) +
h_in * (width * depth) + w_in * depth + d_in;
for (int64_t i = 0; i < kernel_h; ++i)
{
int64_t h = h_in + i;
for (int64_t j = 0; j < kernel_w; ++j)
{
int64_t w = w_in + j;
for (int64_t k = 0; k < kernel_d; ++k)
{
int64_t d = d_in + k;
*data_col_ptr = (h >= 0 && w >= 0 && d >= 0 &&
h < height && w < width && d < depth) ?
data_im_ptr[i * (width * depth) + j *depth + k] : ScalarConvert<int, Dtype>::to(0);
data_col_ptr += height_col * width_col * depth_col;
}
}
}
}
}
template <typename Dtype>
void im3d2col(hipStream_t stream, const Dtype* data_im, const int64_t channels,
const int64_t height, const int64_t width, const int64_t depth,
const int64_t kernel_h, const int64_t kernel_w, const int64_t kernel_d,
const int64_t pad_h, const int64_t pad_w, const int64_t pad_d,
const int64_t stride_h, const int64_t stride_w, const int64_t stride_d,
Dtype* data_col)
{
// We are going to launch channels * height_col * width_col * depth_col kernels, each
// kernel responsible for copying a single-channel grid.
int64_t height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int64_t width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int64_t depth_col = (depth + 2 * pad_d - kernel_d) / stride_d + 1;
int64_t num_kernels = channels * height_col * width_col * depth_col;
hipLaunchKernelGGL(( im3d2col_kernel), dim3(GET_BLOCKS(num_kernels)),
dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im,
height, width, depth,
kernel_h, kernel_w, kernel_d,
pad_h, pad_w, pad_d,
stride_h, stride_w, stride_d,
height_col, width_col, depth_col,
data_col);
THCudaCheck(hipGetLastError());
}
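// Layout note (illustrative, not from the original source): for a single input
// of shape (channels, height, width, depth) = (2, 5, 5, 5), a 3x3x3 kernel,
// zero padding and unit stride, height_col = width_col = depth_col = 3, so
// data_col holds channels * kernel_h * kernel_w * kernel_d = 54 "rows" of
// height_col * width_col * depth_col = 27 columns (1458 values), ready to be
// multiplied against the unrolled weight matrix by a single GEMM.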
template <typename Dtype, typename Acctype>
__global__ void col2im3d_kernel(const int64_t n, const Dtype* data_col,
const int64_t height, const int64_t width, const int64_t depth,
const int64_t channels,
const int64_t patch_h, const int64_t patch_w, const int64_t patch_d,
const int64_t pad_h, const int64_t pad_w, const int64_t pad_d,
const int64_t stride_h, const int64_t stride_w, const int64_t stride_d,
const int64_t height_col, const int64_t width_col, const int64_t depth_col,
Dtype* data_im)
{
CUDA_KERNEL_LOOP(index, n)
{
Acctype val = 0;
int64_t d = index % depth + pad_d;
int64_t w_index = index / depth;
int64_t w = w_index % width + pad_w;
int64_t h_index = w_index / width;
int64_t h = h_index % height + pad_h;
int64_t c = h_index / height;
// compute the start and end of the output
int64_t d_col_start = (d < patch_d) ? 0 : (d - patch_d) / stride_d + 1;
int64_t d_col_end = min(d / stride_d + 1, depth_col);
int64_t w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int64_t w_col_end = min(w / stride_w + 1, width_col);
int64_t h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int64_t h_col_end = min(h / stride_h + 1, height_col);
int64_t offset =
(c * patch_h * patch_w * patch_d + h * patch_w * patch_d + w * patch_d + d) * height_col * width_col * depth_col;
int64_t coeff_h_col = (1 - stride_h * patch_w * patch_d * height_col) * width_col * depth_col;
int64_t coeff_w_col = (1 - stride_w * patch_d * height_col * width_col) * depth_col;
int64_t coeff_d_col = (1 - stride_d * height_col * width_col * depth_col);
for (int64_t d_col = d_col_start; d_col < d_col_end; ++d_col)
for (int64_t h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int64_t w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col + d_col * coeff_d_col];
}
}
data_im[index] = ScalarConvert<Acctype, Dtype>::to(val);
}
}
template <typename Dtype, typename Acctype>
void col2im3d(hipStream_t stream, const Dtype* data_col, const int64_t channels,
const int64_t height, const int64_t width, const int64_t depth,
const int64_t patch_h, const int64_t patch_w, const int64_t patch_d,
const int64_t pad_h, const int64_t pad_w, const int64_t pad_d,
const int64_t stride_h, const int64_t stride_w, const int64_t stride_d,
Dtype* data_im)
{
int64_t height_col = (height + 2 * pad_h - patch_h) / stride_h + 1;
int64_t width_col = (width + 2 * pad_w - patch_w) / stride_w + 1;
int64_t depth_col = (depth + 2 * pad_d - patch_d) / stride_d + 1;
int64_t num_kernels = channels * height * width * depth;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
hipLaunchKernelGGL(( col2im3d_kernel<Dtype, Acctype>), dim3(GET_BLOCKS(num_kernels)),
dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col,
height, width, depth, channels,
patch_h, patch_w, patch_d,
pad_h, pad_w, pad_d,
stride_h, stride_w, stride_d,
height_col, width_col, depth_col,
data_im);
THCudaCheck(hipGetLastError());
}
#include "generic/VolumetricConvolution.cu"
#include "THHGenerateFloatTypes.h"
|
eadec97817e1d83857bc44401807791aa799fb20.cu
|
#include "THCUNN.h"
#include "THCTensor.hpp"
#include "common.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
// Kernel for fast unfold+copy
// Borrowed from Theano
// Authors: Arjun Jain, Frédéric Bastien, Jan Schlüter, Nicolas Ballas
template <typename Dtype>
__global__ void im3d2col_kernel(const int64_t n, const Dtype* data_im,
const int64_t height, const int64_t width, const int64_t depth,
const int64_t kernel_h, const int64_t kernel_w, const int64_t kernel_d,
const int64_t pad_h, const int64_t pad_w, const int64_t pad_d,
const int64_t stride_h, const int64_t stride_w, const int64_t stride_d,
const int64_t height_col, const int64_t width_col, const int64_t depth_col,
Dtype* data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
int64_t d_out = index % depth_col;
int64_t w_index = index / depth_col;
int64_t w_out = w_index % width_col;
int64_t h_index = w_index / width_col;
int64_t h_out = h_index % height_col;
int64_t channel_in = h_index / height_col;
//channel_in = 1;
int64_t channel_out = channel_in * kernel_h * kernel_w * kernel_d;
int64_t h_in = h_out * stride_h - pad_h;
int64_t w_in = w_out * stride_w - pad_w;
int64_t d_in = d_out * stride_d - pad_d;
Dtype* data_col_ptr = data_col;
data_col_ptr += channel_out * (height_col * width_col * depth_col) +
h_out * (width_col * depth_col) + w_out * depth_col + d_out;
const Dtype* data_im_ptr = data_im;
data_im_ptr += channel_in * (height * width * depth) +
h_in * (width * depth) + w_in * depth + d_in;
for (int64_t i = 0; i < kernel_h; ++i)
{
int64_t h = h_in + i;
for (int64_t j = 0; j < kernel_w; ++j)
{
int64_t w = w_in + j;
for (int64_t k = 0; k < kernel_d; ++k)
{
int64_t d = d_in + k;
*data_col_ptr = (h >= 0 && w >= 0 && d >= 0 &&
h < height && w < width && d < depth) ?
data_im_ptr[i * (width * depth) + j *depth + k] : ScalarConvert<int, Dtype>::to(0);
data_col_ptr += height_col * width_col * depth_col;
}
}
}
}
}
template <typename Dtype>
void im3d2col(cudaStream_t stream, const Dtype* data_im, const int64_t channels,
const int64_t height, const int64_t width, const int64_t depth,
const int64_t kernel_h, const int64_t kernel_w, const int64_t kernel_d,
const int64_t pad_h, const int64_t pad_w, const int64_t pad_d,
const int64_t stride_h, const int64_t stride_w, const int64_t stride_d,
Dtype* data_col)
{
// We are going to launch channels * height_col * width_col * depth_col kernels, each
// kernel responsible for copying a single-channel grid.
int64_t height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int64_t width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int64_t depth_col = (depth + 2 * pad_d - kernel_d) / stride_d + 1;
int64_t num_kernels = channels * height_col * width_col * depth_col;
im3d2col_kernel<<<GET_BLOCKS(num_kernels),
CUDA_NUM_THREADS, 0, stream>>>(num_kernels, data_im,
height, width, depth,
kernel_h, kernel_w, kernel_d,
pad_h, pad_w, pad_d,
stride_h, stride_w, stride_d,
height_col, width_col, depth_col,
data_col);
THCudaCheck(cudaGetLastError());
}
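// Layout note (illustrative, not from the original source): for a single input
// of shape (channels, height, width, depth) = (2, 5, 5, 5), a 3x3x3 kernel,
// zero padding and unit stride, height_col = width_col = depth_col = 3, so
// data_col holds channels * kernel_h * kernel_w * kernel_d = 54 "rows" of
// height_col * width_col * depth_col = 27 columns (1458 values), ready to be
// multiplied against the unrolled weight matrix by a single GEMM.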
template <typename Dtype, typename Acctype>
__global__ void col2im3d_kernel(const int64_t n, const Dtype* data_col,
const int64_t height, const int64_t width, const int64_t depth,
const int64_t channels,
const int64_t patch_h, const int64_t patch_w, const int64_t patch_d,
const int64_t pad_h, const int64_t pad_w, const int64_t pad_d,
const int64_t stride_h, const int64_t stride_w, const int64_t stride_d,
const int64_t height_col, const int64_t width_col, const int64_t depth_col,
Dtype* data_im)
{
CUDA_KERNEL_LOOP(index, n)
{
Acctype val = 0;
int64_t d = index % depth + pad_d;
int64_t w_index = index / depth;
int64_t w = w_index % width + pad_w;
int64_t h_index = w_index / width;
int64_t h = h_index % height + pad_h;
int64_t c = h_index / height;
// compute the start and end of the output
int64_t d_col_start = (d < patch_d) ? 0 : (d - patch_d) / stride_d + 1;
int64_t d_col_end = min(d / stride_d + 1, depth_col);
int64_t w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int64_t w_col_end = min(w / stride_w + 1, width_col);
int64_t h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int64_t h_col_end = min(h / stride_h + 1, height_col);
int64_t offset =
(c * patch_h * patch_w * patch_d + h * patch_w * patch_d + w * patch_d + d) * height_col * width_col * depth_col;
int64_t coeff_h_col = (1 - stride_h * patch_w * patch_d * height_col) * width_col * depth_col;
int64_t coeff_w_col = (1 - stride_w * patch_d * height_col * width_col) * depth_col;
int64_t coeff_d_col = (1 - stride_d * height_col * width_col * depth_col);
for (int64_t d_col = d_col_start; d_col < d_col_end; ++d_col)
for (int64_t h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int64_t w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col + d_col * coeff_d_col];
}
}
data_im[index] = ScalarConvert<Acctype, Dtype>::to(val);
}
}
template <typename Dtype, typename Acctype>
void col2im3d(cudaStream_t stream, const Dtype* data_col, const int64_t channels,
const int64_t height, const int64_t width, const int64_t depth,
const int64_t patch_h, const int64_t patch_w, const int64_t patch_d,
const int64_t pad_h, const int64_t pad_w, const int64_t pad_d,
const int64_t stride_h, const int64_t stride_w, const int64_t stride_d,
Dtype* data_im)
{
int64_t height_col = (height + 2 * pad_h - patch_h) / stride_h + 1;
int64_t width_col = (width + 2 * pad_w - patch_w) / stride_w + 1;
int64_t depth_col = (depth + 2 * pad_d - patch_d) / stride_d + 1;
int64_t num_kernels = channels * height * width * depth;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
col2im3d_kernel<Dtype, Acctype><<<GET_BLOCKS(num_kernels),
CUDA_NUM_THREADS, 0, stream>>>(num_kernels, data_col,
height, width, depth, channels,
patch_h, patch_w, patch_d,
pad_h, pad_w, pad_d,
stride_h, stride_w, stride_d,
height_col, width_col, depth_col,
data_im);
THCudaCheck(cudaGetLastError());
}
#include "generic/VolumetricConvolution.cu"
#include "THCGenerateFloatTypes.h"
|
3b3d753676d7177e3e4b265eaeb4e5adad610ee0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <rocblas.h>
#include "common_cuda_helper.hpp"
#include "trt_cuda_helper.cuh"
#include "trt_plugin_helper.hpp"
using mmcv::TensorDesc;
template <class scalar_t>
__global__ void copy_permute_kernel(scalar_t *dst, const scalar_t *src, int n,
TensorDesc ts_src_stride,
TensorDesc ts_dst_stride,
TensorDesc ts_permute) {
const int src_dim = ts_src_stride.dim;
int *src_stride = &(ts_src_stride.stride[0]);
int *dst_stride = &(ts_dst_stride.stride[0]);
int *permute = &(ts_permute.shape[0]);
CUDA_1D_KERNEL_LOOP(index, n) {
size_t dst_index = index;
size_t src_index = 0;
for (int i = 0; i < src_dim; ++i) {
int dim_index = dst_index / dst_stride[i];
dst_index = dst_index % dst_stride[i];
src_index += dim_index * src_stride[permute[i]];
}
dst[index] = src[src_index];
}
}
template <class scalar_t>
void memcpyPermute(scalar_t *dst, const scalar_t *src, int *src_size,
int *permute, int src_dim, hipStream_t stream) {
size_t copy_size = 1;
TensorDesc ts_permute;
memcpy(&(ts_permute.shape[0]), permute, src_dim * sizeof(int));
TensorDesc ts_src_stride;
TensorDesc ts_dst_stride;
ts_src_stride.dim = src_dim;
ts_dst_stride.dim = src_dim;
int *src_stride = &(ts_src_stride.stride[0]);
int *dst_stride = &(ts_dst_stride.stride[0]);
int *dst_size = &(ts_dst_stride.shape[0]);
src_stride[src_dim - 1] = 1;
dst_stride[src_dim - 1] = 1;
for (int i = src_dim - 1; i >= 0; --i) {
dst_size[i] = src_size[permute[i]];
if (i < src_dim - 1) {
src_stride[i] = src_stride[i + 1] * src_size[i + 1];
}
}
for (int i = src_dim - 1; i >= 0; --i) {
copy_size *= dst_size[i];
if (i < src_dim - 1) {
dst_stride[i] = dst_stride[i + 1] * dst_size[i + 1];
}
}
hipLaunchKernelGGL(( copy_permute_kernel<scalar_t>)
, dim3(GET_BLOCKS(copy_size)), dim3(THREADS_PER_BLOCK), 0, stream,
dst, src, copy_size, ts_src_stride, ts_dst_stride, ts_permute);
}
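/*
 Usage sketch (illustrative; d_src, d_dst and stream are hypothetical names):
 permute a (2, 3, 4) float tensor with axis order (1, 2, 0), giving a (3, 4, 2)
 output, since dst_size[i] = src_size[permute[i]]:

     int src_size[3] = {2, 3, 4};
     int permute[3]  = {1, 2, 0};
     // d_src and d_dst are device buffers holding 24 floats each
     memcpyPermute<float>(d_dst, d_src, src_size, permute, 3, stream);
*/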
template void memcpyPermute<float>(float *dst, const float *src, int *src_size,
int *permute, int src_dim,
hipStream_t stream);
template <>
hipblasStatus_t cublasGemmWrap<float>(hipblasHandle_t handle,
hipblasOperation_t transa,
hipblasOperation_t transb, int m, int n,
int k, const float *alpha, const float *A,
int lda, const float *B, int ldb,
const float *beta, float *C, int ldc) {
return hipblasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb,
beta, C, ldc);
}
template <>
hipblasStatus_t cublasGemmWrap<half>(hipblasHandle_t handle,
hipblasOperation_t transa,
hipblasOperation_t transb, int m, int n,
int k, const half *alpha, const half *A,
int lda, const half *B, int ldb,
const half *beta, half *C, int ldc) {
return hipblasHgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb,
beta, C, ldc);
}
|
3b3d753676d7177e3e4b265eaeb4e5adad610ee0.cu
|
#include <cublas_v2.h>
#include "common_cuda_helper.hpp"
#include "trt_cuda_helper.cuh"
#include "trt_plugin_helper.hpp"
using mmcv::TensorDesc;
template <class scalar_t>
__global__ void copy_permute_kernel(scalar_t *dst, const scalar_t *src, int n,
TensorDesc ts_src_stride,
TensorDesc ts_dst_stride,
TensorDesc ts_permute) {
const int src_dim = ts_src_stride.dim;
int *src_stride = &(ts_src_stride.stride[0]);
int *dst_stride = &(ts_dst_stride.stride[0]);
int *permute = &(ts_permute.shape[0]);
CUDA_1D_KERNEL_LOOP(index, n) {
size_t dst_index = index;
size_t src_index = 0;
for (int i = 0; i < src_dim; ++i) {
int dim_index = dst_index / dst_stride[i];
dst_index = dst_index % dst_stride[i];
src_index += dim_index * src_stride[permute[i]];
}
dst[index] = src[src_index];
}
}
template <class scalar_t>
void memcpyPermute(scalar_t *dst, const scalar_t *src, int *src_size,
int *permute, int src_dim, cudaStream_t stream) {
size_t copy_size = 1;
TensorDesc ts_permute;
memcpy(&(ts_permute.shape[0]), permute, src_dim * sizeof(int));
TensorDesc ts_src_stride;
TensorDesc ts_dst_stride;
ts_src_stride.dim = src_dim;
ts_dst_stride.dim = src_dim;
int *src_stride = &(ts_src_stride.stride[0]);
int *dst_stride = &(ts_dst_stride.stride[0]);
int *dst_size = &(ts_dst_stride.shape[0]);
src_stride[src_dim - 1] = 1;
dst_stride[src_dim - 1] = 1;
for (int i = src_dim - 1; i >= 0; --i) {
dst_size[i] = src_size[permute[i]];
if (i < src_dim - 1) {
src_stride[i] = src_stride[i + 1] * src_size[i + 1];
}
}
for (int i = src_dim - 1; i >= 0; --i) {
copy_size *= dst_size[i];
if (i < src_dim - 1) {
dst_stride[i] = dst_stride[i + 1] * dst_size[i + 1];
}
}
copy_permute_kernel<scalar_t>
<<<GET_BLOCKS(copy_size), THREADS_PER_BLOCK, 0, stream>>>(
dst, src, copy_size, ts_src_stride, ts_dst_stride, ts_permute);
}
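// Editorial sketch (not part of the original source): a CPU-only reference of the index
// mapping used by copy_permute_kernel above, written for rank-3 row-major tensors. The
// helper name cpuPermuteReference is hypothetical and exists purely for illustration:
// each flat destination index is decomposed with the destination strides, and the digits
// are recombined with the permuted source strides to find the source element.
static inline void cpuPermuteReference(float *dst, const float *src, const int src_size[3],
                                       const int permute[3]) {
  int dst_size[3], src_stride[3], dst_stride[3];
  for (int i = 0; i < 3; ++i) dst_size[i] = src_size[permute[i]];
  src_stride[2] = dst_stride[2] = 1;
  for (int i = 1; i >= 0; --i) {
    src_stride[i] = src_stride[i + 1] * src_size[i + 1];
    dst_stride[i] = dst_stride[i + 1] * dst_size[i + 1];
  }
  int n = dst_size[0] * dst_size[1] * dst_size[2];
  for (int index = 0; index < n; ++index) {
    int rem = index, src_index = 0;
    for (int i = 0; i < 3; ++i) {
      int dim_index = rem / dst_stride[i];      // coordinate along destination dim i
      rem = rem % dst_stride[i];
      src_index += dim_index * src_stride[permute[i]];
    }
    dst[index] = src[src_index];
  }
}
// e.g. src_size = {2, 3, 4} with permute = {2, 0, 1} produces a {4, 2, 3} tensor, the
// same result memcpyPermute computes on the GPU.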
template void memcpyPermute<float>(float *dst, const float *src, int *src_size,
int *permute, int src_dim,
cudaStream_t stream);
template <>
cublasStatus_t cublasGemmWrap<float>(cublasHandle_t handle,
cublasOperation_t transa,
cublasOperation_t transb, int m, int n,
int k, const float *alpha, const float *A,
int lda, const float *B, int ldb,
const float *beta, float *C, int ldc) {
return cublasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb,
beta, C, ldc);
}
template <>
cublasStatus_t cublasGemmWrap<half>(cublasHandle_t handle,
cublasOperation_t transa,
cublasOperation_t transb, int m, int n,
int k, const half *alpha, const half *A,
int lda, const half *B, int ldb,
const half *beta, half *C, int ldc) {
return cublasHgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb,
beta, C, ldc);
}
|
f0892b65c87b8d87982b3c53e090c937cdda7b00.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Distributed under MIT licence. See https://github.com/QuEST-Kit/QuEST/blob/master/LICENCE.txt for details
/** @file
* An implementation of the backend in ../QuEST_internal.h for a GPU environment.
*/
# include "QuEST.h"
# include "QuEST_precision.h"
# include "QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey
# include "mt19937ar.h"
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# define REDUCE_SHARED_SIZE 512
# define DEBUG 0
static __device__ int extractBit (int locationOfBitFromRight, long long int theEncodedNumber)
{
return (theEncodedNumber & ( 1LL << locationOfBitFromRight )) >> locationOfBitFromRight;
}
#ifdef __cplusplus
extern "C" {
#endif
void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) {
hipDeviceSynchronize();
hipMemcpy(
qureg.deviceStateVec.real + startInd,
reals,
numAmps * sizeof(*(qureg.deviceStateVec.real)),
hipMemcpyHostToDevice);
hipMemcpy(
qureg.deviceStateVec.imag + startInd,
imags,
numAmps * sizeof(*(qureg.deviceStateVec.real)),
hipMemcpyHostToDevice);
}
/** works for both statevectors and density matrices */
void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) {
// copy copyQureg's GPU statevec to targetQureg's GPU statevec
hipDeviceSynchronize();
hipMemcpy(
targetQureg.deviceStateVec.real,
copyQureg.deviceStateVec.real,
targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.real)),
hipMemcpyDeviceToDevice);
hipMemcpy(
targetQureg.deviceStateVec.imag,
copyQureg.deviceStateVec.imag,
targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.imag)),
hipMemcpyDeviceToDevice);
}
__global__ void densmatr_initPureStateKernel(
long long int numPureAmps,
qreal *targetVecReal, qreal *targetVecImag,
qreal *copyVecReal, qreal *copyVecImag)
{
// this is a particular index of the pure copyQureg
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=numPureAmps) return;
qreal realRow = copyVecReal[index];
qreal imagRow = copyVecImag[index];
for (long long int col=0; col < numPureAmps; col++) {
qreal realCol = copyVecReal[col];
qreal imagCol = - copyVecImag[col]; // minus for conjugation
targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol;
targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol;
}
}
void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_initPureStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
copyQureg.numAmpsPerChunk,
targetQureg.deviceStateVec.real, targetQureg.deviceStateVec.imag,
copyQureg.deviceStateVec.real, copyQureg.deviceStateVec.imag);
}
__global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal probFactor, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = probFactor;
stateVecImag[index] = 0.0;
}
void densmatr_initPlusState(Qureg qureg)
{
qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented));
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_initPlusStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
probFactor,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void densmatr_initClassicalStateKernel(
long long int densityNumElems,
qreal *densityReal, qreal *densityImag,
long long int densityInd)
{
// initialise the state to all zeros
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= densityNumElems) return;
densityReal[index] = 0.0;
densityImag[index] = 0.0;
if (index==densityInd){
// classical state has probability 1
densityReal[densityInd] = 1.0;
densityImag[densityInd] = 0.0;
}
}
void densmatr_initClassicalState(Qureg qureg, long long int stateInd)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
// index of the desired state in the flat density matrix
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int densityInd = (densityDim + 1)*stateInd;
// identical to pure version
hipLaunchKernelGGL(( densmatr_initClassicalStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag, densityInd);
}
void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env)
{
// allocate CPU memory
    long long int numAmps = 1LL << numQubits;
    long long int numAmpsPerRank = numAmps/env.numRanks;
    // use the element size, not the pointer size, so non-double precisions allocate correctly
    qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.real)));
    qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.imag)));
    if (env.numRanks>1){
        qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.real)));
        qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.imag)));
    }
}
// check cpu memory allocation was successful
if ( (!(qureg->stateVec.real) || !(qureg->stateVec.imag))
&& numAmpsPerRank ) {
printf("Could not allocate memory!\n");
exit (EXIT_FAILURE);
}
if ( env.numRanks>1 && (!(qureg->pairStateVec.real) || !(qureg->pairStateVec.imag))
&& numAmpsPerRank ) {
printf("Could not allocate memory!\n");
exit (EXIT_FAILURE);
}
qureg->numQubitsInStateVec = numQubits;
qureg->numAmpsPerChunk = numAmpsPerRank;
qureg->numAmpsTotal = numAmps;
qureg->chunkId = env.rank;
qureg->numChunks = env.numRanks;
qureg->isDensityMatrix = 0;
// allocate GPU memory
hipMalloc(&(qureg->deviceStateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.real)));
hipMalloc(&(qureg->deviceStateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.imag)));
hipMalloc(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal));
hipMalloc(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))*
sizeof(qreal));
// check gpu memory allocation was successful
if (!(qureg->deviceStateVec.real) || !(qureg->deviceStateVec.imag)){
printf("Could not allocate memory on GPU!\n");
exit (EXIT_FAILURE);
}
}
void statevec_destroyQureg(Qureg qureg, QuESTEnv env)
{
// Free CPU memory
free(qureg.stateVec.real);
free(qureg.stateVec.imag);
if (env.numRanks>1){
free(qureg.pairStateVec.real);
free(qureg.pairStateVec.imag);
}
// Free GPU memory
hipFree(qureg.deviceStateVec.real);
hipFree(qureg.deviceStateVec.imag);
}
int GPUExists(void){
int deviceCount, device;
int gpuDeviceCount = 0;
struct hipDeviceProp_t properties;
hipError_t cudaResultCode = hipGetDeviceCount(&deviceCount);
if (cudaResultCode != hipSuccess) deviceCount = 0;
/* machines with no GPUs can still report one emulation device */
for (device = 0; device < deviceCount; ++device) {
hipGetDeviceProperties(&properties, device);
if (properties.major != 9999) { /* 9999 means emulation only */
++gpuDeviceCount;
}
}
if (gpuDeviceCount) return 1;
else return 0;
}
QuESTEnv createQuESTEnv(void) {
// init MPI environment
if (!GPUExists()){
printf("Trying to run GPU code with no GPU available\n");
exit(EXIT_FAILURE);
}
QuESTEnv env;
env.rank=0;
env.numRanks=1;
seedQuESTDefault();
return env;
}
void syncQuESTEnv(QuESTEnv env){
hipDeviceSynchronize();
}
int syncQuESTSuccess(int successCode){
return successCode;
}
void destroyQuESTEnv(QuESTEnv env){
// MPI finalize goes here in MPI version. Call this function anyway for consistency
}
void reportQuESTEnv(QuESTEnv env){
printf("EXECUTION ENVIRONMENT:\n");
printf("Running locally on one node with GPU\n");
printf("Number of ranks is %d\n", env.numRanks);
# ifdef _OPENMP
printf("OpenMP enabled\n");
printf("Number of threads available is %d\n", omp_get_max_threads());
# else
printf("OpenMP disabled\n");
# endif
}
void getEnvironmentString(QuESTEnv env, Qureg qureg, char str[200]){
sprintf(str, "%dqubits_GPU_noMpi_noOMP", qureg.numQubitsInStateVec);
}
void copyStateToGPU(Qureg qureg)
{
if (DEBUG) printf("Copying data to GPU\n");
    hipMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real,
            qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), hipMemcpyHostToDevice);
    hipMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag,
            qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyHostToDevice);
if (DEBUG) printf("Finished copying data to GPU\n");
}
void copyStateFromGPU(Qureg qureg)
{
hipDeviceSynchronize();
if (DEBUG) printf("Copying data from GPU\n");
hipMemcpy(qureg.stateVec.real, qureg.deviceStateVec.real,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), hipMemcpyDeviceToHost);
hipMemcpy(qureg.stateVec.imag, qureg.deviceStateVec.imag,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyDeviceToHost);
if (DEBUG) printf("Finished copying data from GPU\n");
}
/** Print the current state vector of probability amplitudes for a set of qubits to standard out.
For debugging purposes. Each rank should print output serially. Only print output for systems <= 5 qubits
*/
void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){
long long int index;
int rank;
copyStateFromGPU(qureg);
if (qureg.numQubitsInStateVec<=5){
for (rank=0; rank<qureg.numChunks; rank++){
if (qureg.chunkId==rank){
if (reportRank) {
printf("Reporting state from rank %d [\n", qureg.chunkId);
//printf("\trank, index, real, imag\n");
printf("real, imag\n");
} else if (rank==0) {
printf("Reporting state [\n");
printf("real, imag\n");
}
for(index=0; index<qureg.numAmpsPerChunk; index++){
printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]);
}
if (reportRank || rank==qureg.numChunks-1) printf("]\n");
}
syncQuESTEnv(env);
}
}
}
qreal statevec_getRealAmp(Qureg qureg, long long int index){
qreal el=0;
hipMemcpy(&el, &(qureg.deviceStateVec.real[index]),
sizeof(*(qureg.deviceStateVec.real)), hipMemcpyDeviceToHost);
return el;
}
qreal statevec_getImagAmp(Qureg qureg, long long int index){
qreal el=0;
hipMemcpy(&el, &(qureg.deviceStateVec.imag[index]),
sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyDeviceToHost);
return el;
}
__global__ void statevec_initZeroStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
// initialise the state to |0000..0000>
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
if (index==0){
// zero state |0000..0000> has probability 1
stateVecReal[0] = 1.0;
stateVecImag[0] = 0.0;
}
}
void statevec_initZeroState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initZeroStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal normFactor = 1.0/sqrt((qreal)stateVecSize);
stateVecReal[index] = normFactor;
stateVecImag[index] = 0.0;
}
void statevec_initPlusState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initPlusStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){
long long int index;
// initialise the state to |stateInd>
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
if (index==stateInd){
// classical state has probability 1
stateVecReal[stateInd] = 1.0;
stateVecImag[stateInd] = 0.0;
}
}
void statevec_initClassicalState(Qureg qureg, long long int stateInd)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initClassicalStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag, stateInd);
}
__global__ void statevec_initStateDebugKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = (index*2.0)/10.0;
stateVecImag[index] = (index*2.0+1.0)/10.0;
}
void statevec_initStateDebug(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initStateDebugKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){
long long int index;
int bit;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2);
bit = extractBit(qubitId, index);
if (bit==outcome) {
stateVecReal[index] = normFactor;
stateVecImag[index] = 0.0;
} else {
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
}
}
void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg->numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initStateOfSingleQubitKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg->numAmpsPerChunk, qureg->deviceStateVec.real, qureg->deviceStateVec.imag, qubitId, outcome);
}
// returns 1 if successful, else 0
int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){
long long int chunkSize, stateVecSize;
long long int indexInChunk, totalIndex;
chunkSize = qureg->numAmpsPerChunk;
stateVecSize = chunkSize*qureg->numChunks;
qreal *stateVecReal = qureg->stateVec.real;
qreal *stateVecImag = qureg->stateVec.imag;
FILE *fp;
char line[200];
fp = fopen(filename, "r");
if (fp == NULL)
return 0;
indexInChunk = 0; totalIndex = 0;
while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndex<stateVecSize){
if (line[0]!='#'){
int chunkId = totalIndex/chunkSize;
if (chunkId==qureg->chunkId){
# if QuEST_PREC==1
sscanf(line, "%f, %f", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# elif QuEST_PREC==2
sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# elif QuEST_PREC==4
sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# endif
indexInChunk += 1;
}
totalIndex += 1;
}
}
fclose(fp);
copyStateToGPU(*qureg);
// indicate success
return 1;
}
int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){
qreal diff;
int chunkSize = mq1.numAmpsPerChunk;
copyStateFromGPU(mq1);
copyStateFromGPU(mq2);
for (int i=0; i<chunkSize; i++){
diff = mq1.stateVec.real[i] - mq2.stateVec.real[i];
if (diff<0) diff *= -1;
if (diff>precision) return 0;
diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i];
if (diff<0) diff *= -1;
if (diff>precision) return 0;
}
return 1;
}
__global__ void statevec_compactUnitaryKernel (Qureg qureg, const int rotQubit, Complex alpha, Complex beta){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask;   // task-based approach to expose loops with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << rotQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
void statevec_compactUnitary(Qureg qureg, const int targetQubit, Complex alpha, Complex beta)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_compactUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, alpha, beta);
}
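// Editorial sketch (not in the original source): how the kernels above pair amplitudes.
// For task index t and target qubit q, indexUp is the basis state whose q-th bit is 0 and
// indexLo the partner state whose q-th bit is 1; the 2x2 unitary mixes exactly these two
// amplitudes. The helper name exampleIndexPair is hypothetical and host-side only.
static inline void exampleIndexPair(long long int thisTask, int targetQubit,
                                    long long int *indexUp, long long int *indexLo) {
    long long int sizeHalfBlock = 1LL << targetQubit;
    long long int sizeBlock     = 2LL * sizeHalfBlock;
    long long int thisBlock     = thisTask / sizeHalfBlock;
    *indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;   // bit targetQubit is 0
    *indexLo = *indexUp + sizeHalfBlock;                       // bit targetQubit is 1
}
// e.g. targetQubit==1: task 0 pairs amplitudes (0,2), task 1 pairs (1,3), task 2 pairs (4,6),
// so numAmpsPerChunk>>1 tasks touch every amplitude exactly once.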
__global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask;   // task-based approach to expose loops with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
int controlBit;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
}
void statevec_controlledCompactUnitary(Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledCompactUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, alpha, beta);
}
__global__ void statevec_unitaryKernel(Qureg qureg, const int targetQubit, ComplexMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask;   // task-based approach to expose loops with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
void statevec_unitary(Qureg qureg, const int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_unitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, u);
}
__global__ void statevec_controlledUnitaryKernel(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask;   // task-based approach to expose loops with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
int controlBit;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
void statevec_controlledUnitary(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, u);
}
__global__ void statevec_multiControlledUnitaryKernel(Qureg qureg, long long int mask, const int targetQubit, ComplexMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask;   // task-based approach to expose loops with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
if (mask == (mask & indexUp) ){
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
void statevec_multiControlledUnitary(Qureg qureg, int *controlQubits, int numControlQubits, const int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
long long int mask=0;
for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]);
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_multiControlledUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask, targetQubit, u);
}
__global__ void statevec_pauliXKernel(Qureg qureg, const int targetQubit){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp, // storage for previous state values
stateImagUp; // (used in updates)
// ----- temp variables
    long long int thisTask;   // task-based approach to expose loops with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
void statevec_pauliX(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_pauliXKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit);
}
__global__ void statevec_pauliYKernel(Qureg qureg, const int targetQubit, const int conjFac){
long long int sizeHalfBlock = 1LL << targetQubit;
long long int sizeBlock = 2LL * sizeHalfBlock;
long long int numTasks = qureg.numAmpsPerChunk >> 1;
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
long long int thisBlock = thisTask / sizeHalfBlock;
long long int indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
long long int indexLo = indexUp + sizeHalfBlock;
qreal stateRealUp, stateImagUp;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
// update under +-{{0, -i}, {i, 0}}
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
void statevec_pauliY(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_pauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, 1);
}
void statevec_pauliYConj(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_pauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, -1);
}
__global__ void statevec_controlledPauliYKernel(Qureg qureg, const int controlQubit, const int targetQubit, const int conjFac)
{
long long int index;
long long int sizeBlock, sizeHalfBlock;
long long int stateVecSize;
int controlBit;
qreal stateRealUp, stateImagUp;
long long int thisBlock, indexUp, indexLo;
sizeHalfBlock = 1LL << targetQubit;
sizeBlock = 2LL * sizeHalfBlock;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=(stateVecSize>>1)) return;
thisBlock = index / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + index%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
// update under +-{{0, -i}, {i, 0}}
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
}
void statevec_controlledPauliY(Qureg qureg, const int controlQubit, const int targetQubit)
{
int conjFactor = 1;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledPauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, conjFactor);
}
void statevec_controlledPauliYConj(Qureg qureg, const int controlQubit, const int targetQubit)
{
int conjFactor = -1;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledPauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, conjFactor);
}
__global__ void statevec_phaseShiftByTermKernel(Qureg qureg, const int targetQubit, qreal cosAngle, qreal sinAngle) {
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, indexUp,indexLo;
qreal stateRealLo, stateImagLo;
long long int thisTask;
const long long int numTasks = qureg.numAmpsPerChunk >> 1;
sizeHalfBlock = 1LL << targetQubit;
sizeBlock = 2LL * sizeHalfBlock;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
stateVecReal[indexLo] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[indexLo] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
void statevec_phaseShiftByTerm(Qureg qureg, const int targetQubit, Complex term)
{
qreal cosAngle = term.real;
qreal sinAngle = term.imag;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_phaseShiftByTermKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, cosAngle, sinAngle);
}
__global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, const int idQubit1, const int idQubit2, qreal cosAngle, qreal sinAngle)
{
long long int index;
long long int stateVecSize;
int bit1, bit2;
qreal stateRealLo, stateImagLo;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
bit1 = extractBit (idQubit1, index);
bit2 = extractBit (idQubit2, index);
if (bit1 && bit2) {
stateRealLo = stateVecReal[index];
stateImagLo = stateVecImag[index];
stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
}
void statevec_controlledPhaseShift(Qureg qureg, const int idQubit1, const int idQubit2, qreal angle)
{
qreal cosAngle = cos(angle);
qreal sinAngle = sin(angle);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);  // kernel visits every amplitude index
hipLaunchKernelGGL(( statevec_controlledPhaseShiftKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, idQubit1, idQubit2, cosAngle, sinAngle);
}
__global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) {
qreal stateRealLo, stateImagLo;
long long int index;
long long int stateVecSize;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
if (mask == (mask & index) ){
stateRealLo = stateVecReal[index];
stateImagLo = stateVecImag[index];
stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
}
void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle)
{
qreal cosAngle = cos(angle);
qreal sinAngle = sin(angle);
long long int mask=0;
for (int i=0; i<numControlQubits; i++)
mask = mask | (1LL<<controlQubits[i]);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_multiControlledPhaseShiftKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask, cosAngle, sinAngle);
}
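// Editorial sketch (not in the original source): the multi-controlled kernels above gate on
// `mask == (mask & index)`, which holds exactly when every control bit is set in the
// basis-state index. A hypothetical host-side check of the same condition:
static inline int exampleAllControlsSet(long long int index, int *controlQubits, int numControlQubits) {
    long long int mask = 0;
    for (int i=0; i<numControlQubits; i++) mask |= (1LL << controlQubits[i]);
    // e.g. controls {0,2} give mask 0b101: index 0b111 passes, index 0b011 fails
    return mask == (mask & index);
}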
qreal densmatr_calcTotalProb(Qureg qureg) {
// computes the trace using Kahan summation
qreal pTotal=0;
qreal y, t, c;
c = 0;
long long int numCols = 1LL << qureg.numQubitsRepresented;
long long diagIndex;
copyStateFromGPU(qureg);
for (int col=0; col< numCols; col++) {
diagIndex = col*(numCols + 1);
y = qureg.stateVec.real[diagIndex] - c;
t = pTotal + y;
c = ( t - pTotal ) - y; // brackets are important
pTotal = t;
}
return pTotal;
}
qreal statevec_calcTotalProb(Qureg qureg){
/* IJB - implemented using Kahan summation for greater accuracy at a slight floating
point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */
/* Don't change the bracketing in this routine! */
qreal pTotal=0;
qreal y, t, c;
long long int index;
long long int numAmpsPerRank = qureg.numAmpsPerChunk;
copyStateFromGPU(qureg);
c = 0.0;
for (index=0; index<numAmpsPerRank; index++){
/* Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan */
// pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index];
y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c;
t = pTotal + y;
c = ( t - pTotal ) - y;
pTotal = t;
/* Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan */
//pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index];
y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c;
t = pTotal + y;
c = ( t - pTotal ) - y;
pTotal = t;
}
return pTotal;
}
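// Editorial sketch (not in the original source): the compensated (Kahan) summation pattern
// used by densmatr_calcTotalProb and statevec_calcTotalProb, isolated into a hypothetical
// helper. The compensation term c recovers the low-order bits that are lost when a small
// contribution y is added to a much larger running total.
static inline qreal exampleKahanSum(const qreal *vals, long long int n) {
    qreal total = 0, c = 0;
    for (long long int i = 0; i < n; i++) {
        qreal y = vals[i] - c;    // remove the error carried over from the previous step
        qreal t = total + y;      // low-order bits of y may be lost in this addition
        c = (t - total) - y;      // (t - total) is what was really added; c is the loss
        total = t;                // the bracketing above matters: do not "simplify" it
    }
    return total;
}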
__global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, const int idQubit1, const int idQubit2)
{
long long int index;
long long int stateVecSize;
int bit1, bit2;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
bit1 = extractBit (idQubit1, index);
bit2 = extractBit (idQubit2, index);
if (bit1 && bit2) {
stateVecReal [index] = - stateVecReal [index];
stateVecImag [index] = - stateVecImag [index];
}
}
void statevec_controlledPhaseFlip(Qureg qureg, const int idQubit1, const int idQubit2)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledPhaseFlipKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, idQubit1, idQubit2);
}
__global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask)
{
long long int index;
long long int stateVecSize;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
if (mask == (mask & index) ){
stateVecReal [index] = - stateVecReal [index];
stateVecImag [index] = - stateVecImag [index];
}
}
void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits)
{
int threadsPerCUDABlock, CUDABlocks;
long long int mask=0;
for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]);
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_multiControlledPhaseFlipKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask);
}
__global__ void statevec_hadamardKernel (Qureg qureg, const int targetQubit){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask;   // task-based approach to expose loops with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal recRoot2 = 1.0/sqrt(2.0);
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo);
stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo);
stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo);
stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo);
}
void statevec_hadamard(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_hadamardKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit);
}
__global__ void statevec_controlledNotKernel(Qureg qureg, const int controlQubit, const int targetQubit)
{
long long int index;
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
long long int stateVecSize;
int controlBit;
// ----- temp variables
qreal stateRealUp, // storage for previous state values
stateImagUp; // (used in updates)
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=(stateVecSize>>1)) return;
thisBlock = index / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + index%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
}
void statevec_controlledNot(Qureg qureg, const int controlQubit, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledNotKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit);
}
__device__ __host__ unsigned int log2Int( unsigned int x )
{
unsigned int ans = 0 ;
while( x>>=1 ) ans++;
return ans ;
}
__device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){
int i, l, r;
int threadMax, maxDepth;
threadMax = length/2;
maxDepth = log2Int(length/2);
for (i=0; i<maxDepth+1; i++){
if (threadIdx.x<threadMax){
l = threadIdx.x;
r = l + threadMax;
arrayIn[l] = arrayIn[r] + arrayIn[l];
}
threadMax = threadMax >> 1;
__syncthreads(); // optimise -- use warp shuffle instead
}
if (threadIdx.x==0) reducedArray[blockIdx.x] = arrayIn[0];
}
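// Editorial sketch (not in the original source): the "use warp shuffle instead" note in
// reduceBlock refers to intra-warp reductions that need neither shared memory nor
// __syncthreads. A minimal warp-level sum using __shfl_down (HIP; CUDA 9+ spells it
// __shfl_down_sync with a full lane mask). This is an illustration only, not a drop-in
// replacement, since reduceBlock also reduces across the warps of a block.
__device__ inline qreal exampleWarpReduceSum(qreal val) {
    // halve the stride each step; lane i accumulates the value held by lane i+offset
    for (int offset = warpSize/2; offset > 0; offset >>= 1)
        val += __shfl_down(val, offset);
    return val;   // lane 0 ends up holding the sum over the whole warp/wavefront
}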
__global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){
extern __shared__ qreal tempReductionArray[];
int blockOffset = blockIdx.x*length;
tempReductionArray[threadIdx.x*2] = arrayIn[blockOffset + threadIdx.x*2];
tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1];
__syncthreads();
reduceBlock(tempReductionArray, reducedArray, length);
}
__global__ void densmatr_findProbabilityOfZeroKernel(
Qureg qureg, const int measureQubit, qreal *reducedArray
) {
// run by each thread
// use of block here refers to contiguous amplitudes where measureQubit = 0,
// (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numTasks = densityDim >> 1;
long long int sizeHalfBlock = 1LL << (measureQubit);
long long int sizeBlock = 2LL * sizeHalfBlock;
long long int thisBlock; // which block this thread is processing
long long int thisTask; // which part of the block this thread is processing
long long int basisIndex; // index of this thread's computational basis state
long long int densityIndex; // " " index of |basis><basis| in the flat density matrix
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
// figure out which density matrix prob that this thread is assigned
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
densityIndex = (densityDim + 1) * basisIndex;
// record the probability in the CUDA-BLOCK-wide array
qreal prob = qureg.deviceStateVec.real[densityIndex]; // im[densityIndex] assumed ~ 0
tempReductionArray[threadIdx.x] = prob;
// sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array
__syncthreads();
if (threadIdx.x<blockDim.x/2){
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
}
__global__ void statevec_findProbabilityOfZeroKernel(
Qureg qureg, const int measureQubit, qreal *reducedArray
) {
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
index; // current index for first half block
// ----- temp variables
    long long int thisTask;   // task-based approach to expose loops with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
// (good for shared memory parallelism)
extern __shared__ qreal tempReductionArray[];
// ---------------------------------------------------------------- //
// dimensions //
// ---------------------------------------------------------------- //
sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum,
// and then the number to skip
sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries)
// ---------------------------------------------------------------- //
// find probability //
// ---------------------------------------------------------------- //
//
// --- task-based shared-memory parallel implementation
//
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
qreal realVal, imagVal;
realVal = stateVecReal[index];
imagVal = stateVecImag[index];
tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal;
__syncthreads();
if (threadIdx.x<blockDim.x/2){
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
}
int getNumReductionLevels(long long int numValuesToReduce, int numReducedPerLevel){
int levels=0;
while (numValuesToReduce){
numValuesToReduce = numValuesToReduce/numReducedPerLevel;
levels++;
}
return levels;
}
void swapDouble(qreal **a, qreal **b){
qreal *temp;
temp = *a;
*a = *b;
*b = temp;
}
qreal densmatr_findProbabilityOfZero(Qureg qureg, const int measureQubit)
{
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numValuesToReduce = densityDim >> 1; // half of the diagonal has measureQubit=0
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
if (firstTime) {
hipLaunchKernelGGL(( densmatr_findProbabilityOfZeroKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
qureg, measureQubit, qureg.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal zeroProb;
hipMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
return zeroProb;
}
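// Editorial sketch (not in the original source): each pass of the loop above shrinks the
// number of outstanding partial sums by a factor of REDUCE_SHARED_SIZE, so e.g. 2^20
// partial values need three launches: 1048576 -> 2048 -> 4 -> 1. A hypothetical host-side
// trace of the per-launch problem sizes, mirroring the integer arithmetic used by the
// reduction loops in this file:
static inline void exampleTraceReductionSizes(long long int numValuesToReduce) {
    while (numValuesToReduce > 1) {
        printf("launch reduces %lld partial sums\n", numValuesToReduce);
        numValuesToReduce = numValuesToReduce / REDUCE_SHARED_SIZE;
    }
}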
qreal statevec_findProbabilityOfZero(Qureg qureg, const int measureQubit)
{
long long int numValuesToReduce = qureg.numAmpsPerChunk>>1;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
qreal stateProb=0;
int firstTime=1;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
while(numValuesToReduce>1){
if (numValuesToReduce<maxReducedPerLevel){
// Need less than one CUDA block to reduce values
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
} else {
// Use full CUDA blocks, with block size constrained by shared mem usage
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime){
hipLaunchKernelGGL(( statevec_findProbabilityOfZeroKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
qureg, measureQubit, qureg.firstLevelReduction);
firstTime=0;
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
hipMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
return stateProb;
}
qreal statevec_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome)
{
qreal outcomeProb = statevec_findProbabilityOfZero(qureg, measureQubit);
if (outcome==1)
outcomeProb = 1.0 - outcomeProb;
return outcomeProb;
}
qreal densmatr_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome)
{
qreal outcomeProb = densmatr_findProbabilityOfZero(qureg, measureQubit);
if (outcome==1)
outcomeProb = 1.0 - outcomeProb;
return outcomeProb;
}
/** computes either a real or imag term in the inner product */
__global__ void statevec_calcInnerProductKernel(
int getRealComp,
qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* vecImag2,
long long int numTermsToSum, qreal* reducedArray)
{
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numTermsToSum) return;
// choose whether to calculate the real or imaginary term of the inner product
qreal innerProdTerm;
if (getRealComp)
innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index];
else
innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index];
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = innerProdTerm;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Terrible code which unnecessarily individually computes and sums the real and imaginary components of the
* inner product, so as to not have to worry about keeping the sums separated during reduction.
* Truly disgusting, probably doubles runtime, please fix.
* @TODO could even do the kernel twice, storing real in bra.reduc and imag in ket.reduc?
*/
Complex statevec_calcInnerProduct(Qureg bra, Qureg ket) {
qreal innerProdReal, innerProdImag;
int getRealComp;
long long int numValuesToReduce;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel;
int firstTime;
// compute real component of inner product
getRealComp = 1;
numValuesToReduce = bra.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
hipLaunchKernelGGL(( statevec_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
getRealComp,
bra.deviceStateVec.real, bra.deviceStateVec.imag,
ket.deviceStateVec.real, ket.deviceStateVec.imag,
numValuesToReduce,
bra.firstLevelReduction);
firstTime = 0;
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
bra.firstLevelReduction,
bra.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
hipMemcpy(&innerProdReal, bra.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
// compute imag component of inner product
getRealComp = 0;
numValuesToReduce = bra.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
hipLaunchKernelGGL(( statevec_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
getRealComp,
bra.deviceStateVec.real, bra.deviceStateVec.imag,
ket.deviceStateVec.real, ket.deviceStateVec.imag,
numValuesToReduce,
bra.firstLevelReduction);
firstTime = 0;
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
bra.firstLevelReduction,
bra.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
hipMemcpy(&innerProdImag, bra.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
// return complex
Complex innerProd;
innerProd.real = innerProdReal;
innerProd.imag = innerProdImag;
return innerProd;
}
/** computes one term of (vec^*T) dens * vec */
__global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) {
// figure out which density matrix row to consider
long long int col;
long long int row = blockIdx.x*blockDim.x + threadIdx.x;
if (row >= dim) return;
qreal* densReal = dens.deviceStateVec.real;
qreal* densImag = dens.deviceStateVec.imag;
qreal* vecReal = vec.deviceStateVec.real;
qreal* vecImag = vec.deviceStateVec.imag;
// compute the row-th element of the product dens*vec
qreal prodReal = 0;
qreal prodImag = 0;
for (col=0LL; col < dim; col++) {
qreal densElemReal = densReal[dim*col + row];
qreal densElemImag = densImag[dim*col + row];
prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col];
prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col];
}
// multiply with row-th elem of (vec^*)
qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row];
// imag of every term should be zero, because each is a valid fidelity calc of an eigenstate
//qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row];
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = termReal;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
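/* Summing termReal over all rows gives Re(<vec| dens |vec>), the fidelity of the density matrix
 * against the pure state; the imaginary component is not computed since <vec|dens|vec> is real
 * for a Hermitian density matrix, as noted in the kernel above. */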
// @TODO implement
qreal densmatr_calcFidelity(Qureg qureg, Qureg pureState) {
    // we're summing one term of <pureState| qureg |pureState> per row of the density matrix
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numValuesToReduce = densityDim;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
// store the reduction in the pureState array
if (firstTime) {
hipLaunchKernelGGL(( densmatr_calcFidelityKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
qureg, pureState, densityDim, pureState.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
pureState.firstLevelReduction,
pureState.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal fidelity;
hipMemcpy(&fidelity, pureState.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
return fidelity;
}
__global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) {
// figure out which density matrix term this thread is assigned
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numAmpsToSum) return;
qreal term = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index];
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = term;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
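/* Purity Tr(rho^2) = sum_ij |rho_ij|^2 for a Hermitian rho, which is exactly the per-element term
 * formed above before the block reduction. */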
/** Computes the trace of the density matrix squared */
qreal densmatr_calcPurity(Qureg qureg) {
// we're summing the square of every term in the density matrix
long long int numValuesToReduce = qureg.numAmpsPerChunk;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
if (firstTime) {
hipLaunchKernelGGL(( densmatr_calcPurityKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
numValuesToReduce, qureg.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal traceDensSquared;
hipMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
return traceDensSquared;
}
__global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability)
{
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
index; // current index for first half block
// ----- measured probability
qreal renorm; // probability (returned) value
// ----- temp variables
    long long int thisTask; // task-based approach to expose the loop with small granularity
                            // (good for shared memory parallelism)
long long int numTasks=qureg.numAmpsPerChunk>>1;
// ---------------------------------------------------------------- //
// dimensions //
// ---------------------------------------------------------------- //
sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum,
// and then the number to skip
sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries)
// ---------------------------------------------------------------- //
// find probability //
// ---------------------------------------------------------------- //
//
// --- task-based shared-memory parallel implementation
//
renorm=1/sqrt(totalProbability);
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
if (outcome==0){
stateVecReal[index]=stateVecReal[index]*renorm;
stateVecImag[index]=stateVecImag[index]*renorm;
stateVecReal[index+sizeHalfBlock]=0;
stateVecImag[index+sizeHalfBlock]=0;
} else if (outcome==1){
stateVecReal[index]=0;
stateVecImag[index]=0;
stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm;
stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm;
}
}
/*
* outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or
* else the state-vector will lose normalisation
*/
void statevec_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_collapseToKnownProbOutcomeKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, measureQubit, outcome, outcomeProb);
}
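/* Typical use (sketch, with a hypothetical already-initialised state vector `qureg`): obtain the
 * outcome probability first and collapse with exactly that value, e.g.
 *     qreal prob = statevec_calcProbOfOutcome(qureg, 0, 0);
 *     statevec_collapseToKnownProbOutcome(qureg, 0, 0, prob);
 * Passing any other value leaves the post-measurement state unnormalised. */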
/** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */
__global__ void densmatr_collapseToKnownProbOutcomeKernel(
qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit,
long long int part1, long long int part2, long long int part3,
long long int rowBit, long long int colBit, long long int desired, long long int undesired)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numBasesToVisit) return;
long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
// renormalise desired outcome
vecReal[base + desired] /= outcomeProb;
vecImag[base + desired] /= outcomeProb;
// kill undesired outcome
vecReal[base + undesired] = 0;
vecImag[base + undesired] = 0;
// kill |..0..><..1..| states
vecReal[base + colBit] = 0;
vecImag[base + colBit] = 0;
vecReal[base + rowBit] = 0;
vecImag[base + rowBit] = 0;
}
/** This involves finding |...i...><...j...| states and killing those where i!=j */
void densmatr_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb) {
int rowQubit = measureQubit + qureg.numQubitsRepresented;
    long long int colBit = 1LL << measureQubit;
    long long int rowBit = 1LL << rowQubit;
long long int numBasesToVisit = qureg.numAmpsPerChunk/4;
long long int part1 = colBit -1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numBasesToVisit - (rowBit >> 1);
long long int desired, undesired;
if (outcome == 0) {
desired = 0;
undesired = colBit | rowBit;
} else {
desired = colBit | rowBit;
undesired = 0;
}
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_collapseToKnownProbOutcomeKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
outcomeProb, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBasesToVisit,
part1, part2, part3, rowBit, colBit, desired, undesired);
}
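/* The part1/part2/part3 masks re-expand the compact thread index by inserting a 0 bit at both the
 * column and the row position of the measured qubit: bits below colBit stay in place, bits between
 * colBit and rowBit shift up by one, and the remaining bits shift up by two, so every thread lands
 * on the |..0..><..0..| element of its 4-element group. */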
__global__ void densmatr_addDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) {
long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
if (ampInd >= numAmpsToVisit) return;
combineQureg.deviceStateVec.real[ampInd] *= 1-otherProb;
combineQureg.deviceStateVec.imag[ampInd] *= 1-otherProb;
combineQureg.deviceStateVec.real[ampInd] += otherProb*otherQureg.deviceStateVec.real[ampInd];
combineQureg.deviceStateVec.imag[ampInd] += otherProb*otherQureg.deviceStateVec.imag[ampInd];
}
void densmatr_addDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) {
long long int numAmpsToVisit = combineQureg.numAmpsPerChunk;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_addDensityMatrixKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
combineQureg, otherProb, otherQureg, numAmpsToVisit
);
}
/** Called once for every 4 amplitudes in density matrix
* Works by establishing the |..0..><..0..| state (for its given index) then
* visiting |..1..><..0..| and |..0..><..1..|. Labels |part1 X pa><rt2 NOT(X) part3|
* From the brain of Simon Benjamin
*/
__global__ void densmatr_oneQubitDephaseKernel(
qreal fac, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int colBit, long long int rowBit)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
vecReal[ampInd + colBit] *= fac;
vecImag[ampInd + colBit] *= fac;
vecReal[ampInd + rowBit] *= fac;
vecImag[ampInd + rowBit] *= fac;
}
void densmatr_oneQubitDegradeOffDiagonal(Qureg qureg, const int targetQubit, qreal dephFac) {
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_oneQubitDephaseKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, colBit, rowBit);
}
void densmatr_oneQubitDephase(Qureg qureg, const int targetQubit, qreal dephase) {
if (dephase == 0)
return;
qreal dephFac = 1 - dephase;
densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephFac);
}
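/* Net effect on the density matrix: the two elements of each 2x2 sub-block whose row and column
 * disagree on the target qubit (the |0><1| and |1><0| parts) are scaled by dephFac = 1 - dephase,
 * while the agreeing (diagonal) elements are left untouched. */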
/** Called 12 times for every 16 amplitudes in density matrix
* Each sums from the |..0..0..><..0..0..| index to visit either
* |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..|
* etc and so on to |..1..1..><..1..0|. Labels |part1 0 part2 0 par><t3 0 part4 0 part5|.
* From the brain of Simon Benjamin
*/
__global__ void densmatr_twoQubitDephaseKernel(
qreal fac, qreal* vecReal, qreal *vecImag, long long int numBackgroundStates, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3, long long int part4, long long int part5,
long long int colBit1, long long int rowBit1, long long int colBit2, long long int rowBit2)
{
long long int outerInd = blockIdx.x*blockDim.x + threadIdx.x;
if (outerInd >= numAmpsToVisit) return;
// sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A|
int meta = 1 + (outerInd/numBackgroundStates);
if (meta > 4) meta++;
if (meta > 9) meta++;
long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2);
long long int scanInd = outerInd % numBackgroundStates;
long long int stateInd = (
shift +
(scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4));
vecReal[stateInd] *= fac;
vecImag[stateInd] *= fac;
}
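/* meta skips the values 0, 5, 10 and 15, which are exactly the sub-block elements whose row and
 * column agree on both target qubits; the remaining 12 elements per group are the off-diagonal
 * ones, each scaled by fac = 1 - dephase. */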
// @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems?
void densmatr_twoQubitDephase(Qureg qureg, int qubit1, int qubit2, qreal dephase) {
if (dephase == 0)
return;
// assumes qubit2 > qubit1
int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
long long int colBit1 = 1LL << qubit1;
long long int rowBit1 = 1LL << rowQubit1;
long long int colBit2 = 1LL << qubit2;
long long int rowBit2 = 1LL << rowQubit2;
long long int part1 = colBit1 - 1;
long long int part2 = (colBit2 >> 1) - colBit1;
long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3);
qreal dephFac = 1 - dephase;
// refers to states |a 0 b 0 c><d 0 e 0 f| (target qubits are fixed)
long long int numBackgroundStates = qureg.numAmpsPerChunk/16;
// 12 of these states experience dephasing
long long int numAmpsToVisit = 12 * numBackgroundStates;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_twoQubitDephaseKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBackgroundStates, numAmpsToVisit,
part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2);
}
/** Works like oneQubitDephase but modifies every other element, and elements are averaged in pairs */
__global__ void densmatr_oneQubitDepolariseKernel(
qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int bothBits)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
long long int targetInd = baseInd + bothBits;
qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]);
qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]);
vecReal[baseInd] *= 1 - depolLevel;
vecImag[baseInd] *= 1 - depolLevel;
vecReal[targetInd] *= 1 - depolLevel;
vecImag[targetInd] *= 1 - depolLevel;
vecReal[baseInd] += realAvDepol;
vecImag[baseInd] += imagAvDepol;
vecReal[targetInd] += realAvDepol;
vecImag[targetInd] += imagAvDepol;
}
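/* This kernel mixes each |..0..><..0..| / |..1..><..1..| diagonal pair towards its average with
 * weight depolLevel; the off-diagonal |..0..><..1..| and |..1..><..0..| elements are scaled by
 * (1 - depolLevel) beforehand, via the densmatr_oneQubitDephase call inside
 * densmatr_oneQubitDepolarise below. */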
/** Works like oneQubitDephase but modifies every other element, and elements are averaged in pairs */
__global__ void densmatr_oneQubitDampingKernel(
qreal damping, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int bothBits)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
long long int targetInd = baseInd + bothBits;
qreal realAvDepol = damping * ( vecReal[targetInd]);
qreal imagAvDepol = damping * ( vecImag[targetInd]);
vecReal[targetInd] *= 1 - damping;
vecImag[targetInd] *= 1 - damping;
vecReal[baseInd] += realAvDepol;
vecImag[baseInd] += imagAvDepol;
}
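/* Amplitude damping: a fraction `damping` of each |..1..><..1..| population is transferred onto
 * the paired |..0..><..0..| element here, while the corresponding off-diagonals are scaled by
 * sqrt(1 - damping) in densmatr_oneQubitDamping below. */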
void densmatr_oneQubitDepolarise(Qureg qureg, const int targetQubit, qreal depolLevel) {
if (depolLevel == 0)
return;
densmatr_oneQubitDephase(qureg, targetQubit, depolLevel);
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int bothBits = colBit | rowBit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_oneQubitDepolariseKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, bothBits);
}
void densmatr_oneQubitDamping(Qureg qureg, const int targetQubit, qreal damping) {
if (damping == 0)
return;
qreal dephase = sqrt(1-damping);
densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephase);
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int bothBits = colBit | rowBit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_oneQubitDampingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
damping, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, bothBits);
}
/** Called once for every 16 amplitudes */
__global__ void densmatr_twoQubitDepolariseKernel(
qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int part4, long long int part5,
long long int rowCol1, long long int rowCol2)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
// index of |..0..0..><..0..0|
long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4);
long long int ind01 = ind00 + rowCol1;
long long int ind10 = ind00 + rowCol2;
long long int ind11 = ind00 + rowCol1 + rowCol2;
qreal realAvDepol = depolLevel * 0.25 * (
vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]);
qreal imagAvDepol = depolLevel * 0.25 * (
vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]);
qreal retain = 1 - depolLevel;
vecReal[ind00] *= retain; vecImag[ind00] *= retain;
vecReal[ind01] *= retain; vecImag[ind01] *= retain;
vecReal[ind10] *= retain; vecImag[ind10] *= retain;
vecReal[ind11] *= retain; vecImag[ind11] *= retain;
vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol;
vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol;
vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol;
vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol;
}
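/* Mixes the four elements of each 16-element group whose row and column agree on both target
 * qubits towards their average with weight depolLevel; the other 12 elements were already scaled
 * by (1 - depolLevel) via the densmatr_twoQubitDephase call in densmatr_twoQubitDepolarise below. */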
void densmatr_twoQubitDepolarise(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) {
if (depolLevel == 0)
return;
// assumes qubit2 > qubit1
densmatr_twoQubitDephase(qureg, qubit1, qubit2, depolLevel);
int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
long long int colBit1 = 1LL << qubit1;
long long int rowBit1 = 1LL << rowQubit1;
long long int colBit2 = 1LL << qubit2;
long long int rowBit2 = 1LL << rowQubit2;
long long int rowCol1 = colBit1 | rowBit1;
long long int rowCol2 = colBit2 | rowBit2;
long long int numAmpsToVisit = qureg.numAmpsPerChunk/16;
long long int part1 = colBit1 - 1;
long long int part2 = (colBit2 >> 1) - colBit1;
long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
long long int part5 = numAmpsToVisit - (rowBit2 >> 3);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_twoQubitDepolariseKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, part4, part5, rowCol1, rowCol2);
}
void seedQuESTDefault(){
// init MT random number generator with three keys -- time and pid
// for the MPI version, it is ok that all procs will get the same seed as random numbers will only be
// used by the master process
unsigned long int key[2];
getQuESTDefaultSeedKey(key);
init_by_array(key, 2);
}
#ifdef __cplusplus
}
#endif
|
f0892b65c87b8d87982b3c53e090c937cdda7b00.cu
|
// Distributed under MIT licence. See https://github.com/QuEST-Kit/QuEST/blob/master/LICENCE.txt for details
/** @file
* An implementation of the backend in ../QuEST_internal.h for a GPU environment.
*/
# include "QuEST.h"
# include "QuEST_precision.h"
# include "QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey
# include "mt19937ar.h"
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# define REDUCE_SHARED_SIZE 512
# define DEBUG 0
static __device__ int extractBit (int locationOfBitFromRight, long long int theEncodedNumber)
{
return (theEncodedNumber & ( 1LL << locationOfBitFromRight )) >> locationOfBitFromRight;
}
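/* e.g. extractBit(2, 13) == 1 and extractBit(1, 13) == 0, since 13 is 1101 in binary
 * (bit 0 being the rightmost) */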
#ifdef __cplusplus
extern "C" {
#endif
void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) {
cudaDeviceSynchronize();
cudaMemcpy(
qureg.deviceStateVec.real + startInd,
reals,
numAmps * sizeof(*(qureg.deviceStateVec.real)),
cudaMemcpyHostToDevice);
cudaMemcpy(
qureg.deviceStateVec.imag + startInd,
imags,
numAmps * sizeof(*(qureg.deviceStateVec.real)),
cudaMemcpyHostToDevice);
}
/** works for both statevectors and density matrices */
void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) {
// copy copyQureg's GPU statevec to targetQureg's GPU statevec
cudaDeviceSynchronize();
cudaMemcpy(
targetQureg.deviceStateVec.real,
copyQureg.deviceStateVec.real,
targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.real)),
cudaMemcpyDeviceToDevice);
cudaMemcpy(
targetQureg.deviceStateVec.imag,
copyQureg.deviceStateVec.imag,
targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.imag)),
cudaMemcpyDeviceToDevice);
}
__global__ void densmatr_initPureStateKernel(
long long int numPureAmps,
qreal *targetVecReal, qreal *targetVecImag,
qreal *copyVecReal, qreal *copyVecImag)
{
// this is a particular index of the pure copyQureg
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=numPureAmps) return;
qreal realRow = copyVecReal[index];
qreal imagRow = copyVecImag[index];
for (long long int col=0; col < numPureAmps; col++) {
qreal realCol = copyVecReal[col];
qreal imagCol = - copyVecImag[col]; // minus for conjugation
targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol;
targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol;
}
}
void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/threadsPerCUDABlock);
densmatr_initPureStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
copyQureg.numAmpsPerChunk,
targetQureg.deviceStateVec.real, targetQureg.deviceStateVec.imag,
copyQureg.deviceStateVec.real, copyQureg.deviceStateVec.imag);
}
__global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal probFactor, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = probFactor;
stateVecImag[index] = 0.0;
}
void densmatr_initPlusState(Qureg qureg)
{
qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented));
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
densmatr_initPlusStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
probFactor,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void densmatr_initClassicalStateKernel(
long long int densityNumElems,
qreal *densityReal, qreal *densityImag,
long long int densityInd)
{
// initialise the state to all zeros
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= densityNumElems) return;
densityReal[index] = 0.0;
densityImag[index] = 0.0;
if (index==densityInd){
// classical state has probability 1
densityReal[densityInd] = 1.0;
densityImag[densityInd] = 0.0;
}
}
void densmatr_initClassicalState(Qureg qureg, long long int stateInd)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
// index of the desired state in the flat density matrix
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int densityInd = (densityDim + 1)*stateInd;
// identical to pure version
densmatr_initClassicalStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag, densityInd);
}
void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env)
{
// allocate CPU memory
    long long int numAmps = 1LL << numQubits;
long long int numAmpsPerRank = numAmps/env.numRanks;
    qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.real)));
    qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.imag)));
    if (env.numRanks>1){
        qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.real)));
        qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.imag)));
}
// check cpu memory allocation was successful
if ( (!(qureg->stateVec.real) || !(qureg->stateVec.imag))
&& numAmpsPerRank ) {
printf("Could not allocate memory!\n");
exit (EXIT_FAILURE);
}
if ( env.numRanks>1 && (!(qureg->pairStateVec.real) || !(qureg->pairStateVec.imag))
&& numAmpsPerRank ) {
printf("Could not allocate memory!\n");
exit (EXIT_FAILURE);
}
qureg->numQubitsInStateVec = numQubits;
qureg->numAmpsPerChunk = numAmpsPerRank;
qureg->numAmpsTotal = numAmps;
qureg->chunkId = env.rank;
qureg->numChunks = env.numRanks;
qureg->isDensityMatrix = 0;
// allocate GPU memory
cudaMalloc(&(qureg->deviceStateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.real)));
cudaMalloc(&(qureg->deviceStateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.imag)));
cudaMalloc(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal));
cudaMalloc(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))*
sizeof(qreal));
// check gpu memory allocation was successful
if (!(qureg->deviceStateVec.real) || !(qureg->deviceStateVec.imag)){
printf("Could not allocate memory on GPU!\n");
exit (EXIT_FAILURE);
}
}
void statevec_destroyQureg(Qureg qureg, QuESTEnv env)
{
// Free CPU memory
free(qureg.stateVec.real);
free(qureg.stateVec.imag);
if (env.numRanks>1){
free(qureg.pairStateVec.real);
free(qureg.pairStateVec.imag);
}
// Free GPU memory
cudaFree(qureg.deviceStateVec.real);
cudaFree(qureg.deviceStateVec.imag);
}
int GPUExists(void){
int deviceCount, device;
int gpuDeviceCount = 0;
struct cudaDeviceProp properties;
cudaError_t cudaResultCode = cudaGetDeviceCount(&deviceCount);
if (cudaResultCode != cudaSuccess) deviceCount = 0;
/* machines with no GPUs can still report one emulation device */
for (device = 0; device < deviceCount; ++device) {
cudaGetDeviceProperties(&properties, device);
if (properties.major != 9999) { /* 9999 means emulation only */
++gpuDeviceCount;
}
}
if (gpuDeviceCount) return 1;
else return 0;
}
QuESTEnv createQuESTEnv(void) {
// init MPI environment
if (!GPUExists()){
printf("Trying to run GPU code with no GPU available\n");
exit(EXIT_FAILURE);
}
QuESTEnv env;
env.rank=0;
env.numRanks=1;
seedQuESTDefault();
return env;
}
void syncQuESTEnv(QuESTEnv env){
cudaDeviceSynchronize();
}
int syncQuESTSuccess(int successCode){
return successCode;
}
void destroyQuESTEnv(QuESTEnv env){
// MPI finalize goes here in MPI version. Call this function anyway for consistency
}
void reportQuESTEnv(QuESTEnv env){
printf("EXECUTION ENVIRONMENT:\n");
printf("Running locally on one node with GPU\n");
printf("Number of ranks is %d\n", env.numRanks);
# ifdef _OPENMP
printf("OpenMP enabled\n");
printf("Number of threads available is %d\n", omp_get_max_threads());
# else
printf("OpenMP disabled\n");
# endif
}
void getEnvironmentString(QuESTEnv env, Qureg qureg, char str[200]){
sprintf(str, "%dqubits_GPU_noMpi_noOMP", qureg.numQubitsInStateVec);
}
void copyStateToGPU(Qureg qureg)
{
if (DEBUG) printf("Copying data to GPU\n");
    cudaMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real,
            qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyHostToDevice);
    cudaMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag,
            qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyHostToDevice);
if (DEBUG) printf("Finished copying data to GPU\n");
}
void copyStateFromGPU(Qureg qureg)
{
cudaDeviceSynchronize();
if (DEBUG) printf("Copying data from GPU\n");
cudaMemcpy(qureg.stateVec.real, qureg.deviceStateVec.real,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyDeviceToHost);
cudaMemcpy(qureg.stateVec.imag, qureg.deviceStateVec.imag,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyDeviceToHost);
if (DEBUG) printf("Finished copying data from GPU\n");
}
/** Print the current state vector of probability amplitudes for a set of qubits to standard out.
For debugging purposes. Each rank should print output serially. Only print output for systems <= 5 qubits
*/
void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){
long long int index;
int rank;
copyStateFromGPU(qureg);
if (qureg.numQubitsInStateVec<=5){
for (rank=0; rank<qureg.numChunks; rank++){
if (qureg.chunkId==rank){
if (reportRank) {
printf("Reporting state from rank %d [\n", qureg.chunkId);
//printf("\trank, index, real, imag\n");
printf("real, imag\n");
} else if (rank==0) {
printf("Reporting state [\n");
printf("real, imag\n");
}
for(index=0; index<qureg.numAmpsPerChunk; index++){
printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]);
}
if (reportRank || rank==qureg.numChunks-1) printf("]\n");
}
syncQuESTEnv(env);
}
}
}
qreal statevec_getRealAmp(Qureg qureg, long long int index){
qreal el=0;
cudaMemcpy(&el, &(qureg.deviceStateVec.real[index]),
sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyDeviceToHost);
return el;
}
qreal statevec_getImagAmp(Qureg qureg, long long int index){
qreal el=0;
cudaMemcpy(&el, &(qureg.deviceStateVec.imag[index]),
sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyDeviceToHost);
return el;
}
__global__ void statevec_initZeroStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
// initialise the state to |0000..0000>
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
if (index==0){
// zero state |0000..0000> has probability 1
stateVecReal[0] = 1.0;
stateVecImag[0] = 0.0;
}
}
void statevec_initZeroState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initZeroStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal normFactor = 1.0/sqrt((qreal)stateVecSize);
stateVecReal[index] = normFactor;
stateVecImag[index] = 0.0;
}
void statevec_initPlusState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initPlusStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){
long long int index;
// initialise the state to |stateInd>
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
if (index==stateInd){
// classical state has probability 1
stateVecReal[stateInd] = 1.0;
stateVecImag[stateInd] = 0.0;
}
}
void statevec_initClassicalState(Qureg qureg, long long int stateInd)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initClassicalStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag, stateInd);
}
__global__ void statevec_initStateDebugKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = (index*2.0)/10.0;
stateVecImag[index] = (index*2.0+1.0)/10.0;
}
void statevec_initStateDebug(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initStateDebugKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){
long long int index;
int bit;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2);
bit = extractBit(qubitId, index);
if (bit==outcome) {
stateVecReal[index] = normFactor;
stateVecImag[index] = 0.0;
} else {
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
}
}
void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg->numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initStateOfSingleQubitKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg->numAmpsPerChunk, qureg->deviceStateVec.real, qureg->deviceStateVec.imag, qubitId, outcome);
}
// returns 1 if successful, else 0
int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){
long long int chunkSize, stateVecSize;
long long int indexInChunk, totalIndex;
chunkSize = qureg->numAmpsPerChunk;
stateVecSize = chunkSize*qureg->numChunks;
qreal *stateVecReal = qureg->stateVec.real;
qreal *stateVecImag = qureg->stateVec.imag;
FILE *fp;
char line[200];
fp = fopen(filename, "r");
if (fp == NULL)
return 0;
indexInChunk = 0; totalIndex = 0;
while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndex<stateVecSize){
if (line[0]!='#'){
int chunkId = totalIndex/chunkSize;
if (chunkId==qureg->chunkId){
# if QuEST_PREC==1
sscanf(line, "%f, %f", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# elif QuEST_PREC==2
sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# elif QuEST_PREC==4
sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# endif
indexInChunk += 1;
}
totalIndex += 1;
}
}
fclose(fp);
copyStateToGPU(*qureg);
// indicate success
return 1;
}
int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){
qreal diff;
int chunkSize = mq1.numAmpsPerChunk;
copyStateFromGPU(mq1);
copyStateFromGPU(mq2);
for (int i=0; i<chunkSize; i++){
diff = mq1.stateVec.real[i] - mq2.stateVec.real[i];
if (diff<0) diff *= -1;
if (diff>precision) return 0;
diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i];
if (diff<0) diff *= -1;
if (diff>precision) return 0;
}
return 1;
}
__global__ void statevec_compactUnitaryKernel (Qureg qureg, const int rotQubit, Complex alpha, Complex beta){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask; // task-based approach to expose the loop with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << rotQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
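/* The update above applies the 2x2 unitary {{alpha, -conj(beta)}, {beta, conj(alpha)}} to each
 * (|..0..>, |..1..>) amplitude pair of the target qubit, matching the in-kernel comments. */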
void statevec_compactUnitary(Qureg qureg, const int targetQubit, Complex alpha, Complex beta)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_compactUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, alpha, beta);
}
__global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask; // task-based approach to expose the loop with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
int controlBit;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
}
void statevec_controlledCompactUnitary(Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_controlledCompactUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, alpha, beta);
}
__global__ void statevec_unitaryKernel(Qureg qureg, const int targetQubit, ComplexMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask; // task-based approach to expose the loop with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
void statevec_unitary(Qureg qureg, const int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_unitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, u);
}
__global__ void statevec_controlledUnitaryKernel(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask; // task-based approach to expose the loop with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
int controlBit;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
void statevec_controlledUnitary(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_controlledUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, u);
}
__global__ void statevec_multiControlledUnitaryKernel(Qureg qureg, long long int mask, const int targetQubit, ComplexMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
    long long int thisTask; // task-based approach to expose the loop with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
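    // mask has a 1 at every control-qubit position; apply the update only when indexUp has all of those bits set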
if (mask == (mask & indexUp) ){
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
void statevec_multiControlledUnitary(Qureg qureg, int *controlQubits, int numControlQubits, const int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
long long int mask=0;
for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]);
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_multiControlledUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask, targetQubit, u);
}
__global__ void statevec_pauliXKernel(Qureg qureg, const int targetQubit){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp, // storage for previous state values
stateImagUp; // (used in updates)
// ----- temp variables
    long long int thisTask; // task-based approach to expose the loop with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
    //! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
void statevec_pauliX(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_pauliXKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit);
}
__global__ void statevec_pauliYKernel(Qureg qureg, const int targetQubit, const int conjFac){
long long int sizeHalfBlock = 1LL << targetQubit;
long long int sizeBlock = 2LL * sizeHalfBlock;
long long int numTasks = qureg.numAmpsPerChunk >> 1;
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
long long int thisBlock = thisTask / sizeHalfBlock;
long long int indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
long long int indexLo = indexUp + sizeHalfBlock;
qreal stateRealUp, stateImagUp;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
// update under +-{{0, -i}, {i, 0}}
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
void statevec_pauliY(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, 1);
}
void statevec_pauliYConj(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, -1);
}
__global__ void statevec_controlledPauliYKernel(Qureg qureg, const int controlQubit, const int targetQubit, const int conjFac)
{
long long int index;
long long int sizeBlock, sizeHalfBlock;
long long int stateVecSize;
int controlBit;
qreal stateRealUp, stateImagUp;
long long int thisBlock, indexUp, indexLo;
sizeHalfBlock = 1LL << targetQubit;
sizeBlock = 2LL * sizeHalfBlock;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=(stateVecSize>>1)) return;
thisBlock = index / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + index%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
// update under +-{{0, -i}, {i, 0}}
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
}
void statevec_controlledPauliY(Qureg qureg, const int controlQubit, const int targetQubit)
{
int conjFactor = 1;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor);
}
void statevec_controlledPauliYConj(Qureg qureg, const int controlQubit, const int targetQubit)
{
int conjFactor = -1;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor);
}
__global__ void statevec_phaseShiftByTermKernel(Qureg qureg, const int targetQubit, qreal cosAngle, qreal sinAngle) {
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, indexUp,indexLo;
qreal stateRealLo, stateImagLo;
long long int thisTask;
const long long int numTasks = qureg.numAmpsPerChunk >> 1;
sizeHalfBlock = 1LL << targetQubit;
sizeBlock = 2LL * sizeHalfBlock;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
stateVecReal[indexLo] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[indexLo] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
void statevec_phaseShiftByTerm(Qureg qureg, const int targetQubit, Complex term)
{
qreal cosAngle = term.real;
qreal sinAngle = term.imag;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_phaseShiftByTermKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, cosAngle, sinAngle);
}
__global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, const int idQubit1, const int idQubit2, qreal cosAngle, qreal sinAngle)
{
long long int index;
long long int stateVecSize;
int bit1, bit2;
qreal stateRealLo, stateImagLo;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
bit1 = extractBit (idQubit1, index);
bit2 = extractBit (idQubit2, index);
if (bit1 && bit2) {
stateRealLo = stateVecReal[index];
stateImagLo = stateVecImag[index];
stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
}
void statevec_controlledPhaseShift(Qureg qureg, const int idQubit1, const int idQubit2, qreal angle)
{
qreal cosAngle = cos(angle);
qreal sinAngle = sin(angle);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); // kernel ranges over every amplitude, not half
statevec_controlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2, cosAngle, sinAngle);
}
__global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) {
qreal stateRealLo, stateImagLo;
long long int index;
long long int stateVecSize;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
if (mask == (mask & index) ){
stateRealLo = stateVecReal[index];
stateImagLo = stateVecImag[index];
stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
}
void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle)
{
qreal cosAngle = cos(angle);
qreal sinAngle = sin(angle);
long long int mask=0;
for (int i=0; i<numControlQubits; i++)
mask = mask | (1LL<<controlQubits[i]);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_multiControlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask, cosAngle, sinAngle);
}
qreal densmatr_calcTotalProb(Qureg qureg) {
// computes the trace using Kahan summation
qreal pTotal=0;
qreal y, t, c;
c = 0;
long long int numCols = 1LL << qureg.numQubitsRepresented;
long long diagIndex;
copyStateFromGPU(qureg);
for (int col=0; col< numCols; col++) {
diagIndex = col*(numCols + 1);
y = qureg.stateVec.real[diagIndex] - c;
t = pTotal + y;
c = ( t - pTotal ) - y; // brackets are important
pTotal = t;
}
return pTotal;
}
qreal statevec_calcTotalProb(Qureg qureg){
/* IJB - implemented using Kahan summation for greater accuracy at a slight floating
point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */
/* Don't change the bracketing in this routine! */
qreal pTotal=0;
qreal y, t, c;
long long int index;
long long int numAmpsPerRank = qureg.numAmpsPerChunk;
copyStateFromGPU(qureg);
c = 0.0;
for (index=0; index<numAmpsPerRank; index++){
/* Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan */
// pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index];
y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c;
t = pTotal + y;
c = ( t - pTotal ) - y;
pTotal = t;
/* Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan */
//pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index];
y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c;
t = pTotal + y;
c = ( t - pTotal ) - y;
pTotal = t;
}
return pTotal;
}
__global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, const int idQubit1, const int idQubit2)
{
long long int index;
long long int stateVecSize;
int bit1, bit2;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
bit1 = extractBit (idQubit1, index);
bit2 = extractBit (idQubit2, index);
if (bit1 && bit2) {
stateVecReal [index] = - stateVecReal [index];
stateVecImag [index] = - stateVecImag [index];
}
}
void statevec_controlledPhaseFlip(Qureg qureg, const int idQubit1, const int idQubit2)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_controlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2);
}
__global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask)
{
long long int index;
long long int stateVecSize;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
if (mask == (mask & index) ){
stateVecReal [index] = - stateVecReal [index];
stateVecImag [index] = - stateVecImag [index];
}
}
void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits)
{
int threadsPerCUDABlock, CUDABlocks;
long long int mask=0;
for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]);
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_multiControlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask);
}
__global__ void statevec_hadamardKernel (Qureg qureg, const int targetQubit){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal recRoot2 = 1.0/sqrt(2.0);
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo);
stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo);
stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo);
stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo);
}
void statevec_hadamard(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_hadamardKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit);
}
__global__ void statevec_controlledNotKernel(Qureg qureg, const int controlQubit, const int targetQubit)
{
long long int index;
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
long long int stateVecSize;
int controlBit;
// ----- temp variables
qreal stateRealUp, // storage for previous state values
stateImagUp; // (used in updates)
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=(stateVecSize>>1)) return;
thisBlock = index / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + index%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
}
void statevec_controlledNot(Qureg qureg, const int controlQubit, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_controlledNotKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit);
}
__device__ __host__ unsigned int log2Int( unsigned int x )
{
unsigned int ans = 0 ;
while( x>>=1 ) ans++;
return ans ;
}
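/** Pairwise tree reduction: sums the 'length' values of arrayIn (held in shared memory),
 * halving the number of active threads each step; thread 0 writes the block total into
 * reducedArray[blockIdx.x]. The scheme assumes 'length' is a power of two. */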
__device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){
int i, l, r;
int threadMax, maxDepth;
threadMax = length/2;
maxDepth = log2Int(length/2);
for (i=0; i<maxDepth+1; i++){
if (threadIdx.x<threadMax){
l = threadIdx.x;
r = l + threadMax;
arrayIn[l] = arrayIn[r] + arrayIn[l];
}
threadMax = threadMax >> 1;
__syncthreads(); // optimise -- use warp shuffle instead
}
if (threadIdx.x==0) reducedArray[blockIdx.x] = arrayIn[0];
}
__global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){
extern __shared__ qreal tempReductionArray[];
int blockOffset = blockIdx.x*length;
tempReductionArray[threadIdx.x*2] = arrayIn[blockOffset + threadIdx.x*2];
tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1];
__syncthreads();
reduceBlock(tempReductionArray, reducedArray, length);
}
__global__ void densmatr_findProbabilityOfZeroKernel(
Qureg qureg, const int measureQubit, qreal *reducedArray
) {
// run by each thread
// use of block here refers to contiguous amplitudes where measureQubit = 0,
// (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numTasks = densityDim >> 1;
long long int sizeHalfBlock = 1LL << (measureQubit);
long long int sizeBlock = 2LL * sizeHalfBlock;
long long int thisBlock; // which block this thread is processing
long long int thisTask; // which part of the block this thread is processing
long long int basisIndex; // index of this thread's computational basis state
long long int densityIndex; // " " index of |basis><basis| in the flat density matrix
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
// figure out which density matrix prob that this thread is assigned
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
densityIndex = (densityDim + 1) * basisIndex;
// record the probability in the CUDA-BLOCK-wide array
qreal prob = qureg.deviceStateVec.real[densityIndex]; // im[densityIndex] assumed ~ 0
tempReductionArray[threadIdx.x] = prob;
// sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array
__syncthreads();
if (threadIdx.x<blockDim.x/2){
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
}
__global__ void statevec_findProbabilityOfZeroKernel(
Qureg qureg, const int measureQubit, qreal *reducedArray
) {
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
index; // current index for first half block
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
// (good for shared memory parallelism)
extern __shared__ qreal tempReductionArray[];
// ---------------------------------------------------------------- //
// dimensions //
// ---------------------------------------------------------------- //
sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum,
// and then the number to skip
sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries)
// ---------------------------------------------------------------- //
// find probability //
// ---------------------------------------------------------------- //
//
// --- task-based shared-memory parallel implementation
//
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
qreal realVal, imagVal;
realVal = stateVecReal[index];
imagVal = stateVecImag[index];
tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal;
__syncthreads();
if (threadIdx.x<blockDim.x/2){
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
}
int getNumReductionLevels(long long int numValuesToReduce, int numReducedPerLevel){
int levels=0;
while (numValuesToReduce){
numValuesToReduce = numValuesToReduce/numReducedPerLevel;
levels++;
}
return levels;
}
void swapDouble(qreal **a, qreal **b){
qreal *temp;
temp = *a;
*a = *b;
*b = temp;
}
qreal densmatr_findProbabilityOfZero(Qureg qureg, const int measureQubit)
{
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numValuesToReduce = densityDim >> 1; // half of the diagonal has measureQubit=0
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
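    // reduce in passes: each pass sums groups of up to maxReducedPerLevel values into one
    // partial sum per CUDA block, ping-ponging between firstLevelReduction and
    // secondLevelReduction, until a single value (the total probability) remains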
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
if (firstTime) {
densmatr_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
qureg, measureQubit, qureg.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal zeroProb;
cudaMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
return zeroProb;
}
qreal statevec_findProbabilityOfZero(Qureg qureg, const int measureQubit)
{
long long int numValuesToReduce = qureg.numAmpsPerChunk>>1;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
qreal stateProb=0;
int firstTime=1;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
while(numValuesToReduce>1){
if (numValuesToReduce<maxReducedPerLevel){
// Need less than one CUDA block to reduce values
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
} else {
// Use full CUDA blocks, with block size constrained by shared mem usage
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime){
statevec_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
qureg, measureQubit, qureg.firstLevelReduction);
firstTime=0;
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
cudaMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
return stateProb;
}
qreal statevec_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome)
{
qreal outcomeProb = statevec_findProbabilityOfZero(qureg, measureQubit);
if (outcome==1)
outcomeProb = 1.0 - outcomeProb;
return outcomeProb;
}
qreal densmatr_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome)
{
qreal outcomeProb = densmatr_findProbabilityOfZero(qureg, measureQubit);
if (outcome==1)
outcomeProb = 1.0 - outcomeProb;
return outcomeProb;
}
/** computes either a real or imag term in the inner product */
__global__ void statevec_calcInnerProductKernel(
int getRealComp,
qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* vecImag2,
long long int numTermsToSum, qreal* reducedArray)
{
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numTermsToSum) return;
// choose whether to calculate the real or imaginary term of the inner product
qreal innerProdTerm;
if (getRealComp)
innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index];
else
innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index];
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = innerProdTerm;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Terrible code which unnecessarily individually computes and sums the real and imaginary components of the
* inner product, so as to not have to worry about keeping the sums separated during reduction.
* Truly disgusting, probably doubles runtime, please fix.
* @TODO could even do the kernel twice, storing real in bra.reduc and imag in ket.reduc?
*/
Complex statevec_calcInnerProduct(Qureg bra, Qureg ket) {
qreal innerProdReal, innerProdImag;
int getRealComp;
long long int numValuesToReduce;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel;
int firstTime;
// compute real component of inner product
getRealComp = 1;
numValuesToReduce = bra.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
statevec_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
getRealComp,
bra.deviceStateVec.real, bra.deviceStateVec.imag,
ket.deviceStateVec.real, ket.deviceStateVec.imag,
numValuesToReduce,
bra.firstLevelReduction);
firstTime = 0;
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
bra.firstLevelReduction,
bra.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
cudaMemcpy(&innerProdReal, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
// compute imag component of inner product
getRealComp = 0;
numValuesToReduce = bra.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
statevec_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
getRealComp,
bra.deviceStateVec.real, bra.deviceStateVec.imag,
ket.deviceStateVec.real, ket.deviceStateVec.imag,
numValuesToReduce,
bra.firstLevelReduction);
firstTime = 0;
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
bra.firstLevelReduction,
bra.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
cudaMemcpy(&innerProdImag, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
// return complex
Complex innerProd;
innerProd.real = innerProdReal;
innerProd.imag = innerProdImag;
return innerProd;
}
/** computes one term of (vec^*T) dens * vec */
__global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) {
// figure out which density matrix row to consider
long long int col;
long long int row = blockIdx.x*blockDim.x + threadIdx.x;
if (row >= dim) return;
qreal* densReal = dens.deviceStateVec.real;
qreal* densImag = dens.deviceStateVec.imag;
qreal* vecReal = vec.deviceStateVec.real;
qreal* vecImag = vec.deviceStateVec.imag;
// compute the row-th element of the product dens*vec
qreal prodReal = 0;
qreal prodImag = 0;
for (col=0LL; col < dim; col++) {
qreal densElemReal = densReal[dim*col + row];
qreal densElemImag = densImag[dim*col + row];
prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col];
prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col];
}
// multiply with row-th elem of (vec^*)
qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row];
// imag of every term should be zero, because each is a valid fidelity calc of an eigenstate
//qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row];
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = termReal;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
// @TODO implement
qreal densmatr_calcFidelity(Qureg qureg, Qureg pureState) {
// we're summing the square of every term in the density matrix
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numValuesToReduce = densityDim;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
// store the reduction in the pureState array
if (firstTime) {
densmatr_calcFidelityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
qureg, pureState, densityDim, pureState.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
pureState.firstLevelReduction,
pureState.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal fidelity;
cudaMemcpy(&fidelity, pureState.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
return fidelity;
}
__global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) {
// figure out which density matrix term this thread is assigned
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numAmpsToSum) return;
qreal term = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index];
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = term;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Computes the trace of the density matrix squared */
qreal densmatr_calcPurity(Qureg qureg) {
// we're summing the square of every term in the density matrix
long long int numValuesToReduce = qureg.numAmpsPerChunk;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
if (firstTime) {
densmatr_calcPurityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
numValuesToReduce, qureg.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal traceDensSquared;
cudaMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
return traceDensSquared;
}
__global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability)
{
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
index; // current index for first half block
// ----- measured probability
qreal renorm; // probability (returned) value
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
// (good for shared memory parallelism)
long long int numTasks=qureg.numAmpsPerChunk>>1;
// ---------------------------------------------------------------- //
// dimensions //
// ---------------------------------------------------------------- //
sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum,
// and then the number to skip
sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries)
// ---------------------------------------------------------------- //
// find probability //
// ---------------------------------------------------------------- //
//
// --- task-based shared-memory parallel implementation
//
renorm=1/sqrt(totalProbability);
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
if (outcome==0){
stateVecReal[index]=stateVecReal[index]*renorm;
stateVecImag[index]=stateVecImag[index]*renorm;
stateVecReal[index+sizeHalfBlock]=0;
stateVecImag[index+sizeHalfBlock]=0;
} else if (outcome==1){
stateVecReal[index]=0;
stateVecImag[index]=0;
stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm;
stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm;
}
}
/*
* outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or
* else the state-vector will lose normalisation
*/
void statevec_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, measureQubit, outcome, outcomeProb);
}
/** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */
__global__ void densmatr_collapseToKnownProbOutcomeKernel(
qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit,
long long int part1, long long int part2, long long int part3,
long long int rowBit, long long int colBit, long long int desired, long long int undesired)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numBasesToVisit) return;
long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
// renormalise desired outcome
vecReal[base + desired] /= outcomeProb;
vecImag[base + desired] /= outcomeProb;
// kill undesired outcome
vecReal[base + undesired] = 0;
vecImag[base + undesired] = 0;
// kill |..0..><..1..| states
vecReal[base + colBit] = 0;
vecImag[base + colBit] = 0;
vecReal[base + rowBit] = 0;
vecImag[base + rowBit] = 0;
}
/** This involves finding |...i...><...j...| states and killing those where i!=j */
void densmatr_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb) {
int rowQubit = measureQubit + qureg.numQubitsRepresented;
    long long int colBit = 1LL << measureQubit;
    long long int rowBit = 1LL << rowQubit;
long long int numBasesToVisit = qureg.numAmpsPerChunk/4;
long long int part1 = colBit -1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numBasesToVisit - (rowBit >> 1);
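    // part1/part2/part3 carve scanInd into low, middle and high bit-fields so that
    // (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) re-inserts a 0 bit at both
    // the column (colBit) and row (rowBit) positions, yielding the |..0..><..0..| base index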
long long int desired, undesired;
if (outcome == 0) {
desired = 0;
undesired = colBit | rowBit;
} else {
desired = colBit | rowBit;
undesired = 0;
}
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock);
densmatr_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
outcomeProb, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBasesToVisit,
part1, part2, part3, rowBit, colBit, desired, undesired);
}
__global__ void densmatr_addDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) {
long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
if (ampInd >= numAmpsToVisit) return;
combineQureg.deviceStateVec.real[ampInd] *= 1-otherProb;
combineQureg.deviceStateVec.imag[ampInd] *= 1-otherProb;
combineQureg.deviceStateVec.real[ampInd] += otherProb*otherQureg.deviceStateVec.real[ampInd];
combineQureg.deviceStateVec.imag[ampInd] += otherProb*otherQureg.deviceStateVec.imag[ampInd];
}
void densmatr_addDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) {
long long int numAmpsToVisit = combineQureg.numAmpsPerChunk;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_addDensityMatrixKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
combineQureg, otherProb, otherQureg, numAmpsToVisit
);
}
/** Called once for every 4 amplitudes in density matrix
* Works by establishing the |..0..><..0..| state (for its given index) then
* visiting |..1..><..0..| and |..0..><..1..|. Labels |part1 X pa><rt2 NOT(X) part3|
* From the brain of Simon Benjamin
*/
__global__ void densmatr_oneQubitDephaseKernel(
qreal fac, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int colBit, long long int rowBit)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
vecReal[ampInd + colBit] *= fac;
vecImag[ampInd + colBit] *= fac;
vecReal[ampInd + rowBit] *= fac;
vecImag[ampInd + rowBit] *= fac;
}
void densmatr_oneQubitDegradeOffDiagonal(Qureg qureg, const int targetQubit, qreal dephFac) {
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_oneQubitDephaseKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, colBit, rowBit);
}
void densmatr_oneQubitDephase(Qureg qureg, const int targetQubit, qreal dephase) {
if (dephase == 0)
return;
qreal dephFac = 1 - dephase;
densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephFac);
}
/** Called 12 times for every 16 amplitudes in density matrix
* Each sums from the |..0..0..><..0..0..| index to visit either
* |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..|
* etc and so on to |..1..1..><..1..0|. Labels |part1 0 part2 0 par><t3 0 part4 0 part5|.
* From the brain of Simon Benjamin
*/
__global__ void densmatr_twoQubitDephaseKernel(
qreal fac, qreal* vecReal, qreal *vecImag, long long int numBackgroundStates, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3, long long int part4, long long int part5,
long long int colBit1, long long int rowBit1, long long int colBit2, long long int rowBit2)
{
long long int outerInd = blockIdx.x*blockDim.x + threadIdx.x;
if (outerInd >= numAmpsToVisit) return;
// sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A|
int meta = 1 + (outerInd/numBackgroundStates);
if (meta > 4) meta++;
if (meta > 9) meta++;
long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2);
long long int scanInd = outerInd % numBackgroundStates;
long long int stateInd = (
shift +
(scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4));
vecReal[stateInd] *= fac;
vecImag[stateInd] *= fac;
}
// @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems?
void densmatr_twoQubitDephase(Qureg qureg, int qubit1, int qubit2, qreal dephase) {
if (dephase == 0)
return;
// assumes qubit2 > qubit1
int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
long long int colBit1 = 1LL << qubit1;
long long int rowBit1 = 1LL << rowQubit1;
long long int colBit2 = 1LL << qubit2;
long long int rowBit2 = 1LL << rowQubit2;
long long int part1 = colBit1 - 1;
long long int part2 = (colBit2 >> 1) - colBit1;
long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3);
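    // part1..part5 likewise splice scanInd back into a full index containing 0 bits at the two
    // column and two row positions of the dephased qubit pair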
qreal dephFac = 1 - dephase;
// refers to states |a 0 b 0 c><d 0 e 0 f| (target qubits are fixed)
long long int numBackgroundStates = qureg.numAmpsPerChunk/16;
// 12 of these states experience dephasing
long long int numAmpsToVisit = 12 * numBackgroundStates;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_twoQubitDephaseKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBackgroundStates, numAmpsToVisit,
part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2);
}
/** Works like oneQubitDephase but modifies every other element, and elements are averaged in pairs */
__global__ void densmatr_oneQubitDepolariseKernel(
qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int bothBits)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
long long int targetInd = baseInd + bothBits;
qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]);
qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]);
vecReal[baseInd] *= 1 - depolLevel;
vecImag[baseInd] *= 1 - depolLevel;
vecReal[targetInd] *= 1 - depolLevel;
vecImag[targetInd] *= 1 - depolLevel;
vecReal[baseInd] += realAvDepol;
vecImag[baseInd] += imagAvDepol;
vecReal[targetInd] += realAvDepol;
vecImag[targetInd] += imagAvDepol;
}
/** Works like oneQubitDephase but modifies every other element, and elements are averaged in pairs */
__global__ void densmatr_oneQubitDampingKernel(
qreal damping, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int bothBits)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
long long int targetInd = baseInd + bothBits;
qreal realAvDepol = damping * ( vecReal[targetInd]);
qreal imagAvDepol = damping * ( vecImag[targetInd]);
vecReal[targetInd] *= 1 - damping;
vecImag[targetInd] *= 1 - damping;
vecReal[baseInd] += realAvDepol;
vecImag[baseInd] += imagAvDepol;
}
void densmatr_oneQubitDepolarise(Qureg qureg, const int targetQubit, qreal depolLevel) {
if (depolLevel == 0)
return;
densmatr_oneQubitDephase(qureg, targetQubit, depolLevel);
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int bothBits = colBit | rowBit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_oneQubitDepolariseKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, bothBits);
}
void densmatr_oneQubitDamping(Qureg qureg, const int targetQubit, qreal damping) {
if (damping == 0)
return;
qreal dephase = sqrt(1-damping);
densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephase);
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int bothBits = colBit | rowBit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_oneQubitDampingKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
damping, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, bothBits);
}
/** Called once for every 16 amplitudes */
__global__ void densmatr_twoQubitDepolariseKernel(
qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int part4, long long int part5,
long long int rowCol1, long long int rowCol2)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
// index of |..0..0..><..0..0|
long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4);
long long int ind01 = ind00 + rowCol1;
long long int ind10 = ind00 + rowCol2;
long long int ind11 = ind00 + rowCol1 + rowCol2;
qreal realAvDepol = depolLevel * 0.25 * (
vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]);
qreal imagAvDepol = depolLevel * 0.25 * (
vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]);
qreal retain = 1 - depolLevel;
vecReal[ind00] *= retain; vecImag[ind00] *= retain;
vecReal[ind01] *= retain; vecImag[ind01] *= retain;
vecReal[ind10] *= retain; vecImag[ind10] *= retain;
vecReal[ind11] *= retain; vecImag[ind11] *= retain;
vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol;
vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol;
vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol;
vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol;
}
void densmatr_twoQubitDepolarise(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) {
if (depolLevel == 0)
return;
// assumes qubit2 > qubit1
densmatr_twoQubitDephase(qureg, qubit1, qubit2, depolLevel);
int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
long long int colBit1 = 1LL << qubit1;
long long int rowBit1 = 1LL << rowQubit1;
long long int colBit2 = 1LL << qubit2;
long long int rowBit2 = 1LL << rowQubit2;
long long int rowCol1 = colBit1 | rowBit1;
long long int rowCol2 = colBit2 | rowBit2;
long long int numAmpsToVisit = qureg.numAmpsPerChunk/16;
long long int part1 = colBit1 - 1;
long long int part2 = (colBit2 >> 1) - colBit1;
long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
long long int part5 = numAmpsToVisit - (rowBit2 >> 3);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_twoQubitDepolariseKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, part4, part5, rowCol1, rowCol2);
}
void seedQuESTDefault(){
// init MT random number generator with three keys -- time and pid
// for the MPI version, it is ok that all procs will get the same seed as random numbers will only be
// used by the master process
unsigned long int key[2];
getQuESTDefaultSeedKey(key);
init_by_array(key, 2);
}
#ifdef __cplusplus
}
#endif
|
c064b4e6dd1e1fc9eac932f21e931cfdc4fe1f12.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __NVCC__
//K in parallel
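// each of the K threads owns a contiguous slice of PQ of capacity ceil(N/K) starting at
// front = id*((N+K-1)/K), kept as a binary min-heap keyed on Cx; extractMin pops that slice's
// root, moves the last element to the root and sifts it down, then queues the node for expansion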
template < class U >
__global__ void extractMin(unsigned int* PQ, unsigned int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
int node = PQ[front];
// restructure the heap
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
while(2*pqIndex+1 < PQ_size[id]){
if(2*pqIndex+2 >= PQ_size[id]){
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
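// each thread expands one frontier node over its CSR adjacency (off/edge/W); a child is relaxed
// with f(child) = g(node) + w(node,child) + h(child), where g(node) = Cx[node] - Hx[node],
// under a per-child spin lock acquired with atomicCAS so concurrent Cx/parent updates are safe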
template < class T, class U >
__global__ void A_star_expand(int* off,int* edge,T* W, U* Hx,int* parent,volatile U* Cx,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
int N,int E, int K,int dest,int* nVFlag ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
//reach dest
if(node == dest){
atomicOr(flagfound,1);
}
// expand
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
            //array L initialized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}//end
}
//K in parallel -- O(N)
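// after Cx values may have changed, walk each thread's PQ slice and sift any child that became
// smaller than its parent up towards the slice's root, restoring the min-heap property on Cx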
template < class U >
__global__ void keepHeapPQ(unsigned int* PQ, unsigned int* PQ_size,U* Cx,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0){
int front = id*( (N+K-1)/K );
int size = PQ_size[id];
for(int i=front;i<front+size;i++){
if(2*i+2 < front+size){
int cost = Cx[PQ[i]];
int costLeft = Cx[PQ[2*i+1]];
int costRight = Cx[PQ[2*i+2]];
if( cost > costLeft || cost > costRight ){
int index ;
if(costLeft <= costRight)
index = 2*i+1;
else
index = 2*i+2;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
else if(2*i+1 < front+size){
if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
int index = 2*i+1;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
}
}
//N threads
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
if(nextFlag[id]==1){
int index = atomicAdd(nvSize,1);
nextV[index]=id;
}
}
}
//for K in parallel
template <class U >
__global__ void insertPQ(unsigned int* PQ,unsigned int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
int front = id*( (N+K-1)/K );
int i = id;
while(i<*nVsize){
//if not already present
if(openList[nextV[i]]!=-1){
i+=K;
continue;
}
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
//add in openList
openList[nextV[i]] = id;
if(PQS[id]>1){
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
//for K in parallel
template < class U >
__global__ void checkMIN(unsigned int* PQ, unsigned int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0 ){
int front = id* ( (N+K-1)/K );
int node = PQ[front];
        //if at least one heap minimum is still below Cx[dest], don't end the A* search
if( Cx[node] < Cx[dest] ){
atomicAnd(flagEnd,0);
}
}
}
template <class U>
__global__ void getCx(U* Cx,int dest,U* val){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id==0){
*val = Cx[dest];
}
}
#endif
|
c064b4e6dd1e1fc9eac932f21e931cfdc4fe1f12.cu
|
#ifdef __NVCC__
//K in parallel
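// each of the K threads owns a contiguous slice of PQ of capacity ceil(N/K) starting at
// front = id*((N+K-1)/K), kept as a binary min-heap keyed on Cx; extractMin pops that slice's
// root, moves the last element to the root and sifts it down, then queues the node for expansion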
template < class U >
__global__ void extractMin(unsigned int* PQ, unsigned int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
int node = PQ[front];
// restructure the heap
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
while(2*pqIndex+1 < PQ_size[id]){
if(2*pqIndex+2 >= PQ_size[id]){
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
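// each thread expands one frontier node over its CSR adjacency (off/edge/W); a child is relaxed
// with f(child) = g(node) + w(node,child) + h(child), where g(node) = Cx[node] - Hx[node],
// under a per-child spin lock acquired with atomicCAS so concurrent Cx/parent updates are safe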
template < class T, class U >
__global__ void A_star_expand(int* off,int* edge,T* W, U* Hx,int* parent,volatile U* Cx,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
int N,int E, int K,int dest,int* nVFlag ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
//reach dest
if(node == dest){
atomicOr(flagfound,1);
}
// expand
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
            //array L initialized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}//end
}
//K in parallel -- O(N)
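// after Cx values may have changed, walk each thread's PQ slice and sift any child that became
// smaller than its parent up towards the slice's root, restoring the min-heap property on Cx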
template < class U >
__global__ void keepHeapPQ(unsigned int* PQ, unsigned int* PQ_size,U* Cx,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0){
int front = id*( (N+K-1)/K );
int size = PQ_size[id];
for(int i=front;i<front+size;i++){
if(2*i+2 < front+size){
int cost = Cx[PQ[i]];
int costLeft = Cx[PQ[2*i+1]];
int costRight = Cx[PQ[2*i+2]];
if( cost > costLeft || cost > costRight ){
int index ;
if(costLeft <= costRight)
index = 2*i+1;
else
index = 2*i+2;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
else if(2*i+1 < front+size){
if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
int index = 2*i+1;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
}
}
//N threads
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
if(nextFlag[id]==1){
int index = atomicAdd(nvSize,1);
nextV[index]=id;
}
}
}
//for K in parallel
template <class U >
__global__ void insertPQ(unsigned int* PQ,unsigned int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
int front = id*( (N+K-1)/K );
int i = id;
while(i<*nVsize){
//if not already present
if(openList[nextV[i]]!=-1){
i+=K;
continue;
}
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
//add in openList
openList[nextV[i]] = id;
if(PQS[id]>1){
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
//for K in parallel
template < class U >
__global__ void checkMIN(unsigned int* PQ, unsigned int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0 ){
int front = id* ( (N+K-1)/K );
int node = PQ[front];
        //if at least one heap minimum is still below Cx[dest], don't end the A* search
if( Cx[node] < Cx[dest] ){
atomicAnd(flagEnd,0);
}
}
}
template <class U>
__global__ void getCx(U* Cx,int dest,U* val){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id==0){
*val = Cx[dest];
}
}
#endif
|
4f6a06d97eaf4d4b2f23edf0363b2517f57dbd29.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<iostream>
#include <math.h>
#include <time.h>
#include <iomanip>
#define n 1000
__global__ void Matrix_Product (double *A, double *g, double *C)
// Each thread computes one element of C
// by accumulating results into Cvalue
{ double Cvalue = 0.00;
    int row = blockIdx.x*blockDim.x+threadIdx.x; // threads are laid out along x by the launch (griddim(100,1), blockdim(10,1))
    //size of matrix A//
    int N=1000;
    if(row>=N) return;
for (int e = 0; e < N; e++)
{
Cvalue += A[N*row+e]*g[e];
}
C[row]+= Cvalue;
}
using namespace std;
int main(){
double a[n*n],x[n],c[n],temp=0,d=2;
srand(time(NULL));
for(long int i=0;i<n*n;i++)
{
a[i]=2*i*314.9568298+100;
//cin>>a[i][j]; //generating the matrix a[n][n]
//cout<<" "<<a[i][j]<<endl;
}
//
for(int i=0;i<n;i++)
{
x[i]=0.5;
}
x[n-1]=1;
hipEvent_t start,stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
double *dev_a, *dev_x, *dev_c;
dim3 griddim(100,1);
dim3 blockdim(10,1);
hipMalloc( (void**)&dev_a, n *n* sizeof(double) );
hipMalloc( (void**)&dev_c, n * sizeof(double) );
hipMalloc( (void**)&dev_x, n * sizeof(double) );
hipMemcpy( dev_a,a,n * n * sizeof(double),hipMemcpyHostToDevice );
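    // power iteration: repeatedly form c = A*x on the GPU, copy c back, take d as the entry of
    // largest magnitude and rescale x by 1/d; d approximates the dominant eigenvalue of A and
    // x the corresponding eigenvector (loop stops once d changes by less than 1e-13)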
while(fabs(d-temp)>0.0000000000001)
{
for(int i=0;i<n;i++)
{
c[i]=0;
}
// for(int j=0;j<n;j++) //portion to be parallelized
// {
// c[i]+=a[i][j]*x[j];
// }
// hipMalloc( (void**)&dev_c, n * sizeof(double) );
// hipMalloc( (void**)&dev_x, n * sizeof(double) );
// hipMalloc( (void**)&dev_a, n *n* sizeof(double) );
//hipMemcpy( dev_a,a,n * n * sizeof(double),hipMemcpyHostToDevice );
hipMemcpy( dev_x,x,n * sizeof(double),hipMemcpyHostToDevice );
hipMemcpy( dev_c,c,n * sizeof(double),hipMemcpyHostToDevice );
hipLaunchKernelGGL(( Matrix_Product), dim3(griddim), dim3(blockdim), 0, 0, dev_a, dev_x, dev_c );
hipMemcpy( c,dev_c,n * sizeof(double),hipMemcpyDeviceToHost );
// hipFree( dev_a );
// hipFree( dev_x );
// hipFree( dev_c );
for(int i=0;i<n;i++)
{
x[i]=c[i];
}
temp=d;
d=0;
for(int i=0;i<n;i++)
{
if(fabs(x[i])>fabs(d))
d=x[i];
}
for(int i=0;i<n;i++){
x[i]/=d;
}
}
// hipMemcpy( c,dev_c,n * sizeof(double),hipMemcpyDeviceToHost );
hipFree( dev_a );
hipFree( dev_x );
hipFree( dev_c );
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
cout<<"\n\nElapsed Time = "<<elapsedTime<<" ms";
//cout<<d<<endl;
//for(int i=0;i<n;i++){
// cout<<setprecision(30)<<d<<endl;
//}
//cout<<"Enter the initial guess for eigen vector";
//for(int i=0;i<n;i++){
// cout<<x[i]<<endl;
//}
//}
return 0;
}
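// Hedged aside (not part of the original program): the loop in main() is power iteration,
// so at convergence d approximates the dominant eigenvalue of A and x the matching
// eigenvector. A minimal host-side check under the same row-major layout is the Rayleigh
// quotient (x^T A x)/(x^T x); rayleighQuotient is a name introduced here for illustration.
double rayleighQuotient(const double* A, const double* x, int N)
{
    double num = 0.0, den = 0.0;
    for(int i = 0; i < N; i++)
    {
        double Ax_i = 0.0;
        for(int j = 0; j < N; j++)
            Ax_i += A[(long)N*i + j]*x[j];
        num += x[i]*Ax_i;
        den += x[i]*x[i];
    }
    return num/den;
}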
|
4f6a06d97eaf4d4b2f23edf0363b2517f57dbd29.cu
|
#include<stdio.h>
#include<stdlib.h>
#include<cuda_runtime.h>
#include<iostream>
#include <iostream>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <iomanip>
#define n 1000
__global__ void Matrix_Product (double *A, double *g, double *C)
// Each thread computes one element of the matrix-vector product C = A*g
// by accumulating results into Cvalue
{ double Cvalue = 0.00;
// one thread per row, indexed along x to match the 1000-thread (100 blocks * 10 threads) launch in main()
int row = blockIdx.x*blockDim.x+threadIdx.x;
//size of matrix A//
int N=1000;
if(row >= N) return;
for (int e = 0; e < N; e++)
{
Cvalue += A[N*row+e]*g[e];
}
C[row]+= Cvalue;
}
using namespace std;
int main(){
// keep the ~8 MB matrix off the stack to avoid overflowing the default stack limit
static double a[n*n],x[n],c[n];
double temp=0,d=2;
srand(time(NULL));
for(long int i=0;i<n*n;i++)
{
a[i]=2*i*314.9568298+100;
//cin>>a[i][j]; //generating the matrix a[n][n]
//cout<<" "<<a[i][j]<<endl;
}
//
for(int i=0;i<n;i++)
{
x[i]=0.5;
}
x[n-1]=1;
cudaEvent_t start,stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
double *dev_a, *dev_x, *dev_c;
dim3 griddim(100,1);
dim3 blockdim(10,1);
cudaMalloc( (void**)&dev_a, n *n* sizeof(double) );
cudaMalloc( (void**)&dev_c, n * sizeof(double) );
cudaMalloc( (void**)&dev_x, n * sizeof(double) );
cudaMemcpy( dev_a,a,n * n * sizeof(double),cudaMemcpyHostToDevice );
while(fabs(d-temp)>0.0000000000001)
{
for(int i=0;i<n;i++)
{
c[i]=0;
}
// for(int j=0;j<n;j++) //portion to be parallelized
// {
// c[i]+=a[i][j]*x[j];
// }
// cudaMalloc( (void**)&dev_c, n * sizeof(double) );
// cudaMalloc( (void**)&dev_x, n * sizeof(double) );
// cudaMalloc( (void**)&dev_a, n *n* sizeof(double) );
//cudaMemcpy( dev_a,a,n * n * sizeof(double),cudaMemcpyHostToDevice );
cudaMemcpy( dev_x,x,n * sizeof(double),cudaMemcpyHostToDevice );
cudaMemcpy( dev_c,c,n * sizeof(double),cudaMemcpyHostToDevice );
Matrix_Product<<<griddim, blockdim>>>( dev_a, dev_x, dev_c );
cudaMemcpy( c,dev_c,n * sizeof(double),cudaMemcpyDeviceToHost );
// cudaFree( dev_a );
// cudaFree( dev_x );
// cudaFree( dev_c );
for(int i=0;i<n;i++)
{
x[i]=c[i];
}
temp=d;
d=0;
for(int i=0;i<n;i++)
{
if(fabs(x[i])>fabs(d))
d=x[i];
}
for(int i=0;i<n;i++){
x[i]/=d;
}
}
// cudaMemcpy( c,dev_c,n * sizeof(double),cudaMemcpyDeviceToHost );
cudaFree( dev_a );
cudaFree( dev_x );
cudaFree( dev_c );
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
cout<<"\n\nElapsed Time = "<<elapsedTime<<" ms";
//cout<<d<<endl;
//for(int i=0;i<n;i++){
// cout<<setprecision(30)<<d<<endl;
//}
//cout<<"Enter the initial guess for eigen vector";
//for(int i=0;i<n;i++){
// cout<<x[i]<<endl;
//}
//}
return 0;
}
|
ace526a62b5756bb972e873cdcf7bfc5f2964f13.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/kernel_util.cuh"
namespace oneflow {
namespace {
template<typename T>
__global__ void QuantizationSymmetric(const T* in_ptr, const T* scale_ptr, const int64_t scale_size,
const int64_t elements, const int64_t panel_size,
const double quantization_bit, T* out_ptr) {
int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x;
int64_t step = gridDim.x * blockDim.x;
T upper_bound = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
T lower_bound = -upper_bound - 1;
while (gid < elements) {
int64_t channel_index = gid / panel_size;
int64_t scale_idx = min(scale_size - 1, channel_index);
T scale = scale_ptr[scale_idx];
T out = nearbyint(in_ptr[gid] / scale);
out = out > upper_bound ? upper_bound : out;
out = out < lower_bound ? lower_bound : out;
out_ptr[gid] = out;
gid += step;
}
}
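// Hedged host-side reference of the symmetric path above (a sketch for illustration only;
// QuantizeSymmetricRef is a name introduced here and is not used by the kernels). For
// quantization_bit = 8 the range is [-128, 127]: e.g. in = 3.1 with scale = 0.05 maps to
// nearbyint(62.0) = 62, while in = 12.7 maps to 254 and is clamped to 127.
template<typename T>
inline T QuantizeSymmetricRef(T in, T scale, double quantization_bit) {
  T upper_bound = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
  T lower_bound = -upper_bound - 1;
  T out = nearbyint(in / scale);
  out = out > upper_bound ? upper_bound : out;
  out = out < lower_bound ? lower_bound : out;
  return out;
}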
template<typename T>
__global__ void QuantizationAffine(const T* in_ptr, const T* scale_ptr, const T* zero_point_ptr,
const int64_t scale_size, const int64_t elements,
const int64_t panel_size, const double quantization_bit,
T* out_ptr) {
int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x;
int64_t step = gridDim.x * blockDim.x;
T upper_bound = static_cast<T>(pow(2.0, quantization_bit)) - 1;
T lower_bound = 0;
while (gid < elements) {
int64_t channel_index = gid / panel_size;
int64_t scale_idx = min(scale_size - 1, channel_index);
T scale = scale_ptr[scale_idx];
T zero_point = zero_point_ptr[scale_idx];
T out = nearbyint(in_ptr[gid] / scale + zero_point);
out = out > upper_bound ? upper_bound : out;
out = out < lower_bound ? lower_bound : out;
out_ptr[gid] = out;
gid += step;
}
}
template<typename T>
__global__ void QuantizationCambricon(const T* in_ptr, const T* shift, const int64_t scale_size,
const int64_t elements, const int64_t panel_size,
const double quantization_bit, T* out_ptr) {
int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x;
int64_t step = gridDim.x * blockDim.x;
T upper_bound = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
T lower_bound = -upper_bound - 1;
T scale = static_cast<T>(pow(2.0, static_cast<int32_t>(shift[0])));
while (gid < elements) {
T out = nearbyint(in_ptr[gid] / scale);
out = out > upper_bound ? upper_bound : out;
out = out < lower_bound ? lower_bound : out;
out_ptr[gid] = out;
gid += step;
}
}
} // namespace
template<typename T>
class GpuQuantizationKernel final : public user_op::OpKernel {
public:
GpuQuantizationKernel() = default;
~GpuQuantizationKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
const user_op::Tensor* scale = ctx->Tensor4ArgNameAndIndex("scale", 0);
const user_op::Tensor* zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme");
const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit");
const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula");
const int64_t elements = in->shape_view().elem_cnt();
const int64_t panel_size = in->shape_view().Count(1);
const int64_t scale_size = scale->shape_view().elem_cnt();
// round to even
auto origin_round_mode = std::fegetround();
std::fesetround(FE_TONEAREST);
if (quantization_formula == "google") {
if (quantization_scheme == "symmetric") {
RUN_CUDA_KERNEL((QuantizationSymmetric<T>), ctx->stream(), elements, in->dptr<T>(),
scale->dptr<T>(), scale_size, elements, panel_size, quantization_bit,
out->mut_dptr<T>());
} else { // quantization_scheme == "affine"
RUN_CUDA_KERNEL((QuantizationAffine<T>), ctx->stream(), elements, in->dptr<T>(),
scale->dptr<T>(), zero_point->dptr<T>(), scale_size, elements, panel_size,
quantization_bit, out->mut_dptr<T>());
}
} else if (quantization_formula == "cambricon") {
RUN_CUDA_KERNEL((QuantizationCambricon<T>), ctx->stream(), elements, in->dptr<T>(),
scale->dptr<T>(), scale_size, elements, panel_size, quantization_bit,
out->mut_dptr<T>());
} else {
UNIMPLEMENTED();
}
std::fesetround(origin_round_mode);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_QUANTIZATION_KERNEL(dtype) \
REGISTER_USER_KERNEL("quantization") \
.SetCreateFn<GpuQuantizationKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value))
REGISTER_QUANTIZATION_KERNEL(float);
REGISTER_QUANTIZATION_KERNEL(double);
} // namespace oneflow
|
ace526a62b5756bb972e873cdcf7bfc5f2964f13.cu
|
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/kernel_util.cuh"
namespace oneflow {
namespace {
template<typename T>
__global__ void QuantizationSymmetric(const T* in_ptr, const T* scale_ptr, const int64_t scale_size,
const int64_t elements, const int64_t panel_size,
const double quantization_bit, T* out_ptr) {
int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x;
int64_t step = gridDim.x * blockDim.x;
T upper_bound = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
T lower_bound = -upper_bound - 1;
while (gid < elements) {
int64_t channel_index = gid / panel_size;
int64_t scale_idx = min(scale_size - 1, channel_index);
T scale = scale_ptr[scale_idx];
T out = nearbyint(in_ptr[gid] / scale);
out = out > upper_bound ? upper_bound : out;
out = out < lower_bound ? lower_bound : out;
out_ptr[gid] = out;
gid += step;
}
}
template<typename T>
__global__ void QuantizationAffine(const T* in_ptr, const T* scale_ptr, const T* zero_point_ptr,
const int64_t scale_size, const int64_t elements,
const int64_t panel_size, const double quantization_bit,
T* out_ptr) {
int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x;
int64_t step = gridDim.x * blockDim.x;
T upper_bound = static_cast<T>(pow(2.0, quantization_bit)) - 1;
T lower_bound = 0;
while (gid < elements) {
int64_t channel_index = gid / panel_size;
int64_t scale_idx = min(scale_size - 1, channel_index);
T scale = scale_ptr[scale_idx];
T zero_point = zero_point_ptr[scale_idx];
T out = nearbyint(in_ptr[gid] / scale + zero_point);
out = out > upper_bound ? upper_bound : out;
out = out < lower_bound ? lower_bound : out;
out_ptr[gid] = out;
gid += step;
}
}
template<typename T>
__global__ void QuantizationCambricon(const T* in_ptr, const T* shift, const int64_t scale_size,
const int64_t elements, const int64_t panel_size,
const double quantization_bit, T* out_ptr) {
int64_t gid = (blockDim.x * blockIdx.x) + threadIdx.x;
int64_t step = gridDim.x * blockDim.x;
T upper_bound = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
T lower_bound = -upper_bound - 1;
T scale = static_cast<T>(pow(2.0, static_cast<int32_t>(shift[0])));
while (gid < elements) {
T out = nearbyint(in_ptr[gid] / scale);
out = out > upper_bound ? upper_bound : out;
out = out < lower_bound ? lower_bound : out;
out_ptr[gid] = out;
gid += step;
}
}
} // namespace
template<typename T>
class GpuQuantizationKernel final : public user_op::OpKernel {
public:
GpuQuantizationKernel() = default;
~GpuQuantizationKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
const user_op::Tensor* scale = ctx->Tensor4ArgNameAndIndex("scale", 0);
const user_op::Tensor* zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme");
const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit");
const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula");
const int64_t elements = in->shape_view().elem_cnt();
const int64_t panel_size = in->shape_view().Count(1);
const int64_t scale_size = scale->shape_view().elem_cnt();
// round to even
auto origin_round_mode = std::fegetround();
std::fesetround(FE_TONEAREST);
if (quantization_formula == "google") {
if (quantization_scheme == "symmetric") {
RUN_CUDA_KERNEL((QuantizationSymmetric<T>), ctx->stream(), elements, in->dptr<T>(),
scale->dptr<T>(), scale_size, elements, panel_size, quantization_bit,
out->mut_dptr<T>());
} else { // quantization_scheme == "affine"
RUN_CUDA_KERNEL((QuantizationAffine<T>), ctx->stream(), elements, in->dptr<T>(),
scale->dptr<T>(), zero_point->dptr<T>(), scale_size, elements, panel_size,
quantization_bit, out->mut_dptr<T>());
}
} else if (quantization_formula == "cambricon") {
RUN_CUDA_KERNEL((QuantizationCambricon<T>), ctx->stream(), elements, in->dptr<T>(),
scale->dptr<T>(), scale_size, elements, panel_size, quantization_bit,
out->mut_dptr<T>());
} else {
UNIMPLEMENTED();
}
std::fesetround(origin_round_mode);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_QUANTIZATION_KERNEL(dtype) \
REGISTER_USER_KERNEL("quantization") \
.SetCreateFn<GpuQuantizationKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value))
REGISTER_QUANTIZATION_KERNEL(float);
REGISTER_QUANTIZATION_KERNEL(double);
} // namespace oneflow
|
6cb2a843bd2c7533a085d4150e91a9463bf5a9c0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
@file kernel.cu
@author t-sakai
@date 2019/06/24
*/
#include "kernel.h"
#include "BSDF.h"
using namespace lcuda;
LCUDA_DEVICE f32 Shape::intersect(const Ray& ray) const
{
switch(shapeType_){
case Type_Sphere:
return intersectSphere(ray);
case Type_Plane:
return intersectPlane(ray);
}
return F32_MAX;
}
LCUDA_DEVICE void Shape::calcIntersection(Intersection& intersection, const Ray& ray) const
{
intersection.position_ = ray.origin_ + intersection.t_ * ray.direction_;
switch(shapeType_){
case Type_Sphere:
intersection.normal_ = normalizeChecked(intersection.position_ - position_, make_float3(0.0f));
break;
case Type_Plane:
intersection.normal_ = normal_;
break;
}
orthonormalBasis(intersection.binormal0_, intersection.binormal1_, intersection.normal_);
}
LCUDA_DEVICE f32 Shape::intersectSphere(const Ray& ray) const
{
float3 m = ray.origin_ - position_;
f32 b = dot(m, ray.direction_);
f32 c = dot(m, m) - radius_ * radius_;
if(0.0f < c) {
if(0.0f < b) {
return F32_MAX;
}
} else {
return F32_EPSILON;
}
f32 discr = b * b - c;
if(discr < RAY_EPSILON) {
return F32_MAX;
}
discr = sqrtf(discr);
b = -b;
f32 tmin = b - discr;
f32 tmax = b + discr;
return 0.0f <= tmin ? tmin : RAY_EPSILON < tmax ? tmax : F32_MAX;
}
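// Derivation note for intersectSphere above (reference only): with m = origin - center and a
// unit direction d, |m + t*d|^2 = r^2 expands to t^2 + 2*b*t + c = 0 where b = dot(m, d) and
// c = dot(m, m) - r^2, so t = -b +- sqrt(b^2 - c); the code returns the nearest root in front
// of the ray, F32_EPSILON when the origin is inside the sphere, and F32_MAX on a miss.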
LCUDA_DEVICE f32 Shape::intersectPlane(const Ray& ray) const
{
f32 t = radius_ - dot(normal_, ray.origin_);
f32 d = dot(normal_, ray.direction_);
t /= d;
return (d<F32_EPSILON && 0.0f <= t && t < F32_MAX) ? t : F32_MAX;
}
LCUDA_DEVICE float3 EmitterConstant::eval(const Intersection& intersection, const float3& d, const float3& radiance)
{
return (F32_EPSILON < dot(intersection.normal_, d))? radiance : make_float3(0.0f);
}
LCUDA_CONSTANT ConstantsRender g_constansRender;
namespace
{
LCUDA_DEVICE bool intersect(Intersection& intersection, const Ray& ray, s32 numShapes, const Shape* shapes)
{
intersection.t_ = F32_MAX;
intersection.id_ = -1;
for(s32 i = 0; i<numShapes; ++i){
f32 d = shapes[i].intersect(ray);
if(d<intersection.t_){
intersection.t_ = d;
intersection.id_ = i;
}
}
if(0<=intersection.id_){
shapes[intersection.id_].calcIntersection(intersection, ray);
return true;
}
return false;
}
template<class T>
LCUDA_DEVICE float3 radiance(Ray ray, s32 maxDepth, f32 roughness, uint4& random, s32 numShapes, const Shape* shapes)
{
BSDF<T> bsdf(roughness, roughness, 1.0f, 1.8f, 0.0f);
float3 Li = make_float3(0.0f);
float3 throughput = make_float3(1.0f);
//f32 pdf = 1.0f;
Intersection intersection;
for(s32 depth = 0; depth<maxDepth; ++depth){
if(!intersect(intersection, ray, numShapes, shapes)){
break;
}
const Shape& shape = shapes[intersection.id_];
float3 wow = -ray.direction_;
float3 wo = intersection.worldToLocal(wow);
float3 n = intersection.worldToLocal(intersection.normal_);
float3 wiw;
f32 bsdfPdf = 0.0f;
switch(shape.materialType_) {
case DIFFUSE: {
Diffuse diffuse;
f32 eta0 = xoshiro128plus_frand(random);
f32 eta1 = xoshiro128plus_frand(random);
float3 wi;
float3 bsdfWeight = diffuse.sample(wi, bsdfPdf, wo, eta0, eta1);
float3 f = shape.color_ * bsdfWeight;
if(isZero(f)) {
return Li;
}
wiw = intersection.localToWorld(wi);
throughput *= f;
} break;
case ROUGH_CONDUCTOR:{
f32 eta0 = xoshiro128plus_frand(random);
f32 eta1 = xoshiro128plus_frand(random);
float3 wi;
float3 bsdfWeight = bsdf.sample(wi, bsdfPdf, wo, eta0, eta1);
float3 f = shape.color_ * bsdfWeight;
if(isZero(f)) {
return Li;
}
wiw = intersection.localToWorld(wi);
throughput *= f;
}break;
case EMITTER:
bsdfPdf = 1.0f;
break;
default:
return Li;
}
if(!isZero(shape.emission_)){
Li += throughput * bsdfPdf * EmitterConstant::eval(intersection, -ray.direction_, shape.emission_);
}
ray.origin_ = intersection.position_;//muladd(LRENDER_RAY_EPSILON, intersection.normal_, intersection.position_);
ray.direction_ = wiw;
//Russian roulette
if(6 <= depth) {
f32 continueProbability = fminf(length(throughput), 0.9f);
if(continueProbability <= xoshiro128plus_frand(random)) {
break;
}
throughput /= continueProbability;
}
}
return Li;
}
template<class T>
LCUDA_GLOBAL void render(float3* screen)
{
s32 x = blockIdx.x * blockDim.x + threadIdx.x;
s32 y = blockIdx.y * blockDim.y + threadIdx.y;
const s32 Width = g_constansRender.width_;
const s32 Height = g_constansRender.height_;
if(Width<=x || Height<=y){
return;
}
uint4 random = xoshiro128plus_srand(g_constansRender.random_ + Width*y + x);
f32 fovx = g_constansRender.fovx_;
f32 fovy = g_constansRender.fovy_;
s32 samplesPerStep = g_constansRender.samplesPerStep_;
const Ray& camera = g_constansRender.cameraRay_;
float3 cx = make_float3(fovx, 0.0f, 0.0f);
float3 cy = normalize(cross(cx, camera.direction_)) * fovy;
f32 invWidth2 = 2.0f/Width;
f32 invHeight2 = 2.0f/Height;
s32 row = (Height-y-1)*Width;
float3 r = make_float3(0.0f);
for(s32 s = 0; s<samplesPerStep; ++s){
//f32 rx = xoshiro128plus_frand(random);
//f32 ry = xoshiro128plus_frand(random);
f32 rx = g_constansRender.samples_[s].x;
f32 ry = g_constansRender.samples_[s].y;
f32 sx = (x + rx)*invWidth2 - 1.0f;
f32 sy = (y + ry)*invHeight2 - 1.0f;
float3 d = normalize(cx*sx + cy*sy + camera.direction_);
Ray ray;
ray.origin_ = camera.origin_ + d*30e-4f;
ray.direction_ = d;
r += radiance<T>(ray, 16, g_constansRender.roughness_, random, ConstantsRender::NumShapes, g_constansRender.shapes_);
}
f32 count = g_constansRender.count_;
f32 inv = 1.0f/(count + samplesPerStep);
screen[row+x] = screen[row+x] * (count*inv) + r*inv;
}
LCUDA_GLOBAL void test_random(int N, float* result, uint4 random)
{
for(int i=0; i<N; ++i){
result[i] = xoshiro128plus_frand(random);
}
}
}
void kernel_render(float3* screen, int blockSize, const ConstantsRender& constants, DistributionType distribution)
{
hipMemcpyToSymbol(g_constansRender, &constants, sizeof(ConstantsRender));
dim3 dimBlock(blockSize, blockSize);
dim3 dimGrid((constants.width_ + blockSize - 1)/blockSize, (constants.height_ + blockSize - 1)/blockSize);
switch(distribution){
case Distribution_Beckmann:
hipLaunchKernelGGL(( render<BeckmannIsotropic>), dim3(dimGrid), dim3(dimBlock), 0, 0, screen);
break;
case Distribution_GGX:
hipLaunchKernelGGL(( render<GGXIsotropic>), dim3(dimGrid), dim3(dimBlock), 0, 0, screen);
break;
case Distribution_GGXVND:
hipLaunchKernelGGL(( render<GGXAnisotropicVND>), dim3(dimGrid), dim3(dimBlock), 0, 0, screen);
break;
case Distribution_GGXVND2:
hipLaunchKernelGGL(( render<GGXAnisotropicElipsoidVND>), dim3(dimGrid), dim3(dimBlock), 0, 0, screen);
break;
}
}
void kernel_random(int N, float* result, int blockSize, uint4 random)
{
hipLaunchKernelGGL(( test_random), dim3(1), dim3(blockSize), 0, 0, N, result, random);
}
|
6cb2a843bd2c7533a085d4150e91a9463bf5a9c0.cu
|
/**
@file kernel.cu
@author t-sakai
@date 2019/06/24
*/
#include "kernel.h"
#include "BSDF.h"
using namespace lcuda;
LCUDA_DEVICE f32 Shape::intersect(const Ray& ray) const
{
switch(shapeType_){
case Type_Sphere:
return intersectSphere(ray);
case Type_Plane:
return intersectPlane(ray);
}
return F32_MAX;
}
LCUDA_DEVICE void Shape::calcIntersection(Intersection& intersection, const Ray& ray) const
{
intersection.position_ = ray.origin_ + intersection.t_ * ray.direction_;
switch(shapeType_){
case Type_Sphere:
intersection.normal_ = normalizeChecked(intersection.position_ - position_, make_float3(0.0f));
break;
case Type_Plane:
intersection.normal_ = normal_;
break;
}
orthonormalBasis(intersection.binormal0_, intersection.binormal1_, intersection.normal_);
}
LCUDA_DEVICE f32 Shape::intersectSphere(const Ray& ray) const
{
float3 m = ray.origin_ - position_;
f32 b = dot(m, ray.direction_);
f32 c = dot(m, m) - radius_ * radius_;
if(0.0f < c) {
if(0.0f < b) {
return F32_MAX;
}
} else {
return F32_EPSILON;
}
f32 discr = b * b - c;
if(discr < RAY_EPSILON) {
return F32_MAX;
}
discr = sqrtf(discr);
b = -b;
f32 tmin = b - discr;
f32 tmax = b + discr;
return 0.0f <= tmin ? tmin : RAY_EPSILON < tmax ? tmax : F32_MAX;
}
LCUDA_DEVICE f32 Shape::intersectPlane(const Ray& ray) const
{
f32 t = radius_ - dot(normal_, ray.origin_);
f32 d = dot(normal_, ray.direction_);
t /= d;
return (d<F32_EPSILON && 0.0f <= t && t < F32_MAX) ? t : F32_MAX;
}
LCUDA_DEVICE float3 EmitterConstant::eval(const Intersection& intersection, const float3& d, const float3& radiance)
{
return (F32_EPSILON < dot(intersection.normal_, d))? radiance : make_float3(0.0f);
}
LCUDA_CONSTANT ConstantsRender g_constansRender;
namespace
{
LCUDA_DEVICE bool intersect(Intersection& intersection, const Ray& ray, s32 numShapes, const Shape* shapes)
{
intersection.t_ = F32_MAX;
intersection.id_ = -1;
for(s32 i = 0; i<numShapes; ++i){
f32 d = shapes[i].intersect(ray);
if(d<intersection.t_){
intersection.t_ = d;
intersection.id_ = i;
}
}
if(0<=intersection.id_){
shapes[intersection.id_].calcIntersection(intersection, ray);
return true;
}
return false;
}
template<class T>
LCUDA_DEVICE float3 radiance(Ray ray, s32 maxDepth, f32 roughness, uint4& random, s32 numShapes, const Shape* shapes)
{
BSDF<T> bsdf(roughness, roughness, 1.0f, 1.8f, 0.0f);
float3 Li = make_float3(0.0f);
float3 throughput = make_float3(1.0f);
//f32 pdf = 1.0f;
Intersection intersection;
for(s32 depth = 0; depth<maxDepth; ++depth){
if(!intersect(intersection, ray, numShapes, shapes)){
break;
}
const Shape& shape = shapes[intersection.id_];
float3 wow = -ray.direction_;
float3 wo = intersection.worldToLocal(wow);
float3 n = intersection.worldToLocal(intersection.normal_);
float3 wiw;
f32 bsdfPdf = 0.0f;
switch(shape.materialType_) {
case DIFFUSE: {
Diffuse diffuse;
f32 eta0 = xoshiro128plus_frand(random);
f32 eta1 = xoshiro128plus_frand(random);
float3 wi;
float3 bsdfWeight = diffuse.sample(wi, bsdfPdf, wo, eta0, eta1);
float3 f = shape.color_ * bsdfWeight;
if(isZero(f)) {
return Li;
}
wiw = intersection.localToWorld(wi);
throughput *= f;
} break;
case ROUGH_CONDUCTOR:{
f32 eta0 = xoshiro128plus_frand(random);
f32 eta1 = xoshiro128plus_frand(random);
float3 wi;
float3 bsdfWeight = bsdf.sample(wi, bsdfPdf, wo, eta0, eta1);
float3 f = shape.color_ * bsdfWeight;
if(isZero(f)) {
return Li;
}
wiw = intersection.localToWorld(wi);
throughput *= f;
}break;
case EMITTER:
bsdfPdf = 1.0f;
break;
default:
return Li;
}
if(!isZero(shape.emission_)){
Li += throughput * bsdfPdf * EmitterConstant::eval(intersection, -ray.direction_, shape.emission_);
}
ray.origin_ = intersection.position_;//muladd(LRENDER_RAY_EPSILON, intersection.normal_, intersection.position_);
ray.direction_ = wiw;
//Russian roulette
if(6 <= depth) {
f32 continueProbability = fminf(length(throughput), 0.9f);
if(continueProbability <= xoshiro128plus_frand(random)) {
break;
}
throughput /= continueProbability;
}
}
return Li;
}
template<class T>
LCUDA_GLOBAL void render(float3* screen)
{
s32 x = blockIdx.x * blockDim.x + threadIdx.x;
s32 y = blockIdx.y * blockDim.y + threadIdx.y;
const s32 Width = g_constansRender.width_;
const s32 Height = g_constansRender.height_;
if(Width<=x || Height<=y){
return;
}
uint4 random = xoshiro128plus_srand(g_constansRender.random_ + Width*y + x);
f32 fovx = g_constansRender.fovx_;
f32 fovy = g_constansRender.fovy_;
s32 samplesPerStep = g_constansRender.samplesPerStep_;
const Ray& camera = g_constansRender.cameraRay_;
float3 cx = make_float3(fovx, 0.0f, 0.0f);
float3 cy = normalize(cross(cx, camera.direction_)) * fovy;
f32 invWidth2 = 2.0f/Width;
f32 invHeight2 = 2.0f/Height;
s32 row = (Height-y-1)*Width;
float3 r = make_float3(0.0f);
for(s32 s = 0; s<samplesPerStep; ++s){
//f32 rx = xoshiro128plus_frand(random);
//f32 ry = xoshiro128plus_frand(random);
f32 rx = g_constansRender.samples_[s].x;
f32 ry = g_constansRender.samples_[s].y;
f32 sx = (x + rx)*invWidth2 - 1.0f;
f32 sy = (y + ry)*invHeight2 - 1.0f;
float3 d = normalize(cx*sx + cy*sy + camera.direction_);
Ray ray;
ray.origin_ = camera.origin_ + d*30e-4f;
ray.direction_ = d;
r += radiance<T>(ray, 16, g_constansRender.roughness_, random, ConstantsRender::NumShapes, g_constansRender.shapes_);
}
f32 count = g_constansRender.count_;
f32 inv = 1.0f/(count + samplesPerStep);
screen[row+x] = screen[row+x] * (count*inv) + r*inv;
}
LCUDA_GLOBAL void test_random(int N, float* result, uint4 random)
{
for(int i=0; i<N; ++i){
result[i] = xoshiro128plus_frand(random);
}
}
}
void kernel_render(float3* screen, int blockSize, const ConstantsRender& constants, DistributionType distribution)
{
cudaMemcpyToSymbol(g_constansRender, &constants, sizeof(ConstantsRender));
dim3 dimBlock(blockSize, blockSize);
dim3 dimGrid((constants.width_ + blockSize - 1)/blockSize, (constants.height_ + blockSize - 1)/blockSize);
switch(distribution){
case Distribution_Beckmann:
render<BeckmannIsotropic><<<dimGrid, dimBlock>>>(screen);
break;
case Distribution_GGX:
render<GGXIsotropic><<<dimGrid, dimBlock>>>(screen);
break;
case Distribution_GGXVND:
render<GGXAnisotropicVND><<<dimGrid, dimBlock>>>(screen);
break;
case Distribution_GGXVND2:
render<GGXAnisotropicElipsoidVND><<<dimGrid, dimBlock>>>(screen);
break;
}
}
void kernel_random(int N, float* result, int blockSize, uint4 random)
{
test_random<<<1, blockSize>>>(N, result, random);
}
|
e98f92bf3b269062b4bf4f761284cd5678763e5e.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019-2021 by XGBoost Contributors
* \file simple_dmatrix.cu
*/
#include <thrust/copy.h>
#include <xgboost/data.h>
#include "simple_dmatrix.cuh"
#include "simple_dmatrix.h"
#include "device_adapter_hip.cuh"
namespace xgboost {
namespace data {
// Does not currently support metainfo as no on-device data source contains this
// Current implementation assumes a single batch. More batches can
// be supported in future. Does not currently support inferring row/column size
template <typename AdapterT>
SimpleDMatrix::SimpleDMatrix(AdapterT* adapter, float missing, int nthread) {
dh::safe_cuda(hipSetDevice(adapter->DeviceIdx()));
CHECK(adapter->NumRows() != kAdapterUnknownSize);
CHECK(adapter->NumColumns() != kAdapterUnknownSize);
adapter->BeforeFirst();
adapter->Next();
// Enforce single batch
CHECK(!adapter->Next());
info_.num_nonzero_ = CopyToSparsePage(adapter->Value(), adapter->DeviceIdx(),
missing, &sparse_page_);
info_.num_col_ = adapter->NumColumns();
info_.num_row_ = adapter->NumRows();
// Synchronise worker columns
rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1);
}
template SimpleDMatrix::SimpleDMatrix(CudfAdapter* adapter, float missing,
int nthread);
template SimpleDMatrix::SimpleDMatrix(CupyAdapter* adapter, float missing,
int nthread);
} // namespace data
} // namespace xgboost
|
e98f92bf3b269062b4bf4f761284cd5678763e5e.cu
|
/*!
* Copyright 2019-2021 by XGBoost Contributors
* \file simple_dmatrix.cu
*/
#include <thrust/copy.h>
#include <xgboost/data.h>
#include "simple_dmatrix.cuh"
#include "simple_dmatrix.h"
#include "device_adapter.cuh"
namespace xgboost {
namespace data {
// Does not currently support metainfo as no on-device data source contains this
// Current implementation assumes a single batch. More batches can
// be supported in future. Does not currently support inferring row/column size
template <typename AdapterT>
SimpleDMatrix::SimpleDMatrix(AdapterT* adapter, float missing, int nthread) {
dh::safe_cuda(cudaSetDevice(adapter->DeviceIdx()));
CHECK(adapter->NumRows() != kAdapterUnknownSize);
CHECK(adapter->NumColumns() != kAdapterUnknownSize);
adapter->BeforeFirst();
adapter->Next();
// Enforce single batch
CHECK(!adapter->Next());
info_.num_nonzero_ = CopyToSparsePage(adapter->Value(), adapter->DeviceIdx(),
missing, &sparse_page_);
info_.num_col_ = adapter->NumColumns();
info_.num_row_ = adapter->NumRows();
// Synchronise worker columns
rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1);
}
template SimpleDMatrix::SimpleDMatrix(CudfAdapter* adapter, float missing,
int nthread);
template SimpleDMatrix::SimpleDMatrix(CupyAdapter* adapter, float missing,
int nthread);
} // namespace data
} // namespace xgboost
|
0d3b97f2f9d6ed5a82e72dedc3095da6e07c02ef.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2014 Jure Ratkovic
*/
#include <Trayc/CUDAfiles/common.h>
#include <Trayc/CUDAfiles/random.h>
#include <Trayc/CUDAfiles/helper.h>
#include <optix_math.h>
//
// Pinhole/DOF camera implementation
//
rtDeclareVariable(float3, eye, , );
rtDeclareVariable(float3, U, , );
rtDeclareVariable(float3, V, , );
rtDeclareVariable(float3, W, , );
rtDeclareVariable(int, AAlevel, , );
rtDeclareVariable(float, aperture_radius, , );
rtDeclareVariable(float, focal_length, , );
rtDeclareVariable(int, dof_samples, , );
RT_PROGRAM void dof_camera()
{
const uint2 screen = output_buffer.size() * AAlevel;
const float2 invScreen2x = 2.0f / make_float2(screen);
const uint2 uiNewLaunchIndex = make_uint2(launch_index.x, launch_index.y + myStripe * output_buffer.size().y / renderingDivisionLevel);
const int2 iNewLaunchIndex = make_int2(uiNewLaunchIndex.x, uiNewLaunchIndex.y);
float3 result = make_float3(0.0f);
unsigned int seed = tea<1>(screen.x * uiNewLaunchIndex.y + uiNewLaunchIndex.x, rnd_seed);
float count = 0.0f;
for(int i = 0; i < AAlevel; ++i)
for(int j = 0; j < AAlevel; ++j)
{
const float2 d = make_float2(AAlevel * iNewLaunchIndex.x + i, AAlevel * iNewLaunchIndex.y + j) * invScreen2x - 1.f;
const float3 ray_direction = normalize(d.x * U + d.y * V + W);
const optix::Ray ray(eye, ray_direction, radiance_ray_type, scene_epsilon);
PerRayData_radiance prd;
prd.importance = 1.f;
prd.depth = 0;
rtTrace(top_object, ray, prd);
result += prd.result;
count += 1.0f;
for(int k = 0; k < dof_samples; ++k)
{
optix::Ray ray_dof(ray);
const float2 circPoint = make_float2(rnd(seed) * 2.0f - 1.0f, rnd(seed) * 2.0f - 1.0f);
const float3 apertureOffset = make_float3(circPoint.x * aperture_radius, circPoint.y * aperture_radius, 0.0f);
ray_dof.origin += apertureOffset;
ray_dof.direction *= focal_length;
ray_dof.direction -= apertureOffset;
ray_dof.direction = normalize(ray_dof.direction);
PerRayData_radiance prd;
prd.importance = 1.f;
prd.depth = 0;
rtTrace(top_object, ray_dof, prd);
count += 1.0f;
result += prd.result;
}
}
output_buffer[uiNewLaunchIndex] = make_color(result / count);
}
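//
// Hedged aside on the aperture sampling above: circPoint is drawn uniformly from the
// [-1,1]^2 square, so some samples fall outside a circular aperture of radius
// aperture_radius. A minimal polar-mapping alternative (a sketch; sampleDiskPolar is a
// name introduced here and is not used by the original program) would be:
//
static __device__ __inline__ float2 sampleDiskPolar(unsigned int &seed)
{
    const float r = sqrtf(rnd(seed));
    const float phi = 2.0f * M_PIf * rnd(seed);
    return make_float2(r * cosf(phi), r * sinf(phi));
}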
//
// Environment map
//
rtTextureSampler<uchar4, 2, hipReadModeNormalizedFloat> envmap;
RT_PROGRAM void envmap_miss()
{
const float theta = atan2f(ray.direction.x, ray.direction.z);
const float phi = M_PI_2f - acosf(ray.direction.y);
const float u = (theta + M_PIf) * (0.5f * M_1_PIf);
const float v = 0.5f * (1.0f + sinf(phi));
prd_radiance.result = make_float3(tex2D(envmap, u, v));
}
//
// Returns solid color for miss rays
//
rtDeclareVariable(float3, miss_color, , );
RT_PROGRAM void miss()
{
prd_radiance.result = miss_color;
}
//
// Returns color from [miss_min, miss_max] linearly interpolated across ray inclination
//
rtDeclareVariable(float3, miss_min, , );
rtDeclareVariable(float3, miss_max, , );
RT_PROGRAM void gradient_miss()
{
const float phi = asinf(ray.direction.y);
prd_radiance.result = 2.0f * phi / M_PIf * (miss_max - miss_min) + miss_min;
}
//
// Set pixel to solid color upon failure
//
rtDeclareVariable(float3, bad_color, , );
RT_PROGRAM void exception()
{
const uint2 newLaunchIndex = make_uint2(launch_index.x, launch_index.y + myStripe * output_buffer.size().y / renderingDivisionLevel);
output_buffer[newLaunchIndex] = make_color(bad_color);
}
|
0d3b97f2f9d6ed5a82e72dedc3095da6e07c02ef.cu
|
/*
* Copyright (c) 2014 Jure Ratkovic
*/
#include <Trayc/CUDAfiles/common.h>
#include <Trayc/CUDAfiles/random.h>
#include <Trayc/CUDAfiles/helper.h>
#include <optix_math.h>
//
// Pinhole/DOF camera implementation
//
rtDeclareVariable(float3, eye, , );
rtDeclareVariable(float3, U, , );
rtDeclareVariable(float3, V, , );
rtDeclareVariable(float3, W, , );
rtDeclareVariable(int, AAlevel, , );
rtDeclareVariable(float, aperture_radius, , );
rtDeclareVariable(float, focal_length, , );
rtDeclareVariable(int, dof_samples, , );
RT_PROGRAM void dof_camera()
{
const uint2 screen = output_buffer.size() * AAlevel;
const float2 invScreen2x = 2.0f / make_float2(screen);
const uint2 uiNewLaunchIndex = make_uint2(launch_index.x, launch_index.y + myStripe * output_buffer.size().y / renderingDivisionLevel);
const int2 iNewLaunchIndex = make_int2(uiNewLaunchIndex.x, uiNewLaunchIndex.y);
float3 result = make_float3(0.0f);
unsigned int seed = tea<1>(screen.x * uiNewLaunchIndex.y + uiNewLaunchIndex.x, rnd_seed);
float count = 0.0f;
for(int i = 0; i < AAlevel; ++i)
for(int j = 0; j < AAlevel; ++j)
{
const float2 d = make_float2(AAlevel * iNewLaunchIndex.x + i, AAlevel * iNewLaunchIndex.y + j) * invScreen2x - 1.f;
const float3 ray_direction = normalize(d.x * U + d.y * V + W);
const optix::Ray ray(eye, ray_direction, radiance_ray_type, scene_epsilon);
PerRayData_radiance prd;
prd.importance = 1.f;
prd.depth = 0;
rtTrace(top_object, ray, prd);
result += prd.result;
count += 1.0f;
for(int k = 0; k < dof_samples; ++k)
{
optix::Ray ray_dof(ray);
const float2 circPoint = make_float2(rnd(seed) * 2.0f - 1.0f, rnd(seed) * 2.0f - 1.0f);
const float3 apertureOffset = make_float3(circPoint.x * aperture_radius, circPoint.y * aperture_radius, 0.0f);
ray_dof.origin += apertureOffset;
ray_dof.direction *= focal_length;
ray_dof.direction -= apertureOffset;
ray_dof.direction = normalize(ray_dof.direction);
PerRayData_radiance prd;
prd.importance = 1.f;
prd.depth = 0;
rtTrace(top_object, ray_dof, prd);
count += 1.0f;
result += prd.result;
}
}
output_buffer[uiNewLaunchIndex] = make_color(result / count);
}
//
// Environment map
//
rtTextureSampler<uchar4, 2, cudaReadModeNormalizedFloat> envmap;
RT_PROGRAM void envmap_miss()
{
const float theta = atan2f(ray.direction.x, ray.direction.z);
const float phi = M_PI_2f - acosf(ray.direction.y);
const float u = (theta + M_PIf) * (0.5f * M_1_PIf);
const float v = 0.5f * (1.0f + sinf(phi));
prd_radiance.result = make_float3(tex2D(envmap, u, v));
}
//
// Returns solid color for miss rays
//
rtDeclareVariable(float3, miss_color, , );
RT_PROGRAM void miss()
{
prd_radiance.result = miss_color;
}
//
// Returns color from [miss_min, miss_max] linearly interpolated across ray inclination
//
rtDeclareVariable(float3, miss_min, , );
rtDeclareVariable(float3, miss_max, , );
RT_PROGRAM void gradient_miss()
{
const float phi = asinf(ray.direction.y);
prd_radiance.result = 2.0f * phi / M_PIf * (miss_max - miss_min) + miss_min;
}
//
// Set pixel to solid color upon failure
//
rtDeclareVariable(float3, bad_color, , );
RT_PROGRAM void exception()
{
const uint2 newLaunchIndex = make_uint2(launch_index.x, launch_index.y + myStripe * output_buffer.size().y / renderingDivisionLevel);
output_buffer[newLaunchIndex] = make_color(bad_color);
}
|
9112911ca42aacc81d8cb8fa568e50f4f25d3696.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
// This file contains C wrappers around some of the CUDA API and the
// kernel functions so that they can be called from "particleSystem.cpp"
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
#if defined(__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
#include "thrust/device_ptr.h"
#include "thrust/iterator/constant_iterator.h"
#include "thrust/sort.h"
#include "thrust/count.h"
#include "thrust/functional.h"
#include "thrust/reduce.h"
#include "thrust/inner_product.h"
#include "utilities.h"
#include "particles_kernel.h"
#include "particles_kernel.cu"
using namespace thrust;
uint iDivUp(uint a, uint b)
{
return (a%b == 0) ? (a/b) : (a/b +1);
}
extern "C"
{
void cudaInit(int argc, char **argv)
{
int devID;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0) {
printf("No CUDA Capable devices found, exiting...\n");
exit(EXIT_SUCCESS);
}
}
void cudaGLInit(int argc, char **argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaGLDevice(argc, (const char **)argv);
}
void copyArrayToDevice(void* device, const void* host, int offset, int size)
{
hipMemcpy((char *) device + offset, host, size, hipMemcpyHostToDevice);
}
void registerGLBufferObject(uint vbo, struct cudaGraphicsResource **cuda_vbo_resource)
{
hipGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo,
hipGraphicsMapFlagsNone);
}
void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
hipGraphicsUnregisterResource(cuda_vbo_resource);
}
void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource)
{
void *ptr;
hipGraphicsMapResources(1, cuda_vbo_resource, 0);
size_t num_bytes;
hipGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes,
*cuda_vbo_resource);
return ptr;
}
void unmapGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0);
}
void copyArrayFromDevice(void* host, const void* device,
struct cudaGraphicsResource **cuda_vbo_resource, int size)
{
if (cuda_vbo_resource)
device = mapGLBufferObject(cuda_vbo_resource);
hipMemcpy(host, device, size, hipMemcpyDeviceToHost);
if (cuda_vbo_resource)
unmapGLBufferObject(*cuda_vbo_resource);
}
void setParameters(SimParams *hostParams)
{
// copy parameters to constant memory
hipMemcpyToSymbol(params, hostParams, sizeof(SimParams));
}
// compute grid and thread block size for a given number of elements
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
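// Usage note for the helpers above (illustrative numbers only): for n = 1000 elements and
// blockSize = 256, computeGridSize yields numThreads = min(256, 1000) = 256 and
// numBlocks = iDivUp(1000, 256) = 4, so 4 blocks * 256 threads = 1024 threads cover the
// 1000 elements, with the surplus threads expected to fall out of the kernel's bounds check.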
void sortParticles(uint *dGridParticleHash, uint *dGridParticleIndex, uint numParticles)
{
sort_by_key(device_ptr<uint>(dGridParticleHash),
device_ptr<uint>(dGridParticleHash + numParticles),
device_ptr<uint>(dGridParticleIndex));
}
struct f4norm : public unary_function<float4, float>{
__host__ __device__ float operator() (const float4 &f){
return sqrtf(f.x*f.x + f.y*f.y + f.z*f.z);
}
};
struct isOut
{
isOut(float3 bmax) : bmax(bmax) {}
__host__ __device__ bool operator()(const float4 &p){
//allows for particles to be out of bounds by ~1ULP of fp error
float err = 1 + 1e-6f;
if(isnan(p.x) || isnan(p.y) || isnan(p.z))
return true;
if(fabsf(p.x) > bmax.x*err )
return true;
if(fabsf(p.y)+p.w > bmax.y*err)
return true;
if(fabsf(p.z) > bmax.z*err )
return true;
return false;
}
const float3 bmax;
};
bool isOutofBounds(float4* positions, float3 border, uint numParticles)
{
int x = count_if(device_ptr<float4>(positions),
device_ptr<float4>(positions+numParticles),
isOut(border));
if(x>0) printf("%d particles outofbounds\n", x);
return x>0;
}
float3 magnetization(float4* moments, uint numParticles, float worldVol){
float4 totalDp = reduce(device_ptr<float4>(moments),
device_ptr<float4>(moments+numParticles),
make_float4(0,0,0,0), plus<float4>() );
return make_float3(totalDp)/worldVol;
}
uint edgeCount(float4* forces, uint numParticles){
float4 edge = reduce(device_ptr<float4>(forces),
device_ptr<float4>(forces+numParticles),
make_float4(0,0,0,0), plus<float4>());
return (uint) edge.w/2.0f;
}
//functors for finding the top and bottom particles
struct isTop : public binary_function<float4, float4, float3> {
isTop(float wsize, float cut) : pin_d(cut), wsize(wsize) {}
__host__ __device__ float3 operator()(const float4& force, const float4& pos){
if(pos.y >= wsize - pin_d*pos.w)
return make_float3(force);
else
return make_float3(0,0,0);
}
const float wsize;//half the world size
const float pin_d;
};
struct isBot : public binary_function<float4, float4, float3> {
isBot(float size, float cut) : pin_d(cut), wsize(size) {}
__host__ __device__ float3 operator()(const float4& force, const float4& pos){
if(pos.y <= -wsize + pin_d*pos.w)
return make_float3(force);
else
return make_float3(0,0,0);
}
const float pin_d;
const float wsize;
};
//the functions
float calcTopForce(float4* forces, float4* position, uint numParticles, float wsize, float cut){
float3 tforce = inner_product(device_ptr<float4>(forces),
device_ptr<float4>(forces+numParticles),device_ptr<float4>(position),
make_float3(0,0,0), plus<float3>(), isTop(wsize, cut));
return tforce.x;
}
float calcBotForce(float4* forces, float4* position, uint numParticles, float wsize, float cut){
float3 tforce = inner_product(device_ptr<float4>(forces),
device_ptr<float4>(forces+numParticles),device_ptr<float4>(position),
make_float3(0,0,0), plus<float3>(), isBot(wsize, cut));
return tforce.x;
}
//global stress functor
struct stressThing : public binary_function<float4, float4, float3>{
stressThing(float ws, float pd) : wsize(ws), pin_d(pd) {}
__host__ __device__ float3 operator()(const float4& force, const float4& pos){
if(fabsf(pos.y) <= wsize - pin_d*pos.w)
return make_float3(force.x, force.y, force.z)*pos.y;
else
return make_float3(0,0,0);
}
const float pin_d;
const float wsize;
};
float calcGlForce(float4* forces, float4* position, uint numParticles, float wsize, float cut = 0.0f){
float3 glf = inner_product(device_ptr<float4>(forces),
device_ptr<float4>(forces+numParticles), device_ptr<float4>(position),
make_float3(0,0,0), plus<float3>(), stressThing(wsize, cut));
return glf.x;
}
uint numInteractions(uint* neighList, uint numParticles){
return reduce(device_ptr<uint>(neighList), device_ptr<uint>(neighList+numParticles),
0, plus<uint>() );
}
//computes v^2 times particle volume (kinetic energy up to a constant mass/density factor)
struct kinen : public binary_function<float4, float4, float>{
kinen(float v, float ws, float pd): visc(v), wsize(ws), pin_d(pd) {}
__host__ __device__ float operator()(const float4& f, const float4& p)
{
float Cd = 6*PI_F*visc*p.w;
if(fabsf(p.y) > wsize - p.w*pin_d) {
return 0.0f;
} else {
return (f.x*f.x + f.y*f.y + f.z*f.z)/(Cd*Cd)*(4.0f/3.0f*PI_F*p.w*p.w*p.w);
}
}
const float visc;
const float wsize;
const float pin_d;
};
float calcKinEn(float4* forces, float4* position, NewParams& params){
kinen thingy = kinen(params.visc, params.L.y*0.5f, params.pin_d);
float kin = inner_product(device_ptr<float4>(forces),
device_ptr<float4>(forces+params.N), device_ptr<float4>(position),
0.0f, plus<float>(), thingy );
return kin*0.5f;
}
float maxforce(float4* forces, uint numParticles) {
return transform_reduce(device_ptr<float4>(forces), device_ptr<float4>(forces+numParticles),
f4norm(),0.0f, maximum<float>());
}
struct pvel : public binary_function<float4, float4, float> {
pvel(float v, float ws, float pdist) : visc(v), wsize(ws), pin_d(pdist) {}
__host__ __device__ float operator()(const float4 &f, const float4 &p) {
float Cd = 6*PI_F*visc*p.w;
if(fabsf(p.y) > wsize - p.w*pin_d){
return 0.0f;
} else {
return sqrtf(f.x*f.x + f.y*f.y + f.z*f.z)/Cd;
}
}
const float visc;
const float wsize;
const float pin_d;
};
float maxvel(float4* forces, float4* positions, NewParams& params){
//use pos.w to get radius,
pvel vel_calc = pvel(params.visc, params.L.y*0.5f, params.pin_d);
return inner_product(device_ptr<float4>(forces), device_ptr<float4>(forces+params.N),
device_ptr<float4>(positions), 0.0f, maximum<float>(), vel_calc);
}
void pshift(float4* positions, float3 s, uint numParticles){
float4 shift = make_float4(s, 0.0f);
constant_iterator<int> test(3);
constant_iterator<float4> shifter(shift);
thrust::transform(device_ptr<float4>(positions), device_ptr<float4>(positions+numParticles),
shifter, device_ptr<float4>(positions), plus<float4>());
}
struct isExcessForce
{
isExcessForce(float force) : force(force) {}
__host__ __device__ bool operator()(const float4 &f){
if(f.x*f.x + f.y*f.y + f.z*f.z > force*force )
return true;
return false;
}
const float force;
};
bool excessForce(float4* forces, float maxforce, uint numParticles){
int x = count_if(device_ptr<float4>(forces),
device_ptr<float4>(forces+numParticles),
isExcessForce(maxforce));
if(x>0) printf("%d particles with excessive movement\n", x);
return x>0;
}
struct mom_reset
{
mom_reset(float3 H) : extH(H) {}
__host__ __device__ float4 operator()(const float4& m){
return make_float4(extH*m.w, m.w);
}
const float3 extH;
};
void resetMom(float4* moments, float3 extH, uint numParticles){
transform(device_ptr<float4>(moments), device_ptr<float4>(moments+numParticles),
device_ptr<float4>(moments), mom_reset(extH));
}
struct renderCut
{
renderCut(float4 r) : cut(r) {}
__host__ __device__ float4 operator()(const float4& p){
float4 returned = p;
if(p.x > cut.x || p.y > cut.y || p.z > cut.z || p.w > cut.w){
returned.w = 0;
}
return returned;
}
const float4 cut;
};
void renderCutKern(float4* pos, float4 minPos, uint numParticles) {
device_ptr<float4> dpos(pos);
transform(dpos, dpos+numParticles, dpos, renderCut(minPos));
}
struct bogacki
{
bogacki(float dt, float c) : deltaTime(dt), Cd(c) {}
__host__ __device__ float operator() (const tuple<float4, float4, float4, float4>& p) {
float3 k1 = make_float3(get<0>(p));
float3 k2 = make_float3(get<1>(p));
float3 k3 = make_float3(get<2>(p));
float3 k4 = make_float3(get<3>(p));
float3 error = (float)(2.0/9.0 - 7.0/24.0)*k1 + (float)(3.0/9.0 - 6.0/24.0)*k2 +
(float)(4.0/9.0 - 8.0/24.0)*k3 + (float)(0.0 - 3.0/24.0)*k4;
return deltaTime*length(error)/Cd;
}
const float deltaTime;
const float Cd;
};
float bogacki_error(float4* k1, float4* k2, float4* k3, float4* k4, uint N, float Cd, float deltaTime) {
device_ptr<float4> dk1 = device_ptr<float4>(k1);
device_ptr<float4> dk2 = device_ptr<float4>(k2);
device_ptr<float4> dk3 = device_ptr<float4>(k3);
device_ptr<float4> dk4 = device_ptr<float4>(k4);
//transform reduce, unary func computes error, reducing to find max
return transform_reduce(
make_zip_iterator(make_tuple(dk1, dk2, dk3,dk4)),
make_zip_iterator(make_tuple(dk1+N, dk2+N, dk3+N,dk4+N)),
bogacki(deltaTime,Cd),
0.0f,
maximum<float>() );
}
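// Note on the error estimate above (reference only): the functor encodes the embedded
// Bogacki-Shampine pair, whose third-order weights are (2/9, 1/3, 4/9, 0) and second-order
// weights are (7/24, 1/4, 1/3, 1/8), so the per-particle estimate is
// dt * |sum_i (b_i - b*_i) k_i| divided by the drag coefficient Cd passed by the caller.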
void renderStuff(const float* pos,
const float* moment,
const float* force,
float* rendPos,
float* rendColor,
float colorFmax,
float scale,
float rendercut,
uint numParticles)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
hipLaunchKernelGGL(( writeRender), dim3(numBlocks), dim3(numThreads), 0, 0, (float4*)pos,
(float4*)moment,
(float4*)force,
(float4*)rendPos,
(float4*)rendColor,
colorFmax,
scale,
rendercut,
numParticles);
getLastCudaError("Render Kernel execution failed");
}
}
|
9112911ca42aacc81d8cb8fa568e50f4f25d3696.cu
|
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
// This file contains C wrappers around some of the CUDA API and the
// kernel functions so that they can be called from "particleSystem.cpp"
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
#if defined(__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
#include "thrust/device_ptr.h"
#include "thrust/iterator/constant_iterator.h"
#include "thrust/sort.h"
#include "thrust/count.h"
#include "thrust/functional.h"
#include "thrust/reduce.h"
#include "thrust/inner_product.h"
#include "utilities.h"
#include "particles_kernel.h"
#include "particles_kernel.cu"
using namespace thrust;
uint iDivUp(uint a, uint b)
{
return (a%b == 0) ? (a/b) : (a/b +1);
}
extern "C"
{
void cudaInit(int argc, char **argv)
{
int devID;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0) {
printf("No CUDA Capable devices found, exiting...\n");
exit(EXIT_SUCCESS);
}
}
void cudaGLInit(int argc, char **argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaGLDevice(argc, (const char **)argv);
}
void copyArrayToDevice(void* device, const void* host, int offset, int size)
{
cudaMemcpy((char *) device + offset, host, size, cudaMemcpyHostToDevice);
}
void registerGLBufferObject(uint vbo, struct cudaGraphicsResource **cuda_vbo_resource)
{
cudaGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo,
cudaGraphicsMapFlagsNone);
}
void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
cudaGraphicsUnregisterResource(cuda_vbo_resource);
}
void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource)
{
void *ptr;
cudaGraphicsMapResources(1, cuda_vbo_resource, 0);
size_t num_bytes;
cudaGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes,
*cuda_vbo_resource);
return ptr;
}
void unmapGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0);
}
void copyArrayFromDevice(void* host, const void* device,
struct cudaGraphicsResource **cuda_vbo_resource, int size)
{
if (cuda_vbo_resource)
device = mapGLBufferObject(cuda_vbo_resource);
cudaMemcpy(host, device, size, cudaMemcpyDeviceToHost);
if (cuda_vbo_resource)
unmapGLBufferObject(*cuda_vbo_resource);
}
void setParameters(SimParams *hostParams)
{
// copy parameters to constant memory
cudaMemcpyToSymbol(params, hostParams, sizeof(SimParams));
}
// compute grid and thread block size for a given number of elements
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
void sortParticles(uint *dGridParticleHash, uint *dGridParticleIndex, uint numParticles)
{
sort_by_key(device_ptr<uint>(dGridParticleHash),
device_ptr<uint>(dGridParticleHash + numParticles),
device_ptr<uint>(dGridParticleIndex));
}
struct f4norm : public unary_function<float4, float>{
__host__ __device__ float operator() (const float4 &f){
return sqrtf(f.x*f.x + f.y*f.y + f.z*f.z);
}
};
struct isOut
{
isOut(float3 bmax) : bmax(bmax) {}
__host__ __device__ bool operator()(const float4 &p){
//allows for particles to be out of bounds by ~1ULP of fp error
float err = 1 + 1e-6f;
if(isnan(p.x) || isnan(p.y) || isnan(p.z))
return true;
if(fabsf(p.x) > bmax.x*err )
return true;
if(fabsf(p.y)+p.w > bmax.y*err)
return true;
if(fabsf(p.z) > bmax.z*err )
return true;
return false;
}
const float3 bmax;
};
bool isOutofBounds(float4* positions, float3 border, uint numParticles)
{
int x = count_if(device_ptr<float4>(positions),
device_ptr<float4>(positions+numParticles),
isOut(border));
if(x>0) printf("%d particles outofbounds\n", x);
return x>0;
}
float3 magnetization(float4* moments, uint numParticles, float worldVol){
float4 totalDp = reduce(device_ptr<float4>(moments),
device_ptr<float4>(moments+numParticles),
make_float4(0,0,0,0), plus<float4>() );
return make_float3(totalDp)/worldVol;
}
uint edgeCount(float4* forces, uint numParticles){
float4 edge = reduce(device_ptr<float4>(forces),
device_ptr<float4>(forces+numParticles),
make_float4(0,0,0,0), plus<float4>());
return (uint) edge.w/2.0f;
}
//functors for finding the top and bottom particles
struct isTop : public binary_function<float4, float4, float3> {
isTop(float wsize, float cut) : pin_d(cut), wsize(wsize) {}
__host__ __device__ float3 operator()(const float4& force, const float4& pos){
if(pos.y >= wsize - pin_d*pos.w)
return make_float3(force);
else
return make_float3(0,0,0);
}
const float wsize;//half the world size
const float pin_d;
};
struct isBot : public binary_function<float4, float4, float3> {
isBot(float size, float cut) : pin_d(cut), wsize(size) {}
__host__ __device__ float3 operator()(const float4& force, const float4& pos){
if(pos.y <= -wsize + pin_d*pos.w)
return make_float3(force);
else
return make_float3(0,0,0);
}
const float pin_d;
const float wsize;
};
//the functions
float calcTopForce(float4* forces, float4* position, uint numParticles, float wsize, float cut){
float3 tforce = inner_product(device_ptr<float4>(forces),
device_ptr<float4>(forces+numParticles),device_ptr<float4>(position),
make_float3(0,0,0), plus<float3>(), isTop(wsize, cut));
return tforce.x;
}
float calcBotForce(float4* forces, float4* position, uint numParticles, float wsize, float cut){
float3 tforce = inner_product(device_ptr<float4>(forces),
device_ptr<float4>(forces+numParticles),device_ptr<float4>(position),
make_float3(0,0,0), plus<float3>(), isBot(wsize, cut));
return tforce.x;
}
//global stress functor
struct stressThing : public binary_function<float4, float4, float3>{
stressThing(float ws, float pd) : wsize(ws), pin_d(pd) {}
__host__ __device__ float3 operator()(const float4& force, const float4& pos){
if(fabsf(pos.y) <= wsize - pin_d*pos.w)
return make_float3(force.x, force.y, force.z)*pos.y;
else
return make_float3(0,0,0);
}
const float pin_d;
const float wsize;
};
float calcGlForce(float4* forces, float4* position, uint numParticles, float wsize, float cut = 0.0f){
float3 glf = inner_product(device_ptr<float4>(forces),
device_ptr<float4>(forces+numParticles), device_ptr<float4>(position),
make_float3(0,0,0), plus<float3>(), stressThing(wsize, cut));
return glf.x;
}
uint numInteractions(uint* neighList, uint numParticles){
return reduce(device_ptr<uint>(neighList), device_ptr<uint>(neighList+numParticles),
0, plus<uint>() );
}
//computes v^2 times particle volume (kinetic energy up to a constant mass/density factor)
struct kinen : public binary_function<float4, float4, float>{
kinen(float v, float ws, float pd): visc(v), wsize(ws), pin_d(pd) {}
__host__ __device__ float operator()(const float4& f, const float4& p)
{
float Cd = 6*PI_F*visc*p.w;
if(fabsf(p.y) > wsize - p.w*pin_d) {
return 0.0f;
} else {
return (f.x*f.x + f.y*f.y + f.z*f.z)/(Cd*Cd)*(4.0f/3.0f*PI_F*p.w*p.w*p.w);
}
}
const float visc;
const float wsize;
const float pin_d;
};
float calcKinEn(float4* forces, float4* position, NewParams& params){
kinen thingy = kinen(params.visc, params.L.y*0.5f, params.pin_d);
float kin = inner_product(device_ptr<float4>(forces),
device_ptr<float4>(forces+params.N), device_ptr<float4>(position),
0.0f, plus<float>(), thingy );
return kin*0.5f;
}
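//largest force magnitude over all particles; f4norm (defined elsewhere) maps each float4 to its length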
float maxforce(float4* forces, uint numParticles) {
return transform_reduce(device_ptr<float4>(forces), device_ptr<float4>(forces+numParticles),
f4norm(),0.0f, maximum<float>());
}
struct pvel : public binary_function<float4, float4, float> {
pvel(float v, float ws, float pdist) : visc(v), wsize(ws), pin_d(pdist) {}
__host__ __device__ float operator()(const float4 &f, const float4 &p) {
float Cd = 6*PI_F*visc*p.w;
if(fabsf(p.y) > wsize - p.w*pin_d){
return 0.0f;
} else {
return sqrtf(f.x*f.x + f.y*f.y + f.z*f.z)/Cd;
}
}
const float visc;
const float wsize;
const float pin_d;
};
float maxvel(float4* forces, float4* positions, NewParams& params){
//use pos.w to get radius,
pvel vel_calc = pvel(params.visc, params.L.y*0.5f, params.pin_d);
return inner_product(device_ptr<float4>(forces), device_ptr<float4>(forces+params.N),
device_ptr<float4>(positions), 0.0f, maximum<float>(), vel_calc);
}
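//rigid translation: add the shift vector s to every particle position (the w component is left unchanged)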
void pshift(float4* positions, float3 s, uint numParticles){
float4 shift = make_float4(s, 0.0f);
constant_iterator<float4> shifter(shift);
thrust::transform(device_ptr<float4>(positions), device_ptr<float4>(positions+numParticles),
shifter, device_ptr<float4>(positions), plus<float4>());
}
struct isExcessForce
{
isExcessForce(float force) : force(force) {}
__host__ __device__ bool operator()(const float4 &f){
if(f.x*f.x + f.y*f.y + f.z*f.z > force*force )
return true;
return false;
}
const float force;
};
bool excessForce(float4* forces, float maxforce, uint numParticles){
int x = count_if(device_ptr<float4>(forces),
device_ptr<float4>(forces+numParticles),
isExcessForce(maxforce));
if(x>0) printf("%d particles with excessive movement\n", x);
return x>0;
}
struct mom_reset
{
mom_reset(float3 H) : extH(H) {}
__host__ __device__ float4 operator()(const float4& m){
return make_float4(extH*m.w, m.w);
}
const float3 extH;
};
void resetMom(float4* moments, float3 extH, uint numParticles){
transform(device_ptr<float4>(moments), device_ptr<float4>(moments+numParticles),
device_ptr<float4>(moments), mom_reset(extH));
}
struct renderCut
{
renderCut(float4 r) : cut(r) {}
__host__ __device__ float4 operator()(const float4& p){
float4 returned = p;
if(p.x > cut.x || p.y > cut.y || p.z > cut.z || p.w > cut.w){
returned.w = 0;
}
return returned;
}
const float4 cut;
};
void renderCutKern(float4* pos, float4 minPos, uint numParticles) {
device_ptr<float4> dpos(pos);
transform(dpos, dpos+numParticles, dpos, renderCut(minPos));
}
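//per-particle error estimate for an embedded Bogacki-Shampine 3(2) step: weighted difference of the
//third- and second-order stage combinations, converted to a displacement via dt/Cd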
struct bogacki
{
bogacki(float dt, float c) : deltaTime(dt), Cd(c) {}
__host__ __device__ float operator() (const tuple<float4, float4, float4, float4>& p) {
float3 k1 = make_float3(get<0>(p));
float3 k2 = make_float3(get<1>(p));
float3 k3 = make_float3(get<2>(p));
float3 k4 = make_float3(get<3>(p));
float3 error = (float)(2.0/9.0 - 7.0/24.0)*k1 + (float)(3.0/9.0 - 6.0/24.0)*k2 +
(float)(4.0/9.0 - 8.0/24.0)*k3 + (float)(0.0 - 3.0/24.0)*k4;
return deltaTime*length(error)/Cd;
}
const float deltaTime;
const float Cd;
};
float bogacki_error(float4* k1, float4* k2, float4* k3, float4* k4, uint N, float Cd, float deltaTime) {
device_ptr<float4> dk1 = device_ptr<float4>(k1);
device_ptr<float4> dk2 = device_ptr<float4>(k2);
device_ptr<float4> dk3 = device_ptr<float4>(k3);
device_ptr<float4> dk4 = device_ptr<float4>(k4);
//transform reduce, unary func computes error, reducing to find max
return transform_reduce(
make_zip_iterator(make_tuple(dk1, dk2, dk3,dk4)),
make_zip_iterator(make_tuple(dk1+N, dk2+N, dk3+N,dk4+N)),
bogacki(deltaTime,Cd),
0.0f,
maximum<float>() );
}
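//launches writeRender to fill the render position/color buffers from the simulation arrays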
void renderStuff(const float* pos,
const float* moment,
const float* force,
float* rendPos,
float* rendColor,
float colorFmax,
float scale,
float rendercut,
uint numParticles)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
writeRender<<<numBlocks, numThreads>>>((float4*)pos,
(float4*)moment,
(float4*)force,
(float4*)rendPos,
(float4*)rendColor,
colorFmax,
scale,
rendercut,
numParticles);
getLastCudaError("Render Kernel execution failed");
}
}
|
ca651f7c20e6b07ada3f1eccdef90d831baa0604.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
A basic CUDA demonstration. Two random vectors are added together
in serial and using a GPU accelerator.
To compile, use:
make
NOTE: CUDA must be installed/loaded before running make. Also, the
Makefile will probably have to be customized for your system.
To run, use for example:
./cuda_vecadd 100000000
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
// Add two vectors in serial
void vecAdd(double *h_A, double *h_B, double *h_C, int n)
{
for( int i = 0; i < n; i++ )
h_C[i] = h_A[i] + h_B[i];
}
// The CUDA vector addition kernel
__global__
void cudaVecAddKernel( double* A, double* B, double* D, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<n) D[i] = A[i] + B[i];
}
// Add two vectors in CUDA
void cuda_vecAdd(double *h_A, double *h_B, double *h_D, int n)
{
    size_t size = n * sizeof(double);
double *d_A, *d_B, *d_D;
hipError_t err1 = hipSuccess;
hipError_t err2 = hipSuccess;
hipError_t err3 = hipSuccess;
// Allocate memory on the GPU
err1 = hipMalloc((void **) &d_A, size);
err2 = hipMalloc((void **) &d_B, size);
err3 = hipMalloc((void **) &d_D, size);
if(err1 != hipSuccess) {
printf("\n%s in %s at line %d\n", hipGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if(err2 != hipSuccess) {
printf("\n%s in %s at line %d\n", hipGetErrorString(err2), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if(err3 != hipSuccess) {
printf("\n%s in %s at line %d\n", hipGetErrorString(err3), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// Copy the data to the GPU
err1 = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
err2 = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if(err1 != hipSuccess) {
printf("\n%s in %s at line %d\n", hipGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if(err2 != hipSuccess) {
printf("\n%s in %s at line %d\n", hipGetErrorString(err2), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// CUDA kernel
int threads = 1024; // Threads per block
int blocks = (n + threads - 1) / threads; // Blocks per grid
printf("\n CUDA kernel was launched with %d blocks of %d threads...", blocks, threads);
hipLaunchKernelGGL(( cudaVecAddKernel), dim3(blocks), dim3(threads), 0, 0, d_A, d_B, d_D, n);
err1 = hipGetLastError();
if(err1 != hipSuccess) {
printf("\n%s in %s at line %d\n", hipGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// Copy the results back to the host
err1 = hipMemcpy(h_D, d_D, size, hipMemcpyDeviceToHost);
if(err1 != hipSuccess) {
printf("\n%s in %s at line %d\n", hipGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// Deallocate memory on the GPU
err1 = hipFree(d_A);
err2 = hipFree(d_B);
err3 = hipFree(d_D);
if(err1 != hipSuccess) {
printf("\n%s in %s at line %d\n", hipGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if(err2 != hipSuccess) {
printf("\n%s in %s at line %d\n", hipGetErrorString(err2), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if(err3 != hipSuccess) {
printf("\n%s in %s at line %d\n", hipGetErrorString(err3), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
// Timer
double getTime()
{
struct timeval TV;
struct timezone TZ;
const int RC = gettimeofday(&TV, &TZ);
return( ((double)TV.tv_sec) + ((double)TV.tv_usec)*1.0e-6 );
}
// Main program
int main (int argc, char** argv)
{
unsigned int n, i;
double t0, t1, t2, t3;
    if (argc < 2) {
        printf("Usage: %s <vector_length>\n", argv[0]);
        return EXIT_FAILURE;
    }
    n = atoi(argv[1]);
    printf("\nn = %u", n);
double * A = (double*) malloc( n * sizeof(double) );
double * B = (double*) malloc( n * sizeof(double) );
double * C = (double*) malloc( n * sizeof(double) );
double * D = (double*) malloc( n * sizeof(double) );
for (i = 0; i < n; ++i) {
A[i] = ((double) rand()/RAND_MAX);
B[i] = ((double) rand()/RAND_MAX);
}
// Add the two vectors in serial
t0 = getTime();
vecAdd(A, B, C, n);
t1 = getTime();
printf("\n Serial addition: %f sec.", t1 - t0);
// Add the two vectors using CUDA
t2 = getTime();
cuda_vecAdd(A, B, D, n);
t3 = getTime();
printf("\n CUDA addition: %f sec.\n\n", t3 - t2);
// Verify that the two results are the same
for (i = 0; i < n; ++i) {
if( C[i] != D[i])
{
printf("\nERROR! Outputs do not match at index %d", i);
break;
}
}
// Free host memory
free(A);
free(B);
free(C);
free(D);
return 0;
}
|
ca651f7c20e6b07ada3f1eccdef90d831baa0604.cu
|
/*
A basic CUDA demonstration. Two random vectors are added together
in serial and using a GPU accelerator.
To compile, use:
make
NOTE: CUDA must be installed/loaded before running make. Also, the
Makefile will probably have to be customized for your system.
To run, use for example:
./cuda_vecadd 100000000
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
// Add two vectors in serial
void vecAdd(double *h_A, double *h_B, double *h_C, int n)
{
for( int i = 0; i < n; i++ )
h_C[i] = h_A[i] + h_B[i];
}
// The CUDA vector addition kernel
__global__
void cudaVecAddKernel( double* A, double* B, double* D, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<n) D[i] = A[i] + B[i];
}
// Add two vectors in CUDA
void cuda_vecAdd(double *h_A, double *h_B, double *h_D, int n)
{
    size_t size = n * sizeof(double);
double *d_A, *d_B, *d_D;
cudaError_t err1 = cudaSuccess;
cudaError_t err2 = cudaSuccess;
cudaError_t err3 = cudaSuccess;
// Allocate memory on the GPU
err1 = cudaMalloc((void **) &d_A, size);
err2 = cudaMalloc((void **) &d_B, size);
err3 = cudaMalloc((void **) &d_D, size);
if(err1 != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if(err2 != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err2), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if(err3 != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err3), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// Copy the data to the GPU
err1 = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
err2 = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if(err1 != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if(err2 != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err2), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// CUDA kernel
int threads = 1024; // Threads per block
int blocks = (n + threads - 1) / threads; // Blocks per grid
printf("\n CUDA kernel was launched with %d blocks of %d threads...", blocks, threads);
cudaVecAddKernel<<<blocks, threads>>>(d_A, d_B, d_D, n);
err1 = cudaGetLastError();
if(err1 != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// Copy the results back to the host
err1 = cudaMemcpy(h_D, d_D, size, cudaMemcpyDeviceToHost);
if(err1 != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// Deallocate memory on the GPU
err1 = cudaFree(d_A);
err2 = cudaFree(d_B);
err3 = cudaFree(d_D);
if(err1 != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err1), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if(err2 != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err2), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
if(err3 != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err3), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
// Timer
double getTime()
{
struct timeval TV;
struct timezone TZ;
const int RC = gettimeofday(&TV, &TZ);
return( ((double)TV.tv_sec) + ((double)TV.tv_usec)*1.0e-6 );
}
// Main program
int main (int argc, char** argv)
{
unsigned int n, i;
double t0, t1, t2, t3;
    if (argc < 2) {
        printf("Usage: %s <vector_length>\n", argv[0]);
        return EXIT_FAILURE;
    }
    n = atoi(argv[1]);
    printf("\nn = %u", n);
double * A = (double*) malloc( n * sizeof(double) );
double * B = (double*) malloc( n * sizeof(double) );
double * C = (double*) malloc( n * sizeof(double) );
double * D = (double*) malloc( n * sizeof(double) );
for (i = 0; i < n; ++i) {
A[i] = ((double) rand()/RAND_MAX);
B[i] = ((double) rand()/RAND_MAX);
}
// Add the two vectors in serial
t0 = getTime();
vecAdd(A, B, C, n);
t1 = getTime();
printf("\n Serial addition: %f sec.", t1 - t0);
// Add the two vectors using CUDA
t2 = getTime();
cuda_vecAdd(A, B, D, n);
t3 = getTime();
printf("\n CUDA addition: %f sec.\n\n", t3 - t2);
// Verify that the two results are the same
for (i = 0; i < n; ++i) {
if( C[i] != D[i])
{
printf("\nERROR! Outputs do not match at index %d", i);
break;
}
}
// Free host memory
free(A);
free(B);
free(C);
free(D);
return 0;
}
|
d20a5634cde8e5f355ae97f068bdaa7d55bdfea9.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <algorithm>
#include <cassert>
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h"
#include "paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.h"
#include "paddle/fluid/operators/detection/yolo_box_op.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
YoloBoxPlugin::YoloBoxPlugin(const nvinfer1::DataType data_type,
const std::vector<int>& anchors,
const int class_num, const float conf_thresh,
const int downsample_ratio, const bool clip_bbox,
const float scale_x_y, const int input_h,
const int input_w)
: data_type_(data_type),
class_num_(class_num),
conf_thresh_(conf_thresh),
downsample_ratio_(downsample_ratio),
clip_bbox_(clip_bbox),
scale_x_y_(scale_x_y),
input_h_(input_h),
input_w_(input_w) {
anchors_.insert(anchors_.end(), anchors.cbegin(), anchors.cend());
assert(data_type_ == nvinfer1::DataType::kFLOAT ||
data_type_ == nvinfer1::DataType::kHALF);
assert(class_num_ > 0);
assert(input_h_ > 0);
assert(input_w_ > 0);
hipMalloc(&anchors_device_, anchors.size() * sizeof(int));
hipMemcpy(anchors_device_, anchors.data(), anchors.size() * sizeof(int),
hipMemcpyHostToDevice);
}
YoloBoxPlugin::YoloBoxPlugin(const void* data, size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &anchors_);
DeserializeValue(&data, &length, &class_num_);
DeserializeValue(&data, &length, &conf_thresh_);
DeserializeValue(&data, &length, &downsample_ratio_);
DeserializeValue(&data, &length, &clip_bbox_);
DeserializeValue(&data, &length, &scale_x_y_);
DeserializeValue(&data, &length, &input_h_);
DeserializeValue(&data, &length, &input_w_);
}
YoloBoxPlugin::~YoloBoxPlugin() {
if (anchors_device_ != nullptr) {
hipFree(anchors_device_);
anchors_device_ = nullptr;
}
}
const char* YoloBoxPlugin::getPluginType() const { return "yolo_box_plugin"; }
const char* YoloBoxPlugin::getPluginVersion() const { return "1"; }
int YoloBoxPlugin::getNbOutputs() const { return 2; }
nvinfer1::Dims YoloBoxPlugin::getOutputDimensions(int index,
const nvinfer1::Dims* inputs,
int nb_input_dims) {
const int anchor_num = anchors_.size() / 2;
const int box_num = inputs[0].d[1] * inputs[0].d[2] * anchor_num;
assert(index <= 1);
if (index == 0) {
return nvinfer1::Dims2(box_num, 4);
}
return nvinfer1::Dims2(box_num, class_num_);
}
bool YoloBoxPlugin::supportsFormat(nvinfer1::DataType type,
nvinfer1::TensorFormat format) const {
return ((type == data_type_ || type == nvinfer1::DataType::kINT32) &&
format == nvinfer1::TensorFormat::kLINEAR);
}
size_t YoloBoxPlugin::getWorkspaceSize(int max_batch_size) const { return 0; }
template <typename T>
__device__ inline T sigmoid(T x) {
return 1. / (1. + exp(-x));
}
template <>
__device__ inline float sigmoid(float x) {
return 1.f / (1.f + expf(-x));
}
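// Decodes one raw prediction into a box: sigmoid (with scale/bias) on the x/y cell offsets,
// exp on w/h scaled by the anchor, everything rescaled to image pixels.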
template <typename T>
__device__ inline void GetYoloBox(float* box, const T* x, const int* anchors,
int i, int j, int an_idx, int grid_size_h,
int grid_size_w, int input_size_h,
int input_size_w, int index, int stride,
int img_height, int img_width, float scale,
float bias) {
box[0] = static_cast<float>(
(i + sigmoid(static_cast<float>(x[index]) * scale + bias)) * img_width /
grid_size_w);
box[1] = static_cast<float>(
(j + sigmoid(static_cast<float>(x[index + stride]) * scale + bias)) *
img_height / grid_size_h);
box[2] = static_cast<float>(expf(static_cast<float>(x[index + 2 * stride])) *
anchors[2 * an_idx] * img_width / input_size_w);
box[3] =
static_cast<float>(expf(static_cast<float>(x[index + 3 * stride])) *
anchors[2 * an_idx + 1] * img_height / input_size_h);
}
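// Flat index into the (batch, anchor, entry, h*w) prediction layout; entries 0-3 hold the box,
// entry 4 the objectness logit, entries 5+ the class logits.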
__device__ inline int GetEntryIndex(int batch, int an_idx, int hw_idx,
int an_num, int an_stride, int stride,
int entry) {
return (batch * an_num + an_idx) * an_stride + entry * stride + hw_idx;
}
template <typename T>
__device__ inline void CalcDetectionBox(T* boxes, const float* box,
const int box_idx, const int img_height,
const int img_width, bool clip_bbox) {
float tmp_box_0, tmp_box_1, tmp_box_2, tmp_box_3;
tmp_box_0 = box[0] - box[2] / 2;
tmp_box_1 = box[1] - box[3] / 2;
tmp_box_2 = box[0] + box[2] / 2;
tmp_box_3 = box[1] + box[3] / 2;
if (clip_bbox) {
tmp_box_0 = max(tmp_box_0, 0.f);
tmp_box_1 = max(tmp_box_1, 0.f);
tmp_box_2 = min(tmp_box_2, static_cast<float>(img_width - 1));
tmp_box_3 = min(tmp_box_3, static_cast<float>(img_height - 1));
}
boxes[box_idx + 0] = static_cast<T>(tmp_box_0);
boxes[box_idx + 1] = static_cast<T>(tmp_box_1);
boxes[box_idx + 2] = static_cast<T>(tmp_box_2);
boxes[box_idx + 3] = static_cast<T>(tmp_box_3);
}
template <typename T>
__device__ inline void CalcLabelScore(T* scores, const T* input,
const int label_idx, const int score_idx,
const int class_num, const float conf,
const int stride) {
for (int i = 0; i < class_num; i++) {
scores[score_idx + i] = static_cast<T>(
conf * sigmoid(static_cast<float>(input[label_idx + i * stride])));
}
}
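// One thread per (image, anchor, cell) prediction with a grid-stride loop: computes objectness,
// decodes the box (zeroed when objectness is below conf_thresh), and writes class scores as
// objectness * sigmoid(class logit).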
template <typename T>
__global__ void KeYoloBoxFw(const T* const input, const int* const imgsize,
T* boxes, T* scores, const float conf_thresh,
const int* anchors, const int n, const int h,
const int w, const int an_num, const int class_num,
const int box_num, int input_size_h,
int input_size_w, bool clip_bbox, const float scale,
const float bias) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
float box[4];
for (; tid < n * box_num; tid += stride) {
int grid_num = h * w;
int i = tid / box_num;
int j = (tid % box_num) / grid_num;
int k = (tid % grid_num) / w;
int l = tid % w;
int an_stride = (5 + class_num) * grid_num;
int img_height = imgsize[2 * i];
int img_width = imgsize[2 * i + 1];
int obj_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 4);
float conf = sigmoid(static_cast<float>(input[obj_idx]));
int box_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 0);
if (conf < conf_thresh) {
for (int i = 0; i < 4; ++i) {
box[i] = 0.f;
}
} else {
GetYoloBox<T>(box, input, anchors, l, k, j, h, w, input_size_h,
input_size_w, box_idx, grid_num, img_height, img_width,
scale, bias);
}
box_idx = (i * box_num + j * grid_num + k * w + l) * 4;
CalcDetectionBox<T>(boxes, box, box_idx, img_height, img_width, clip_bbox);
int label_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 5);
int score_idx = (i * box_num + j * grid_num + k * w + l) * class_num;
CalcLabelScore<T>(scores, input, label_idx, score_idx, class_num, conf,
grid_num);
}
}
template <typename T>
int YoloBoxPlugin::enqueue_impl(int batch_size, const void* const* inputs,
void** outputs, void* workspace,
hipStream_t stream) {
const int n = batch_size;
const int h = input_h_;
const int w = input_w_;
const int an_num = anchors_.size() / 2;
const int box_num = h * w * an_num;
int input_size_h = downsample_ratio_ * h;
int input_size_w = downsample_ratio_ * w;
float bias = -0.5 * (scale_x_y_ - 1.);
constexpr int threads = 256;
hipLaunchKernelGGL(( KeYoloBoxFw<T>), dim3((n * box_num + threads - 1) / threads), dim3(threads), 0, stream,
reinterpret_cast<const T* const>(inputs[0]),
reinterpret_cast<const int* const>(inputs[1]),
reinterpret_cast<T*>(outputs[0]), reinterpret_cast<T*>(outputs[1]),
conf_thresh_, anchors_device_, n, h, w, an_num, class_num_, box_num,
input_size_h, input_size_w, clip_bbox_, scale_x_y_, bias);
return hipGetLastError() != hipSuccess;
}
int YoloBoxPlugin::enqueue(int batch_size, const void* const* inputs,
void** outputs, void* workspace,
hipStream_t stream) {
if (data_type_ == nvinfer1::DataType::kFLOAT) {
return enqueue_impl<float>(batch_size, inputs, outputs, workspace, stream);
} else if (data_type_ == nvinfer1::DataType::kHALF) {
return enqueue_impl<half>(batch_size, inputs, outputs, workspace, stream);
}
assert("unsupported type.");
}
int YoloBoxPlugin::initialize() { return 0; }
void YoloBoxPlugin::terminate() {}
size_t YoloBoxPlugin::getSerializationSize() const {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(anchors_);
serialize_size += SerializedSize(class_num_);
serialize_size += SerializedSize(conf_thresh_);
serialize_size += SerializedSize(downsample_ratio_);
serialize_size += SerializedSize(clip_bbox_);
serialize_size += SerializedSize(scale_x_y_);
serialize_size += SerializedSize(input_h_);
serialize_size += SerializedSize(input_w_);
return serialize_size;
}
void YoloBoxPlugin::serialize(void* buffer) const {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, anchors_);
SerializeValue(&buffer, class_num_);
SerializeValue(&buffer, conf_thresh_);
SerializeValue(&buffer, downsample_ratio_);
SerializeValue(&buffer, clip_bbox_);
SerializeValue(&buffer, scale_x_y_);
SerializeValue(&buffer, input_h_);
SerializeValue(&buffer, input_w_);
}
void YoloBoxPlugin::destroy() {
  // anchors_device_ is freed in the destructor invoked by delete; freeing it
  // here as well would double-free the device buffer.
  delete this;
}
void YoloBoxPlugin::setPluginNamespace(const char* lib_namespace) {
namespace_ = std::string(lib_namespace);
}
const char* YoloBoxPlugin::getPluginNamespace() const {
return namespace_.c_str();
}
nvinfer1::DataType YoloBoxPlugin::getOutputDataType(
int index, const nvinfer1::DataType* input_type, int nb_inputs) const {
return data_type_;
}
bool YoloBoxPlugin::isOutputBroadcastAcrossBatch(int output_index,
const bool* input_is_broadcast,
int nb_inputs) const {
return false;
}
bool YoloBoxPlugin::canBroadcastInputAcrossBatch(int input_index) const {
return false;
}
void YoloBoxPlugin::configurePlugin(
const nvinfer1::Dims* input_dims, int nb_inputs,
const nvinfer1::Dims* output_dims, int nb_outputs,
const nvinfer1::DataType* input_types,
const nvinfer1::DataType* output_types, const bool* input_is_broadcast,
const bool* output_is_broadcast, nvinfer1::PluginFormat float_format,
int max_batct_size) {}
nvinfer1::IPluginV2Ext* YoloBoxPlugin::clone() const {
return new YoloBoxPlugin(data_type_, anchors_, class_num_, conf_thresh_,
downsample_ratio_, clip_bbox_, scale_x_y_, input_h_,
input_w_);
}
YoloBoxPluginCreator::YoloBoxPluginCreator() {}
void YoloBoxPluginCreator::setPluginNamespace(const char* lib_namespace) {
namespace_ = std::string(lib_namespace);
}
const char* YoloBoxPluginCreator::getPluginNamespace() const {
return namespace_.c_str();
}
const char* YoloBoxPluginCreator::getPluginName() const {
return "yolo_box_plugin";
}
const char* YoloBoxPluginCreator::getPluginVersion() const { return "1"; }
const nvinfer1::PluginFieldCollection* YoloBoxPluginCreator::getFieldNames() {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* YoloBoxPluginCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) {
const nvinfer1::PluginField* fields = fc->fields;
int type_id = -1;
std::vector<int> anchors;
int class_num = -1;
float conf_thresh = 0.01;
int downsample_ratio = 32;
bool clip_bbox = true;
float scale_x_y = 1.;
int h = -1;
int w = -1;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string field_name(fc->fields[i].name);
if (field_name.compare("type_id") == 0) {
type_id = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("anchors")) {
const int length = fc->fields[i].length;
const int* data = static_cast<const int*>(fc->fields[i].data);
anchors.insert(anchors.end(), data, data + length);
} else if (field_name.compare("class_num")) {
class_num = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("conf_thresh")) {
conf_thresh = *static_cast<const float*>(fc->fields[i].data);
} else if (field_name.compare("downsample_ratio")) {
downsample_ratio = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("clip_bbox")) {
clip_bbox = *static_cast<const bool*>(fc->fields[i].data);
} else if (field_name.compare("scale_x_y")) {
scale_x_y = *static_cast<const float*>(fc->fields[i].data);
} else if (field_name.compare("h")) {
h = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("w")) {
w = *static_cast<const int*>(fc->fields[i].data);
} else {
assert(false && "unknown plugin field name.");
}
}
return new YoloBoxPlugin(
type_id ? nvinfer1::DataType::kHALF : nvinfer1::DataType::kFLOAT, anchors,
class_num, conf_thresh, downsample_ratio, clip_bbox, scale_x_y, h, w);
}
nvinfer1::IPluginV2Ext* YoloBoxPluginCreator::deserializePlugin(
const char* name, const void* serial_data, size_t serial_length) {
auto plugin = new YoloBoxPlugin(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
d20a5634cde8e5f355ae97f068bdaa7d55bdfea9.cu
|
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <cassert>
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h"
#include "paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.h"
#include "paddle/fluid/operators/detection/yolo_box_op.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
YoloBoxPlugin::YoloBoxPlugin(const nvinfer1::DataType data_type,
const std::vector<int>& anchors,
const int class_num, const float conf_thresh,
const int downsample_ratio, const bool clip_bbox,
const float scale_x_y, const int input_h,
const int input_w)
: data_type_(data_type),
class_num_(class_num),
conf_thresh_(conf_thresh),
downsample_ratio_(downsample_ratio),
clip_bbox_(clip_bbox),
scale_x_y_(scale_x_y),
input_h_(input_h),
input_w_(input_w) {
anchors_.insert(anchors_.end(), anchors.cbegin(), anchors.cend());
assert(data_type_ == nvinfer1::DataType::kFLOAT ||
data_type_ == nvinfer1::DataType::kHALF);
assert(class_num_ > 0);
assert(input_h_ > 0);
assert(input_w_ > 0);
cudaMalloc(&anchors_device_, anchors.size() * sizeof(int));
cudaMemcpy(anchors_device_, anchors.data(), anchors.size() * sizeof(int),
cudaMemcpyHostToDevice);
}
YoloBoxPlugin::YoloBoxPlugin(const void* data, size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &anchors_);
DeserializeValue(&data, &length, &class_num_);
DeserializeValue(&data, &length, &conf_thresh_);
DeserializeValue(&data, &length, &downsample_ratio_);
DeserializeValue(&data, &length, &clip_bbox_);
DeserializeValue(&data, &length, &scale_x_y_);
DeserializeValue(&data, &length, &input_h_);
DeserializeValue(&data, &length, &input_w_);
}
YoloBoxPlugin::~YoloBoxPlugin() {
if (anchors_device_ != nullptr) {
cudaFree(anchors_device_);
anchors_device_ = nullptr;
}
}
const char* YoloBoxPlugin::getPluginType() const { return "yolo_box_plugin"; }
const char* YoloBoxPlugin::getPluginVersion() const { return "1"; }
int YoloBoxPlugin::getNbOutputs() const { return 2; }
nvinfer1::Dims YoloBoxPlugin::getOutputDimensions(int index,
const nvinfer1::Dims* inputs,
int nb_input_dims) {
const int anchor_num = anchors_.size() / 2;
const int box_num = inputs[0].d[1] * inputs[0].d[2] * anchor_num;
assert(index <= 1);
if (index == 0) {
return nvinfer1::Dims2(box_num, 4);
}
return nvinfer1::Dims2(box_num, class_num_);
}
bool YoloBoxPlugin::supportsFormat(nvinfer1::DataType type,
nvinfer1::TensorFormat format) const {
return ((type == data_type_ || type == nvinfer1::DataType::kINT32) &&
format == nvinfer1::TensorFormat::kLINEAR);
}
size_t YoloBoxPlugin::getWorkspaceSize(int max_batch_size) const { return 0; }
template <typename T>
__device__ inline T sigmoid(T x) {
return 1. / (1. + exp(-x));
}
template <>
__device__ inline float sigmoid(float x) {
return 1.f / (1.f + expf(-x));
}
template <typename T>
__device__ inline void GetYoloBox(float* box, const T* x, const int* anchors,
int i, int j, int an_idx, int grid_size_h,
int grid_size_w, int input_size_h,
int input_size_w, int index, int stride,
int img_height, int img_width, float scale,
float bias) {
box[0] = static_cast<float>(
(i + sigmoid(static_cast<float>(x[index]) * scale + bias)) * img_width /
grid_size_w);
box[1] = static_cast<float>(
(j + sigmoid(static_cast<float>(x[index + stride]) * scale + bias)) *
img_height / grid_size_h);
box[2] = static_cast<float>(expf(static_cast<float>(x[index + 2 * stride])) *
anchors[2 * an_idx] * img_width / input_size_w);
box[3] =
static_cast<float>(expf(static_cast<float>(x[index + 3 * stride])) *
anchors[2 * an_idx + 1] * img_height / input_size_h);
}
__device__ inline int GetEntryIndex(int batch, int an_idx, int hw_idx,
int an_num, int an_stride, int stride,
int entry) {
return (batch * an_num + an_idx) * an_stride + entry * stride + hw_idx;
}
template <typename T>
__device__ inline void CalcDetectionBox(T* boxes, const float* box,
const int box_idx, const int img_height,
const int img_width, bool clip_bbox) {
float tmp_box_0, tmp_box_1, tmp_box_2, tmp_box_3;
tmp_box_0 = box[0] - box[2] / 2;
tmp_box_1 = box[1] - box[3] / 2;
tmp_box_2 = box[0] + box[2] / 2;
tmp_box_3 = box[1] + box[3] / 2;
if (clip_bbox) {
tmp_box_0 = max(tmp_box_0, 0.f);
tmp_box_1 = max(tmp_box_1, 0.f);
tmp_box_2 = min(tmp_box_2, static_cast<float>(img_width - 1));
tmp_box_3 = min(tmp_box_3, static_cast<float>(img_height - 1));
}
boxes[box_idx + 0] = static_cast<T>(tmp_box_0);
boxes[box_idx + 1] = static_cast<T>(tmp_box_1);
boxes[box_idx + 2] = static_cast<T>(tmp_box_2);
boxes[box_idx + 3] = static_cast<T>(tmp_box_3);
}
template <typename T>
__device__ inline void CalcLabelScore(T* scores, const T* input,
const int label_idx, const int score_idx,
const int class_num, const float conf,
const int stride) {
for (int i = 0; i < class_num; i++) {
scores[score_idx + i] = static_cast<T>(
conf * sigmoid(static_cast<float>(input[label_idx + i * stride])));
}
}
template <typename T>
__global__ void KeYoloBoxFw(const T* const input, const int* const imgsize,
T* boxes, T* scores, const float conf_thresh,
const int* anchors, const int n, const int h,
const int w, const int an_num, const int class_num,
const int box_num, int input_size_h,
int input_size_w, bool clip_bbox, const float scale,
const float bias) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
float box[4];
for (; tid < n * box_num; tid += stride) {
int grid_num = h * w;
int i = tid / box_num;
int j = (tid % box_num) / grid_num;
int k = (tid % grid_num) / w;
int l = tid % w;
int an_stride = (5 + class_num) * grid_num;
int img_height = imgsize[2 * i];
int img_width = imgsize[2 * i + 1];
int obj_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 4);
float conf = sigmoid(static_cast<float>(input[obj_idx]));
int box_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 0);
if (conf < conf_thresh) {
for (int i = 0; i < 4; ++i) {
box[i] = 0.f;
}
} else {
GetYoloBox<T>(box, input, anchors, l, k, j, h, w, input_size_h,
input_size_w, box_idx, grid_num, img_height, img_width,
scale, bias);
}
box_idx = (i * box_num + j * grid_num + k * w + l) * 4;
CalcDetectionBox<T>(boxes, box, box_idx, img_height, img_width, clip_bbox);
int label_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 5);
int score_idx = (i * box_num + j * grid_num + k * w + l) * class_num;
CalcLabelScore<T>(scores, input, label_idx, score_idx, class_num, conf,
grid_num);
}
}
template <typename T>
int YoloBoxPlugin::enqueue_impl(int batch_size, const void* const* inputs,
void** outputs, void* workspace,
cudaStream_t stream) {
const int n = batch_size;
const int h = input_h_;
const int w = input_w_;
const int an_num = anchors_.size() / 2;
const int box_num = h * w * an_num;
int input_size_h = downsample_ratio_ * h;
int input_size_w = downsample_ratio_ * w;
float bias = -0.5 * (scale_x_y_ - 1.);
constexpr int threads = 256;
KeYoloBoxFw<T><<<(n * box_num + threads - 1) / threads, threads, 0, stream>>>(
reinterpret_cast<const T* const>(inputs[0]),
reinterpret_cast<const int* const>(inputs[1]),
reinterpret_cast<T*>(outputs[0]), reinterpret_cast<T*>(outputs[1]),
conf_thresh_, anchors_device_, n, h, w, an_num, class_num_, box_num,
input_size_h, input_size_w, clip_bbox_, scale_x_y_, bias);
return cudaGetLastError() != cudaSuccess;
}
int YoloBoxPlugin::enqueue(int batch_size, const void* const* inputs,
void** outputs, void* workspace,
cudaStream_t stream) {
if (data_type_ == nvinfer1::DataType::kFLOAT) {
return enqueue_impl<float>(batch_size, inputs, outputs, workspace, stream);
} else if (data_type_ == nvinfer1::DataType::kHALF) {
return enqueue_impl<half>(batch_size, inputs, outputs, workspace, stream);
}
assert("unsupported type.");
}
int YoloBoxPlugin::initialize() { return 0; }
void YoloBoxPlugin::terminate() {}
size_t YoloBoxPlugin::getSerializationSize() const {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(anchors_);
serialize_size += SerializedSize(class_num_);
serialize_size += SerializedSize(conf_thresh_);
serialize_size += SerializedSize(downsample_ratio_);
serialize_size += SerializedSize(clip_bbox_);
serialize_size += SerializedSize(scale_x_y_);
serialize_size += SerializedSize(input_h_);
serialize_size += SerializedSize(input_w_);
return serialize_size;
}
void YoloBoxPlugin::serialize(void* buffer) const {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, anchors_);
SerializeValue(&buffer, class_num_);
SerializeValue(&buffer, conf_thresh_);
SerializeValue(&buffer, downsample_ratio_);
SerializeValue(&buffer, clip_bbox_);
SerializeValue(&buffer, scale_x_y_);
SerializeValue(&buffer, input_h_);
SerializeValue(&buffer, input_w_);
}
void YoloBoxPlugin::destroy() {
  // anchors_device_ is freed in the destructor invoked by delete; freeing it
  // here as well would double-free the device buffer.
  delete this;
}
void YoloBoxPlugin::setPluginNamespace(const char* lib_namespace) {
namespace_ = std::string(lib_namespace);
}
const char* YoloBoxPlugin::getPluginNamespace() const {
return namespace_.c_str();
}
nvinfer1::DataType YoloBoxPlugin::getOutputDataType(
int index, const nvinfer1::DataType* input_type, int nb_inputs) const {
return data_type_;
}
bool YoloBoxPlugin::isOutputBroadcastAcrossBatch(int output_index,
const bool* input_is_broadcast,
int nb_inputs) const {
return false;
}
bool YoloBoxPlugin::canBroadcastInputAcrossBatch(int input_index) const {
return false;
}
void YoloBoxPlugin::configurePlugin(
const nvinfer1::Dims* input_dims, int nb_inputs,
const nvinfer1::Dims* output_dims, int nb_outputs,
const nvinfer1::DataType* input_types,
const nvinfer1::DataType* output_types, const bool* input_is_broadcast,
const bool* output_is_broadcast, nvinfer1::PluginFormat float_format,
int max_batct_size) {}
nvinfer1::IPluginV2Ext* YoloBoxPlugin::clone() const {
return new YoloBoxPlugin(data_type_, anchors_, class_num_, conf_thresh_,
downsample_ratio_, clip_bbox_, scale_x_y_, input_h_,
input_w_);
}
YoloBoxPluginCreator::YoloBoxPluginCreator() {}
void YoloBoxPluginCreator::setPluginNamespace(const char* lib_namespace) {
namespace_ = std::string(lib_namespace);
}
const char* YoloBoxPluginCreator::getPluginNamespace() const {
return namespace_.c_str();
}
const char* YoloBoxPluginCreator::getPluginName() const {
return "yolo_box_plugin";
}
const char* YoloBoxPluginCreator::getPluginVersion() const { return "1"; }
const nvinfer1::PluginFieldCollection* YoloBoxPluginCreator::getFieldNames() {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* YoloBoxPluginCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) {
const nvinfer1::PluginField* fields = fc->fields;
int type_id = -1;
std::vector<int> anchors;
int class_num = -1;
float conf_thresh = 0.01;
int downsample_ratio = 32;
bool clip_bbox = true;
float scale_x_y = 1.;
int h = -1;
int w = -1;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string field_name(fc->fields[i].name);
if (field_name.compare("type_id") == 0) {
type_id = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("anchors")) {
const int length = fc->fields[i].length;
const int* data = static_cast<const int*>(fc->fields[i].data);
anchors.insert(anchors.end(), data, data + length);
} else if (field_name.compare("class_num")) {
class_num = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("conf_thresh")) {
conf_thresh = *static_cast<const float*>(fc->fields[i].data);
} else if (field_name.compare("downsample_ratio")) {
downsample_ratio = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("clip_bbox")) {
clip_bbox = *static_cast<const bool*>(fc->fields[i].data);
} else if (field_name.compare("scale_x_y")) {
scale_x_y = *static_cast<const float*>(fc->fields[i].data);
} else if (field_name.compare("h")) {
h = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("w")) {
w = *static_cast<const int*>(fc->fields[i].data);
} else {
assert(false && "unknown plugin field name.");
}
}
return new YoloBoxPlugin(
type_id ? nvinfer1::DataType::kHALF : nvinfer1::DataType::kFLOAT, anchors,
class_num, conf_thresh, downsample_ratio, clip_bbox, scale_x_y, h, w);
}
nvinfer1::IPluginV2Ext* YoloBoxPluginCreator::deserializePlugin(
const char* name, const void* serial_data, size_t serial_length) {
auto plugin = new YoloBoxPlugin(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
122769f7cb400b8d5e6cf177612d57aba7f1a0a1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/reduce.h>
#include "modularity.cuh"
struct deg {
unsigned int *vertndx;
unsigned int lb_deg, ub_deg;
deg(unsigned int *_vertndx, unsigned int _lb_deg, unsigned int _ub_deg) :
vertndx(_vertndx),
lb_deg(_lb_deg),
ub_deg(_ub_deg) {}
__device__ bool operator()(const int &v) const {
unsigned int deg = vertndx[v + 1] - vertndx[v];
return lb_deg < deg && deg <= ub_deg;
}
};
template<typename T>
struct square {
__device__ T operator()(const T &x) const {
return x * x;
}
};
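// One warp row per vertex: accumulate the weights of its incident edges and reduce
// within the warp using __shfl_down_sync.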
static __global__ void sumNeighWeights(Graph graph) {
unsigned int vertndx = blockIdx.x * blockDim.y + threadIdx.y;
if (vertndx < graph.vert_sz) {
unsigned int begin = graph.vertices[vertndx];
unsigned int deg = graph.vertices[vertndx + 1] - begin;
unsigned int e_ndx = threadIdx.x;
float sum = 0.0;
while (e_ndx < deg) {
sum += graph.weights[begin + e_ndx];
e_ndx += blockDim.x;
}
for (unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) {
sum += __shfl_down_sync(FULL_MASK, sum, offset);
}
if (threadIdx.x == 0) {
graph.neigh_w[vertndx] = sum;
}
}
}
static __global__ void graphVertToComm(Graph graph) {
unsigned int v_ndx = blockIdx.x * blockDim.x + threadIdx.x;
if (v_ndx < graph.init_sz) {
graph.init_comm[v_ndx] = graph.comm[graph.init_comm[v_ndx]];
}
}
template<typename T>
static __device__ void initTable(T *arr, unsigned int arr_sz, T init_val) {
unsigned int block_dim = blockDim.x * blockDim.y;
unsigned int thrd_id = threadIdx.x * blockDim.y + threadIdx.y;
for (unsigned int i = thrd_id; i < arr_sz; i += block_dim) {
arr[i] = init_val;
}
__syncthreads();
}
template<typename T>
static __device__ void initSharedVar(T *var, T val) {
if (threadIdx.x == 0) {
*var = val;
}
}
static __device__ void initHashTables(int *hash_c, float *hash_w, unsigned int hash_sz) {
unsigned int block_dim = blockDim.x * blockDim.y;
unsigned int thrd_id = threadIdx.x * blockDim.y + threadIdx.y;
for (unsigned int i = thrd_id; i < hash_sz; i += block_dim) {
hash_c[i] = HASH_INIT;
hash_w[i] = 0;
}
__syncthreads();
}
static __device__ bool isLowerIdx(Graph &g, unsigned int vert, unsigned int neigh) {
unsigned int vert_comm = g.comm[vert];
unsigned int neigh_comm = g.comm[neigh];
if (g.comm_sz[vert_comm] == 1 && g.comm_sz[neigh_comm] == 1) {
return neigh_comm < vert_comm;
}
return true;
}
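// Best-community search for the highest-degree bucket: each vertex owns a row of threads and an
// open-addressing hash table in global memory that accumulates edge weight per neighboring community;
// the candidate with the largest modularity gain is then selected by a shared-memory reduction.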
static __global__ void computeMoveGlobal(
Graph graph,
const unsigned int *verts,
unsigned int vert_size,
unsigned int prime,
unsigned int vert_hash_sz,
int *hash_comm,
float *hash_weight) {
extern __shared__ int cache_pos[];
int *cache_comm = cache_pos + blockDim.x;
float *cache_max = reinterpret_cast<float *>(cache_comm + blockDim.x);
float *comm_e_w = cache_max + blockDim.x;
initSharedVar<float>(comm_e_w, (float) 0);
__syncthreads();
unsigned int curr_pos;
float max_mod_gain = (-1) * INFINITY;
unsigned int deg = 0;
unsigned int vertndx = blockIdx.x * blockDim.y + threadIdx.y;
unsigned int v, e, max_e, max_comm;
if (vertndx < vert_size) {
int *vert_hash_comm = hash_comm + vertndx * vert_hash_sz;
float *vert_hash_weight = hash_weight + vertndx * vert_hash_sz;
initHashTables(vert_hash_comm, vert_hash_weight, vert_hash_sz);
v = verts[vertndx];
deg = graph.vertices[v + 1] - graph.vertices[v];
unsigned int e_ndx = threadIdx.x;
while (e_ndx < deg) {
e = graph.edges[graph.vertices[v] + e_ndx];
unsigned int e_comm = graph.comm[e];
unsigned int v_comm = graph.comm[v];
float edge_weight = graph.weights[graph.vertices[v] + e_ndx];
unsigned int it = 0;
do {
curr_pos = hash(e_comm, it, prime);
it++;
if (vert_hash_comm[curr_pos] == e_comm) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
} else if (vert_hash_comm[curr_pos] == HASH_INIT) {
if (atomicCAS(vert_hash_comm + curr_pos, HASH_INIT, (int) e_comm) == HASH_INIT) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
} else if (vert_hash_comm[curr_pos] == e_comm) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
}
}
} while (vert_hash_comm[curr_pos] != e_comm);
if (e_comm == v_comm && v != e) {
atomicAdd(comm_e_w, edge_weight);
}
if (isLowerIdx(graph, v, e)) {
float v_weights = graph.neigh_w[v];
float mod_gain = (vert_hash_weight[curr_pos] / graph.all_w)
+ (v_weights
* ((graph.comm_w[v_comm] - v_weights) - graph.comm_w[e_comm])
/ (2 * graph.all_w * graph.all_w));
if (mod_gain > max_mod_gain) {
max_mod_gain = mod_gain;
max_e = e;
max_comm = e_comm;
}
}
e_ndx += blockDim.x;
}
}
cache_max[threadIdx.x] = max_mod_gain;
cache_pos[threadIdx.x] = threadIdx.x;
cache_comm[threadIdx.x] = max_comm;
__syncthreads();
for (unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) {
if (threadIdx.x < offset
&& (cache_max[threadIdx.x + offset] > cache_max[threadIdx.x]
|| (cache_max[threadIdx.x + offset] == cache_max[threadIdx.x] &&
cache_comm[threadIdx.x + offset] < cache_comm[threadIdx.x]))) {
cache_max[threadIdx.x] = cache_max[threadIdx.x + offset];
cache_pos[threadIdx.x] = cache_pos[threadIdx.x + offset];
cache_comm[threadIdx.x] = cache_comm[threadIdx.x + offset];
}
__syncthreads();
}
if (vertndx < vert_size && threadIdx.x < deg && cache_pos[0] == threadIdx.x) {
if (max_mod_gain - (*comm_e_w / graph.all_w) > 0) {
            graph.new_comm[v] = graph.comm[max_e];
} else {
graph.new_comm[v] = graph.comm[v];
}
}
}
static __global__ void computeMoveBlock(
Graph graph,
const unsigned int *verts,
unsigned int vert_size,
unsigned int prime,
unsigned int vert_hash_sz) {
extern __shared__ float s[];
float *hash_weight = s;
float *vert_hash_weight = s + threadIdx.y * vert_hash_sz;
int *hash_comm = reinterpret_cast<int *>(hash_weight + blockDim.y * vert_hash_sz);
int *vert_hash_comm = hash_comm + threadIdx.y * vert_hash_sz;
initHashTables(hash_comm, hash_weight, blockDim.y * vert_hash_sz);
int *cache_pos = hash_comm + blockDim.y * vert_hash_sz;
int *cache_comm = cache_pos + blockDim.x;
float *cache_max = reinterpret_cast<float *>(cache_comm + blockDim.x);
float *comm_e_w = cache_max + blockDim.x;
initSharedVar<float>(comm_e_w, (float) 0);
__syncthreads();
unsigned int curr_pos;
float max_mod_gain = (-1) * INFINITY;
unsigned int deg = 0;
unsigned int vertndx = blockIdx.x * blockDim.y + threadIdx.y;
unsigned int v, e, max_e, max_comm;
if (vertndx < vert_size) {
v = verts[vertndx];
deg = graph.vertices[v + 1] - graph.vertices[v];
unsigned int e_ndx = threadIdx.x;
while (e_ndx < deg) {
e = graph.edges[graph.vertices[v] + e_ndx];
unsigned int e_comm = graph.comm[e];
unsigned int v_comm = graph.comm[v];
float edge_weight = graph.weights[graph.vertices[v] + e_ndx];
unsigned int it = 0;
do {
curr_pos = hash(e_comm, it, prime);
it++;
if (vert_hash_comm[curr_pos] == e_comm) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
} else if (vert_hash_comm[curr_pos] == HASH_INIT) {
if (atomicCAS(vert_hash_comm + curr_pos, HASH_INIT, (int) e_comm) == HASH_INIT) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
} else if (vert_hash_comm[curr_pos] == e_comm) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
}
}
} while (vert_hash_comm[curr_pos] != e_comm);
if (e_comm == v_comm && e != v) {
atomicAdd(comm_e_w, edge_weight);
}
if (isLowerIdx(graph, v, e)) {
float v_weights = graph.neigh_w[v];
float mod_gain = (vert_hash_weight[curr_pos] / graph.all_w)
+ (v_weights
* ((graph.comm_w[v_comm] - v_weights) - graph.comm_w[e_comm])
/ (2 * graph.all_w * graph.all_w));
if (mod_gain > max_mod_gain) {
max_mod_gain = mod_gain;
max_e = e;
max_comm = e_comm;
}
}
e_ndx += blockDim.x;
}
}
cache_max[threadIdx.x] = max_mod_gain;
cache_pos[threadIdx.x] = threadIdx.x;
cache_comm[threadIdx.x] = max_comm;
__syncthreads();
for (unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) {
if (threadIdx.x < offset
&& (cache_max[threadIdx.x + offset] > cache_max[threadIdx.x]
|| (cache_max[threadIdx.x + offset] == cache_max[threadIdx.x] &&
cache_comm[threadIdx.x + offset] < cache_comm[threadIdx.x]))) {
cache_max[threadIdx.x] = cache_max[threadIdx.x + offset];
cache_pos[threadIdx.x] = cache_pos[threadIdx.x + offset];
cache_comm[threadIdx.x] = cache_comm[threadIdx.x + offset];
}
__syncthreads();
}
if (vertndx < vert_size && threadIdx.x < deg && cache_pos[0] == threadIdx.x) {
if (max_mod_gain - (*comm_e_w / graph.all_w) > 0) {
            graph.new_comm[v] = graph.comm[max_e];
} else {
graph.new_comm[v] = graph.comm[v];
}
}
}
static __global__ void computeMoveWarp(
Graph graph,
const unsigned int *verts,
unsigned int vert_size,
unsigned int prime,
unsigned int vert_hash_sz) {
extern __shared__ float s[];
float *hash_weight = s;
float *vert_hash_weight = s + threadIdx.y * vert_hash_sz;
int *hash_comm = reinterpret_cast<int *>(hash_weight + blockDim.y * vert_hash_sz);
int *vert_hash_comm = hash_comm + threadIdx.y * vert_hash_sz;
initHashTables(hash_comm, hash_weight, blockDim.y * vert_hash_sz);
int *block_max_pos = hash_comm + blockDim.y * vert_hash_sz;
int *vert_max_pos = block_max_pos + threadIdx.y;
float *comm_e_w = reinterpret_cast<float *>(block_max_pos + blockDim.y + threadIdx.y);
initSharedVar<float>(comm_e_w, (float) 0);
unsigned int curr_pos;
float max_mod_gain = (-1) * INFINITY;
unsigned int deg = 0;
unsigned int vertndx = blockIdx.x * blockDim.y + threadIdx.y;
unsigned int v, e, max_e, max_comm;
if (vertndx < vert_size) {
v = verts[vertndx];
deg = graph.vertices[v + 1] - graph.vertices[v];
unsigned int e_ndx = threadIdx.x;
while (e_ndx < deg) {
e = graph.edges[graph.vertices[v] + e_ndx];
unsigned int e_comm = graph.comm[e];
unsigned int v_comm = graph.comm[v];
float edge_weight = graph.weights[graph.vertices[v] + e_ndx];
unsigned int it = 0;
do {
curr_pos = hash(e_comm, it, prime);
it++;
if (vert_hash_comm[curr_pos] == e_comm) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
} else if (vert_hash_comm[curr_pos] == HASH_INIT) {
if (atomicCAS(vert_hash_comm + curr_pos, HASH_INIT, (int) e_comm) == HASH_INIT) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
} else if (vert_hash_comm[curr_pos] == e_comm) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
}
}
} while (vert_hash_comm[curr_pos] != e_comm);
if (e_comm == v_comm && e != v) {
atomicAdd(comm_e_w, edge_weight);
}
if (isLowerIdx(graph, v, e)) {
float v_weights = graph.neigh_w[v];
float mod_gain = (vert_hash_weight[curr_pos] / graph.all_w)
+ (v_weights
* ((graph.comm_w[v_comm] - v_weights) - graph.comm_w[e_comm])
/ (2 * graph.all_w * graph.all_w));
if (mod_gain > max_mod_gain) {
max_mod_gain = mod_gain;
max_e = e;
max_comm = e_comm;
}
}
e_ndx += blockDim.x;
}
}
float tmp_gain;
float e_mod_gain = max_mod_gain;
unsigned int tmp_pos, tmp_comm;
unsigned int pos = threadIdx.x;
for (unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) {
tmp_gain = __shfl_down_sync(FULL_MASK, max_mod_gain, offset);
tmp_pos = __shfl_down_sync(FULL_MASK, pos, offset);
tmp_comm = __shfl_down_sync(FULL_MASK, max_comm, offset);
if (tmp_gain > max_mod_gain || (tmp_gain == max_mod_gain && tmp_comm < max_comm)) {
max_mod_gain = tmp_gain;
pos = tmp_pos;
max_comm = tmp_comm;
}
}
if (threadIdx.x == 0) {
*vert_max_pos = pos;
}
if (vertndx < vert_size && threadIdx.x < deg && *vert_max_pos == threadIdx.x) {
if (e_mod_gain - (*comm_e_w / graph.all_w) > 0) {
graph.new_comm[v] = graph.comm[max_e];
} else {
graph.new_comm[v] = graph.comm[v];
}
}
}
static __global__ void assignCommunity(Graph graph, const unsigned int *verts, unsigned int vert_size) {
unsigned int v_ndx = blockIdx.x * blockDim.x + threadIdx.x;
if (v_ndx < vert_size) {
unsigned int v = verts[v_ndx];
unsigned int v_new_comm = graph.new_comm[v];
atomicSub(graph.comm_sz + graph.comm[v], 1);
atomicAdd(graph.comm_sz + v_new_comm, 1);
graph.comm[v] = v_new_comm;
}
}
static __global__ void computeNewCommWeights(Graph graph) {
unsigned int v_ndx = blockIdx.x * blockDim.x + threadIdx.x;
if (v_ndx < graph.vert_sz) {
atomicAdd(graph.comm_w + graph.comm[v_ndx], graph.neigh_w[v_ndx]);
}
}
static __global__ void computeCommNeighSum(Graph graph, float *sum) {
unsigned int v_ndx = blockIdx.x * blockDim.y + threadIdx.y;
unsigned int e_ndx = threadIdx.x;
float temp = 0;
if (v_ndx < graph.vert_sz) {
unsigned int v_e_ndx = graph.vertices[v_ndx];
unsigned int v_comm = graph.comm[v_ndx];
unsigned int deg = graph.vertices[v_ndx + 1] - v_e_ndx;
unsigned int *v_neigh = graph.edges + v_e_ndx;
float *v_weights = graph.weights + v_e_ndx;
while (e_ndx < deg) {
if (graph.comm[v_neigh[e_ndx]] == v_comm) {
temp += v_weights[e_ndx];
}
e_ndx += blockDim.x;
}
for (unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) {
temp += __shfl_down_sync(FULL_MASK, temp, offset);
}
if (threadIdx.x == 0) {
sum[v_ndx] = temp;
}
}
}
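// Modularity Q = sum_v w(v, comm(v)) / (2m) - sum_c K_c^2 / (4 m^2), where m = all_w is the total
// edge weight and K_c = comm_w[c] is the summed weighted degree of community c.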
static float computeModularity(Graph &dev_graph) {
hipLaunchKernelGGL(( computeCommNeighSum), dim3((dev_graph.vert_sz + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK), dim3(dim3(WARP_SZ, WARPS_PER_BLOCK)), 0, 0,
dev_graph, dev_graph.modularity);
float comm_w_sum = thrust::transform_reduce(thrust::device, dev_graph.comm_w, dev_graph.comm_w + dev_graph.vert_sz,
square<float>(), 0.0, thrust::plus<float>());
float sum_edges_comm_v = thrust::reduce(thrust::device, dev_graph.modularity,
dev_graph.modularity + dev_graph.vert_sz);
return (sum_edges_comm_v / (2 * dev_graph.all_w)) - (comm_w_sum / (4 * dev_graph.all_w * dev_graph.all_w));
}
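// One Louvain-style optimization pass: vertices are partitioned into degree buckets and each bucket
// is processed by a kernel variant sized for it (warp-level hash tables for small degrees, a shared
// memory table per block for medium degrees, global-memory tables for the largest bucket), iterating
// until the modularity gain falls below the threshold.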
bool modularityOptimization(Graph &dev_graph, unsigned int *v_set, float threshold) {
unsigned int buckets[] = {0, 4, 8, 16, 32, 84, 319, INT_MAX};
unsigned int prime[] = {7, 13, 29, 53, 127, 479,
findNearestPrime((unsigned int) (dev_graph.max_deg * 1.5) + 1)};
unsigned int vert_hash_sizes[] = {8, 16, 32, 64, 128, 512};
dim3 blocks_dim[] = {
{4, 32},
{8, 16},
{16, 8},
{32, 4},
{32, 4},
{128, 1},
{128, 1}};
size_t bucket_sz = 8;
int *hash_comm = nullptr;
float *hash_weights = nullptr;
hipLaunchKernelGGL(( sumNeighWeights), dim3((dev_graph.vert_sz + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK), dim3(dim3(WARP_SZ, WARPS_PER_BLOCK)), 0, 0,
dev_graph);
dev_graph.all_w = thrust::reduce(thrust::device, dev_graph.neigh_w, dev_graph.neigh_w + dev_graph.vert_sz,
(float) 0) / 2;
thrust::sequence(thrust::device, v_set, v_set + dev_graph.vert_sz, 0);
thrust::sequence(thrust::device, dev_graph.comm, dev_graph.comm + dev_graph.vert_sz, 0);
thrust::fill(thrust::device, dev_graph.comm_sz, dev_graph.comm_sz + dev_graph.vert_sz, 1);
thrust::copy(thrust::device, dev_graph.neigh_w, dev_graph.neigh_w + dev_graph.vert_sz, dev_graph.comm_w);
float act_modularity = computeModularity(dev_graph);
float old_modularity;
bool first_loop = true, ret = true;
do {
for (int i = 1; i < bucket_sz; i++) {
unsigned int *v_set_end = thrust::partition(thrust::device, v_set, v_set + dev_graph.vert_sz,
deg(dev_graph.vertices, buckets[i - 1], buckets[i]));
unsigned int v_set_sz = v_set_end - v_set;
if (v_set_sz > 0) {
unsigned int grid_sz = (v_set_sz + blocks_dim[i - 1].y - 1) / blocks_dim[i - 1].y;
if (blocks_dim[i - 1].x <= WARP_SZ) {
unsigned int hash_sz = vert_hash_sizes[i - 1] * blocks_dim[i - 1].y;
unsigned int shmem_sz = (hash_sz + blocks_dim[i - 1].y) * (sizeof(float) + sizeof(int));
hipLaunchKernelGGL(( computeMoveWarp), dim3(grid_sz), dim3(blocks_dim[i - 1]), shmem_sz, 0, dev_graph, v_set, v_set_sz, prime[i - 1],
vert_hash_sizes[i - 1]);
} else if (buckets[i] == 319) {
unsigned int hash_sz = vert_hash_sizes[i - 1];
unsigned int shmem_sz = (hash_sz + blocks_dim[i - 1].x + 1) * sizeof(float) +
(hash_sz + 2 * blocks_dim[i - 1].x) * sizeof(int);
hipLaunchKernelGGL(( computeMoveBlock), dim3(grid_sz), dim3(blocks_dim[i - 1]), shmem_sz, 0, dev_graph, v_set, v_set_sz, prime[i - 1],
vert_hash_sizes[i - 1]);
} else {
unsigned int hash_sz = prime[i - 1];
unsigned int shmem_sz = blocks_dim[i - 1].x * (sizeof(float) + 2 * sizeof(int)) + sizeof(float);
if (!hash_comm || !hash_weights) {
HANDLE_ERROR(hipMalloc((void **) &hash_comm, v_set_sz * hash_sz * sizeof(int)));
HANDLE_ERROR(hipMalloc((void **) &hash_weights, v_set_sz * hash_sz * sizeof(float)));
}
hipLaunchKernelGGL(( computeMoveGlobal), dim3(grid_sz), dim3(blocks_dim[i - 1]), shmem_sz, 0, dev_graph, v_set, v_set_sz,
prime[i - 1],
hash_sz, hash_comm, hash_weights);
}
hipLaunchKernelGGL(( assignCommunity), dim3((v_set_sz + THREADS_N - 1) / THREADS_N), dim3(THREADS_N), 0, 0,
dev_graph, v_set, v_set_sz);
thrust::fill(thrust::device, dev_graph.comm_w, dev_graph.comm_w + dev_graph.vert_sz, 0.0);
hipLaunchKernelGGL(( computeNewCommWeights), dim3((dev_graph.vert_sz + THREADS_N - 1) / THREADS_N), dim3(THREADS_N), 0, 0, dev_graph);
}
}
old_modularity = act_modularity;
act_modularity = computeModularity(dev_graph);
if (first_loop && act_modularity - old_modularity < threshold) {
std::cout << old_modularity << std::endl;
ret = false;
}
first_loop = false;
} while (act_modularity - old_modularity >= threshold);
if (hash_comm || hash_weights) {
HANDLE_ERROR(hipFree(hash_comm));
HANDLE_ERROR(hipFree(hash_weights));
}
if (ret) {
hipLaunchKernelGGL(( graphVertToComm), dim3((dev_graph.init_sz + THREADS_N - 1) / THREADS_N), dim3(THREADS_N), 0, 0, dev_graph);
}
return ret;
}
|
122769f7cb400b8d5e6cf177612d57aba7f1a0a1.cu
|
#include <thrust/reduce.h>
#include "modularity.cuh"
struct deg {
unsigned int *vertndx;
unsigned int lb_deg, ub_deg;
deg(unsigned int *_vertndx, unsigned int _lb_deg, unsigned int _ub_deg) :
vertndx(_vertndx),
lb_deg(_lb_deg),
ub_deg(_ub_deg) {}
__device__ bool operator()(const int &v) const {
unsigned int deg = vertndx[v + 1] - vertndx[v];
return lb_deg < deg && deg <= ub_deg;
}
};
template<typename T>
struct square {
__device__ T operator()(const T &x) const {
return x * x;
}
};
static __global__ void sumNeighWeights(Graph graph) {
unsigned int vertndx = blockIdx.x * blockDim.y + threadIdx.y;
if (vertndx < graph.vert_sz) {
unsigned int begin = graph.vertices[vertndx];
unsigned int deg = graph.vertices[vertndx + 1] - begin;
unsigned int e_ndx = threadIdx.x;
float sum = 0.0;
while (e_ndx < deg) {
sum += graph.weights[begin + e_ndx];
e_ndx += blockDim.x;
}
for (unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) {
sum += __shfl_down_sync(FULL_MASK, sum, offset);
}
if (threadIdx.x == 0) {
graph.neigh_w[vertndx] = sum;
}
}
}
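// Remaps every vertex of the original (uncontracted) graph to the community of its current super-vertex.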
static __global__ void graphVertToComm(Graph graph) {
unsigned int v_ndx = blockIdx.x * blockDim.x + threadIdx.x;
if (v_ndx < graph.init_sz) {
graph.init_comm[v_ndx] = graph.comm[graph.init_comm[v_ndx]];
}
}
template<typename T>
static __device__ void initTable(T *arr, unsigned int arr_sz, T init_val) {
unsigned int block_dim = blockDim.x * blockDim.y;
unsigned int thrd_id = threadIdx.x * blockDim.y + threadIdx.y;
for (unsigned int i = thrd_id; i < arr_sz; i += block_dim) {
arr[i] = init_val;
}
__syncthreads();
}
template<typename T>
static __device__ void initSharedVar(T *var, T val) {
if (threadIdx.x == 0) {
*var = val;
}
}
static __device__ void initHashTables(int *hash_c, float *hash_w, unsigned int hash_sz) {
unsigned int block_dim = blockDim.x * blockDim.y;
unsigned int thrd_id = threadIdx.x * blockDim.y + threadIdx.y;
for (unsigned int i = thrd_id; i < hash_sz; i += block_dim) {
hash_c[i] = HASH_INIT;
hash_w[i] = 0;
}
__syncthreads();
}
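// Tie-breaking rule: when both endpoints are still singleton communities, the move is only considered if the
// neighbour's community id is lower, which keeps two singletons from swapping into each other in the same step.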
static __device__ bool isLowerIdx(Graph &g, unsigned int vert, unsigned int neigh) {
unsigned int vert_comm = g.comm[vert];
unsigned int neigh_comm = g.comm[neigh];
if (g.comm_sz[vert_comm] == 1 && g.comm_sz[neigh_comm] == 1) {
return neigh_comm < vert_comm;
}
return true;
}
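// Best-community search for high-degree vertices: one thread block per vertex, with a per-vertex hash table
// (community id -> accumulated edge weight) kept in global memory because it no longer fits in shared memory.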
static __global__ void computeMoveGlobal(
Graph graph,
const unsigned int *verts,
unsigned int vert_size,
unsigned int prime,
unsigned int vert_hash_sz,
int *hash_comm,
float *hash_weight) {
extern __shared__ int cache_pos[];
int *cache_comm = cache_pos + blockDim.x;
float *cache_max = reinterpret_cast<float *>(cache_comm + blockDim.x);
float *comm_e_w = cache_max + blockDim.x;
initSharedVar<float>(comm_e_w, (float) 0);
__syncthreads();
unsigned int curr_pos;
float max_mod_gain = (-1) * INFINITY;
unsigned int deg = 0;
unsigned int vertndx = blockIdx.x * blockDim.y + threadIdx.y;
unsigned int v, e, max_e, max_comm;
if (vertndx < vert_size) {
int *vert_hash_comm = hash_comm + vertndx * vert_hash_sz;
float *vert_hash_weight = hash_weight + vertndx * vert_hash_sz;
initHashTables(vert_hash_comm, vert_hash_weight, vert_hash_sz);
v = verts[vertndx];
deg = graph.vertices[v + 1] - graph.vertices[v];
unsigned int e_ndx = threadIdx.x;
while (e_ndx < deg) {
e = graph.edges[graph.vertices[v] + e_ndx];
unsigned int e_comm = graph.comm[e];
unsigned int v_comm = graph.comm[v];
float edge_weight = graph.weights[graph.vertices[v] + e_ndx];
unsigned int it = 0;
do {
curr_pos = hash(e_comm, it, prime);
it++;
if (vert_hash_comm[curr_pos] == e_comm) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
} else if (vert_hash_comm[curr_pos] == HASH_INIT) {
if (atomicCAS(vert_hash_comm + curr_pos, HASH_INIT, (int) e_comm) == HASH_INIT) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
} else if (vert_hash_comm[curr_pos] == e_comm) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
}
}
} while (vert_hash_comm[curr_pos] != e_comm);
if (e_comm == v_comm && v != e) {
atomicAdd(comm_e_w, edge_weight);
}
if (isLowerIdx(graph, v, e)) {
float v_weights = graph.neigh_w[v];
float mod_gain = (vert_hash_weight[curr_pos] / graph.all_w)
+ (v_weights
* ((graph.comm_w[v_comm] - v_weights) - graph.comm_w[e_comm])
/ (2 * graph.all_w * graph.all_w));
if (mod_gain > max_mod_gain) {
max_mod_gain = mod_gain;
max_e = e;
max_comm = e_comm;
}
}
e_ndx += blockDim.x;
}
}
cache_max[threadIdx.x] = max_mod_gain;
cache_pos[threadIdx.x] = threadIdx.x;
cache_comm[threadIdx.x] = max_comm;
__syncthreads();
for (unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) {
if (threadIdx.x < offset
&& (cache_max[threadIdx.x + offset] > cache_max[threadIdx.x]
|| (cache_max[threadIdx.x + offset] == cache_max[threadIdx.x] &&
cache_comm[threadIdx.x + offset] < cache_comm[threadIdx.x]))) {
cache_max[threadIdx.x] = cache_max[threadIdx.x + offset];
cache_pos[threadIdx.x] = cache_pos[threadIdx.x + offset];
cache_comm[threadIdx.x] = cache_comm[threadIdx.x + offset];
}
__syncthreads();
}
if (vertndx < vert_size && threadIdx.x < deg && cache_pos[0] == threadIdx.x) {
if (max_mod_gain - (*comm_e_w / graph.all_w) > 0) {
            graph.new_comm[v] = graph.comm[max_e];
} else {
graph.new_comm[v] = graph.comm[v];
}
}
}
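// Same search as computeMoveGlobal, but for mid-degree vertices whose per-vertex hash table still fits in
// shared memory; again one thread block per vertex.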
static __global__ void computeMoveBlock(
Graph graph,
const unsigned int *verts,
unsigned int vert_size,
unsigned int prime,
unsigned int vert_hash_sz) {
extern __shared__ float s[];
float *hash_weight = s;
float *vert_hash_weight = s + threadIdx.y * vert_hash_sz;
int *hash_comm = reinterpret_cast<int *>(hash_weight + blockDim.y * vert_hash_sz);
int *vert_hash_comm = hash_comm + threadIdx.y * vert_hash_sz;
initHashTables(hash_comm, hash_weight, blockDim.y * vert_hash_sz);
int *cache_pos = hash_comm + blockDim.y * vert_hash_sz;
int *cache_comm = cache_pos + blockDim.x;
float *cache_max = reinterpret_cast<float *>(cache_comm + blockDim.x);
float *comm_e_w = cache_max + blockDim.x;
initSharedVar<float>(comm_e_w, (float) 0);
__syncthreads();
unsigned int curr_pos;
float max_mod_gain = (-1) * INFINITY;
unsigned int deg = 0;
unsigned int vertndx = blockIdx.x * blockDim.y + threadIdx.y;
unsigned int v, e, max_e, max_comm;
if (vertndx < vert_size) {
v = verts[vertndx];
deg = graph.vertices[v + 1] - graph.vertices[v];
unsigned int e_ndx = threadIdx.x;
while (e_ndx < deg) {
e = graph.edges[graph.vertices[v] + e_ndx];
unsigned int e_comm = graph.comm[e];
unsigned int v_comm = graph.comm[v];
float edge_weight = graph.weights[graph.vertices[v] + e_ndx];
unsigned int it = 0;
do {
curr_pos = hash(e_comm, it, prime);
it++;
if (vert_hash_comm[curr_pos] == e_comm) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
} else if (vert_hash_comm[curr_pos] == HASH_INIT) {
if (atomicCAS(vert_hash_comm + curr_pos, HASH_INIT, (int) e_comm) == HASH_INIT) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
} else if (vert_hash_comm[curr_pos] == e_comm) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
}
}
} while (vert_hash_comm[curr_pos] != e_comm);
if (e_comm == v_comm && e != v) {
atomicAdd(comm_e_w, edge_weight);
}
if (isLowerIdx(graph, v, e)) {
float v_weights = graph.neigh_w[v];
float mod_gain = (vert_hash_weight[curr_pos] / graph.all_w)
+ (v_weights
* ((graph.comm_w[v_comm] - v_weights) - graph.comm_w[e_comm])
/ (2 * graph.all_w * graph.all_w));
if (mod_gain > max_mod_gain) {
max_mod_gain = mod_gain;
max_e = e;
max_comm = e_comm;
}
}
e_ndx += blockDim.x;
}
}
cache_max[threadIdx.x] = max_mod_gain;
cache_pos[threadIdx.x] = threadIdx.x;
cache_comm[threadIdx.x] = max_comm;
__syncthreads();
for (unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) {
if (threadIdx.x < offset
&& (cache_max[threadIdx.x + offset] > cache_max[threadIdx.x]
|| (cache_max[threadIdx.x + offset] == cache_max[threadIdx.x] &&
cache_comm[threadIdx.x + offset] < cache_comm[threadIdx.x]))) {
cache_max[threadIdx.x] = cache_max[threadIdx.x + offset];
cache_pos[threadIdx.x] = cache_pos[threadIdx.x + offset];
cache_comm[threadIdx.x] = cache_comm[threadIdx.x + offset];
}
__syncthreads();
}
if (vertndx < vert_size && threadIdx.x < deg && cache_pos[0] == threadIdx.x) {
if (max_mod_gain - (*comm_e_w / graph.all_w) > 0) {
            graph.new_comm[v] = graph.comm[max_e];
} else {
graph.new_comm[v] = graph.comm[v];
}
}
}
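// Low-degree vertices: a group of blockDim.x (<= 32) threads per vertex, shared-memory hash tables and a
// shuffle-based reduction; lane 0 publishes the winning lane through shared memory and the group is assumed
// to run in lock-step, since no explicit synchronization precedes the read of vert_max_pos.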
static __global__ void computeMoveWarp(
Graph graph,
const unsigned int *verts,
unsigned int vert_size,
unsigned int prime,
unsigned int vert_hash_sz) {
extern __shared__ float s[];
float *hash_weight = s;
float *vert_hash_weight = s + threadIdx.y * vert_hash_sz;
int *hash_comm = reinterpret_cast<int *>(hash_weight + blockDim.y * vert_hash_sz);
int *vert_hash_comm = hash_comm + threadIdx.y * vert_hash_sz;
initHashTables(hash_comm, hash_weight, blockDim.y * vert_hash_sz);
int *block_max_pos = hash_comm + blockDim.y * vert_hash_sz;
int *vert_max_pos = block_max_pos + threadIdx.y;
float *comm_e_w = reinterpret_cast<float *>(block_max_pos + blockDim.y + threadIdx.y);
initSharedVar<float>(comm_e_w, (float) 0);
unsigned int curr_pos;
float max_mod_gain = (-1) * INFINITY;
unsigned int deg = 0;
unsigned int vertndx = blockIdx.x * blockDim.y + threadIdx.y;
unsigned int v, e, max_e, max_comm;
if (vertndx < vert_size) {
v = verts[vertndx];
deg = graph.vertices[v + 1] - graph.vertices[v];
unsigned int e_ndx = threadIdx.x;
while (e_ndx < deg) {
e = graph.edges[graph.vertices[v] + e_ndx];
unsigned int e_comm = graph.comm[e];
unsigned int v_comm = graph.comm[v];
float edge_weight = graph.weights[graph.vertices[v] + e_ndx];
unsigned int it = 0;
do {
curr_pos = hash(e_comm, it, prime);
it++;
if (vert_hash_comm[curr_pos] == e_comm) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
} else if (vert_hash_comm[curr_pos] == HASH_INIT) {
if (atomicCAS(vert_hash_comm + curr_pos, HASH_INIT, (int) e_comm) == HASH_INIT) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
} else if (vert_hash_comm[curr_pos] == e_comm) {
atomicAdd(vert_hash_weight + curr_pos, edge_weight);
}
}
} while (vert_hash_comm[curr_pos] != e_comm);
if (e_comm == v_comm && e != v) {
atomicAdd(comm_e_w, edge_weight);
}
if (isLowerIdx(graph, v, e)) {
float v_weights = graph.neigh_w[v];
float mod_gain = (vert_hash_weight[curr_pos] / graph.all_w)
+ (v_weights
* ((graph.comm_w[v_comm] - v_weights) - graph.comm_w[e_comm])
/ (2 * graph.all_w * graph.all_w));
if (mod_gain > max_mod_gain) {
max_mod_gain = mod_gain;
max_e = e;
max_comm = e_comm;
}
}
e_ndx += blockDim.x;
}
}
float tmp_gain;
float e_mod_gain = max_mod_gain;
unsigned int tmp_pos, tmp_comm;
unsigned int pos = threadIdx.x;
for (unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) {
tmp_gain = __shfl_down_sync(FULL_MASK, max_mod_gain, offset);
tmp_pos = __shfl_down_sync(FULL_MASK, pos, offset);
tmp_comm = __shfl_down_sync(FULL_MASK, max_comm, offset);
if (tmp_gain > max_mod_gain || (tmp_gain == max_mod_gain && tmp_comm < max_comm)) {
max_mod_gain = tmp_gain;
pos = tmp_pos;
max_comm = tmp_comm;
}
}
if (threadIdx.x == 0) {
*vert_max_pos = pos;
}
if (vertndx < vert_size && threadIdx.x < deg && *vert_max_pos == threadIdx.x) {
if (e_mod_gain - (*comm_e_w / graph.all_w) > 0) {
graph.new_comm[v] = graph.comm[max_e];
} else {
graph.new_comm[v] = graph.comm[v];
}
}
}
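// Commits the moves chosen above: updates the community sizes and assigns each vertex its new community.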
static __global__ void assignCommunity(Graph graph, const unsigned int *verts, unsigned int vert_size) {
unsigned int v_ndx = blockIdx.x * blockDim.x + threadIdx.x;
if (v_ndx < vert_size) {
unsigned int v = verts[v_ndx];
unsigned int v_new_comm = graph.new_comm[v];
atomicSub(graph.comm_sz + graph.comm[v], 1);
atomicAdd(graph.comm_sz + v_new_comm, 1);
graph.comm[v] = v_new_comm;
}
}
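// Rebuilds comm_w: the total incident edge weight of every community after the latest moves.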
static __global__ void computeNewCommWeights(Graph graph) {
unsigned int v_ndx = blockIdx.x * blockDim.x + threadIdx.x;
if (v_ndx < graph.vert_sz) {
atomicAdd(graph.comm_w + graph.comm[v_ndx], graph.neigh_w[v_ndx]);
}
}
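// For each vertex, sums the weights of its edges that stay inside its community (one warp per vertex).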
static __global__ void computeCommNeighSum(Graph graph, float *sum) {
unsigned int v_ndx = blockIdx.x * blockDim.y + threadIdx.y;
unsigned int e_ndx = threadIdx.x;
float temp = 0;
if (v_ndx < graph.vert_sz) {
unsigned int v_e_ndx = graph.vertices[v_ndx];
unsigned int v_comm = graph.comm[v_ndx];
unsigned int deg = graph.vertices[v_ndx + 1] - v_e_ndx;
unsigned int *v_neigh = graph.edges + v_e_ndx;
float *v_weights = graph.weights + v_e_ndx;
while (e_ndx < deg) {
if (graph.comm[v_neigh[e_ndx]] == v_comm) {
temp += v_weights[e_ndx];
}
e_ndx += blockDim.x;
}
for (unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) {
temp += __shfl_down_sync(FULL_MASK, temp, offset);
}
if (threadIdx.x == 0) {
sum[v_ndx] = temp;
}
}
}
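// Modularity: (sum over vertices of intra-community incident weight) / (2m) minus
// (sum over communities of squared total weight) / (4m^2), where m = dev_graph.all_w.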
static float computeModularity(Graph &dev_graph) {
computeCommNeighSum<<<(dev_graph.vert_sz + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK, dim3(WARP_SZ, WARPS_PER_BLOCK)>>>(
dev_graph, dev_graph.modularity);
float comm_w_sum = thrust::transform_reduce(thrust::device, dev_graph.comm_w, dev_graph.comm_w + dev_graph.vert_sz,
square<float>(), 0.0, thrust::plus<float>());
float sum_edges_comm_v = thrust::reduce(thrust::device, dev_graph.modularity,
dev_graph.modularity + dev_graph.vert_sz);
return (sum_edges_comm_v / (2 * dev_graph.all_w)) - (comm_w_sum / (4 * dev_graph.all_w * dev_graph.all_w));
}
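// One Louvain modularity-optimization phase: vertices are partitioned into degree buckets and each bucket is
// processed by the kernel variant (warp / block / global hash) sized for it, repeating until the modularity
// gain of a full pass drops below threshold. Returns false if the very first pass already gains less than that.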
bool modularityOptimization(Graph &dev_graph, unsigned int *v_set, float threshold) {
unsigned int buckets[] = {0, 4, 8, 16, 32, 84, 319, INT_MAX};
unsigned int prime[] = {7, 13, 29, 53, 127, 479,
findNearestPrime((unsigned int) (dev_graph.max_deg * 1.5) + 1)};
unsigned int vert_hash_sizes[] = {8, 16, 32, 64, 128, 512};
dim3 blocks_dim[] = {
{4, 32},
{8, 16},
{16, 8},
{32, 4},
{32, 4},
{128, 1},
{128, 1}};
size_t bucket_sz = 8;
int *hash_comm = nullptr;
float *hash_weights = nullptr;
sumNeighWeights<<<(dev_graph.vert_sz + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK, dim3(WARP_SZ, WARPS_PER_BLOCK)>>>(
dev_graph);
dev_graph.all_w = thrust::reduce(thrust::device, dev_graph.neigh_w, dev_graph.neigh_w + dev_graph.vert_sz,
(float) 0) / 2;
thrust::sequence(thrust::device, v_set, v_set + dev_graph.vert_sz, 0);
thrust::sequence(thrust::device, dev_graph.comm, dev_graph.comm + dev_graph.vert_sz, 0);
thrust::fill(thrust::device, dev_graph.comm_sz, dev_graph.comm_sz + dev_graph.vert_sz, 1);
thrust::copy(thrust::device, dev_graph.neigh_w, dev_graph.neigh_w + dev_graph.vert_sz, dev_graph.comm_w);
float act_modularity = computeModularity(dev_graph);
float old_modularity;
bool first_loop = true, ret = true;
do {
for (int i = 1; i < bucket_sz; i++) {
unsigned int *v_set_end = thrust::partition(thrust::device, v_set, v_set + dev_graph.vert_sz,
deg(dev_graph.vertices, buckets[i - 1], buckets[i]));
unsigned int v_set_sz = v_set_end - v_set;
if (v_set_sz > 0) {
unsigned int grid_sz = (v_set_sz + blocks_dim[i - 1].y - 1) / blocks_dim[i - 1].y;
if (blocks_dim[i - 1].x <= WARP_SZ) {
unsigned int hash_sz = vert_hash_sizes[i - 1] * blocks_dim[i - 1].y;
unsigned int shmem_sz = (hash_sz + blocks_dim[i - 1].y) * (sizeof(float) + sizeof(int));
computeMoveWarp<<<grid_sz, blocks_dim[i - 1], shmem_sz>>>(dev_graph, v_set, v_set_sz, prime[i - 1],
vert_hash_sizes[i - 1]);
} else if (buckets[i] == 319) {
unsigned int hash_sz = vert_hash_sizes[i - 1];
unsigned int shmem_sz = (hash_sz + blocks_dim[i - 1].x + 1) * sizeof(float) +
(hash_sz + 2 * blocks_dim[i - 1].x) * sizeof(int);
computeMoveBlock<<<grid_sz, blocks_dim[i - 1], shmem_sz>>>(dev_graph, v_set, v_set_sz, prime[i - 1],
vert_hash_sizes[i - 1]);
} else {
unsigned int hash_sz = prime[i - 1];
unsigned int shmem_sz = blocks_dim[i - 1].x * (sizeof(float) + 2 * sizeof(int)) + sizeof(float);
if (!hash_comm || !hash_weights) {
HANDLE_ERROR(cudaMalloc((void **) &hash_comm, v_set_sz * hash_sz * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void **) &hash_weights, v_set_sz * hash_sz * sizeof(float)));
}
computeMoveGlobal<<<grid_sz, blocks_dim[i - 1], shmem_sz>>>(dev_graph, v_set, v_set_sz,
prime[i - 1],
hash_sz, hash_comm, hash_weights);
}
assignCommunity<<<(v_set_sz + THREADS_N - 1) / THREADS_N, THREADS_N>>>(
dev_graph, v_set, v_set_sz);
thrust::fill(thrust::device, dev_graph.comm_w, dev_graph.comm_w + dev_graph.vert_sz, 0.0);
computeNewCommWeights<<<(dev_graph.vert_sz + THREADS_N - 1) / THREADS_N, THREADS_N>>>(dev_graph);
}
}
old_modularity = act_modularity;
act_modularity = computeModularity(dev_graph);
if (first_loop && act_modularity - old_modularity < threshold) {
std::cout << old_modularity << std::endl;
ret = false;
}
first_loop = false;
} while (act_modularity - old_modularity >= threshold);
if (hash_comm || hash_weights) {
HANDLE_ERROR(cudaFree(hash_comm));
HANDLE_ERROR(cudaFree(hash_weights));
}
if (ret) {
graphVertToComm<<<(dev_graph.init_sz + THREADS_N - 1) / THREADS_N, THREADS_N>>>(dev_graph);
}
return ret;
}
|
0200c7786220d516bbdf5a341d9f41a452cd54ec.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pad_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void PadImageConstNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageReflectNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageEdgeNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageConstNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageReflectNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageEdgeNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageGradientConstNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / width;
const int pw = index % width + pad_l;
const int ph = nc % height + pad_t;
nc /= height;
bottom_diff[index] =
top_diff[(nc * padded_height + ph) * padded_width + pw];
}
}
template <typename T>
__global__ void PadImageGradientReflectNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientEdgeNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientConstNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % width + pad_l;
n /= width;
const int ph = n % height + pad_t;
n /= height;
bottom_diff[index] =
top_diff[((n * padded_height + ph) * padded_width + pw) * channels + c];
}
}
template <typename T>
__global__ void PadImageGradientReflectNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientEdgeNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
} // namespace
template <>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
const int num = X.dim32(0);
const int channels = X.dim32(1);
const int height = X.dim32(2);
const int width = X.dim32(3);
auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, channels);
auto* Y = Output(0, sizes, at::dtype<float>());
const int output_size = Y->numel();
const int padded_height = Y->dim32(2);
const int padded_width = Y->dim32(3);
const float* Xdata = X.data<float>();
float* Ydata = Y->template mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageConstNCHW<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageReflectNCHW<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageEdgeNCHW<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
const int num = X.dim32(0);
const int height = X.dim32(1);
const int width = X.dim32(2);
const int channels = X.dim32(3);
auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, channels);
auto* Y = Output(0, sizes, at::dtype<float>());
const int output_size = Y->numel();
const int padded_height = Y->dim32(1);
const int padded_width = Y->dim32(2);
const float* Xdata = X.data<float>();
float* Ydata = Y->template mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageConstNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageReflectNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageEdgeNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& dY = Input(0);
auto* dX = Output(0, { dY.dim32(0),
dY.dim32(1),
dY.dim32(2) - pad_t() - pad_b(),
dY.dim32(3) - pad_l() - pad_r()}, at::dtype<float>());
const int input_size = dY.numel();
const int padded_height = dY.dim32(2);
const int padded_width = dY.dim32(3);
const int output_size = dX->numel();
const int num = dX->dim32(0);
const int channels = dX->dim32(1);
const int height = dX->dim32(2);
const int width = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageGradientConstNCHW<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageGradientReflectNCHW<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageGradientEdgeNCHW<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& dY = Input(0);
auto* dX = Output(0, { dY.dim32(0),
dY.dim32(1) - pad_t() - pad_b(),
dY.dim32(2) - pad_l() - pad_r(),
dY.dim32(3)}, at::dtype<float>());
const int input_size = dY.numel();
const int padded_height = dY.dim32(1);
const int padded_width = dY.dim32(2);
const int output_size = dX->numel();
const int num = dX->dim32(0);
const int height = dX->dim32(1);
const int width = dX->dim32(2);
const int channels = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
hipLaunchKernelGGL(( PadImageGradientConstNHWC<float>),
dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
hipLaunchKernelGGL(( PadImageGradientReflectNHWC<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
hipLaunchKernelGGL(( PadImageGradientEdgeNHWC<float>),
dim3(CAFFE_GET_BLOCKS(input_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
REGISTER_CUDA_OPERATOR(PadImage, PadImageOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PadImageGradient,
PadImageGradientOp<float, CUDAContext>);
} // namespace caffe2
|
0200c7786220d516bbdf5a341d9f41a452cd54ec.cu
|
#include <algorithm>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pad_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void PadImageConstNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageReflectNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageEdgeNCHW(
const int nthreads, const T* const bottom_data, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] = bottom_data[(nc * height + h) * width + w];
}
}
template <typename T>
__global__ void PadImageConstNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T value, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = ph - pad_t;
const int w = pw - pad_l;
top_data[index] = (h < 0 || w < 0 || h >= height || w >= width)
? value
: bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageReflectNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageEdgeNHWC(
const int nthreads, const T* const bottom_data, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
top_data[index] =
bottom_data[((n * height + h) * width + w) * channels + c];
}
}
template <typename T>
__global__ void PadImageGradientConstNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / width;
const int pw = index % width + pad_l;
const int ph = nc % height + pad_t;
nc /= height;
bottom_diff[index] =
top_diff[(nc * padded_height + ph) * padded_width + pw];
}
}
template <typename T>
__global__ void PadImageGradientReflectNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientEdgeNCHW(
const int nthreads, const T* const top_diff, const int num,
const int channels, const int height, const int width,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int nc = index / padded_width;
const int pw = index % padded_width;
const int ph = nc % padded_height;
nc /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(&bottom_diff[(nc * height + h) * width + w], top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientConstNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % width + pad_l;
n /= width;
const int ph = n % height + pad_t;
n /= height;
bottom_diff[index] =
top_diff[((n * padded_height + ph) * padded_width + pw) * channels + c];
}
}
template <typename T>
__global__ void PadImageGradientReflectNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
int h = ph - pad_t;
int w = pw - pad_l;
h = max(h, -h);
w = max(w, -w);
h = min(h, 2 * height - h - 2);
w = min(w, 2 * width - w - 2);
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
template <typename T>
__global__ void PadImageGradientEdgeNHWC(
const int nthreads, const T* const top_diff, const int num,
const int height, const int width, const int channels,
const int padded_height, const int padded_width,
const int pad_t, const int pad_l, T* const bottom_diff) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / channels;
const int c = index % channels;
const int pw = n % padded_width;
n /= padded_width;
const int ph = n % padded_height;
n /= padded_height;
const int h = min(height - 1, max(ph - pad_t, 0));
const int w = min(width - 1, max(pw - pad_l, 0));
atomicAdd(
&bottom_diff[((n * height + h) * width + w) * channels + c],
top_diff[index]);
}
}
} // namespace
template <>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
const int num = X.dim32(0);
const int channels = X.dim32(1);
const int height = X.dim32(2);
const int width = X.dim32(3);
auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, channels);
auto* Y = Output(0, sizes, at::dtype<float>());
const int output_size = Y->numel();
const int padded_height = Y->dim32(2);
const int padded_width = Y->dim32(3);
const float* Xdata = X.data<float>();
float* Ydata = Y->template mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
PadImageConstNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
PadImageReflectNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
PadImageEdgeNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
const int num = X.dim32(0);
const int height = X.dim32(1);
const int width = X.dim32(2);
const int channels = X.dim32(3);
auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, channels);
auto* Y = Output(0, sizes, at::dtype<float>());
const int output_size = Y->numel();
const int padded_height = Y->dim32(1);
const int padded_width = Y->dim32(2);
const float* Xdata = X.data<float>();
float* Ydata = Y->template mutable_data<float>();
switch (mode_) {
case PadMode::CONSTANT:
PadImageConstNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
value_,
Ydata);
break;
case PadMode::REFLECT:
PadImageReflectNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
case PadMode::EDGE:
PadImageEdgeNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
Xdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
Ydata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
auto& dY = Input(0);
auto* dX = Output(0, { dY.dim32(0),
dY.dim32(1),
dY.dim32(2) - pad_t() - pad_b(),
dY.dim32(3) - pad_l() - pad_r()}, at::dtype<float>());
const int input_size = dY.numel();
const int padded_height = dY.dim32(2);
const int padded_width = dY.dim32(3);
const int output_size = dX->numel();
const int num = dX->dim32(0);
const int channels = dX->dim32(1);
const int height = dX->dim32(2);
const int width = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
PadImageGradientConstNCHW<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
PadImageGradientReflectNCHW<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
PadImageGradientEdgeNCHW<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
channels,
height,
width,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
template<>
bool PadImageGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
auto& dY = Input(0);
auto* dX = Output(0, { dY.dim32(0),
dY.dim32(1) - pad_t() - pad_b(),
dY.dim32(2) - pad_l() - pad_r(),
dY.dim32(3)}, at::dtype<float>());
const int input_size = dY.numel();
const int padded_height = dY.dim32(1);
const int padded_width = dY.dim32(2);
const int output_size = dX->numel();
const int num = dX->dim32(0);
const int height = dX->dim32(1);
const int width = dX->dim32(2);
const int channels = dX->dim32(3);
const float* dYdata = dY.data<float>();
float* dXdata = dX->template mutable_data<float>();
math::Set<float, CUDAContext>(output_size, 0, dXdata, &context_);
switch (mode_) {
case PadMode::CONSTANT:
PadImageGradientConstNHWC<float><<<
CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::REFLECT:
PadImageGradientReflectNHWC<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
case PadMode::EDGE:
PadImageGradientEdgeNHWC<float><<<
CAFFE_GET_BLOCKS(input_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
input_size,
dYdata,
num,
height,
width,
channels,
padded_height,
padded_width,
pad_t(),
pad_l(),
dXdata);
break;
}
return true;
}
REGISTER_CUDA_OPERATOR(PadImage, PadImageOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PadImageGradient,
PadImageGradientOp<float, CUDAContext>);
} // namespace caffe2
|
d714e7a01aac6271b09a032a2f024a78707ffb25.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/embedding/persistent_table_key_value_store.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/embedding/persistent_table.h"
#include <robin_hood.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <dirent.h>
namespace oneflow {
namespace embedding {
namespace {
class IteratorImpl : public KVIterator {
public:
OF_DISALLOW_COPY_AND_MOVE(IteratorImpl);
IteratorImpl(PersistentTable::Iterator* base_iter, uint32_t key_size, uint32_t value_size,
uint32_t max_query_length, void* host_keys_buffer, void* host_values_buffer,
uint32_t* host_num_buffer)
: base_iter_(base_iter),
key_size_(key_size),
value_size_(value_size),
max_query_length_(max_query_length),
host_keys_buffer_(host_keys_buffer),
host_values_buffer_(host_values_buffer),
host_num_buffer_(host_num_buffer) {}
~IteratorImpl() override = default;
void NextN(ep::Stream* stream, uint32_t n_request, uint32_t* n_result, void* keys,
void* values) override {
CHECK_LE(n_request, max_query_length_);
auto cuda_stream = stream->As<ep::CudaStream>();
CHECK_JUST(cuda_stream->Sync());
base_iter_->Next(n_request, host_num_buffer_, host_keys_buffer_, host_values_buffer_);
OF_CUDA_CHECK(hipMemcpyAsync(n_result, host_num_buffer_, sizeof(uint32_t), hipMemcpyDefault,
cuda_stream->cuda_stream()));
const uint32_t num_keys = *host_num_buffer_;
if (num_keys != 0) {
OF_CUDA_CHECK(hipMemcpyAsync(keys, host_keys_buffer_, num_keys * key_size_,
hipMemcpyDefault, cuda_stream->cuda_stream()));
OF_CUDA_CHECK(hipMemcpyAsync(values, host_values_buffer_, num_keys * value_size_,
hipMemcpyDefault, cuda_stream->cuda_stream()));
}
}
void Reset() override { base_iter_->Reset(); }
private:
PersistentTable::Iterator* base_iter_;
uint32_t key_size_;
uint32_t value_size_;
uint32_t max_query_length_;
void* host_keys_buffer_;
void* host_values_buffer_;
uint32_t* host_num_buffer_;
};
template<typename Key>
class KeyValueStoreImpl : public KeyValueStore {
public:
OF_DISALLOW_COPY_AND_MOVE(KeyValueStoreImpl);
explicit KeyValueStoreImpl(const PersistentTableKeyValueStoreOptions& options)
: device_index_(-1), max_query_length_(0) {
OF_CUDA_CHECK(hipGetDevice(&device_index_));
key_size_ = options.table_options.key_size;
value_size_ = options.table_options.value_size;
table_ = NewPersistentTable(options.table_options);
OF_CUDA_CHECK(NumaAwareCudaMallocHost(
device_index_, reinterpret_cast<void**>(&host_query_keys_), key_size_ * max_query_length_));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_,
reinterpret_cast<void**>(&host_query_values_),
value_size_ * max_query_length_));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_, reinterpret_cast<void**>(&host_n_missing_),
sizeof(uint32_t)));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_,
reinterpret_cast<void**>(&host_missing_indices_),
sizeof(uint32_t) * max_query_length_));
}
~KeyValueStoreImpl() {
CudaCurrentDeviceGuard guard(device_index_);
if (max_query_length_ != 0) {
OF_CUDA_CHECK(hipHostFree(host_query_keys_));
OF_CUDA_CHECK(hipHostFree(host_query_values_));
OF_CUDA_CHECK(hipHostFree(host_missing_indices_));
}
OF_CUDA_CHECK(hipHostFree(host_n_missing_));
}
uint32_t KeySize() const override { return key_size_; }
uint32_t ValueSize() const override { return value_size_; }
uint32_t MaxQueryLength() const override { return max_query_length_; }
void ReserveQueryLength(uint32_t query_length) override {
CudaCurrentDeviceGuard guard(device_index_);
if (query_length <= max_query_length_) { return; }
if (max_query_length_ != 0) {
OF_CUDA_CHECK(hipHostFree(host_query_keys_));
OF_CUDA_CHECK(hipHostFree(host_query_values_));
OF_CUDA_CHECK(hipHostFree(host_missing_indices_));
}
OF_CUDA_CHECK(NumaAwareCudaMallocHost(
device_index_, reinterpret_cast<void**>(&host_query_keys_), key_size_ * query_length));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(
device_index_, reinterpret_cast<void**>(&host_query_values_), value_size_ * query_length));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_,
reinterpret_cast<void**>(&host_missing_indices_),
sizeof(uint32_t) * query_length));
max_query_length_ = query_length;
}
using KeyValueStore::Get;
void Get(ep::Stream* stream, uint32_t num_keys, const void* keys, void* values,
uint32_t* n_missing, uint32_t* missing_indices) override;
void Put(ep::Stream* stream, uint32_t num_keys, const void* keys, const void* values) override;
bool SnapshotExists(const std::string& name) override;
void LoadSnapshot(const std::string& name) override;
void LoadSnapshot(const std::string& name,
const std::function<void(KVIterator* iter)>& Hook) override;
void SaveSnapshot(const std::string& name) override;
private:
int device_index_;
uint32_t max_query_length_;
uint32_t key_size_;
uint32_t value_size_;
Key* host_query_keys_{};
uint8_t* host_query_values_{};
uint32_t* host_n_missing_{};
uint32_t* host_missing_indices_{};
std::mutex mutex_;
std::unique_ptr<PersistentTable> table_;
};
template<typename Key>
void KeyValueStoreImpl<Key>::Get(ep::Stream* stream, uint32_t num_keys, const void* keys,
void* values, uint32_t* n_missing, uint32_t* missing_indices) {
std::lock_guard<std::mutex> lock(mutex_);
auto cuda_stream = stream->As<ep::CudaStream>();
CHECK_LE(num_keys, max_query_length_);
if (num_keys == 0) {
OF_CUDA_CHECK(hipMemsetAsync(n_missing, 0, sizeof(uint32_t),
stream->As<ep::CudaStream>()->cuda_stream()));
return;
}
OF_CUDA_CHECK(hipMemcpyAsync(host_query_keys_, keys, key_size_ * num_keys, hipMemcpyDefault,
cuda_stream->cuda_stream()));
CHECK_JUST(cuda_stream->Sync());
table_->Get(num_keys, host_query_keys_, host_query_values_, host_n_missing_,
host_missing_indices_);
OF_CUDA_CHECK(hipMemcpyAsync(values, host_query_values_, num_keys * value_size_,
hipMemcpyDefault, cuda_stream->cuda_stream()));
OF_CUDA_CHECK(hipMemcpyAsync(n_missing, host_n_missing_, sizeof(uint32_t), hipMemcpyDefault,
cuda_stream->cuda_stream()));
OF_CUDA_CHECK(hipMemcpyAsync(missing_indices, host_missing_indices_,
(*host_n_missing_) * sizeof(uint32_t), hipMemcpyDefault,
cuda_stream->cuda_stream()));
}
template<typename Key>
void KeyValueStoreImpl<Key>::Put(ep::Stream* stream, uint32_t num_keys, const void* keys,
const void* values) {
std::lock_guard<std::mutex> lock(mutex_);
auto cuda_stream = stream->As<ep::CudaStream>();
CHECK_LE(num_keys, max_query_length_);
if (num_keys == 0) { return; }
OF_CUDA_CHECK(hipMemcpyAsync(host_query_keys_, keys, key_size_ * num_keys, hipMemcpyDefault,
cuda_stream->cuda_stream()));
OF_CUDA_CHECK(hipMemcpyAsync(host_query_values_, values, value_size_ * num_keys,
hipMemcpyDefault, cuda_stream->cuda_stream()));
CHECK_JUST(cuda_stream->Sync());
table_->Put(num_keys, host_query_keys_, host_query_values_);
}
template<typename Key>
bool KeyValueStoreImpl<Key>::SnapshotExists(const std::string& name) {
return table_->SnapshotExists(name);
}
template<typename Key>
void KeyValueStoreImpl<Key>::LoadSnapshot(const std::string& name) {
CudaCurrentDeviceGuard guard(device_index_);
LoadSnapshot(name, nullptr);
}
template<typename Key>
void KeyValueStoreImpl<Key>::LoadSnapshot(const std::string& name,
const std::function<void(KVIterator* iter)>& Hook) {
CudaCurrentDeviceGuard guard(device_index_);
if (Hook) {
table_->LoadSnapshot(name, [&](PersistentTable::Iterator* chunk_iterator) {
IteratorImpl iterator(chunk_iterator, KeySize(), ValueSize(), max_query_length_,
host_query_keys_, host_query_values_, host_n_missing_);
Hook(&iterator);
});
} else {
table_->LoadSnapshot(name);
}
}
template<typename Key>
void KeyValueStoreImpl<Key>::SaveSnapshot(const std::string& name) {
CudaCurrentDeviceGuard guard(device_index_);
table_->SaveSnapshot(name);
}
} // namespace
std::unique_ptr<KeyValueStore> NewPersistentTableKeyValueStore(
const PersistentTableKeyValueStoreOptions& options) {
if (options.table_options.key_size == sizeof(uint64_t)) {
return std::unique_ptr<KeyValueStore>(new KeyValueStoreImpl<uint64_t>(options));
} else if (options.table_options.key_size == sizeof(uint32_t)) {
return std::unique_ptr<KeyValueStore>(new KeyValueStoreImpl<uint32_t>(options));
} else {
UNIMPLEMENTED();
return nullptr;
}
}
} // namespace embedding
} // namespace oneflow
|
d714e7a01aac6271b09a032a2f024a78707ffb25.cu
|
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/embedding/persistent_table_key_value_store.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/embedding/persistent_table.h"
#include <robin_hood.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <dirent.h>
namespace oneflow {
namespace embedding {
namespace {
class IteratorImpl : public KVIterator {
public:
OF_DISALLOW_COPY_AND_MOVE(IteratorImpl);
IteratorImpl(PersistentTable::Iterator* base_iter, uint32_t key_size, uint32_t value_size,
uint32_t max_query_length, void* host_keys_buffer, void* host_values_buffer,
uint32_t* host_num_buffer)
: base_iter_(base_iter),
key_size_(key_size),
value_size_(value_size),
max_query_length_(max_query_length),
host_keys_buffer_(host_keys_buffer),
host_values_buffer_(host_values_buffer),
host_num_buffer_(host_num_buffer) {}
~IteratorImpl() override = default;
void NextN(ep::Stream* stream, uint32_t n_request, uint32_t* n_result, void* keys,
void* values) override {
CHECK_LE(n_request, max_query_length_);
auto cuda_stream = stream->As<ep::CudaStream>();
CHECK_JUST(cuda_stream->Sync());
base_iter_->Next(n_request, host_num_buffer_, host_keys_buffer_, host_values_buffer_);
OF_CUDA_CHECK(cudaMemcpyAsync(n_result, host_num_buffer_, sizeof(uint32_t), cudaMemcpyDefault,
cuda_stream->cuda_stream()));
const uint32_t num_keys = *host_num_buffer_;
if (num_keys != 0) {
OF_CUDA_CHECK(cudaMemcpyAsync(keys, host_keys_buffer_, num_keys * key_size_,
cudaMemcpyDefault, cuda_stream->cuda_stream()));
OF_CUDA_CHECK(cudaMemcpyAsync(values, host_values_buffer_, num_keys * value_size_,
cudaMemcpyDefault, cuda_stream->cuda_stream()));
}
}
void Reset() override { base_iter_->Reset(); }
private:
PersistentTable::Iterator* base_iter_;
uint32_t key_size_;
uint32_t value_size_;
uint32_t max_query_length_;
void* host_keys_buffer_;
void* host_values_buffer_;
uint32_t* host_num_buffer_;
};
template<typename Key>
class KeyValueStoreImpl : public KeyValueStore {
public:
OF_DISALLOW_COPY_AND_MOVE(KeyValueStoreImpl);
explicit KeyValueStoreImpl(const PersistentTableKeyValueStoreOptions& options)
: device_index_(-1), max_query_length_(0) {
OF_CUDA_CHECK(cudaGetDevice(&device_index_));
key_size_ = options.table_options.key_size;
value_size_ = options.table_options.value_size;
table_ = NewPersistentTable(options.table_options);
OF_CUDA_CHECK(NumaAwareCudaMallocHost(
device_index_, reinterpret_cast<void**>(&host_query_keys_), key_size_ * max_query_length_));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_,
reinterpret_cast<void**>(&host_query_values_),
value_size_ * max_query_length_));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_, reinterpret_cast<void**>(&host_n_missing_),
sizeof(uint32_t)));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_,
reinterpret_cast<void**>(&host_missing_indices_),
sizeof(uint32_t) * max_query_length_));
}
~KeyValueStoreImpl() {
CudaCurrentDeviceGuard guard(device_index_);
if (max_query_length_ != 0) {
OF_CUDA_CHECK(cudaFreeHost(host_query_keys_));
OF_CUDA_CHECK(cudaFreeHost(host_query_values_));
OF_CUDA_CHECK(cudaFreeHost(host_missing_indices_));
}
OF_CUDA_CHECK(cudaFreeHost(host_n_missing_));
}
uint32_t KeySize() const override { return key_size_; }
uint32_t ValueSize() const override { return value_size_; }
uint32_t MaxQueryLength() const override { return max_query_length_; }
void ReserveQueryLength(uint32_t query_length) override {
CudaCurrentDeviceGuard guard(device_index_);
if (query_length <= max_query_length_) { return; }
if (max_query_length_ != 0) {
OF_CUDA_CHECK(cudaFreeHost(host_query_keys_));
OF_CUDA_CHECK(cudaFreeHost(host_query_values_));
OF_CUDA_CHECK(cudaFreeHost(host_missing_indices_));
}
OF_CUDA_CHECK(NumaAwareCudaMallocHost(
device_index_, reinterpret_cast<void**>(&host_query_keys_), key_size_ * query_length));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(
device_index_, reinterpret_cast<void**>(&host_query_values_), value_size_ * query_length));
OF_CUDA_CHECK(NumaAwareCudaMallocHost(device_index_,
reinterpret_cast<void**>(&host_missing_indices_),
sizeof(uint32_t) * query_length));
max_query_length_ = query_length;
}
using KeyValueStore::Get;
void Get(ep::Stream* stream, uint32_t num_keys, const void* keys, void* values,
uint32_t* n_missing, uint32_t* missing_indices) override;
void Put(ep::Stream* stream, uint32_t num_keys, const void* keys, const void* values) override;
bool SnapshotExists(const std::string& name) override;
void LoadSnapshot(const std::string& name) override;
void LoadSnapshot(const std::string& name,
const std::function<void(KVIterator* iter)>& Hook) override;
void SaveSnapshot(const std::string& name) override;
private:
int device_index_;
uint32_t max_query_length_;
uint32_t key_size_;
uint32_t value_size_;
Key* host_query_keys_{};
uint8_t* host_query_values_{};
uint32_t* host_n_missing_{};
uint32_t* host_missing_indices_{};
std::mutex mutex_;
std::unique_ptr<PersistentTable> table_;
};
template<typename Key>
void KeyValueStoreImpl<Key>::Get(ep::Stream* stream, uint32_t num_keys, const void* keys,
void* values, uint32_t* n_missing, uint32_t* missing_indices) {
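  // Get() stages the lookup through the pinned host buffers sized by ReserveQueryLength():
  // keys are copied device->host, the stream is synchronized, the PersistentTable is queried
  // on the CPU, and the values plus the missing-key indices are copied back host->device on
  // the same stream.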
std::lock_guard<std::mutex> lock(mutex_);
auto cuda_stream = stream->As<ep::CudaStream>();
CHECK_LE(num_keys, max_query_length_);
if (num_keys == 0) {
OF_CUDA_CHECK(cudaMemsetAsync(n_missing, 0, sizeof(uint32_t),
stream->As<ep::CudaStream>()->cuda_stream()));
return;
}
OF_CUDA_CHECK(cudaMemcpyAsync(host_query_keys_, keys, key_size_ * num_keys, cudaMemcpyDefault,
cuda_stream->cuda_stream()));
CHECK_JUST(cuda_stream->Sync());
table_->Get(num_keys, host_query_keys_, host_query_values_, host_n_missing_,
host_missing_indices_);
OF_CUDA_CHECK(cudaMemcpyAsync(values, host_query_values_, num_keys * value_size_,
cudaMemcpyDefault, cuda_stream->cuda_stream()));
OF_CUDA_CHECK(cudaMemcpyAsync(n_missing, host_n_missing_, sizeof(uint32_t), cudaMemcpyDefault,
cuda_stream->cuda_stream()));
OF_CUDA_CHECK(cudaMemcpyAsync(missing_indices, host_missing_indices_,
(*host_n_missing_) * sizeof(uint32_t), cudaMemcpyDefault,
cuda_stream->cuda_stream()));
}
template<typename Key>
void KeyValueStoreImpl<Key>::Put(ep::Stream* stream, uint32_t num_keys, const void* keys,
const void* values) {
std::lock_guard<std::mutex> lock(mutex_);
auto cuda_stream = stream->As<ep::CudaStream>();
CHECK_LE(num_keys, max_query_length_);
if (num_keys == 0) { return; }
OF_CUDA_CHECK(cudaMemcpyAsync(host_query_keys_, keys, key_size_ * num_keys, cudaMemcpyDefault,
cuda_stream->cuda_stream()));
OF_CUDA_CHECK(cudaMemcpyAsync(host_query_values_, values, value_size_ * num_keys,
cudaMemcpyDefault, cuda_stream->cuda_stream()));
CHECK_JUST(cuda_stream->Sync());
table_->Put(num_keys, host_query_keys_, host_query_values_);
}
template<typename Key>
bool KeyValueStoreImpl<Key>::SnapshotExists(const std::string& name) {
return table_->SnapshotExists(name);
}
template<typename Key>
void KeyValueStoreImpl<Key>::LoadSnapshot(const std::string& name) {
CudaCurrentDeviceGuard guard(device_index_);
LoadSnapshot(name, nullptr);
}
template<typename Key>
void KeyValueStoreImpl<Key>::LoadSnapshot(const std::string& name,
const std::function<void(KVIterator* iter)>& Hook) {
CudaCurrentDeviceGuard guard(device_index_);
if (Hook) {
table_->LoadSnapshot(name, [&](PersistentTable::Iterator* chunk_iterator) {
IteratorImpl iterator(chunk_iterator, KeySize(), ValueSize(), max_query_length_,
host_query_keys_, host_query_values_, host_n_missing_);
Hook(&iterator);
});
} else {
table_->LoadSnapshot(name);
}
}
template<typename Key>
void KeyValueStoreImpl<Key>::SaveSnapshot(const std::string& name) {
CudaCurrentDeviceGuard guard(device_index_);
table_->SaveSnapshot(name);
}
} // namespace
std::unique_ptr<KeyValueStore> NewPersistentTableKeyValueStore(
const PersistentTableKeyValueStoreOptions& options) {
if (options.table_options.key_size == sizeof(uint64_t)) {
return std::unique_ptr<KeyValueStore>(new KeyValueStoreImpl<uint64_t>(options));
} else if (options.table_options.key_size == sizeof(uint32_t)) {
return std::unique_ptr<KeyValueStore>(new KeyValueStoreImpl<uint32_t>(options));
} else {
UNIMPLEMENTED();
return nullptr;
}
}
} // namespace embedding
} // namespace oneflow
|
bfb1cf93e6ce0ac8992cbd9a7665d3fce1b9288e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <stdlib.h>
#include <assert.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
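/* add() uses a grid-stride loop, so any launch geometry covers all n elements.
   A minimal usage sketch (hypothetical: d_x and d_y would be device buffers of n floats;
   main() below only exercises MatMulKernel and never launches add()):
     int blockSize = 256;
     int numBlocks = (n + blockSize - 1) / blockSize;
     hipLaunchKernelGGL(add, dim3(numBlocks), dim3(blockSize), 0, 0, n, d_x, d_y);
*/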
typedef struct {
int width;
int height;
float * elements;
} Matrix;
Matrix initMatrix(int height, int width) {
Matrix A;
A.width = width;
A.height = height;
A.elements = (float*)malloc(width * height * sizeof(float));
return A;
}
void setRandom(Matrix A) {
for (int i = 0; i < A.height; i++)
for (int j = 0; j < A.width; j++)
A.elements[i*A.width + j] = (float)(rand() % 3);
}
void printMatrix(Matrix A){
for (int i = 0; i < A.height; i++)
for(int j = 0; j < A.width; j++) {
if ( j == 0 ) printf("\n");
printf(" %f ", A.elements[i*A.width + j]);
}
printf("\n");
}
float cell(Matrix A, int row, int column) {
return A.elements[row * A.width + column];
}
Matrix allocateMatrixToDevice(Matrix A) {
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipError_t err = hipMalloc(&d_A.elements, size);
printf("CUDA malloc Matrix : %s\n", hipGetErrorString(err));
err = hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
printf("Copy Matrix to device: %s\n",hipGetErrorString(err));
return d_A;
}
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
float Cvalue = 0.0;
/* calculate value for C(row, column) */
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
  /* not every thread in the grid maps to an element of C, since the grid may be larger than C */
  if (row >= A.height || col >= B.width) return;
/* we are using Row Major representation for the matrix */
for (int e = 0; e < A.width; ++e) {
int a = row * A.width + e; /* row major, so just add e to index*/
int b = e * B.width + col; /* row major, so multiply index by e */
Cvalue += (A.elements[a] * B.elements[b]);
}
C.elements[row * C.width + col] = Cvalue;
}
void matmul(Matrix A, Matrix B, Matrix C) {
/* copy the matrices to the GPU */
Matrix d_A = allocateMatrixToDevice(A);
Matrix d_B = allocateMatrixToDevice(B);
Matrix d_C = allocateMatrixToDevice(C);
/* specify 2 dimensional blocks of 16 x 16 = 256 threads per block */
dim3 dimBlock(16,16);
/* calculate how many blocks we need to perform the calculation */
/* the grid is based on the size of the product matrix */
/* ie: A(2,3) * B(3,4) = C(2,4) */
/* A(height,width) * B(height,width) = C(A height, B width) */
dim3 dimGrid(
( (B.width + dimBlock.x - 1 ) / dimBlock.x),
( (A.height + dimBlock.y -1 ) / dimBlock.y)
);
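  /* e.g. for the 4x4 matrices built in main(): ((4+15)/16, (4+15)/16) = (1,1),
     so a single 16x16 block covers the whole product matrix C */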
/* launch a grid and run the kernel function*/
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A,d_B,d_C);
/* wait for all threads to finish */
hipError_t err = hipDeviceSynchronize();
err = hipMemcpy(C.elements, d_C.elements, C.height * C.width * sizeof(float), hipMemcpyDeviceToHost);
  hipFree(d_A.elements);
  hipFree(d_B.elements);
  hipFree(d_C.elements);
}
int main(void)
{
Matrix A = initMatrix(4,4);
Matrix B = initMatrix(4,4);
Matrix C = initMatrix(4,4);
setRandom(A);
setRandom(B);
printMatrix(A);
printMatrix(B);
matmul(A,B,C);
printMatrix(C);
float c_0_0 = cell(A,0,0) * cell(B,0,0) + cell(A,0,1) * cell(B,1,0) + cell(A,0,2) * cell(B,2,0) + cell(A,0,3) * cell(B,3,0);
printf("%f\n", c_0_0);
assert(c_0_0 == cell(C,0,0));
}
|
bfb1cf93e6ce0ac8992cbd9a7665d3fce1b9288e.cu
|
#include <iostream>
#include <math.h>
#include <stdlib.h>
#include <assert.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
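/* add() uses a grid-stride loop, so any launch geometry covers all n elements.
   A minimal usage sketch (hypothetical: d_x and d_y would be device buffers of n floats;
   main() below only exercises MatMulKernel and never launches add()):
     int blockSize = 256;
     int numBlocks = (n + blockSize - 1) / blockSize;
     add<<<numBlocks, blockSize>>>(n, d_x, d_y);
*/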
typedef struct {
int width;
int height;
float * elements;
} Matrix;
Matrix initMatrix(int height, int width) {
Matrix A;
A.width = width;
A.height = height;
A.elements = (float*)malloc(width * height * sizeof(float));
return A;
}
void setRandom(Matrix A) {
for (int i = 0; i < A.height; i++)
for (int j = 0; j < A.width; j++)
A.elements[i*A.width + j] = (float)(rand() % 3);
}
void printMatrix(Matrix A){
for (int i = 0; i < A.height; i++)
for(int j = 0; j < A.width; j++) {
if ( j == 0 ) printf("\n");
printf(" %f ", A.elements[i*A.width + j]);
}
printf("\n");
}
float cell(Matrix A, int row, int column) {
return A.elements[row * A.width + column];
}
Matrix allocateMatrixToDevice(Matrix A) {
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaError_t err = cudaMalloc(&d_A.elements, size);
printf("CUDA malloc Matrix : %s\n", cudaGetErrorString(err));
err = cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
printf("Copy Matrix to device: %s\n",cudaGetErrorString(err));
return d_A;
}
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
float Cvalue = 0.0;
/* calculate value for C(row, column) */
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
  /* not every thread in the grid maps to an element of C, since the grid may be larger than C */
  if (row >= A.height || col >= B.width) return;
/* we are using Row Major representation for the matrix */
for (int e = 0; e < A.width; ++e) {
int a = row * A.width + e; /* row major, so just add e to index*/
int b = e * B.width + col; /* row major, so multiply index by e */
Cvalue += (A.elements[a] * B.elements[b]);
}
C.elements[row * C.width + col] = Cvalue;
}
void matmul(Matrix A, Matrix B, Matrix C) {
/* copy the matrices to the GPU */
Matrix d_A = allocateMatrixToDevice(A);
Matrix d_B = allocateMatrixToDevice(B);
Matrix d_C = allocateMatrixToDevice(C);
/* specify 2 dimensional blocks of 16 x 16 = 256 threads per block */
dim3 dimBlock(16,16);
/* calculate how many blocks we need to perform the calculation */
/* the grid is based on the size of the product matrix */
/* ie: A(2,3) * B(3,4) = C(2,4) */
/* A(height,width) * B(height,width) = C(A height, B width) */
dim3 dimGrid(
( (B.width + dimBlock.x - 1 ) / dimBlock.x),
( (A.height + dimBlock.y -1 ) / dimBlock.y)
);
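  /* e.g. for the 4x4 matrices built in main(): ((4+15)/16, (4+15)/16) = (1,1),
     so a single 16x16 block covers the whole product matrix C */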
/* launch a grid and run the kernel function*/
MatMulKernel<<<dimGrid, dimBlock>>>(d_A,d_B,d_C);
/* wait for all threads to finish */
  cudaError_t err = cudaDeviceSynchronize();
err = cudaMemcpy(C.elements, d_C.elements, C.height * C.width * sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(d_A.elements);
  cudaFree(d_B.elements);
  cudaFree(d_C.elements);
}
int main(void)
{
Matrix A = initMatrix(4,4);
Matrix B = initMatrix(4,4);
Matrix C = initMatrix(4,4);
setRandom(A);
setRandom(B);
printMatrix(A);
printMatrix(B);
matmul(A,B,C);
printMatrix(C);
float c_0_0 = cell(A,0,0) * cell(B,0,0) + cell(A,0,1) * cell(B,1,0) + cell(A,0,2) * cell(B,2,0) + cell(A,0,3) * cell(B,3,0);
printf("%f\n", c_0_0);
assert(c_0_0 == cell(C,0,0));
}
|
af5398ee0b4e7c341a521a60d458ad5dafa65a0a.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef _CUDA_PSO_CU_
#define _CUDA_PSO_CU_
#include <hip/hip_runtime.h>
//#include <cutil_inline.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <stdint.h>
typedef uint32_t u_int32_t;
//#include <helper_cuda.h>
#include <cstdio>
#include "cudaPSO.cuh"
#include "reductions.cu"
#include "utilities.h"
#include "cutil_compat.h"
//#include "MersenneTwister.cuh"
//NOTE To print debug messages uncomment the following line
//#define PRINT_DEBUG
///Number of array elements used for each particle, usually greater than the problem dimensionality
unsigned int actualParticleSize;
///CUDA Random states global array
hiprandState_t* devStates;
//available GPU memory tracking
///Amount of used global memory
unsigned long int usedGlobalMem;
///Amount of free global memory
unsigned long int freeGlobalMem;
///Amount of available global memory
unsigned long int totGlobalMem;
//********
//Constant Memory Data
///starting coordinate of the hypercubical search space (Resides in GPU's constant memory)
__constant__ float c_minValue;
///ending coordinate of the hypercubical search space (Resides in GPU's constant memory)
__constant__ float c_maxValue;
///width of the hypercubical search space (Resides in GPU's constant memory)
__constant__ float c_deltaValue;
//********
//********
//Global Memory Data Arrays
///pointer to the GPU's global-memory array containing the current position of all particles (from all swarms, in case of multi-swarm simulation)
float *g_positions;
///pointer to the GPU's global-memory array containing the current personal best position of all particles
float *g_bestPositions;
///pointer to the GPU's global-memory array containing the current velocity of all particles
float *g_velocities;
///pointer to the GPU's global-memory array containing the current fitness of all particles
float *g_fitnesses;
///pointer to the GPU's global-memory array containing the current personal best fitness of all particles
float *g_bestFitnesses;
///pointer to the GPU's global-memory array containing the final global best fitness value
float *g_globalBestFitness;
///pointer to the GPU's global-memory array containing the coordinates of the global best position of all swarms
float *g_globalBestPositions;
///pointer to the GPU's global-memory array containing the indexes (for all particles) of the best neighbour (for the ring topology in this case)
u_int32_t *g_localBestIDs;
///pointer to the GPU's global-memory array containing the index of the best particle
u_int32_t *g_globalBestID;
///pointer to the GPU's global-memory array containing the flags saying to each particle whether to update their personal best
u_int32_t *g_update;
//********
//********
//Textures
///GPU's texture interface used for fast access to the update flags in global memory
texture<unsigned int, 1, hipReadModeElementType> t_texUpdatePositionFlags;
///GPU's texture interface used for fast access to the local best indices in global memory
texture<unsigned int, 1, hipReadModeElementType> t_texLocalBestIDs;
///GPU's texture interface used for fast access to the current particles' velocities in global memory
texture<float, 1, hipReadModeElementType> t_texVelocities;
///GPU's texture interface used for fast access to the current particles' positions in global memory
texture<float, 1, hipReadModeElementType> t_texPositions;
///GPU's texture interface used for fast access to the current particles' best positions in global memory
texture<float, 1, hipReadModeElementType> t_texBestPositions;
///GPU's texture interface used for fast access to the current particles' fitnesses in global memory
texture<float, 1, hipReadModeElementType> t_texFitnesses;
///GPU's texture interface used for fast access to the current particles' best fitnesses in global memory
texture<float, 1, hipReadModeElementType> t_texBestFitnesses;
//********
//*******************************************************************************************
// DEVICE KERNELS
//*******************************************************************************************
//includes all kernels code...
#include "cudaPSO_kernels.cuh"
//includes fitnesses computation stuff...
#include "cudaPSO_fitnesses.cuh"
__global__ void setup_kernel(hiprandState_t *state, unsigned long seed)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
/* Each thread gets same seed, a different sequence number,
no offset */
hiprand_init(seed, id, 0, &state[id]);
}
//*******************************************************************************************
// HOST ROUTINES
//*******************************************************************************************
//***********************************************************
// DATA ALLOCATION ON GPU
//***********************************************************
/// Initialization of the GPU...
/// Here global variables pointing to device memory are initialized...
/// @param particlesNumber number of particles in the swarm
/// @param problemDimension dimensionality of the problem
/// @param numberOfGenerations number of generations to be performed during the optimization
__host__ void h_cudaPSO_Init(int particlesNumber, int problemDimension, int numberOfGenerations){
#ifdef PRINT_DEBUG
printf("Allocating data structures on GPU...\n");
#endif
int dim;
//Determination of the total amount of global memory
int devID = cutGetMaxGflopsDeviceId();
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, devID);
totGlobalMem = deviceProp.totalGlobalMem;
usedGlobalMem = 0;
//To accomplish CUDA byte alignment requirements, we need data arrays with a number of elements
// for each particle which is a multiple of 16
	//The actual number of simulated problem dimensions might be greater than the required one:
	// during cost function evaluation only the needed coordinates will be considered
actualParticleSize = iAlignUp(problemDimension, 16);
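	//e.g. assuming iAlignUp() rounds its first argument up to the next multiple of the second,
	// problemDimension = 30 yields actualParticleSize = 32; the padding elements are allocated
	// but ignored during fitness evaluation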
dim = particlesNumber * actualParticleSize * sizeof(float);
#ifdef PRINT_DEBUG
printf("\t - actualParticleSize = %d\n", actualParticleSize);
#endif
//Allocation of the positions array
cudasafe(hipMalloc( (void**) &g_positions, dim), "h_init_cudaPSO: hipMalloc() execution failed\n");
cudasafe(hipMemset( g_positions, 0, dim), "h_init_cudaPSO: hipMemset() execution failed\n");
cudasafe(hipBindTexture(NULL, t_texPositions, g_positions, dim), "h_init_cudaPSO: hipBindTexture() execution failed\n");
usedGlobalMem += dim;
//Allocation of the best positions array
cudasafe(hipMalloc( (void**) &g_bestPositions, dim), "cudaPSO: hipMalloc() execution failed\n");
cudasafe(hipMemset( g_bestPositions, 0, dim), "cudaPSO: hipMemset() execution failed\n");
cudasafe(hipBindTexture(NULL, t_texBestPositions, g_bestPositions, dim), "h_cudaPSOBindBestPositionsTextures: hipBindTexture() execution failed\n");
usedGlobalMem += dim;
//Allocation of the velocities array
cudasafe(hipMalloc( (void**) &g_velocities, dim), "cudaPSO: hipMalloc() execution failed\n");
cudasafe(hipMemset( g_velocities, 0, dim), "cudaPSO: hipMemset() execution failed\n");
cudasafe(hipBindTexture(NULL, t_texVelocities, g_velocities, dim), "h_cudaPSOBindPositionsTextures: hipBindTexture() execution failed\n");
usedGlobalMem += dim;
dim = particlesNumber * sizeof(float);
//Allocation of the fitnesses array
hipMalloc( (void**) &g_fitnesses, dim);
cutilCheckMsg("cudaPSO: hipMalloc() execution failed\n");
hipMemset( g_fitnesses, 0, dim);
cutilCheckMsg("cudaPSO: hipMemset() execution failed\n");
hipBindTexture(NULL, t_texFitnesses, g_fitnesses, dim);
cutilCheckMsg("h_cudaPSOBindFitnessesTextures: hipBindTexture() execution failed\n");
usedGlobalMem += dim;
//Allocation of the best fitnesses array
hipMalloc( (void**) &g_bestFitnesses, dim);
cutilCheckMsg("cudaPSO: hipMalloc() execution failed\n");
hipMemset( g_bestFitnesses, 0, dim);
cutilCheckMsg("cudaPSO: hipMemset() execution failed\n");
hipBindTexture(NULL, t_texBestFitnesses, g_bestFitnesses, dim);
cutilCheckMsg("h_cudaPSOBindBestFitnessesTextures: hipBindTexture() execution failed\n");
usedGlobalMem += dim;
dim = particlesNumber * sizeof(unsigned int);
//Allocation of the local best ids array
hipMalloc( (void**) &g_localBestIDs, dim);
cutilCheckMsg("cudaPSO: hipMalloc() execution failed\n");
hipMemset( g_localBestIDs, 0, dim);
cutilCheckMsg("cudaPSO: hipMemset() execution failed\n");
hipBindTexture(NULL, t_texLocalBestIDs, g_localBestIDs, dim);
cutilCheckMsg("h_cudaPSO_BindUpdatePositionFlagsTexture: hipBindTexture() execution failed\n");
usedGlobalMem += dim;
//Allocation of the update flag array
hipMalloc( (void**) &g_update, dim);
cutilCheckMsg("cudaPSO: hipMalloc() execution failed\n");
hipMemset( g_update, 0, dim);
cutilCheckMsg("cudaPSO: hipMemset() execution failed\n");
hipBindTexture(NULL, t_texUpdatePositionFlags, g_update, dim);
cutilCheckMsg("h_cudaPSO_BindUpdatePositionFlagsTexture: hipBindTexture() execution failed\n");
usedGlobalMem += dim;
dim = actualParticleSize * sizeof(float);
//Allocation of the global best positions array
hipMalloc( (void**) &g_globalBestPositions, dim);
cutilCheckMsg("cudaPSO: hipMalloc() execution failed\n");
hipMemset( g_globalBestPositions, 0, dim);
cutilCheckMsg("cudaPSO: hipMemset() execution failed\n");
usedGlobalMem += dim;
dim = sizeof(float);
	//Allocation of the global best fitness value
hipMalloc( (void**) &g_globalBestFitness, dim);
cutilCheckMsg("cudaPSO: hipMalloc() execution failed\n");
hipMemset( g_globalBestFitness, 0, dim);
cutilCheckMsg("cudaPSO: hipMemset() execution failed\n");
usedGlobalMem += dim;
dim = sizeof(u_int32_t);
//Allocation of the global best id
hipMalloc( (void**) &g_globalBestID, dim);
cutilCheckMsg("cudaPSO: hipMalloc() execution failed\n");
hipMemset( g_globalBestID, 0, dim);
cutilCheckMsg("cudaPSO: hipMemset() execution failed\n");
usedGlobalMem += dim;
dim = particlesNumber * actualParticleSize * sizeof(hiprandState_t);
//Allocation of the CUDA random states
hipMalloc((void **)&devStates, dim);
cutilCheckMsg("cudaPSO: hipMalloc() execution failed\n");
usedGlobalMem += dim;
freeGlobalMem = totGlobalMem - usedGlobalMem;
/* Setup prng states */
hipLaunchKernelGGL(( setup_kernel), dim3(particlesNumber), dim3(actualParticleSize), 0, 0, devStates, time(NULL));
h_initFitnessFunctions(g_positions, particlesNumber, actualParticleSize);
#ifdef PRINT_DEBUG
printf("\t - totalGlobalMem = %ld\n", totGlobalMem);
printf("\t - usedGlobalMem = %ld\n", usedGlobalMem);
printf("\t - usedGlobalMemForRandNums = %ld\n", dim);
printf("\t - freeGlobalMem = %ld\n", freeGlobalMem);
printf("Done!\n");
#endif
}
/// Frees GPU's resources
__host__ void h_cudaPSO_Free(){
//**********************
// TEXTURES UN-BINDINGS
//**********************
clearFitnessFunctions();
hipUnbindTexture(t_texVelocities);
cutilCheckMsg("h_cudaPSO_Free: hipUnbindTexture() execution failed\n");
hipUnbindTexture(t_texPositions);
cutilCheckMsg("h_cudaPSO_Free: hipUnbindTexture() execution failed\n");
hipUnbindTexture(t_texBestPositions);
cutilCheckMsg("h_cudaPSO_Free: hipUnbindTexture() execution failed\n");
hipUnbindTexture(t_texFitnesses);
cutilCheckMsg("h_cudaPSO_Free: hipUnbindTexture() execution failed\n");
hipUnbindTexture(t_texBestFitnesses);
cutilCheckMsg("h_cudaPSO_Free: hipUnbindTexture() execution failed\n");
hipUnbindTexture(t_texUpdatePositionFlags);
cutilCheckMsg("h_cudaPSO_Free: hipUnbindTexture() execution failed\n");
hipUnbindTexture(t_texLocalBestIDs);
cutilCheckMsg("h_cudaPSO_Free: hipUnbindTexture() execution failed\n");
//**********************
// ARRAYS DE-ALLOCATION
//**********************
hipFree(g_positions);
cutilCheckMsg("h_cudaPSO_Free: hipFree() execution failed\n");
hipFree(g_bestPositions);
cutilCheckMsg("h_cudaPSO_Free: hipFree() execution failed\n");
hipFree(g_velocities);
cutilCheckMsg("h_cudaPSO_Free: hipFree() execution failed\n");
hipFree(g_fitnesses);
cutilCheckMsg("h_cudaPSO_Free: hipFree() execution failed\n");
hipFree(g_bestFitnesses);
cutilCheckMsg("h_cudaPSO_Free: hipFree() execution failed\n");
hipFree(g_globalBestPositions);
cutilCheckMsg("h_cudaPSO_Free: hipFree() execution failed\n");
hipFree(g_globalBestFitness);
cutilCheckMsg("h_cudaPSO_Free: hipFree() execution failed\n");
hipFree(g_localBestIDs);
cutilCheckMsg("h_cudaPSO_Free: hipFree() execution failed\n");
hipFree(g_globalBestID);
cutilCheckMsg("h_cudaPSO_Free: hipFree() execution failed\n");
hipFree(g_update);
cutilCheckMsg("h_cudaPSO_Free: hipFree() execution failed\n");
hipFree(devStates);
cutilCheckMsg("h_cudaPSO_Free: hipFree() execution failed\n");
}
///@brief wrapper to appropriately call the g_findGlobalBest() kernel
///@param g_globalBestFitness pointer to the GPU's global-memory array containing the final global best fitness value
///@param g_globalBestID pointer to the GPU's global-memory array containing the index of the best particle
///@param numberOfParticles swarm's size
///@param finalBestsUpdateGrid definition of the blocks grid for this kernel (containing the number of thread-blocks for each dimension of the grid)
///@param finalBestsUpdateBlock definition of the thread blocks for this kernel (containing the number of threads for each dimension of the block)
__host__ void h_findGlobalBest(float* g_globalBestFitness, u_int32_t* g_globalBestID, int numberOfParticles, dim3 finalBestsUpdateGrid, dim3 finalBestsUpdateBlock){
switch (finalBestsUpdateBlock.x){
case 8:hipLaunchKernelGGL(( g_findGlobalBest< 8>), dim3(finalBestsUpdateGrid), dim3(finalBestsUpdateBlock), 0, 0, g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
case 16:hipLaunchKernelGGL(( g_findGlobalBest< 16>), dim3(finalBestsUpdateGrid), dim3(finalBestsUpdateBlock), 0, 0, g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
case 32:hipLaunchKernelGGL(( g_findGlobalBest< 32>), dim3(finalBestsUpdateGrid), dim3(finalBestsUpdateBlock), 0, 0, g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
case 64:hipLaunchKernelGGL(( g_findGlobalBest< 64>), dim3(finalBestsUpdateGrid), dim3(finalBestsUpdateBlock), 0, 0, g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
case 128:hipLaunchKernelGGL(( g_findGlobalBest<128>), dim3(finalBestsUpdateGrid), dim3(finalBestsUpdateBlock), 0, 0, g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
case 256:hipLaunchKernelGGL(( g_findGlobalBest<256>), dim3(finalBestsUpdateGrid), dim3(finalBestsUpdateBlock), 0, 0, g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
case 512:hipLaunchKernelGGL(( g_findGlobalBest<512>), dim3(finalBestsUpdateGrid), dim3(finalBestsUpdateBlock), 0, 0, g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
}
}
/*
__host__ void h_cudaPSOPrintBestFitnesses(u_int32_t* g_globalBestID, float* g_bestFitnesses, float* g_bestPositions, float* g_globalBestFitness, float* g_globalBestPositions, float* g_positions, int particlesNumber, int actualParticleSize, int problemDimension){
//Load fintnesses values from GPU and print them to screen
//float* fitnesses = (float*) malloc(particlesNumber * sizeof(float));
//hipMemcpy(fitnesses, g_bestFitnesses, particlesNumber * sizeof(float), hipMemcpyDeviceToHost);
//cutilCheckMsg("h_cudaPSOPrintBestFitnesses: hipMemcpy() execution failed\n");
//printf(" - Fitnesses:\n ");
//for(int i = 0; i < particlesNumber; ++i)
// printf("%e ", fitnesses[i]);
//printf("\n");
//free(fitnesses);
u_int32_t h_globalBestID;
hipMemcpy(&h_globalBestID, g_globalBestID, sizeof(u_int32_t), hipMemcpyDeviceToHost);
printf(" - Best particle ID: %u\n", h_globalBestID);
float h_globalBestFitness;
hipMemcpy(&h_globalBestFitness, g_globalBestFitness, sizeof(float), hipMemcpyDeviceToHost);
printf(" - Global best fitness: %e\n", h_globalBestFitness);
int dim = actualParticleSize * sizeof(float);
float* h_globalBestPositions = (float*) malloc(dim);
hipMemcpy(h_globalBestPositions, g_globalBestPositions, dim, hipMemcpyDeviceToHost);
printf(" - Global best positions:\n");
for(int m = 0; m < problemDimension; ++m)
printf(" %2d: % 12.10e\n", m+1, h_globalBestPositions[m]);
printf("\n");
delete h_globalBestPositions;
printf("particlesNumber = %d\n", particlesNumber);
printf("problemDimension = %d\n", problemDimension);
printf("actualParticleSize = %d\n", actualParticleSize);
dim = particlesNumber * actualParticleSize;
float* h_positions = (float*) malloc(dim * sizeof(float));
hipMemcpy(h_positions, g_positions, dim * sizeof(float), hipMemcpyDeviceToHost);
cutilCheckMsg("h_cudaPSOPrintBestFitnesses: hipMemcpy() execution failed\n");
printf(" - Particle's positions:\n");
for(int j = 0; j < particlesNumber; ++j){
printf(" ");
for(int m = 0; m < problemDimension; ++m)
printf("% 3.1f", h_positions[j * actualParticleSize + m]);
printf("\n");
}
printf("\n");
free(h_positions);
dim = particlesNumber * actualParticleSize;
float* h_bestPositions = (float*) malloc(dim * sizeof(float));
hipMemcpy(h_bestPositions, g_bestPositions, dim * sizeof(float), hipMemcpyDeviceToHost);
cutilCheckMsg("h_cudaPSOPrintBestFitnesses: hipMemcpy() execution failed\n");
printf(" - Personal best positions:\n");
for(int j = 0; j < particlesNumber; ++j){
printf(" ");
for(int m = 0; m < problemDimension; ++m)
printf("% 3.1f", h_bestPositions[j * actualParticleSize + m]);
printf("\n");
}
printf("\n");
free(h_bestPositions);
}
*/
//************************
// OPTIMIZATION CALL-BACK
//************************
///Runs the actual optimization, invoking all the CUDA kernels involved in the process in the appropriate order
///@param functionID Index of the function to be optimized
///@param numberOfGenerations Number of generations/iterations in one optimization step
///@param particlesNumber Number of particles belonging to this swarm
///@param problemDimension Number of parameters to be optimized
///@param W Inertia weight (PSO algorithm)
///@param C1 Cognitive attraction factor (PSO algorithm)
///@param C2 Social attraction factor (PSO algorithm)
///@param minValue Lower limit for each dimension of the search space
///@param maxValue Upper limit for each dimension of the search space
///@param deltaValue Width of each dimension of the search space
///@param h_globalBestFitness pointer to an host variable where to store the final global best fitness value
///@param h_globalBestPosition Pointer to an host array where to store the final result (final global best position)
extern "C" __host__ void h_cudaPSO_Optimize(
int functionID,
int numberOfGenerations,
int particlesNumber,
int problemDimension,
float W,
float C1,
float C2,
float minValue,
float maxValue,
float deltaValue,
float* h_globalBestFitness,
float* h_globalBestPosition
)
{
//kernel parameters for particles initialization:
// - one thread block for each particle
// - one thread for each problem dimension
dim3 initializationGrid(particlesNumber, 1);
dim3 initializationBlock(actualParticleSize,1,1);
//kernel parameters for positions update:
// - one thread block for each particle
// - one thread for each problem dimension
dim3 updateGrid(particlesNumber, 1, 1);
dim3 updateBlock(actualParticleSize,1,1);
//kernel parameters for local bests update:
// - one thread block
// - one thread for each particle
dim3 bestsUpdateGrid(1, 1, 1);
dim3 bestsUpdateBlock(particlesNumber,1,1);
unsigned int bestsUpdateSharedAmount = (particlesNumber + 2) * sizeof(float);
//kernel parameters for the global bests update:
// - one thread block
// - the number of threads is chosen to have enough thread to perform
// a parallel reduction of the fitness values
dim3 globalBestUpdateGrid(1, 1, 1);
int thNum = (int) rint( pow(2.0f, ceil( log2( (float) particlesNumber) ) ) );
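	//thNum is the smallest power of two >= particlesNumber (e.g. 50 particles -> 64 threads),
	// matching the block sizes (8..512) handled by the switch in h_findGlobalBest()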
dim3 globalBestUpdateBlock(thNum,1,1);
//kernel parameters for the computation of fitnesses values:
// - one thread block for each individual
	// - one thread for each dimension of the problem
dim3 calculateFitnessesGrid(particlesNumber, 1, 1);
thNum = (int) rint( pow(2.0f, ceil( log2( (float) problemDimension) ) ) );
thNum = max(8, thNum);
dim3 calculateFitnessesBlock(thNum,1,1);
//kernel parameters for the global bests update:
// - one thread block
// - one thread for each dimension of the problem
dim3 globalBestCopyGrid(1, 1, 1);
dim3 globalBestCopyBlock(actualParticleSize,1,1);
//CUDA routines to time events
hipEvent_t start, stop;
cudasafe( hipEventCreate(&start), "h_cudaPSO_Optimize: hipEventCreate() execution failed\n");
cudasafe( hipEventCreate(&stop), "h_cudaPSO_Optimize: hipEventCreate() execution failed\n");
//printf("Starting Optimization...");
//Start timing...
hipEventRecord(start,0);
//Set up search space limits
hipMemcpyToSymbol(c_minValue, &minValue, sizeof(float), 0, hipMemcpyHostToDevice);
cutilCheckMsg("h_cudaPSO_Optimize: hipMemcpyToSymbol() execution failed\n");
hipMemcpyToSymbol(c_maxValue, &maxValue, sizeof(float), 0, hipMemcpyHostToDevice);
cutilCheckMsg("h_cudaPSO_Optimize: hipMemcpyToSymbol() execution failed\n");
hipMemcpyToSymbol(c_deltaValue, &deltaValue, sizeof(float), 0, hipMemcpyHostToDevice);
cutilCheckMsg("h_cudaPSO_Optimize: hipMemcpyToSymbol() execution failed\n");
//Particles initialization
hipLaunchKernelGGL(( g_initParticles), dim3(initializationGrid), dim3(initializationBlock), 0, 0, g_positions, g_bestPositions, g_velocities, devStates);
cutilCheckMsg("h_cudaPSO_Optimize: g_initParticles() execution failed\n");
//Set to zero the update flags
hipMemset(g_update, 0, particlesNumber * sizeof(u_int32_t));
//First fitnesses evaluation
h_calculateFitnessesValues(functionID, g_fitnesses, actualParticleSize, problemDimension, calculateFitnessesGrid, calculateFitnessesBlock);
cutilCheckMsg("h_cudaPSO_Optimize: h_calculateFitnessesValues() execution failed\n");
//First Local bests update
hipLaunchKernelGGL(( g_firstBestsUpdate), dim3(bestsUpdateGrid), dim3(bestsUpdateBlock), bestsUpdateSharedAmount, 0, g_bestFitnesses, g_localBestIDs);
cutilCheckMsg("h_cudaPSO_Optimize: g_firstBestsUpdate() execution failed\n");
//Generations main cycle
for(unsigned int generationNumber = 1; generationNumber < numberOfGenerations; ++generationNumber){
//Position Update
hipLaunchKernelGGL(( g_positionsUpdate), dim3(updateGrid), dim3(updateBlock), 0, 0, W, C1, C2, g_positions, g_bestPositions, g_velocities, devStates);
cutilCheckMsg("h_cudaPSO_Optimize: g_positionsUpdate() execution failed\n");
//Fitnesses evaluation
h_calculateFitnessesValues(functionID, g_fitnesses, actualParticleSize, problemDimension, calculateFitnessesGrid, calculateFitnessesBlock);
cutilCheckMsg("h_cudaPSO_Optimize: h_calculateFitnessesValues() execution failed\n");
//Local bests update
hipLaunchKernelGGL(( g_bestsUpdate), dim3(bestsUpdateGrid), dim3(bestsUpdateBlock), bestsUpdateSharedAmount, 0, g_bestFitnesses, g_localBestIDs, g_update);
cutilCheckMsg("h_cudaPSO_Optimize: g_bestsUpdate() execution failed\n");
}
//Global best determination
h_findGlobalBest(g_globalBestFitness, g_globalBestID, particlesNumber, globalBestUpdateGrid, globalBestUpdateBlock);
cutilCheckMsg("h_cudaPSO_Optimize: h_findGlobalBest() execution failed\n");
//Copy global best positions
hipLaunchKernelGGL(( g_globalBestCopy), dim3(globalBestCopyGrid), dim3(globalBestCopyBlock), 0, 0, g_globalBestPositions, g_globalBestID);
cutilCheckMsg("h_cudaPSO_Optimize: g_copyBests() execution failed\n");
hipDeviceSynchronize();
//Stop timing...
hipEventRecord(stop,0);
//waits for the stop event to be recorded...
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
//Print the current best fitnesses
//h_cudaPSOPrintBestFitnesses(g_globalBestID, g_bestFitnesses, g_bestPositions, g_globalBestFitness, g_globalBestPositions, g_positions, particlesNumber, actualParticleSize, problemDimension);
//cutilCheckMsg("h_cudaPSO_Optimize: h_cudaPSOPrintBestFitnesses() execution failed\n");
//Retrieves the global best fitness value
hipMemcpy(h_globalBestFitness, g_globalBestFitness, sizeof(float), hipMemcpyDeviceToHost);
//Retrieves the global best position
hipMemcpy(h_globalBestPosition, g_globalBestPositions, problemDimension * sizeof(float), hipMemcpyDeviceToHost);
//Prints the amount of time elapsed for optimization
//printf("Elapsed time = %f ms\n", elapsedTime);
printf("%d %d %d %d %f %e %d\n", 1 /*swarmsNumber*/, particlesNumber, problemDimension, numberOfGenerations, elapsedTime, *h_globalBestFitness, functionID);
}
__host__ int cutGetMaxGflopsDeviceId()
{
int device_count = 0;
hipGetDeviceCount( &device_count );
hipDeviceProp_t device_properties;
int max_gflops_device = 0;
int max_gflops = 0;
int current_device = 0;
hipGetDeviceProperties( &device_properties, current_device );
max_gflops = device_properties.multiProcessorCount * device_properties.clockRate;
++current_device;
while( current_device < device_count )
{
hipGetDeviceProperties( &device_properties, current_device );
int gflops = device_properties.multiProcessorCount * device_properties.clockRate;
if( gflops > max_gflops )
{
max_gflops = gflops;
max_gflops_device = current_device;
}
++current_device;
}
return max_gflops_device;
}
__host__ void cudasafe( hipError_t error, char* message)
{
if(error!=hipSuccess) { fprintf(stderr,"ERROR: %s : %i\n",message,error); exit(-1); }
}
__host__ void cutilCheckMsg( const char *errorMessage)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "cutilCheckMsg() CUTIL CUDA error : %s : %s.\n",
errorMessage, hipGetErrorString( err) );
exit(-1);
}
#ifdef _DEBUG
err = hipDeviceSynchronize();
if( hipSuccess != err) {
fprintf(stderr, "cutilCheckMsg hipDeviceSynchronize error: %s : %s.\n",
errorMessage, hipGetErrorString( err) );
exit(-1);
}
#endif
}
#endif
|
af5398ee0b4e7c341a521a60d458ad5dafa65a0a.cu
|
#ifndef _CUDA_PSO_CU_
#define _CUDA_PSO_CU_
#include <cuda_runtime.h>
//#include <cutil_inline.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdint.h>
typedef uint32_t u_int32_t;
//#include <helper_cuda.h>
#include <cstdio>
#include "cudaPSO.cuh"
#include "reductions.cu"
#include "utilities.h"
#include "cutil_compat.h"
//#include "MersenneTwister.cuh"
//NOTE To print debug messages uncomment the following line
//#define PRINT_DEBUG
///Number of array elements used for each particle, usually greater than the problem dimensionality
unsigned int actualParticleSize;
///CUDA Random states global array
curandState* devStates;
//available GPU memory tracking
///Amount of used global memory
unsigned long int usedGlobalMem;
///Amount of free global memory
unsigned long int freeGlobalMem;
///Amount of available global memory
unsigned long int totGlobalMem;
//********
//Constant Memory Data
///starting coordinate of the hypercubical search space (Resides in GPU's constant memory)
__constant__ float c_minValue;
///ending coordinate of the hypercubical search space (Resides in GPU's constant memory)
__constant__ float c_maxValue;
///width of the hypercubical search space (Resides in GPU's constant memory)
__constant__ float c_deltaValue;
//********
//********
//Global Memory Data Arrays
///pointer to the GPU's global-memory array containing the current position of all particles (from all swarms, in case of multi-swarm simulation)
float *g_positions;
///pointer to the GPU's global-memory array containing the current personal best position of all particles
float *g_bestPositions;
///pointer to the GPU's global-memory array containing the current velocity of all particles
float *g_velocities;
///pointer to the GPU's global-memory array containing the current fitness of all particles
float *g_fitnesses;
///pointer to the GPU's global-memory array containing the current personal best fitness of all particles
float *g_bestFitnesses;
///pointer to the GPU's global-memory array containing the final global best fitness value
float *g_globalBestFitness;
///pointer to the GPU's global-memory array containing the coordinates of the global best position of all swarms
float *g_globalBestPositions;
///pointer to the GPU's global-memory array containing the indexes (for all particles) of the best neighbour (for the ring topology in this case)
u_int32_t *g_localBestIDs;
///pointer to the GPU's global-memory array containing the index of the best particle
u_int32_t *g_globalBestID;
///pointer to the GPU's global-memory array containing the flags saying to each particle whether to update their personal best
u_int32_t *g_update;
//********
//********
//Textures
///GPU's texture interface used for fast access to the update flags in global memory
texture<unsigned int, 1, cudaReadModeElementType> t_texUpdatePositionFlags;
///GPU's texture interface used for fast access to the local best indices in global memory
texture<unsigned int, 1, cudaReadModeElementType> t_texLocalBestIDs;
///GPU's texture interface used for fast access to the current particles' velocities in global memory
texture<float, 1, cudaReadModeElementType> t_texVelocities;
///GPU's texture interface used for fast access to the current particles' positions in global memory
texture<float, 1, cudaReadModeElementType> t_texPositions;
///GPU's texture interface used for fast access to the current particles' best positions in global memory
texture<float, 1, cudaReadModeElementType> t_texBestPositions;
///GPU's texture interface used for fast access to the current particles' fitnesses in global memory
texture<float, 1, cudaReadModeElementType> t_texFitnesses;
///GPU's texture interface used for fast access to the current particles' best fitnesses in global memory
texture<float, 1, cudaReadModeElementType> t_texBestFitnesses;
//********
//*******************************************************************************************
// DEVICE KERNELS
//*******************************************************************************************
//includes all kernels code...
#include "cudaPSO_kernels.cuh"
//includes fitnesses computation stuff...
#include "cudaPSO_fitnesses.cuh"
__global__ void setup_kernel(curandState *state, unsigned long seed)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
/* Each thread gets same seed, a different sequence number,
no offset */
curand_init(seed, id, 0, &state[id]);
}
//*******************************************************************************************
// HOST ROUTINES
//*******************************************************************************************
//***********************************************************
// DATA ALLOCATION ON GPU
//***********************************************************
/// Initialization of the GPU...
/// Here global variables pointing to device memory are initialized...
/// @param particlesNumber number of particles in the swarm
/// @param problemDimension dimensionality of the problem
/// @param numberOfGenerations number of generations to be performed during the optimization
__host__ void h_cudaPSO_Init(int particlesNumber, int problemDimension, int numberOfGenerations){
#ifdef PRINT_DEBUG
printf("Allocating data structures on GPU...\n");
#endif
int dim;
//Determination of the total amount of global memory
int devID = cutGetMaxGflopsDeviceId();
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, devID);
totGlobalMem = deviceProp.totalGlobalMem;
usedGlobalMem = 0;
//To accomplish CUDA byte alignment requirements, we need data arrays with a number of elements
// for each particle which is a multiple of 16
	//The actual number of simulated problem dimensions might be greater than the required one:
	// during cost function evaluation only the needed coordinates will be considered
actualParticleSize = iAlignUp(problemDimension, 16);
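	//e.g. assuming iAlignUp() rounds its first argument up to the next multiple of the second,
	// problemDimension = 30 yields actualParticleSize = 32; the padding elements are allocated
	// but ignored during fitness evaluation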
dim = particlesNumber * actualParticleSize * sizeof(float);
#ifdef PRINT_DEBUG
printf("\t - actualParticleSize = %d\n", actualParticleSize);
#endif
//Allocation of the positions array
cudasafe(cudaMalloc( (void**) &g_positions, dim), "h_init_cudaPSO: cudaMalloc() execution failed\n");
cudasafe(cudaMemset( g_positions, 0, dim), "h_init_cudaPSO: cudaMemset() execution failed\n");
cudasafe(cudaBindTexture(NULL, t_texPositions, g_positions, dim), "h_init_cudaPSO: cudaBindTexture() execution failed\n");
usedGlobalMem += dim;
//Allocation of the best positions array
cudasafe(cudaMalloc( (void**) &g_bestPositions, dim), "cudaPSO: cudaMalloc() execution failed\n");
cudasafe(cudaMemset( g_bestPositions, 0, dim), "cudaPSO: cudaMemset() execution failed\n");
cudasafe(cudaBindTexture(NULL, t_texBestPositions, g_bestPositions, dim), "h_cudaPSOBindBestPositionsTextures: cudaBindTexture() execution failed\n");
usedGlobalMem += dim;
//Allocation of the velocities array
cudasafe(cudaMalloc( (void**) &g_velocities, dim), "cudaPSO: cudaMalloc() execution failed\n");
cudasafe(cudaMemset( g_velocities, 0, dim), "cudaPSO: cudaMemset() execution failed\n");
cudasafe(cudaBindTexture(NULL, t_texVelocities, g_velocities, dim), "h_cudaPSOBindPositionsTextures: cudaBindTexture() execution failed\n");
usedGlobalMem += dim;
dim = particlesNumber * sizeof(float);
//Allocation of the fitnesses array
cudaMalloc( (void**) &g_fitnesses, dim);
cutilCheckMsg("cudaPSO: cudaMalloc() execution failed\n");
cudaMemset( g_fitnesses, 0, dim);
cutilCheckMsg("cudaPSO: cudaMemset() execution failed\n");
cudaBindTexture(NULL, t_texFitnesses, g_fitnesses, dim);
cutilCheckMsg("h_cudaPSOBindFitnessesTextures: cudaBindTexture() execution failed\n");
usedGlobalMem += dim;
//Allocation of the best fitnesses array
cudaMalloc( (void**) &g_bestFitnesses, dim);
cutilCheckMsg("cudaPSO: cudaMalloc() execution failed\n");
cudaMemset( g_bestFitnesses, 0, dim);
cutilCheckMsg("cudaPSO: cudaMemset() execution failed\n");
cudaBindTexture(NULL, t_texBestFitnesses, g_bestFitnesses, dim);
cutilCheckMsg("h_cudaPSOBindBestFitnessesTextures: cudaBindTexture() execution failed\n");
usedGlobalMem += dim;
dim = particlesNumber * sizeof(unsigned int);
//Allocation of the local best ids array
cudaMalloc( (void**) &g_localBestIDs, dim);
cutilCheckMsg("cudaPSO: cudaMalloc() execution failed\n");
cudaMemset( g_localBestIDs, 0, dim);
cutilCheckMsg("cudaPSO: cudaMemset() execution failed\n");
cudaBindTexture(NULL, t_texLocalBestIDs, g_localBestIDs, dim);
cutilCheckMsg("h_cudaPSO_BindUpdatePositionFlagsTexture: cudaBindTexture() execution failed\n");
usedGlobalMem += dim;
//Allocation of the update flag array
cudaMalloc( (void**) &g_update, dim);
cutilCheckMsg("cudaPSO: cudaMalloc() execution failed\n");
cudaMemset( g_update, 0, dim);
cutilCheckMsg("cudaPSO: cudaMemset() execution failed\n");
cudaBindTexture(NULL, t_texUpdatePositionFlags, g_update, dim);
cutilCheckMsg("h_cudaPSO_BindUpdatePositionFlagsTexture: cudaBindTexture() execution failed\n");
usedGlobalMem += dim;
dim = actualParticleSize * sizeof(float);
//Allocation of the global best positions array
cudaMalloc( (void**) &g_globalBestPositions, dim);
cutilCheckMsg("cudaPSO: cudaMalloc() execution failed\n");
cudaMemset( g_globalBestPositions, 0, dim);
cutilCheckMsg("cudaPSO: cudaMemset() execution failed\n");
usedGlobalMem += dim;
dim = sizeof(float);
	//Allocation of the global best fitness value
cudaMalloc( (void**) &g_globalBestFitness, dim);
cutilCheckMsg("cudaPSO: cudaMalloc() execution failed\n");
cudaMemset( g_globalBestFitness, 0, dim);
cutilCheckMsg("cudaPSO: cudaMemset() execution failed\n");
usedGlobalMem += dim;
dim = sizeof(u_int32_t);
//Allocation of the global best id
cudaMalloc( (void**) &g_globalBestID, dim);
cutilCheckMsg("cudaPSO: cudaMalloc() execution failed\n");
cudaMemset( g_globalBestID, 0, dim);
cutilCheckMsg("cudaPSO: cudaMemset() execution failed\n");
usedGlobalMem += dim;
dim = particlesNumber * actualParticleSize * sizeof(curandState);
//Allocation of the CUDA random states
cudaMalloc((void **)&devStates, dim);
cutilCheckMsg("cudaPSO: cudaMalloc() execution failed\n");
usedGlobalMem += dim;
freeGlobalMem = totGlobalMem - usedGlobalMem;
/* Setup prng states */
setup_kernel<<<particlesNumber, actualParticleSize>>>(devStates, time(NULL));
h_initFitnessFunctions(g_positions, particlesNumber, actualParticleSize);
#ifdef PRINT_DEBUG
printf("\t - totalGlobalMem = %ld\n", totGlobalMem);
printf("\t - usedGlobalMem = %ld\n", usedGlobalMem);
printf("\t - usedGlobalMemForRandNums = %ld\n", dim);
printf("\t - freeGlobalMem = %ld\n", freeGlobalMem);
printf("Done!\n");
#endif
}
/// Frees GPU's resources
__host__ void h_cudaPSO_Free(){
//**********************
// TEXTURES UN-BINDINGS
//**********************
clearFitnessFunctions();
cudaUnbindTexture(t_texVelocities);
cutilCheckMsg("h_cudaPSO_Free: cudaUnbindTexture() execution failed\n");
cudaUnbindTexture(t_texPositions);
cutilCheckMsg("h_cudaPSO_Free: cudaUnbindTexture() execution failed\n");
cudaUnbindTexture(t_texBestPositions);
cutilCheckMsg("h_cudaPSO_Free: cudaUnbindTexture() execution failed\n");
cudaUnbindTexture(t_texFitnesses);
cutilCheckMsg("h_cudaPSO_Free: cudaUnbindTexture() execution failed\n");
cudaUnbindTexture(t_texBestFitnesses);
cutilCheckMsg("h_cudaPSO_Free: cudaUnbindTexture() execution failed\n");
cudaUnbindTexture(t_texUpdatePositionFlags);
cutilCheckMsg("h_cudaPSO_Free: cudaUnbindTexture() execution failed\n");
cudaUnbindTexture(t_texLocalBestIDs);
cutilCheckMsg("h_cudaPSO_Free: cudaUnbindTexture() execution failed\n");
//**********************
// ARRAYS DE-ALLOCATION
//**********************
cudaFree(g_positions);
cutilCheckMsg("h_cudaPSO_Free: cudaFree() execution failed\n");
cudaFree(g_bestPositions);
cutilCheckMsg("h_cudaPSO_Free: cudaFree() execution failed\n");
cudaFree(g_velocities);
cutilCheckMsg("h_cudaPSO_Free: cudaFree() execution failed\n");
cudaFree(g_fitnesses);
cutilCheckMsg("h_cudaPSO_Free: cudaFree() execution failed\n");
cudaFree(g_bestFitnesses);
cutilCheckMsg("h_cudaPSO_Free: cudaFree() execution failed\n");
cudaFree(g_globalBestPositions);
cutilCheckMsg("h_cudaPSO_Free: cudaFree() execution failed\n");
cudaFree(g_globalBestFitness);
cutilCheckMsg("h_cudaPSO_Free: cudaFree() execution failed\n");
cudaFree(g_localBestIDs);
cutilCheckMsg("h_cudaPSO_Free: cudaFree() execution failed\n");
cudaFree(g_globalBestID);
cutilCheckMsg("h_cudaPSO_Free: cudaFree() execution failed\n");
cudaFree(g_update);
cutilCheckMsg("h_cudaPSO_Free: cudaFree() execution failed\n");
cudaFree(devStates);
cutilCheckMsg("h_cudaPSO_Free: cudaFree() execution failed\n");
}
///@brief wrapper to appropriately call the g_findGlobalBest() kernel
///@param g_globalBestFitness pointer to the GPU's global-memory array containing the final global best fitness value
///@param g_globalBestID pointer to the GPU's global-memory array containing the index of the best particle
///@param numberOfParticles swarm's size
///@param finalBestsUpdateGrid definition of the blocks grid for this kernel (containing the number of thread-blocks for each dimension of the grid)
///@param finalBestsUpdateBlock definition of the thread blocks for this kernel (containing the number of threads for each dimension of the block)
__host__ void h_findGlobalBest(float* g_globalBestFitness, u_int32_t* g_globalBestID, int numberOfParticles, dim3 finalBestsUpdateGrid, dim3 finalBestsUpdateBlock){
switch (finalBestsUpdateBlock.x){
case 8: g_findGlobalBest< 8><<<finalBestsUpdateGrid, finalBestsUpdateBlock>>>(g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
case 16: g_findGlobalBest< 16><<<finalBestsUpdateGrid, finalBestsUpdateBlock>>>(g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
case 32: g_findGlobalBest< 32><<<finalBestsUpdateGrid, finalBestsUpdateBlock>>>(g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
case 64: g_findGlobalBest< 64><<<finalBestsUpdateGrid, finalBestsUpdateBlock>>>(g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
case 128: g_findGlobalBest<128><<<finalBestsUpdateGrid, finalBestsUpdateBlock>>>(g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
case 256: g_findGlobalBest<256><<<finalBestsUpdateGrid, finalBestsUpdateBlock>>>(g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
case 512: g_findGlobalBest<512><<<finalBestsUpdateGrid, finalBestsUpdateBlock>>>(g_globalBestFitness, g_globalBestID, numberOfParticles);
break;
}
}
/*
__host__ void h_cudaPSOPrintBestFitnesses(u_int32_t* g_globalBestID, float* g_bestFitnesses, float* g_bestPositions, float* g_globalBestFitness, float* g_globalBestPositions, float* g_positions, int particlesNumber, int actualParticleSize, int problemDimension){
//Load fintnesses values from GPU and print them to screen
//float* fitnesses = (float*) malloc(particlesNumber * sizeof(float));
//cudaMemcpy(fitnesses, g_bestFitnesses, particlesNumber * sizeof(float), cudaMemcpyDeviceToHost);
//cutilCheckMsg("h_cudaPSOPrintBestFitnesses: cudaMemcpy() execution failed\n");
//printf(" - Fitnesses:\n ");
//for(int i = 0; i < particlesNumber; ++i)
// printf("%e ", fitnesses[i]);
//printf("\n");
//free(fitnesses);
u_int32_t h_globalBestID;
cudaMemcpy(&h_globalBestID, g_globalBestID, sizeof(u_int32_t), cudaMemcpyDeviceToHost);
printf(" - Best particle ID: %u\n", h_globalBestID);
float h_globalBestFitness;
cudaMemcpy(&h_globalBestFitness, g_globalBestFitness, sizeof(float), cudaMemcpyDeviceToHost);
printf(" - Global best fitness: %e\n", h_globalBestFitness);
int dim = actualParticleSize * sizeof(float);
float* h_globalBestPositions = (float*) malloc(dim);
cudaMemcpy(h_globalBestPositions, g_globalBestPositions, dim, cudaMemcpyDeviceToHost);
printf(" - Global best positions:\n");
for(int m = 0; m < problemDimension; ++m)
printf(" %2d: % 12.10e\n", m+1, h_globalBestPositions[m]);
printf("\n");
delete h_globalBestPositions;
printf("particlesNumber = %d\n", particlesNumber);
printf("problemDimension = %d\n", problemDimension);
printf("actualParticleSize = %d\n", actualParticleSize);
dim = particlesNumber * actualParticleSize;
float* h_positions = (float*) malloc(dim * sizeof(float));
cudaMemcpy(h_positions, g_positions, dim * sizeof(float), cudaMemcpyDeviceToHost);
cutilCheckMsg("h_cudaPSOPrintBestFitnesses: cudaMemcpy() execution failed\n");
printf(" - Particle's positions:\n");
for(int j = 0; j < particlesNumber; ++j){
printf(" ");
for(int m = 0; m < problemDimension; ++m)
printf("% 3.1f", h_positions[j * actualParticleSize + m]);
printf("\n");
}
printf("\n");
free(h_positions);
dim = particlesNumber * actualParticleSize;
float* h_bestPositions = (float*) malloc(dim * sizeof(float));
cudaMemcpy(h_bestPositions, g_bestPositions, dim * sizeof(float), cudaMemcpyDeviceToHost);
cutilCheckMsg("h_cudaPSOPrintBestFitnesses: cudaMemcpy() execution failed\n");
printf(" - Personal best positions:\n");
for(int j = 0; j < particlesNumber; ++j){
printf(" ");
for(int m = 0; m < problemDimension; ++m)
printf("% 3.1f", h_bestPositions[j * actualParticleSize + m]);
printf("\n");
}
printf("\n");
free(h_bestPositions);
}
*/
//************************
// OPTIMIZATION CALL-BACK
//************************
///Runs the actual optimization, invoking all the CUDA kernels involved in the process in the appropriate order
///@param functionID Index of the function to be optimized
///@param numberOfGenerations Number of generations/iterations in one optimization step
///@param particlesNumber Number of particles belonging to this swarm
///@param problemDimension Number of parameters to be optimized
///@param W Inertia weight (PSO algorithm)
///@param C1 Cognitive attraction factor (PSO algorithm)
///@param C2 Social attraction factor (PSO algorithm)
///@param minValue Lower limit for each dimension of the search space
///@param maxValue Upper limit for each dimension of the search space
///@param deltaValue Width of each dimension of the search space
///@param h_globalBestFitness pointer to an host variable where to store the final global best fitness value
///@param h_globalBestPosition Pointer to an host array where to store the final result (final global best position)
extern "C" __host__ void h_cudaPSO_Optimize(
int functionID,
int numberOfGenerations,
int particlesNumber,
int problemDimension,
float W,
float C1,
float C2,
float minValue,
float maxValue,
float deltaValue,
float* h_globalBestFitness,
float* h_globalBestPosition
)
{
//kernel parameters for particles initialization:
// - one thread block for each particle
// - one thread for each problem dimension
dim3 initializationGrid(particlesNumber, 1);
dim3 initializationBlock(actualParticleSize,1,1);
//kernel parameters for positions update:
// - one thread block for each particle
// - one thread for each problem dimension
dim3 updateGrid(particlesNumber, 1, 1);
dim3 updateBlock(actualParticleSize,1,1);
//kernel parameters for local bests update:
// - one thread block
// - one thread for each particle
dim3 bestsUpdateGrid(1, 1, 1);
dim3 bestsUpdateBlock(particlesNumber,1,1);
unsigned int bestsUpdateSharedAmount = (particlesNumber + 2) * sizeof(float);
//kernel parameters for the global bests update:
// - one thread block
// - the number of threads is chosen to have enough thread to perform
// a parallel reduction of the fitness values
dim3 globalBestUpdateGrid(1, 1, 1);
int thNum = (int) rint( pow(2.0f, ceil( log2( (float) particlesNumber) ) ) );
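	//thNum is the smallest power of two >= particlesNumber (e.g. 50 particles -> 64 threads),
	// matching the block sizes (8..512) handled by the switch in h_findGlobalBest()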
dim3 globalBestUpdateBlock(thNum,1,1);
//kernel parameters for the computation of fitnesses values:
// - one thread block for each individual
	// - one thread for each dimension of the problem
dim3 calculateFitnessesGrid(particlesNumber, 1, 1);
thNum = (int) rint( pow(2.0f, ceil( log2( (float) problemDimension) ) ) );
thNum = max(8, thNum);
dim3 calculateFitnessesBlock(thNum,1,1);
//kernel parameters for the global bests update:
// - one thread block
// - one thread for each dimension of the problem
dim3 globalBestCopyGrid(1, 1, 1);
dim3 globalBestCopyBlock(actualParticleSize,1,1);
//CUDA routines to time events
cudaEvent_t start, stop;
cudasafe( cudaEventCreate(&start), "h_cudaPSO_Optimize: cudaEventCreate() execution failed\n");
cudasafe( cudaEventCreate(&stop), "h_cudaPSO_Optimize: cudaEventCreate() execution failed\n");
//printf("Starting Optimization...");
//Start timing...
cudaEventRecord(start,0);
//Set up search space limits
cudaMemcpyToSymbol(c_minValue, &minValue, sizeof(float), 0, cudaMemcpyHostToDevice);
cutilCheckMsg("h_cudaPSO_Optimize: cudaMemcpyToSymbol() execution failed\n");
cudaMemcpyToSymbol(c_maxValue, &maxValue, sizeof(float), 0, cudaMemcpyHostToDevice);
cutilCheckMsg("h_cudaPSO_Optimize: cudaMemcpyToSymbol() execution failed\n");
cudaMemcpyToSymbol(c_deltaValue, &deltaValue, sizeof(float), 0, cudaMemcpyHostToDevice);
cutilCheckMsg("h_cudaPSO_Optimize: cudaMemcpyToSymbol() execution failed\n");
//Particles initialization
g_initParticles<<<initializationGrid, initializationBlock>>>(g_positions, g_bestPositions, g_velocities, devStates);
cutilCheckMsg("h_cudaPSO_Optimize: g_initParticles() execution failed\n");
//Set to zero the update flags
cudaMemset(g_update, 0, particlesNumber * sizeof(u_int32_t));
//First fitnesses evaluation
h_calculateFitnessesValues(functionID, g_fitnesses, actualParticleSize, problemDimension, calculateFitnessesGrid, calculateFitnessesBlock);
cutilCheckMsg("h_cudaPSO_Optimize: h_calculateFitnessesValues() execution failed\n");
//First Local bests update
g_firstBestsUpdate<<<bestsUpdateGrid, bestsUpdateBlock, bestsUpdateSharedAmount>>>(g_bestFitnesses, g_localBestIDs);
cutilCheckMsg("h_cudaPSO_Optimize: g_firstBestsUpdate() execution failed\n");
//Generations main cycle
for(unsigned int generationNumber = 1; generationNumber < numberOfGenerations; ++generationNumber){
//Position Update
g_positionsUpdate<<<updateGrid, updateBlock>>>(W, C1, C2, g_positions, g_bestPositions, g_velocities, devStates);
cutilCheckMsg("h_cudaPSO_Optimize: g_positionsUpdate() execution failed\n");
//Fitnesses evaluation
h_calculateFitnessesValues(functionID, g_fitnesses, actualParticleSize, problemDimension, calculateFitnessesGrid, calculateFitnessesBlock);
cutilCheckMsg("h_cudaPSO_Optimize: h_calculateFitnessesValues() execution failed\n");
//Local bests update
g_bestsUpdate<<<bestsUpdateGrid, bestsUpdateBlock, bestsUpdateSharedAmount>>>(g_bestFitnesses, g_localBestIDs, g_update);
cutilCheckMsg("h_cudaPSO_Optimize: g_bestsUpdate() execution failed\n");
}
//Global best determination
h_findGlobalBest(g_globalBestFitness, g_globalBestID, particlesNumber, globalBestUpdateGrid, globalBestUpdateBlock);
cutilCheckMsg("h_cudaPSO_Optimize: h_findGlobalBest() execution failed\n");
//Copy global best positions
g_globalBestCopy<<<globalBestCopyGrid, globalBestCopyBlock>>>(g_globalBestPositions, g_globalBestID);
cutilCheckMsg("h_cudaPSO_Optimize: g_copyBests() execution failed\n");
cudaThreadSynchronize();
//Stop timing...
cudaEventRecord(stop,0);
//waits for the stop event to be recorded...
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
//Print the current best fitnesses
//h_cudaPSOPrintBestFitnesses(g_globalBestID, g_bestFitnesses, g_bestPositions, g_globalBestFitness, g_globalBestPositions, g_positions, particlesNumber, actualParticleSize, problemDimension);
//cutilCheckMsg("h_cudaPSO_Optimize: h_cudaPSOPrintBestFitnesses() execution failed\n");
//Retrieves the global best fitness value
cudaMemcpy(h_globalBestFitness, g_globalBestFitness, sizeof(float), cudaMemcpyDeviceToHost);
//Retrieves the global best position
cudaMemcpy(h_globalBestPosition, g_globalBestPositions, problemDimension * sizeof(float), cudaMemcpyDeviceToHost);
//Prints the amount of time elapsed for optimization
//printf("Elapsed time = %f ms\n", elapsedTime);
printf("%d %d %d %d %f %e %d\n", 1 /*swarmsNumber*/, particlesNumber, problemDimension, numberOfGenerations, elapsedTime, *h_globalBestFitness, functionID);
}
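//Hypothetical usage sketch (not part of the original source): one plausible way
//to call h_cudaPSO_Optimize(). It assumes the swarm's device-side buffers
//(g_positions, g_velocities, g_bestPositions, devStates, ...) have already been
//allocated and seeded by the library's initialization routine, which lies
//outside this excerpt; W, C1 and C2 below are common PSO settings, not values
//mandated by this code.
/*
float bestFitness = 0.0f;
float bestPosition[30]; // problemDimension = 30
h_cudaPSO_Optimize(0, 1000, 64, 30, // functionID, generations, particles, dimension
0.729f, 1.49445f, 1.49445f, // W, C1, C2 (common PSO settings)
-100.0f, 100.0f, 200.0f, // minValue, maxValue, deltaValue = maxValue - minValue
&bestFitness, bestPosition);
*/
//Returns the index of the CUDA device with the highest estimated throughput
//(multiprocessor count times clock rate)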
__host__ int cutGetMaxGflopsDeviceId()
{
int device_count = 0;
cudaGetDeviceCount( &device_count );
cudaDeviceProp device_properties;
int max_gflops_device = 0;
int max_gflops = 0;
int current_device = 0;
cudaGetDeviceProperties( &device_properties, current_device );
max_gflops = device_properties.multiProcessorCount * device_properties.clockRate;
++current_device;
while( current_device < device_count )
{
cudaGetDeviceProperties( &device_properties, current_device );
int gflops = device_properties.multiProcessorCount * device_properties.clockRate;
if( gflops > max_gflops )
{
max_gflops = gflops;
max_gflops_device = current_device;
}
++current_device;
}
return max_gflops_device;
}
__host__ void cudasafe( cudaError_t error, char* message)
{
if(error!=cudaSuccess) { fprintf(stderr,"ERROR: %s : %i\n",message,error); exit(-1); }
}
__host__ void cutilCheckMsg( const char *errorMessage)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "cutilCheckMsg() CUTIL CUDA error : %s : %s.\n",
errorMessage, cudaGetErrorString( err) );
exit(-1);
}
#ifdef _DEBUG
err = cudaThreadSynchronize();
if( cudaSuccess != err) {
fprintf(stderr, "cutilCheckMsg cudaThreadSynchronize error: %s : %s.\n",
errorMessage, cudaGetErrorString( err) );
exit(-1);
}
#endif
}
#endif
|
a2f600b39c5508ed32fc718061dfa3da8d3cb93a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <rocblas.h>
#include <hipsparse.h>
#include "cml/cml_spblas.cuh"
#include "cml/cml_spmat.cuh"
#include "cml/cml_vector.cuh"
#include "equil_helper.cuh"
#include "matrix/matrix.h"
#include "matrix/matrix_sparse.h"
#include "util.h"
namespace pogs {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Helper Functions ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// File scoped constants.
const NormTypes kNormEquilibrate = kNorm2;
const NormTypes kNormNormalize = kNormFro;
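// Per-matrix GPU bookkeeping: the caller-supplied sparse arrays together with
// the hipBLAS/hipSPARSE handles and matrix descriptor, created in the
// constructor and released in the destructor.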
template <typename T>
struct GpuData {
const T *orig_data;
const POGS_INT *orig_ptr, *orig_ind;
hipblasHandle_t d_hdl;
hipsparseHandle_t s_hdl;
hipsparseMatDescr_t descr;
GpuData(const T *data, const POGS_INT *ptr, const POGS_INT *ind)
: orig_data(data), orig_ptr(ptr), orig_ind(ind) {
hipblasCreate(&d_hdl);
hipsparseCreate(&s_hdl);
hipsparseCreateMatDescr(&descr);
DEBUG_CUDA_CHECK_ERR();
}
~GpuData() {
hipblasDestroy(d_hdl);
hipsparseDestroy(s_hdl);
hipsparseDestroyMatDescr(descr);
DEBUG_CUDA_CHECK_ERR();
}
};
hipsparseOperation_t OpToCusparseOp(char trans) {
ASSERT(trans == 'n' || trans == 'N' || trans == 't' || trans == 'T');
return (trans == 'n' || trans == 'N')
? HIPSPARSE_OPERATION_NON_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE;
}
template <typename T>
void MultDiag(const T *d, const T *e, POGS_INT m, POGS_INT n, POGS_INT nnz,
typename MatrixSparse<T>::Ord ord, T *data, const POGS_INT *ind,
const POGS_INT *ptr);
template <typename T>
T NormEst(hipblasHandle_t hdl, NormTypes norm_type, const MatrixSparse<T>& A);
} // namespace
////////////////////////////////////////////////////////////////////////////////
/////////////////////// MatrixSparse Implementation ////////////////////////////
////////////////////////////////////////////////////////////////////////////////
template <typename T>
MatrixSparse<T>::MatrixSparse(char ord, POGS_INT m, POGS_INT n, POGS_INT nnz,
const T *data, const POGS_INT *ptr,
const POGS_INT *ind)
: Matrix<T>(m, n), _data(0), _ptr(0), _ind(0), _nnz(nnz) {
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
// It should work up to 2^31 == 2B, but let's be sure.
DEBUG_EXPECT(nnz < static_cast<POGS_INT>(1 << 29));
// Set GPU specific data.
GpuData<T> *info = new GpuData<T>(data, ptr, ind);
this->_info = reinterpret_cast<void*>(info);
}
template <typename T>
MatrixSparse<T>::MatrixSparse(const MatrixSparse<T>& A)
: Matrix<T>(A._m, A._n), _data(0), _ptr(0), _ind(0), _nnz(A._nnz),
_ord(A._ord) {
GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(A._info);
GpuData<T> *info = new GpuData<T>(info_A->orig_data, info_A->orig_ptr,
info_A->orig_ind);
this->_info = reinterpret_cast<void*>(info);
}
template <typename T>
MatrixSparse<T>::~MatrixSparse() {
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
delete info;
this->_info = 0;
if (this->_done_init) {
if (_data) {
hipFree(_data);
_data = 0;
DEBUG_CUDA_CHECK_ERR();
}
if (_ptr) {
hipFree(_ptr);
_ptr = 0;
DEBUG_CUDA_CHECK_ERR();
}
if (_ind) {
hipFree(_ind);
_ind = 0;
DEBUG_CUDA_CHECK_ERR();
}
}
}
template <typename T>
int MatrixSparse<T>::Init() {
DEBUG_ASSERT(!this->_done_init);
if (this->_done_init)
return 1;
this->_done_init = true;
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
const T *orig_data = info->orig_data;
const POGS_INT *orig_ptr = info->orig_ptr;
const POGS_INT *orig_ind = info->orig_ind;
// Allocate sparse matrix on gpu.
hipMalloc(&_data, static_cast<size_t>(2) * _nnz * sizeof(T));
hipMalloc(&_ind, static_cast<size_t>(2) * _nnz * sizeof(POGS_INT));
hipMalloc(&_ptr, (this->_m + this->_n + 2) * sizeof(POGS_INT));
DEBUG_CUDA_CHECK_ERR();
if (_ord == ROW) {
cml::spmat<T, POGS_INT, CblasRowMajor> A(_data, _ind, _ptr, this->_m,
this->_n, _nnz);
cml::spmat_memcpy(info->s_hdl, &A, orig_data, orig_ind, orig_ptr);
} else {
cml::spmat<T, POGS_INT, CblasColMajor> A(_data, _ind, _ptr, this->_m,
this->_n, _nnz);
cml::spmat_memcpy(info->s_hdl, &A, orig_data, orig_ind, orig_ptr);
}
DEBUG_CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixSparse<T>::Mul(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
return 1;
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cml::vector<T> x_vec, y_vec;
if (trans == 'n' || trans == 'N') {
x_vec = cml::vector_view_array<T>(x, this->_n);
y_vec = cml::vector_view_array<T>(y, this->_m);
} else {
x_vec = cml::vector_view_array<T>(x, this->_m);
y_vec = cml::vector_view_array<T>(y, this->_n);
}
if (_ord == ROW) {
cml::spmat<T, POGS_INT, CblasRowMajor> A(_data, _ind, _ptr, this->_m,
this->_n, _nnz);
cml::spblas_gemv(info->s_hdl, OpToCusparseOp(trans), info->descr, alpha,
&A, &x_vec, beta, &y_vec);
} else {
cml::spmat<T, POGS_INT, CblasColMajor> A(_data, _ind, _ptr, this->_m,
this->_n, _nnz);
cml::spblas_gemv(info->s_hdl, OpToCusparseOp(trans), info->descr, alpha,
&A, &x_vec, beta, &y_vec);
}
DEBUG_CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixSparse<T>::Equil(T *d, T *e) {
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
return 1;
// Extract cublas handle from _info.
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->d_hdl;
// Number of elements in matrix.
size_t num_el = static_cast<size_t>(2) * _nnz;
// Create bit-vector with signs of entries in A and then let A = f(A),
// where f = |A| or f = |A|.^2.
unsigned char *sign;
size_t num_sign_bytes = (num_el + 7) / 8;
hipMalloc(&sign, num_sign_bytes);
CUDA_CHECK_ERR();
// Fill sign bits, assigning each thread a multiple of 8 elements.
size_t num_chars = num_el / 8;
size_t grid_size = cml::calc_grid_dim(num_chars, cml::kBlockSize);
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __SetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
SquareF<T>());
} else {
hipLaunchKernelGGL(( __SetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
AbsF<T>());
}
hipDeviceSynchronize();
CUDA_CHECK_ERR();
// If numel(A) is not a multiple of 8, then we need to set the last couple
// of sign bits too.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __SetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SquareF<T>());
} else {
hipLaunchKernelGGL(( __SetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, AbsF<T>());
}
hipDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Perform Sinkhorn-Knopp equilibration.
SinkhornKnopp(this, d, e);
hipDeviceSynchronize();
// Transform A = sign(A) .* sqrt(A) if 2-norm equilibration was performed,
// or A = sign(A) .* A if the 1-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __UnSetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
SqrtF<T>());
} else {
hipLaunchKernelGGL(( __UnSetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
IdentityF<T>());
}
hipDeviceSynchronize();
CUDA_CHECK_ERR();
// Deal with last few entries if num_el is not a multiple of 8.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __UnSetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SqrtF<T>());
} else {
hipLaunchKernelGGL(( __UnSetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, IdentityF<T>());
}
hipDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute D := sqrt(D), E := sqrt(E), if 2-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
thrust::transform(thrust::device_pointer_cast(d),
thrust::device_pointer_cast(d + this->_m),
thrust::device_pointer_cast(d), SqrtF<T>());
thrust::transform(thrust::device_pointer_cast(e),
thrust::device_pointer_cast(e + this->_n),
thrust::device_pointer_cast(e), SqrtF<T>());
hipDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute A := D * A * E.
MultDiag(d, e, this->_m, this->_n, _nnz, _ord, _data, _ind, _ptr);
hipDeviceSynchronize();
CUDA_CHECK_ERR();
// Scale A to have norm of 1 (in the kNormNormalize norm).
T normA = NormEst(hdl, kNormNormalize, *this);
CUDA_CHECK_ERR();
hipDeviceSynchronize();
cml::vector<T> a_vec = cml::vector_view_array(_data, num_el);
cml::vector_scale(&a_vec, 1 / normA);
hipDeviceSynchronize();
// Scale d and e to account for normalization of A.
cml::vector<T> d_vec = cml::vector_view_array<T>(d, this->_m);
cml::vector<T> e_vec = cml::vector_view_array<T>(e, this->_n);
T normd = cml::blas_nrm2(hdl, &d_vec);
T norme = cml::blas_nrm2(hdl, &e_vec);
// T scale = sqrt(normd * sqrt(this->_n) / (norme * sqrt(this->_m)));
T scale = static_cast<T>(1.);
cml::vector_scale(&d_vec, 1 / (scale * sqrt(normA)));
cml::vector_scale(&e_vec, scale / sqrt(normA));
hipDeviceSynchronize();
hipFree(sign);
CUDA_CHECK_ERR();
DEBUG_PRINTF("norm A = %e, normd = %e, norme = %e\n", normA, normd, norme);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
/////////////////////// Equilibration Helpers //////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// Estimates norm of A. norm_type should either be kNorm2 or kNormFro.
template <typename T>
T NormEst(hipblasHandle_t hdl, NormTypes norm_type, const MatrixSparse<T>& A) {
switch (norm_type) {
case kNorm2: {
return Norm2Est(hdl, &A);
}
case kNormFro: {
const cml::vector<T> a = cml::vector_view_array(A.Data(), A.Nnz());
return cml::blas_nrm2(hdl, &a) / std::sqrt(::min(A.Rows(), A.Cols()));
}
case kNorm1:
      // 1-norm normalization doesn't make sense since it treats rows and
// columns differently.
default:
ASSERT(false);
return static_cast<T>(0.);
}
}
// Performs D * A * E for A in row major
template <typename T>
void __global__ __MultRow(const T *d, const T *e, T *data,
const POGS_INT *row_ptr, const POGS_INT *col_ind,
POGS_INT size) {
POGS_INT tid = blockIdx.x * blockDim.x + threadIdx.x;
for (POGS_INT t = tid; t < size; t += gridDim.x * blockDim.x)
for (POGS_INT i = row_ptr[t]; i < row_ptr[t + 1]; ++i)
data[i] *= d[t] * e[col_ind[i]];
}
// Performs D * A * E for A in col major
template <typename T>
void __global__ __MultCol(const T *d, const T *e, T *data,
const POGS_INT *col_ptr, const POGS_INT *row_ind,
POGS_INT size) {
POGS_INT tid = blockIdx.x * blockDim.x + threadIdx.x;
for (POGS_INT t = tid; t < size; t += gridDim.x * blockDim.x)
for (POGS_INT i = col_ptr[t]; i < col_ptr[t + 1]; ++i)
data[i] *= d[row_ind[i]] * e[t];
}
template <typename T>
void MultDiag(const T *d, const T *e, POGS_INT m, POGS_INT n, POGS_INT nnz,
typename MatrixSparse<T>::Ord ord, T *data, const POGS_INT *ind,
const POGS_INT *ptr) {
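  // The matrix is stored twice, back to back: data and data + nnz hold the two
  // compressed orderings, so the diagonal scaling is applied to each copy with
  // its matching pointer and index arrays.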
if (ord == MatrixSparse<T>::ROW) {
size_t grid_dim_row = cml::calc_grid_dim(m, cml::kBlockSize);
hipLaunchKernelGGL(( __MultRow), dim3(grid_dim_row), dim3(cml::kBlockSize), 0, 0, d, e, data, ptr, ind, m);
size_t grid_dim_col = cml::calc_grid_dim(n, cml::kBlockSize);
hipLaunchKernelGGL(( __MultCol), dim3(grid_dim_col), dim3(cml::kBlockSize), 0, 0, d, e, data + nnz, ptr + m + 1,
ind + nnz, n);
} else {
size_t grid_dim_col = cml::calc_grid_dim(n, cml::kBlockSize);
hipLaunchKernelGGL(( __MultCol), dim3(grid_dim_col), dim3(cml::kBlockSize), 0, 0, d, e, data, ptr, ind, n);
size_t grid_dim_row = cml::calc_grid_dim(m, cml::kBlockSize);
hipLaunchKernelGGL(( __MultRow), dim3(grid_dim_row), dim3(cml::kBlockSize), 0, 0, d, e, data + nnz, ptr + n + 1,
ind + nnz, m);
}
}
} // namespace
template class MatrixSparse<double>;
template class MatrixSparse<float>;
} // namespace pogs
|
a2f600b39c5508ed32fc718061dfa3da8d3cb93a.cu
|
#include <cublas_v2.h>
#include <cusparse.h>
#include "cml/cml_spblas.cuh"
#include "cml/cml_spmat.cuh"
#include "cml/cml_vector.cuh"
#include "equil_helper.cuh"
#include "matrix/matrix.h"
#include "matrix/matrix_sparse.h"
#include "util.h"
namespace pogs {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Helper Functions ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// File scoped constants.
const NormTypes kNormEquilibrate = kNorm2;
const NormTypes kNormNormalize = kNormFro;
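// Per-matrix GPU bookkeeping: the caller-supplied sparse arrays together with
// the cuBLAS/cuSPARSE handles and matrix descriptor, created in the
// constructor and released in the destructor.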
template <typename T>
struct GpuData {
const T *orig_data;
const POGS_INT *orig_ptr, *orig_ind;
cublasHandle_t d_hdl;
cusparseHandle_t s_hdl;
cusparseMatDescr_t descr;
GpuData(const T *data, const POGS_INT *ptr, const POGS_INT *ind)
: orig_data(data), orig_ptr(ptr), orig_ind(ind) {
cublasCreate(&d_hdl);
cusparseCreate(&s_hdl);
cusparseCreateMatDescr(&descr);
DEBUG_CUDA_CHECK_ERR();
}
~GpuData() {
cublasDestroy(d_hdl);
cusparseDestroy(s_hdl);
cusparseDestroyMatDescr(descr);
DEBUG_CUDA_CHECK_ERR();
}
};
cusparseOperation_t OpToCusparseOp(char trans) {
ASSERT(trans == 'n' || trans == 'N' || trans == 't' || trans == 'T');
return (trans == 'n' || trans == 'N')
? CUSPARSE_OPERATION_NON_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE;
}
template <typename T>
void MultDiag(const T *d, const T *e, POGS_INT m, POGS_INT n, POGS_INT nnz,
typename MatrixSparse<T>::Ord ord, T *data, const POGS_INT *ind,
const POGS_INT *ptr);
template <typename T>
T NormEst(cublasHandle_t hdl, NormTypes norm_type, const MatrixSparse<T>& A);
} // namespace
////////////////////////////////////////////////////////////////////////////////
/////////////////////// MatrixSparse Implementation ////////////////////////////
////////////////////////////////////////////////////////////////////////////////
template <typename T>
MatrixSparse<T>::MatrixSparse(char ord, POGS_INT m, POGS_INT n, POGS_INT nnz,
const T *data, const POGS_INT *ptr,
const POGS_INT *ind)
: Matrix<T>(m, n), _data(0), _ptr(0), _ind(0), _nnz(nnz) {
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
// It should work up to 2^31 == 2B, but let's be sure.
DEBUG_EXPECT(nnz < static_cast<POGS_INT>(1 << 29));
// Set GPU specific data.
GpuData<T> *info = new GpuData<T>(data, ptr, ind);
this->_info = reinterpret_cast<void*>(info);
}
template <typename T>
MatrixSparse<T>::MatrixSparse(const MatrixSparse<T>& A)
: Matrix<T>(A._m, A._n), _data(0), _ptr(0), _ind(0), _nnz(A._nnz),
_ord(A._ord) {
GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(A._info);
GpuData<T> *info = new GpuData<T>(info_A->orig_data, info_A->orig_ptr,
info_A->orig_ind);
this->_info = reinterpret_cast<void*>(info);
}
template <typename T>
MatrixSparse<T>::~MatrixSparse() {
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
delete info;
this->_info = 0;
if (this->_done_init) {
if (_data) {
cudaFree(_data);
_data = 0;
DEBUG_CUDA_CHECK_ERR();
}
if (_ptr) {
cudaFree(_ptr);
_ptr = 0;
DEBUG_CUDA_CHECK_ERR();
}
if (_ind) {
cudaFree(_ind);
_ind = 0;
DEBUG_CUDA_CHECK_ERR();
}
}
}
template <typename T>
int MatrixSparse<T>::Init() {
DEBUG_ASSERT(!this->_done_init);
if (this->_done_init)
return 1;
this->_done_init = true;
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
const T *orig_data = info->orig_data;
const POGS_INT *orig_ptr = info->orig_ptr;
const POGS_INT *orig_ind = info->orig_ind;
// Allocate sparse matrix on gpu.
cudaMalloc(&_data, static_cast<size_t>(2) * _nnz * sizeof(T));
cudaMalloc(&_ind, static_cast<size_t>(2) * _nnz * sizeof(POGS_INT));
cudaMalloc(&_ptr, (this->_m + this->_n + 2) * sizeof(POGS_INT));
DEBUG_CUDA_CHECK_ERR();
if (_ord == ROW) {
cml::spmat<T, POGS_INT, CblasRowMajor> A(_data, _ind, _ptr, this->_m,
this->_n, _nnz);
cml::spmat_memcpy(info->s_hdl, &A, orig_data, orig_ind, orig_ptr);
} else {
cml::spmat<T, POGS_INT, CblasColMajor> A(_data, _ind, _ptr, this->_m,
this->_n, _nnz);
cml::spmat_memcpy(info->s_hdl, &A, orig_data, orig_ind, orig_ptr);
}
DEBUG_CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixSparse<T>::Mul(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
return 1;
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cml::vector<T> x_vec, y_vec;
if (trans == 'n' || trans == 'N') {
x_vec = cml::vector_view_array<T>(x, this->_n);
y_vec = cml::vector_view_array<T>(y, this->_m);
} else {
x_vec = cml::vector_view_array<T>(x, this->_m);
y_vec = cml::vector_view_array<T>(y, this->_n);
}
if (_ord == ROW) {
cml::spmat<T, POGS_INT, CblasRowMajor> A(_data, _ind, _ptr, this->_m,
this->_n, _nnz);
cml::spblas_gemv(info->s_hdl, OpToCusparseOp(trans), info->descr, alpha,
&A, &x_vec, beta, &y_vec);
} else {
cml::spmat<T, POGS_INT, CblasColMajor> A(_data, _ind, _ptr, this->_m,
this->_n, _nnz);
cml::spblas_gemv(info->s_hdl, OpToCusparseOp(trans), info->descr, alpha,
&A, &x_vec, beta, &y_vec);
}
DEBUG_CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixSparse<T>::Equil(T *d, T *e) {
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
return 1;
// Extract cublas handle from _info.
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->d_hdl;
// Number of elements in matrix.
size_t num_el = static_cast<size_t>(2) * _nnz;
// Create bit-vector with signs of entries in A and then let A = f(A),
// where f = |A| or f = |A|.^2.
unsigned char *sign;
size_t num_sign_bytes = (num_el + 7) / 8;
cudaMalloc(&sign, num_sign_bytes);
CUDA_CHECK_ERR();
// Fill sign bits, assigning each thread a multiple of 8 elements.
size_t num_chars = num_el / 8;
size_t grid_size = cml::calc_grid_dim(num_chars, cml::kBlockSize);
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__SetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
SquareF<T>());
} else {
__SetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
AbsF<T>());
}
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
// If numel(A) is not a multiple of 8, then we need to set the last couple
// of sign bits too.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__SetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SquareF<T>());
} else {
__SetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, AbsF<T>());
}
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Perform Sinkhorn-Knopp equilibration.
SinkhornKnopp(this, d, e);
cudaDeviceSynchronize();
// Transform A = sign(A) .* sqrt(A) if 2-norm equilibration was performed,
// or A = sign(A) .* A if the 1-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__UnSetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
SqrtF<T>());
} else {
__UnSetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
IdentityF<T>());
}
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Deal with last few entries if num_el is not a multiple of 8.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__UnSetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SqrtF<T>());
} else {
__UnSetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, IdentityF<T>());
}
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute D := sqrt(D), E := sqrt(E), if 2-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
thrust::transform(thrust::device_pointer_cast(d),
thrust::device_pointer_cast(d + this->_m),
thrust::device_pointer_cast(d), SqrtF<T>());
thrust::transform(thrust::device_pointer_cast(e),
thrust::device_pointer_cast(e + this->_n),
thrust::device_pointer_cast(e), SqrtF<T>());
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute A := D * A * E.
MultDiag(d, e, this->_m, this->_n, _nnz, _ord, _data, _ind, _ptr);
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Scale A to have norm of 1 (in the kNormNormalize norm).
T normA = NormEst(hdl, kNormNormalize, *this);
CUDA_CHECK_ERR();
cudaDeviceSynchronize();
cml::vector<T> a_vec = cml::vector_view_array(_data, num_el);
cml::vector_scale(&a_vec, 1 / normA);
cudaDeviceSynchronize();
// Scale d and e to account for normalization of A.
cml::vector<T> d_vec = cml::vector_view_array<T>(d, this->_m);
cml::vector<T> e_vec = cml::vector_view_array<T>(e, this->_n);
T normd = cml::blas_nrm2(hdl, &d_vec);
T norme = cml::blas_nrm2(hdl, &e_vec);
// T scale = sqrt(normd * sqrt(this->_n) / (norme * sqrt(this->_m)));
T scale = static_cast<T>(1.);
cml::vector_scale(&d_vec, 1 / (scale * sqrt(normA)));
cml::vector_scale(&e_vec, scale / sqrt(normA));
cudaDeviceSynchronize();
cudaFree(sign);
CUDA_CHECK_ERR();
DEBUG_PRINTF("norm A = %e, normd = %e, norme = %e\n", normA, normd, norme);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
/////////////////////// Equilibration Helpers //////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// Estimates norm of A. norm_type should either be kNorm2 or kNormFro.
template <typename T>
T NormEst(cublasHandle_t hdl, NormTypes norm_type, const MatrixSparse<T>& A) {
switch (norm_type) {
case kNorm2: {
return Norm2Est(hdl, &A);
}
case kNormFro: {
const cml::vector<T> a = cml::vector_view_array(A.Data(), A.Nnz());
return cml::blas_nrm2(hdl, &a) / std::sqrt(std::min(A.Rows(), A.Cols()));
}
case kNorm1:
      // 1-norm normalization doesn't make sense since it treats rows and
// columns differently.
default:
ASSERT(false);
return static_cast<T>(0.);
}
}
// Performs D * A * E for A in row major
template <typename T>
void __global__ __MultRow(const T *d, const T *e, T *data,
const POGS_INT *row_ptr, const POGS_INT *col_ind,
POGS_INT size) {
POGS_INT tid = blockIdx.x * blockDim.x + threadIdx.x;
for (POGS_INT t = tid; t < size; t += gridDim.x * blockDim.x)
for (POGS_INT i = row_ptr[t]; i < row_ptr[t + 1]; ++i)
data[i] *= d[t] * e[col_ind[i]];
}
// Performs D * A * E for A in col major
template <typename T>
void __global__ __MultCol(const T *d, const T *e, T *data,
const POGS_INT *col_ptr, const POGS_INT *row_ind,
POGS_INT size) {
POGS_INT tid = blockIdx.x * blockDim.x + threadIdx.x;
for (POGS_INT t = tid; t < size; t += gridDim.x * blockDim.x)
for (POGS_INT i = col_ptr[t]; i < col_ptr[t + 1]; ++i)
data[i] *= d[row_ind[i]] * e[t];
}
template <typename T>
void MultDiag(const T *d, const T *e, POGS_INT m, POGS_INT n, POGS_INT nnz,
typename MatrixSparse<T>::Ord ord, T *data, const POGS_INT *ind,
const POGS_INT *ptr) {
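  // The matrix is stored twice, back to back: data and data + nnz hold the two
  // compressed orderings, so the diagonal scaling is applied to each copy with
  // its matching pointer and index arrays.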
if (ord == MatrixSparse<T>::ROW) {
size_t grid_dim_row = cml::calc_grid_dim(m, cml::kBlockSize);
__MultRow<<<grid_dim_row, cml::kBlockSize>>>(d, e, data, ptr, ind, m);
size_t grid_dim_col = cml::calc_grid_dim(n, cml::kBlockSize);
__MultCol<<<grid_dim_col, cml::kBlockSize>>>(d, e, data + nnz, ptr + m + 1,
ind + nnz, n);
} else {
size_t grid_dim_col = cml::calc_grid_dim(n, cml::kBlockSize);
__MultCol<<<grid_dim_col, cml::kBlockSize>>>(d, e, data, ptr, ind, n);
size_t grid_dim_row = cml::calc_grid_dim(m, cml::kBlockSize);
__MultRow<<<grid_dim_row, cml::kBlockSize>>>(d, e, data + nnz, ptr + n + 1,
ind + nnz, m);
}
}
} // namespace
template class MatrixSparse<double>;
template class MatrixSparse<float>;
} // namespace pogs
|
0ae22fc405fa01e29e7ea1e325142c77f7301576.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <chrono>
#include <hip/hip_runtime.h>
template<typename T>
void verify(const T* cpu_out, const T* gpu_out, int n)
{
int error = memcmp(cpu_out, gpu_out, n * sizeof(T));
printf("%s\n", error ? "FAIL" : "PASS");
}
// bank conflict aware optimization
#define LOG_MEM_BANKS 5
#define OFFSET(n) ((n) >> LOG_MEM_BANKS)
// N is the number of elements to scan in a thread block
template<typename T, int N>
__global__ void scan_bcao (
T *__restrict__ g_odata,
const T *__restrict__ g_idata)
{
__shared__ T temp[2*N];
int bid = blockIdx.x;
g_idata += bid * N;
g_odata += bid * N;
int thid = threadIdx.x;
int a = thid;
int b = a + (N/2);
int oa = OFFSET(a);
int ob = OFFSET(b);
temp[a + oa] = g_idata[a];
temp[b + ob] = g_idata[b];
int offset = 1;
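  // up-sweep (reduce) phase of the work-efficient exclusive scan: build partial sums in place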
for (int d = N >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += OFFSET(ai);
bi += OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) temp[N-1+OFFSET(N-1)] = 0; // clear the last elem
for (int d = 1; d < N; d *= 2) // traverse down
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += OFFSET(ai);
bi += OFFSET(bi);
T t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads(); // required
g_odata[a] = temp[a + oa];
g_odata[b] = temp[b + ob];
}
template<typename T, int N>
__global__ void scan(
T *__restrict__ g_odata,
const T *__restrict__ g_idata)
{
__shared__ T temp[N];
int bid = blockIdx.x;
g_idata += bid * N;
g_odata += bid * N;
int thid = threadIdx.x;
int offset = 1;
temp[2*thid] = g_idata[2*thid];
temp[2*thid+1] = g_idata[2*thid+1];
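  // up-sweep (reduce) phase of the work-efficient exclusive scan: build partial sums in place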
for (int d = N >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) temp[N-1] = 0; // clear the last elem
for (int d = 1; d < N; d *= 2) // traverse down
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
      T t = temp[ai]; // element type, not float, to avoid precision loss for wider types (matches scan_bcao)
temp[ai] = temp[bi];
temp[bi] += t;
}
}
g_odata[2*thid] = temp[2*thid];
g_odata[2*thid+1] = temp[2*thid+1];
}
template <typename T, int N>
void runTest (const size_t n, const int repeat, bool timing = false)
{
const size_t num_blocks = (n + N - 1) / N;
const size_t nelems = num_blocks * N; // actual total number of elements
size_t bytes = nelems * sizeof(T);
T *in = (T*) malloc (bytes);
T *cpu_out = (T*) malloc (bytes);
T *gpu_out = (T*) malloc (bytes);
srand(123);
for (size_t n = 0; n < nelems; n++) in[n] = rand() % 5 + 1;
T *t_in = in;
T *t_out = cpu_out;
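  // CPU reference: an exclusive prefix sum computed independently within each N-element block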
for (size_t n = 0; n < num_blocks; n++) {
t_out[0] = 0;
for (int i = 1; i < N; i++)
t_out[i] = t_out[i-1] + t_in[i-1];
t_out += N;
t_in += N;
}
T *d_in, *d_out;
hipMalloc((void**)&d_in, bytes);
hipMemcpy(d_in, in, bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_out, bytes);
dim3 grids (num_blocks);
dim3 blocks (N/2);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
hipLaunchKernelGGL(( scan<T, N>), dim3(grids), dim3(blocks), 0, 0, d_out, d_in);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
if (timing) {
printf("Element size in bytes is %zu. Average execution time of scan (w/ bank conflicts): %f (us)\n",
sizeof(T), (time * 1e-3f) / repeat);
}
hipMemcpy(gpu_out, d_out, bytes, hipMemcpyDeviceToHost);
if (!timing) verify(cpu_out, gpu_out, nelems);
// bcao
start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
hipLaunchKernelGGL(( scan_bcao<T, N>), dim3(grids), dim3(blocks), 0, 0, d_out, d_in);
}
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
auto bcao_time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
if (timing) {
printf("Element size in bytes is %zu. Average execution time of scan (w/o bank conflicts): %f (us). ",
sizeof(T), (bcao_time * 1e-3f) / repeat);
printf("Reduce the time by %.1f%%\n", (time - bcao_time) * 1.0 / time * 100);
}
hipMemcpy(gpu_out, d_out, bytes, hipMemcpyDeviceToHost);
if (!timing) verify(cpu_out, gpu_out, nelems);
hipFree(d_in);
hipFree(d_out);
free(in);
free(cpu_out);
free(gpu_out);
}
template<int N>
void run (const int n, const int repeat) {
for (int i = 0; i < 2; i++) {
bool report_timing = i > 0;
printf("\nThe number of elements to scan in a thread block: %d\n", N);
runTest< char, N>(n, repeat, report_timing);
runTest<short, N>(n, repeat, report_timing);
runTest< int, N>(n, repeat, report_timing);
runTest< long, N>(n, repeat, report_timing);
}
}
int main(int argc, char* argv[])
{
if (argc != 3) {
printf("Usage: %s <number of elements> <repeat>\n", argv[0]);
return 1;
}
const int n = atoi(argv[1]);
const int repeat = atoi(argv[2]);
run< 128>(n, repeat);
run< 256>(n, repeat);
run< 512>(n, repeat);
run<1024>(n, repeat);
run<2048>(n, repeat);
return 0;
}
|
0ae22fc405fa01e29e7ea1e325142c77f7301576.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <chrono>
#include <cuda.h>
template<typename T>
void verify(const T* cpu_out, const T* gpu_out, int n)
{
int error = memcmp(cpu_out, gpu_out, n * sizeof(T));
printf("%s\n", error ? "FAIL" : "PASS");
}
// bank conflict aware optimization
#define LOG_MEM_BANKS 5
#define OFFSET(n) ((n) >> LOG_MEM_BANKS)
// N is the number of elements to scan in a thread block
template<typename T, int N>
__global__ void scan_bcao (
T *__restrict__ g_odata,
const T *__restrict__ g_idata)
{
__shared__ T temp[2*N];
int bid = blockIdx.x;
g_idata += bid * N;
g_odata += bid * N;
int thid = threadIdx.x;
int a = thid;
int b = a + (N/2);
int oa = OFFSET(a);
int ob = OFFSET(b);
temp[a + oa] = g_idata[a];
temp[b + ob] = g_idata[b];
int offset = 1;
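  // up-sweep (reduce) phase of the work-efficient exclusive scan: build partial sums in place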
for (int d = N >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += OFFSET(ai);
bi += OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) temp[N-1+OFFSET(N-1)] = 0; // clear the last elem
for (int d = 1; d < N; d *= 2) // traverse down
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += OFFSET(ai);
bi += OFFSET(bi);
T t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads(); // required
g_odata[a] = temp[a + oa];
g_odata[b] = temp[b + ob];
}
template<typename T, int N>
__global__ void scan(
T *__restrict__ g_odata,
const T *__restrict__ g_idata)
{
__shared__ T temp[N];
int bid = blockIdx.x;
g_idata += bid * N;
g_odata += bid * N;
int thid = threadIdx.x;
int offset = 1;
temp[2*thid] = g_idata[2*thid];
temp[2*thid+1] = g_idata[2*thid+1];
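  // up-sweep (reduce) phase of the work-efficient exclusive scan: build partial sums in place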
for (int d = N >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) temp[N-1] = 0; // clear the last elem
for (int d = 1; d < N; d *= 2) // traverse down
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
      T t = temp[ai]; // element type, not float, to avoid precision loss for wider types (matches scan_bcao)
temp[ai] = temp[bi];
temp[bi] += t;
}
}
g_odata[2*thid] = temp[2*thid];
g_odata[2*thid+1] = temp[2*thid+1];
}
template <typename T, int N>
void runTest (const size_t n, const int repeat, bool timing = false)
{
const size_t num_blocks = (n + N - 1) / N;
const size_t nelems = num_blocks * N; // actual total number of elements
size_t bytes = nelems * sizeof(T);
T *in = (T*) malloc (bytes);
T *cpu_out = (T*) malloc (bytes);
T *gpu_out = (T*) malloc (bytes);
srand(123);
for (size_t n = 0; n < nelems; n++) in[n] = rand() % 5 + 1;
T *t_in = in;
T *t_out = cpu_out;
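  // CPU reference: an exclusive prefix sum computed independently within each N-element block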
for (size_t n = 0; n < num_blocks; n++) {
t_out[0] = 0;
for (int i = 1; i < N; i++)
t_out[i] = t_out[i-1] + t_in[i-1];
t_out += N;
t_in += N;
}
T *d_in, *d_out;
cudaMalloc((void**)&d_in, bytes);
cudaMemcpy(d_in, in, bytes, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_out, bytes);
dim3 grids (num_blocks);
dim3 blocks (N/2);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
scan<T, N><<<grids, blocks>>>(d_out, d_in);
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
if (timing) {
printf("Element size in bytes is %zu. Average execution time of scan (w/ bank conflicts): %f (us)\n",
sizeof(T), (time * 1e-3f) / repeat);
}
cudaMemcpy(gpu_out, d_out, bytes, cudaMemcpyDeviceToHost);
if (!timing) verify(cpu_out, gpu_out, nelems);
// bcao
start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
scan_bcao<T, N><<<grids, blocks>>>(d_out, d_in);
}
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
auto bcao_time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
if (timing) {
printf("Element size in bytes is %zu. Average execution time of scan (w/o bank conflicts): %f (us). ",
sizeof(T), (bcao_time * 1e-3f) / repeat);
printf("Reduce the time by %.1f%%\n", (time - bcao_time) * 1.0 / time * 100);
}
cudaMemcpy(gpu_out, d_out, bytes, cudaMemcpyDeviceToHost);
if (!timing) verify(cpu_out, gpu_out, nelems);
cudaFree(d_in);
cudaFree(d_out);
free(in);
free(cpu_out);
free(gpu_out);
}
template<int N>
void run (const int n, const int repeat) {
for (int i = 0; i < 2; i++) {
bool report_timing = i > 0;
printf("\nThe number of elements to scan in a thread block: %d\n", N);
runTest< char, N>(n, repeat, report_timing);
runTest<short, N>(n, repeat, report_timing);
runTest< int, N>(n, repeat, report_timing);
runTest< long, N>(n, repeat, report_timing);
}
}
int main(int argc, char* argv[])
{
if (argc != 3) {
printf("Usage: %s <number of elements> <repeat>\n", argv[0]);
return 1;
}
const int n = atoi(argv[1]);
const int repeat = atoi(argv[2]);
run< 128>(n, repeat);
run< 256>(n, repeat);
run< 512>(n, repeat);
run<1024>(n, repeat);
run<2048>(n, repeat);
return 0;
}
|
b2ac6cd042bfa0c08c49e8e893800bc3dc3c5bb1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
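// For each i in [0, iLength), writes updates[i] into element indices[i] of the
// i-th TAD (sub-array) of the input buffer, using a grid-stride loop over i.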
template <typename X, typename Y>
static _CUDA_G void scatterSimpleKernel(void *vx, const Nd4jLong *xTadShape, const Nd4jLong *xTadOffsets, Nd4jLong xLength, Nd4jLong numTads, const void *vi, const Nd4jLong *iShapeInfo, Nd4jLong iLength, const void *vu, const Nd4jLong *uShapeInfo, Nd4jLong uLength) {
auto u = reinterpret_cast<const X*>(vu);
auto indices = reinterpret_cast<const Y*>(vi);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < iLength; i += blockDim.x * gridDim.x) {
auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i];
auto idx = indices[shape::getIndexOffset(i, iShapeInfo)];
x[shape::getIndexOffset(idx, xTadShape)] = u[shape::getIndexOffset(i, uShapeInfo)];
}
}
template <typename X, typename Y>
void scatterSimple_(sd::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) {
auto dims = ShapeUtils::evalDimsToExclude(input.rankOf(), dimensions);
auto packX = ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), dims);
auto xLength = shape::length(packX.primaryShapeInfo());
auto iLength = indices.lengthOf();
auto uLength = updates.lengthOf();
hipLaunchKernelGGL(( scatterSimpleKernel<X,Y>), dim3(256), dim3(256), 1024, *context->getCudaStream(), input.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), xLength, packX.numberOfTads(), indices.specialBuffer(), indices.specialShapeInfo(), iLength, updates.specialBuffer(), updates.specialShapeInfo(), uLength);
}
void scatterSimple(sd::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) {
auto xType = input.dataType();
auto yType = indices.dataType();
if (opId != 6)
throw std::runtime_error("scatterSimple: only copy op is supported");
NDArray::prepareSpecialUse({&input}, {&updates, &indices});
BUILD_DOUBLE_SELECTOR(xType, yType, scatterSimple_, (context, opId, input, updates, indices, dimensions), LIBND4J_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({&input}, {&updates, &indices});
}
}
}
}
|
b2ac6cd042bfa0c08c49e8e893800bc3dc3c5bb1.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
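// For each i in [0, iLength), writes updates[i] into element indices[i] of the
// i-th TAD (sub-array) of the input buffer, using a grid-stride loop over i.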
template <typename X, typename Y>
static _CUDA_G void scatterSimpleKernel(void *vx, const Nd4jLong *xTadShape, const Nd4jLong *xTadOffsets, Nd4jLong xLength, Nd4jLong numTads, const void *vi, const Nd4jLong *iShapeInfo, Nd4jLong iLength, const void *vu, const Nd4jLong *uShapeInfo, Nd4jLong uLength) {
auto u = reinterpret_cast<const X*>(vu);
auto indices = reinterpret_cast<const Y*>(vi);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < iLength; i += blockDim.x * gridDim.x) {
auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i];
auto idx = indices[shape::getIndexOffset(i, iShapeInfo)];
x[shape::getIndexOffset(idx, xTadShape)] = u[shape::getIndexOffset(i, uShapeInfo)];
}
}
template <typename X, typename Y>
void scatterSimple_(sd::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) {
auto dims = ShapeUtils::evalDimsToExclude(input.rankOf(), dimensions);
auto packX = ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), dims);
auto xLength = shape::length(packX.primaryShapeInfo());
auto iLength = indices.lengthOf();
auto uLength = updates.lengthOf();
scatterSimpleKernel<X,Y><<<256, 256, 1024, *context->getCudaStream()>>>(input.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), xLength, packX.numberOfTads(), indices.specialBuffer(), indices.specialShapeInfo(), iLength, updates.specialBuffer(), updates.specialShapeInfo(), uLength);
}
void scatterSimple(sd::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) {
auto xType = input.dataType();
auto yType = indices.dataType();
if (opId != 6)
throw std::runtime_error("scatterSimple: only copy op is supported");
NDArray::prepareSpecialUse({&input}, {&updates, &indices});
BUILD_DOUBLE_SELECTOR(xType, yType, scatterSimple_, (context, opId, input, updates, indices, dimensions), LIBND4J_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({&input}, {&updates, &indices});
}
}
}
}
|
0b0d8b949e84cbe65ca0f33b8937f7e4bfd0fa76.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[40,51] --blockDim=[8,8]
#include "common.h"
__global__ void NLM2(
TColor *dst,
int imageW,
int imageH,
float Noise,
float lerpC
)
{
__requires(imageW == 320);
__requires(imageH == 408);
//Weights cache
__shared__ float fWeights[BLOCKDIM_X * BLOCKDIM_Y];
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
//Add half of a texel to always address exact texel centers
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
const float cx = blockDim.x * blockIdx.x + NLM_WINDOW_RADIUS + 0.5f;
const float cy = blockDim.x * blockIdx.y + NLM_WINDOW_RADIUS + 0.5f;
if (ix < imageW && iy < imageH)
{
//Find color distance from current texel to the center of NLM window
float weight = 0;
for (float n = -NLM_BLOCK_RADIUS; n <= NLM_BLOCK_RADIUS; n++)
for (float m = -NLM_BLOCK_RADIUS; m <= NLM_BLOCK_RADIUS; m++)
weight += vecLen(
tex2D(texImage, cx + m, cy + n),
tex2D(texImage, x + m, y + n)
);
//Geometric distance from current texel to the center of NLM window
float dist =
(threadIdx.x - NLM_WINDOW_RADIUS) * (threadIdx.x - NLM_WINDOW_RADIUS) +
(threadIdx.y - NLM_WINDOW_RADIUS) * (threadIdx.y - NLM_WINDOW_RADIUS);
//Derive final weight from color and geometric distance
weight = __expf(-(weight * Noise + dist * INV_NLM_WINDOW_AREA));
//Write the result to shared memory
fWeights[threadIdx.y * BLOCKDIM_X + threadIdx.x] = weight;
//Wait until all the weights are ready
__syncthreads();
//Normalized counter for the NLM weight threshold
float fCount = 0;
//Total sum of pixel weights
float sumWeights = 0;
//Result accumulator
float3 clr = {0, 0, 0};
int idx = 0;
//Cycle through NLM window, surrounding (x, y) texel
for (float i = -NLM_WINDOW_RADIUS; i <= NLM_WINDOW_RADIUS + 1; i++)
for (float j = -NLM_WINDOW_RADIUS; j <= NLM_WINDOW_RADIUS + 1; j++)
{
//Load precomputed weight
float weightIJ = fWeights[idx++];
//Accumulate (x + j, y + i) texel color with computed weight
float4 clrIJ = tex2D(texImage, x + j, y + i);
clr.x += clrIJ.x * weightIJ;
clr.y += clrIJ.y * weightIJ;
clr.z += clrIJ.z * weightIJ;
//Sum of weights for color normalization to [0..1] range
sumWeights += weightIJ;
//Update weight counter, if NLM weight for current window texel
//exceeds the weight threshold
fCount += (weightIJ > NLM_WEIGHT_THRESHOLD) ? INV_NLM_WINDOW_AREA : 0;
}
//Normalize result color by sum of weights
sumWeights = 1.0f / sumWeights;
clr.x *= sumWeights;
clr.y *= sumWeights;
clr.z *= sumWeights;
//Choose LERP quotient based on how many texels
//within the NLM window exceeded the weight threshold
float lerpQ = (fCount > NLM_LERP_THRESHOLD) ? lerpC : 1.0f - lerpC;
//Write final result to global memory
float4 clr00 = tex2D(texImage, x, y);
clr.x = lerpf(clr.x, clr00.x, lerpQ);
clr.y = lerpf(clr.y, clr00.y, lerpQ);
clr.z = lerpf(clr.z, clr00.z, lerpQ);
dst[imageW * iy + ix] = make_color(clr.x, clr.y, clr.z, 0);
}
}
|
0b0d8b949e84cbe65ca0f33b8937f7e4bfd0fa76.cu
|
//pass
//--gridDim=[40,51] --blockDim=[8,8]
#include "common.h"
__global__ void NLM2(
TColor *dst,
int imageW,
int imageH,
float Noise,
float lerpC
)
{
__requires(imageW == 320);
__requires(imageH == 408);
//Weights cache
__shared__ float fWeights[BLOCKDIM_X * BLOCKDIM_Y];
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
//Add half of a texel to always address exact texel centers
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
const float cx = blockDim.x * blockIdx.x + NLM_WINDOW_RADIUS + 0.5f;
const float cy = blockDim.x * blockIdx.y + NLM_WINDOW_RADIUS + 0.5f;
if (ix < imageW && iy < imageH)
{
//Find color distance from current texel to the center of NLM window
float weight = 0;
for (float n = -NLM_BLOCK_RADIUS; n <= NLM_BLOCK_RADIUS; n++)
for (float m = -NLM_BLOCK_RADIUS; m <= NLM_BLOCK_RADIUS; m++)
weight += vecLen(
tex2D(texImage, cx + m, cy + n),
tex2D(texImage, x + m, y + n)
);
//Geometric distance from current texel to the center of NLM window
float dist =
(threadIdx.x - NLM_WINDOW_RADIUS) * (threadIdx.x - NLM_WINDOW_RADIUS) +
(threadIdx.y - NLM_WINDOW_RADIUS) * (threadIdx.y - NLM_WINDOW_RADIUS);
//Derive final weight from color and geometric distance
weight = __expf(-(weight * Noise + dist * INV_NLM_WINDOW_AREA));
//Write the result to shared memory
fWeights[threadIdx.y * BLOCKDIM_X + threadIdx.x] = weight;
//Wait until all the weights are ready
__syncthreads();
//Normalized counter for the NLM weight threshold
float fCount = 0;
//Total sum of pixel weights
float sumWeights = 0;
//Result accumulator
float3 clr = {0, 0, 0};
int idx = 0;
//Cycle through NLM window, surrounding (x, y) texel
for (float i = -NLM_WINDOW_RADIUS; i <= NLM_WINDOW_RADIUS + 1; i++)
for (float j = -NLM_WINDOW_RADIUS; j <= NLM_WINDOW_RADIUS + 1; j++)
{
//Load precomputed weight
float weightIJ = fWeights[idx++];
//Accumulate (x + j, y + i) texel color with computed weight
float4 clrIJ = tex2D(texImage, x + j, y + i);
clr.x += clrIJ.x * weightIJ;
clr.y += clrIJ.y * weightIJ;
clr.z += clrIJ.z * weightIJ;
//Sum of weights for color normalization to [0..1] range
sumWeights += weightIJ;
//Update weight counter, if NLM weight for current window texel
//exceeds the weight threshold
fCount += (weightIJ > NLM_WEIGHT_THRESHOLD) ? INV_NLM_WINDOW_AREA : 0;
}
//Normalize result color by sum of weights
sumWeights = 1.0f / sumWeights;
clr.x *= sumWeights;
clr.y *= sumWeights;
clr.z *= sumWeights;
//Choose LERP quotient based on how many texels
//within the NLM window exceeded the weight threshold
float lerpQ = (fCount > NLM_LERP_THRESHOLD) ? lerpC : 1.0f - lerpC;
//Write final result to global memory
float4 clr00 = tex2D(texImage, x, y);
clr.x = lerpf(clr.x, clr00.x, lerpQ);
clr.y = lerpf(clr.y, clr00.y, lerpQ);
clr.z = lerpf(clr.z, clr00.z, lerpQ);
dst[imageW * iy + ix] = make_color(clr.x, clr.y, clr.z, 0);
}
}
|
09edcefbdf7f225b117cdb68dfaba0676f9b1e5c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "column_utilities.hpp"
#include <cudf/column/column_view.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/detail/copy.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <thrust/equal.h>
#include <gmock/gmock.h>
namespace cudf {
namespace test {
// Property equality
void expect_column_properties_equal(cudf::column_view lhs, cudf::column_view rhs) {
EXPECT_EQ(lhs.type(), rhs.type());
EXPECT_EQ(lhs.size(), rhs.size());
EXPECT_EQ(lhs.null_count(), rhs.null_count());
if (lhs.size() > 0) {
EXPECT_EQ(lhs.nullable(), rhs.nullable());
}
EXPECT_EQ(lhs.has_nulls(), rhs.has_nulls());
EXPECT_EQ(lhs.num_children(), rhs.num_children());
}
class corresponding_rows_unequal {
public:
corresponding_rows_unequal(table_device_view d_lhs, table_device_view d_rhs): comp(d_lhs, d_rhs) {
}
cudf::experimental::row_equality_comparator<true> comp;
__device__ bool operator()(size_type index) {
return !comp(index, index);
}
};
void expect_columns_equal(cudf::column_view lhs, cudf::column_view rhs, bool print_all_differences) {
expect_column_properties_equal(lhs, rhs);
auto d_lhs = cudf::table_device_view::create(table_view{{lhs}});
auto d_rhs = cudf::table_device_view::create(table_view{{rhs}});
thrust::device_vector<int> differences(lhs.size());
auto diff_iter = thrust::copy_if(thrust::device,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(lhs.size()),
differences.begin(),
corresponding_rows_unequal(*d_lhs, *d_rhs));
CUDA_TRY(hipDeviceSynchronize());
differences.resize(thrust::distance(differences.begin(), diff_iter));
if (diff_iter > differences.begin()) {
if (print_all_differences) {
//
// If there are differences, display them all
//
std::ostringstream buffer;
buffer << "differences:" << std::endl;
cudf::table_view source_table ({lhs, rhs});
fixed_width_column_wrapper<int32_t> diff_column(differences.begin(), differences.end());
std::unique_ptr<cudf::experimental::table> diff_table = cudf::experimental::gather(source_table,
diff_column);
//
// Need to pull back the differences
//
std::vector<std::string> h_left_strings = to_strings(diff_table->get_column(0));
std::vector<std::string> h_right_strings = to_strings(diff_table->get_column(1));
for (size_t i = 0 ; i < differences.size() ; ++i) {
buffer << "lhs[" << differences[i] << "] = " << h_left_strings[i]
<< ", rhs[" << differences[i] << "] = " << h_right_strings[i] << std::endl;
}
EXPECT_EQ(differences.size(), size_t{0}) << buffer.str();
} else {
//
// If there are differences, just display the first one
//
int index = differences[0];
auto diff_lhs = cudf::experimental::detail::slice(lhs, index, index+1);
auto diff_rhs = cudf::experimental::detail::slice(rhs, index, index+1);
std::vector<std::string> h_left_strings = to_strings(diff_lhs);
std::vector<std::string> h_right_strings = to_strings(diff_rhs);
EXPECT_EQ(differences.size(), size_t{0}) << "first difference: "
<< "lhs[" << index << "] = "
<< to_string(diff_lhs, "")
<< ", rhs[" << index << "] = "
<< to_string(diff_rhs, "");
}
}
}
// Bitwise equality
void expect_equal_buffers(void const* lhs, void const* rhs,
std::size_t size_bytes) {
if (size_bytes > 0) {
EXPECT_NE(nullptr, lhs);
EXPECT_NE(nullptr, rhs);
}
auto typed_lhs = static_cast<char const*>(lhs);
auto typed_rhs = static_cast<char const*>(rhs);
EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes,
typed_rhs));
}
struct column_view_printer {
template <typename Element, typename std::enable_if_t<is_numeric<Element>()>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string> & out) {
auto h_data = cudf::test::to_host<Element>(col);
out.resize(col.size());
if (col.nullable()) {
size_type index = 0;
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [&h_data, &index](Element el) {
return (bit_is_set(h_data.second.data(), index++)) ? std::to_string(el) : std::string("@");
});
} else {
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
return std::to_string(el);
});
}
}
template <typename Element, typename std::enable_if_t<is_timestamp<Element>()>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string> & out) {
//
// For timestamps, convert timestamp column to column of strings, then
// call string version
//
auto col_as_strings = cudf::strings::from_timestamps(col);
this->template operator()<cudf::string_view>(*col_as_strings, out);
}
template <typename Element, typename std::enable_if_t<std::is_same<Element, cudf::string_view>::value>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string> & out) {
//
// Implementation for strings, call special to_host variant
//
auto h_data = cudf::test::to_host<std::string>(col);
out.resize(col.size());
if (col.nullable()) {
size_type index = 0;
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [&h_data, &index](std::string el) {
return (bit_is_set(h_data.second.data(), index++)) ? el : std::string("@");
});
} else {
out = std::move(h_data.first);
}
}
};
std::vector<std::string> to_strings(cudf::column_view const& col) {
std::vector<std::string> reply;
cudf::experimental::type_dispatcher(col.type(),
column_view_printer{},
col,
reply);
return reply;
}
std::string to_string(cudf::column_view const& col, std::string const& delimiter) {
std::ostringstream buffer;
std::vector<std::string> h_data = to_strings(col);
  if (!h_data.empty()) {
    std::copy(h_data.begin(), h_data.end() - 1, std::ostream_iterator<std::string>(buffer, delimiter.c_str()));
    buffer << h_data.back();
  }
return buffer.str();
}
void print(cudf::column_view const& col, std::ostream &os, std::string const& delimiter) {
os << to_string(col, delimiter);
}
} // namespace test
} // namespace cudf
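The printer above decides between a value and the "@" null marker by probing the validity bitmask with bit_is_set. Below is a minimal host-side sketch of the word/bit arithmetic such a helper typically performs, assuming 32-bit mask words with one bit per row (a set bit meaning valid); it is an illustration, not cudf's implementation.

// Host-only sketch: validity-bitmask indexing with 32-bit words.
#include <cassert>
#include <cstdint>
#include <vector>

static bool sketch_bit_is_set(const uint32_t* mask, std::size_t row) {
  const std::size_t word = row / 32;  // which 32-bit word holds this row
  const std::size_t bit  = row % 32;  // which bit inside that word
  return (mask[word] >> bit) & 1u;
}

int main() {
  std::vector<uint32_t> mask(2, 0);   // room for 64 rows, all null
  mask[0] |= (1u << 3);               // mark row 3 valid
  mask[1] |= (1u << 1);               // mark row 33 valid
  assert(sketch_bit_is_set(mask.data(), 3));
  assert(!sketch_bit_is_set(mask.data(), 4));
  assert(sketch_bit_is_set(mask.data(), 33));
  return 0;
}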
|
09edcefbdf7f225b117cdb68dfaba0676f9b1e5c.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "column_utilities.hpp"
#include <cudf/column/column_view.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <cudf/strings/convert/convert_datetime.hpp>
#include <cudf/detail/copy.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <thrust/equal.h>
#include <gmock/gmock.h>
namespace cudf {
namespace test {
// Property equality
void expect_column_properties_equal(cudf::column_view lhs, cudf::column_view rhs) {
EXPECT_EQ(lhs.type(), rhs.type());
EXPECT_EQ(lhs.size(), rhs.size());
EXPECT_EQ(lhs.null_count(), rhs.null_count());
if (lhs.size() > 0) {
EXPECT_EQ(lhs.nullable(), rhs.nullable());
}
EXPECT_EQ(lhs.has_nulls(), rhs.has_nulls());
EXPECT_EQ(lhs.num_children(), rhs.num_children());
}
class corresponding_rows_unequal {
public:
corresponding_rows_unequal(table_device_view d_lhs, table_device_view d_rhs): comp(d_lhs, d_rhs) {
}
cudf::experimental::row_equality_comparator<true> comp;
__device__ bool operator()(size_type index) {
return !comp(index, index);
}
};
void expect_columns_equal(cudf::column_view lhs, cudf::column_view rhs, bool print_all_differences) {
expect_column_properties_equal(lhs, rhs);
auto d_lhs = cudf::table_device_view::create(table_view{{lhs}});
auto d_rhs = cudf::table_device_view::create(table_view{{rhs}});
thrust::device_vector<int> differences(lhs.size());
auto diff_iter = thrust::copy_if(thrust::device,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(lhs.size()),
differences.begin(),
corresponding_rows_unequal(*d_lhs, *d_rhs));
CUDA_TRY(cudaDeviceSynchronize());
differences.resize(thrust::distance(differences.begin(), diff_iter));
if (diff_iter > differences.begin()) {
if (print_all_differences) {
//
// If there are differences, display them all
//
std::ostringstream buffer;
buffer << "differences:" << std::endl;
cudf::table_view source_table ({lhs, rhs});
fixed_width_column_wrapper<int32_t> diff_column(differences.begin(), differences.end());
std::unique_ptr<cudf::experimental::table> diff_table = cudf::experimental::gather(source_table,
diff_column);
//
// Need to pull back the differences
//
std::vector<std::string> h_left_strings = to_strings(diff_table->get_column(0));
std::vector<std::string> h_right_strings = to_strings(diff_table->get_column(1));
for (size_t i = 0 ; i < differences.size() ; ++i) {
buffer << "lhs[" << differences[i] << "] = " << h_left_strings[i]
<< ", rhs[" << differences[i] << "] = " << h_right_strings[i] << std::endl;
}
EXPECT_EQ(differences.size(), size_t{0}) << buffer.str();
} else {
//
// If there are differences, just display the first one
//
int index = differences[0];
auto diff_lhs = cudf::experimental::detail::slice(lhs, index, index+1);
auto diff_rhs = cudf::experimental::detail::slice(rhs, index, index+1);
std::vector<std::string> h_left_strings = to_strings(diff_lhs);
std::vector<std::string> h_right_strings = to_strings(diff_rhs);
EXPECT_EQ(differences.size(), size_t{0}) << "first difference: "
<< "lhs[" << index << "] = "
<< to_string(diff_lhs, "")
<< ", rhs[" << index << "] = "
<< to_string(diff_rhs, "");
}
}
}
// Bitwise equality
void expect_equal_buffers(void const* lhs, void const* rhs,
std::size_t size_bytes) {
if (size_bytes > 0) {
EXPECT_NE(nullptr, lhs);
EXPECT_NE(nullptr, rhs);
}
auto typed_lhs = static_cast<char const*>(lhs);
auto typed_rhs = static_cast<char const*>(rhs);
EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes,
typed_rhs));
}
struct column_view_printer {
template <typename Element, typename std::enable_if_t<is_numeric<Element>()>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string> & out) {
auto h_data = cudf::test::to_host<Element>(col);
out.resize(col.size());
if (col.nullable()) {
size_type index = 0;
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [&h_data, &index](Element el) {
return (bit_is_set(h_data.second.data(), index++)) ? std::to_string(el) : std::string("@");
});
} else {
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) {
return std::to_string(el);
});
}
}
template <typename Element, typename std::enable_if_t<is_timestamp<Element>()>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string> & out) {
//
// For timestamps, convert timestamp column to column of strings, then
// call string version
//
auto col_as_strings = cudf::strings::from_timestamps(col);
this->template operator()<cudf::string_view>(*col_as_strings, out);
}
template <typename Element, typename std::enable_if_t<std::is_same<Element, cudf::string_view>::value>* = nullptr>
void operator()(cudf::column_view const& col, std::vector<std::string> & out) {
//
// Implementation for strings, call special to_host variant
//
auto h_data = cudf::test::to_host<std::string>(col);
out.resize(col.size());
if (col.nullable()) {
size_type index = 0;
std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [&h_data, &index](std::string el) {
return (bit_is_set(h_data.second.data(), index++)) ? el : std::string("@");
});
} else {
out = std::move(h_data.first);
}
}
};
std::vector<std::string> to_strings(cudf::column_view const& col) {
std::vector<std::string> reply;
cudf::experimental::type_dispatcher(col.type(),
column_view_printer{},
col,
reply);
return reply;
}
std::string to_string(cudf::column_view const& col, std::string const& delimiter) {
std::ostringstream buffer;
std::vector<std::string> h_data = to_strings(col);
  if (!h_data.empty()) {
    std::copy(h_data.begin(), h_data.end() - 1, std::ostream_iterator<std::string>(buffer, delimiter.c_str()));
    buffer << h_data.back();
  }
return buffer.str();
}
void print(cudf::column_view const& col, std::ostream &os, std::string const& delimiter) {
os << to_string(col, delimiter);
}
} // namespace test
} // namespace cudf
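expect_columns_equal collects the indices of mismatching rows by running thrust::copy_if over a counting iterator with a row-comparison predicate. The standalone sketch below reproduces that pattern against two raw device arrays of int; the rows_unequal functor is a hypothetical stand-in for cudf's row_equality_comparator. Compile with nvcc.

// Sketch: collect the indices where two device arrays differ.
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/execution_policy.h>
#include <cstdio>
#include <vector>

struct rows_unequal {
  const int* lhs;
  const int* rhs;
  __device__ bool operator()(int i) const { return lhs[i] != rhs[i]; }
};

int main() {
  std::vector<int> h_a = {1, 2, 3, 4, 5};
  std::vector<int> h_b = {1, 9, 3, 4, 7};
  thrust::device_vector<int> a = h_a;
  thrust::device_vector<int> b = h_b;
  thrust::device_vector<int> differences(a.size());

  auto diff_end = thrust::copy_if(
      thrust::device,
      thrust::make_counting_iterator(0),
      thrust::make_counting_iterator(static_cast<int>(a.size())),
      differences.begin(),
      rows_unequal{a.data().get(), b.data().get()});
  differences.resize(diff_end - differences.begin());

  std::printf("%zu differing rows\n", differences.size());  // expected: 2 (rows 1 and 4)
  return 0;
}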
|
e04c308135bd993a041866b28af94038ed4069d1.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../prims/test_utils.h"
#include "test_opg_utils.h"
#include <cuml/neighbors/knn_mg.hpp>
#include <gtest/gtest.h>
#include <memory>
#include <raft/random/make_blobs.hpp>
#include <raft/comms/mpi_comms.hpp>
#include <raft/cuda_utils.cuh>
namespace ML {
namespace KNN {
namespace opg {
struct KNNParams {
int k;
size_t min_rows;
size_t n_cols;
int n_query_parts;
int n_index_parts;
size_t batch_size;
};
class BruteForceKNNTest : public ::testing::TestWithParam<KNNParams> {
public:
void generate_partition(Matrix::floatData_t* part,
size_t n_rows,
int n_cols,
int n_clusters,
int part_num,
hipStream_t stream)
{
rmm::device_uvector<int> labels(n_rows, stream);
raft::random::make_blobs<float, int>(
part->ptr, labels.data(), (int)n_rows, (int)n_cols, 5, stream);
}
bool runTest(const KNNParams& params)
{
raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD);
const auto& comm = handle.get_comms();
const auto allocator = rmm::mr::get_current_device_resource();
hipStream_t stream = handle.get_stream();
int my_rank = comm.get_rank();
int size = comm.get_size();
int index_parts_per_rank = raft::ceildiv(params.n_index_parts, size);
int query_parts_per_rank = raft::ceildiv(params.n_query_parts, size);
std::vector<Matrix::RankSizePair*> idxPartsToRanks;
std::vector<Matrix::RankSizePair*> queryPartsToRanks;
for (int cur_rank = 0; cur_rank < size; cur_rank++) {
int ippr = index_parts_per_rank;
int qppr = query_parts_per_rank;
if (cur_rank == size - 1) {
ippr = params.n_index_parts - (cur_rank * index_parts_per_rank);
qppr = params.n_query_parts - (cur_rank * query_parts_per_rank);
}
std::cout << "Generating " << ippr << " partitions for rank " << cur_rank << std::endl;
std::cout << "min_rows: " << params.min_rows << std::endl;
for (int part_n = 0; part_n < ippr; part_n++) {
Matrix::RankSizePair* rsp = new Matrix::RankSizePair(cur_rank, params.min_rows);
idxPartsToRanks.push_back(rsp);
}
for (int part_n = 0; part_n < qppr; part_n++) {
Matrix::RankSizePair* rsp = new Matrix::RankSizePair(cur_rank, params.min_rows);
queryPartsToRanks.push_back(rsp);
}
}
std::cout << idxPartsToRanks.size() << std::endl;
if (my_rank == size - 1) {
index_parts_per_rank = params.n_index_parts - ((size - 1) * index_parts_per_rank);
query_parts_per_rank = params.n_query_parts - ((size - 1) * query_parts_per_rank);
}
std::cout << "Generating " << index_parts_per_rank << " partitions for rank " << my_rank
<< std::endl;
std::vector<Matrix::floatData_t*> query_parts;
std::vector<Matrix::floatData_t*> out_d_parts;
std::vector<Matrix::Data<int64_t>*> out_i_parts;
for (int i = 0; i < query_parts_per_rank; i++) {
float* q =
        (float*)allocator.get()->allocate(params.min_rows * params.n_cols * sizeof(float), stream);
float* o =
        (float*)allocator.get()->allocate(params.min_rows * params.k * sizeof(float), stream);
int64_t* ind =
(int64_t*)allocator.get()->allocate(params.min_rows * params.k * sizeof(int64_t), stream);
Matrix::Data<float>* query_d = new Matrix::Data<float>(q, params.min_rows * params.n_cols);
Matrix::floatData_t* out_d = new Matrix::floatData_t(o, params.min_rows * params.k);
Matrix::Data<int64_t>* out_i = new Matrix::Data<int64_t>(ind, params.min_rows * params.k);
query_parts.push_back(query_d);
out_d_parts.push_back(out_d);
out_i_parts.push_back(out_i);
generate_partition(query_d, params.min_rows, params.n_cols, 5, i, stream);
}
std::vector<Matrix::floatData_t*> index_parts;
for (int i = 0; i < index_parts_per_rank; i++) {
float* ind =
(float*)allocator.get()->allocate(params.min_rows * params.n_cols * sizeof(float), stream);
Matrix::Data<float>* i_d = new Matrix::Data<float>(ind, params.min_rows * params.n_cols);
index_parts.push_back(i_d);
generate_partition(i_d, params.min_rows, params.n_cols, 5, i, stream);
}
Matrix::PartDescriptor idx_desc(
params.min_rows * params.n_index_parts, params.n_cols, idxPartsToRanks, comm.get_rank());
Matrix::PartDescriptor query_desc(
params.min_rows * params.n_query_parts, params.n_cols, queryPartsToRanks, comm.get_rank());
handle.sync_stream(stream);
/**
* Execute brute_force_knn()
*/
brute_force_knn(handle,
out_i_parts,
out_d_parts,
index_parts,
idx_desc,
query_parts,
query_desc,
params.k,
params.batch_size,
true);
handle.sync_stream(stream);
std::cout << raft::arr2Str(out_i_parts[0]->ptr, 10, "final_out_I", stream) << std::endl;
std::cout << raft::arr2Str(out_d_parts[0]->ptr, 10, "final_out_D", stream) << std::endl;
/**
* Verify expected results
*/
for (Matrix::floatData_t* fd : query_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(float), stream);
delete fd;
}
for (Matrix::floatData_t* fd : index_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(float), stream);
delete fd;
}
for (Matrix::Data<int64_t>* fd : out_i_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(int64_t), stream);
delete fd;
}
for (Matrix::floatData_t* fd : out_d_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(float), stream);
delete fd;
}
for (Matrix::RankSizePair* rsp : queryPartsToRanks) {
delete rsp;
}
for (Matrix::RankSizePair* rsp : idxPartsToRanks) {
delete rsp;
}
int actual = 1;
int expected = 1;
return raft::CompareApprox<int>(1)(actual, expected);
}
private:
raft::handle_t handle;
};
const std::vector<KNNParams> inputs = {{5, 50, 3, 5, 5, 12},
{10, 50, 3, 5, 5, 50},
{5, 50, 3, 5, 5, 50},
{5, 500, 5, 5, 5, 50},
{10, 500, 50, 5, 5, 50},
{15, 500, 5, 5, 5, 50},
{5, 500, 10, 5, 5, 50},
{10, 500, 10, 5, 5, 50},
{15, 500, 10, 5, 5, 50}};
typedef BruteForceKNNTest KNNTest;
TEST_P(KNNTest, Result) { ASSERT_TRUE(runTest(GetParam())); }
INSTANTIATE_TEST_CASE_P(BruteForceKNNTest, KNNTest, ::testing::ValuesIn(inputs));
} // namespace opg
} // namespace KNN
} // namespace ML
|
e04c308135bd993a041866b28af94038ed4069d1.cu
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../prims/test_utils.h"
#include "test_opg_utils.h"
#include <cuml/neighbors/knn_mg.hpp>
#include <gtest/gtest.h>
#include <memory>
#include <raft/random/make_blobs.hpp>
#include <raft/comms/mpi_comms.hpp>
#include <raft/cuda_utils.cuh>
namespace ML {
namespace KNN {
namespace opg {
struct KNNParams {
int k;
size_t min_rows;
size_t n_cols;
int n_query_parts;
int n_index_parts;
size_t batch_size;
};
class BruteForceKNNTest : public ::testing::TestWithParam<KNNParams> {
public:
void generate_partition(Matrix::floatData_t* part,
size_t n_rows,
int n_cols,
int n_clusters,
int part_num,
cudaStream_t stream)
{
rmm::device_uvector<int> labels(n_rows, stream);
raft::random::make_blobs<float, int>(
part->ptr, labels.data(), (int)n_rows, (int)n_cols, 5, stream);
}
bool runTest(const KNNParams& params)
{
raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD);
const auto& comm = handle.get_comms();
const auto allocator = rmm::mr::get_current_device_resource();
cudaStream_t stream = handle.get_stream();
int my_rank = comm.get_rank();
int size = comm.get_size();
int index_parts_per_rank = raft::ceildiv(params.n_index_parts, size);
int query_parts_per_rank = raft::ceildiv(params.n_query_parts, size);
std::vector<Matrix::RankSizePair*> idxPartsToRanks;
std::vector<Matrix::RankSizePair*> queryPartsToRanks;
for (int cur_rank = 0; cur_rank < size; cur_rank++) {
int ippr = index_parts_per_rank;
int qppr = query_parts_per_rank;
if (cur_rank == size - 1) {
ippr = params.n_index_parts - (cur_rank * index_parts_per_rank);
qppr = params.n_query_parts - (cur_rank * query_parts_per_rank);
}
std::cout << "Generating " << ippr << " partitions for rank " << cur_rank << std::endl;
std::cout << "min_rows: " << params.min_rows << std::endl;
for (int part_n = 0; part_n < ippr; part_n++) {
Matrix::RankSizePair* rsp = new Matrix::RankSizePair(cur_rank, params.min_rows);
idxPartsToRanks.push_back(rsp);
}
for (int part_n = 0; part_n < qppr; part_n++) {
Matrix::RankSizePair* rsp = new Matrix::RankSizePair(cur_rank, params.min_rows);
queryPartsToRanks.push_back(rsp);
}
}
std::cout << idxPartsToRanks.size() << std::endl;
if (my_rank == size - 1) {
index_parts_per_rank = params.n_index_parts - ((size - 1) * index_parts_per_rank);
query_parts_per_rank = params.n_query_parts - ((size - 1) * query_parts_per_rank);
}
std::cout << "Generating " << index_parts_per_rank << " partitions for rank " << my_rank
<< std::endl;
std::vector<Matrix::floatData_t*> query_parts;
std::vector<Matrix::floatData_t*> out_d_parts;
std::vector<Matrix::Data<int64_t>*> out_i_parts;
for (int i = 0; i < query_parts_per_rank; i++) {
float* q =
        (float*)allocator.get()->allocate(params.min_rows * params.n_cols * sizeof(float), stream);
float* o =
        (float*)allocator.get()->allocate(params.min_rows * params.k * sizeof(float), stream);
int64_t* ind =
(int64_t*)allocator.get()->allocate(params.min_rows * params.k * sizeof(int64_t), stream);
Matrix::Data<float>* query_d = new Matrix::Data<float>(q, params.min_rows * params.n_cols);
Matrix::floatData_t* out_d = new Matrix::floatData_t(o, params.min_rows * params.k);
Matrix::Data<int64_t>* out_i = new Matrix::Data<int64_t>(ind, params.min_rows * params.k);
query_parts.push_back(query_d);
out_d_parts.push_back(out_d);
out_i_parts.push_back(out_i);
generate_partition(query_d, params.min_rows, params.n_cols, 5, i, stream);
}
std::vector<Matrix::floatData_t*> index_parts;
for (int i = 0; i < index_parts_per_rank; i++) {
float* ind =
(float*)allocator.get()->allocate(params.min_rows * params.n_cols * sizeof(float), stream);
Matrix::Data<float>* i_d = new Matrix::Data<float>(ind, params.min_rows * params.n_cols);
index_parts.push_back(i_d);
generate_partition(i_d, params.min_rows, params.n_cols, 5, i, stream);
}
Matrix::PartDescriptor idx_desc(
params.min_rows * params.n_index_parts, params.n_cols, idxPartsToRanks, comm.get_rank());
Matrix::PartDescriptor query_desc(
params.min_rows * params.n_query_parts, params.n_cols, queryPartsToRanks, comm.get_rank());
handle.sync_stream(stream);
/**
* Execute brute_force_knn()
*/
brute_force_knn(handle,
out_i_parts,
out_d_parts,
index_parts,
idx_desc,
query_parts,
query_desc,
params.k,
params.batch_size,
true);
handle.sync_stream(stream);
std::cout << raft::arr2Str(out_i_parts[0]->ptr, 10, "final_out_I", stream) << std::endl;
std::cout << raft::arr2Str(out_d_parts[0]->ptr, 10, "final_out_D", stream) << std::endl;
/**
* Verify expected results
*/
for (Matrix::floatData_t* fd : query_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(float), stream);
delete fd;
}
for (Matrix::floatData_t* fd : index_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(float), stream);
delete fd;
}
for (Matrix::Data<int64_t>* fd : out_i_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(int64_t), stream);
delete fd;
}
for (Matrix::floatData_t* fd : out_d_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(float), stream);
delete fd;
}
for (Matrix::RankSizePair* rsp : queryPartsToRanks) {
delete rsp;
}
for (Matrix::RankSizePair* rsp : idxPartsToRanks) {
delete rsp;
}
int actual = 1;
int expected = 1;
return raft::CompareApprox<int>(1)(actual, expected);
}
private:
raft::handle_t handle;
};
const std::vector<KNNParams> inputs = {{5, 50, 3, 5, 5, 12},
{10, 50, 3, 5, 5, 50},
{5, 50, 3, 5, 5, 50},
{5, 500, 5, 5, 5, 50},
{10, 500, 50, 5, 5, 50},
{15, 500, 5, 5, 5, 50},
{5, 500, 10, 5, 5, 50},
{10, 500, 10, 5, 5, 50},
{15, 500, 10, 5, 5, 50}};
typedef BruteForceKNNTest KNNTest;
TEST_P(KNNTest, Result) { ASSERT_TRUE(runTest(GetParam())); }
INSTANTIATE_TEST_CASE_P(BruteForceKNNTest, KNNTest, ::testing::ValuesIn(inputs));
} // namespace opg
} // namespace KNN
} // namespace ML
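runTest splits n_index_parts and n_query_parts across ranks with raft::ceildiv and hands the last rank whatever remains. The host-only sketch below reproduces just that arithmetic with made-up numbers (5 partitions over 2 ranks), independent of raft and cuML.

// Host-only sketch of the partition split used in runTest above.
#include <cstdio>

static int ceildiv(int a, int b) { return (a + b - 1) / b; }

int main() {
  const int n_parts = 5;
  const int n_ranks = 2;
  const int per_rank = ceildiv(n_parts, n_ranks);                   // 3
  for (int rank = 0; rank < n_ranks; ++rank) {
    int mine = (rank == n_ranks - 1) ? n_parts - rank * per_rank    // last rank takes the remainder
                                     : per_rank;
    std::printf("rank %d owns %d partition(s)\n", rank, mine);      // 3, then 2
  }
  return 0;
}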
|
201821b2968faa7175979c144378d886e1932fe0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void helloFromGPU(void)
{
printf("Hello from GPU - block: %d - thread: %d. \n", blockIdx.x, threadIdx.x);
}
|
201821b2968faa7175979c144378d886e1932fe0.cu
|
#include "includes.h"
__global__ void helloFromGPU(void)
{
printf("Hello from GPU - block: %d - thread: %d. \n", blockIdx.x, threadIdx.x);
}
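Neither file in this pair ships a host driver. The sketch below is a hypothetical one for the CUDA version: it reproduces the kernel so it compiles on its own, launches it with the triple-chevron syntax, and synchronizes so the device-side printf output is flushed; the HIP pair would launch the same kernel through hipLaunchKernelGGL instead.

// Hypothetical host driver for the helloFromGPU kernel (CUDA build).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void helloFromGPU(void)
{
  printf("Hello from GPU - block: %d - thread: %d. \n", blockIdx.x, threadIdx.x);
}

int main() {
  helloFromGPU<<<2, 4>>>();                    // 2 blocks of 4 threads -> 8 greetings
  cudaError_t err = cudaDeviceSynchronize();   // wait for the kernel and flush printf
  if (err != cudaSuccess) {
    std::fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
    return 1;
  }
  return 0;
}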
|
8c7ff04d1be730faad3cfe1ce7c32aa82fb17d57.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "compress_impl.h"
// TODO: fix the warnings
#ifdef _MSC_VER
#pragma warning(disable : 4244)
#endif
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
namespace onnxruntime {
namespace cuda {
void PrefixSumImpl(const int8_t* condition_data,
int32_t* condition_cumulative_sum,
const size_t length) {
thrust::inclusive_scan(thrust::device, condition_data, condition_data + length, condition_cumulative_sum);
}
template <typename T>
__global__ void _CompressKernel(const int32_t valid_condition_length,
const fast_divmod axis_right_stride_div,
const fast_divmod input_axis_included_stride_div,
const int32_t output_axis_included_stride,
const int32_t* condition_cumulative_sum,
const bool* condition_data,
const T* input_data,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
CUDA_LONG output_index = 0;
int div, mod;
input_axis_included_stride_div.divmod(id, div, mod);
output_index = output_axis_included_stride * div;
axis_right_stride_div.divmod(mod, div, mod);
if (div < valid_condition_length && condition_data[div]) {
output_index += (condition_cumulative_sum[div] - 1) * axis_right_stride_div.d_ + mod;
output_data[output_index] = input_data[id];
}
}
Status CompressImpl(const size_t element_bytes,
const int32_t valid_condition_length,
const int32_t axis_right_stride,
const int32_t input_axis_dim_length,
const int32_t output_axis_dim_length,
const int32_t* condition_cumulative_sum,
const bool* condition_data,
const void* input_data,
void* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
fast_divmod axis_right_stride_div(axis_right_stride);
fast_divmod input_axis_included_stride_div(axis_right_stride * input_axis_dim_length);
int output_axis_included_stride = axis_right_stride * output_axis_dim_length;
switch (element_bytes) {
case sizeof(int8_t):
hipLaunchKernelGGL(( _CompressKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
valid_condition_length,
axis_right_stride_div,
input_axis_included_stride_div,
output_axis_included_stride,
condition_cumulative_sum,
condition_data,
reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data),
(CUDA_LONG)N);
break;
case sizeof(int16_t):
hipLaunchKernelGGL(( _CompressKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
valid_condition_length,
axis_right_stride_div,
input_axis_included_stride_div,
output_axis_included_stride,
condition_cumulative_sum,
condition_data,
reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data),
(CUDA_LONG)N);
break;
case sizeof(int32_t):
hipLaunchKernelGGL(( _CompressKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
valid_condition_length,
axis_right_stride_div,
input_axis_included_stride_div,
output_axis_included_stride,
condition_cumulative_sum,
condition_data,
reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data),
(CUDA_LONG)N);
break;
case sizeof(int64_t):
hipLaunchKernelGGL(( _CompressKernel), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
valid_condition_length,
axis_right_stride_div,
input_axis_included_stride_div,
output_axis_included_stride,
condition_cumulative_sum,
condition_data,
reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data),
(CUDA_LONG)N);
break;
default:
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Compress operator");
}
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
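Apart from the hipify banner and the hip_runtime header, the main rewrite hipify applied to this file is the kernel-launch syntax. The toy example below shows that mapping in isolation: a triple-chevron launch with explicit shared-memory and stream arguments, and, in a comment, the equivalent hipLaunchKernelGGL call used above. The scale kernel and launch_scale helper are hypothetical and not part of the Compress operator.

// Toy illustration of the CUDA -> HIP launch-syntax mapping.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

void launch_scale(float* d_data, float factor, int n, cudaStream_t stream) {
  const int block = 256;
  const int grid  = (n + block - 1) / block;
  // CUDA form (as in the paired .cu file below):
  scale<<<grid, block, 0, stream>>>(d_data, factor, n);
  // Hipified form (as used in the .hip file above), same arguments with explicit dim3:
  //   hipLaunchKernelGGL(scale, dim3(grid), dim3(block), 0, stream,
  //                      d_data, factor, n);
}

int main() {
  const int n = 1024;
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));
  launch_scale(d, 2.0f, n, 0);        // default stream
  cudaDeviceSynchronize();
  cudaFree(d);
  return 0;
}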
|
8c7ff04d1be730faad3cfe1ce7c32aa82fb17d57.cu
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "compress_impl.h"
// TODO: fix the warnings
#ifdef _MSC_VER
#pragma warning(disable : 4244)
#endif
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
namespace onnxruntime {
namespace cuda {
void PrefixSumImpl(const int8_t* condition_data,
int32_t* condition_cumulative_sum,
const size_t length) {
thrust::inclusive_scan(thrust::device, condition_data, condition_data + length, condition_cumulative_sum);
}
template <typename T>
__global__ void _CompressKernel(const int32_t valid_condition_length,
const fast_divmod axis_right_stride_div,
const fast_divmod input_axis_included_stride_div,
const int32_t output_axis_included_stride,
const int32_t* condition_cumulative_sum,
const bool* condition_data,
const T* input_data,
T* output_data,
const CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
CUDA_LONG output_index = 0;
int div, mod;
input_axis_included_stride_div.divmod(id, div, mod);
output_index = output_axis_included_stride * div;
axis_right_stride_div.divmod(mod, div, mod);
if (div < valid_condition_length && condition_data[div]) {
output_index += (condition_cumulative_sum[div] - 1) * axis_right_stride_div.d_ + mod;
output_data[output_index] = input_data[id];
}
}
Status CompressImpl(const size_t element_bytes,
const int32_t valid_condition_length,
const int32_t axis_right_stride,
const int32_t input_axis_dim_length,
const int32_t output_axis_dim_length,
const int32_t* condition_cumulative_sum,
const bool* condition_data,
const void* input_data,
void* output_data,
const size_t N) {
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
fast_divmod axis_right_stride_div(axis_right_stride);
fast_divmod input_axis_included_stride_div(axis_right_stride * input_axis_dim_length);
int output_axis_included_stride = axis_right_stride * output_axis_dim_length;
switch (element_bytes) {
case sizeof(int8_t):
_CompressKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
valid_condition_length,
axis_right_stride_div,
input_axis_included_stride_div,
output_axis_included_stride,
condition_cumulative_sum,
condition_data,
reinterpret_cast<const ToCudaType<int8_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int8_t>::MappedType*>(output_data),
(CUDA_LONG)N);
break;
case sizeof(int16_t):
_CompressKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
valid_condition_length,
axis_right_stride_div,
input_axis_included_stride_div,
output_axis_included_stride,
condition_cumulative_sum,
condition_data,
reinterpret_cast<const ToCudaType<int16_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int16_t>::MappedType*>(output_data),
(CUDA_LONG)N);
break;
case sizeof(int32_t):
_CompressKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
valid_condition_length,
axis_right_stride_div,
input_axis_included_stride_div,
output_axis_included_stride,
condition_cumulative_sum,
condition_data,
reinterpret_cast<const ToCudaType<int32_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int32_t>::MappedType*>(output_data),
(CUDA_LONG)N);
break;
case sizeof(int64_t):
_CompressKernel<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
valid_condition_length,
axis_right_stride_div,
input_axis_included_stride_div,
output_axis_included_stride,
condition_cumulative_sum,
condition_data,
reinterpret_cast<const ToCudaType<int64_t>::MappedType*>(input_data),
reinterpret_cast<ToCudaType<int64_t>::MappedType*>(output_data),
(CUDA_LONG)N);
break;
default:
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Compress operator");
}
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
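The _CompressKernel indexes its output with condition_cumulative_sum[div] - 1, which works because PrefixSumImpl runs an inclusive scan over the 0/1 condition mask: at every kept element the running sum is that element's 1-based position in the compacted output. The host-side thrust sketch below demonstrates this with made-up data; it is not the operator itself.

// Sketch: inclusive scan of a 0/1 mask yields compacted output positions.
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int8_t> h_mask = {1, 0, 1, 1, 0, 1};     // condition_data
  thrust::device_vector<int8_t> mask = h_mask;
  thrust::device_vector<int32_t> cumsum(mask.size());  // condition_cumulative_sum

  // same call shape as PrefixSumImpl above
  thrust::inclusive_scan(thrust::device, mask.begin(), mask.end(), cumsum.begin());

  std::vector<int32_t> h_cumsum(cumsum.size());
  thrust::copy(cumsum.begin(), cumsum.end(), h_cumsum.begin());
  for (std::size_t i = 0; i < h_mask.size(); ++i) {
    if (h_mask[i]) {
      // kept element i lands at output position cumsum[i] - 1
      std::printf("input[%zu] -> output[%d]\n", i, h_cumsum[i] - 1);  // 0, 1, 2, 3
    }
  }
  return 0;
}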
|
31b95d8d146365bb81a5b1e71aa521ffda3cddb1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019,20-21-22 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <THH/THHAtomics.cuh>
#include "../../utils.h"
#define EPS 1e-7
namespace kaolin {
template<typename scalar_t>
__global__ void dibr_soft_mask_forward_cuda_kernel(
const scalar_t* __restrict__ face_vertices_image,
const scalar_t* __restrict__ face_bboxes,
const int64_t* __restrict__ selected_face_idx,
scalar_t* __restrict__ close_face_prob,
int64_t* __restrict__ close_face_idx,
uint8_t* __restrict__ close_face_dist_type,
scalar_t* __restrict__ soft_mask,
int batch_size,
int height,
int width,
int num_faces,
int knum,
float sigmainv,
float multiplier) {
// bidx * height * width + heiidx * width + wididx
int presentthread = blockIdx.x * blockDim.x + threadIdx.x;
int wididx = presentthread % width;
presentthread = (presentthread - wididx) / width;
int heiidx = presentthread % height;
int bidx = (presentthread - heiidx) / height;
if (bidx >= batch_size || heiidx >= height || wididx >= width) {
return;
}
// which pixel it belongs to
const int totalidx1 = bidx * height * width + heiidx * width + wididx;
const int totalidxk = totalidx1 * knum;
// which face it belongs to?
// face begins from 1
// convert it into int, use round!
int fidxint = selected_face_idx[totalidx1];
  // covered by a face: the soft mask is simply 1
  // (uncovered pixels fall through to the neighbour search in the else branch)
if (fidxint >= 0) {
soft_mask[totalidx1] = 1.0;
}
// pixels not covered by any faces
else {
// pixel coordinate
scalar_t x0 = multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = multiplier / height * (height - 2 * heiidx - 1);
int kid = 0;
for (int fidxint = 0; fidxint < num_faces; fidxint++) {
// which face it belongs to
const int shift1 = bidx * num_faces + fidxint;
const int shift4 = shift1 * 4;
const int shift6 = shift1 * 6;
///////////////////////////////////////////////////////////////
      // will this pixel be influenced by this face?
scalar_t xmin = face_bboxes[shift4 + 0];
scalar_t ymin = face_bboxes[shift4 + 1];
scalar_t xmax = face_bboxes[shift4 + 2];
scalar_t ymax = face_bboxes[shift4 + 3];
// not covered by this face!
if (x0 < xmin || x0 >= xmax || y0 < ymin || y0 >= ymax) {
continue;
}
//////////////////////////////////////////////////////////
scalar_t pdis[6];
// perdis
for (int i = 0; i < 3; i++) {
int pshift = shift6 + i * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
int pshift2 = shift6 + ((i + 1) % 3) * 2;
scalar_t x2 = face_vertices_image[pshift2 + 0];
scalar_t y2 = face_vertices_image[pshift2 + 1];
// ax + by + c = 0
scalar_t A = y2 - y1;
scalar_t B = x1 - x2;
scalar_t C = x2 * y1 - x1 * y2;
// dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2)
// up = ax + by + c
// down = a^2 + b^2
// dissquare = up^2 / down
scalar_t up = A * x0 + B * y0 + C;
scalar_t down = A * A + B * B;
// is it a bad triangle?
scalar_t x3 = B * B * x0 - A * B * y0 - A * C;
scalar_t y3 = A * A * y0 - A * B * x0 - B * C;
x3 = x3 / (down + EPS);
y3 = y3 / (down + EPS);
scalar_t direct = (x3 - x1) * (x3 - x2) + (y3 - y1) * (y3 - y2);
if (direct > 0) {
// bad triangle
pdis[i] = 4 * multiplier * multiplier;
} else {
// perpendicular distance
pdis[i] = up * up / (down + EPS);
}
}
////////////////////////////////////////////////////////////
// point distance
for (int i = 0; i < 3; i++) {
int pshift = shift6 + i * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
pdis[i + 3] = (x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1);
}
int edgeid = 0;
scalar_t dissquare = pdis[0];
for (int i = 1; i < 6; i++) {
if (dissquare > pdis[i]) {
dissquare = pdis[i];
edgeid = i;
}
}
scalar_t z = sigmainv * dissquare / multiplier / multiplier;
scalar_t prob = exp(-z);
close_face_prob[totalidxk + kid] = prob;
close_face_idx[totalidxk + kid] = fidxint;
close_face_dist_type[totalidxk + kid] = edgeid + 1;
kid++;
if (kid >= knum)
break;
}
scalar_t allprob = 1.0;
for (int i = 0; i < kid; i++) {
scalar_t prob = close_face_prob[totalidxk + i];
allprob *= (1.0 - prob);
}
// final result
allprob = 1.0 - allprob;
soft_mask[totalidx1] = allprob;
}
}
void dibr_soft_mask_forward_cuda_impl(
const at::Tensor face_vertices_image,
const at::Tensor face_large_bboxes,
const at::Tensor selected_face_idx,
at::Tensor close_face_prob,
at::Tensor close_face_idx,
at::Tensor close_face_dist_type,
at::Tensor soft_mask,
const float sigmainv,
const float multiplier) {
const int batch_size = face_vertices_image.size(0);
const int num_faces = face_vertices_image.size(1);
const int height = selected_face_idx.size(1);
const int width = selected_face_idx.size(2);
const int knum = close_face_idx.size(3);
const int num_pixels = batch_size * height * width;
AT_DISPATCH_FLOATING_TYPES(face_vertices_image.scalar_type(),
"dibr_soft_mask_forward_cuda", [&] {
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(at::device_of(face_vertices_image));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int block_size = 512;
const int grid_size = (num_pixels + block_size - 1) / block_size;
const dim3 threads(block_size, 1, 1);
const dim3 blocks(grid_size, 1, 1);
hipLaunchKernelGGL(( dibr_soft_mask_forward_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
face_vertices_image.data_ptr<scalar_t>(),
face_large_bboxes.data_ptr<scalar_t>(),
selected_face_idx.data_ptr<int64_t>(),
close_face_prob.data_ptr<scalar_t>(),
close_face_idx.data_ptr<int64_t>(),
close_face_dist_type.data_ptr<uint8_t>(),
soft_mask.data_ptr<scalar_t>(),
batch_size, height, width, num_faces, knum, sigmainv, multiplier
);
AT_CUDA_CHECK(hipGetLastError());
});
return;
}
template<typename scalar_t>
__global__ void dibr_soft_mask_backward_cuda_kernel(
const scalar_t* __restrict__ grad_soft_mask,
const scalar_t* __restrict__ soft_mask,
const int64_t* __restrict__ selected_face_idx,
const scalar_t* __restrict__ close_face_prob,
const int64_t* __restrict__ close_face_idx,
const uint8_t* __restrict__ close_face_dist_type,
const scalar_t* __restrict__ face_vertices_image,
scalar_t* __restrict__ grad_face_vertices_image,
int batch_size, int height, int width, int num_faces,
int knum, float sigmainv, float multiplier) {
int presentthread = blockIdx.x * blockDim.x + threadIdx.x;
int wididx = presentthread % width;
presentthread = (presentthread - wididx) / width;
int heiidx = presentthread % height;
int bidx = (presentthread - heiidx) / height;
if (bidx >= batch_size || heiidx >= height || wididx >= width)
return;
// which pixel it belongs to
const int totalidx1 = bidx * height * width + heiidx * width + wididx;
const int totalidxk = totalidx1 * knum;
// coordinates
scalar_t x0 = multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = multiplier / height * (height - 2 * heiidx - 1);
// which face it belongs to?
int fidxint = selected_face_idx[totalidx1];
// not covered by any faces
if (fidxint < 0) {
scalar_t dLdp = grad_soft_mask[totalidx1];
scalar_t allprob = soft_mask[totalidx1];
for (int kid = 0; kid < knum; kid++) {
int fidxint = close_face_idx[totalidxk + kid];
if (fidxint < 0)
break;
const int shift1 = bidx * num_faces + fidxint;
const int shift6 = shift1 * 6;
scalar_t prob = close_face_prob[totalidxk + kid];
scalar_t dLdz = -1.0 * sigmainv * dLdp * (1.0 - allprob)
/ (1.0 - prob + EPS) * prob;
int edgecase = close_face_dist_type[totalidxk + kid];
int edgeid = edgecase - 1;
if (edgeid >= 3) {
// point distance
int pshift = shift6 + (edgeid - 3) * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
scalar_t dLdx1 = dLdz * 2 * (x1 - x0);
scalar_t dLdy1 = dLdz * 2 * (y1 - y0);
atomicAdd(grad_face_vertices_image + pshift + 0,
dLdx1 / multiplier);
atomicAdd(grad_face_vertices_image + pshift + 1,
dLdy1 / multiplier);
} else {
// perpendicular distance
int pshift = shift6 + edgeid * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
int pshift2 = shift6 + ((edgeid + 1) % 3) * 2;
scalar_t x2 = face_vertices_image[pshift2 + 0];
scalar_t y2 = face_vertices_image[pshift2 + 1];
// ax + by + c = 0
scalar_t A = y2 - y1;
scalar_t B = x1 - x2;
scalar_t C = x2 * y1 - x1 * y2;
// dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2)
// up = ax + by + c
// down = a^2 + b^2
// dissquare = up^2 / down
scalar_t up = A * x0 + B * y0 + C;
scalar_t down = A * A + B * B;
scalar_t dissquare = up * up / (down + EPS);
scalar_t dzdA = 2 * (x0 * up - dissquare * A) / (down + EPS);
scalar_t dzdB = 2 * (y0 * up - dissquare * B) / (down + EPS);
scalar_t dzdC = 2 * up / (down + EPS);
scalar_t dLdx1 = dLdz * (dzdB - y2 * dzdC);
scalar_t dLdy1 = dLdz * (x2 * dzdC - dzdA);
scalar_t dLdx2 = dLdz * (y1 * dzdC - dzdB);
scalar_t dLdy2 = dLdz * (dzdA - x1 * dzdC);
atomicAdd(grad_face_vertices_image + pshift + 0,
dLdx1 / multiplier);
atomicAdd(grad_face_vertices_image + pshift + 1,
dLdy1 / multiplier);
atomicAdd(grad_face_vertices_image + pshift2 + 0,
dLdx2 / multiplier);
atomicAdd(grad_face_vertices_image + pshift2 + 1,
dLdy2 / multiplier);
}
}
}
return;
}
void dibr_soft_mask_backward_cuda_impl(
const at::Tensor grad_soft_mask,
const at::Tensor soft_mask,
const at::Tensor selected_face_idx,
const at::Tensor close_face_prob,
const at::Tensor close_face_idx,
const at::Tensor close_face_dist_type,
const at::Tensor face_vertices_image,
at::Tensor grad_face_vertices_image,
const float sigmainv,
const float multiplier) {
int batch_size = face_vertices_image.size(0);
int num_faces = face_vertices_image.size(1);
int height = selected_face_idx.size(1);
int width = selected_face_idx.size(2);
int knum = close_face_idx.size(3);
const int num_pixels = batch_size * height * width;
AT_DISPATCH_FLOATING_TYPES(face_vertices_image.scalar_type(),
"dibr_soft_mask_backward_cuda", [&] {
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(at::device_of(face_vertices_image));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int block_size = 1024;
const int grid_size = (num_pixels + block_size - 1) / block_size;
const dim3 threads(block_size, 1, 1);
const dim3 blocks(grid_size, 1, 1);
hipLaunchKernelGGL(( dibr_soft_mask_backward_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
grad_soft_mask.data_ptr<scalar_t>(),
soft_mask.data_ptr<scalar_t>(),
selected_face_idx.data_ptr<int64_t>(),
close_face_prob.data_ptr<scalar_t>(),
close_face_idx.data_ptr<int64_t>(),
close_face_dist_type.data_ptr<uint8_t>(),
face_vertices_image.data_ptr<scalar_t>(),
grad_face_vertices_image.data_ptr<scalar_t>(),
batch_size, height, width, num_faces,
knum, sigmainv, multiplier
);
AT_CUDA_CHECK(hipGetLastError());
});
return;
}
}
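At the end of the forward kernel each nearby face contributes a probability exp(-z), and the pixel's soft coverage is one minus the product of (1 - p_k) over those faces. The host-only sketch below evaluates that aggregation for three hypothetical per-face probabilities.

// Sketch: soft-mask aggregation over nearby-face probabilities.
#include <cstdio>
#include <vector>

int main() {
  // exp(-sigmainv * d^2 / multiplier^2) for three hypothetical nearby faces
  std::vector<double> close_face_prob = {0.60, 0.25, 0.10};
  double not_covered = 1.0;
  for (double p : close_face_prob) not_covered *= (1.0 - p);
  double soft_mask = 1.0 - not_covered;   // 1 - 0.4 * 0.75 * 0.9 = 0.73
  std::printf("soft mask = %.2f\n", soft_mask);
  return 0;
}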
|
31b95d8d146365bb81a5b1e71aa521ffda3cddb1.cu
|
// Copyright (c) 2019,20-21-22 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <THC/THCAtomics.cuh>
#include "../../utils.h"
#define EPS 1e-7
namespace kaolin {
template<typename scalar_t>
__global__ void dibr_soft_mask_forward_cuda_kernel(
const scalar_t* __restrict__ face_vertices_image,
const scalar_t* __restrict__ face_bboxes,
const int64_t* __restrict__ selected_face_idx,
scalar_t* __restrict__ close_face_prob,
int64_t* __restrict__ close_face_idx,
uint8_t* __restrict__ close_face_dist_type,
scalar_t* __restrict__ soft_mask,
int batch_size,
int height,
int width,
int num_faces,
int knum,
float sigmainv,
float multiplier) {
// bidx * height * width + heiidx * width + wididx
int presentthread = blockIdx.x * blockDim.x + threadIdx.x;
int wididx = presentthread % width;
presentthread = (presentthread - wididx) / width;
int heiidx = presentthread % height;
int bidx = (presentthread - heiidx) / height;
if (bidx >= batch_size || heiidx >= height || wididx >= width) {
return;
}
// which pixel it belongs to
const int totalidx1 = bidx * height * width + heiidx * width + wididx;
const int totalidxk = totalidx1 * knum;
// which face it belongs to?
// face begins from 1
// convert it into int, use round!
int fidxint = selected_face_idx[totalidx1];
  // covered by a face: the soft mask is simply 1
  // (uncovered pixels fall through to the neighbour search in the else branch)
if (fidxint >= 0) {
soft_mask[totalidx1] = 1.0;
}
// pixels not covered by any faces
else {
// pixel coordinate
scalar_t x0 = multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = multiplier / height * (height - 2 * heiidx - 1);
int kid = 0;
for (int fidxint = 0; fidxint < num_faces; fidxint++) {
// which face it belongs to
const int shift1 = bidx * num_faces + fidxint;
const int shift4 = shift1 * 4;
const int shift6 = shift1 * 6;
///////////////////////////////////////////////////////////////
      // will this pixel be influenced by this face?
scalar_t xmin = face_bboxes[shift4 + 0];
scalar_t ymin = face_bboxes[shift4 + 1];
scalar_t xmax = face_bboxes[shift4 + 2];
scalar_t ymax = face_bboxes[shift4 + 3];
// not covered by this face!
if (x0 < xmin || x0 >= xmax || y0 < ymin || y0 >= ymax) {
continue;
}
//////////////////////////////////////////////////////////
scalar_t pdis[6];
// perdis
for (int i = 0; i < 3; i++) {
int pshift = shift6 + i * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
int pshift2 = shift6 + ((i + 1) % 3) * 2;
scalar_t x2 = face_vertices_image[pshift2 + 0];
scalar_t y2 = face_vertices_image[pshift2 + 1];
// ax + by + c = 0
scalar_t A = y2 - y1;
scalar_t B = x1 - x2;
scalar_t C = x2 * y1 - x1 * y2;
// dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2)
// up = ax + by + c
// down = a^2 + b^2
// dissquare = up^2 / down
scalar_t up = A * x0 + B * y0 + C;
scalar_t down = A * A + B * B;
// is it a bad triangle?
scalar_t x3 = B * B * x0 - A * B * y0 - A * C;
scalar_t y3 = A * A * y0 - A * B * x0 - B * C;
x3 = x3 / (down + EPS);
y3 = y3 / (down + EPS);
scalar_t direct = (x3 - x1) * (x3 - x2) + (y3 - y1) * (y3 - y2);
if (direct > 0) {
// bad triangle
pdis[i] = 4 * multiplier * multiplier;
} else {
// perpendicular distance
pdis[i] = up * up / (down + EPS);
}
}
////////////////////////////////////////////////////////////
// point distance
for (int i = 0; i < 3; i++) {
int pshift = shift6 + i * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
pdis[i + 3] = (x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1);
}
int edgeid = 0;
scalar_t dissquare = pdis[0];
for (int i = 1; i < 6; i++) {
if (dissquare > pdis[i]) {
dissquare = pdis[i];
edgeid = i;
}
}
scalar_t z = sigmainv * dissquare / multiplier / multiplier;
scalar_t prob = exp(-z);
close_face_prob[totalidxk + kid] = prob;
close_face_idx[totalidxk + kid] = fidxint;
close_face_dist_type[totalidxk + kid] = edgeid + 1;
kid++;
if (kid >= knum)
break;
}
scalar_t allprob = 1.0;
for (int i = 0; i < kid; i++) {
scalar_t prob = close_face_prob[totalidxk + i];
allprob *= (1.0 - prob);
}
// final result
allprob = 1.0 - allprob;
soft_mask[totalidx1] = allprob;
}
}
void dibr_soft_mask_forward_cuda_impl(
const at::Tensor face_vertices_image,
const at::Tensor face_large_bboxes,
const at::Tensor selected_face_idx,
at::Tensor close_face_prob,
at::Tensor close_face_idx,
at::Tensor close_face_dist_type,
at::Tensor soft_mask,
const float sigmainv,
const float multiplier) {
const int batch_size = face_vertices_image.size(0);
const int num_faces = face_vertices_image.size(1);
const int height = selected_face_idx.size(1);
const int width = selected_face_idx.size(2);
const int knum = close_face_idx.size(3);
const int num_pixels = batch_size * height * width;
AT_DISPATCH_FLOATING_TYPES(face_vertices_image.scalar_type(),
"dibr_soft_mask_forward_cuda", [&] {
const at::cuda::OptionalCUDAGuard device_guard(at::device_of(face_vertices_image));
auto stream = at::cuda::getCurrentCUDAStream();
const int block_size = 512;
const int grid_size = (num_pixels + block_size - 1) / block_size;
const dim3 threads(block_size, 1, 1);
const dim3 blocks(grid_size, 1, 1);
dibr_soft_mask_forward_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
face_vertices_image.data_ptr<scalar_t>(),
face_large_bboxes.data_ptr<scalar_t>(),
selected_face_idx.data_ptr<int64_t>(),
close_face_prob.data_ptr<scalar_t>(),
close_face_idx.data_ptr<int64_t>(),
close_face_dist_type.data_ptr<uint8_t>(),
soft_mask.data_ptr<scalar_t>(),
batch_size, height, width, num_faces, knum, sigmainv, multiplier
);
AT_CUDA_CHECK(cudaGetLastError());
});
return;
}
template<typename scalar_t>
__global__ void dibr_soft_mask_backward_cuda_kernel(
const scalar_t* __restrict__ grad_soft_mask,
const scalar_t* __restrict__ soft_mask,
const int64_t* __restrict__ selected_face_idx,
const scalar_t* __restrict__ close_face_prob,
const int64_t* __restrict__ close_face_idx,
const uint8_t* __restrict__ close_face_dist_type,
const scalar_t* __restrict__ face_vertices_image,
scalar_t* __restrict__ grad_face_vertices_image,
int batch_size, int height, int width, int num_faces,
int knum, float sigmainv, float multiplier) {
int presentthread = blockIdx.x * blockDim.x + threadIdx.x;
int wididx = presentthread % width;
presentthread = (presentthread - wididx) / width;
int heiidx = presentthread % height;
int bidx = (presentthread - heiidx) / height;
if (bidx >= batch_size || heiidx >= height || wididx >= width)
return;
// which pixel it belongs to
const int totalidx1 = bidx * height * width + heiidx * width + wididx;
const int totalidxk = totalidx1 * knum;
// coordinates
scalar_t x0 = multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = multiplier / height * (height - 2 * heiidx - 1);
// which face it belongs to?
int fidxint = selected_face_idx[totalidx1];
// not covered by any faces
if (fidxint < 0) {
scalar_t dLdp = grad_soft_mask[totalidx1];
scalar_t allprob = soft_mask[totalidx1];
for (int kid = 0; kid < knum; kid++) {
int fidxint = close_face_idx[totalidxk + kid];
if (fidxint < 0)
break;
const int shift1 = bidx * num_faces + fidxint;
const int shift6 = shift1 * 6;
scalar_t prob = close_face_prob[totalidxk + kid];
scalar_t dLdz = -1.0 * sigmainv * dLdp * (1.0 - allprob)
/ (1.0 - prob + EPS) * prob;
int edgecase = close_face_dist_type[totalidxk + kid];
int edgeid = edgecase - 1;
if (edgeid >= 3) {
// point distance
int pshift = shift6 + (edgeid - 3) * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
scalar_t dLdx1 = dLdz * 2 * (x1 - x0);
scalar_t dLdy1 = dLdz * 2 * (y1 - y0);
atomicAdd(grad_face_vertices_image + pshift + 0,
dLdx1 / multiplier);
atomicAdd(grad_face_vertices_image + pshift + 1,
dLdy1 / multiplier);
} else {
// perpendicular distance
int pshift = shift6 + edgeid * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
int pshift2 = shift6 + ((edgeid + 1) % 3) * 2;
scalar_t x2 = face_vertices_image[pshift2 + 0];
scalar_t y2 = face_vertices_image[pshift2 + 1];
// ax + by + c = 0
scalar_t A = y2 - y1;
scalar_t B = x1 - x2;
scalar_t C = x2 * y1 - x1 * y2;
// dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2)
// up = ax + by + c
// down = a^2 + b^2
// dissquare = up^2 / down
scalar_t up = A * x0 + B * y0 + C;
scalar_t down = A * A + B * B;
scalar_t dissquare = up * up / (down + EPS);
scalar_t dzdA = 2 * (x0 * up - dissquare * A) / (down + EPS);
scalar_t dzdB = 2 * (y0 * up - dissquare * B) / (down + EPS);
scalar_t dzdC = 2 * up / (down + EPS);
scalar_t dLdx1 = dLdz * (dzdB - y2 * dzdC);
scalar_t dLdy1 = dLdz * (x2 * dzdC - dzdA);
scalar_t dLdx2 = dLdz * (y1 * dzdC - dzdB);
scalar_t dLdy2 = dLdz * (dzdA - x1 * dzdC);
atomicAdd(grad_face_vertices_image + pshift + 0,
dLdx1 / multiplier);
atomicAdd(grad_face_vertices_image + pshift + 1,
dLdy1 / multiplier);
atomicAdd(grad_face_vertices_image + pshift2 + 0,
dLdx2 / multiplier);
atomicAdd(grad_face_vertices_image + pshift2 + 1,
dLdy2 / multiplier);
}
}
}
return;
}
void dibr_soft_mask_backward_cuda_impl(
const at::Tensor grad_soft_mask,
const at::Tensor soft_mask,
const at::Tensor selected_face_idx,
const at::Tensor close_face_prob,
const at::Tensor close_face_idx,
const at::Tensor close_face_dist_type,
const at::Tensor face_vertices_image,
at::Tensor grad_face_vertices_image,
const float sigmainv,
const float multiplier) {
int batch_size = face_vertices_image.size(0);
int num_faces = face_vertices_image.size(1);
int height = selected_face_idx.size(1);
int width = selected_face_idx.size(2);
int knum = close_face_idx.size(3);
const int num_pixels = batch_size * height * width;
AT_DISPATCH_FLOATING_TYPES(face_vertices_image.scalar_type(),
"dibr_soft_mask_backward_cuda", [&] {
const at::cuda::OptionalCUDAGuard device_guard(at::device_of(face_vertices_image));
auto stream = at::cuda::getCurrentCUDAStream();
const int block_size = 1024;
const int grid_size = (num_pixels + block_size - 1) / block_size;
const dim3 threads(block_size, 1, 1);
const dim3 blocks(grid_size, 1, 1);
dibr_soft_mask_backward_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
grad_soft_mask.data_ptr<scalar_t>(),
soft_mask.data_ptr<scalar_t>(),
selected_face_idx.data_ptr<int64_t>(),
close_face_prob.data_ptr<scalar_t>(),
close_face_idx.data_ptr<int64_t>(),
close_face_dist_type.data_ptr<uint8_t>(),
face_vertices_image.data_ptr<scalar_t>(),
grad_face_vertices_image.data_ptr<scalar_t>(),
batch_size, height, width, num_faces,
knum, sigmainv, multiplier
);
AT_CUDA_CHECK(cudaGetLastError());
});
return;
}
}
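Both kernels in this pair measure the squared distance from a pixel to each triangle edge through the line form A x + B y + C = 0, and detect when the perpendicular foot falls outside the segment (the "bad triangle" branch). The host-side sketch below reimplements that per-edge test for illustration only; unlike the kernel, which assigns a large penalty and relies on its separate vertex-distance terms, the sketch simply falls back to the nearer endpoint.

// Sketch: squared distance from a point to an edge via A, B, C line coefficients.
#include <cstdio>

static const double EPS_SKETCH = 1e-7;

double edge_distance_squared(double x0, double y0,
                             double x1, double y1,
                             double x2, double y2) {
  const double A = y2 - y1;
  const double B = x1 - x2;
  const double C = x2 * y1 - x1 * y2;
  const double up   = A * x0 + B * y0 + C;
  const double down = A * A + B * B;
  // foot of the perpendicular from (x0, y0) onto the line
  const double x3 = (B * B * x0 - A * B * y0 - A * C) / (down + EPS_SKETCH);
  const double y3 = (A * A * y0 - A * B * x0 - B * C) / (down + EPS_SKETCH);
  const double direct = (x3 - x1) * (x3 - x2) + (y3 - y1) * (y3 - y2);
  if (direct > 0) {
    // projection lies outside the segment: use the nearer endpoint instead
    const double d1 = (x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1);
    const double d2 = (x0 - x2) * (x0 - x2) + (y0 - y2) * (y0 - y2);
    return d1 < d2 ? d1 : d2;
  }
  return up * up / (down + EPS_SKETCH);   // squared perpendicular distance
}

int main() {
  // unit segment from (0,0) to (1,0); the point (0.5, 2) is 2 above it
  std::printf("%.1f\n", edge_distance_squared(0.5, 2.0, 0.0, 0.0, 1.0, 0.0));  // 4.0
  // the point (3, 0) projects past the segment end: distance to (1,0) is 2
  std::printf("%.1f\n", edge_distance_squared(3.0, 0.0, 0.0, 0.0, 1.0, 0.0));  // 4.0
  return 0;
}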
|
03c3ed3bb9f31025e76c3c56796d2a6de076f62d.hip
|
// !!! This is a file automatically generated by hipify!!!
/*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Module: CudaAbsImageFilterKernel.cu
Language: CUDA
Copyright (c) Insight Software Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
/** \class CudaAbsImageFilterKernel.cu
* \brief Cuda kernel code
* \author Phillip Ward, Luke Parkinson, Daniel Micevski, Christopher
* Share, Victorian Partnership for Advanced Computing (VPAC).
* Richard Beare, Monash University
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#ifndef CITK_USE_THRUST
template <class T>
__global__ void AbsImageKernel(T *output, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
T temp = output[idx];
output[idx] = (temp < 0) ? -temp : temp;
}
}
template <class T, class S>
__global__ void AbsImageKernel(S *output, const T *input, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
T temp = input[idx];
output[idx] = (temp < 0) ? -temp : temp;
}
}
template <class T, class S>
void AbsImageKernelFunction(const T * input, S * output, unsigned int N)
{
// Compute execution configuration
int blockSize = 128;
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
// Call kernel
if (input == output)
{
hipLaunchKernelGGL(( AbsImageKernel), dim3(nBlocks), dim3(blockSize) , 0, 0, output, N);
}
else
{
hipLaunchKernelGGL(( AbsImageKernel), dim3(nBlocks), dim3(blockSize) , 0, 0, output, input, N);
}
}
#else
#include "thrust/transform.h"
#include "thrust/functional.h"
template <typename T>
struct ABS
{
__host__ __device__
T operator()(const T& x) const {
return abs(x);
}
};
template <class T, typename S>
void AbsImageKernelFunction(const T * input, S * output, unsigned int N)
{
thrust::device_ptr<const T> i1(input);
thrust::device_ptr<S> o1(output);
// absolute_value is deprecated in thrust - not sure what to replace
// it with
thrust::transform(i1, i1 + N, o1, ABS<S>());
}
#endif
// versions we wish to compile
#define THISFUNC AbsImageKernelFunction
#define THISTYPE float
template void THISFUNC<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, unsigned int N);
#undef THISTYPE
#define THISTYPE int
template void THISFUNC<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, unsigned int N);
#undef THISTYPE
#define THISTYPE short
template void THISFUNC<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, unsigned int N);
#undef THISTYPE
#undef THISFUNC
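The non-thrust path computes its grid size as N/blockSize + (N % blockSize == 0 ? 0 : 1), which is the same ceiling division written as (N + blockSize - 1) / blockSize elsewhere in these files. The small host-only check below confirms the two forms agree.

// Host-only check: both block-count expressions are ceiling division.
#include <cassert>

int main() {
  const int blockSize = 128;
  for (int N = 0; N < 10 * blockSize; ++N) {
    int a = N / blockSize + (N % blockSize == 0 ? 0 : 1);
    int b = (N + blockSize - 1) / blockSize;
    assert(a == b);
  }
  return 0;
}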
|
03c3ed3bb9f31025e76c3c56796d2a6de076f62d.cu
|
/*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Module: CudaAbsImageFilterKernel.cu
Language: CUDA
Copyright (c) Insight Software Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
/** \class CudaAbsImageFilterKernel.cu
* \brief Cuda kernel code
* \author Phillip Ward, Luke Parkinson, Daniel Micevski, Christopher
* Share, Victorian Partnership for Advanced Computing (VPAC).
* Richard Beare, Monash University
*/
#include <stdio.h>
#include <cuda.h>
#ifndef CITK_USE_THRUST
template <class T>
__global__ void AbsImageKernel(T *output, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
T temp = output[idx];
output[idx] = (temp < 0) ? -temp : temp;
}
}
template <class T, class S>
__global__ void AbsImageKernel(S *output, const T *input, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
T temp = input[idx];
output[idx] = (temp < 0) ? -temp : temp;
}
}
template <class T, class S>
void AbsImageKernelFunction(const T * input, S * output, unsigned int N)
{
// Compute execution configuration
int blockSize = 128;
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
// Call kernel
if (input == output)
{
AbsImageKernel<<< nBlocks, blockSize >>> (output, N);
}
else
{
AbsImageKernel<<< nBlocks, blockSize >>> (output, input, N);
}
}
#else
#include "thrust/transform.h"
#include "thrust/functional.h"
template <typename T>
struct ABS
{
__host__ __device__
T operator()(const T& x) const {
return abs(x);
}
};
template <class T, typename S>
void AbsImageKernelFunction(const T * input, S * output, unsigned int N)
{
thrust::device_ptr<const T> i1(input);
thrust::device_ptr<S> o1(output);
// absolute_value is deprecated in thrust - not sure what to replace
// it with
thrust::transform(i1, i1 + N, o1, ABS<S>());
}
#endif
// versions we wish to compile
#define THISFUNC AbsImageKernelFunction
#define THISTYPE float
template void THISFUNC<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, unsigned int N);
#undef THISTYPE
#define THISTYPE int
template void THISFUNC<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, unsigned int N);
#undef THISTYPE
#define THISTYPE short
template void THISFUNC<THISTYPE, THISTYPE>(const THISTYPE * input, THISTYPE * output, unsigned int N);
#undef THISTYPE
#undef THISFUNC
|
b84b4b6618b65cbb2359521c544c52c59a36d216.hip
|
// !!! This is a file automatically generated by hipify!!!
//******************************************************************************
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "math_fn.cuh"
#include "op1.h"
#include "op2.h"
#include "op3.h"
//==============================================================================
// Swift importable C interface functions
//==============================================================================
//------------------------------------------------------------------------------
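// Each entry point below follows the same pattern: an Op1/Op2/Op3 macro (from
// op1.h/op2.h/op3.h) generates a functor type from a device math function plus
// a type constraint, the srtXxx wrapper casts the opaque srtTensorDescriptor
// arguments, and select<> presumably dispatches to the instantiation matching
// the runtime element types. The *Flat variants take srtDataType values and a
// plain element count instead of full descriptors.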
Op1(Abs, abs, isSignedNumeric<A>())
Op1_TO(Abs_TO, abs, ((isSignedNumeric<A>() && isSame<A,Out>()) || isComplexRealType<A,Out>()))
hipError_t srtAbs(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
if (aDesc.type == oDesc.type) {
return select<Abs>(a, aDesc, out, oDesc, stream);
} else {
return select<Abs_TO>(a, aDesc, out, oDesc, stream);
}
}
hipError_t srtAbsFlat(
srtDataType atype,
const void* a,
srtDataType otype,
void* out,
size_t count,
hipStream_t stream
) {
if (atype == otype) {
return select<Abs>(atype, a, out, count, stream);
} else {
return select<Abs_TO>(atype, a, otype, out, count, stream);
}
}
//------------------------------------------------------------------------------
Op1_TO(Abs2_TO, abs2, (isComplexRealType<A,Out>()))
hipError_t srtAbs2Flat(
srtDataType atype,
const void* a,
srtDataType otype,
void* out,
size_t count,
hipStream_t stream
) {
return select<Abs2_TO>(atype, a, otype, out, count, stream);
}
hipError_t srtAbs2(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Abs2_TO>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Acos, acos, isFloating<A>())
hipError_t srtAcosFlat(
const void* a, srtDataType atype,
void* out,
size_t count, hipStream_t stream
) {
return select<Acos>(atype, a, out, count, stream);
}
hipError_t srtAcos(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Acos>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Acosh, acosh, isFloating<A>())
hipError_t srtAcoshFlat(
const void* a, srtDataType atype,
void* out,
size_t count, hipStream_t stream
) {
return select<Acosh>(atype, a, out, count, stream);
}
hipError_t srtAcosh(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Acosh>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Add, add, isNumeric<A>())
hipError_t srtAdd(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<Add>(a, aDesc, b, bDesc, out, oDesc, stream);
}
hipError_t srtAddFlat(
srtDataType type,
const void* a,
const void* b,
void* out,
size_t count,
hipStream_t stream
) {
return select<Add>(type, a, b, type, out, count, stream);
}
//------------------------------------------------------------------------------
hipError_t srtAddTE(
const void* a, const srtTensorDescriptor* paDesc,
const void* element,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Add>(a, aDesc, element, out, oDesc, stream);
}
hipError_t srtAddTEFlat(
const void* a, srtDataType atype,
const void* b,
void* out,
size_t count, hipStream_t stream
) {
return hipErrorNotSupported;
}
//------------------------------------------------------------------------------
Op1(Asin, asin, isFloating<A>())
hipError_t srtAsin(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Asin>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Asinh, asinh, isFloating<A>())
hipError_t srtAsinh(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Asinh>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Atan, atan, isFloating<A>())
hipError_t srtAtan(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Atan>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Atan2, atan2, isFloating<A>())
hipError_t srtAtan2(
const void* b, const srtTensorDescriptor* pbDesc,
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
// b comes first
return select<Atan2>(b, bDesc, a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Atanh, atanh, isFloating<A>())
hipError_t srtAtanh(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Atanh>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Cos, cos, isFloating<A>())
hipError_t srtCos(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Cos>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Cosh, cosh, isFloating<A>())
hipError_t srtCosh(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Cosh>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Div, divide, isNumeric<A>())
hipError_t srtDiv(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<Div>(a, aDesc, b, bDesc, out, oDesc, stream);
}
hipError_t srtDivTE(
const void* a, const srtTensorDescriptor* paDesc,
const void* element,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Div>(a, aDesc, element, out, oDesc, stream);
}
// Op2SwapAB swaps `a` and `element` when calling `divide`
Op2SwapAB(DivET, divide, (isNumeric<A>() && isSame<A,Out>()))
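// With the operands swapped, srtDivET is intended to compute element / a[i]
// for each element (assuming divide(x, y) == x / y), the mirror of srtDivTE.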
hipError_t srtDivET(
const void* element,
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<DivET>(a, aDesc, element, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Erf, erf, isFloating<A>())
hipError_t srtErf(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Erf>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Erfc, erfc, isFloating<A>())
hipError_t srtErfc(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Erfc>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Exp, exp, isFloating<A>())
hipError_t srtExp(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Exp>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Exp2, exp2, isFloating<A>())
hipError_t srtExp2(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Exp2>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Exp10, exp10, isFloating<A>())
hipError_t srtExp10(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Exp10>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(ExpMinusOne, expm1, isFloating<A>())
hipError_t srtExpMinusOne(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<ExpMinusOne>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Gamma, tgamma, isFloating<A>())
hipError_t srtGamma(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Gamma>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Hypot, hypot, isFloating<A>())
hipError_t srtHypot(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<Hypot>(a, aDesc, b, bDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Log, log, isFloating<A>())
hipError_t srtLog(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Log>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(LogOnePlus, log1p, isFloating<A>())
hipError_t srtLogOnePlus(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<LogOnePlus>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Log2, log2, isFloating<A>())
hipError_t srtLog2(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Log2>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Log10, log10, isFloating<A>())
hipError_t srtLog10(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Log10>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(LogGamma, lgamma, isFloating<A>())
hipError_t srtLogGamma(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<LogGamma>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Mul, multiply, isNumeric<A>())
hipError_t srtMul(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<Mul>(a, aDesc, b, bDesc, out, oDesc, stream);
}
hipError_t srtMulTE(
const void* a, const srtTensorDescriptor* paDesc,
const void* element,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Mul>(a, aDesc, element, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op3(MultiplyAdd, multiplyAdd, (isNumeric<A>() && isSame<A,Out>()))
hipError_t srtMultiplyAdd(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
const void* c, const srtTensorDescriptor* pcDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsABC(paDesc, pbDesc, pcDesc, poDesc)
return select<MultiplyAdd>(a, aDesc, b, bDesc, c, cDesc, out, oDesc, stream);
}
Op3Same(MultiplyAddFlat, multiplyAdd, isNumeric<A>())
hipError_t srtMultiplyAddFlat(
srtDataType type,
const void* a,
const void* b,
const void* c,
void* out,
size_t count,
hipStream_t stream
) {
return select<MultiplyAddFlat>(type, a, b, c, out, count, stream);
}
Op3SwapBC(MultiplyAddE, multiplyAdd, (isNumeric<A>() && isSame<A,Out>()))
hipError_t srtMultiplyAddTTE(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
const void* element,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<MultiplyAddE>(a, aDesc, element, b, bDesc, out, oDesc, stream);
}
Op3SwapBCSame(MultiplyAddEFlat, multiplyAdd, isNumeric<A>())
hipError_t srtMultiplyAddFlatTTE(
srtDataType type,
const void* a,
const void* b,
const void* element,
void* out,
size_t count,
hipStream_t stream
) {
return selectTET<MultiplyAddEFlat>(type, a, element, b, out, count, stream);
}
//------------------------------------------------------------------------------
Op1(Neg, neg, (isSignedNumeric<A>() || isComplex<A>()))
hipError_t srtNeg(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Neg>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Pow, pow, isFloating<A>())
hipError_t srtPow(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<Pow>(a, aDesc, b, bDesc, out, oDesc, stream);
}
hipError_t srtPowTE(
const void* a, const srtTensorDescriptor* paDesc,
const void* exponent,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Pow>(a, aDesc, exponent, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Sigmoid, sigmoid, isFloating<A>())
hipError_t srtSigmoid(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Sigmoid>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Sign, sign, isSignedNumeric<A>())
hipError_t srtSign(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Sign>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Sin, sin, isFloating<A>())
hipError_t srtSin(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Sin>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Sinh, sinh, isFloating<A>())
hipError_t srtSinh(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Sinh>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Sqrt, sqrt, isFloating<A>())
hipError_t srtSqrt(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Sqrt>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Squared, squared, isNumeric<A>())
hipError_t srtSquared(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Squared>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Sub, subtract, isNumeric<A>())
hipError_t srtSub(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<Sub>(a, aDesc, b, bDesc, out, oDesc, stream);
}
hipError_t srtSubTE(
const void* a, const srtTensorDescriptor* paDesc,
const void* element,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Sub>(a, aDesc, element, out, oDesc, stream);
}
// Op2SwapAB swaps `a` and `element` when calling `subtract`
Op2SwapAB(SubET, subtract, (isNumeric<A>() && isSame<A,Out>()))
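// Likewise, srtSubET is intended to compute element - a[i] for each element
// (assuming subtract(x, y) == x - y), the mirror of srtSubTE.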
hipError_t srtSubET(
const void* element,
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<SubET>(a, aDesc, element, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Tan, tan, isFloating<A>())
hipError_t srtTan(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Tan>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Tanh, tanh, isFloating<A>())
hipError_t srtTanh(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
hipStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Tanh>(a, aDesc, out, oDesc, stream);
}
|
b84b4b6618b65cbb2359521c544c52c59a36d216.cu
|
//******************************************************************************
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "math_fn.cuh"
#include "op1.h"
#include "op2.h"
#include "op3.h"
//==============================================================================
// Swift importable C interface functions
//==============================================================================
//------------------------------------------------------------------------------
Op1(Abs, abs, isSignedNumeric<A>())
Op1_TO(Abs_TO, abs, ((isSignedNumeric<A>() && isSame<A,Out>()) || isComplexRealType<A,Out>()))
cudaError_t srtAbs(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
if (aDesc.type == oDesc.type) {
return select<Abs>(a, aDesc, out, oDesc, stream);
} else {
return select<Abs_TO>(a, aDesc, out, oDesc, stream);
}
}
cudaError_t srtAbsFlat(
srtDataType atype,
const void* a,
srtDataType otype,
void* out,
size_t count,
cudaStream_t stream
) {
if (atype == otype) {
return select<Abs>(atype, a, out, count, stream);
} else {
return select<Abs_TO>(atype, a, otype, out, count, stream);
}
}
//------------------------------------------------------------------------------
Op1_TO(Abs2_TO, abs2, (isComplexRealType<A,Out>()))
cudaError_t srtAbs2Flat(
srtDataType atype,
const void* a,
srtDataType otype,
void* out,
size_t count,
cudaStream_t stream
) {
return select<Abs2_TO>(atype, a, otype, out, count, stream);
}
cudaError_t srtAbs2(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Abs2_TO>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Acos, acos, isFloating<A>())
cudaError_t srtAcosFlat(
const void* a, srtDataType atype,
void* out,
size_t count, cudaStream_t stream
) {
return select<Acos>(atype, a, out, count, stream);
}
cudaError_t srtAcos(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Acos>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Acosh, acosh, isFloating<A>())
cudaError_t srtAcoshFlat(
const void* a, srtDataType atype,
void* out,
size_t count, cudaStream_t stream
) {
return select<Acosh>(atype, a, out, count, stream);
}
cudaError_t srtAcosh(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Acosh>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Add, add, isNumeric<A>())
cudaError_t srtAdd(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<Add>(a, aDesc, b, bDesc, out, oDesc, stream);
}
cudaError_t srtAddFlat(
srtDataType type,
const void* a,
const void* b,
void* out,
size_t count,
cudaStream_t stream
) {
return select<Add>(type, a, b, type, out, count, stream);
}
//------------------------------------------------------------------------------
cudaError_t srtAddTE(
const void* a, const srtTensorDescriptor* paDesc,
const void* element,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Add>(a, aDesc, element, out, oDesc, stream);
}
cudaError_t srtAddTEFlat(
const void* a, srtDataType atype,
const void* b,
void* out,
size_t count, cudaStream_t stream
) {
return cudaErrorNotSupported;
}
//------------------------------------------------------------------------------
Op1(Asin, asin, isFloating<A>())
cudaError_t srtAsin(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Asin>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Asinh, asinh, isFloating<A>())
cudaError_t srtAsinh(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Asinh>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Atan, atan, isFloating<A>())
cudaError_t srtAtan(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Atan>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Atan2, atan2, isFloating<A>())
cudaError_t srtAtan2(
const void* b, const srtTensorDescriptor* pbDesc,
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
// b comes first
return select<Atan2>(b, bDesc, a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Atanh, atanh, isFloating<A>())
cudaError_t srtAtanh(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Atanh>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Cos, cos, isFloating<A>())
cudaError_t srtCos(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Cos>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Cosh, cosh, isFloating<A>())
cudaError_t srtCosh(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Cosh>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Div, divide, isNumeric<A>())
cudaError_t srtDiv(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<Div>(a, aDesc, b, bDesc, out, oDesc, stream);
}
cudaError_t srtDivTE(
const void* a, const srtTensorDescriptor* paDesc,
const void* element,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Div>(a, aDesc, element, out, oDesc, stream);
}
// Op2SwapAB swaps `a` and `element` when calling `divide`
Op2SwapAB(DivET, divide, (isNumeric<A>() && isSame<A,Out>()))
cudaError_t srtDivET(
const void* element,
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<DivET>(a, aDesc, element, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Erf, erf, isFloating<A>())
cudaError_t srtErf(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Erf>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Erfc, erfc, isFloating<A>())
cudaError_t srtErfc(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Erfc>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Exp, exp, isFloating<A>())
cudaError_t srtExp(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Exp>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Exp2, exp2, isFloating<A>())
cudaError_t srtExp2(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Exp2>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Exp10, exp10, isFloating<A>())
cudaError_t srtExp10(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Exp10>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(ExpMinusOne, expm1, isFloating<A>())
cudaError_t srtExpMinusOne(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<ExpMinusOne>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Gamma, tgamma, isFloating<A>())
cudaError_t srtGamma(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Gamma>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Hypot, hypot, isFloating<A>())
cudaError_t srtHypot(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<Hypot>(a, aDesc, b, bDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Log, log, isFloating<A>())
cudaError_t srtLog(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Log>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(LogOnePlus, log1p, isFloating<A>())
cudaError_t srtLogOnePlus(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<LogOnePlus>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Log2, log2, isFloating<A>())
cudaError_t srtLog2(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Log2>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Log10, log10, isFloating<A>())
cudaError_t srtLog10(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Log10>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(LogGamma, lgamma, isFloating<A>())
cudaError_t srtLogGamma(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<LogGamma>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Mul, multiply, isNumeric<A>())
cudaError_t srtMul(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<Mul>(a, aDesc, b, bDesc, out, oDesc, stream);
}
cudaError_t srtMulTE(
const void* a, const srtTensorDescriptor* paDesc,
const void* element,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Mul>(a, aDesc, element, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op3(MultiplyAdd, multiplyAdd, (isNumeric<A>() && isSame<A,Out>()))
cudaError_t srtMultiplyAdd(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
const void* c, const srtTensorDescriptor* pcDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsABC(paDesc, pbDesc, pcDesc, poDesc)
return select<MultiplyAdd>(a, aDesc, b, bDesc, c, cDesc, out, oDesc, stream);
}
Op3Same(MultiplyAddFlat, multiplyAdd, isNumeric<A>())
cudaError_t srtMultiplyAddFlat(
srtDataType type,
const void* a,
const void* b,
const void* c,
void* out,
size_t count,
cudaStream_t stream
) {
return select<MultiplyAddFlat>(type, a, b, c, out, count, stream);
}
Op3SwapBC(MultiplyAddE, multiplyAdd, (isNumeric<A>() && isSame<A,Out>()))
cudaError_t srtMultiplyAddTTE(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
const void* element,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<MultiplyAddE>(a, aDesc, element, b, bDesc, out, oDesc, stream);
}
Op3SwapBCSame(MultiplyAddEFlat, multiplyAdd, isNumeric<A>())
cudaError_t srtMultiplyAddFlatTTE(
srtDataType type,
const void* a,
const void* b,
const void* element,
void* out,
size_t count,
cudaStream_t stream
) {
return selectTET<MultiplyAddEFlat>(type, a, element, b, out, count, stream);
}
//------------------------------------------------------------------------------
Op1(Neg, neg, (isSignedNumeric<A>() || isComplex<A>()))
cudaError_t srtNeg(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Neg>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Pow, pow, isFloating<A>())
cudaError_t srtPow(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<Pow>(a, aDesc, b, bDesc, out, oDesc, stream);
}
cudaError_t srtPowTE(
const void* a, const srtTensorDescriptor* paDesc,
const void* exponent,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Pow>(a, aDesc, exponent, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Sigmoid, sigmoid, isFloating<A>())
cudaError_t srtSigmoid(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Sigmoid>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Sign, sign, isSignedNumeric<A>())
cudaError_t srtSign(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Sign>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Sin, sin, isFloating<A>())
cudaError_t srtSin(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Sin>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Sinh, sinh, isFloating<A>())
cudaError_t srtSinh(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Sinh>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Sqrt, sqrt, isFloating<A>())
cudaError_t srtSqrt(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Sqrt>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Squared, squared, isNumeric<A>())
cudaError_t srtSquared(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Squared>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op2(Sub, subtract, isNumeric<A>())
cudaError_t srtSub(
const void* a, const srtTensorDescriptor* paDesc,
const void* b, const srtTensorDescriptor* pbDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsAB(paDesc, pbDesc, poDesc)
return select<Sub>(a, aDesc, b, bDesc, out, oDesc, stream);
}
cudaError_t srtSubTE(
const void* a, const srtTensorDescriptor* paDesc,
const void* element,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Sub>(a, aDesc, element, out, oDesc, stream);
}
// Op2SwapAB swaps `a` and `element` when calling `subtract`
Op2SwapAB(SubET, subtract, (isNumeric<A>() && isSame<A,Out>()))
cudaError_t srtSubET(
const void* element,
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<SubET>(a, aDesc, element, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Tan, tan, isFloating<A>())
cudaError_t srtTan(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Tan>(a, aDesc, out, oDesc, stream);
}
//------------------------------------------------------------------------------
Op1(Tanh, tanh, isFloating<A>())
cudaError_t srtTanh(
const void* a, const srtTensorDescriptor* paDesc,
void* out, const srtTensorDescriptor* poDesc,
cudaStream_t stream
) {
Cast2TensorDescriptorsA(paDesc, poDesc)
return select<Tanh>(a, aDesc, out, oDesc, stream);
}
|
f61d2ae72c8365d3b74c6cc06ac6bc46a86f5d72.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../knet.h"
template<typename dType>
__global__ void _mul(int n, dType alpha, dType *a, dType beta, dType *b, dType *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
dType ai = (alpha == 1 ? a[i] : alpha == -1 ? 1/a[i] : pow(a[i], alpha));
dType bi = (beta == 1 ? b[i] : beta == -1 ? 1/b[i] : pow(b[i], beta));
z[i] = ai * bi;
i += blockDim.x * gridDim.x;
}
}
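// _mul walks the array with a grid-stride loop, so any launch configuration
// covers all n elements. alpha/beta values of 1 and -1 are fast paths
// (identity and reciprocal); any other exponent falls back to pow().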
extern "C" {
void mul32(int n, float alpha, float *a, float beta, float *b, float *c) KCALL(_mul,n,alpha,a,beta,b,c);
void mul64(int n, double alpha, double *a, double beta, double *b, double *c) KCALL(_mul,n,alpha,a,beta,b,c);
}
// broadcasting mul: c = a^alpha * b^beta
// each dim of a is either 1 or matches b. c has the same size as b.
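// e.g. with adims = {3,1} and bdims = {3,4} (shapes chosen only for
// illustration), a's single column is reused for every column of b, i.e. a is
// indexed by bi % 3; alpha = 1 and beta = -1 then give c[bi] = a[bi % 3] / b[bi].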
template<typename dType>
__global__ void _bmul(int ndims, dType alpha, int *adims, dType *a, dType beta, int *bdims, dType *b, dType *c) {
int b0, b1, b2, b3, b4, b5, b6, b7, i, j, ai;
int bi = threadIdx.x + blockIdx.x * blockDim.x;
int bn = 1;
for (int n=0; n<ndims; n++) bn *= bdims[n];
while(bi < bn) {
j = bi;
if (ndims > 0) { i=j; j=i/bdims[0]; b0=i-j*bdims[0]; }
if (ndims > 1) { i=j; j=i/bdims[1]; b1=i-j*bdims[1]; }
if (ndims > 2) { i=j; j=i/bdims[2]; b2=i-j*bdims[2]; }
if (ndims > 3) { i=j; j=i/bdims[3]; b3=i-j*bdims[3]; }
if (ndims > 4) { i=j; j=i/bdims[4]; b4=i-j*bdims[4]; }
if (ndims > 5) { i=j; j=i/bdims[5]; b5=i-j*bdims[5]; }
if (ndims > 6) { i=j; j=i/bdims[6]; b6=i-j*bdims[6]; }
if (ndims > 7) { i=j; j=i/bdims[7]; b7=i-j*bdims[7]; }
ai = 0;
if (ndims > 7) { ai = adims[7]*ai + (adims[7]==1 ? 0 : b7); }
if (ndims > 6) { ai = adims[6]*ai + (adims[6]==1 ? 0 : b6); }
if (ndims > 5) { ai = adims[5]*ai + (adims[5]==1 ? 0 : b5); }
if (ndims > 4) { ai = adims[4]*ai + (adims[4]==1 ? 0 : b4); }
if (ndims > 3) { ai = adims[3]*ai + (adims[3]==1 ? 0 : b3); }
if (ndims > 2) { ai = adims[2]*ai + (adims[2]==1 ? 0 : b2); }
if (ndims > 1) { ai = adims[1]*ai + (adims[1]==1 ? 0 : b1); }
if (ndims > 0) { ai = adims[0]*ai + (adims[0]==1 ? 0 : b0); }
dType aval = (alpha == 1 ? a[ai] : alpha == -1 ? 1/a[ai] : pow(a[ai], alpha)); // Note the extra work here, a tmp array for a^alpha would reduce the pow ops
dType bval = (beta == 1 ? b[bi] : beta == -1 ? 1/b[bi] : pow(b[bi], beta));
c[bi] = aval * bval;
bi += blockDim.x * gridDim.x;
}
}
// slightly more optimized 2D version
template<typename dType>
__global__ void _bmul2d(dType alpha, int *adims, dType *a, dType beta, int *bdims, dType *b, dType *c) {
int b0, b1, i, j, ai, A0, A1, B0, B1;
B0 = bdims[0]; B1 = bdims[1]; A0 = adims[0]; A1 = adims[1];
int bi = threadIdx.x + blockIdx.x * blockDim.x;
int bn = B0*B1;
while(bi < bn) {
j=bi/B0; b0=bi-j*B0;
i=j; j=i/B1; b1=i-j*B1;
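// (b0, b1) are bi's coordinates in the B0 x B1 output; a broadcast dimension
// of a (size 1) contributes index 0, a full-size dimension reuses b's coordinate.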
ai = A0*(A1==1 ? 0 : b1) + (A0==1 ? 0 : b0);
dType aval = (alpha == 1 ? a[ai] : alpha == -1 ? 1/a[ai] : pow(a[ai], alpha)); // Note the extra work here, a tmp array for a^alpha would reduce the pow ops
dType bval = (beta == 1 ? b[bi] : beta == -1 ? 1/b[bi] : pow(b[bi], beta));
c[bi] = aval * bval;
bi += blockDim.x * gridDim.x;
}
}
extern "C" {
void bmul32(int ndims, float alpha, int *adims, float *a, float beta, int *bdims, float *b, float *c) {
if (ndims==2) {
KCALL(_bmul2d,alpha,adims,a,beta,bdims,b,c);
} else {
KCALL(_bmul,ndims,alpha,adims,a,beta,bdims,b,c);
}
}
void bmul64(int ndims, double alpha, int *adims, double *a, double beta, int *bdims, double *b, double *c) {
if (ndims==2) {
KCALL(_bmul2d,alpha,adims,a,beta,bdims,b,c);
} else {
KCALL(_bmul,ndims,alpha,adims,a,beta,bdims,b,c);
}
}
}
|
f61d2ae72c8365d3b74c6cc06ac6bc46a86f5d72.cu
|
#include "../knet.h"
template<typename dType>
__global__ void _mul(int n, dType alpha, dType *a, dType beta, dType *b, dType *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
dType ai = (alpha == 1 ? a[i] : alpha == -1 ? 1/a[i] : pow(a[i], alpha));
dType bi = (beta == 1 ? b[i] : beta == -1 ? 1/b[i] : pow(b[i], beta));
z[i] = ai * bi;
i += blockDim.x * gridDim.x;
}
}
extern "C" {
void mul32(int n, float alpha, float *a, float beta, float *b, float *c) KCALL(_mul,n,alpha,a,beta,b,c);
void mul64(int n, double alpha, double *a, double beta, double *b, double *c) KCALL(_mul,n,alpha,a,beta,b,c);
}
// broadcasting mul: c = a^alpha * b^beta
// each dim of a is either 1 or matches b. c has the same size as b.
template<typename dType>
__global__ void _bmul(int ndims, dType alpha, int *adims, dType *a, dType beta, int *bdims, dType *b, dType *c) {
int b0, b1, b2, b3, b4, b5, b6, b7, i, j, ai;
int bi = threadIdx.x + blockIdx.x * blockDim.x;
int bn = 1;
for (int n=0; n<ndims; n++) bn *= bdims[n];
while(bi < bn) {
j = bi;
if (ndims > 0) { i=j; j=i/bdims[0]; b0=i-j*bdims[0]; }
if (ndims > 1) { i=j; j=i/bdims[1]; b1=i-j*bdims[1]; }
if (ndims > 2) { i=j; j=i/bdims[2]; b2=i-j*bdims[2]; }
if (ndims > 3) { i=j; j=i/bdims[3]; b3=i-j*bdims[3]; }
if (ndims > 4) { i=j; j=i/bdims[4]; b4=i-j*bdims[4]; }
if (ndims > 5) { i=j; j=i/bdims[5]; b5=i-j*bdims[5]; }
if (ndims > 6) { i=j; j=i/bdims[6]; b6=i-j*bdims[6]; }
if (ndims > 7) { i=j; j=i/bdims[7]; b7=i-j*bdims[7]; }
ai = 0;
if (ndims > 7) { ai = adims[7]*ai + (adims[7]==1 ? 0 : b7); }
if (ndims > 6) { ai = adims[6]*ai + (adims[6]==1 ? 0 : b6); }
if (ndims > 5) { ai = adims[5]*ai + (adims[5]==1 ? 0 : b5); }
if (ndims > 4) { ai = adims[4]*ai + (adims[4]==1 ? 0 : b4); }
if (ndims > 3) { ai = adims[3]*ai + (adims[3]==1 ? 0 : b3); }
if (ndims > 2) { ai = adims[2]*ai + (adims[2]==1 ? 0 : b2); }
if (ndims > 1) { ai = adims[1]*ai + (adims[1]==1 ? 0 : b1); }
if (ndims > 0) { ai = adims[0]*ai + (adims[0]==1 ? 0 : b0); }
dType aval = (alpha == 1 ? a[ai] : alpha == -1 ? 1/a[ai] : pow(a[ai], alpha)); // Note the extra work here, a tmp array for a^alpha would reduce the pow ops
dType bval = (beta == 1 ? b[bi] : beta == -1 ? 1/b[bi] : pow(b[bi], beta));
c[bi] = aval * bval;
bi += blockDim.x * gridDim.x;
}
}
// slightly more optimized 2D version
template<typename dType>
__global__ void _bmul2d(dType alpha, int *adims, dType *a, dType beta, int *bdims, dType *b, dType *c) {
int b0, b1, i, j, ai, A0, A1, B0, B1;
B0 = bdims[0]; B1 = bdims[1]; A0 = adims[0]; A1 = adims[1];
int bi = threadIdx.x + blockIdx.x * blockDim.x;
int bn = B0*B1;
while(bi < bn) {
j=bi/B0; b0=bi-j*B0;
i=j; j=i/B1; b1=i-j*B1;
ai = A0*(A1==1 ? 0 : b1) + (A0==1 ? 0 : b0);
dType aval = (alpha == 1 ? a[ai] : alpha == -1 ? 1/a[ai] : pow(a[ai], alpha)); // Note the extra work here, a tmp array for a^alpha would reduce the pow ops
dType bval = (beta == 1 ? b[bi] : beta == -1 ? 1/b[bi] : pow(b[bi], beta));
c[bi] = aval * bval;
bi += blockDim.x * gridDim.x;
}
}
extern "C" {
void bmul32(int ndims, float alpha, int *adims, float *a, float beta, int *bdims, float *b, float *c) {
if (ndims==2) {
KCALL(_bmul2d,alpha,adims,a,beta,bdims,b,c);
} else {
KCALL(_bmul,ndims,alpha,adims,a,beta,bdims,b,c);
}
}
void bmul64(int ndims, double alpha, int *adims, double *a, double beta, int *bdims, double *b, double *c) {
if (ndims==2) {
KCALL(_bmul2d,alpha,adims,a,beta,bdims,b,c);
} else {
KCALL(_bmul,ndims,alpha,adims,a,beta,bdims,b,c);
}
}
}
|
15f7e2d25fa08abb10719cede2f7e3e4958e9374.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cutlass/gemm/device/gemm.h"
#include "matmul_kernel.h"
// #define USE_TENSOR_CORE
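// Leaving USE_TENSOR_CORE undefined selects the SIMT float32 GEMM below;
// defining it switches to the half-precision tensor-core path in the #else
// branch, where A and B are reinterpreted as cutlass::half_t.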
hipError_t CutlassSgemmNN(int M, int N, int K, float alpha, float const* A, int lda, float const* B, int ldb,
float beta, float* C, int ldc, int cycle_count) {
// for (int i = 0; i < cycle_count; ++i) {
#ifndef USE_TENSOR_CORE
using CutlassGemm = cutlass::gemm::device::Gemm<float, // Data-type of A matrix
cutlass::layout::RowMajor, // Layout of A matrix
float, // Data-type of B matrix
cutlass::layout::RowMajor, // Layout of B matrix
float, // Data-type of C matrix
cutlass::layout::RowMajor, float, cutlass::arch::OpClassSimt, cutlass::arch::Sm80,
cutlass::gemm::GemmShape<256, 128, 8>, cutlass::gemm::GemmShape<64, 64, 8>,
cutlass::gemm::GemmShape<1, 1, 1>
// cutlass::gemm::GemmShape<32, 32, 8>,
// cutlass::gemm::GemmShape<16, 16, 8>,
// cutlass::gemm::GemmShape<1, 1, 1>,
>;
// Define a CUTLASS GEMM type
CutlassGemm gemm_operator;
CutlassGemm::Arguments args({M, N, K}, // Gemm Problem dimensions
{A, lda}, // Tensor-ref for source matrix A
{B, ldb}, // Tensor-ref for source matrix B
{C, ldc}, // Tensor-ref for source matrix C
{C, ldc}, // Tensor-ref for destination matrix D (may be different memory than source C matrix)
{alpha, beta}); // Scalars used in the Epilogue
#else
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Column Major for
// Matrix A, Row Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 16>;  // <- threadblock tile M = 128, N = 128, K = 16
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 16>; // <- warp tile M = 64, N = 64, K = 16
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;  // <- default identity threadblock swizzle
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 4;
using CutlassGemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp,
ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>;
// Define a CUTLASS GEMM type
CutlassGemm gemm_operator;
CutlassGemm::Arguments args({M, N, K}, // Gemm Problem dimensions
{(ElementInputA*)A, lda}, // Tensor-ref for source matrix A
{(ElementInputB*)B, ldb}, // Tensor-ref for source matrix B
{C, ldc}, // Tensor-ref for source matrix C
{C, ldc}, // Tensor-ref for destination matrix D (may be different memory than source C matrix)
{alpha, beta}); // Scalars used in the Epilogue
#endif
// Launch the CUTLASS GEMM kernel.
cutlass::Status status = gemm_operator(args);
if (status != cutlass::Status::kSuccess) {
return hipErrorUnknown;
}
// }
return hipSuccess;
}
|
15f7e2d25fa08abb10719cede2f7e3e4958e9374.cu
|
#include "cutlass/gemm/device/gemm.h"
#include "matmul_kernel.h"
// #define USE_TENSOR_CORE
cudaError_t CutlassSgemmNN(int M, int N, int K, float alpha, float const* A, int lda, float const* B, int ldb,
float beta, float* C, int ldc, int cycle_count) {
// for (int i = 0; i < cycle_count; ++i) {
#ifndef USE_TENSOR_CORE
using CutlassGemm = cutlass::gemm::device::Gemm<float, // Data-type of A matrix
cutlass::layout::RowMajor, // Layout of A matrix
float, // Data-type of B matrix
cutlass::layout::RowMajor, // Layout of B matrix
float, // Data-type of C matrix
cutlass::layout::RowMajor, float, cutlass::arch::OpClassSimt, cutlass::arch::Sm80,
cutlass::gemm::GemmShape<256, 128, 8>, cutlass::gemm::GemmShape<64, 64, 8>,
cutlass::gemm::GemmShape<1, 1, 1>
// cutlass::gemm::GemmShape<32, 32, 8>,
// cutlass::gemm::GemmShape<16, 16, 8>,
// cutlass::gemm::GemmShape<1, 1, 1>,
>;
// Define a CUTLASS GEMM type
CutlassGemm gemm_operator;
CutlassGemm::Arguments args({M, N, K}, // Gemm Problem dimensions
{A, lda}, // Tensor-ref for source matrix A
{B, ldb}, // Tensor-ref for source matrix B
{C, ldc}, // Tensor-ref for source matrix C
{C, ldc}, // Tensor-ref for destination matrix D (may be different memory than source C matrix)
{alpha, beta}); // Scalars used in the Epilogue
#else
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Column Major for
// Matrix A, Row Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 16>;  // <- threadblock tile M = 128, N = 128, K = 16
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 16>; // <- warp tile M = 64, N = 64, K = 16
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;  // <- default identity threadblock swizzle
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 4;
using CutlassGemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp,
ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>;
// Define a CUTLASS GEMM type
CutlassGemm gemm_operator;
CutlassGemm::Arguments args({M, N, K}, // Gemm Problem dimensions
{(ElementInputA*)A, lda}, // Tensor-ref for source matrix A
{(ElementInputB*)B, ldb}, // Tensor-ref for source matrix B
{C, ldc}, // Tensor-ref for source matrix C
{C, ldc}, // Tensor-ref for destination matrix D (may be different memory than source C matrix)
{alpha, beta}); // Scalars used in the Epilogue
#endif
// Launch the CUTLASS GEMM kernel.
cutlass::Status status = gemm_operator(args);
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
// }
return cudaSuccess;
}
|
3d079adf7eec186bcf79466c5ceb869ad8a72d41.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2013 William J. Brouwer, Pierre-Yves Taunay
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <vector>
#include <map>
#include <ctime>
#include "main.h"
//#include "checks.cuh"
using namespace std;
__global__ void print_Y(float *Y) {
for(int i = 0;i<179;i++)
printf("Y[%d] = %e\n",i,Y[i]);
}
void init_f(vector<float> &k_inds, //as before
vector<float>& constants, //
vector<map<float,float> >& y_complete, //
vector<int>& terms, //
eval_node ** f_nodes_dev, //all the node data in an array, device pointer
int ** terms_dev, //the term information, device pointer
int ** offset_terms_dev, //offsets for the term information, device pointer
float ** function_dev, //storage for f(y)
float ** delta, //storage for delta
float ** y_dev, //solution vector
vector<float>& iv){ //init guess
int num_leaves = constants.size();
int num_funcs = terms.size();
eval_node * tmp_nodes = new eval_node[num_leaves ];
int * tmp_terms = new int[num_funcs ];
float * tmp_y = new float[num_funcs ];
int * tmp_offsets = new int[num_funcs ];
tmp_offsets[0] = 0;
int off = terms[0];
for (int i=1; i<num_funcs; i++){
tmp_offsets[i] = off;
off+=terms[i];
}
for (int i=0; i<num_funcs; i++)
tmp_terms[i] = terms[i];
for (int i=0; i<num_leaves; i++){
tmp_nodes[i].constant = constants[i];
tmp_nodes[i].k_index = (int) k_inds[i];
tmp_nodes[i].y_index_1 = 0;
tmp_nodes[i].y_exp_1 = 1.0;
tmp_nodes[i].y_index_2 = -1;
tmp_nodes[i].y_exp_2 = 1.0;
map<float,float> tmp = y_complete[i];
tmp_nodes[i].y_index_1 = (int) tmp.begin()->first;
tmp_nodes[i].y_exp_1 = tmp.begin()->second;
if (tmp.size()>1){
map<float,float>:: iterator it = tmp.begin();
it++;
tmp_nodes[i].y_index_2 = (int) it->first;
tmp_nodes[i].y_exp_2 = it->second;
}
}
hipMalloc(f_nodes_dev, sizeof(eval_node)*num_leaves);
//cudaCheckError("malloc, f_nodes_dev");
hipMemcpy(*f_nodes_dev, tmp_nodes, sizeof(eval_node)*num_leaves, hipMemcpyHostToDevice);
//cudaCheckError("memcpy, f_nodes_dev");
hipMalloc(terms_dev, sizeof(int)*num_funcs);
hipMalloc(offset_terms_dev, sizeof(int)*num_funcs);
//cudaCheckError("malloc, terms_dev");
hipMemcpy(*terms_dev, tmp_terms, sizeof(int)*num_funcs, hipMemcpyHostToDevice);
hipMemcpy(*offset_terms_dev, tmp_offsets, sizeof(int)*num_funcs, hipMemcpyHostToDevice);
//cudaCheckError("memcpy, terms_dev");
hipMalloc(function_dev, sizeof(float)*num_funcs);
//cudaCheckError("malloc, function_dev");
hipMalloc(delta, sizeof(float)*num_funcs);
//cudaCheckError("malloc, delta");
//cout << num_funcs << endl;
//init y
// srand(time(NULL));
srand(1024);
for (int i=0; i<num_funcs; i++)
tmp_y[i] = iv[i]; //guess * rand() / (float) RAND_MAX;
hipMalloc(y_dev, sizeof(float)*num_funcs);
//cout << "ydev 2 " << *y_dev << endl;
//cudaCheckError("malloc, y_dev");
hipMemcpy(*y_dev, tmp_y, sizeof(float)*num_funcs, hipMemcpyHostToDevice);
//cudaCheckError("memcpy, y_dev");
// free each temporary array separately (a single comma-separated delete[] only frees the first one)
delete[] tmp_terms; delete[] tmp_nodes; delete[] tmp_y; delete[] tmp_offsets;
}
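// The tmp_offsets array built above is an exclusive prefix sum of the per-function term
// counts: offsets[i] marks where function i's leaves begin in the flattened node array.
// A standalone sketch of that pattern (illustrative helper, not used by init_f itself):
#include <cstddef>
static std::vector<int> exclusive_offsets(const std::vector<int>& terms)
{
    std::vector<int> offsets(terms.size(), 0);
    int running = 0;
    for (std::size_t i = 0; i < terms.size(); ++i) {
        offsets[i] = running;  // start index of function i's terms
        running += terms[i];   // advance by the number of terms in function i
    }
    return offsets;            // e.g. terms {3, 2, 4} -> offsets {0, 3, 5}
}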
void init_j(vector<float> &k_inds_jac, //as before
vector<float>& constants_jac, //
vector<map<float,float> >& jac_complete, //
vector<int>& terms_jac, //
eval_node ** jac_nodes_dev, //all the node data in an array, device pointer
int ** terms_jac_dev, //the term information, device pointer
int ** offset_terms_jac_dev, //offset for the term information, device pointer
float ** jacobian_dev){ //storage for the results J(y)
int num_leaves = constants_jac.size();
int num_funcs = terms_jac.size();
eval_node * tmp_nodes = new eval_node[num_leaves ];
int * tmp_terms = new int[num_funcs ];
int * tmp_offsets = new int[num_funcs ];
tmp_offsets[0] = 0;
int off = terms_jac[0];
for (int i=1; i<num_funcs; i++){
tmp_offsets[i] = off;
off+=terms_jac[i];
}
for (int i=0; i<num_funcs; i++)
tmp_terms[i] = terms_jac[i];
for (int i=0; i<num_leaves; i++){
tmp_nodes[i].constant = constants_jac[i];
tmp_nodes[i].k_index = (int) k_inds_jac[i];
tmp_nodes[i].y_index_1 = 0;
tmp_nodes[i].y_exp_1 = 1.0;
tmp_nodes[i].y_index_2 = -1;
tmp_nodes[i].y_exp_2 = 1.0;
map<float,float> tmp = jac_complete[i];
tmp_nodes[i].y_index_1 = (int) tmp.begin()->first;
tmp_nodes[i].y_exp_1 = tmp.begin()->second;
if (tmp.size()>1){
map<float,float>:: iterator it = tmp.begin();
it++;
tmp_nodes[i].y_index_2 = (int) it->first;
tmp_nodes[i].y_exp_2 = it->second;
}
}
hipMalloc(jac_nodes_dev, sizeof(eval_node)*num_leaves);
//cudaCheckError("malloc, jac_nodes_dev");
hipMemcpy(*jac_nodes_dev, tmp_nodes, sizeof(eval_node)*num_leaves, hipMemcpyHostToDevice);
//cudaCheckError("memcpy, jac_nodes_dev");
hipMalloc(terms_jac_dev, sizeof(int)*num_funcs);
hipMalloc(offset_terms_jac_dev, sizeof(int)*num_funcs);
//cudaCheckError("malloc, terms_jac_dev");
hipMemcpy(*terms_jac_dev, tmp_terms, sizeof(int)*num_funcs, hipMemcpyHostToDevice);
hipMemcpy(*offset_terms_jac_dev, tmp_offsets, sizeof(int)*num_funcs, hipMemcpyHostToDevice);
//cudaCheckError("memcpy, terms_jac_dev");
hipMalloc(jacobian_dev, sizeof(float)*num_funcs);
//cudaCheckError("malloc, jacobian_dev");
// free each temporary array separately (a single comma-separated delete[] only frees the first one)
delete[] tmp_terms; delete[] tmp_nodes; delete[] tmp_offsets;
}
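// The commented-out cudaCheckError(...) calls above suggest every allocation and copy was
// meant to be checked. A minimal checking helper in the same spirit could look like the
// sketch below; the helper name and message format are illustrative, not from the original.
#include <cstdio>
#include <cstdlib>
static void hip_check(hipError_t err, const char* what)
{
    if (err != hipSuccess) {
        std::fprintf(stderr, "HIP error during %s: %s\n", what, hipGetErrorString(err));
        std::exit(EXIT_FAILURE);
    }
}
// Usage sketch: hip_check(hipMalloc(jacobian_dev, sizeof(float)*num_funcs), "malloc, jacobian_dev");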
|
3d079adf7eec186bcf79466c5ceb869ad8a72d41.cu
|
/*
* Copyright 2013 William J. Brouwer, Pierre-Yves Taunay
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda.h>
#include <vector>
#include <map>
#include <ctime>
#include "main.h"
//#include "checks.cuh"
using namespace std;
__global__ void print_Y(float *Y) {
for(int i = 0;i<179;i++)
printf("Y[%d] = %e\n",i,Y[i]);
}
void init_f(vector<float> &k_inds, //as before
vector<float>& constants, //
vector<map<float,float> >& y_complete, //
vector<int>& terms, //
eval_node ** f_nodes_dev, //all the node data in an array, device pointer
int ** terms_dev, //the term information, device pointer
int ** offset_terms_dev, //offsets for the term information, device pointer
float ** function_dev, //storage for f(y)
float ** delta, //storage for delta
float ** y_dev, //solution vector
vector<float>& iv){ //init guess
int num_leaves = constants.size();
int num_funcs = terms.size();
eval_node * tmp_nodes = new eval_node[num_leaves ];
int * tmp_terms = new int[num_funcs ];
float * tmp_y = new float[num_funcs ];
int * tmp_offsets = new int[num_funcs ];
tmp_offsets[0] = 0;
int off = terms[0];
for (int i=1; i<num_funcs; i++){
tmp_offsets[i] = off;
off+=terms[i];
}
for (int i=0; i<num_funcs; i++)
tmp_terms[i] = terms[i];
for (int i=0; i<num_leaves; i++){
tmp_nodes[i].constant = constants[i];
tmp_nodes[i].k_index = (int) k_inds[i];
tmp_nodes[i].y_index_1 = 0;
tmp_nodes[i].y_exp_1 = 1.0;
tmp_nodes[i].y_index_2 = -1;
tmp_nodes[i].y_exp_2 = 1.0;
map<float,float> tmp = y_complete[i];
tmp_nodes[i].y_index_1 = (int) tmp.begin()->first;
tmp_nodes[i].y_exp_1 = tmp.begin()->second;
if (tmp.size()>1){
map<float,float>:: iterator it = tmp.begin();
it++;
tmp_nodes[i].y_index_2 = (int) it->first;
tmp_nodes[i].y_exp_2 = it->second;
}
}
cudaMalloc(f_nodes_dev, sizeof(eval_node)*num_leaves);
//cudaCheckError("malloc, f_nodes_dev");
cudaMemcpy(*f_nodes_dev, tmp_nodes, sizeof(eval_node)*num_leaves, cudaMemcpyHostToDevice);
//cudaCheckError("memcpy, f_nodes_dev");
cudaMalloc(terms_dev, sizeof(int)*num_funcs);
cudaMalloc(offset_terms_dev, sizeof(int)*num_funcs);
//cudaCheckError("malloc, terms_dev");
cudaMemcpy(*terms_dev, tmp_terms, sizeof(int)*num_funcs, cudaMemcpyHostToDevice);
cudaMemcpy(*offset_terms_dev, tmp_offsets, sizeof(int)*num_funcs, cudaMemcpyHostToDevice);
//cudaCheckError("memcpy, terms_dev");
cudaMalloc(function_dev, sizeof(float)*num_funcs);
//cudaCheckError("malloc, function_dev");
cudaMalloc(delta, sizeof(float)*num_funcs);
//cudaCheckError("malloc, delta");
//cout << num_funcs << endl;
//init y
// srand(time(NULL));
srand(1024);
for (int i=0; i<num_funcs; i++)
tmp_y[i] = iv[i]; //guess * rand() / (float) RAND_MAX;
cudaMalloc(y_dev, sizeof(float)*num_funcs);
//cout << "ydev 2 " << *y_dev << endl;
//cudaCheckError("malloc, y_dev");
cudaMemcpy(*y_dev, tmp_y, sizeof(float)*num_funcs, cudaMemcpyHostToDevice);
//cudaCheckError("memcpy, y_dev");
// free each temporary array separately (a single comma-separated delete[] only frees the first one)
delete[] tmp_terms; delete[] tmp_nodes; delete[] tmp_y; delete[] tmp_offsets;
}
void init_j(vector<float> &k_inds_jac, //as before
vector<float>& constants_jac, //
vector<map<float,float> >& jac_complete, //
vector<int>& terms_jac, //
eval_node ** jac_nodes_dev, //all the node data in an array, device pointer
int ** terms_jac_dev, //the term information, device pointer
int ** offset_terms_jac_dev, //offset for the term information, device pointer
float ** jacobian_dev){ //storage for the results J(y)
int num_leaves = constants_jac.size();
int num_funcs = terms_jac.size();
eval_node * tmp_nodes = new eval_node[num_leaves ];
int * tmp_terms = new int[num_funcs ];
int * tmp_offsets = new int[num_funcs ];
tmp_offsets[0] = 0;
int off = terms_jac[0];
for (int i=1; i<num_funcs; i++){
tmp_offsets[i] = off;
off+=terms_jac[i];
}
for (int i=0; i<num_funcs; i++)
tmp_terms[i] = terms_jac[i];
for (int i=0; i<num_leaves; i++){
tmp_nodes[i].constant = constants_jac[i];
tmp_nodes[i].k_index = (int) k_inds_jac[i];
tmp_nodes[i].y_index_1 = 0;
tmp_nodes[i].y_exp_1 = 1.0;
tmp_nodes[i].y_index_2 = -1;
tmp_nodes[i].y_exp_2 = 1.0;
map<float,float> tmp = jac_complete[i];
tmp_nodes[i].y_index_1 = (int) tmp.begin()->first;
tmp_nodes[i].y_exp_1 = tmp.begin()->second;
if (tmp.size()>1){
map<float,float>:: iterator it = tmp.begin();
it++;
tmp_nodes[i].y_index_2 = (int) it->first;
tmp_nodes[i].y_exp_2 = it->second;
}
}
cudaMalloc(jac_nodes_dev, sizeof(eval_node)*num_leaves);
//cudaCheckError("malloc, jac_nodes_dev");
cudaMemcpy(*jac_nodes_dev, tmp_nodes, sizeof(eval_node)*num_leaves, cudaMemcpyHostToDevice);
//cudaCheckError("memcpy, jac_nodes_dev");
cudaMalloc(terms_jac_dev, sizeof(int)*num_funcs);
cudaMalloc(offset_terms_jac_dev, sizeof(int)*num_funcs);
//cudaCheckError("malloc, terms_jac_dev");
cudaMemcpy(*terms_jac_dev, tmp_terms, sizeof(int)*num_funcs, cudaMemcpyHostToDevice);
cudaMemcpy(*offset_terms_jac_dev, tmp_offsets, sizeof(int)*num_funcs, cudaMemcpyHostToDevice);
//cudaCheckError("memcpy, terms_jac_dev");
cudaMalloc(jacobian_dev, sizeof(float)*num_funcs);
//cudaCheckError("malloc, jacobian_dev");
// free each temporary array separately (a single comma-separated delete[] only frees the first one)
delete[] tmp_terms; delete[] tmp_nodes; delete[] tmp_offsets;
}
|
ee692fd80b9e75c9e2fe34a1a0ce482ab2ef91d5.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zbcsrlugemm.cu normal z -> d, Fri Jul 18 17:34:27 2014
*/
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h> // include before magma.h
#include "magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define PRECISION_d
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
#define fetch_x_A(i) (((i)<m*m)?Aval[i]:0)
#define fetch_x_B(i) (((i)<m*m)?B[i]:0)
// every multiprocessor handles one BCSR-block
__global__ void
dbcsr_gemm_kernel32(
int m,
int n,
int kblocks,
double **Avals,
double **Bval,
double **Cval)
{
#if (__CUDA_ARCH__ >= 200)
#if defined(PRECISION_d)
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
double xxB[4];
double *B;
int trackA = __mul24( ty2, lda) + tx2 ;
double *Aval = Avals[blockIdx.z];
__shared__ double Abs[64][65];
__shared__ double Bb[16][65];
for(int j=ty2; j<64; j+=16){
for(int y=tx2; y<64; y+=16){
Abs[y][j] = fetch_x_A(trackA + y-tx2) ;
}
trackA += __mul24( 16, m);
}
for(int k=0; k<kblocks; k++){
B = Bval[k];
int trackB = tx2+ __mul24( ty2 * 16, ldb );
// Prefetch part of B
#pragma unroll
for(int y=0; y<4; y++){
Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ;
}
__syncthreads(); // this is necessary!!!
double Axs[4];
double Bxp[4];
double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<m-16; k1+=16)
{
trackB += 16;
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = fetch_x_B( trackB + y*ldb);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++){
Axs[y] = Abs[tx2+y*16][j1+k1] ;
}
#pragma unroll
for( int y=0; y<4; y++){
Bxp[y]= Bb[j1][ty2+y*16];
}
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4 + y] = xxB[y];
__syncthreads(); // this is necessary!!!
}
// Prepare where to write the result
double *C = Cval[blockIdx.z * kblocks + k];
C += tx2 + __mul24 (ty2 ,ldc);
#pragma unroll
for(int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1+k1] ;
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0;y<4; y++)
{
Cb[x*4 + y] += Axs[x]*Bxp[y];
}
}
}
int gy = ty2;
#pragma unroll
for( int y=0;y<4;y++, gy+=16)
{
int gx = tx2;
#pragma unroll
for(int x=0;x<4;x++, gx+=16)
{
if (gx < m && gy < n){
C[x*16] -= Cb[y+x*4];
}
}
C += ldc*16;
}
}
#endif
#endif
}
// every multiprocessor handles one BCSR-block
__global__ void
dbcsr_gemm_kernel64(
int m,
int n,
int kblocks,
double **Avals,
double **Bval,
double **Cval)
{
#if (__CUDA_ARCH__ >= 200)
#if defined(PRECISION_d)
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
double xxB[4];
double *B;
int trackA = __mul24( ty2, lda) + tx2 ;
double *Aval = Avals[blockIdx.z];
__shared__ double Abs[64][65];
__shared__ double Bb[16][65];
for(int j=ty2; j<64; j+=16){
for(int y=tx2; y<64; y+=16){
Abs[y][j] = fetch_x_A(trackA + y-tx2) ;
}
trackA += __mul24( 16, m);
}
for(int k=0; k<kblocks; k++){
B = Bval[k];
int trackB = tx2+ __mul24( ty2 * 4, ldb );
// Prefetch part of B
#pragma unroll
for(int y=0; y<4; y++){
Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ;
}
__syncthreads(); // this is necessary!!!
double Axs[4];
double Bxp[4];
double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<m-16; k1+=16)
{
trackB += 16;
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = fetch_x_B( trackB + y*ldb);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++){
Axs[y] = Abs[tx2+y*16][j1+k1] ;
}
#pragma unroll
for( int y=0; y<4; y++){
Bxp[y]= Bb[j1][ty2+y*16];
}
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
__syncthreads();
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4 + y] = xxB[y];
__syncthreads(); // this is necessary!!!
}
// Prepare where to write the result
double *C = Cval[blockIdx.z * kblocks + k];
C += tx2 + __mul24 (ty2 ,ldc);
#pragma unroll
for(int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1+k1] ;
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0;y<4; y++)
{
Cb[x*4 + y] += Axs[x]*Bxp[y];
}
}
}
int gy = ty2;
#pragma unroll
for( int y=0;y<4;y++, gy+=16)
{
int gx = tx2;
#pragma unroll
for(int x=0;x<4;x++, gx+=16)
{
if (gx < m && gy < n){
C[x*16] -= Cb[y+x*4];
}
}
C += ldc*16;
}
}
#endif
#endif
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param
size_b magma_int_t
blocksize in BCSR
@param
num_brows magma_int_t
number of block rows
@param
kblocks magma_int_t
number of blocks in row
@param
dA double**
input blocks of matrix A
@param
dB double**
input blocks of matrix B
@param
dC double**
output blocks of matrix C
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbcsrluegemm( magma_int_t size_b,
magma_int_t num_brows,
magma_int_t kblocks,
double **dA,
double **dB,
double **dC ){
#if defined(PRECISION_d)
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
printf("error: magma_dbcsrluegemm needs a CUDA architecture"
" with at least 48K shared memory (Fermi +).\n"
"Please run dbcsrlu.cpp using CUBLAS batched.\n");
}
else {
dim3 threads( 64, 4 );
dim3 grid(1, 1, num_brows);
hipLaunchKernelGGL(( dbcsr_gemm_kernel64), dim3(grid), dim3(threads), 0, magma_stream ,
size_b, size_b, kblocks, dA, dB, dC );
}
#else
printf("error: currently only supported for double precision.\n"
"Please run dbcsrlu.cpp using CUBLAS batched.\n");
#endif
return MAGMA_SUCCESS;
}
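// Each dbcsr_gemm_kernel* call updates dense size_b x size_b blocks of the trailing matrix
// as C <- C - A*B (note the subtraction in "C[x*16] -= Cb[...]" above). A scalar host
// reference of that block update is sketched below; it assumes column-major storage with
// leading dimension m for illustration, which need not match the tiled layout the kernels use.
static void reference_block_update(int m, const double* A, const double* B, double* C)
{
    for (int j = 0; j < m; ++j) {        // column of C
        for (int i = 0; i < m; ++i) {    // row of C
            double acc = 0.0;
            for (int k = 0; k < m; ++k)
                acc += A[i + k * m] * B[k + j * m];
            C[i + j * m] -= acc;         // trailing-matrix update used by the BCSR ILU factorization
        }
    }
}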
|
ee692fd80b9e75c9e2fe34a1a0ce482ab2ef91d5.cu
|
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zbcsrlugemm.cu normal z -> d, Fri Jul 18 17:34:27 2014
*/
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <cublas_v2.h> // include before magma.h
#include "magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
#define PRECISION_d
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
#define fetch_x_A(i) (((i)<m*m)?Aval[i]:0)
#define fetch_x_B(i) (((i)<m*m)?B[i]:0)
// every multiprocessor handles one BCSR-block
__global__ void
dbcsr_gemm_kernel32(
int m,
int n,
int kblocks,
double **Avals,
double **Bval,
double **Cval)
{
#if (__CUDA_ARCH__ >= 200)
#if defined(PRECISION_d)
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
double xxB[4];
double *B;
int trackA = __mul24( ty2, lda) + tx2 ;
double *Aval = Avals[blockIdx.z];
__shared__ double Abs[64][65];
__shared__ double Bb[16][65];
for(int j=ty2; j<64; j+=16){
for(int y=tx2; y<64; y+=16){
Abs[y][j] = fetch_x_A(trackA + y-tx2) ;
}
trackA += __mul24( 16, m);
}
for(int k=0; k<kblocks; k++){
B = Bval[k];
int trackB = tx2+ __mul24( ty2 * 16, ldb );
// Prefetch part of B
#pragma unroll
for(int y=0; y<4; y++){
Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ;
}
__syncthreads(); // this is necessary!!!
double Axs[4];
double Bxp[4];
double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<m-16; k1+=16)
{
trackB += 16;
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = fetch_x_B( trackB + y*ldb);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++){
Axs[y] = Abs[tx2+y*16][j1+k1] ;
}
#pragma unroll
for( int y=0; y<4; y++){
Bxp[y]= Bb[j1][ty2+y*16];
}
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4 + y] = xxB[y];
__syncthreads(); // this is necessary!!!
}
// Prepare where to write the result
double *C = Cval[blockIdx.z * kblocks + k];
C += tx2 + __mul24 (ty2 ,ldc);
#pragma unroll
for(int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1+k1] ;
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0;y<4; y++)
{
Cb[x*4 + y] += Axs[x]*Bxp[y];
}
}
}
int gy = ty2;
#pragma unroll
for( int y=0;y<4;y++, gy+=16)
{
int gx = tx2;
#pragma unroll
for(int x=0;x<4;x++, gx+=16)
{
if (gx < m && gy < n){
C[x*16] -= Cb[y+x*4];
}
}
C += ldc*16;
}
}
#endif
#endif
}
// every multiprocessor handles one BCSR-block
__global__ void
dbcsr_gemm_kernel64(
int m,
int n,
int kblocks,
double **Avals,
double **Bval,
double **Cval)
{
#if (__CUDA_ARCH__ >= 200)
#if defined(PRECISION_d)
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
double xxB[4];
double *B;
int trackA = __mul24( ty2, lda) + tx2 ;
double *Aval = Avals[blockIdx.z];
__shared__ double Abs[64][65];
__shared__ double Bb[16][65];
for(int j=ty2; j<64; j+=16){
for(int y=tx2; y<64; y+=16){
Abs[y][j] = fetch_x_A(trackA + y-tx2) ;
}
trackA += __mul24( 16, m);
}
for(int k=0; k<kblocks; k++){
B = Bval[k];
int trackB = tx2+ __mul24( ty2 * 4, ldb );
// Prefetch part of B
#pragma unroll
for(int y=0; y<4; y++){
Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ;
}
__syncthreads(); // this is necessary!!!
double Axs[4];
double Bxp[4];
double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0};
int k1;
for(k1=0; k1<m-16; k1+=16)
{
trackB += 16;
#pragma unroll
for( int y=0; y<4; y++)
xxB[y] = fetch_x_B( trackB + y*ldb);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++){
Axs[y] = Abs[tx2+y*16][j1+k1] ;
}
#pragma unroll
for( int y=0; y<4; y++){
Bxp[y]= Bb[j1][ty2+y*16];
}
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0; y<4; y++)
{
Cb[x*4+y] += Axs[x]*Bxp[y];
}
}
}
__syncthreads();
#pragma unroll
for(int y=0; y<4; y++)
Bb[tx2][ty2*4 + y] = xxB[y];
__syncthreads(); // this is necessary!!!
}
// Prepare where to write the result
double *C = Cval[blockIdx.z * kblocks + k];
C += tx2 + __mul24 (ty2 ,ldc);
#pragma unroll
for(int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<4; y++)
Axs[y] = Abs[tx2 + y*16][j1+k1] ;
#pragma unroll
for( int y=0; y<4; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<4; x++)
{
#pragma unroll
for( int y=0;y<4; y++)
{
Cb[x*4 + y] += Axs[x]*Bxp[y];
}
}
}
int gy = ty2;
#pragma unroll
for( int y=0;y<4;y++, gy+=16)
{
int gx = tx2;
#pragma unroll
for(int x=0;x<4;x++, gx+=16)
{
if (gx < m && gy < n){
C[x*16] -= Cb[y+x*4];
}
}
C += ldc*16;
}
}
#endif
#endif
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param
size_b magma_int_t
blocksize in BCSR
@param
num_brows magma_int_t
number of block rows
@param
kblocks magma_int_t
number of blocks in row
@param
dA double**
input blocks of matrix A
@param
dB double**
input blocks of matrix B
@param
dC double**
output blocks of matrix C
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbcsrluegemm( magma_int_t size_b,
magma_int_t num_brows,
magma_int_t kblocks,
double **dA,
double **dB,
double **dC ){
#if defined(PRECISION_d)
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
printf("error: magma_dbcsrluegemm needs a CUDA architecture"
" with at least 48K shared memory (Fermi +).\n"
"Please run dbcsrlu.cpp using CUBLAS batched.\n");
}
else {
dim3 threads( 64, 4 );
dim3 grid(1, 1, num_brows);
dbcsr_gemm_kernel64<<< grid, threads, 0, magma_stream >>>(
size_b, size_b, kblocks, dA, dB, dC );
}
#else
printf("error: currently only supported for double precision.\n"
"Please run dbcsrlu.cpp using CUBLAS batched.\n");
#endif
return MAGMA_SUCCESS;
}
|
ebbd8ae66565389e112be2d5bb221b630b6155ee.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDAConvolution.h"
#include "CUDAUtils.h"
#include "CUDAData.h"
extern CUDAData* cudaData;
__device__ __constant__ float dKernelConvolution[KERNEL_WIDTH];
const int KERNEL_SIZE = KERNEL_WIDTH * sizeof(float);
// Loop unrolling templates, needed for best performance
template<int i> __device__ float convolutionRowT(float *data){return data[KERNEL_RADIUS-i]*dKernelConvolution[i]+convolutionRowT<i-1>(data);}
template<> __device__ float convolutionRowT<-1>(float *data){return 0;}
template<int i> __device__ float convolutionColT(float *data){return data[(KERNEL_RADIUS-i)*COLUMN_TILE_WIDTH]*dKernelConvolution[i]+convolutionColT<i-1>(data);}
template<> __device__ float convolutionColT<-1>(float *data){return 0;}
__global__ void convolutionRow(float *d_Result, float *d_Data, int dataW, int dataH)
{
const int rowStart = IMUL(blockIdx.y, dataW);
__shared__ float data[KERNEL_RADIUS + ROW_TILE_WIDTH + KERNEL_RADIUS];
const int tileStart = IMUL(blockIdx.x, ROW_TILE_WIDTH);
const int tileEnd = tileStart + ROW_TILE_WIDTH - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
const int tileEndClamped = min(tileEnd, dataW - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataW - 1);
const int apronStartAligned = tileStart - KERNEL_RADIUS_ALIGNED;
const int loadPos = apronStartAligned + threadIdx.x;
if(loadPos >= apronStart)
{
const int smemPos = loadPos - apronStart;
data[smemPos] = ((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ? d_Data[rowStart + loadPos] : 0;
}
__syncthreads();
const int writePos = tileStart + threadIdx.x;
if(writePos <= tileEndClamped)
{
const int smemPos = writePos - apronStart;
float sum = 0;
sum = convolutionRowT<2 * KERNEL_RADIUS>(data + smemPos);
d_Result[rowStart + writePos] = sum;
}
}
__global__ void convolutionColumn(float *d_Result, float *d_Data, int dataW, int dataH, int smemStride, int gmemStride)
{
const int columnStart = IMUL(blockIdx.x, COLUMN_TILE_WIDTH) + threadIdx.x;
__shared__ float data[COLUMN_TILE_WIDTH * (KERNEL_RADIUS + COLUMN_TILE_HEIGHT + KERNEL_RADIUS)];
const int tileStart = IMUL(blockIdx.y, COLUMN_TILE_HEIGHT);
const int tileEnd = tileStart + COLUMN_TILE_HEIGHT - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
const int tileEndClamped = min(tileEnd, dataH - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataH - 1);
int smemPos = IMUL(threadIdx.y, COLUMN_TILE_WIDTH) + threadIdx.x;
int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart;
for(int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y)
{
data[smemPos] = ((y >= apronStartClamped) && (y <= apronEndClamped)) ? d_Data[gmemPos] : 0;
smemPos += smemStride;
gmemPos += gmemStride;
}
__syncthreads();
smemPos = IMUL(threadIdx.y + KERNEL_RADIUS, COLUMN_TILE_WIDTH) + threadIdx.x;
gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart;
for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y)
{
float sum = 0;
sum = convolutionColT<2 * KERNEL_RADIUS>(data + smemPos);
d_Result[gmemPos] = sum;
smemPos += smemStride;
gmemPos += gmemStride;
}
}
__host__ void initialiseConvolution(int width, int height)
{
cudaData->hKernelConvolution = (float *)malloc(KERNEL_SIZE);
cudaData->hKernelConvolution[0] = 0.5f;
cudaData->hKernelConvolution[1] = 0;
cudaData->hKernelConvolution[2] = -0.5f;
perseusSafeCall(hipMemcpyToSymbol(dKernelConvolution, cudaData->hKernelConvolution, KERNEL_SIZE));
}
__host__ void shutdownConvolution()
{
free(cudaData->hKernelConvolution);
}
__host__ void computeDerivativeXY(float* function, float* derivativeX, float* derivativeY, int width, int height)
{
dim3 blockGridRows = dim3(iDivUp(width, ROW_TILE_WIDTH), height);
dim3 blockGridColumns = dim3(iDivUp(width, COLUMN_TILE_WIDTH), iDivUp(height, COLUMN_TILE_HEIGHT));
dim3 threadBlockRows = dim3(KERNEL_RADIUS_ALIGNED + ROW_TILE_WIDTH + KERNEL_RADIUS);
dim3 threadBlockColumns = dim3(COLUMN_TILE_WIDTH, 8);
hipLaunchKernelGGL(( convolutionRow), dim3(blockGridRows), dim3(threadBlockRows), 0, 0, derivativeX, function, width, height);
hipLaunchKernelGGL(( convolutionColumn), dim3(blockGridColumns), dim3(threadBlockColumns), 0, 0, derivativeY, function, width, height,
COLUMN_TILE_WIDTH * threadBlockColumns.y, width * threadBlockColumns.y);
}
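// initialiseConvolution loads the 3-tap central-difference stencil {0.5, 0, -0.5}, so
// (assuming KERNEL_RADIUS is 1, as those three taps suggest) convolutionRow computes
// 0.5f*(f[x+1] - f[x-1]) with out-of-range samples treated as zero. A host reference for a
// single row, useful when validating computeDerivativeXY on small images (hedged sketch):
static void reference_row_derivative(const float* f, float* dfdx, int width)
{
    for (int x = 0; x < width; ++x) {
        float left  = (x - 1 >= 0)    ? f[x - 1] : 0.0f;  // zero padding, matching the apron clamping
        float right = (x + 1 < width) ? f[x + 1] : 0.0f;
        dfdx[x] = 0.5f * (right - left);                  // stencil {0.5, 0, -0.5} applied as in the kernel
    }
}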
|
ebbd8ae66565389e112be2d5bb221b630b6155ee.cu
|
#include "CUDAConvolution.h"
#include "CUDAUtils.h"
#include "CUDAData.h"
extern CUDAData* cudaData;
__device__ __constant__ float dKernelConvolution[KERNEL_WIDTH];
const int KERNEL_SIZE = KERNEL_WIDTH * sizeof(float);
// Loop unrolling templates, needed for best performance
template<int i> __device__ float convolutionRowT(float *data){return data[KERNEL_RADIUS-i]*dKernelConvolution[i]+convolutionRowT<i-1>(data);}
template<> __device__ float convolutionRowT<-1>(float *data){return 0;}
template<int i> __device__ float convolutionColT(float *data){return data[(KERNEL_RADIUS-i)*COLUMN_TILE_WIDTH]*dKernelConvolution[i]+convolutionColT<i-1>(data);}
template<> __device__ float convolutionColT<-1>(float *data){return 0;}
__global__ void convolutionRow(float *d_Result, float *d_Data, int dataW, int dataH)
{
const int rowStart = IMUL(blockIdx.y, dataW);
__shared__ float data[KERNEL_RADIUS + ROW_TILE_WIDTH + KERNEL_RADIUS];
const int tileStart = IMUL(blockIdx.x, ROW_TILE_WIDTH);
const int tileEnd = tileStart + ROW_TILE_WIDTH - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
const int tileEndClamped = min(tileEnd, dataW - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataW - 1);
const int apronStartAligned = tileStart - KERNEL_RADIUS_ALIGNED;
const int loadPos = apronStartAligned + threadIdx.x;
if(loadPos >= apronStart)
{
const int smemPos = loadPos - apronStart;
data[smemPos] = ((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ? d_Data[rowStart + loadPos] : 0;
}
__syncthreads();
const int writePos = tileStart + threadIdx.x;
if(writePos <= tileEndClamped)
{
const int smemPos = writePos - apronStart;
float sum = 0;
sum = convolutionRowT<2 * KERNEL_RADIUS>(data + smemPos);
d_Result[rowStart + writePos] = sum;
}
}
__global__ void convolutionColumn(float *d_Result, float *d_Data, int dataW, int dataH, int smemStride, int gmemStride)
{
const int columnStart = IMUL(blockIdx.x, COLUMN_TILE_WIDTH) + threadIdx.x;
__shared__ float data[COLUMN_TILE_WIDTH * (KERNEL_RADIUS + COLUMN_TILE_HEIGHT + KERNEL_RADIUS)];
const int tileStart = IMUL(blockIdx.y, COLUMN_TILE_HEIGHT);
const int tileEnd = tileStart + COLUMN_TILE_HEIGHT - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
const int tileEndClamped = min(tileEnd, dataH - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataH - 1);
int smemPos = IMUL(threadIdx.y, COLUMN_TILE_WIDTH) + threadIdx.x;
int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart;
for(int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y)
{
data[smemPos] = ((y >= apronStartClamped) && (y <= apronEndClamped)) ? d_Data[gmemPos] : 0;
smemPos += smemStride;
gmemPos += gmemStride;
}
__syncthreads();
smemPos = IMUL(threadIdx.y + KERNEL_RADIUS, COLUMN_TILE_WIDTH) + threadIdx.x;
gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart;
for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y)
{
float sum = 0;
sum = convolutionColT<2 * KERNEL_RADIUS>(data + smemPos);
d_Result[gmemPos] = sum;
smemPos += smemStride;
gmemPos += gmemStride;
}
}
__host__ void initialiseConvolution(int width, int height)
{
cudaData->hKernelConvolution = (float *)malloc(KERNEL_SIZE);
cudaData->hKernelConvolution[0] = 0.5f;
cudaData->hKernelConvolution[1] = 0;
cudaData->hKernelConvolution[2] = -0.5f;
perseusSafeCall(cudaMemcpyToSymbol(dKernelConvolution, cudaData->hKernelConvolution, KERNEL_SIZE));
}
__host__ void shutdownConvolution()
{
free(cudaData->hKernelConvolution);
}
__host__ void computeDerivativeXY(float* function, float* derivativeX, float* derivativeY, int width, int height)
{
dim3 blockGridRows = dim3(iDivUp(width, ROW_TILE_WIDTH), height);
dim3 blockGridColumns = dim3(iDivUp(width, COLUMN_TILE_WIDTH), iDivUp(height, COLUMN_TILE_HEIGHT));
dim3 threadBlockRows = dim3(KERNEL_RADIUS_ALIGNED + ROW_TILE_WIDTH + KERNEL_RADIUS);
dim3 threadBlockColumns = dim3(COLUMN_TILE_WIDTH, 8);
convolutionRow<<<blockGridRows, threadBlockRows>>> (derivativeX, function, width, height);
convolutionColumn<<<blockGridColumns, threadBlockColumns>>>( derivativeY, function, width, height,
COLUMN_TILE_WIDTH * threadBlockColumns.y, width * threadBlockColumns.y);
}
|
3b0f9171026d6e754c83044637f4fff4b9602b6d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> host_idata(idata, idata + n);
thrust::host_vector<int> host_odata(n);
thrust::device_vector<int> dev_idata = host_idata;
thrust::device_vector<int> dev_odata = host_odata;
timer().startGpuTimer();
thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin());
timer().endGpuTimer();
thrust::copy(dev_odata.begin(), dev_odata.end(), odata);
}
}
}
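// Usage sketch for the wrapper above (array contents chosen for illustration only):
// scan() performs an *exclusive* prefix sum, so output[i] is the sum of idata[0..i-1]
// and the first output element is always 0.
static void thrust_scan_usage_example()
{
    int idata[5] = {3, 1, 4, 1, 5};
    int odata[5] = {0, 0, 0, 0, 0};
    StreamCompaction::Thrust::scan(5, odata, idata);
    // odata now holds {0, 3, 4, 8, 9}
}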
|
3b0f9171026d6e754c83044637f4fff4b9602b6d.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> host_idata(idata, idata + n);
thrust::host_vector<int> host_odata(n);
thrust::device_vector<int> dev_idata = host_idata;
thrust::device_vector<int> dev_odata = host_odata;
timer().startGpuTimer();
thrust::exclusive_scan(dev_idata.begin(), dev_idata.end(), dev_odata.begin());
timer().endGpuTimer();
thrust::copy(dev_odata.begin(), dev_odata.end(), odata);
}
}
}
|
22f64dfac52a2446c58f574bff446db406da2b60.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
22f64dfac52a2446c58f574bff446db406da2b60.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
bceb54fe5f9dbe8272d5df75c21458cf83a00074.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/gpu_matcher.hpp"
#include "kernels.hip"
// Constructor for GPU Matcher
GPUMatcher::GPUMatcher(){}
// Function to perform feature matching on GPU
void GPUMatcher::match(cv::Mat& desc1, cv::Mat& desc2,
std::vector<cv::DMatch>& matches)
{
Matrix desc1_h, desc2_h, product_mat_h;
Matrix desc1_d, desc2_d, product_mat_d;
// Transpose desc2 matrix
cv::Mat desc2_trans;
cv::transpose(desc2, desc2_trans);
// Allocate matrices on GPU
AllocateDeviceMatrix(desc1_d, desc1.rows, desc1.cols);
AllocateDeviceMatrix(desc2_d, desc2_trans.rows, desc2_trans.cols);
AllocateDeviceMatrix(product_mat_d, desc1.rows, desc2_trans.cols);
// Flatten matrices
FlattenMatrix(desc1, desc1_h);
FlattenMatrix(desc2_trans, desc2_h);
// Allocate Matrix on Host
//AllocateHostMatrix(product_mat_h, desc1.rows, desc2_trans.cols);
// Copy flattened matrices to device memory
CopyMatrixToDevice(desc1_h, desc1_d);
CopyMatrixToDevice(desc2_h, desc2_d);
/*
// TODO: Implement dynamic block size based on product_mat_d size
// Configure matrix multiplication kernel
hipDeviceProp_t prop;
int deviceId = 0;
hipError_t ret_val = hipGetDeviceProperties(&prop, deviceId);
if(ret_val != hipSuccess)
{
std::cout << "Error getting cuda device property!" << std::endl;
exit(-1);
}
dim3 dimBlock, dimGrid;
if(product_mat_d.rows*product_mat_d.cols < prop.maxThreadsPerBlock)
{
}
*/
dim3 dimBlock, dimGrid;
dimBlock.x = 16, dimBlock.y = 16, dimBlock.z=1;
dimGrid.x = (int)ceil((float)product_mat_d.cols/dimBlock.x);
dimGrid.y = (int)ceil((float)product_mat_d.rows/dimBlock.y);
dimGrid.z = 1;
// Launch matrix multiplication kernel
hipLaunchKernelGGL(( matmult_kernel_v1), dim3(dimGrid), dim3(dimBlock), 0, 0, desc1_d, desc2_d, product_mat_d);
hipDeviceSynchronize();
// Copy matrix to host for testing
//CopyMatrixToHost(product_mat_d, product_mat_h);
// Allocate memory for array of match objects in host and device
Match *matches_h, *matches_d;
//AllocateHostMatchArray(matches_h, product_mat_d.rows);
//AllocateDeviceMatchArray(matches_d, product_mat_d.rows);
matches_h = (Match *)malloc(product_mat_d.rows * sizeof(Match));
if(matches_h == NULL)
{
std::cout << "Error allocating host memory!" << std::endl;
exit(-1);
}
hipError_t ret_val = hipMalloc((void**)&matches_d, product_mat_d.rows * sizeof(Match));
if(ret_val != hipSuccess)
{
std::cout << "Error allocating memory on device!" << std::endl;
exit(-1);
}
// Configure find_min kernel
dimBlock.x = 256;
dimBlock.y = 1;
dimBlock.z = 1;
dimGrid.x = (int)ceil((float)product_mat_d.rows/dimBlock.x);
dimGrid.y = 1;
dimGrid.z = 1;
// Launch find_min kernel
hipLaunchKernelGGL(( find_min), dim3(dimGrid), dim3(dimBlock), 0, 0, product_mat_d, matches_d);
//dummy<<<dimGrid, dimBlock>>>();
hipDeviceSynchronize();
// Copy Match array from device
CopyMatchArrayToHost(matches_d, matches_h, product_mat_d.rows);
// Convert Match array to vector of DMatch objects
ConvertMatchToDMatch(matches_h, matches, product_mat_d.rows);
}
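// Conceptually, find_min scans one row of the descriptor-product matrix and keeps the
// column with the smallest value as that row's best match (idx1 = row, idx2 = argmin,
// distance = the minimum), which is then converted to cv::DMatch objects below. The routine
// here is a hedged host-side reference sketch of that behaviour for a host-resident copy of
// the product matrix; it is illustrative only and not called by match() itself.
static void reference_find_min(const Matrix& product, Match* out)
{
    for (int r = 0; r < product.rows; ++r) {
        int best_col = 0;
        float best_val = product.elements[r * product.cols];
        for (int c = 1; c < product.cols; ++c) {
            float v = product.elements[r * product.cols + c];
            if (v < best_val) { best_val = v; best_col = c; }
        }
        out[r].idx1 = r;
        out[r].idx2 = best_col;
        out[r].distance = best_val;
    }
}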
// Function to allocate matrix on device
void GPUMatcher::AllocateDeviceMatrix(Matrix& M, int rows, int cols)
{
M.rows = rows;
M.cols = cols;
int size = rows * cols * sizeof(float);
hipError_t ret_val = hipMalloc((void**)&M.elements, size);
if(ret_val != hipSuccess)
{
std::cout << "Error allocating memory on device!" << std::endl;
exit(-1);
}
}
// Function to free Matrix in device memory
void GPUMatcher::FreeDeviceMatrix(Matrix& M)
{
hipError_t ret_val = hipFree(M.elements);
if(ret_val != hipSuccess)
{
std::cout << "Unable to free allocated device memory!" << std::endl;
}
M.elements = NULL;
}
// Function to allocate matrix in host memory
void GPUMatcher::AllocateHostMatrix(Matrix& M, int rows, int cols)
{
M.rows = rows;
M.cols = cols;
int size = rows * cols * sizeof(float);
M.elements = (float *)malloc(size);
if(M.elements == NULL)
{
std::cout << "Error allocating host memory!" << std::endl;
exit(-1);
}
}
// Function to allocate array of Match objects in host memory (note: the pointer is passed by value, so the allocation never reaches the caller; match() above allocates inline instead)
void GPUMatcher::AllocateHostMatchArray(Match* matches, int size)
{
matches = (Match *)malloc(size * sizeof(Match));
if(matches == NULL)
{
std::cout << "Error allocating host memory!" << std::endl;
exit(-1);
}
}
// Function to allocate array of Match objects in device memory (same caveat: the pointer is passed by value, so the caller never sees the allocation)
void GPUMatcher::AllocateDeviceMatchArray(Match* matches, int size)
{
hipError_t ret_val = hipMalloc((void**)&matches, size * sizeof(Match));
if(ret_val != hipSuccess)
{
std::cout << "Error allocating memory on device!" << std::endl;
exit(-1);
}
}
// Function to free device Match array
void GPUMatcher::FreeDeviceMatchArray(Match* matches)
{
hipError_t ret_val = hipFree(matches);
if(ret_val != hipSuccess)
{
std::cout << "Unable to free allocated device memory!" << std::endl;
}
matches = NULL;
}
// Function to flatten 2D matrix
void GPUMatcher::FlattenMatrix(cv::Mat& M_Mat, Matrix& M)
{
M.rows = M_Mat.rows;
M.cols = M_Mat.cols;
M.elements = (float *)malloc(M_Mat.rows * M_Mat.cols * sizeof(float));
for(int i=0; i<M_Mat.rows; i++)
{
for(int j=0; j<M_Mat.cols; j++)
{
M.elements[i * M_Mat.cols + j] = M_Mat.at<float>(i,j);
}
}
}
// Function to copy matrix from host to device
void GPUMatcher::CopyMatrixToDevice(Matrix& M_h, Matrix& M_d)
{
int size = M_h.rows * M_h.cols * sizeof(float);
hipError_t ret_val = hipMemcpy(M_d.elements, M_h.elements, size,
hipMemcpyHostToDevice);
if(ret_val != hipSuccess)
{
std::cout << "Unable to copy data to device memory" << std::endl;
exit(-1);
}
}
// Function to copy matrix from device to host
void GPUMatcher::CopyMatrixToHost(Matrix M_d, Matrix M_h)
{
M_h.rows = M_d.rows;
M_h.cols = M_d.cols;
int size = M_d.rows * M_d.cols * sizeof(float);
hipError_t ret_val = hipMemcpy(M_h.elements, M_d.elements, size,
hipMemcpyDeviceToHost);
if(ret_val != hipSuccess)
{
std::cout << "Unable to copy data fom device to host" << std::endl;
exit(-1);
}
}
// Function to copy array of Match objects from device to host memory
void GPUMatcher::CopyMatchArrayToHost(Match* matches_d, Match* matches_h,
int size)
{
hipError_t ret_val = hipMemcpy(matches_h, matches_d, size*sizeof(Match),
hipMemcpyDeviceToHost);
if(ret_val != hipSuccess)
{
std::cout << "Error copying data from device to host!" << std::endl;
exit(-1);
}
}
// Function to copy array of Match objects from device to host memory
void GPUMatcher::CopyMatchArrayToDevice(Match* matches_h, Match* matches_d,
int size)
{
hipError_t ret_val = hipMemcpy(matches_d, matches_h, size*sizeof(Match),
hipMemcpyHostToDevice);
if(ret_val != hipSuccess)
{
std::cout << "Error copying data from host to device!" << std::endl;
exit(-1);
}
}
// Function to convert array of Match objects to vector of cv::DMatch objects
void GPUMatcher::ConvertMatchToDMatch(Match* matches_h,
std::vector<cv::DMatch>& matches,
int size)
{
for(int i=0; i<size; i++)
{
matches.push_back(cv::DMatch(matches_h[i].idx1, matches_h[i].idx2,
0, matches_h[i].distance));
}
}
|
bceb54fe5f9dbe8272d5df75c21458cf83a00074.cu
|
#include "../include/gpu_matcher.hpp"
#include "kernels.cu"
// Constructor for GPU Matcher
GPUMatcher::GPUMatcher(){}
// Function to perform feature matching on GPU
void GPUMatcher::match(cv::Mat& desc1, cv::Mat& desc2,
std::vector<cv::DMatch>& matches)
{
Matrix desc1_h, desc2_h, product_mat_h;
Matrix desc1_d, desc2_d, product_mat_d;
// Transpose desc2 matrix
cv::Mat desc2_trans;
cv::transpose(desc2, desc2_trans);
// Allocate matrices on GPU
AllocateDeviceMatrix(desc1_d, desc1.rows, desc1.cols);
AllocateDeviceMatrix(desc2_d, desc2_trans.rows, desc2_trans.cols);
AllocateDeviceMatrix(product_mat_d, desc1.rows, desc2_trans.cols);
// Flatten matrices
FlattenMatrix(desc1, desc1_h);
FlattenMatrix(desc2_trans, desc2_h);
// Allocate Matrix on Host
//AllocateHostMatrix(product_mat_h, desc1.rows, desc2_trans.cols);
// Copy flattened matrices to device memory
CopyMatrixToDevice(desc1_h, desc1_d);
CopyMatrixToDevice(desc2_h, desc2_d);
/*
// TODO: Implement dynamic block size based on product_mat_d size
// Configure matrix multiplication kernel
cudaDeviceProp prop;
int deviceId = 0;
cudaError_t ret_val = cudaGetDeviceProperties(&prop, deviceId);
if(ret_val != cudaSuccess)
{
std::cout << "Error getting cuda device property!" << std::endl;
exit(-1);
}
dim3 dimBlock, dimGrid;
if(product_mat_d.rows*product_mat_d.cols < prop.maxThreadsPerBlock)
{
}
*/
dim3 dimBlock, dimGrid;
dimBlock.x = 16, dimBlock.y = 16, dimBlock.z=1;
dimGrid.x = (int)ceil((float)product_mat_d.cols/dimBlock.x);
dimGrid.y = (int)ceil((float)product_mat_d.rows/dimBlock.y);
dimGrid.z = 1;
// Launch matrix multiplication kernel
matmult_kernel_v1<<<dimGrid, dimBlock>>>(desc1_d, desc2_d, product_mat_d);
cudaDeviceSynchronize();
// Copy matrix to host for testing
//CopyMatrixToHost(product_mat_d, product_mat_h);
// Allocate memory for array of match objects in host and device
Match *matches_h, *matches_d;
//AllocateHostMatchArray(matches_h, product_mat_d.rows);
//AllocateDeviceMatchArray(matches_d, product_mat_d.rows);
matches_h = (Match *)malloc(product_mat_d.rows * sizeof(Match));
if(matches_h == NULL)
{
std::cout << "Error allocating host memory!" << std::endl;
exit(-1);
}
cudaError_t ret_val = cudaMalloc((void**)&matches_d, product_mat_d.rows * sizeof(Match));
if(ret_val != cudaSuccess)
{
std::cout << "Error allocating memory on device!" << std::endl;
exit(-1);
}
// Configure find_min kernel
dimBlock.x = 256;
dimBlock.y = 1;
dimBlock.z = 1;
dimGrid.x = (int)ceil((float)product_mat_d.rows/dimBlock.x);
dimGrid.y = 1;
dimGrid.z = 1;
// Launch find_min kernel
find_min<<<dimGrid, dimBlock>>>(product_mat_d, matches_d);
//dummy<<<dimGrid, dimBlock>>>();
cudaDeviceSynchronize();
// Copy Match array from device
CopyMatchArrayToHost(matches_d, matches_h, product_mat_d.rows);
// Convert Match array to vector of DMatch objects
ConvertMatchToDMatch(matches_h, matches, product_mat_d.rows);
}
// Function to allocate matrix on device
void GPUMatcher::AllocateDeviceMatrix(Matrix& M, int rows, int cols)
{
M.rows = rows;
M.cols = cols;
int size = rows * cols * sizeof(float);
cudaError_t ret_val = cudaMalloc((void**)&M.elements, size);
if(ret_val != cudaSuccess)
{
std::cout << "Error allocating memory on device!" << std::endl;
exit(-1);
}
}
// Function to free Matrix in device memory
void GPUMatcher::FreeDeviceMatrix(Matrix& M)
{
cudaError_t ret_val = cudaFree(M.elements);
if(ret_val != cudaSuccess)
{
std::cout << "Unable to free allocated device memory!" << std::endl;
}
M.elements = NULL;
}
// Function to allocate matrix in host memory
void GPUMatcher::AllocateHostMatrix(Matrix& M, int rows, int cols)
{
M.rows = rows;
M.cols = cols;
int size = rows * cols * sizeof(float);
M.elements = (float *)malloc(size);
if(M.elements == NULL)
{
std::cout << "Error allocating host memory!" << std::endl;
exit(-1);
}
}
// Function to allocate array of Match objects in host memory (note: the pointer is passed by value, so the allocation never reaches the caller; match() above allocates inline instead)
void GPUMatcher::AllocateHostMatchArray(Match* matches, int size)
{
matches = (Match *)malloc(size * sizeof(Match));
if(matches == NULL)
{
std::cout << "Error allocating host memory!" << std::endl;
exit(-1);
}
}
// Function to allocate array of Match objects in device memory (same caveat: the pointer is passed by value, so the caller never sees the allocation)
void GPUMatcher::AllocateDeviceMatchArray(Match* matches, int size)
{
cudaError_t ret_val = cudaMalloc((void**)&matches, size * sizeof(Match));
if(ret_val != cudaSuccess)
{
std::cout << "Error allocating memory on device!" << std::endl;
exit(-1);
}
}
// Function to free device Match array
void GPUMatcher::FreeDeviceMatchArray(Match* matches)
{
cudaError_t ret_val = cudaFree(matches);
if(ret_val != cudaSuccess)
{
std::cout << "Unable to free allocated device memory!" << std::endl;
}
matches = NULL;
}
// Function to flatten 2D matrix
void GPUMatcher::FlattenMatrix(cv::Mat& M_Mat, Matrix& M)
{
M.rows = M_Mat.rows;
M.cols = M_Mat.cols;
M.elements = (float *)malloc(M_Mat.rows * M_Mat.cols * sizeof(float));
for(int i=0; i<M_Mat.rows; i++)
{
for(int j=0; j<M_Mat.cols; j++)
{
M.elements[i * M_Mat.cols + j] = M_Mat.at<float>(i,j);
}
}
}
// Function to copy matrix from host to device
void GPUMatcher::CopyMatrixToDevice(Matrix& M_h, Matrix& M_d)
{
int size = M_h.rows * M_h.cols * sizeof(float);
cudaError_t ret_val = cudaMemcpy(M_d.elements, M_h.elements, size,
cudaMemcpyHostToDevice);
if(ret_val != cudaSuccess)
{
std::cout << "Unable to copy data to device memory" << std::endl;
exit(-1);
}
}
// Function to copy matrix from device to host
void GPUMatcher::CopyMatrixToHost(Matrix M_d, Matrix M_h)
{
M_h.rows = M_d.rows;
M_h.cols = M_d.cols;
int size = M_d.rows * M_d.cols * sizeof(float);
cudaError_t ret_val = cudaMemcpy(M_h.elements, M_d.elements, size,
cudaMemcpyDeviceToHost);
if(ret_val != cudaSuccess)
{
std::cout << "Unable to copy data fom device to host" << std::endl;
exit(-1);
}
}
// Function to copy array of Match objects from device to host memory
void GPUMatcher::CopyMatchArrayToHost(Match* matches_d, Match* matches_h,
int size)
{
cudaError_t ret_val = cudaMemcpy(matches_h, matches_d, size*sizeof(Match),
cudaMemcpyDeviceToHost);
if(ret_val != cudaSuccess)
{
std::cout << "Error copying data from device to host!" << std::endl;
exit(-1);
}
}
// Function to copy array of Match objects from device to host memory
void GPUMatcher::CopyMatchArrayToDevice(Match* matches_h, Match* matches_d,
int size)
{
cudaError_t ret_val = cudaMemcpy(matches_d, matches_h, size*sizeof(Match),
cudaMemcpyHostToDevice);
if(ret_val != cudaSuccess)
{
std::cout << "Error copying data from host to device!" << std::endl;
exit(-1);
}
}
// Function to convert array of Match objects to vector of cv::DMatch objects
void GPUMatcher::ConvertMatchToDMatch(Match* matches_h,
std::vector<cv::DMatch>& matches,
int size)
{
for(int i=0; i<size; i++)
{
matches.push_back(cv::DMatch(matches_h[i].idx1, matches_h[i].idx2,
0, matches_h[i].distance));
}
}
|
72a75dad4003aeabb6f12d31fd3f033f872d6b0a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SolverGPU.cuh"
void check(hipError_t x) {
fprintf(stderr, "%s\n", hipGetErrorString(x));
}
typedef unsigned long uint64_t;
void PrintCudaCards()
{
int GPU_N;
printf("Starting MultiGPU\n");
hipGetDeviceCount(&GPU_N);
if (GPU_N > 4)
{
GPU_N = 4;
}
printf("CUDA-capable device count: %i\n", GPU_N);
}
__global__ void initkernelParticals(Player *bots,int numElements, GameConstants* gconst)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
for (int pos = tid; pos < numElements; pos += threadN)
{
for (int l=0;l<gconst->TypeSize;l++)
{
bots[pos].typeAmount[l]=0;
for (int l1=0;l1<gconst->TypeSize;l1++)
{
bots[pos].typeAmountPP[l][l1]=0;
}
}
}
}
__device__ inline float cLength(Vec3 v)
{
float lSq = cLengthSq(v);
if (lSq>0.0f)
return sqrtf(lSq);
else
return 0.0f;
}
__device__ inline float cLengthSq(Vec3 v)
{
return Dot(v,v);
}
__device__ inline Vec3 cSafeNormalize(Vec3 v, Vec3 fallback)
{
float l = cLengthSq(v);
if (l > 0.0f)
{
return v * cInvSqrt(l);
}
else
return fallback;
}
__device__ inline float cDistance(Vec3 v1, Vec3 v2)
{
return cLength(v1-v2);
}
__device__ inline float cInvSqrt(float x)
{
return 1.0f/sqrtf(x);
}
__device__ Vec3 projectUonV(const Vec3& u, const Vec3& v)
{
Vec3 r=Vec3(0,0,0);
float DotVV=Dot(v, v);
if(DotVV!=0)
r = v* (Dot(u, v)/DotVV);
return r;
}
__device__ void performCollision(Player& s1, Player& s2)
{
float softCoeff=0.973f;
Vec3 nv1; // new velocity for sphere 1
Vec3 nv2; // new velocity for sphere 2
// This can probably be optimised a bit, but it basically swaps the velocity components
// that are perpendicular to the surface of the collision.
// If the spheres had different masses, you would need to scale the exchanged velocity
// components in inverse proportion to their masses.
nv1 = s1.velocity;
nv1 =nv1+ projectUonV(s2.velocity, s2.currPos- s1.currPos);
nv1 =nv1- projectUonV(s1.velocity, s1.currPos- s2.currPos);
nv2 = s2.velocity;
nv2 =nv2+ projectUonV(s1.velocity, s2.currPos- s1.currPos);
nv2 =nv2- projectUonV(s2.velocity, s1.currPos- s2.currPos);
s1.velocity = softCoeff*nv1;
s2.velocity = softCoeff*nv2;
}
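// For equal masses, the projections above exchange the velocity components along the line
// between the two centres, scaled by softCoeff = 0.973f to bleed off a little energy.
// A 1D head-on worked check (hedged sketch; along the collision normal each projection is
// the full velocity, so nv1 = v1 + v2 - v1 = v2 and nv2 = v1):
static void head_on_collision_1d(float& v1, float& v2)
{
    const float softCoeff = 0.973f;
    float nv1 = v2;         // sphere 1 takes sphere 2's normal component
    float nv2 = v1;         // and vice versa
    v1 = softCoeff * nv1;
    v2 = softCoeff * nv2;   // e.g. v1 = +5, v2 = -5  ->  v1 ~ -4.865, v2 ~ +4.865
}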
__device__ inline int array2D(int a, int b, int numElements)
{
return a*numElements+b;
}
__global__ void UpdateBallVelocity(Player* bot, Constraint* allconstr, GameConstants* gconst,int numElements)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float m=10.0f;
float deltaTime=gconst->deltaTime;
for (int pos = tid; pos < numElements; pos += threadN)
{
Vec3 directForce=Vec3(0,0,0);
for (int i = 0; i< numElements; i++)
{
if(pos!=i)
{
bool collide=false;
bot[pos].force=0.0f;
float dotV2=cDistance(bot[pos].currPos,bot[i].currPos);
float dotV=cDistance(bot[pos].currPos,bot[i].currPos)-(bot[pos].radius+bot[i].radius);
float dotVC=cDistance(bot[pos].currPos,Vec3(0,0,0));
Vec3 directPP=bot[pos].currPos-bot[i].currPos;
if(dotV<0.0f)
{
collide=true;
}
if(dotV<15.0f)
// if(bot[pos].link)
{
if(LinksField[bot[pos].type][bot[i].type]==1)
bot[pos].force-=directPP*gconst->gravconst2*(bot[pos].mass*bot[i].mass)/(dotV2*dotV2+0.001f);
else {
bot[pos].force+=directPP*gconst->gravconst2*(bot[pos].mass*bot[i].mass)/(dotV2*dotV2+0.001f);
}
}
if (allconstr[array2D(pos,i,numElements)].active )
//or allconstr[array2D(i,pos,numElements)].active)
{
if (dotV>gconst->constrLenth)
//bot[pos].force-=directPP*gconst->gravconst*(bot[pos].mass*bot[i].mass)/(1-(dotV2-gconst->constrLenth)/gconst->constrLenth);
bot[pos].force-=directPP*gconst->gravconst*(bot[pos].mass*bot[i].mass)/abs(1-(dotV-gconst->constrLenth)/gconst->constrLenth);
if(dotV<gconst->constrLenth)
//bot[pos].force+=directPP*gconst->gravconst*(bot[pos].mass*bot[i].mass)/(1-(gconst->constrLenth-dotV2)/gconst->constrLenth);
bot[pos].force+=directPP*gconst->gravconst*(bot[pos].mass*bot[i].mass)/abs(1-(gconst->constrLenth-dotV)/gconst->constrLenth);
}
else
{
if(dotVC>175)
{
bot[pos].force-=directPP*gconst->gravconst*(bot[pos].mass*bot[pos].mass)/(dotVC*dotVC+0.001f);
//bot[pos].velocity=bot[pos].velocity+deltaTime*bot[pos].force;
}
}
bot[pos].velocity=bot[pos].velocity+deltaTime*bot[pos].force;
if(collide)
{
//move to real point of collide
float dotV2=cDistance(bot[pos].currPos,bot[i].currPos);
float dist=0.001+(bot[pos].radius+bot[i].radius)-dotV2;
float distHalf=dist/2.f;
Vec3 directPP=bot[pos].currPos-bot[i].currPos;
bot[pos].currPos=bot[pos].currPos+ distHalf*cSafeNormalize(directPP,Vec3(0,0,0));
bot[i].currPos=bot[i].currPos- distHalf*cSafeNormalize(directPP,Vec3(0,0,0));
if(!bot[pos].collide)
{
bot[pos].collide=true;
bot[i].collide=true;
performCollision(bot[pos],bot[i]);
bot[i].force=Vec3(0,0,0);
bot[pos].force=Vec3(0,0,0);
}
}
}
}
}
}
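// The pairwise term above scales the (unnormalized) separation vector directPP by
// gravconst2 * m_i * m_j / (d*d + 0.001f), where d is the centre distance and the small
// constant softens the force so overlapping particles never divide by zero. Because
// directPP itself has length d, the resulting magnitude falls off roughly as 1/d.
// A scalar sketch of that scale factor (helper name is illustrative):
static float pair_force_scale(float gravconst2, float m_i, float m_j, float d)
{
    const float softening = 0.001f;                        // same constant as in UpdateBallVelocity
    return gravconst2 * (m_i * m_j) / (d * d + softening); // factor multiplied onto the separation vector
}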
__global__ void UpdateBallPos(Player* bot, GameConstants* gconst, int numElements)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float deltaTime=gconst->deltaTime;
for (int pos = tid; pos < numElements; pos += threadN)
{
//float dotV=cDistance(bot[pos].currPos,Vec3(0,0,0));
// if(dotV>95.0f)
// bot[pos].velocity=-0.75f*bot[pos].velocity;
if(cLength(bot[pos].velocity)>gconst->speedLimit)
{
bot[pos].velocity=cSafeNormalize(bot[pos].velocity, Vec3(0,0,0))*gconst->speedLimit;
}
/*
int sumlink=0;
for(int l=0;l<3;l++)
{
sumlink+=bot[pos].typeAmount[l];
}
if(sumlink>0)
bot[pos].link=true;
else {
bot[pos].link=false;
}
*/
//if(abs(bot[pos].currPos.x)>gconst->WIDTH)
// bot[pos].velocity.x=-bot[pos].velocity.x;
//if(abs(bot[pos].currPos.y)>gconst->HEIGHT)
// bot[pos].velocity.y=-bot[pos].velocity.y;
//if(abs(bot[pos].currPos.z)>gconst->DEPTH)
// bot[pos].velocity.z=-bot[pos].velocity.z;
bot[pos].currPos=bot[pos].currPos+deltaTime*bot[pos].velocity;
bot[pos].collide=false;
bot[pos].velocity_old=bot[pos].velocity;
}
}
__global__ void UpdateBallPos2(Player* bot, int numElements)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float m=10.0f;
float deltaTime=0.01f;
for (int pos = tid; pos < numElements; pos += threadN)
{
//float dotV=cDistance(bot[pos].currPos,Vec3(0,0,0));
// if(dotV>95.0f)
// bot[pos].velocity=-0.75f*bot[pos].velocity;
// bot[pos].accelerate=bot[pos].force/bot[pos].mass;
bot[pos].velocity=bot[pos].velocity+deltaTime*bot[pos].accelerate;
bot[pos].currPos=bot[pos].currPos+deltaTime*bot[pos].velocity;
bot[pos].collide=false;
bot[pos].velocity_old=bot[pos].velocity;
bot[pos].force=Vec3(0, 0, 0);
}
}
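// UpdateConstrains2: link creation. For every ordered pair the rest length is refreshed,
// and the symmetric constraint is activated when the surface distance drops below
// constrLenthCreate, the Links table allows the two types, and the per-type and
// per-pair-type counters are still below their caps. The counter updates are plain
// increments (the atomicAdd calls are left commented out), so concurrent threads can
// race on them.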
__global__ void UpdateConstrains2(Player* bot, Constraint* constr, Constraint* allconstr, GameConstants* gconst, int numElements, int numElements2)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float m=10.0f;
float deltaTime=gconst->deltaTime;
for (int pos = tid; pos < numElements; pos += threadN)
{
for (int i = 0; i< numElements; i++)
{
if(pos!=i)
{
float dotV=cDistance(bot[pos].currPos,bot[i].currPos)-(bot[pos].radius+bot[i].radius);
allconstr[array2D(pos,i, gconst->number_of_bots)].restlength=dotV;
if(dotV<gconst->constrLenthCreate && Links[bot[pos].type][bot[i].type]==1
// && bot[pos].typeAmount[bot[i].type]<LinkTypeSize[bot[i].type]
&& bot[pos].typeAmount[bot[i].type]<LinkTypeSize[bot[i].type]
&& bot[i].typeAmount[bot[pos].type]<LinkTypeSize[bot[pos].type]
&& bot[pos].typeAmountPP[bot[pos].type][bot[i].type]<LinkTypePP[bot[pos].type][bot[i].type]
&& bot[i].typeAmountPP[bot[i].type][bot[pos].type]<LinkTypePP[bot[i].type][bot[pos].type]
&& (allconstr[array2D(pos,i, gconst->number_of_bots)].active!=true or
allconstr[array2D(i,pos, gconst->number_of_bots)].active!=true)
//&& bot[i].typeAmount[bot[pos].type]<LinkTypeSize[bot[pos].type]
)
{
//constr[pos].active=true;
// atomicAdd(&bot[pos].typeAmount[bot[i].type],1);
// atomicAdd(&bot[i].typeAmount[bot[pos].type],1);
bot[pos].typeAmount[bot[i].type]+=1;
bot[i].typeAmount[bot[pos].type]+=1;
bot[pos].typeAmountPP[bot[pos].type][bot[i].type]+=1;
bot[i].typeAmountPP[bot[i].type][bot[pos].type]+=1;
allconstr[array2D(pos,i, gconst->number_of_bots)].active=true;
allconstr[array2D(i,pos, gconst->number_of_bots)].active=true;
bot[pos].link=true;
bot[i].link=true;
}
}
}
}
}
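// UpdateConstrains3: link removal. When an active constraint is stretched to
// constrLenthCreate or beyond, both directions of the constraint are deactivated, the
// per-type and per-pair-type counters are decremented (clamped at zero), and the link
// flags are cleared once the corresponding counters reach zero.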
__global__ void UpdateConstrains3(Player* bot, Constraint* constr, Constraint* allconstr, GameConstants* gconst, int numElements, int numElements2)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float m=10.0f;
float deltaTime=gconst->deltaTime;
for (int pos = tid; pos < numElements; pos += threadN)
{
for (int i = 0; i< numElements; i++)
{
if(pos!=i)
{
float dotV=cDistance(bot[pos].currPos,bot[i].currPos)-(bot[pos].radius+bot[i].radius);
allconstr[array2D(pos,i, gconst->number_of_bots)].restlength=dotV;
if(dotV>=gconst->constrLenthCreate &&
(allconstr[array2D(pos,i, gconst->number_of_bots)].active==true
or
allconstr[array2D(i,pos, gconst->number_of_bots)].active==true))
{
/*
if(bot[pos].typeAmount[bot[i].type]>0)
atomicAdd(&bot[pos].typeAmount[bot[i].type],-1);
if(bot[i].typeAmount[bot[pos].type]>0)
atomicAdd(&bot[i].typeAmount[bot[pos].type],-1);
*/
if(bot[pos].typeAmount[bot[i].type]>0)
bot[pos].typeAmount[bot[i].type]-=1;
if(bot[i].typeAmount[bot[pos].type]>0)
bot[i].typeAmount[bot[pos].type]-=1;
if(bot[pos].typeAmount[bot[i].type]<=0)
{
bot[pos].typeAmount[bot[i].type]=0;
bot[pos].link=false;
}
if(bot[i].typeAmount[bot[pos].type]<=0)
{
bot[i].typeAmount[bot[pos].type]=0;
bot[i].link=false;
}
if(bot[pos].typeAmountPP[bot[pos].type][bot[i].type]>0)
bot[pos].typeAmountPP[bot[pos].type][bot[i].type]-=1;
if(bot[i].typeAmountPP[bot[i].type][bot[pos].type]>0)
bot[i].typeAmountPP[bot[i].type][bot[pos].type]-=1;
if(bot[pos].typeAmountPP[bot[pos].type][bot[i].type]<=0)
{
bot[pos].typeAmountPP[bot[pos].type][bot[i].type]=0;
}
if(bot[i].typeAmountPP[bot[i].type][bot[pos].type]<=0)
{
bot[i].typeAmountPP[bot[i].type][bot[pos].type]=0;
bot[i].link=false;
}
allconstr[array2D(pos,i, gconst->number_of_bots)].active=false;
allconstr[array2D(i,pos, gconst->number_of_bots)].active=false;
}
}
}
}
}
__global__ void UpdateConstrains(Player* bot, Constraint* constr, Constraint* allconstr, GameConstants* gconst, int numElements, int numElements2)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float m=10.0f;
float deltaTime=gconst->deltaTime;
for (int pos = tid; pos < numElements; pos += threadN)
{
float dotV=cDistance(bot[constr[pos].particleA].currPos, bot[constr[pos].particleB].currPos)-(bot[constr[pos].particleA].radius+bot[constr[pos].particleB].radius);
constr[pos].restlength=dotV;
/*
if (pos == 0)
{
for (int i = 0; i < 3; ++i)
printf(" particals type %1d %5d ", i, bot[constr[pos].particleA].typeAmount[i]);
printf("\n");
}
*/
if(dotV<5.5f && (Links[bot[constr[pos].particleA].type][bot[constr[pos].particleB].type]==1) &&
bot[constr[pos].particleA].typeAmount[bot[constr[pos].particleB].type]<LinkTypeSize[bot[constr[pos].particleB].type] &&
bot[constr[pos].particleB].typeAmount[bot[constr[pos].particleA].type]<LinkTypeSize[bot[constr[pos].particleA].type]
)
{
constr[pos].active=true;
bot[constr[pos].particleA].typeAmount[bot[constr[pos].particleB].type]+=1;
bot[constr[pos].particleB].typeAmount[bot[constr[pos].particleA].type]+=1;
allconstr[array2D(constr[pos].particleA,constr[pos].particleB, numElements2)].active=true;
allconstr[array2D(constr[pos].particleB,constr[pos].particleA, numElements2)].active=true;
}
else
{
if(bot[constr[pos].particleA].typeAmount[bot[constr[pos].particleB].type]>0)
bot[constr[pos].particleA].typeAmount[bot[constr[pos].particleB].type]-=1;
if(bot[constr[pos].particleB].typeAmount[bot[constr[pos].particleA].type]>0)
bot[constr[pos].particleB].typeAmount[bot[constr[pos].particleA].type]-=1;
constr[pos].active=false;
allconstr[array2D(constr[pos].particleA,constr[pos].particleB, numElements2)].active=false;
allconstr[array2D(constr[pos].particleB,constr[pos].particleA, numElements2)].active=false;
}
}
}
__global__ void CalcConstrains(Player* bot, Constraint* constr, int numElements)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float m=10.0f;
float deltaTime=0.01f;
float gravconst=10.0f;
for (int pos = tid; pos < numElements; pos += threadN)
{
//float dotV=cDistance(bot[constr[pos].particleA].currPos, bot[constr[pos].particleB].currPos);
float dotV=constr[pos].restlength;
bool collide=false;
float sumForce=0.0f;
Vec3 directForce=Vec3(0,0,0);
if(dotV<(bot[constr[pos].particleA].radius+bot[constr[pos].particleB].radius))
{
collide=true;
}
else
{
float botTypeForceFlag=1.0f;
if(bot[constr[pos].particleA].type==bot[constr[pos].particleB].type)
botTypeForceFlag=-1.0f;
if(dotV>55.0f)
{
sumForce=-gravconst*(bot[constr[pos].particleA].mass*bot[constr[pos].particleB].mass)/(dotV*dotV);
}
else if(dotV<35.5f and dotV>9.5f)
{
sumForce=-botTypeForceFlag*gravconst*(bot[constr[pos].particleA].mass*bot[constr[pos].particleB].mass)/(dotV*dotV);
}
else
{
sumForce=gravconst*(bot[constr[pos].particleA].mass*bot[constr[pos].particleB].mass)/(dotV*dotV);
}
}
if(collide)
{
// move the spheres apart to the actual point of collision
float dist=0.001f+(bot[constr[pos].particleA].radius+bot[constr[pos].particleB].radius)-dotV;
float distHalf=dist/2.f;
Vec3 directPP=bot[constr[pos].particleA].currPos-bot[constr[pos].particleB].currPos;
bot[constr[pos].particleA].currPos=bot[constr[pos].particleA].currPos+ distHalf*cSafeNormalize(directPP,Vec3(0,0,0));
bot[constr[pos].particleB].currPos=bot[constr[pos].particleB].currPos+ distHalf*cSafeNormalize(-directPP,Vec3(0,0,0));
bot[constr[pos].particleA].collide=true;
bot[constr[pos].particleB].collide=true;
performCollision(bot[constr[pos].particleA],bot[constr[pos].particleB]);
directForce=Vec3(0,0,0);
sumForce=0;
}
Vec3 directPP=bot[constr[pos].particleA].currPos-bot[constr[pos].particleB].currPos;
bot[constr[pos].particleA].force=bot[constr[pos].particleA].force+sumForce*cSafeNormalize(directPP,Vec3(0,0,0));
bot[constr[pos].particleB].force=bot[constr[pos].particleB].force-sumForce*cSafeNormalize(directPP,Vec3(0,0,0));
}
}
void initTrust(thrust::host_vector<int>& a)
{
thrust::device_vector<int> D(10, 1);
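// Note: D holds exactly 10 elements, so the copy below assumes a.size() <= 10.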
thrust::copy(a.begin(), a.end(), D.begin());
// print D
for(int i = 0; i < D.size(); i++)
std::cout << "D[" << i << "] = " << D[i] << std::endl;
}
void initKernelParticals(Player *bots,int numElements, GameConstants* gconst)
{
hipLaunchKernelGGL(( initkernelParticals), dim3(int(1 + numElements/ 32)), dim3(64), 0, 0, bots, numElements, gconst);
gpuAssert(hipDeviceSynchronize());
}
void updateBallVelocity(Player* bot, Constraint* allconstr,GameConstants* gconst,int numElements)
{
hipLaunchKernelGGL(( UpdateBallVelocity), dim3(int(1 + numElements / 32)), dim3(64), 0, 0, bot, allconstr, gconst, numElements);
gpuAssert(hipDeviceSynchronize());
hipLaunchKernelGGL(( UpdateBallPos), dim3(int(1 + numElements / 32)), dim3(64), 0, 0, bot, gconst, numElements);
gpuAssert(hipDeviceSynchronize());
}
void updateConstrains(Player* bot, Constraint* constr, Constraint* allconstr, GameConstants* gconst, int numElements, int numElements2)
{
hipLaunchKernelGGL(( UpdateConstrains2), dim3(int(1 + numElements/ 32)), dim3(64), 0, 0, bot, constr, allconstr, gconst, numElements, numElements2);
gpuAssert(hipDeviceSynchronize());
hipLaunchKernelGGL(( UpdateConstrains3), dim3(int(1 + numElements/ 32)), dim3(64), 0, 0, bot, constr, allconstr, gconst, numElements, numElements2);
gpuAssert(hipDeviceSynchronize());
}
|
72a75dad4003aeabb6f12d31fd3f033f872d6b0a.cu
|
#include "SolverGPU.cuh"
void check(cudaError x) {
fprintf(stderr, "%s\n", cudaGetErrorString(x));
}
typedef unsigned long uint64_t;
void PrintCudaCards()
{
int GPU_N;
printf("Starting MultiGPU\n");
cudaGetDeviceCount(&GPU_N);
if (GPU_N > 4)
{
GPU_N = 4;
}
printf("CUDA-capable device count: %i\n", GPU_N);
}
__global__ void initkernelParticals(Player *bots,int numElements, GameConstants* gconst)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
for (int pos = tid; pos < numElements; pos += threadN)
{
for (int l=0;l<gconst->TypeSize;l++)
{
bots[pos].typeAmount[l]=0;
for (int l1=0;l1<gconst->TypeSize;l1++)
{
bots[pos].typeAmountPP[l][l1]=0;
}
}
}
}
__device__ inline float cLength(Vec3 v)
{
float lSq = cLengthSq(v);
if (lSq>0.0f)
return sqrtf(lSq);
else
return 0.0f;
}
__device__ inline float cLengthSq(Vec3 v)
{
return Dot(v,v);
}
__device__ inline Vec3 cSafeNormalize(Vec3 v, Vec3 fallback)
{
float l = cLengthSq(v);
if (l > 0.0f)
{
return v * cInvSqrt(l);
}
else
return fallback;
}
__device__ inline float cDistance(Vec3 v1, Vec3 v2)
{
return cLength(v1-v2);
}
__device__ inline float cInvSqrt(float x)
{
return 1.0f/sqrtf(x);
}
__device__ Vec3 projectUonV(const Vec3& u, const Vec3& v)
{
Vec3 r=Vec3(0,0,0);
float DotVV=Dot(v, v);
if(DotVV!=0)
r = v* (Dot(u, v)/DotVV);
return r;
}
__device__ void performCollision(Player& s1, Player& s2)
{
float softCoeff=0.973f;
Vec3 nv1; // new velocity for sphere 1
Vec3 nv2; // new velocity for sphere 2
// this can probably be optimised a bit, but it basically swaps the velocity components
// that are perpendicular to the surface of the collision.
// If the spheres had different masses, then you would need to scale the amounts of
// velocity exchanged in inverse proportion to their masses.
nv1 = s1.velocity;
nv1 =nv1+ projectUonV(s2.velocity, s2.currPos- s1.currPos);
nv1 =nv1- projectUonV(s1.velocity, s1.currPos- s2.currPos);
nv2 = s2.velocity;
nv2 =nv2+ projectUonV(s1.velocity, s2.currPos- s1.currPos);
nv2 =nv2- projectUonV(s2.velocity, s1.currPos- s2.currPos);
s1.velocity = softCoeff*nv1;
s2.velocity = softCoeff*nv2;
}
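// Illustrative sketch (not part of the original solver): a mass-weighted variant of
// performCollision, following the comment above about unequal masses. The name
// performCollisionWeighted is hypothetical and the function is unused by the kernels
// below; it reuses the Vec3 operators and projectUonV defined in this file, and the
// 2*m/(m1+m2) factors are the standard elastic-collision weights.
__device__ void performCollisionWeighted(Player& s1, Player& s2)
{
float mSum = s1.mass + s2.mass;
if (mSum <= 0.0f) return;
// relative velocity projected onto the line of centres
Vec3 impulse = projectUonV(s1.velocity - s2.velocity, s2.currPos - s1.currPos);
s1.velocity = s1.velocity - (2.0f * s2.mass / mSum) * impulse;
s2.velocity = s2.velocity + (2.0f * s1.mass / mSum) * impulse;
}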
__device__ inline int array2D(int a, int b, int numElements)
{
return a*numElements+b;
}
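// UpdateBallVelocity: brute-force O(N^2) interaction pass. For each bot the inner loop
// visits every other bot, rebuilds the force for that pair (short-range attraction or
// repulsion chosen by LinksField, a spring-like term when the pairwise constraint is
// active, otherwise a weak pull toward the partner once the bot is more than 175 units
// from the origin), integrates the velocity with that per-pair force, and finally
// separates overlapping spheres and calls performCollision (at most once per bot per
// step, guarded by the collide flag).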
__global__ void UpdateBallVelocity(Player* bot, Constraint* allconstr, GameConstants* gconst,int numElements)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float m=10.0f;
float deltaTime=gconst->deltaTime;
for (int pos = tid; pos < numElements; pos += threadN)
{
Vec3 directForce=Vec3(0,0,0);
for (int i = 0; i< numElements; i++)
{
if(pos!=i)
{
bool collide=false;
bot[pos].force=0.0f;
float dotV2=cDistance(bot[pos].currPos,bot[i].currPos);
float dotV=cDistance(bot[pos].currPos,bot[i].currPos)-(bot[pos].radius+bot[i].radius);
float dotVC=cDistance(bot[pos].currPos,Vec3(0,0,0));
Vec3 directPP=bot[pos].currPos-bot[i].currPos;
if(dotV<0.0f)
{
collide=true;
}
if(dotV<15.0f)
// if(bot[pos].link)
{
if(LinksField[bot[pos].type][bot[i].type]==1)
bot[pos].force-=directPP*gconst->gravconst2*(bot[pos].mass*bot[i].mass)/(dotV2*dotV2+0.001f);
else {
bot[pos].force+=directPP*gconst->gravconst2*(bot[pos].mass*bot[i].mass)/(dotV2*dotV2+0.001f);
}
}
if (allconstr[array2D(pos,i,numElements)].active )
//or allconstr[array2D(i,pos,numElements)].active)
{
if (dotV>gconst->constrLenth)
//bot[pos].force-=directPP*gconst->gravconst*(bot[pos].mass*bot[i].mass)/(1-(dotV2-gconst->constrLenth)/gconst->constrLenth);
bot[pos].force-=directPP*gconst->gravconst*(bot[pos].mass*bot[i].mass)/abs(1-(dotV-gconst->constrLenth)/gconst->constrLenth);
if(dotV<gconst->constrLenth)
//bot[pos].force+=directPP*gconst->gravconst*(bot[pos].mass*bot[i].mass)/(1-(gconst->constrLenth-dotV2)/gconst->constrLenth);
bot[pos].force+=directPP*gconst->gravconst*(bot[pos].mass*bot[i].mass)/abs(1-(gconst->constrLenth-dotV)/gconst->constrLenth);
}
else
{
if(dotVC>175)
{
bot[pos].force-=directPP*gconst->gravconst*(bot[pos].mass*bot[pos].mass)/(dotVC*dotVC+0.001f);
//bot[pos].velocity=bot[pos].velocity+deltaTime*bot[pos].force;
}
}
bot[pos].velocity=bot[pos].velocity+deltaTime*bot[pos].force;
if(collide)
{
// move the spheres apart to the actual point of collision
float dotV2=cDistance(bot[pos].currPos,bot[i].currPos);
float dist=0.001f+(bot[pos].radius+bot[i].radius)-dotV2;
float distHalf=dist/2.f;
Vec3 directPP=bot[pos].currPos-bot[i].currPos;
bot[pos].currPos=bot[pos].currPos+ distHalf*cSafeNormalize(directPP,Vec3(0,0,0));
bot[i].currPos=bot[i].currPos- distHalf*cSafeNormalize(directPP,Vec3(0,0,0));
if(!bot[pos].collide)
{
bot[pos].collide=true;
bot[i].collide=true;
performCollision(bot[pos],bot[i]);
bot[i].force=Vec3(0,0,0);
bot[pos].force=Vec3(0,0,0);
}
}
}
}
}
}
__global__ void UpdateBallPos(Player* bot, GameConstants* gconst, int numElements)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float deltaTime=gconst->deltaTime;
for (int pos = tid; pos < numElements; pos += threadN)
{
//float dotV=cDistance(bot[pos].currPos,Vec3(0,0,0));
// if(dotV>95.0f)
// bot[pos].velocity=-0.75f*bot[pos].velocity;
if(cLength(bot[pos].velocity)>gconst->speedLimit)
{
bot[pos].velocity=cSafeNormalize(bot[pos].velocity, Vec3(0,0,0))*gconst->speedLimit;
}
/*
int sumlink=0;
for(int l=0;l<3;l++)
{
sumlink+=bot[pos].typeAmount[l];
}
if(sumlink>0)
bot[pos].link=true;
else {
bot[pos].link=false;
}
*/
//if(abs(bot[pos].currPos.x)>gconst->WIDTH)
// bot[pos].velocity.x=-bot[pos].velocity.x;
//if(abs(bot[pos].currPos.y)>gconst->HEIGHT)
// bot[pos].velocity.y=-bot[pos].velocity.y;
//if(abs(bot[pos].currPos.z)>gconst->DEPTH)
// bot[pos].velocity.z=-bot[pos].velocity.z;
bot[pos].currPos=bot[pos].currPos+deltaTime*bot[pos].velocity;
bot[pos].collide=false;
bot[pos].velocity_old=bot[pos].velocity;
}
}
__global__ void UpdateBallPos2(Player* bot, int numElements)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float m=10.0f;
float deltaTime=0.01f;
for (int pos = tid; pos < numElements; pos += threadN)
{
//float dotV=cDistance(bot[pos].currPos,Vec3(0,0,0));
// if(dotV>95.0f)
// bot[pos].velocity=-0.75f*bot[pos].velocity;
// bot[pos].accelerate=bot[pos].force/bot[pos].mass;
bot[pos].velocity=bot[pos].velocity+deltaTime*bot[pos].accelerate;
bot[pos].currPos=bot[pos].currPos+deltaTime*bot[pos].velocity;
bot[pos].collide=false;
bot[pos].velocity_old=bot[pos].velocity;
bot[pos].force=Vec3(0, 0, 0);
}
}
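// UpdateConstrains2: link creation. For every ordered pair the rest length is refreshed,
// and the symmetric constraint is activated when the surface distance drops below
// constrLenthCreate, the Links table allows the two types, and the per-type and
// per-pair-type counters are still below their caps. The counter updates are plain
// increments (the atomicAdd calls are left commented out), so concurrent threads can
// race on them.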
__global__ void UpdateConstrains2(Player* bot, Constraint* constr, Constraint* allconstr, GameConstants* gconst, int numElements, int numElements2)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float m=10.0f;
float deltaTime=gconst->deltaTime;
for (int pos = tid; pos < numElements; pos += threadN)
{
for (int i = 0; i< numElements; i++)
{
if(pos!=i)
{
float dotV=cDistance(bot[pos].currPos,bot[i].currPos)-(bot[pos].radius+bot[i].radius);
allconstr[array2D(pos,i, gconst->number_of_bots)].restlength=dotV;
if(dotV<gconst->constrLenthCreate && Links[bot[pos].type][bot[i].type]==1
// && bot[pos].typeAmount[bot[i].type]<LinkTypeSize[bot[i].type]
&& bot[pos].typeAmount[bot[i].type]<LinkTypeSize[bot[i].type]
&& bot[i].typeAmount[bot[pos].type]<LinkTypeSize[bot[pos].type]
&& bot[pos].typeAmountPP[bot[pos].type][bot[i].type]<LinkTypePP[bot[pos].type][bot[i].type]
&& bot[i].typeAmountPP[bot[i].type][bot[pos].type]<LinkTypePP[bot[i].type][bot[pos].type]
&& (allconstr[array2D(pos,i, gconst->number_of_bots)].active!=true or
allconstr[array2D(i,pos, gconst->number_of_bots)].active!=true)
//&& bot[i].typeAmount[bot[pos].type]<LinkTypeSize[bot[pos].type]
)
{
//constr[pos].active=true;
// atomicAdd(&bot[pos].typeAmount[bot[i].type],1);
// atomicAdd(&bot[i].typeAmount[bot[pos].type],1);
bot[pos].typeAmount[bot[i].type]+=1;
bot[i].typeAmount[bot[pos].type]+=1;
bot[pos].typeAmountPP[bot[pos].type][bot[i].type]+=1;
bot[i].typeAmountPP[bot[i].type][bot[pos].type]+=1;
allconstr[array2D(pos,i, gconst->number_of_bots)].active=true;
allconstr[array2D(i,pos, gconst->number_of_bots)].active=true;
bot[pos].link=true;
bot[i].link=true;
}
}
}
}
}
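// UpdateConstrains3: link removal. When an active constraint is stretched to
// constrLenthCreate or beyond, both directions of the constraint are deactivated, the
// per-type and per-pair-type counters are decremented (clamped at zero), and the link
// flags are cleared once the corresponding counters reach zero.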
__global__ void UpdateConstrains3(Player* bot, Constraint* constr, Constraint* allconstr, GameConstants* gconst, int numElements, int numElements2)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float m=10.0f;
float deltaTime=gconst->deltaTime;
for (int pos = tid; pos < numElements; pos += threadN)
{
for (int i = 0; i< numElements; i++)
{
if(pos!=i)
{
float dotV=cDistance(bot[pos].currPos,bot[i].currPos)-(bot[pos].radius+bot[i].radius);
allconstr[array2D(pos,i, gconst->number_of_bots)].restlength=dotV;
if(dotV>=gconst->constrLenthCreate &&
(allconstr[array2D(pos,i, gconst->number_of_bots)].active==true
or
allconstr[array2D(i,pos, gconst->number_of_bots)].active==true))
{
/*
if(bot[pos].typeAmount[bot[i].type]>0)
atomicAdd(&bot[pos].typeAmount[bot[i].type],-1);
if(bot[i].typeAmount[bot[pos].type]>0)
atomicAdd(&bot[i].typeAmount[bot[pos].type],-1);
*/
if(bot[pos].typeAmount[bot[i].type]>0)
bot[pos].typeAmount[bot[i].type]-=1;
if(bot[i].typeAmount[bot[pos].type]>0)
bot[i].typeAmount[bot[pos].type]-=1;
if(bot[pos].typeAmount[bot[i].type]<=0)
{
bot[pos].typeAmount[bot[i].type]=0;
bot[pos].link=false;
}
if(bot[i].typeAmount[bot[pos].type]<=0)
{
bot[i].typeAmount[bot[pos].type]=0;
bot[i].link=false;
}
if(bot[pos].typeAmountPP[bot[pos].type][bot[i].type]>0)
bot[pos].typeAmountPP[bot[pos].type][bot[i].type]-=1;
if(bot[i].typeAmountPP[bot[i].type][bot[pos].type]>0)
bot[i].typeAmountPP[bot[i].type][bot[pos].type]-=1;
if(bot[pos].typeAmountPP[bot[pos].type][bot[i].type]<=0)
{
bot[pos].typeAmountPP[bot[pos].type][bot[i].type]=0;
}
if(bot[i].typeAmountPP[bot[i].type][bot[pos].type]<=0)
{
bot[i].typeAmountPP[bot[i].type][bot[pos].type]=0;
bot[i].link=false;
}
allconstr[array2D(pos,i, gconst->number_of_bots)].active=false;
allconstr[array2D(i,pos, gconst->number_of_bots)].active=false;
}
}
}
}
}
__global__ void UpdateConstrains(Player* bot, Constraint* constr, Constraint* allconstr, GameConstants* gconst, int numElements, int numElements2)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float m=10.0f;
float deltaTime=gconst->deltaTime;
for (int pos = tid; pos < numElements; pos += threadN)
{
float dotV=cDistance(bot[constr[pos].particleA].currPos, bot[constr[pos].particleB].currPos)-(bot[constr[pos].particleA].radius+bot[constr[pos].particleB].radius);
constr[pos].restlength=dotV;
/*
if (pos == 0)
{
for (int i = 0; i < 3; ++i)
printf(" particals type %1d %5d ", i, bot[constr[pos].particleA].typeAmount[i]);
printf("\n");
}
*/
if(dotV<5.5f && (Links[bot[constr[pos].particleA].type][bot[constr[pos].particleB].type]==1) &&
bot[constr[pos].particleA].typeAmount[bot[constr[pos].particleB].type]<LinkTypeSize[bot[constr[pos].particleB].type] &&
bot[constr[pos].particleB].typeAmount[bot[constr[pos].particleA].type]<LinkTypeSize[bot[constr[pos].particleA].type]
)
{
constr[pos].active=true;
bot[constr[pos].particleA].typeAmount[bot[constr[pos].particleB].type]+=1;
bot[constr[pos].particleB].typeAmount[bot[constr[pos].particleA].type]+=1;
allconstr[array2D(constr[pos].particleA,constr[pos].particleB, numElements2)].active=true;
allconstr[array2D(constr[pos].particleB,constr[pos].particleA, numElements2)].active=true;
}
else
{
if(bot[constr[pos].particleA].typeAmount[bot[constr[pos].particleB].type]>0)
bot[constr[pos].particleA].typeAmount[bot[constr[pos].particleB].type]-=1;
if(bot[constr[pos].particleB].typeAmount[bot[constr[pos].particleA].type]>0)
bot[constr[pos].particleB].typeAmount[bot[constr[pos].particleA].type]-=1;
constr[pos].active=false;
allconstr[array2D(constr[pos].particleA,constr[pos].particleB, numElements2)].active=false;
allconstr[array2D(constr[pos].particleB,constr[pos].particleA, numElements2)].active=false;
}
}
}
__global__ void CalcConstrains(Player* bot, Constraint* constr, int numElements)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadN = gridDim.x * blockDim.x;
float m=10.0f;
float deltaTime=0.01f;
float gravconst=10.0f;
for (int pos = tid; pos < numElements; pos += threadN)
{
//float dotV=cDistance(bot[constr[pos].particleA].currPos, bot[constr[pos].particleB].currPos);
float dotV=constr[pos].restlength;
bool collide=false;
float sumForce=0.0f;
Vec3 directForce=Vec3(0,0,0);
if(dotV<(bot[constr[pos].particleA].radius+bot[constr[pos].particleB].radius))
{
collide=true;
}
else
{
float botTypeForceFlag=1.0f;
if(bot[constr[pos].particleA].type==bot[constr[pos].particleB].type)
botTypeForceFlag=-1.0f;
if(dotV>55.0f)
{
sumForce=-gravconst*(bot[constr[pos].particleA].mass*bot[constr[pos].particleB].mass)/(dotV*dotV);
}
else if(dotV<35.5f and dotV>9.5f)
{
sumForce=-botTypeForceFlag*gravconst*(bot[constr[pos].particleA].mass*bot[constr[pos].particleB].mass)/(dotV*dotV);
}
else
{
sumForce=gravconst*(bot[constr[pos].particleA].mass*bot[constr[pos].particleB].mass)/(dotV*dotV);
}
}
if(collide)
{
// move the spheres apart to the actual point of collision
float dist=0.001f+(bot[constr[pos].particleA].radius+bot[constr[pos].particleB].radius)-dotV;
float distHalf=dist/2.f;
Vec3 directPP=bot[constr[pos].particleA].currPos-bot[constr[pos].particleB].currPos;
bot[constr[pos].particleA].currPos=bot[constr[pos].particleA].currPos+ distHalf*cSafeNormalize(directPP,Vec3(0,0,0));
bot[constr[pos].particleB].currPos=bot[constr[pos].particleB].currPos+ distHalf*cSafeNormalize(-directPP,Vec3(0,0,0));
bot[constr[pos].particleA].collide=true;
bot[constr[pos].particleB].collide=true;
performCollision(bot[constr[pos].particleA],bot[constr[pos].particleB]);
directForce=Vec3(0,0,0);
sumForce=0;
}
Vec3 directPP=bot[constr[pos].particleA].currPos-bot[constr[pos].particleB].currPos;
bot[constr[pos].particleA].force=bot[constr[pos].particleA].force+sumForce*cSafeNormalize(directPP,Vec3(0,0,0));
bot[constr[pos].particleB].force=bot[constr[pos].particleB].force-sumForce*cSafeNormalize(directPP,Vec3(0,0,0));
}
}
void initTrust(thrust::host_vector<int>& a)
{
thrust::device_vector<int> D(10, 1);
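// Note: D holds exactly 10 elements, so the copy below assumes a.size() <= 10.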
thrust::copy(a.begin(), a.end(), D.begin());
// print D
for(int i = 0; i < D.size(); i++)
std::cout << "D[" << i << "] = " << D[i] << std::endl;
}
void initKernelParticals(Player *bots,int numElements, GameConstants* gconst)
{
initkernelParticals<<< int(1 + numElements/ 32), 64 >>> ( bots, numElements, gconst);
gpuAssert(cudaDeviceSynchronize());
}
void updateBallVelocity(Player* bot, Constraint* allconstr,GameConstants* gconst,int numElements)
{
UpdateBallVelocity<<< int(1 + numElements / 32), 64 >>> (bot, allconstr, gconst, numElements);
gpuAssert(cudaDeviceSynchronize());
UpdateBallPos <<< int(1 + numElements / 32), 64 >>> (bot, gconst, numElements);
gpuAssert(cudaDeviceSynchronize());
}
void updateConstrains(Player* bot, Constraint* constr, Constraint* allconstr, GameConstants* gconst, int numElements, int numElements2)
{
UpdateConstrains2 <<< int(1 + numElements/ 32), 64 >>> (bot, constr, allconstr, gconst, numElements, numElements2);
gpuAssert(cudaDeviceSynchronize());
UpdateConstrains3 <<< int(1 + numElements/ 32), 64 >>> (bot, constr, allconstr, gconst, numElements, numElements2);
gpuAssert(cudaDeviceSynchronize());
}
|
a5f2e78c8656f570ba91a5d67c3d548f3b94f16b.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2018 XGBoost contributors
*/
#include <gtest/gtest.h>
#include <thrust/equal.h>
#include <thrust/iterator/counting_iterator.h>
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/host_device_vector.h"
namespace xgboost {
namespace common {
void SetDevice(int device) {
int n_devices;
dh::safe_cuda(hipGetDeviceCount(&n_devices));
device %= n_devices;
dh::safe_cuda(hipSetDevice(device));
}
void InitHostDeviceVector(size_t n, const GPUDistribution& distribution,
HostDeviceVector<int> *v) {
// create the vector
GPUSet devices = distribution.Devices();
v->Shard(distribution);
v->Resize(n);
ASSERT_EQ(v->Size(), n);
ASSERT_TRUE(v->Distribution() == distribution);
ASSERT_TRUE(v->Devices() == devices);
// ensure that the devices have read-write access
for (int i = 0; i < devices.Size(); ++i) {
ASSERT_TRUE(v->DeviceCanAccess(i, GPUAccess::kRead));
ASSERT_TRUE(v->DeviceCanAccess(i, GPUAccess::kWrite));
}
// ensure that the host has no access
ASSERT_FALSE(v->HostCanAccess(GPUAccess::kWrite));
ASSERT_FALSE(v->HostCanAccess(GPUAccess::kRead));
// fill in the data on the host
std::vector<int>& data_h = v->HostVector();
// ensure that the host has full access, while the devices have none
ASSERT_TRUE(v->HostCanAccess(GPUAccess::kRead));
ASSERT_TRUE(v->HostCanAccess(GPUAccess::kWrite));
for (int i = 0; i < devices.Size(); ++i) {
ASSERT_FALSE(v->DeviceCanAccess(i, GPUAccess::kRead));
ASSERT_FALSE(v->DeviceCanAccess(i, GPUAccess::kWrite));
}
ASSERT_EQ(data_h.size(), n);
std::copy_n(thrust::make_counting_iterator(0), n, data_h.begin());
}
void PlusOne(HostDeviceVector<int> *v) {
int n_devices = v->Devices().Size();
for (int i = 0; i < n_devices; ++i) {
SetDevice(i);
thrust::transform(v->tbegin(i), v->tend(i), v->tbegin(i),
[=]__device__(unsigned int a){ return a + 1; });
}
}
void CheckDevice(HostDeviceVector<int> *v,
const std::vector<size_t>& starts,
const std::vector<size_t>& sizes,
unsigned int first, GPUAccess access) {
int n_devices = sizes.size();
ASSERT_EQ(v->Devices().Size(), n_devices);
for (int i = 0; i < n_devices; ++i) {
ASSERT_EQ(v->DeviceSize(i), sizes.at(i));
SetDevice(i);
ASSERT_TRUE(thrust::equal(v->tcbegin(i), v->tcend(i),
thrust::make_counting_iterator(first + starts[i])));
ASSERT_TRUE(v->DeviceCanAccess(i, GPUAccess::kRead));
// ensure that the device has at most the access specified by access
ASSERT_EQ(v->DeviceCanAccess(i, GPUAccess::kWrite), access == GPUAccess::kWrite);
}
ASSERT_EQ(v->HostCanAccess(GPUAccess::kRead), access == GPUAccess::kRead);
ASSERT_FALSE(v->HostCanAccess(GPUAccess::kWrite));
for (int i = 0; i < n_devices; ++i) {
SetDevice(i);
ASSERT_TRUE(thrust::equal(v->tbegin(i), v->tend(i),
thrust::make_counting_iterator(first + starts[i])));
ASSERT_TRUE(v->DeviceCanAccess(i, GPUAccess::kRead));
ASSERT_TRUE(v->DeviceCanAccess(i, GPUAccess::kWrite));
}
ASSERT_FALSE(v->HostCanAccess(GPUAccess::kRead));
ASSERT_FALSE(v->HostCanAccess(GPUAccess::kWrite));
}
void CheckHost(HostDeviceVector<int> *v, GPUAccess access) {
const std::vector<int>& data_h = access == GPUAccess::kWrite ?
v->HostVector() : v->ConstHostVector();
for (size_t i = 0; i < v->Size(); ++i) {
ASSERT_EQ(data_h.at(i), i + 1);
}
ASSERT_TRUE(v->HostCanAccess(GPUAccess::kRead));
ASSERT_EQ(v->HostCanAccess(GPUAccess::kWrite), access == GPUAccess::kWrite);
size_t n_devices = v->Devices().Size();
for (int i = 0; i < n_devices; ++i) {
ASSERT_EQ(v->DeviceCanAccess(i, GPUAccess::kRead), access == GPUAccess::kRead);
// the devices should have no write access
ASSERT_FALSE(v->DeviceCanAccess(i, GPUAccess::kWrite));
}
}
void TestHostDeviceVector
(size_t n, const GPUDistribution& distribution,
const std::vector<size_t>& starts, const std::vector<size_t>& sizes) {
SetCudaSetDeviceHandler(SetDevice);
HostDeviceVector<int> v;
InitHostDeviceVector(n, distribution, &v);
CheckDevice(&v, starts, sizes, 0, GPUAccess::kRead);
PlusOne(&v);
CheckDevice(&v, starts, sizes, 1, GPUAccess::kWrite);
CheckHost(&v, GPUAccess::kRead);
CheckHost(&v, GPUAccess::kWrite);
SetCudaSetDeviceHandler(nullptr);
}
TEST(HostDeviceVector, TestBlock) {
size_t n = 1001;
int n_devices = 2;
auto distribution = GPUDistribution::Block(GPUSet::Range(0, n_devices));
std::vector<size_t> starts{0, 501};
std::vector<size_t> sizes{501, 500};
TestHostDeviceVector(n, distribution, starts, sizes);
}
TEST(HostDeviceVector, TestGranular) {
size_t n = 3003;
int n_devices = 2;
auto distribution = GPUDistribution::Granular(GPUSet::Range(0, n_devices), 3);
std::vector<size_t> starts{0, 1503};
std::vector<size_t> sizes{1503, 1500};
TestHostDeviceVector(n, distribution, starts, sizes);
}
TEST(HostDeviceVector, TestOverlap) {
size_t n = 1001;
int n_devices = 2;
auto distribution = GPUDistribution::Overlap(GPUSet::Range(0, n_devices), 1);
std::vector<size_t> starts{0, 500};
std::vector<size_t> sizes{501, 501};
TestHostDeviceVector(n, distribution, starts, sizes);
}
TEST(HostDeviceVector, TestExplicit) {
size_t n = 1001;
int n_devices = 2;
std::vector<size_t> offsets{0, 550, 1001};
auto distribution = GPUDistribution::Explicit(GPUSet::Range(0, n_devices), offsets);
std::vector<size_t> starts{0, 550};
std::vector<size_t> sizes{550, 451};
TestHostDeviceVector(n, distribution, starts, sizes);
}
TEST(HostDeviceVector, TestCopy) {
size_t n = 1001;
int n_devices = 2;
auto distribution = GPUDistribution::Block(GPUSet::Range(0, n_devices));
std::vector<size_t> starts{0, 501};
std::vector<size_t> sizes{501, 500};
SetCudaSetDeviceHandler(SetDevice);
HostDeviceVector<int> v;
{
// a separate scope to ensure that v1 is gone before further checks
HostDeviceVector<int> v1;
InitHostDeviceVector(n, distribution, &v1);
v = v1;
}
CheckDevice(&v, starts, sizes, 0, GPUAccess::kRead);
PlusOne(&v);
CheckDevice(&v, starts, sizes, 1, GPUAccess::kWrite);
CheckHost(&v, GPUAccess::kRead);
CheckHost(&v, GPUAccess::kWrite);
SetCudaSetDeviceHandler(nullptr);
}
TEST(HostDeviceVector, Shard) {
std::vector<int> h_vec (2345);
for (size_t i = 0; i < h_vec.size(); ++i) {
h_vec[i] = i;
}
HostDeviceVector<int> vec (h_vec);
auto devices = GPUSet::Range(0, 1);
vec.Shard(devices);
ASSERT_EQ(vec.DeviceSize(0), h_vec.size());
ASSERT_EQ(vec.Size(), h_vec.size());
auto span = vec.DeviceSpan(0); // sync to device
vec.Reshard(GPUDistribution::Empty()); // pull back to cpu, empty devices.
ASSERT_EQ(vec.Size(), h_vec.size());
ASSERT_TRUE(vec.Devices().IsEmpty());
auto h_vec_1 = vec.HostVector();
ASSERT_TRUE(std::equal(h_vec_1.cbegin(), h_vec_1.cend(), h_vec.cbegin()));
}
TEST(HostDeviceVector, Reshard) {
std::vector<int> h_vec (2345);
for (size_t i = 0; i < h_vec.size(); ++i) {
h_vec[i] = i;
}
HostDeviceVector<int> vec (h_vec);
auto devices = GPUSet::Range(0, 1);
vec.Shard(devices);
ASSERT_EQ(vec.DeviceSize(0), h_vec.size());
ASSERT_EQ(vec.Size(), h_vec.size());
PlusOne(&vec);
vec.Reshard(GPUDistribution::Empty());
ASSERT_EQ(vec.Size(), h_vec.size());
ASSERT_TRUE(vec.Devices().IsEmpty());
auto h_vec_1 = vec.HostVector();
for (size_t i = 0; i < h_vec_1.size(); ++i) {
ASSERT_EQ(h_vec_1.at(i), i + 1);
}
}
TEST(HostDeviceVector, Span) {
HostDeviceVector<float> vec {1.0f, 2.0f, 3.0f, 4.0f};
vec.Shard(GPUSet{0, 1});
auto span = vec.DeviceSpan(0);
ASSERT_EQ(vec.DeviceSize(0), span.size());
ASSERT_EQ(vec.DevicePointer(0), span.data());
auto const_span = vec.ConstDeviceSpan(0);
ASSERT_EQ(vec.DeviceSize(0), span.size());
ASSERT_EQ(vec.ConstDevicePointer(0), span.data());
}
// Multi-GPUs' test
#if defined(XGBOOST_USE_NCCL)
TEST(HostDeviceVector, MGPU_Shard) {
auto devices = GPUSet::AllVisible();
if (devices.Size() < 2) {
LOG(WARNING) << "Not testing in multi-gpu environment.";
return;
}
std::vector<int> h_vec (2345);
for (size_t i = 0; i < h_vec.size(); ++i) {
h_vec[i] = i;
}
HostDeviceVector<int> vec (h_vec);
// Data size for each device.
std::vector<size_t> devices_size (devices.Size());
// From CPU to GPUs.
vec.Shard(devices);
size_t total_size = 0;
for (size_t i = 0; i < devices.Size(); ++i) {
total_size += vec.DeviceSize(i);
devices_size[i] = vec.DeviceSize(i);
}
ASSERT_EQ(total_size, h_vec.size());
ASSERT_EQ(total_size, vec.Size());
// Shard from devices to devices with different distribution.
EXPECT_ANY_THROW(
vec.Shard(GPUDistribution::Granular(devices, 12)));
// All data is drawn back to CPU
vec.Reshard(GPUDistribution::Empty());
ASSERT_TRUE(vec.Devices().IsEmpty());
ASSERT_EQ(vec.Size(), h_vec.size());
vec.Shard(GPUDistribution::Granular(devices, 12));
total_size = 0;
for (size_t i = 0; i < devices.Size(); ++i) {
total_size += vec.DeviceSize(i);
devices_size[i] = vec.DeviceSize(i);
}
ASSERT_EQ(total_size, h_vec.size());
ASSERT_EQ(total_size, vec.Size());
}
TEST(HostDeviceVector, MGPU_Reshard) {
auto devices = GPUSet::AllVisible();
if (devices.Size() < 2) {
LOG(WARNING) << "Not testing in multi-gpu environment.";
return;
}
size_t n = 1001;
int n_devices = 2;
auto distribution = GPUDistribution::Block(GPUSet::Range(0, n_devices));
std::vector<size_t> starts{0, 501};
std::vector<size_t> sizes{501, 500};
HostDeviceVector<int> v;
InitHostDeviceVector(n, distribution, &v);
CheckDevice(&v, starts, sizes, 0, GPUAccess::kRead);
PlusOne(&v);
CheckDevice(&v, starts, sizes, 1, GPUAccess::kWrite);
CheckHost(&v, GPUAccess::kRead);
CheckHost(&v, GPUAccess::kWrite);
auto distribution1 = GPUDistribution::Overlap(GPUSet::Range(0, n_devices), 1);
v.Reshard(distribution1);
for (size_t i = 0; i < n_devices; ++i) {
auto span = v.DeviceSpan(i); // sync to device
}
std::vector<size_t> starts1{0, 500};
std::vector<size_t> sizes1{501, 501};
CheckDevice(&v, starts1, sizes1, 1, GPUAccess::kWrite);
CheckHost(&v, GPUAccess::kRead);
CheckHost(&v, GPUAccess::kWrite);
}
#endif
} // namespace common
} // namespace xgboost
|
a5f2e78c8656f570ba91a5d67c3d548f3b94f16b.cu
|
/*!
* Copyright 2018 XGBoost contributors
*/
#include <gtest/gtest.h>
#include <thrust/equal.h>
#include <thrust/iterator/counting_iterator.h>
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/host_device_vector.h"
namespace xgboost {
namespace common {
void SetDevice(int device) {
int n_devices;
dh::safe_cuda(cudaGetDeviceCount(&n_devices));
device %= n_devices;
dh::safe_cuda(cudaSetDevice(device));
}
void InitHostDeviceVector(size_t n, const GPUDistribution& distribution,
HostDeviceVector<int> *v) {
// create the vector
GPUSet devices = distribution.Devices();
v->Shard(distribution);
v->Resize(n);
ASSERT_EQ(v->Size(), n);
ASSERT_TRUE(v->Distribution() == distribution);
ASSERT_TRUE(v->Devices() == devices);
// ensure that the devices have read-write access
for (int i = 0; i < devices.Size(); ++i) {
ASSERT_TRUE(v->DeviceCanAccess(i, GPUAccess::kRead));
ASSERT_TRUE(v->DeviceCanAccess(i, GPUAccess::kWrite));
}
// ensure that the host has no access
ASSERT_FALSE(v->HostCanAccess(GPUAccess::kWrite));
ASSERT_FALSE(v->HostCanAccess(GPUAccess::kRead));
// fill in the data on the host
std::vector<int>& data_h = v->HostVector();
// ensure that the host has full access, while the devices have none
ASSERT_TRUE(v->HostCanAccess(GPUAccess::kRead));
ASSERT_TRUE(v->HostCanAccess(GPUAccess::kWrite));
for (int i = 0; i < devices.Size(); ++i) {
ASSERT_FALSE(v->DeviceCanAccess(i, GPUAccess::kRead));
ASSERT_FALSE(v->DeviceCanAccess(i, GPUAccess::kWrite));
}
ASSERT_EQ(data_h.size(), n);
std::copy_n(thrust::make_counting_iterator(0), n, data_h.begin());
}
void PlusOne(HostDeviceVector<int> *v) {
int n_devices = v->Devices().Size();
for (int i = 0; i < n_devices; ++i) {
SetDevice(i);
thrust::transform(v->tbegin(i), v->tend(i), v->tbegin(i),
[=]__device__(unsigned int a){ return a + 1; });
}
}
void CheckDevice(HostDeviceVector<int> *v,
const std::vector<size_t>& starts,
const std::vector<size_t>& sizes,
unsigned int first, GPUAccess access) {
int n_devices = sizes.size();
ASSERT_EQ(v->Devices().Size(), n_devices);
for (int i = 0; i < n_devices; ++i) {
ASSERT_EQ(v->DeviceSize(i), sizes.at(i));
SetDevice(i);
ASSERT_TRUE(thrust::equal(v->tcbegin(i), v->tcend(i),
thrust::make_counting_iterator(first + starts[i])));
ASSERT_TRUE(v->DeviceCanAccess(i, GPUAccess::kRead));
// ensure that the device has at most the access specified by access
ASSERT_EQ(v->DeviceCanAccess(i, GPUAccess::kWrite), access == GPUAccess::kWrite);
}
ASSERT_EQ(v->HostCanAccess(GPUAccess::kRead), access == GPUAccess::kRead);
ASSERT_FALSE(v->HostCanAccess(GPUAccess::kWrite));
for (int i = 0; i < n_devices; ++i) {
SetDevice(i);
ASSERT_TRUE(thrust::equal(v->tbegin(i), v->tend(i),
thrust::make_counting_iterator(first + starts[i])));
ASSERT_TRUE(v->DeviceCanAccess(i, GPUAccess::kRead));
ASSERT_TRUE(v->DeviceCanAccess(i, GPUAccess::kWrite));
}
ASSERT_FALSE(v->HostCanAccess(GPUAccess::kRead));
ASSERT_FALSE(v->HostCanAccess(GPUAccess::kWrite));
}
void CheckHost(HostDeviceVector<int> *v, GPUAccess access) {
const std::vector<int>& data_h = access == GPUAccess::kWrite ?
v->HostVector() : v->ConstHostVector();
for (size_t i = 0; i < v->Size(); ++i) {
ASSERT_EQ(data_h.at(i), i + 1);
}
ASSERT_TRUE(v->HostCanAccess(GPUAccess::kRead));
ASSERT_EQ(v->HostCanAccess(GPUAccess::kWrite), access == GPUAccess::kWrite);
size_t n_devices = v->Devices().Size();
for (int i = 0; i < n_devices; ++i) {
ASSERT_EQ(v->DeviceCanAccess(i, GPUAccess::kRead), access == GPUAccess::kRead);
// the devices should have no write access
ASSERT_FALSE(v->DeviceCanAccess(i, GPUAccess::kWrite));
}
}
void TestHostDeviceVector
(size_t n, const GPUDistribution& distribution,
const std::vector<size_t>& starts, const std::vector<size_t>& sizes) {
SetCudaSetDeviceHandler(SetDevice);
HostDeviceVector<int> v;
InitHostDeviceVector(n, distribution, &v);
CheckDevice(&v, starts, sizes, 0, GPUAccess::kRead);
PlusOne(&v);
CheckDevice(&v, starts, sizes, 1, GPUAccess::kWrite);
CheckHost(&v, GPUAccess::kRead);
CheckHost(&v, GPUAccess::kWrite);
SetCudaSetDeviceHandler(nullptr);
}
TEST(HostDeviceVector, TestBlock) {
size_t n = 1001;
int n_devices = 2;
auto distribution = GPUDistribution::Block(GPUSet::Range(0, n_devices));
std::vector<size_t> starts{0, 501};
std::vector<size_t> sizes{501, 500};
TestHostDeviceVector(n, distribution, starts, sizes);
}
TEST(HostDeviceVector, TestGranular) {
size_t n = 3003;
int n_devices = 2;
auto distribution = GPUDistribution::Granular(GPUSet::Range(0, n_devices), 3);
std::vector<size_t> starts{0, 1503};
std::vector<size_t> sizes{1503, 1500};
TestHostDeviceVector(n, distribution, starts, sizes);
}
TEST(HostDeviceVector, TestOverlap) {
size_t n = 1001;
int n_devices = 2;
auto distribution = GPUDistribution::Overlap(GPUSet::Range(0, n_devices), 1);
std::vector<size_t> starts{0, 500};
std::vector<size_t> sizes{501, 501};
TestHostDeviceVector(n, distribution, starts, sizes);
}
TEST(HostDeviceVector, TestExplicit) {
size_t n = 1001;
int n_devices = 2;
std::vector<size_t> offsets{0, 550, 1001};
auto distribution = GPUDistribution::Explicit(GPUSet::Range(0, n_devices), offsets);
std::vector<size_t> starts{0, 550};
std::vector<size_t> sizes{550, 451};
TestHostDeviceVector(n, distribution, starts, sizes);
}
TEST(HostDeviceVector, TestCopy) {
size_t n = 1001;
int n_devices = 2;
auto distribution = GPUDistribution::Block(GPUSet::Range(0, n_devices));
std::vector<size_t> starts{0, 501};
std::vector<size_t> sizes{501, 500};
SetCudaSetDeviceHandler(SetDevice);
HostDeviceVector<int> v;
{
// a separate scope to ensure that v1 is gone before further checks
HostDeviceVector<int> v1;
InitHostDeviceVector(n, distribution, &v1);
v = v1;
}
CheckDevice(&v, starts, sizes, 0, GPUAccess::kRead);
PlusOne(&v);
CheckDevice(&v, starts, sizes, 1, GPUAccess::kWrite);
CheckHost(&v, GPUAccess::kRead);
CheckHost(&v, GPUAccess::kWrite);
SetCudaSetDeviceHandler(nullptr);
}
TEST(HostDeviceVector, Shard) {
std::vector<int> h_vec (2345);
for (size_t i = 0; i < h_vec.size(); ++i) {
h_vec[i] = i;
}
HostDeviceVector<int> vec (h_vec);
auto devices = GPUSet::Range(0, 1);
vec.Shard(devices);
ASSERT_EQ(vec.DeviceSize(0), h_vec.size());
ASSERT_EQ(vec.Size(), h_vec.size());
auto span = vec.DeviceSpan(0); // sync to device
vec.Reshard(GPUDistribution::Empty()); // pull back to cpu, empty devices.
ASSERT_EQ(vec.Size(), h_vec.size());
ASSERT_TRUE(vec.Devices().IsEmpty());
auto h_vec_1 = vec.HostVector();
ASSERT_TRUE(std::equal(h_vec_1.cbegin(), h_vec_1.cend(), h_vec.cbegin()));
}
TEST(HostDeviceVector, Reshard) {
std::vector<int> h_vec (2345);
for (size_t i = 0; i < h_vec.size(); ++i) {
h_vec[i] = i;
}
HostDeviceVector<int> vec (h_vec);
auto devices = GPUSet::Range(0, 1);
vec.Shard(devices);
ASSERT_EQ(vec.DeviceSize(0), h_vec.size());
ASSERT_EQ(vec.Size(), h_vec.size());
PlusOne(&vec);
vec.Reshard(GPUDistribution::Empty());
ASSERT_EQ(vec.Size(), h_vec.size());
ASSERT_TRUE(vec.Devices().IsEmpty());
auto h_vec_1 = vec.HostVector();
for (size_t i = 0; i < h_vec_1.size(); ++i) {
ASSERT_EQ(h_vec_1.at(i), i + 1);
}
}
TEST(HostDeviceVector, Span) {
HostDeviceVector<float> vec {1.0f, 2.0f, 3.0f, 4.0f};
vec.Shard(GPUSet{0, 1});
auto span = vec.DeviceSpan(0);
ASSERT_EQ(vec.DeviceSize(0), span.size());
ASSERT_EQ(vec.DevicePointer(0), span.data());
auto const_span = vec.ConstDeviceSpan(0);
ASSERT_EQ(vec.DeviceSize(0), span.size());
ASSERT_EQ(vec.ConstDevicePointer(0), span.data());
}
// Multi-GPUs' test
#if defined(XGBOOST_USE_NCCL)
TEST(HostDeviceVector, MGPU_Shard) {
auto devices = GPUSet::AllVisible();
if (devices.Size() < 2) {
LOG(WARNING) << "Not testing in multi-gpu environment.";
return;
}
std::vector<int> h_vec (2345);
for (size_t i = 0; i < h_vec.size(); ++i) {
h_vec[i] = i;
}
HostDeviceVector<int> vec (h_vec);
// Data size for each device.
std::vector<size_t> devices_size (devices.Size());
// From CPU to GPUs.
vec.Shard(devices);
size_t total_size = 0;
for (size_t i = 0; i < devices.Size(); ++i) {
total_size += vec.DeviceSize(i);
devices_size[i] = vec.DeviceSize(i);
}
ASSERT_EQ(total_size, h_vec.size());
ASSERT_EQ(total_size, vec.Size());
// Shard from devices to devices with different distribution.
EXPECT_ANY_THROW(
vec.Shard(GPUDistribution::Granular(devices, 12)));
// All data is drawn back to CPU
vec.Reshard(GPUDistribution::Empty());
ASSERT_TRUE(vec.Devices().IsEmpty());
ASSERT_EQ(vec.Size(), h_vec.size());
vec.Shard(GPUDistribution::Granular(devices, 12));
total_size = 0;
for (size_t i = 0; i < devices.Size(); ++i) {
total_size += vec.DeviceSize(i);
devices_size[i] = vec.DeviceSize(i);
}
ASSERT_EQ(total_size, h_vec.size());
ASSERT_EQ(total_size, vec.Size());
}
TEST(HostDeviceVector, MGPU_Reshard) {
auto devices = GPUSet::AllVisible();
if (devices.Size() < 2) {
LOG(WARNING) << "Not testing in multi-gpu environment.";
return;
}
size_t n = 1001;
int n_devices = 2;
auto distribution = GPUDistribution::Block(GPUSet::Range(0, n_devices));
std::vector<size_t> starts{0, 501};
std::vector<size_t> sizes{501, 500};
HostDeviceVector<int> v;
InitHostDeviceVector(n, distribution, &v);
CheckDevice(&v, starts, sizes, 0, GPUAccess::kRead);
PlusOne(&v);
CheckDevice(&v, starts, sizes, 1, GPUAccess::kWrite);
CheckHost(&v, GPUAccess::kRead);
CheckHost(&v, GPUAccess::kWrite);
auto distribution1 = GPUDistribution::Overlap(GPUSet::Range(0, n_devices), 1);
v.Reshard(distribution1);
for (size_t i = 0; i < n_devices; ++i) {
auto span = v.DeviceSpan(i); // sync to device
}
std::vector<size_t> starts1{0, 500};
std::vector<size_t> sizes1{501, 501};
CheckDevice(&v, starts1, sizes1, 1, GPUAccess::kWrite);
CheckHost(&v, GPUAccess::kRead);
CheckHost(&v, GPUAccess::kWrite);
}
#endif
} // namespace common
} // namespace xgboost
|
f76692409527062f2891dc2a00426cbf85249e8a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string.h>
#include <stdio.h>
#include <math.h>
using namespace std;
namespace GPUfunc{
static int nb = 64; //1024*1024*64*2; // max 1024*1024*64*2
static int nthre = 1; // threads per block (hardware limit is 1024)
static int nthre_total = nb*nthre;
static int nword = 1024*1024*8;
static int mem_size = sizeof(double) * nword;
static int mem_size_o = nthre_total*sizeof(double);
static double* hmem_i;
static double* hmem_o;
static double* dmem_i;
static double* dmem_o;
#define NLOOP (1000)
#define NX (14705)
__device__ double myDeviceFunc_0(double* in, int nword)
{
double z=0.0;
double x[NX];
for(int i=0; i<NX; i++) x[i] = 1.0;
for(int j=0; j<NLOOP; j++) for(int i=0; i<NX; i++) z += x[i];
return (z);
}
__device__ double myDeviceFunc_1(double* in, int nword)
{
double z=0.0;
double x[NX];
for(int i=0; i<NX; i++) x[i] = 1.0;
for(int j=0; j<NLOOP; j++) for(int i=0; i<NX; i++) z += x[i];
return (z);
}
__device__ double myDeviceFunc_2(double* in, int nword)
{
double z=0.0;
double x[NX];
for(int i=0; i<NX; i++) x[i] = 1.0;
for(int j=0; j<NLOOP; j++) for(int i=0; i<NX; i++) z += x[i];
return (z);
}
__device__ double myDeviceFunc_3(double* in, int nword)
{
double z=0.0;
double x[NX];
for(int i=0; i<NX; i++) x[i] = 1.0;
for(int j=0; j<NLOOP; j++) for(int i=0; i<NX; i++) z += x[i];
return (z);
}
__global__ void kernel(double* in, double* out, int nword)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int global_id = blockDim.x*bid + tid;
double z;
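// global_id % 4 selects one of the four identical myDeviceFunc_* variants, so
// consecutive global thread ids dispatch to different branches of the switch.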
int kernel_num = global_id % 4;
switch(kernel_num){
case 0:
z = myDeviceFunc_0(in, nword);
break;
case 1:
z = myDeviceFunc_1(in, nword);
break;
case 2:
z = myDeviceFunc_2(in, nword);
break;
case 3:
z = myDeviceFunc_3(in, nword);
break;
default:
z = myDeviceFunc_0(in, nword);
}
out[global_id] = z;
}
void initialize()
{
static bool is_first = true;
if(false == is_first) return;
// input buffer (Host)
hmem_i = (double*) malloc(mem_size);
for(int i=0; i<nword; i++) hmem_i[i] = (double)i;
// input buffer (GPU)
hipMalloc( (void**) &dmem_i, mem_size);
hipMemcpy(dmem_i, hmem_i, mem_size, hipMemcpyHostToDevice);
// output buffer (Host/GPU)
hipMalloc( (void**) &dmem_o, mem_size_o);
hmem_o = (double*) malloc(mem_size_o);
printf("# threads: %d \n", nthre_total);
printf("mem_size: %d MB\n", mem_size >> 20);
printf("mem_size_o: %d kB\n", mem_size_o >> 10);
is_first = false;
}
void run()
{
hipLaunchKernelGGL(( kernel), dim3(nb), dim3(nthre), 0, 0, dmem_i, dmem_o, nword);
hipMemcpy(hmem_o, dmem_o, mem_size_o, hipMemcpyDeviceToHost);
/*
for(int i=0; i<nthre_total; i++){
double z = hmem_o[i];
if(i>(nthre_total-4)) printf("%d, %f\n", i, z);
}
*/
printf("%d, %e\n", nthre_total-1, hmem_o[nthre_total-1]);
return;
}
void finalize(){
free(hmem_i);
free(hmem_o);
hipFree(dmem_i);
hipFree(dmem_o);
}
}
|
f76692409527062f2891dc2a00426cbf85249e8a.cu
|
#include <iostream>
#include <string.h>
#include <stdio.h>
#include <math.h>
using namespace std;
namespace GPUfunc{
static int nb = 64; //1024*1024*64*2; // max 1024*1024*64*2
static int nthre = 1; // threads per block (hardware limit is 1024)
static int nthre_total = nb*nthre;
static int nword = 1024*1024*8;
static int mem_size = sizeof(double) * nword;
static int mem_size_o = nthre_total*sizeof(double);
static double* hmem_i;
static double* hmem_o;
static double* dmem_i;
static double* dmem_o;
#define NLOOP (1000)
#define NX (14705)
__device__ double myDeviceFunc_0(double* in, int nword)
{
double z=0.0;
double x[NX];
for(int i=0; i<NX; i++) x[i] = 1.0;
for(int j=0; j<NLOOP; j++) for(int i=0; i<NX; i++) z += x[i];
return (z);
}
__device__ double myDeviceFunc_1(double* in, int nword)
{
double z=0.0;
double x[NX];
for(int i=0; i<NX; i++) x[i] = 1.0;
for(int j=0; j<NLOOP; j++) for(int i=0; i<NX; i++) z += x[i];
return (z);
}
__device__ double myDeviceFunc_2(double* in, int nword)
{
double z=0.0;
double x[NX];
for(int i=0; i<NX; i++) x[i] = 1.0;
for(int j=0; j<NLOOP; j++) for(int i=0; i<NX; i++) z += x[i];
return (z);
}
__device__ double myDeviceFunc_3(double* in, int nword)
{
double z=0.0;
double x[NX];
for(int i=0; i<NX; i++) x[i] = 1.0;
for(int j=0; j<NLOOP; j++) for(int i=0; i<NX; i++) z += x[i];
return (z);
}
__global__ void kernel(double* in, double* out, int nword)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int global_id = blockDim.x*bid + tid;
double z;
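// global_id % 4 selects one of the four identical myDeviceFunc_* variants, so
// consecutive global thread ids dispatch to different branches of the switch.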
int kernel_num = global_id % 4;
switch(kernel_num){
case 0:
z = myDeviceFunc_0(in, nword);
break;
case 1:
z = myDeviceFunc_1(in, nword);
break;
case 2:
z = myDeviceFunc_2(in, nword);
break;
case 3:
z = myDeviceFunc_3(in, nword);
break;
default:
z = myDeviceFunc_0(in, nword);
}
out[global_id] = z;
}
void initialize()
{
static bool is_first = true;
if(false == is_first) return;
// input buffer (Host)
hmem_i = (double*) malloc(mem_size);
for(int i=0; i<nword; i++) hmem_i[i] = (double)i;
// input buffer (GPU)
cudaMalloc( (void**) &dmem_i, mem_size);
cudaMemcpy(dmem_i, hmem_i, mem_size, cudaMemcpyHostToDevice);
// output buffer (Host/GPU)
cudaMalloc( (void**) &dmem_o, mem_size_o);
hmem_o = (double*) malloc(mem_size_o);
printf("# threads: %d \n", nthre_total);
printf("mem_size: %d MB\n", mem_size >> 20);
printf("mem_size_o: %d kB\n", mem_size_o >> 10);
is_first = false;
}
void run()
{
kernel<<< nb, nthre>>>(dmem_i, dmem_o, nword);
cudaMemcpy(hmem_o, dmem_o, mem_size_o, cudaMemcpyDeviceToHost);
/*
for(int i=0; i<nthre_total; i++){
double z = hmem_o[i];
if(i>(nthre_total-4)) printf("%d, %f\n", i, z);
}
*/
printf("%d, %e\n", nthre_total-1, hmem_o[nthre_total-1]);
return;
}
void finalize(){
free(hmem_i);
free(hmem_o);
cudaFree(dmem_i);
cudaFree(dmem_o);
}
}
|
3e5aa0d253f7102120d87de149e8d4d6f695bc61.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* !! ABORTED !! */
/*
This is an experimental cuda file, which implements a test version
of memset and assign_add.
assign_add is now available in assign_add.h
*/
#include "common.h"
void f_memset(int n, float a, float *x);
void f_memcpyadd2D(float* dst, int old, int ostride, const float* src, int iw, int ih, int howmany);
/*
f_memset(35, 2, xx.mutable_gpu_data());
for (int i = 0; i < 2*3; i++) {
yy.mutable_cpu_data()[i] = 1;
}
CHECK_CUDA( hipMemcpy2D(
xx.mutable_gpu_data(0, 0, 2, 2),
7*sizeof(float),
yy.gpu_data(),
3*sizeof(float),
3*sizeof(float),
2,
hipMemcpyDeviceToDevice
));
for (int i = 0; i < 100; i++)
f_memcpyadd2D(
xx.mutable_gpu_data(0, 0, 0, 0),
7,
7*5,
yy.gpu_data(),
3,
2,
1
);
// xx.cpu_data();
cout << xx << endl;
cout << yy << endl;
*/
__global__ void f_memset_kernel(int n, float a, float *x) {
CUDA_KERNEL_LOOP(i, n) {
x[i] = a;
};
}
void f_memset(int n, float a, float *x) {
hipLaunchKernelGGL(( f_memset_kernel), dim3(CUDA_NUM_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, 0, n, a, x);
}
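// f_memcpyadd2D_kernel accumulates a densely packed iw x ih x howmany source into a
// strided destination: `old` is the destination row pitch and `ostride` the per-slice
// stride (both in elements), so src element (w, h, c) is added to dst[c*ostride + h*old + w].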
__global__ void f_memcpyadd2D_kernel(int n, float* dst, int old, int ostride, const float* src, int iw, int ih, int howmany) {
CUDA_KERNEL_LOOP(i, n) {
int w = i % iw;
int h = (i / iw) % ih;
int c = i / (iw * ih);
int j = c * ostride + h * old + w;
dst[j] = dst[j] + src[i];
};
}
void f_memcpyadd2D(float* dst, int old, int ostride, const float* src, int iw, int ih, int howmany) {
int n = iw * ih * howmany;
hipLaunchKernelGGL(( f_memcpyadd2D_kernel), dim3(CUDA_NUM_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, 0,
n, dst, old, ostride, src, iw, ih, howmany
);
}
|
3e5aa0d253f7102120d87de149e8d4d6f695bc61.cu
|
/* !! ABORTED !! */
/*
This is an experimental cuda file, which implements a test version
of memset and assign_add.
assign_add is now available in assign_add.h
*/
#include "common.h"
void f_memset(int n, float a, float *x);
void f_memcpyadd2D(float* dst, int old, int ostride, const float* src, int iw, int ih, int howmany);
/*
f_memset(35, 2, xx.mutable_gpu_data());
for (int i = 0; i < 2*3; i++) {
yy.mutable_cpu_data()[i] = 1;
}
CHECK_CUDA( cudaMemcpy2D(
xx.mutable_gpu_data(0, 0, 2, 2),
7*sizeof(float),
yy.gpu_data(),
3*sizeof(float),
3*sizeof(float),
2,
cudaMemcpyDeviceToDevice
));
for (int i = 0; i < 100; i++)
f_memcpyadd2D(
xx.mutable_gpu_data(0, 0, 0, 0),
7,
7*5,
yy.gpu_data(),
3,
2,
1
);
// xx.cpu_data();
cout << xx << endl;
cout << yy << endl;
*/
__global__ void f_memset_kernel(int n, float a, float *x) {
CUDA_KERNEL_LOOP(i, n) {
x[i] = a;
};
}
void f_memset(int n, float a, float *x) {
f_memset_kernel<<<CUDA_NUM_BLOCKS(n), CUDA_NUM_THREADS>>>(n, a, x);
}
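// f_memcpyadd2D_kernel accumulates a densely packed iw x ih x howmany source into a
// strided destination: `old` is the destination row pitch and `ostride` the per-slice
// stride (both in elements), so src element (w, h, c) is added to dst[c*ostride + h*old + w].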
__global__ void f_memcpyadd2D_kernel(int n, float* dst, int old, int ostride, const float* src, int iw, int ih, int howmany) {
CUDA_KERNEL_LOOP(i, n) {
int w = i % iw;
int h = (i / iw) % ih;
int c = i / (iw * ih);
int j = c * ostride + h * old + w;
dst[j] = dst[j] + src[i];
};
}
void f_memcpyadd2D(float* dst, int old, int ostride, const float* src, int iw, int ih, int howmany) {
int n = iw * ih * howmany;
f_memcpyadd2D_kernel<<<CUDA_NUM_BLOCKS(n), CUDA_NUM_THREADS>>>(
n, dst, old, ostride, src, iw, ih, howmany
);
}
|
2a7d9c157a664372d5f5fa08a4143383e0a61860.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
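// misaligned_read_test: each thread loads a[gid + offset] and b[gid + offset], so an
// offset that is not a multiple of 32 floats shifts the warp's 128-byte global loads
// off their segment boundaries, which is the misaligned pattern this kernel exercises.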
__global__ void misaligned_read_test(float* a, float* b, float *c, int size, int offset)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int k = gid + offset;
if (k < size)
c[gid] = a[k]+ b[k];
//c[gid] = a[gid];
}
//int main(int argc, char** argv)
//{
// printf("Running 1D grid \n");
// int size = 1 << 25;
// int block_size = 128;
// unsigned int byte_size = size * sizeof(float);
// int offset = 0;
//
// if (argc > 1)
// offset = atoi(argv[1]);
//
// printf("Input size : %d \n", size);
//
// float * h_a, *h_b, *h_ref;
// h_a = (float*)malloc(byte_size);
// h_b = (float*)malloc(byte_size);
// h_ref = (float*)malloc(byte_size);
//
//
// if (!h_a)
// printf("host memory allocation error \n");
//
// for (size_t i = 0; i < size; i++)
// {
// h_a[i] = i % 10;
// h_b[i] = i % 7;
// }
//
// dim3 block(block_size);
// dim3 grid((size + block.x - 1) / block.x);
//
// printf("Kernel is launched with grid(%d,%d,%d) and block(%d,%d,%d) \n",
// grid.x, grid.y, grid.z, block.x, block.y, block.z);
//
// float *d_a, *d_b, *d_c;
//
// hipMalloc((void**)&d_a, byte_size);
// hipMalloc((void**)&d_b, byte_size);
// hipMalloc((void**)&d_c, byte_size);
// hipMemset(d_c, 0, byte_size);
//
// hipMemcpy(d_a, h_a, byte_size, hipMemcpyHostToDevice);
// hipMemcpy(d_b, h_b, byte_size, hipMemcpyHostToDevice);
//
// misaligned_read_test << <grid, block >> > (d_a, d_b, d_c, size, offset);
//
// hipDeviceSynchronize();
// hipMemcpy(h_ref, d_c, byte_size, hipMemcpyDeviceToHost);
//
// hipFree(d_c);
// hipFree(d_b);
// hipFree(d_a);
// free(h_ref);
// free(h_b);
// free(h_a);
//}
|
2a7d9c157a664372d5f5fa08a4143383e0a61860.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
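// misaligned_read_test: each thread loads a[gid + offset] and b[gid + offset], so an
// offset that is not a multiple of 32 floats shifts the warp's 128-byte global loads
// off their segment boundaries, which is the misaligned pattern this kernel exercises.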
__global__ void misaligned_read_test(float* a, float* b, float *c, int size, int offset)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int k = gid + offset;
if (k < size)
c[gid] = a[k]+ b[k];
//c[gid] = a[gid];
}
//int main(int argc, char** argv)
//{
// printf("Running 1D grid \n");
// int size = 1 << 25;
// int block_size = 128;
// unsigned int byte_size = size * sizeof(float);
// int offset = 0;
//
// if (argc > 1)
// offset = atoi(argv[1]);
//
// printf("Input size : %d \n", size);
//
// float * h_a, *h_b, *h_ref;
// h_a = (float*)malloc(byte_size);
// h_b = (float*)malloc(byte_size);
// h_ref = (float*)malloc(byte_size);
//
//
// if (!h_a)
// printf("host memory allocation error \n");
//
// for (size_t i = 0; i < size; i++)
// {
// h_a[i] = i % 10;
// h_b[i] = i % 7;
// }
//
// dim3 block(block_size);
// dim3 grid((size + block.x - 1) / block.x);
//
// printf("Kernel is launched with grid(%d,%d,%d) and block(%d,%d,%d) \n",
// grid.x, grid.y, grid.z, block.x, block.y, block.z);
//
// float *d_a, *d_b, *d_c;
//
// cudaMalloc((void**)&d_a, byte_size);
// cudaMalloc((void**)&d_b, byte_size);
// cudaMalloc((void**)&d_c, byte_size);
// cudaMemset(d_c, 0, byte_size);
//
// cudaMemcpy(d_a, h_a, byte_size, cudaMemcpyHostToDevice);
// cudaMemcpy(d_b, h_b, byte_size, cudaMemcpyHostToDevice);
//
// misaligned_read_test << <grid, block >> > (d_a, d_b, d_c, size, offset);
//
// cudaDeviceSynchronize();
// cudaMemcpy(h_ref, d_c, byte_size, cudaMemcpyDeviceToHost);
//
// cudaFree(d_c);
// cudaFree(d_b);
// cudaFree(d_a);
// free(h_ref);
// free(h_b);
// free(h_a);
//}
|
deaac074c832a4c5eba139298b231c13a5280468.hip
|
// !!! This is a file automatically generated by hipify!!!
/* A Bison parser, made by GNU Bison 2.4.1. */
/* Skeleton implementation for Bison's Yacc-like parsers in C
Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
under terms of your choice, so long as that work isn't itself a
parser generator using the skeleton or a modified version thereof
as a parser skeleton. Alternatively, if you modify or redistribute
the parser skeleton itself, you may (at your option) remove this
special exception, which will cause the skeleton and the resulting
Bison output files to be licensed under the GNU General Public
License without this special exception.
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
/* C LALR(1) parser skeleton written by Richard Stallman, by
simplifying the original so-called "semantic" parser. */
/* All symbols defined below should begin with yy or YY, to avoid
infringing on user name space. This should be done even for local
variables, as they might otherwise be expanded by user macros.
There are some unavoidable exceptions within include files to
define necessary library symbols; they are noted "INFRINGES ON
USER NAME SPACE" below. */
/* Identify Bison output. */
#define YYBISON 1
/* Bison version. */
#define YYBISON_VERSION "2.4.1"
/* Skeleton name. */
#define YYSKELETON_NAME "yacc.c"
/* Pure parsers. */
#define YYPURE 0
/* Push parsers. */
#define YYPUSH 0
/* Pull parsers. */
#define YYPULL 1
/* Using locations. */
#define YYLSP_NEEDED 0
/* Copy the first part of user declarations. */
/* Line 189 of yacc.c */
#line 17 "bison.y"
#include "lex.yy.c"
#include "cm.cu"
void clean_queues();
void order_inplace(CudaSet* a, stack<string> exe_type);
void yyerror(char *s, ...);
void emit(char *s, ...);
void emit_mul();
void emit_add();
void emit_minus();
void emit_div();
void emit_and();
void emit_eq();
void emit_or();
void emit_cmp(int val);
void emit_var(char *s, int c, char *f);
void emit_var_asc(char *s);
void emit_var_desc(char *s);
void emit_name(char *name);
void emit_count();
void emit_sum();
void emit_average();
void emit_min();
void emit_max();
void emit_string(char *str);
void emit_number(int_type val);
void emit_float(float_type val);
void emit_decimal(float_type val);
void emit_sel_name(char* name);
void emit_limit(int val);
void emit_union(char *s, char *f1, char *f2);
void emit_varchar(char *s, int c, char *f, int d);
void emit_load(char *s, char *f, int d, char* sep);
void emit_load_binary(char *s, char *f, int d);
void emit_store(char *s, char *f, char* sep);
void emit_store_binary(char *s, char *f, char* sep);
void emit_store_binary(char *s, char *f);
void emit_filter(char *s, char *f, int e);
void emit_order(char *s, char *f, int e, int ll = 0);
void emit_group(char *s, char *f, int e);
void emit_select(char *s, char *f, int ll);
void emit_join(char *s, char *j1);
void emit_join_tab(char *s);
void emit_distinct(char *s, char *f);
/* Line 189 of yacc.c */
#line 124 "bison.cu"
/* Enabling traces. */
#ifndef YYDEBUG
# define YYDEBUG 0
#endif
/* Enabling verbose error messages. */
#ifdef YYERROR_VERBOSE
# undef YYERROR_VERBOSE
# define YYERROR_VERBOSE 1
#else
# define YYERROR_VERBOSE 0
#endif
/* Enabling the token table. */
#ifndef YYTOKEN_TABLE
# define YYTOKEN_TABLE 0
#endif
/* Tokens. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
/* Put the tokens into the symbol table, so that GDB and other debuggers
know about them. */
enum yytokentype {
FILENAME = 258,
NAME = 259,
STRING = 260,
INTNUM = 261,
DECIMAL1 = 262,
BOOL1 = 263,
APPROXNUM = 264,
USERVAR = 265,
ASSIGN = 266,
EQUAL = 267,
OR = 268,
XOR = 269,
AND = 270,
REGEXP = 271,
LIKE = 272,
IS = 273,
IN = 274,
NOT = 275,
BETWEEN = 276,
COMPARISON = 277,
SHIFT = 278,
MOD = 279,
UMINUS = 280,
LOAD = 281,
STREAM = 282,
FILTER = 283,
BY = 284,
JOIN = 285,
STORE = 286,
INTO = 287,
GROUP = 288,
FROM = 289,
SELECT = 290,
AS = 291,
ORDER = 292,
ASC = 293,
DESC = 294,
COUNT = 295,
USING = 296,
SUM = 297,
AVG = 298,
MIN = 299,
MAX = 300,
LIMIT = 301,
ON = 302,
BINARY = 303
};
#endif
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
typedef union YYSTYPE
{
/* Line 214 of yacc.c */
#line 67 "bison.y"
int intval;
float floatval;
char *strval;
int subtok;
/* Line 214 of yacc.c */
#line 217 "bison.cu"
} YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
#endif
/* Copy the second part of user declarations. */
/* Line 264 of yacc.c */
#line 229 "bison.cu"
#ifdef short
# undef short
#endif
#ifdef YYTYPE_UINT8
typedef YYTYPE_UINT8 yytype_uint8;
#else
typedef unsigned char yytype_uint8;
#endif
#ifdef YYTYPE_INT8
typedef YYTYPE_INT8 yytype_int8;
#elif (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
typedef signed char yytype_int8;
#else
typedef short int yytype_int8;
#endif
#ifdef YYTYPE_UINT16
typedef YYTYPE_UINT16 yytype_uint16;
#else
typedef unsigned short int yytype_uint16;
#endif
#ifdef YYTYPE_INT16
typedef YYTYPE_INT16 yytype_int16;
#else
typedef short int yytype_int16;
#endif
#ifndef YYSIZE_T
# ifdef __SIZE_TYPE__
# define YYSIZE_T __SIZE_TYPE__
# elif defined size_t
# define YYSIZE_T size_t
# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
# define YYSIZE_T size_t
# else
# define YYSIZE_T unsigned int
# endif
#endif
#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
#ifndef YY_
# if YYENABLE_NLS
# if ENABLE_NLS
# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
# define YY_(msgid) dgettext ("bison-runtime", msgid)
# endif
# endif
# ifndef YY_
# define YY_(msgid) msgid
# endif
#endif
/* Suppress unused-variable warnings by "using" E. */
#if ! defined lint || defined __GNUC__
# define YYUSE(e) ((void) (e))
#else
# define YYUSE(e) /* empty */
#endif
/* Identity function, used to suppress warnings about constant conditions. */
#ifndef lint
# define YYID(n) (n)
#else
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static int
YYID (int yyi)
#else
static int
YYID (yyi)
int yyi;
#endif
{
return yyi;
}
#endif
#if ! defined yyoverflow || YYERROR_VERBOSE
/* The parser invokes alloca or malloc; define the necessary symbols. */
# ifdef YYSTACK_USE_ALLOCA
# if YYSTACK_USE_ALLOCA
# ifdef __GNUC__
# define YYSTACK_ALLOC __builtin_alloca
# elif defined __BUILTIN_VA_ARG_INCR
# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
# elif defined _AIX
# define YYSTACK_ALLOC __alloca
# elif defined _MSC_VER
# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
# define alloca _alloca
# else
# define YYSTACK_ALLOC alloca
# if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef _STDLIB_H
# define _STDLIB_H 1
# endif
# endif
# endif
# endif
# endif
# ifdef YYSTACK_ALLOC
/* Pacify GCC's `empty if-body' warning. */
# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
# ifndef YYSTACK_ALLOC_MAXIMUM
/* The OS might guarantee only one guard page at the bottom of the stack,
and a page size can be as small as 4096 bytes. So we cannot safely
invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
to allow for a few compiler-allocated temporary stack slots. */
# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
# endif
# else
# define YYSTACK_ALLOC YYMALLOC
# define YYSTACK_FREE YYFREE
# ifndef YYSTACK_ALLOC_MAXIMUM
# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
# endif
# if (defined __cplusplus && ! defined _STDLIB_H \
&& ! ((defined YYMALLOC || defined malloc) \
&& (defined YYFREE || defined free)))
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef _STDLIB_H
# define _STDLIB_H 1
# endif
# endif
# ifndef YYMALLOC
# define YYMALLOC malloc
# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# ifndef YYFREE
# define YYFREE free
# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void free (void *); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# endif
#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
#if (! defined yyoverflow \
&& (! defined __cplusplus \
|| (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
/* A type that is properly aligned for any stack member. */
union yyalloc
{
yytype_int16 yyss_alloc;
YYSTYPE yyvs_alloc;
};
/* The size of the maximum gap between one aligned stack and the next. */
# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
/* The size of an array large enough to hold all stacks, each with
N elements. */
# define YYSTACK_BYTES(N) \
((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ YYSTACK_GAP_MAXIMUM)
/* Copy COUNT objects from FROM to TO. The source and destination do
not overlap. */
# ifndef YYCOPY
# if defined __GNUC__ && 1 < __GNUC__
# define YYCOPY(To, From, Count) \
__builtin_memcpy (To, From, (Count) * sizeof (*(From)))
# else
# define YYCOPY(To, From, Count) \
do \
{ \
YYSIZE_T yyi; \
for (yyi = 0; yyi < (Count); yyi++) \
(To)[yyi] = (From)[yyi]; \
} \
while (YYID (0))
# endif
# endif
/* Relocate STACK from its old location to the new one. The
local variables YYSIZE and YYSTACKSIZE give the old and new number of
elements in the stack, and YYPTR gives the new location of the
stack. Advance YYPTR to a properly aligned location for the next
stack. */
# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
do \
{ \
YYSIZE_T yynewbytes; \
YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
Stack = &yyptr->Stack_alloc; \
yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
yyptr += yynewbytes / sizeof (*yyptr); \
} \
while (YYID (0))
#endif
/* YYFINAL -- State number of the termination state. */
#define YYFINAL 8
/* YYLAST -- Last index in YYTABLE. */
#define YYLAST 446
/* YYNTOKENS -- Number of terminals. */
#define YYNTOKENS 66
/* YYNNTS -- Number of nonterminals. */
#define YYNNTS 13
/* YYNRULES -- Number of rules. */
#define YYNRULES 62
/* YYNSTATES -- Number of states. */
#define YYNSTATES 153
/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
#define YYUNDEFTOK 2
#define YYMAXUTOK 303
#define YYTRANSLATE(YYX) \
((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
static const yytype_uint8 yytranslate[] =
{
0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 20, 2, 2, 2, 31, 25, 2,
59, 60, 29, 27, 65, 28, 61, 30, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 64, 58,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 33, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 62, 24, 63, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 21, 22, 23, 26, 32,
34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57
};
#if YYDEBUG
/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
YYRHS. */
static const yytype_uint16 yyprhs[] =
{
0, 0, 3, 6, 10, 12, 20, 33, 43, 49,
56, 64, 74, 81, 83, 87, 89, 91, 93, 95,
97, 99, 109, 116, 119, 122, 127, 132, 137, 142,
147, 151, 155, 159, 163, 167, 171, 175, 179, 183,
187, 191, 194, 197, 201, 207, 211, 215, 220, 221,
225, 229, 235, 237, 241, 243, 247, 248, 250, 253,
258, 264, 265
};
/* YYRHS -- A `-1'-separated list of the rules' RHS. */
static const yytype_int8 yyrhs[] =
{
67, 0, -1, 68, 58, -1, 67, 68, 58, -1,
69, -1, 4, 11, 44, 72, 43, 4, 71, -1,
4, 11, 35, 3, 50, 59, 3, 60, 45, 59,
73, 60, -1, 4, 11, 35, 3, 57, 45, 59,
73, 60, -1, 4, 11, 37, 4, 76, -1, 4,
11, 46, 4, 38, 75, -1, 4, 11, 44, 72,
43, 4, 77, -1, 40, 4, 41, 3, 50, 59,
3, 60, 78, -1, 40, 4, 41, 3, 78, 57,
-1, 4, -1, 4, 61, 4, -1, 10, -1, 5,
-1, 6, -1, 9, -1, 7, -1, 8, -1, 4,
62, 6, 63, 64, 4, 59, 6, 60, -1, 4,
62, 6, 63, 64, 4, -1, 4, 47, -1, 4,
48, -1, 49, 59, 70, 60, -1, 51, 59, 70,
60, -1, 52, 59, 70, 60, -1, 53, 59, 70,
60, -1, 54, 59, 70, 60, -1, 70, 27, 70,
-1, 70, 28, 70, -1, 70, 29, 70, -1, 70,
30, 70, -1, 70, 31, 70, -1, 70, 32, 70,
-1, 70, 15, 70, -1, 70, 12, 70, -1, 70,
13, 70, -1, 70, 14, 70, -1, 70, 26, 70,
-1, 21, 70, -1, 20, 70, -1, 70, 23, 70,
-1, 70, 23, 59, 69, 60, -1, 59, 70, 60,
-1, 70, 18, 8, -1, 70, 18, 21, 8, -1,
-1, 42, 38, 74, -1, 70, 45, 4, -1, 72,
65, 70, 45, 4, -1, 70, -1, 73, 65, 70,
-1, 70, -1, 70, 65, 74, -1, -1, 74, -1,
38, 70, -1, 39, 4, 56, 70, -1, 39, 4,
56, 70, 77, -1, -1, 55, 6, -1
};
/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
static const yytype_uint8 yyrline[] =
{
0, 137, 137, 138, 142, 145, 147, 149, 151, 153,
155, 157, 159, 164, 165, 166, 167, 168, 169, 170,
171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
184, 185, 186, 187, 188, 189, 191, 192, 193, 194,
195, 196, 197, 198, 200, 201, 205, 206, 209, 212,
216, 217, 221, 222, 226, 227, 230, 232, 235, 238,
239, 241, 244
};
#endif
#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
First, the terminals, then, starting at YYNTOKENS, nonterminals. */
static const char *const yytname[] =
{
"$end", "error", "$undefined", "FILENAME", "NAME", "STRING", "INTNUM",
"DECIMAL1", "BOOL1", "APPROXNUM", "USERVAR", "ASSIGN", "EQUAL", "OR",
"XOR", "AND", "REGEXP", "LIKE", "IS", "IN", "'!'", "NOT", "BETWEEN",
"COMPARISON", "'|'", "'&'", "SHIFT", "'+'", "'-'", "'*'", "'/'", "'%'",
"MOD", "'^'", "UMINUS", "LOAD", "STREAM", "FILTER", "BY", "JOIN",
"STORE", "INTO", "GROUP", "FROM", "SELECT", "AS", "ORDER", "ASC", "DESC",
"COUNT", "USING", "SUM", "AVG", "MIN", "MAX", "LIMIT", "ON", "BINARY",
"';'", "'('", "')'", "'.'", "'{'", "'}'", "':'", "','", "$accept",
"stmt_list", "stmt", "select_stmt", "expr", "opt_group_list",
"expr_list", "load_list", "val_list", "opt_val_list", "opt_where",
"join_list", "opt_limit", 0
};
#endif
# ifdef YYPRINT
/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
token YYLEX-NUM. */
static const yytype_uint16 yytoknum[] =
{
0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
33, 275, 276, 277, 124, 38, 278, 43, 45, 42,
47, 37, 279, 94, 280, 281, 282, 283, 284, 285,
286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
296, 297, 298, 299, 300, 301, 302, 303, 59, 40,
41, 46, 123, 125, 58, 44
};
# endif
/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
static const yytype_uint8 yyr1[] =
{
0, 66, 67, 67, 68, 69, 69, 69, 69, 69,
69, 69, 69, 70, 70, 70, 70, 70, 70, 70,
70, 70, 70, 70, 70, 70, 70, 70, 70, 70,
70, 70, 70, 70, 70, 70, 70, 70, 70, 70,
70, 70, 70, 70, 70, 70, 70, 70, 71, 71,
72, 72, 73, 73, 74, 74, 75, 75, 76, 77,
77, 78, 78
};
/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
static const yytype_uint8 yyr2[] =
{
0, 2, 2, 3, 1, 7, 12, 9, 5, 6,
7, 9, 6, 1, 3, 1, 1, 1, 1, 1,
1, 9, 6, 2, 2, 4, 4, 4, 4, 4,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 2, 2, 3, 5, 3, 3, 4, 0, 3,
3, 5, 1, 3, 1, 3, 0, 1, 2, 4,
5, 0, 2
};
/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
STATE-NUM when YYTABLE doesn't specify something else to do. Zero
means the default is an error. */
static const yytype_uint8 yydefact[] =
{
0, 0, 0, 0, 0, 4, 0, 0, 1, 0,
2, 0, 0, 0, 0, 0, 3, 0, 0, 13,
16, 17, 19, 20, 18, 15, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 61, 0, 0,
0, 8, 23, 24, 0, 0, 42, 41, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
56, 0, 0, 0, 0, 0, 58, 14, 0, 0,
0, 0, 0, 0, 45, 37, 38, 39, 36, 46,
0, 0, 43, 40, 30, 31, 32, 33, 34, 35,
50, 48, 0, 54, 57, 9, 0, 62, 12, 0,
0, 0, 25, 26, 27, 28, 29, 47, 13, 0,
0, 0, 5, 10, 0, 0, 0, 0, 52, 0,
0, 44, 0, 0, 51, 55, 61, 0, 7, 0,
22, 0, 49, 11, 0, 53, 0, 59, 0, 0,
60, 6, 21
};
/* YYDEFGOTO[NTERM-NUM]. */
static const yytype_int16 yydefgoto[] =
{
-1, 3, 4, 5, 103, 122, 35, 129, 104, 105,
41, 123, 73
};
/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
STATE-NUM. */
#define YYPACT_NINF -122
static const yytype_int16 yypact[] =
{
14, -3, 7, 5, -34, -122, 50, 23, -122, 28,
-122, 52, 61, 62, 77, 85, -122, -35, 51, -45,
-122, -122, -122, -122, -122, -122, 62, 62, 33, 36,
44, 49, 58, 62, 300, -42, 71, -29, 59, 65,
62, -122, -122, -122, 115, 114, 2, 2, 62, 62,
62, 62, 62, 171, 62, 62, 62, 62, -2, 128,
62, 62, 62, 62, 62, 62, 62, 118, 119, 62,
62, 66, 121, 67, 126, 84, 364, -122, 81, 192,
214, 235, 257, 278, -122, 364, 383, 401, 142, -122,
122, 53, 408, 414, 69, 69, -122, -122, -122, -122,
-122, -32, 321, 127, -122, -122, 143, -122, -122, 87,
62, 88, -122, -122, -122, -122, -122, -122, 29, 91,
157, 124, -122, -122, 159, 62, 104, 130, 364, 15,
162, -122, 111, 62, -122, -122, 123, 117, -122, 62,
129, 62, -122, -122, 62, 364, 184, 342, 19, 131,
-122, -122, -122
};
/* YYPGOTO[NTERM-NUM]. */
static const yytype_int16 yypgoto[] =
{
-122, -122, 190, 105, -13, -122, -122, 64, -121, -122,
-122, 48, 73
};
/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
positive, shift that token. If negative, reduce the rule which
number is the opposite. If zero, do what YYDEFACT says.
If YYTABLE_NINF, syntax error. */
#define YYTABLE_NINF -1
static const yytype_uint8 yytable[] =
{
34, 68, 42, 43, 135, 8, 89, 120, 6, 1,
121, 7, 142, 46, 47, 38, 44, 45, 1, 90,
53, 71, 39, 69, 10, 59, 72, 76, 60, 61,
62, 63, 64, 65, 66, 79, 80, 81, 82, 83,
6, 85, 86, 87, 88, 2, 92, 93, 94, 95,
96, 97, 98, 99, 2, 17, 102, 118, 20, 21,
22, 23, 24, 25, 15, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 138, 42, 43, 53, 151,
139, 36, 26, 27, 139, 11, 16, 12, 37, 40,
44, 45, 48, 2, 13, 49, 14, 128, 63, 64,
65, 66, 28, 50, 29, 30, 31, 32, 51, 70,
75, 28, 33, 29, 30, 31, 32, 52, 74, 77,
78, 33, 100, 101, 108, 106, 145, 107, 147, 109,
117, 128, 19, 20, 21, 22, 23, 24, 25, 54,
55, 56, 57, 110, 111, 58, 126, 127, 26, 27,
59, 131, 130, 60, 61, 62, 63, 64, 65, 66,
58, 132, 133, 134, 136, 59, 140, 141, 60, 61,
62, 63, 64, 65, 66, 137, 144, 28, 72, 29,
30, 31, 32, 54, 55, 56, 57, 91, 146, 58,
149, 152, 125, 9, 59, 150, 119, 60, 61, 62,
63, 64, 65, 66, 54, 55, 56, 57, 148, 143,
58, 0, 0, 0, 0, 59, 0, 0, 60, 61,
62, 63, 64, 65, 66, 0, 54, 55, 56, 57,
0, 84, 58, 0, 0, 0, 0, 59, 0, 0,
60, 61, 62, 63, 64, 65, 66, 54, 55, 56,
57, 0, 112, 58, 0, 0, 0, 0, 59, 0,
0, 60, 61, 62, 63, 64, 65, 66, 0, 54,
55, 56, 57, 0, 113, 58, 0, 0, 0, 0,
59, 0, 0, 60, 61, 62, 63, 64, 65, 66,
54, 55, 56, 57, 0, 114, 58, 0, 0, 0,
0, 59, 0, 0, 60, 61, 62, 63, 64, 65,
66, 0, 54, 55, 56, 57, 0, 115, 58, 0,
0, 0, 0, 59, 0, 0, 60, 61, 62, 63,
64, 65, 66, 54, 55, 56, 57, 0, 116, 58,
0, 0, 0, 0, 59, 67, 0, 60, 61, 62,
63, 64, 65, 66, 54, 55, 56, 57, 0, 0,
58, 0, 0, 0, 0, 59, 124, 0, 60, 61,
62, 63, 64, 65, 66, 0, 54, 55, 56, 57,
0, 120, 58, 0, 0, 0, 0, 59, 0, 0,
60, 61, 62, 63, 64, 65, 66, 56, 57, 0,
0, 58, 0, 0, 0, 0, 59, 0, 0, 60,
61, 62, 63, 64, 65, 66, 57, 0, 0, 58,
0, 0, 0, 0, 59, 0, 0, 60, 61, 62,
63, 64, 65, 66, 60, 61, 62, 63, 64, 65,
66, 61, 62, 63, 64, 65, 66
};
static const yytype_int16 yycheck[] =
{
13, 43, 47, 48, 125, 0, 8, 39, 11, 4,
42, 4, 133, 26, 27, 50, 61, 62, 4, 21,
33, 50, 57, 65, 58, 23, 55, 40, 26, 27,
28, 29, 30, 31, 32, 48, 49, 50, 51, 52,
11, 54, 55, 56, 57, 40, 59, 60, 61, 62,
63, 64, 65, 66, 40, 3, 69, 4, 5, 6,
7, 8, 9, 10, 41, 4, 4, 5, 6, 7,
8, 9, 10, 20, 21, 60, 47, 48, 91, 60,
65, 4, 20, 21, 65, 35, 58, 37, 3, 38,
61, 62, 59, 40, 44, 59, 46, 110, 29, 30,
31, 32, 49, 59, 51, 52, 53, 54, 59, 38,
45, 49, 59, 51, 52, 53, 54, 59, 59, 4,
6, 59, 4, 4, 57, 59, 139, 6, 141, 3,
8, 144, 4, 5, 6, 7, 8, 9, 10, 12,
13, 14, 15, 59, 63, 18, 3, 60, 20, 21,
23, 60, 64, 26, 27, 28, 29, 30, 31, 32,
18, 4, 38, 4, 60, 23, 4, 56, 26, 27,
28, 29, 30, 31, 32, 45, 59, 49, 55, 51,
52, 53, 54, 12, 13, 14, 15, 59, 59, 18,
6, 60, 65, 3, 23, 147, 91, 26, 27, 28,
29, 30, 31, 32, 12, 13, 14, 15, 144, 136,
18, -1, -1, -1, -1, 23, -1, -1, 26, 27,
28, 29, 30, 31, 32, -1, 12, 13, 14, 15,
-1, 60, 18, -1, -1, -1, -1, 23, -1, -1,
26, 27, 28, 29, 30, 31, 32, 12, 13, 14,
15, -1, 60, 18, -1, -1, -1, -1, 23, -1,
-1, 26, 27, 28, 29, 30, 31, 32, -1, 12,
13, 14, 15, -1, 60, 18, -1, -1, -1, -1,
23, -1, -1, 26, 27, 28, 29, 30, 31, 32,
12, 13, 14, 15, -1, 60, 18, -1, -1, -1,
-1, 23, -1, -1, 26, 27, 28, 29, 30, 31,
32, -1, 12, 13, 14, 15, -1, 60, 18, -1,
-1, -1, -1, 23, -1, -1, 26, 27, 28, 29,
30, 31, 32, 12, 13, 14, 15, -1, 60, 18,
-1, -1, -1, -1, 23, 45, -1, 26, 27, 28,
29, 30, 31, 32, 12, 13, 14, 15, -1, -1,
18, -1, -1, -1, -1, 23, 45, -1, 26, 27,
28, 29, 30, 31, 32, -1, 12, 13, 14, 15,
-1, 39, 18, -1, -1, -1, -1, 23, -1, -1,
26, 27, 28, 29, 30, 31, 32, 14, 15, -1,
-1, 18, -1, -1, -1, -1, 23, -1, -1, 26,
27, 28, 29, 30, 31, 32, 15, -1, -1, 18,
-1, -1, -1, -1, 23, -1, -1, 26, 27, 28,
29, 30, 31, 32, 26, 27, 28, 29, 30, 31,
32, 27, 28, 29, 30, 31, 32
};
/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
symbol of state STATE-NUM. */
static const yytype_uint8 yystos[] =
{
0, 4, 40, 67, 68, 69, 11, 4, 0, 68,
58, 35, 37, 44, 46, 41, 58, 3, 4, 4,
5, 6, 7, 8, 9, 10, 20, 21, 49, 51,
52, 53, 54, 59, 70, 72, 4, 3, 50, 57,
38, 76, 47, 48, 61, 62, 70, 70, 59, 59,
59, 59, 59, 70, 12, 13, 14, 15, 18, 23,
26, 27, 28, 29, 30, 31, 32, 45, 43, 65,
38, 50, 55, 78, 59, 45, 70, 4, 6, 70,
70, 70, 70, 70, 60, 70, 70, 70, 70, 8,
21, 59, 70, 70, 70, 70, 70, 70, 70, 70,
4, 4, 70, 70, 74, 75, 59, 6, 57, 3,
59, 63, 60, 60, 60, 60, 60, 8, 4, 69,
39, 42, 71, 77, 45, 65, 3, 60, 70, 73,
64, 60, 4, 38, 4, 74, 60, 45, 60, 65,
4, 56, 74, 78, 59, 70, 59, 70, 73, 6,
77, 60, 60
};
#define yyerrok (yyerrstatus = 0)
#define yyclearin (yychar = YYEMPTY)
#define YYEMPTY (-2)
#define YYEOF 0
#define YYACCEPT goto yyacceptlab
#define YYABORT goto yyabortlab
#define YYERROR goto yyerrorlab
/* Like YYERROR except do call yyerror. This remains here temporarily
to ease the transition to the new meaning of YYERROR, for GCC.
Once GCC version 2 has supplanted version 1, this can go. */
#define YYFAIL goto yyerrlab
#define YYRECOVERING() (!!yyerrstatus)
#define YYBACKUP(Token, Value) \
do \
if (yychar == YYEMPTY && yylen == 1) \
{ \
yychar = (Token); \
yylval = (Value); \
yytoken = YYTRANSLATE (yychar); \
YYPOPSTACK (1); \
goto yybackup; \
} \
else \
{ \
yyerror (YY_("syntax error: cannot back up")); \
YYERROR; \
} \
while (YYID (0))
#define YYTERROR 1
#define YYERRCODE 256
/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
If N is 0, then set CURRENT to the empty location which ends
the previous symbol: RHS[0] (always defined). */
#define YYRHSLOC(Rhs, K) ((Rhs)[K])
#ifndef YYLLOC_DEFAULT
# define YYLLOC_DEFAULT(Current, Rhs, N) \
do \
if (YYID (N)) \
{ \
(Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
(Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
(Current).last_line = YYRHSLOC (Rhs, N).last_line; \
(Current).last_column = YYRHSLOC (Rhs, N).last_column; \
} \
else \
{ \
(Current).first_line = (Current).last_line = \
YYRHSLOC (Rhs, 0).last_line; \
(Current).first_column = (Current).last_column = \
YYRHSLOC (Rhs, 0).last_column; \
} \
while (YYID (0))
#endif
/* YY_LOCATION_PRINT -- Print the location on the stream.
This macro was not mandated originally: define only if we know
we won't break user code: when these are the locations we know. */
#ifndef YY_LOCATION_PRINT
# if YYLTYPE_IS_TRIVIAL
# define YY_LOCATION_PRINT(File, Loc) \
fprintf (File, "%d.%d-%d.%d", \
(Loc).first_line, (Loc).first_column, \
(Loc).last_line, (Loc).last_column)
# else
# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
# endif
#endif
/* YYLEX -- calling `yylex' with the right arguments. */
#ifdef YYLEX_PARAM
# define YYLEX yylex (YYLEX_PARAM)
#else
# define YYLEX yylex ()
#endif
/* Enable debugging if requested. */
#if YYDEBUG
# ifndef YYFPRINTF
# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
# define YYFPRINTF fprintf
# endif
# define YYDPRINTF(Args) \
do { \
if (yydebug) \
YYFPRINTF Args; \
} while (YYID (0))
# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
do { \
if (yydebug) \
{ \
YYFPRINTF (stderr, "%s ", Title); \
yy_symbol_print (stderr, \
Type, Value); \
YYFPRINTF (stderr, "\n"); \
} \
} while (YYID (0))
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_value_print (yyoutput, yytype, yyvaluep)
FILE *yyoutput;
int yytype;
YYSTYPE const * const yyvaluep;
#endif
{
if (!yyvaluep)
return;
# ifdef YYPRINT
if (yytype < YYNTOKENS)
YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
# else
YYUSE (yyoutput);
# endif
switch (yytype)
{
default:
break;
}
}
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_print (yyoutput, yytype, yyvaluep)
FILE *yyoutput;
int yytype;
YYSTYPE const * const yyvaluep;
#endif
{
if (yytype < YYNTOKENS)
YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
else
YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
yy_symbol_value_print (yyoutput, yytype, yyvaluep);
YYFPRINTF (yyoutput, ")");
}
/*------------------------------------------------------------------.
| yy_stack_print -- Print the state stack from its BOTTOM up to its |
| TOP (included). |
`------------------------------------------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
#else
static void
yy_stack_print (yybottom, yytop)
yytype_int16 *yybottom;
yytype_int16 *yytop;
#endif
{
YYFPRINTF (stderr, "Stack now");
for (; yybottom <= yytop; yybottom++)
{
int yybot = *yybottom;
YYFPRINTF (stderr, " %d", yybot);
}
YYFPRINTF (stderr, "\n");
}
# define YY_STACK_PRINT(Bottom, Top) \
do { \
if (yydebug) \
yy_stack_print ((Bottom), (Top)); \
} while (YYID (0))
/*------------------------------------------------.
| Report that the YYRULE is going to be reduced. |
`------------------------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
#else
static void
yy_reduce_print (yyvsp, yyrule)
YYSTYPE *yyvsp;
int yyrule;
#endif
{
int yynrhs = yyr2[yyrule];
int yyi;
unsigned long int yylno = yyrline[yyrule];
YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
yyrule - 1, yylno);
/* The symbols being reduced. */
for (yyi = 0; yyi < yynrhs; yyi++)
{
YYFPRINTF (stderr, " $%d = ", yyi + 1);
yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
&(yyvsp[(yyi + 1) - (yynrhs)])
);
YYFPRINTF (stderr, "\n");
}
}
# define YY_REDUCE_PRINT(Rule) \
do { \
if (yydebug) \
yy_reduce_print (yyvsp, Rule); \
} while (YYID (0))
/* Nonzero means print parse trace. It is left uninitialized so that
multiple parsers can coexist. */
int yydebug;
#else /* !YYDEBUG */
# define YYDPRINTF(Args)
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
# define YY_STACK_PRINT(Bottom, Top)
# define YY_REDUCE_PRINT(Rule)
#endif /* !YYDEBUG */
/* YYINITDEPTH -- initial size of the parser's stacks. */
#ifndef YYINITDEPTH
# define YYINITDEPTH 200
#endif
/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
if the built-in stack extension method is used).
Do not make this value too large; the results are undefined if
YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
evaluated with infinite-precision integer arithmetic. */
#ifndef YYMAXDEPTH
# define YYMAXDEPTH 10000
#endif
#if YYERROR_VERBOSE
# ifndef yystrlen
# if defined __GLIBC__ && defined _STRING_H
# define yystrlen strlen
# else
/* Return the length of YYSTR. */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static YYSIZE_T
yystrlen (const char *yystr)
#else
static YYSIZE_T
yystrlen (yystr)
const char *yystr;
#endif
{
YYSIZE_T yylen;
for (yylen = 0; yystr[yylen]; yylen++)
continue;
return yylen;
}
# endif
# endif
# ifndef yystpcpy
# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
# define yystpcpy stpcpy
# else
/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
YYDEST. */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static char *
yystpcpy (char *yydest, const char *yysrc)
#else
static char *
yystpcpy (yydest, yysrc)
char *yydest;
const char *yysrc;
#endif
{
char *yyd = yydest;
const char *yys = yysrc;
while ((*yyd++ = *yys++) != '\0')
continue;
return yyd - 1;
}
# endif
# endif
# ifndef yytnamerr
/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
quotes and backslashes, so that it's suitable for yyerror. The
heuristic is that double-quoting is unnecessary unless the string
contains an apostrophe, a comma, or backslash (other than
backslash-backslash). YYSTR is taken from yytname. If YYRES is
null, do not copy; instead, return the length of what the result
would have been. */
static YYSIZE_T
yytnamerr (char *yyres, const char *yystr)
{
if (*yystr == '"')
{
YYSIZE_T yyn = 0;
char const *yyp = yystr;
for (;;)
switch (*++yyp)
{
case '\'':
case ',':
goto do_not_strip_quotes;
case '\\':
if (*++yyp != '\\')
goto do_not_strip_quotes;
/* Fall through. */
default:
if (yyres)
yyres[yyn] = *yyp;
yyn++;
break;
case '"':
if (yyres)
yyres[yyn] = '\0';
return yyn;
}
do_not_strip_quotes: ;
}
if (! yyres)
return yystrlen (yystr);
return yystpcpy (yyres, yystr) - yyres;
}
# endif
/* Copy into YYRESULT an error message about the unexpected token
YYCHAR while in state YYSTATE. Return the number of bytes copied,
including the terminating null byte. If YYRESULT is null, do not
copy anything; just return the number of bytes that would be
copied. As a special case, return 0 if an ordinary "syntax error"
message will do. Return YYSIZE_MAXIMUM if overflow occurs during
size calculation. */
static YYSIZE_T
yysyntax_error (char *yyresult, int yystate, int yychar)
{
int yyn = yypact[yystate];
if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
return 0;
else
{
int yytype = YYTRANSLATE (yychar);
YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
YYSIZE_T yysize = yysize0;
YYSIZE_T yysize1;
int yysize_overflow = 0;
enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
int yyx;
# if 0
/* This is so xgettext sees the translatable formats that are
constructed on the fly. */
YY_("syntax error, unexpected %s");
YY_("syntax error, unexpected %s, expecting %s");
YY_("syntax error, unexpected %s, expecting %s or %s");
YY_("syntax error, unexpected %s, expecting %s or %s or %s");
YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
# endif
char *yyfmt;
char const *yyf;
static char const yyunexpected[] = "syntax error, unexpected %s";
static char const yyexpecting[] = ", expecting %s";
static char const yyor[] = " or %s";
char yyformat[sizeof yyunexpected
+ sizeof yyexpecting - 1
+ ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
* (sizeof yyor - 1))];
char const *yyprefix = yyexpecting;
/* Start YYX at -YYN if negative to avoid negative indexes in
YYCHECK. */
int yyxbegin = yyn < 0 ? -yyn : 0;
/* Stay within bounds of both yycheck and yytname. */
int yychecklim = YYLAST - yyn + 1;
int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
int yycount = 1;
yyarg[0] = yytname[yytype];
yyfmt = yystpcpy (yyformat, yyunexpected);
for (yyx = yyxbegin; yyx < yyxend; ++yyx)
if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
{
if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
{
yycount = 1;
yysize = yysize0;
yyformat[sizeof yyunexpected - 1] = '\0';
break;
}
yyarg[yycount++] = yytname[yyx];
yysize1 = yysize + yytnamerr (0, yytname[yyx]);
yysize_overflow |= (yysize1 < yysize);
yysize = yysize1;
yyfmt = yystpcpy (yyfmt, yyprefix);
yyprefix = yyor;
}
yyf = YY_(yyformat);
yysize1 = yysize + yystrlen (yyf);
yysize_overflow |= (yysize1 < yysize);
yysize = yysize1;
if (yysize_overflow)
return YYSIZE_MAXIMUM;
if (yyresult)
{
/* Avoid sprintf, as that infringes on the user's name space.
Don't have undefined behavior even if the translation
produced a string with the wrong number of "%s"s. */
char *yyp = yyresult;
int yyi = 0;
while ((*yyp = *yyf) != '\0')
{
if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
{
yyp += yytnamerr (yyp, yyarg[yyi++]);
yyf += 2;
}
else
{
yyp++;
yyf++;
}
}
}
return yysize;
}
}
#endif /* YYERROR_VERBOSE */
/*-----------------------------------------------.
| Release the memory associated to this symbol. |
`-----------------------------------------------*/
/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
#else
static void
yydestruct (yymsg, yytype, yyvaluep)
const char *yymsg;
int yytype;
YYSTYPE *yyvaluep;
#endif
{
YYUSE (yyvaluep);
if (!yymsg)
yymsg = "Deleting";
YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
switch (yytype)
{
default:
break;
}
}
/* Prevent warnings from -Wmissing-prototypes. */
#ifdef YYPARSE_PARAM
#if defined __STDC__ || defined __cplusplus
int yyparse (void *YYPARSE_PARAM);
#else
int yyparse ();
#endif
#else /* ! YYPARSE_PARAM */
#if defined __STDC__ || defined __cplusplus
int yyparse (void);
#else
int yyparse ();
#endif
#endif /* ! YYPARSE_PARAM */
/* The lookahead symbol. */
int yychar;
/* The semantic value of the lookahead symbol. */
YYSTYPE yylval;
/* Number of syntax errors so far. */
int yynerrs;
/*-------------------------.
| yyparse or yypush_parse. |
`-------------------------*/
#ifdef YYPARSE_PARAM
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void *YYPARSE_PARAM)
#else
int
yyparse (YYPARSE_PARAM)
void *YYPARSE_PARAM;
#endif
#else /* ! YYPARSE_PARAM */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void)
#else
int
yyparse ()
#endif
#endif
{
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
/* The stacks and their tools:
`yyss': related to states.
`yyvs': related to semantic values.
Refer to the stacks thru separate pointers, to allow yyoverflow
to reallocate them elsewhere. */
/* The state stack. */
yytype_int16 yyssa[YYINITDEPTH];
yytype_int16 *yyss;
yytype_int16 *yyssp;
/* The semantic value stack. */
YYSTYPE yyvsa[YYINITDEPTH];
YYSTYPE *yyvs;
YYSTYPE *yyvsp;
YYSIZE_T yystacksize;
int yyn;
int yyresult;
/* Lookahead token as an internal (translated) token number. */
int yytoken;
/* The variables used to return semantic value and location from the
action routines. */
YYSTYPE yyval;
#if YYERROR_VERBOSE
/* Buffer for error messages, and its allocated size. */
char yymsgbuf[128];
char *yymsg = yymsgbuf;
YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
#endif
#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
/* The number of symbols on the RHS of the reduced rule.
Keep to zero when no symbol should be popped. */
int yylen = 0;
yytoken = 0;
yyss = yyssa;
yyvs = yyvsa;
yystacksize = YYINITDEPTH;
YYDPRINTF ((stderr, "Starting parse\n"));
yystate = 0;
yyerrstatus = 0;
yynerrs = 0;
yychar = YYEMPTY; /* Cause a token to be read. */
/* Initialize stack pointers.
Waste one element of value and location stack
so that they stay on the same level as the state stack.
The wasted elements are never initialized. */
yyssp = yyss;
yyvsp = yyvs;
goto yysetstate;
/*------------------------------------------------------------.
| yynewstate -- Push a new state, which is found in yystate. |
`------------------------------------------------------------*/
yynewstate:
/* In all cases, when you get here, the value and location stacks
have just been pushed. So pushing a state here evens the stacks. */
yyssp++;
yysetstate:
*yyssp = yystate;
if (yyss + yystacksize - 1 <= yyssp)
{
/* Get the current used size of the three stacks, in elements. */
YYSIZE_T yysize = yyssp - yyss + 1;
#ifdef yyoverflow
{
/* Give user a chance to reallocate the stack. Use copies of
these so that the &'s don't force the real ones into
memory. */
YYSTYPE *yyvs1 = yyvs;
yytype_int16 *yyss1 = yyss;
/* Each stack pointer address is followed by the size of the
data in use in that stack, in bytes. This used to be a
conditional around just the two extra args, but that might
be undefined if yyoverflow is a macro. */
yyoverflow (YY_("memory exhausted"),
&yyss1, yysize * sizeof (*yyssp),
&yyvs1, yysize * sizeof (*yyvsp),
&yystacksize);
yyss = yyss1;
yyvs = yyvs1;
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
goto yyexhaustedlab;
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
yystacksize = YYMAXDEPTH;
{
yytype_int16 *yyss1 = yyss;
union yyalloc *yyptr =
(union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
if (! yyptr)
goto yyexhaustedlab;
YYSTACK_RELOCATE (yyss_alloc, yyss);
YYSTACK_RELOCATE (yyvs_alloc, yyvs);
# undef YYSTACK_RELOCATE
if (yyss1 != yyssa)
YYSTACK_FREE (yyss1);
}
# endif
#endif /* no yyoverflow */
yyssp = yyss + yysize - 1;
yyvsp = yyvs + yysize - 1;
YYDPRINTF ((stderr, "Stack size increased to %lu\n",
(unsigned long int) yystacksize));
if (yyss + yystacksize - 1 <= yyssp)
YYABORT;
}
YYDPRINTF ((stderr, "Entering state %d\n", yystate));
if (yystate == YYFINAL)
YYACCEPT;
goto yybackup;
/*-----------.
| yybackup. |
`-----------*/
yybackup:
/* Do appropriate processing given the current state. Read a
lookahead token if we need one and don't already have one. */
/* First try to decide what to do without reference to lookahead token. */
yyn = yypact[yystate];
if (yyn == YYPACT_NINF)
goto yydefault;
/* Not known => get a lookahead token if don't already have one. */
/* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
if (yychar == YYEMPTY)
{
YYDPRINTF ((stderr, "Reading a token: "));
yychar = YYLEX;
}
if (yychar <= YYEOF)
{
yychar = yytoken = YYEOF;
YYDPRINTF ((stderr, "Now at end of input.\n"));
}
else
{
yytoken = YYTRANSLATE (yychar);
YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
}
/* If the proper action on seeing token YYTOKEN is to reduce or to
detect an error, take that action. */
yyn += yytoken;
if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
goto yydefault;
yyn = yytable[yyn];
if (yyn <= 0)
{
if (yyn == 0 || yyn == YYTABLE_NINF)
goto yyerrlab;
yyn = -yyn;
goto yyreduce;
}
/* Count tokens shifted since error; after three, turn off error
status. */
if (yyerrstatus)
yyerrstatus--;
/* Shift the lookahead token. */
YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
/* Discard the shifted token. */
yychar = YYEMPTY;
yystate = yyn;
*++yyvsp = yylval;
goto yynewstate;
/*-----------------------------------------------------------.
| yydefault -- do the default action for the current state. |
`-----------------------------------------------------------*/
yydefault:
yyn = yydefact[yystate];
if (yyn == 0)
goto yyerrlab;
goto yyreduce;
/*-----------------------------.
| yyreduce -- Do a reduction. |
`-----------------------------*/
yyreduce:
/* yyn is the number of a rule to reduce with. */
yylen = yyr2[yyn];
/* If YYLEN is nonzero, implement the default value of the action:
`$$ = $1'.
Otherwise, the following line sets YYVAL to garbage.
This behavior is undocumented and Bison
users should not rely upon it. Assigning to YYVAL
unconditionally makes the parser a bit smaller, and it avoids a
GCC warning that YYVAL may be used uninitialized. */
yyval = yyvsp[1-yylen];
YY_REDUCE_PRINT (yyn);
switch (yyn)
{
case 4:
/* Line 1455 of yacc.c */
#line 142 "bison.y"
{ emit("STMT"); ;}
break;
case 5:
/* Line 1455 of yacc.c */
#line 146 "bison.y"
{ emit_select((yyvsp[(1) - (7)].strval), (yyvsp[(6) - (7)].strval), (yyvsp[(7) - (7)].intval)); ;}
break;
case 6:
/* Line 1455 of yacc.c */
#line 148 "bison.y"
{ emit_load((yyvsp[(1) - (12)].strval), (yyvsp[(4) - (12)].strval), (yyvsp[(11) - (12)].intval), (yyvsp[(7) - (12)].strval)); ;}
break;
case 7:
/* Line 1455 of yacc.c */
#line 150 "bison.y"
{ emit_load_binary((yyvsp[(1) - (9)].strval), (yyvsp[(4) - (9)].strval), (yyvsp[(8) - (9)].intval)); ;}
break;
case 8:
/* Line 1455 of yacc.c */
#line 152 "bison.y"
{ emit_filter((yyvsp[(1) - (5)].strval), (yyvsp[(4) - (5)].strval), (yyvsp[(5) - (5)].intval));;}
break;
case 9:
/* Line 1455 of yacc.c */
#line 154 "bison.y"
{ emit_order((yyvsp[(1) - (6)].strval), (yyvsp[(4) - (6)].strval), (yyvsp[(6) - (6)].intval));;}
break;
case 10:
/* Line 1455 of yacc.c */
#line 156 "bison.y"
{ emit_join((yyvsp[(1) - (7)].strval),(yyvsp[(6) - (7)].strval)); ;}
break;
case 11:
/* Line 1455 of yacc.c */
#line 158 "bison.y"
{ emit_store((yyvsp[(2) - (9)].strval),(yyvsp[(4) - (9)].strval),(yyvsp[(7) - (9)].strval)); ;}
break;
case 12:
/* Line 1455 of yacc.c */
#line 160 "bison.y"
{ emit_store_binary((yyvsp[(2) - (6)].strval),(yyvsp[(4) - (6)].strval)); ;}
break;
case 13:
/* Line 1455 of yacc.c */
#line 164 "bison.y"
{ emit_name((yyvsp[(1) - (1)].strval)); ;}
break;
case 14:
/* Line 1455 of yacc.c */
#line 165 "bison.y"
{ emit("FIELDNAME %s.%s", (yyvsp[(1) - (3)].strval), (yyvsp[(3) - (3)].strval)); ;}
break;
case 15:
/* Line 1455 of yacc.c */
#line 166 "bison.y"
{ emit("USERVAR %s", (yyvsp[(1) - (1)].strval)); ;}
break;
case 16:
/* Line 1455 of yacc.c */
#line 167 "bison.y"
{ emit_string((yyvsp[(1) - (1)].strval)); ;}
break;
case 17:
/* Line 1455 of yacc.c */
#line 168 "bison.y"
{ emit_number((yyvsp[(1) - (1)].intval)); ;}
break;
case 18:
/* Line 1455 of yacc.c */
#line 169 "bison.y"
{ emit_float((yyvsp[(1) - (1)].floatval)); ;}
break;
case 19:
/* Line 1455 of yacc.c */
#line 170 "bison.y"
{ emit_decimal((yyvsp[(1) - (1)].intval)); ;}
break;
case 20:
/* Line 1455 of yacc.c */
#line 171 "bison.y"
{ emit("BOOL %d", (yyvsp[(1) - (1)].intval)); ;}
break;
case 21:
/* Line 1455 of yacc.c */
#line 172 "bison.y"
{ emit_varchar((yyvsp[(1) - (9)].strval), (yyvsp[(3) - (9)].intval), (yyvsp[(6) - (9)].strval), (yyvsp[(8) - (9)].intval));;}
break;
case 22:
/* Line 1455 of yacc.c */
#line 173 "bison.y"
{ emit_var((yyvsp[(1) - (6)].strval), (yyvsp[(3) - (6)].intval), (yyvsp[(6) - (6)].strval));;}
break;
case 23:
/* Line 1455 of yacc.c */
#line 174 "bison.y"
{ emit_var_asc((yyvsp[(1) - (2)].strval));;}
break;
case 24:
/* Line 1455 of yacc.c */
#line 175 "bison.y"
{ emit_var_desc((yyvsp[(1) - (2)].strval));;}
break;
case 25:
/* Line 1455 of yacc.c */
#line 176 "bison.y"
{ emit_count(); ;}
break;
case 26:
/* Line 1455 of yacc.c */
#line 177 "bison.y"
{ emit_sum(); ;}
break;
case 27:
/* Line 1455 of yacc.c */
#line 178 "bison.y"
{ emit_average(); ;}
break;
case 28:
/* Line 1455 of yacc.c */
#line 179 "bison.y"
{ emit_min(); ;}
break;
case 29:
/* Line 1455 of yacc.c */
#line 180 "bison.y"
{ emit_max(); ;}
break;
case 30:
/* Line 1455 of yacc.c */
#line 184 "bison.y"
{ emit_add(); ;}
break;
case 31:
/* Line 1455 of yacc.c */
#line 185 "bison.y"
{ emit_minus(); ;}
break;
case 32:
/* Line 1455 of yacc.c */
#line 186 "bison.y"
{ emit_mul(); ;}
break;
case 33:
/* Line 1455 of yacc.c */
#line 187 "bison.y"
{ emit_div(); ;}
break;
case 34:
/* Line 1455 of yacc.c */
#line 188 "bison.y"
{ emit("MOD"); ;}
break;
case 35:
/* Line 1455 of yacc.c */
#line 189 "bison.y"
{ emit("MOD"); ;}
break;
case 36:
/* Line 1455 of yacc.c */
#line 191 "bison.y"
{ emit_and(); ;}
break;
case 37:
/* Line 1455 of yacc.c */
#line 192 "bison.y"
{ emit_eq(); ;}
break;
case 38:
/* Line 1455 of yacc.c */
#line 193 "bison.y"
{ emit_or(); ;}
break;
case 39:
/* Line 1455 of yacc.c */
#line 194 "bison.y"
{ emit("XOR"); ;}
break;
case 40:
/* Line 1455 of yacc.c */
#line 195 "bison.y"
{ emit("SHIFT %s", (yyvsp[(2) - (3)].subtok)==1?"left":"right"); ;}
break;
case 41:
/* Line 1455 of yacc.c */
#line 196 "bison.y"
{ emit("NOT"); ;}
break;
case 42:
/* Line 1455 of yacc.c */
#line 197 "bison.y"
{ emit("NOT"); ;}
break;
case 43:
/* Line 1455 of yacc.c */
#line 198 "bison.y"
{ emit_cmp((yyvsp[(2) - (3)].subtok)); ;}
break;
case 44:
/* Line 1455 of yacc.c */
#line 200 "bison.y"
{ emit("CMPSELECT %d", (yyvsp[(2) - (5)].subtok)); ;}
break;
case 45:
/* Line 1455 of yacc.c */
#line 201 "bison.y"
{emit("EXPR");;}
break;
case 46:
/* Line 1455 of yacc.c */
#line 205 "bison.y"
{ emit("ISBOOL %d", (yyvsp[(3) - (3)].intval)); ;}
break;
case 47:
/* Line 1455 of yacc.c */
#line 206 "bison.y"
{ emit("ISBOOL %d", (yyvsp[(4) - (4)].intval)); emit("NOT"); ;}
break;
case 48:
/* Line 1455 of yacc.c */
#line 209 "bison.y"
{ /* nil */
(yyval.intval) = 0;
;}
break;
case 49:
/* Line 1455 of yacc.c */
#line 212 "bison.y"
{ (yyval.intval) = (yyvsp[(3) - (3)].intval);}
break;
case 50:
/* Line 1455 of yacc.c */
#line 216 "bison.y"
{ (yyval.intval) = 1; emit_sel_name((yyvsp[(3) - (3)].strval));;}
break;
case 51:
/* Line 1455 of yacc.c */
#line 217 "bison.y"
{ (yyval.intval) = (yyvsp[(1) - (5)].intval) + 1; emit_sel_name((yyvsp[(5) - (5)].strval));;}
break;
case 52:
/* Line 1455 of yacc.c */
#line 221 "bison.y"
{ (yyval.intval) = 1; ;}
break;
case 53:
/* Line 1455 of yacc.c */
#line 222 "bison.y"
{(yyval.intval) = (yyvsp[(1) - (3)].intval) + 1; ;}
break;
case 54:
/* Line 1455 of yacc.c */
#line 226 "bison.y"
{ (yyval.intval) = 1; ;}
break;
case 55:
/* Line 1455 of yacc.c */
#line 227 "bison.y"
{ (yyval.intval) = 1 + (yyvsp[(3) - (3)].intval); ;}
break;
case 56:
/* Line 1455 of yacc.c */
#line 230 "bison.y"
{ /* nil */
(yyval.intval) = 0
;}
break;
case 58:
/* Line 1455 of yacc.c */
#line 235 "bison.y"
{ emit("FILTER BY"); ;}
break;
case 59:
/* Line 1455 of yacc.c */
#line 238 "bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(2) - (4)].strval));;}
break;
case 60:
/* Line 1455 of yacc.c */
#line 239 "bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(2) - (5)].strval)); ;}
break;
case 61:
/* Line 1455 of yacc.c */
#line 241 "bison.y"
{ /* nil */
(yyval.intval) = 0
;}
break;
case 62:
/* Line 1455 of yacc.c */
#line 244 "bison.y"
{ emit_limit((yyvsp[(2) - (2)].intval)); ;}
break;
/* Line 1455 of yacc.c */
#line 2023 "bison.cu"
default: break;
}
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
*++yyvsp = yyval;
/* Now `shift' the result of the reduction. Determine what state
that goes to, based on the state we popped back to and the rule
number reduced by. */
yyn = yyr1[yyn];
yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
yystate = yytable[yystate];
else
yystate = yydefgoto[yyn - YYNTOKENS];
goto yynewstate;
/*------------------------------------.
| yyerrlab -- here on detecting error |
`------------------------------------*/
yyerrlab:
/* If not already recovering from an error, report this error. */
if (!yyerrstatus)
{
++yynerrs;
#if ! YYERROR_VERBOSE
yyerror (YY_("syntax error"));
#else
{
YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
{
YYSIZE_T yyalloc = 2 * yysize;
if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
yyalloc = YYSTACK_ALLOC_MAXIMUM;
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
yymsg = (char *) YYSTACK_ALLOC (yyalloc);
if (yymsg)
yymsg_alloc = yyalloc;
else
{
yymsg = yymsgbuf;
yymsg_alloc = sizeof yymsgbuf;
}
}
if (0 < yysize && yysize <= yymsg_alloc)
{
(void) yysyntax_error (yymsg, yystate, yychar);
yyerror (yymsg);
}
else
{
yyerror (YY_("syntax error"));
if (yysize != 0)
goto yyexhaustedlab;
}
}
#endif
}
if (yyerrstatus == 3)
{
/* If just tried and failed to reuse lookahead token after an
error, discard it. */
if (yychar <= YYEOF)
{
/* Return failure if at end of input. */
if (yychar == YYEOF)
YYABORT;
}
else
{
yydestruct ("Error: discarding",
yytoken, &yylval);
yychar = YYEMPTY;
}
}
/* Else will try to reuse lookahead token after shifting the error
token. */
goto yyerrlab1;
/*---------------------------------------------------.
| yyerrorlab -- error raised explicitly by YYERROR. |
`---------------------------------------------------*/
yyerrorlab:
/* Pacify compilers like GCC when the user code never invokes
YYERROR and the label yyerrorlab therefore never appears in user
code. */
if (/*CONSTCOND*/ 0)
goto yyerrorlab;
/* Do not reclaim the symbols of the rule which action triggered
this YYERROR. */
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
yystate = *yyssp;
goto yyerrlab1;
/*-------------------------------------------------------------.
| yyerrlab1 -- common code for both syntax error and YYERROR. |
`-------------------------------------------------------------*/
yyerrlab1:
yyerrstatus = 3; /* Each real token shifted decrements this. */
for (;;)
{
yyn = yypact[yystate];
if (yyn != YYPACT_NINF)
{
yyn += YYTERROR;
if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
{
yyn = yytable[yyn];
if (0 < yyn)
break;
}
}
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
YYABORT;
yydestruct ("Error: popping",
yystos[yystate], yyvsp);
YYPOPSTACK (1);
yystate = *yyssp;
YY_STACK_PRINT (yyss, yyssp);
}
*++yyvsp = yylval;
/* Shift the error token. */
YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
yystate = yyn;
goto yynewstate;
/*-------------------------------------.
| yyacceptlab -- YYACCEPT comes here. |
`-------------------------------------*/
yyacceptlab:
yyresult = 0;
goto yyreturn;
/*-----------------------------------.
| yyabortlab -- YYABORT comes here. |
`-----------------------------------*/
yyabortlab:
yyresult = 1;
goto yyreturn;
#if !defined(yyoverflow) || YYERROR_VERBOSE
/*-------------------------------------------------.
| yyexhaustedlab -- memory exhaustion comes here. |
`-------------------------------------------------*/
yyexhaustedlab:
yyerror (YY_("memory exhausted"));
yyresult = 2;
/* Fall through. */
#endif
yyreturn:
if (yychar != YYEMPTY)
yydestruct ("Cleanup: discarding lookahead",
yytoken, &yylval);
/* Do not reclaim the symbols of the rule which action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
YY_STACK_PRINT (yyss, yyssp);
while (yyssp != yyss)
{
yydestruct ("Cleanup: popping",
yystos[*yyssp], yyvsp);
YYPOPSTACK (1);
}
#ifndef yyoverflow
if (yyss != yyssa)
YYSTACK_FREE (yyss);
#endif
#if YYERROR_VERBOSE
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
#endif
/* Make sure YYID is used. */
return YYID (yyresult);
}
/* Line 1675 of yacc.c */
#line 247 "bison.y"
#include "filter.hip"
#include "select.cu"
#include "merge.cu"
#include "zone_map.cu"
FILE *file_pointer;
queue<string> namevars;
queue<string> typevars;
queue<int> sizevars;
queue<int> cols;
queue<unsigned int> j_col_count;
unsigned int sel_count = 0;
unsigned int join_cnt = 0;
int join_col_cnt = 0;
unsigned int eqq = 0;
stack<string> op_join;
unsigned int statement_count = 0;
map<string,unsigned int> stat;
bool scan_state = 0;
string separator, f_file;
CUDPPHandle theCudpp;
using namespace thrust::placeholders;
void emit_name(char *name)
{
op_type.push("NAME");
op_value.push(name);
}
void emit_limit(int val)
{
op_nums.push(val);
}
void emit_string(char *str)
{ // strip the surrounding quote characters
string sss(str,1, strlen(str)-2);
op_type.push("STRING");
op_value.push(sss);
}
void emit_number(int_type val)
{
op_type.push("NUMBER");
op_nums.push(val);
}
void emit_float(float_type val)
{
op_type.push("FLOAT");
op_nums_f.push(val);
}
void emit_decimal(float_type val)
{
op_type.push("DECIMAL");
op_nums_f.push(val);
}
void emit_mul()
{
op_type.push("MUL");
}
void emit_add()
{
op_type.push("ADD");
}
void emit_div()
{
op_type.push("DIV");
}
void emit_and()
{
op_type.push("AND");
if (join_col_cnt == -1)
join_col_cnt++;
join_col_cnt++;
eqq = 0;
}
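// emit_eq fires once per equality predicate in a join condition; together with
// the AND counter maintained by emit_and (join_col_cnt) it derives how many key
// columns the current join uses and records that count in j_col_count.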
void emit_eq()
{
//op_type.push("JOIN");
eqq++;
join_cnt++;
if(eqq == join_col_cnt+1) {
j_col_count.push(join_col_cnt+1);
join_col_cnt = -1;
}
else if (join_col_cnt == -1 )
j_col_count.push(1);
}
void emit_or()
{
op_type.push("OR");
}
void emit_minus()
{
op_type.push("MINUS");
}
void emit_cmp(int val)
{
op_type.push("CMP");
op_nums.push(val);
}
void emit(char *s, ...)
{
}
void emit_var(char *s, int c, char *f)
{
namevars.push(s);
typevars.push(f);
sizevars.push(0);
cols.push(c);
}
void emit_var_asc(char *s)
{
op_type.push(s);
op_value.push("ASC");
}
void emit_var_desc(char *s)
{
op_type.push(s);
op_value.push("DESC");
}
void emit_varchar(char *s, int c, char *f, int d)
{
namevars.push(s);
typevars.push(f);
sizevars.push(d);
cols.push(c);
}
void emit_sel_name(char *s)
{
op_type.push("emit sel_name");
op_value.push(s);
sel_count++;
}
void emit_count()
{
op_type.push("COUNT");
}
void emit_sum()
{
op_type.push("SUM");
}
void emit_average()
{
op_type.push("AVG");
}
void emit_min()
{
op_type.push("MIN");
}
void emit_max()
{
op_type.push("MAX");
}
void emit_join_tab(char *s)
{
op_join.push(s);
};
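// order_inplace sorts a CudaSet's rows in place: it builds a permutation index
// by sorting on each key column in exe_type, then gathers every column named in
// field_names through that permutation, reusing one scratch buffer sized for
// the largest participating column.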
void order_inplace(CudaSet* a, stack<string> exe_type, set<string> field_names, unsigned int segment)
{
std::clock_t start1 = std::clock();
unsigned int sz = a->mRecCount;
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(sz);
thrust::sequence(permutation, permutation+sz,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
void* temp;
// find the largest mRecCount among the data sources that own the columns in field_names
unsigned int maxSize = 0;
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
CudaSet *t = varNames[setMap[*it]];
//cout << "MAX of " << setMap[*it] << " = " << t->mRecCount << endl;
if(t->mRecCount > maxSize)
maxSize = t->mRecCount;
};
//cout << "max size " << maxSize << endl;
//cout << "sort alloc " << maxSize << endl;
//cout << "order mem " << getFreeMem() << endl;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, maxSize*float_size));
for(int i=0; !exe_type.empty(); ++i, exe_type.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if ((a->type)[colInd] == 0)
update_permutation(a->d_columns_int[a->type_index[colInd]], raw_ptr, sz, "ASC", (int_type*)temp);
else if ((a->type)[colInd] == 1)
update_permutation(a->d_columns_float[a->type_index[colInd]], raw_ptr, sz,"ASC", (float_type*)temp);
else {
CudaChar* c = a->h_columns_cuda_char[a->type_index[colInd]];
for(int j=(c->mColumnCount)-1; j>=0 ; j--)
update_permutation((c->d_columns)[j], raw_ptr, sz, "ASC", (char*)temp);
};
};
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
int i = a->columnNames[*it];
if ((a->type)[i] == 0)
apply_permutation(a->d_columns_int[a->type_index[i]], raw_ptr, sz, (int_type*)temp);
else if ((a->type)[i] == 1)
apply_permutation(a->d_columns_float[a->type_index[i]], raw_ptr, sz, (float_type*)temp);
else {
CudaChar* c = a->h_columns_cuda_char[a->type_index[i]];
for(int j=(c->mColumnCount)-1; j>=0 ; j--)
apply_permutation((c->d_columns)[j], raw_ptr, sz, (char*)temp);
};
};
hipFree(temp);
thrust::device_free(permutation);
}
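// emit_join implements the JOIN statement. During the initial scan pass
// (scan_state == 0) it only records which variables the statement depends on;
// on the execution pass it materializes the right-hand table on the GPU, sorts
// it on the join key if it is not already sorted, and then splits the 64-bit
// key space into unsigned-int ranges, building one CUDPP hash table per range
// for the actual probe.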
void emit_join(char *s, char *j1)
{
string j2 = op_join.top();
op_join.pop();
statement_count++;
if (scan_state == 0) {
if (stat.find(j1) == stat.end()) {
cout << "Join : couldn't find variable " << j1 << endl;
exit(1);
};
if (stat.find(j2) == stat.end()) {
cout << "Join : couldn't find variable " << j2 << endl;
exit(1);
};
stat[s] = statement_count;
stat[j1] = statement_count;
stat[j2] = statement_count;
return;
};
if(varNames.find(j1) == varNames.end() || varNames.find(j2) == varNames.end()) {
clean_queues();
return;
};
CudaSet* left = varNames.find(j1)->second;
CudaSet* right = varNames.find(j2)->second;
queue<string> op_sel;
queue<string> op_sel_as;
for(int i=0; i < sel_count; i++) {
op_sel.push(op_value.front());
op_value.pop();
op_sel_as.push(op_value.front());
op_value.pop();
};
string f1 = op_value.front();
op_value.pop();
string f2 = op_value.front();
op_value.pop();
cout << "JOIN " << s << " " << getFreeMem() << endl;
std::clock_t start1 = std::clock();
CudaSet* c = new CudaSet(right,left,0,op_sel, op_sel_as);
if (left->mRecCount == 0 || right->mRecCount == 0) {
c = new CudaSet(left,right,0, op_sel, op_sel_as);
varNames[s] = c;
clean_queues();
return;
};
unsigned int colInd1 = (left->columnNames).find(f1)->second;
unsigned int colInd2 = (right->columnNames).find(f2)->second;
if ((left->type)[colInd1] != 0 || (right->type)[colInd2] != 0) {
cout << "Right now only integer joins are supported " << endl;
exit(0);
};
set<string> field_names;
stack<string> exe_type;
exe_type.push(f2);
field_names.insert(f2);
// need to allocate all right columns
queue<string> cc;
    queue<string> c1(op_sel);
while(!c1.empty()) {
if(right->columnNames.find(c1.front()) != right->columnNames.end()) {
if(f2 != c1.front())
cc.push(c1.front());
};
c1.pop();
};
cc.push(f2);
if(right->prm.size())
allocColumns(right, cc);
unsigned int rcount;
if(!right->prm.empty()) {
rcount = std::accumulate(right->prm_count.begin(), right->prm_count.end(), 0 );
}
else
rcount = right->mRecCount;
//cout << "rcount = " << rcount << endl;
queue<string> ct(cc);
while(!ct.empty()) {
right->allocColumnOnDevice(right->columnNames[ct.front()], rcount);
ct.pop();
};
unsigned int cnt_r = 0;
if(right->prm.size() == 0) {
//copy all records
for(unsigned int i = 0; i < right->mColumnCount; i++)
right->CopyColumnToGpu(i);
cnt_r = right->mRecCount;
}
else {
//copy and gather all records
for(unsigned int i = 0; i < right->segCount; i++) {
copyColumns(right, cc, i, cnt_r);
cnt_r = cnt_r + right->prm_count[i];
};
};
unsigned int tt;
if(left->maxRecs > rcount)
tt = left->maxRecs;
else
tt = rcount;
    // the right join column must be sorted; if it is not, sort it and keep the
    // permutation so the other right columns can be reordered to match
bool sorted = thrust::is_sorted(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r);
thrust::device_vector<unsigned int> v(cnt_r);
thrust::sequence(v.begin(),v.end(),0,1);
thrust::device_ptr<int_type> d_tmp = thrust::device_malloc<int_type>(tt);
if(!sorted) {
thrust::sort_by_key(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r, v.begin());
for(unsigned int i = 0; i < right->mColumnCount; i++) {
if(i != colInd2) {
if(right->type[i] == 0) {
thrust::gather(v.begin(), v.end(), right->d_columns_int[right->type_index[i]].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt_r, right->d_columns_int[right->type_index[i]].begin());
}
else if(right->type[i] == 1) {
thrust::gather(v.begin(), v.end(), right->d_columns_float[right->type_index[i]].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt_r, right->d_columns_float[right->type_index[i]].begin());
}
};
};
thrust::sequence(v.begin(),v.end(),0,1);
};
thrust::device_free(d_tmp);
while(!cc.empty())
cc.pop();
cc.push(f1);
allocColumns(left, cc);
//cout << "successfully loaded l && r " << cnt_l << " " << cnt_r << " " << getFreeMem() << endl;
thrust::device_vector<unsigned int> d_res1;
thrust::device_vector<unsigned int> d_res2;
thrust::device_ptr<uint2> res = thrust::device_malloc<uint2>(left->maxRecs);
unsigned int cnt_l, res_count, tot_count = 0, offset = 0, k = 0;
queue<string> lc(cc);
curr_segment = 10000000;
CUDPPResult result;
    // CUDPP hash tables use 32-bit keys, so 64-bit join values are split across
    // several hash tables, each covering one 32-bit value range; first work out
    // how many tables are needed
int_type max_val = right->d_columns_int[right->type_index[colInd2]][rcount-1];
unsigned int tab_count = (max_val / std::numeric_limits<unsigned int>::max()) + 1;
vector<CUDPPHandle> tabs;
vector<unsigned int> tab_nums;
unsigned int v_offset = 0;
int_type min_v, max_v;
thrust::device_ptr<unsigned int> d_r = thrust::device_malloc<unsigned int>(tt);
for(unsigned int i = 0; i < tab_count; i ++) {
// find out rcount
min_v = i*std::numeric_limits<unsigned int>::max();
max_v = min_v + std::numeric_limits<unsigned int>::max();
unsigned int loc_count = thrust::count_if(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + rcount,
_1 > min_v && _1 <= max_v );
CUDPPHandle hash_table_handle;
CUDPPHashTableConfig config;
config.type = CUDPP_MULTIVALUE_HASH_TABLE;
config.kInputSize = loc_count;
config.space_usage = 1.5f;
//cout << "creating table with " << loc_count << " " << getFreeMem() << endl;
result = cudppHashTable(theCudpp, &hash_table_handle, &config);
//if (result == CUDPP_SUCCESS)
// cout << "hash table created " << getFreeMem() << endl;
//cout << "INSERT " << " " << loc_count << " " << getFreeMem() << endl;
if(i != 0)
thrust::transform(right->d_columns_int[right->type_index[colInd2]].begin() + v_offset, right->d_columns_int[right->type_index[colInd2]].begin() + v_offset + loc_count,
d_r, _1 - i*std::numeric_limits<unsigned int>::max());
else
thrust::copy(right->d_columns_int[right->type_index[colInd2]].begin() + v_offset, right->d_columns_int[right->type_index[colInd2]].begin() + v_offset + loc_count, d_r);
result = cudppHashInsert(hash_table_handle, thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(v.data() + v_offset), loc_count);
//if (result == CUDPP_SUCCESS)
// cout << "hash table inserted " << getFreeMem() << endl;
v_offset = v_offset + loc_count;
tabs.push_back(hash_table_handle);
tab_nums.push_back(loc_count);
};
for (unsigned int i = 0; i < left->segCount; i++) {
cout << "segment " << i << " " << getFreeMem() << endl;
cnt_l = 0;
copyColumns(left, lc, i, cnt_l);
if(left->prm.size() == 0) {
//copy all records
cnt_l = left->mRecCount;
}
else {
cnt_l = left->prm_count[i];
};
if (cnt_l) {
unsigned int off = 0;
for(unsigned int j = 0; j < tab_count; j ++) {
if(j)
off = off + tab_nums[j-1];
thrust::device_vector<unsigned int> tc(1);
tc[0] = j;
    // when copying keys to d_r, values that fall outside this table's range must be
    // zeroed out, otherwise they would be truncated into the relevant range and match spuriously
thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0);
trans_int t(thrust::raw_pointer_cast(tc.data()),thrust::raw_pointer_cast(left->d_columns_int[left->type_index[colInd1]].data()), thrust::raw_pointer_cast(d_r));
thrust::for_each(begin, begin + cnt_l, t);
result = cudppHashRetrieve(tabs[j], thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(res), cnt_l);
if (result != CUDPP_SUCCESS)
cout << "Failed retrieve " << endl;
uint2 rr = thrust::reduce(res, res+cnt_l, make_uint2(0,0), Uint2Sum());
res_count = rr.y;
if(res_count) {
uint2_split ff(thrust::raw_pointer_cast(res),thrust::raw_pointer_cast(d_r));
thrust::for_each(begin, begin + cnt_l, ff);
thrust::exclusive_scan(d_r, d_r+cnt_l, d_r ); // addresses
tot_count = tot_count + res_count;
d_res1.resize(res_count);
d_res2.resize(res_count);
join_functor ff1(thrust::raw_pointer_cast(res),
thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(d_res1.data()),
thrust::raw_pointer_cast(d_res2.data()));
thrust::for_each(begin, begin + cnt_l, ff1);
thrust::transform(d_res2.begin(), d_res2.end(), d_res2.begin(), _1 + off);
offset = c->mRecCount;
c->resize(res_count);
queue<string> op_sel1(op_sel);
while(!op_sel1.empty()) {
while(!cc.empty())
cc.pop();
cc.push(op_sel1.front());
if(left->columnNames.find(op_sel1.front()) != left->columnNames.end()) {
// copy field's segment to device, gather it and copy to the host
unsigned int colInd = left->columnNames[op_sel1.front()];
allocColumns(left, cc);
copyColumns(left, cc, i, k);
//gather
if(left->type[colInd] == 0) {
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter(left->d_columns_int[left->type_index[colInd]].begin(), d_res1.begin());
thrust::copy(iter, iter + res_count, c->h_columns_int[c->type_index[c->columnNames[op_sel1.front()]]].begin() + offset);
}
else if(left->type[colInd] == 1) {
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter(left->d_columns_float[left->type_index[colInd]].begin(), d_res1.begin());
thrust::copy(iter, iter + res_count, c->h_columns_float[c->type_index[c->columnNames[op_sel1.front()]]].begin() + offset);
};
}
else {
unsigned int colInd = right->columnNames[op_sel1.front()];
//gather
if(right->type[colInd] == 0) {
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter(right->d_columns_int[right->type_index[colInd]].begin(), d_res2.begin());
thrust::copy(iter, iter + res_count, c->h_columns_int[c->type_index[c->columnNames[op_sel1.front()]]].begin() + offset);
}
else if(right->type[colInd] == 1) {
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter(right->d_columns_float[right->type_index[colInd]].begin(), d_res2.begin());
thrust::copy(iter, iter + res_count, c->h_columns_float[c->type_index[c->columnNames[op_sel1.front()]]].begin() + offset);
};
};
op_sel1.pop();
};
};
};
};
};
for(unsigned int i = 0; i < tab_count; i ++)
cudppDestroyHashTable(theCudpp, tabs[i]);
thrust::device_free(res);
thrust::device_free(d_r);
d_res1.resize(0);
d_res1.shrink_to_fit();
d_res2.resize(0);
d_res2.shrink_to_fit();
left->deAllocOnDevice();
right->deAllocOnDevice();
c->deAllocOnDevice();
cout << "join final end " << tot_count << " " << getFreeMem() << endl;
varNames[s] = c;
c->mRecCount = tot_count;
clean_queues();
if(stat[s] == statement_count) {
c->free();
varNames.erase(s);
};
if(stat[j1] == statement_count) {
left->free();
varNames.erase(j1);
};
if(stat[j2] == statement_count && (strcmp(j1,j2.c_str()) != 0)) {
right->free();
varNames.erase(j2);
};
std::cout<< "join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
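// ORDER BY: builds a row permutation over CudaSet `f` from the key columns and
// directions collected in op_type/op_value, stores the permuted row indices as
// the new set's prm vector (column data itself is not reordered), and registers
// the result under `s`.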
void emit_order(char *s, char *f, int e, int ll)
{
if(ll == 0)
statement_count++;
if (scan_state == 0 && ll == 0) {
if (stat.find(f) == stat.end()) {
cout << "Order : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
return;
};
if(varNames.find(f) == varNames.end() ) {
clean_queues();
return;
};
CudaSet* a = varNames.find(f)->second;
if (a->mRecCount == 0) {
if(varNames.find(s) == varNames.end())
varNames[s] = new CudaSet(0,1);
else {
CudaSet* c = varNames.find(s)->second;
c->mRecCount = 0;
};
return;
};
stack<string> exe_type, exe_value;
    cout << "order: " << s << " " << f << endl;
for(int i=0; !op_type.empty(); ++i, op_type.pop(),op_value.pop()) {
if ((op_type.front()).compare("NAME") == 0) {
exe_type.push(op_value.front());
exe_value.push("ASC");
}
else {
exe_type.push(op_type.front());
exe_value.push(op_value.front());
};
};
// initialize permutation to [0, 1, 2, ... ,N-1]
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(a->mRecCount);
thrust::sequence(permutation, permutation+(a->mRecCount));
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
CudaSet *b = a->copyDeviceStruct();
b->mRecCount = a->mRecCount;
    // collect the key columns and size a scratch buffer for this set's records
stack<string> tp(exe_type);
queue<string> op_vx;
while (!tp.empty()) {
op_vx.push(tp.top());
tp.pop();
};
unsigned int maxSize = a->mRecCount, cnt = 0;
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, maxSize*float_size));
varNames[setMap[exe_type.top()]]->oldRecCount = varNames[setMap[exe_type.top()]]->mRecCount;
allocColumns(a, op_vx);
copyColumns(a, op_vx, 0, cnt);
varNames[setMap[exe_type.top()]]->mRecCount = varNames[setMap[exe_type.top()]]->oldRecCount;
for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if ((a->type)[colInd] == 0)
update_permutation(a->d_columns_int[a->type_index[colInd]], raw_ptr, a->mRecCount, exe_value.top(), (int_type*)temp);
else if ((a->type)[colInd] == 1)
update_permutation(a->d_columns_float[a->type_index[colInd]], raw_ptr, a->mRecCount,exe_value.top(), (float_type*)temp);
else {
CudaChar* c = a->h_columns_cuda_char[a->type_index[colInd]];
for(int j=(c->mColumnCount)-1; j>=0 ; j--)
update_permutation((c->d_columns)[j], raw_ptr, a->mRecCount, exe_value.top(), (char*)temp);
};
};
// gather a's prm to b's prm
thrust::device_vector<unsigned int> p(a->mRecCount);
if(a->prm.size() != 0) {
thrust::device_vector<unsigned int> p_a(a->mRecCount);
b->prm.push_back(new unsigned int[a->mRecCount]);
b->prm_count.push_back(a->mRecCount);
b->prm_index.push_back('R');
hipMemcpy((void**)(thrust::raw_pointer_cast(p_a.data())), (void**)a->prm[0], 4*a->mRecCount, hipMemcpyHostToDevice);
thrust::gather(permutation, permutation+a->mRecCount, p_a.begin(), p.begin());
hipMemcpy((void**)b->prm[0], (void**)(thrust::raw_pointer_cast(p.data())), 4*a->mRecCount, hipMemcpyDeviceToHost);
}
else {
b->prm.push_back(new unsigned int[a->mRecCount]);
b->prm_count.push_back(a->mRecCount);
b->prm_index.push_back('R');
thrust::copy(permutation, permutation+a->mRecCount, p.begin());
hipMemcpy((void**)b->prm[0], (void**)(thrust::raw_pointer_cast(p.data())), 4*a->mRecCount, hipMemcpyDeviceToHost);
};
b->deAllocOnDevice();
a->deAllocOnDevice();
thrust::device_free(permutation);
hipFree(temp);
varNames[s] = b;
b->segCount = 1;
if (a->fact_table == 1)
b->fact_table = 1;
else
b->fact_table = 0;
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(f);
};
}
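// SELECT, optionally with GROUP BY when ll != 0: copies the referenced columns
// segment by segment, evaluates the select expressions into set `b`, and for
// grouped queries orders each segment in place, groups it, accumulates partial
// aggregates in `c` and merges them after the last segment. The result is
// registered under `s`.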
void emit_select(char *s, char *f, int ll)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(f) == stat.end()) {
cout << "Select : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
return;
};
if(varNames.find(f) == varNames.end()) {
clean_queues();
return;
};
queue<string> op_v1(op_value);
while(op_v1.size() > ll)
op_v1.pop();
stack<string> op_v2;
queue<string> op_v3;
for(int i=0; i < ll; ++i) {
op_v2.push(op_v1.front());
op_v3.push(op_v1.front());
op_v1.pop();
};
CudaSet *a;
a = varNames.find(f)->second;
if(a->mRecCount == 0) {
CudaSet *c;
c = new CudaSet(0,1);
varNames[s] = c;
clean_queues();
return;
};
cout << "SELECT " << s << " " << f << endl;
std::clock_t start1 = std::clock();
// here we need to determine the column count and composition
queue<string> op_v(op_value);
queue<string> op_vx;
set<string> field_names;
map<string,string> aliases;
string tt;
for(int i=0; !op_v.empty(); ++i, op_v.pop()) {
if(a->columnNames.find(op_v.front()) != a->columnNames.end()) {
field_names.insert(op_v.front());
if(aliases.count(op_v.front()) == 0 && aliases.size() < ll) {
tt = op_v.front();
op_v.pop();
aliases[tt] = op_v.front();
};
};
};
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
op_vx.push(*it);
};
// find out how many columns a new set will have
queue<string> op_t(op_type);
int_type col_count = 0;
for(int i=0; !op_t.empty(); ++i, op_t.pop())
if((op_t.front()).compare("emit sel_name") == 0)
col_count++;
CudaSet* b, *c;
curr_segment = 10000000;
allocColumns(a, op_vx);
unsigned int cycle_count = 1;
if(a->prm.size())
cycle_count = varNames[setMap[op_value.front()]]->segCount;
unsigned int ol_count = a->mRecCount, cnt;
varNames[setMap[op_value.front()]]->oldRecCount = varNames[setMap[op_value.front()]]->mRecCount;
b = new CudaSet(0, col_count);
bool b_set = 0, c_set = 0;
for(unsigned int i = 0; i < cycle_count; i++) { // MAIN CYCLE
cout << "cycle " << i << " select mem " << getFreeMem() << endl;
std::clock_t start2 = std::clock();
cnt = 0;
copyColumns(a, op_vx, i, cnt);
if(a->mRecCount) {
if (ll != 0) {
order_inplace(a,op_v2,field_names,i);
a->GroupBy(op_v3);
};
select(op_type,op_value,op_nums, op_nums_f,a,b, a->mRecCount);
if(!b_set) {
for ( map<string,int>::iterator it=b->columnNames.begin() ; it != b->columnNames.end(); ++it )
setMap[(*it).first] = s;
b_set = 1;
};
if (ll != 0) {
if (!c_set) {
c = new CudaSet(b->mRecCount, col_count);
c->fact_table = 1;
c->segCount = 1;
c_set = 1;
}
else {
c->resize(b->mRecCount);
};
add(c,b,op_v3);
};
};
};
a->mRecCount = ol_count;
varNames[setMap[op_value.front()]]->mRecCount = varNames[setMap[op_value.front()]]->oldRecCount;
a->deAllocOnDevice();
if (ll != 0) {
CudaSet *r = merge(c,op_v3, op_v2, aliases);
c->free();
c = r;
};
c->deAllocOnDevice();
c->maxRecs = c->mRecCount;
c->name = s;
c->keep = 1;
for ( map<string,int>::iterator it=c->columnNames.begin() ; it != c->columnNames.end(); ++it ) {
setMap[(*it).first] = s;
};
cout << "final select " << c->mRecCount << endl;
clean_queues();
if (ll != 0) {
varNames[s] = c;
b->free();
}
else
varNames[s] = b;
varNames[s]->keep = 1;
if(stat[s] == statement_count) {
varNames[s]->free();
varNames.erase(s);
};
if(stat[f] == statement_count && a->keep == 0) {
a->free();
varNames.erase(f);
};
std::cout<< "select time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
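// FILTER: evaluates the WHERE expression against each segment of CudaSet `f`.
// zone_map_check() is consulted first so segments known to match entirely or
// not at all can skip the GPU filter; otherwise the segment's columns are
// copied to the device and filter() builds the result permutation for `s`.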
void emit_filter(char *s, char *f, int e)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(f) == stat.end()) {
cout << "Filter : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
clean_queues();
return;
};
if(varNames.find(f) == varNames.end()) {
clean_queues();
return;
};
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
std::clock_t start1 = std::clock();
if(a->mRecCount == 0) {
b = new CudaSet(0,1);
}
else {
cout << "FILTER " << s << " " << f << " " << getFreeMem() << endl;
b = a->copyDeviceStruct();
b->name = s;
unsigned int cycle_count = 1, cnt = 0;
allocColumns(a, op_value);
varNames[setMap[op_value.front()]]->oldRecCount = varNames[setMap[op_value.front()]]->mRecCount;
if(a->segCount != 1)
cycle_count = varNames[setMap[op_value.front()]]->segCount;
oldCount = a->mRecCount;
thrust::device_vector<unsigned int> p(a->maxRecs);
for(unsigned int i = 0; i < cycle_count; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, a, i);
cout << "MAP CHECK " << map_check << endl;
if(map_check == 'R') {
copyColumns(a, op_value, i, cnt);
filter(op_type,op_value,op_nums, op_nums_f,a, b, i, p);
}
else {
setPrm(a,b,map_check,i);
}
};
a->mRecCount = oldCount;
varNames[setMap[op_value.front()]]->mRecCount = varNames[setMap[op_value.front()]]->oldRecCount;
cout << "filter is finished " << b->mRecCount << " " << getFreeMem() << endl;
a->deAllocOnDevice();
};
clean_queues();
if (varNames.count(s) > 0)
varNames[s]->free();
varNames[s] = b;
if(stat[s] == statement_count) {
b->free();
varNames.erase(s);
};
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(f);
};
std::cout<< "filter time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
}
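// STORE ... INTO ... USING(...): writes CudaSet `s` to the delimited text file
// `f` with separator `sep`, honoring an optional LIMIT value.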
void emit_store(char *s, char *f, char* sep)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(s) == stat.end()) {
cout << "Store : couldn't find variable " << s << endl;
exit(1);
};
stat[s] = statement_count;
return;
};
if(varNames.find(s) == varNames.end())
return;
CudaSet* a = varNames.find(s)->second;
cout << "STORE: " << s << " " << f << " " << sep << endl;
int limit = 0;
if(!op_nums.empty()) {
limit = op_nums.front();
op_nums.pop();
};
a->Store(f,sep, limit, 0);
if(stat[s] == statement_count && a->keep == 0) {
a->free();
varNames.erase(s);
};
};
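// STORE ... INTO ... BINARY: streams the pending source text file through
// LoadBigFile() chunk by chunk and appends each chunk to the binary column
// files of `f` until the whole file has been converted.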
void emit_store_binary(char *s, char *f)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(s) == stat.end()) {
cout << "Store : couldn't find variable " << s << endl;
exit(1);
};
stat[s] = statement_count;
return;
};
if(varNames.find(s) == varNames.end())
return;
CudaSet* a = varNames.find(s)->second;
if(stat[f] == statement_count)
a->deAllocOnDevice();
printf("STORE: %s %s \n", s, f);
int limit = 0;
if(!op_nums.empty()) {
limit = op_nums.front();
op_nums.pop();
};
total_count = 0;
total_segments = 0;
fact_file_loaded = 0;
while(!fact_file_loaded) {
cout << "LOADING " << f_file << " " << separator << endl;
fact_file_loaded = a->LoadBigFile(f_file.c_str(), separator.c_str());
//cout << "STORING " << f << " " << limit << endl;
a->Store(f,"", limit, 1);
};
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(s);
};
};
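// LOAD ... BINARY: reads the <file>.<column>.header metadata (total record
// count, segment count, largest segment size) and creates a CudaSet that
// references the binary column files without loading any data yet.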
void emit_load_binary(char *s, char *f, int d)
{
statement_count++;
if (scan_state == 0) {
stat[s] = statement_count;
return;
};
printf("BINARY LOAD: %s %s \n", s, f);
CudaSet *a;
unsigned int segCount, maxRecs;
char f1[100];
strcpy(f1, f);
strcat(f1,".");
char col_pos[3];
itoaa(cols.front(),col_pos);
strcat(f1,col_pos);
strcat(f1,".header");
FILE* ff = fopen(f1, "rb");
//fseeko(ff, -16, SEEK_END);
fread((char *)&totalRecs, 8, 1, ff);
fread((char *)&segCount, 4, 1, ff);
fread((char *)&maxRecs, 4, 1, ff);
fclose(ff);
queue<string> names(namevars);
while(!names.empty()) {
setMap[names.front()] = s;
names.pop();
};
a = new CudaSet(namevars, typevars, sizevars, cols,totalRecs, f);
a->segCount = segCount;
a->maxRecs = maxRecs;
a->keep = 1;
varNames[s] = a;
if(stat[s] == statement_count ) {
a->free();
varNames.erase(s);
};
}
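// LOAD ... USING(...): creates an empty CudaSet sized for process_count records
// and records the source file name and separator; the text file itself is read
// lazily later (e.g. when the set is converted to binary form).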
void emit_load(char *s, char *f, int d, char* sep)
{
statement_count++;
if (scan_state == 0) {
stat[s] = statement_count;
return;
};
printf("LOAD: %s %s %d %s \n", s, f, d, sep);
CudaSet *a;
a = new CudaSet(namevars, typevars, sizevars, cols, process_count);
a->mRecCount = 0;
a->resize(process_count);
a->keep = true;
a->fact_table = 1;
string separator1(sep);
separator = separator1;
string ff(f);
f_file = ff;
a->maxRecs = a->mRecCount;
a->segCount = 0;
varNames[s] = a;
if(stat[s] == statement_count) {
a->free();
varNames.erase(s);
};
}
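// Error callback used by the generated parser: prints the current input line
// number followed by the formatted error message.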
void yyerror(char *s, ...)
{
extern int yylineno;
va_list ap;
va_start(ap, s);
fprintf(stderr, "%d: error: ", yylineno);
vfprintf(stderr, s, ap);
fprintf(stderr, "\n");
}
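// Resets all operand/metadata queues and per-statement counters between statements.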
void clean_queues()
{
while(!op_type.empty()) op_type.pop();
while(!op_value.empty()) op_value.pop();
while(!op_join.empty()) op_join.pop();
while(!op_nums.empty()) op_nums.pop();
while(!op_nums_f.empty()) op_nums_f.pop();
while(!j_col_count.empty()) j_col_count.pop();
while(!namevars.empty()) namevars.pop();
while(!typevars.empty()) typevars.pop();
while(!sizevars.empty()) sizevars.pop();
while(!cols.empty()) cols.pop();
sel_count = 0;
join_cnt = 0;
join_col_cnt = -1;
eqq = 0;
}
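// Entry point: initializes CUDPP, parses the script twice (a first pass with
// scan_state == 0 that only records in stat[] where each variable is last used,
// then an execution pass), and finally reports the total cycle time.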
int main(int ac, char **av)
{
extern FILE *yyin;
//hipDeviceProp_t deviceProp;
//hipGetDeviceProperties(&deviceProp, 0);
//if (!deviceProp.canMapHostMemory)
// cout << "Device 0 cannot map host memory" << endl;
//hipSetDeviceFlags(hipDeviceMapHost);
cudppCreate(&theCudpp);
if (ac == 1) {
cout << "Usage : alenka -l process_count script.sql" << endl;
exit(1);
};
if(strcmp(av[1],"-l") == 0) {
process_count = atoff(av[2]);
cout << "Process count = " << process_count << endl;
}
else {
process_count = 6200000;
cout << "Process count = 6200000 " << endl;
};
if((yyin = fopen(av[ac-1], "r")) == NULL) {
perror(av[ac-1]);
exit(1);
};
if(yyparse()) {
printf("SQL scan parse failed\n");
exit(1);
};
fclose(yyin);
scan_state = 1;
std::clock_t start1 = std::clock();
statement_count = 0;
clean_queues();
if(ac > 1 && (yyin = fopen(av[ac-1], "r")) == NULL) {
perror(av[1]);
exit(1);
}
PROC_FLUSH_BUF ( yyin );
statement_count = 0;
if(!yyparse())
cout << "SQL scan parse worked" << endl;
else
cout << "SQL scan parse failed" << endl;
fclose(yyin);
std::cout<< "cycle time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
cudppDestroy(theCudpp);
}
|
deaac074c832a4c5eba139298b231c13a5280468.cu
|
/* A Bison parser, made by GNU Bison 2.4.1. */
/* Skeleton implementation for Bison's Yacc-like parsers in C
Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
under terms of your choice, so long as that work isn't itself a
parser generator using the skeleton or a modified version thereof
as a parser skeleton. Alternatively, if you modify or redistribute
the parser skeleton itself, you may (at your option) remove this
special exception, which will cause the skeleton and the resulting
Bison output files to be licensed under the GNU General Public
License without this special exception.
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
/* C LALR(1) parser skeleton written by Richard Stallman, by
simplifying the original so-called "semantic" parser. */
/* All symbols defined below should begin with yy or YY, to avoid
infringing on user name space. This should be done even for local
variables, as they might otherwise be expanded by user macros.
There are some unavoidable exceptions within include files to
define necessary library symbols; they are noted "INFRINGES ON
USER NAME SPACE" below. */
/* Identify Bison output. */
#define YYBISON 1
/* Bison version. */
#define YYBISON_VERSION "2.4.1"
/* Skeleton name. */
#define YYSKELETON_NAME "yacc.c"
/* Pure parsers. */
#define YYPURE 0
/* Push parsers. */
#define YYPUSH 0
/* Pull parsers. */
#define YYPULL 1
/* Using locations. */
#define YYLSP_NEEDED 0
/* Copy the first part of user declarations. */
/* Line 189 of yacc.c */
#line 17 "bison.y"
#include "lex.yy.c"
#include "cm.cu"
void clean_queues();
void order_inplace(CudaSet* a, stack<string> exe_type);
void yyerror(char *s, ...);
void emit(char *s, ...);
void emit_mul();
void emit_add();
void emit_minus();
void emit_div();
void emit_and();
void emit_eq();
void emit_or();
void emit_cmp(int val);
void emit_var(char *s, int c, char *f);
void emit_var_asc(char *s);
void emit_var_desc(char *s);
void emit_name(char *name);
void emit_count();
void emit_sum();
void emit_average();
void emit_min();
void emit_max();
void emit_string(char *str);
void emit_number(int_type val);
void emit_float(float_type val);
void emit_decimal(float_type val);
void emit_sel_name(char* name);
void emit_limit(int val);
void emit_union(char *s, char *f1, char *f2);
void emit_varchar(char *s, int c, char *f, int d);
void emit_load(char *s, char *f, int d, char* sep);
void emit_load_binary(char *s, char *f, int d);
void emit_store(char *s, char *f, char* sep);
void emit_store_binary(char *s, char *f, char* sep);
void emit_store_binary(char *s, char *f);
void emit_filter(char *s, char *f, int e);
void emit_order(char *s, char *f, int e, int ll = 0);
void emit_group(char *s, char *f, int e);
void emit_select(char *s, char *f, int ll);
void emit_join(char *s, char *j1);
void emit_join_tab(char *s);
void emit_distinct(char *s, char *f);
/* Line 189 of yacc.c */
#line 124 "bison.cu"
/* Enabling traces. */
#ifndef YYDEBUG
# define YYDEBUG 0
#endif
/* Enabling verbose error messages. */
#ifdef YYERROR_VERBOSE
# undef YYERROR_VERBOSE
# define YYERROR_VERBOSE 1
#else
# define YYERROR_VERBOSE 0
#endif
/* Enabling the token table. */
#ifndef YYTOKEN_TABLE
# define YYTOKEN_TABLE 0
#endif
/* Tokens. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
/* Put the tokens into the symbol table, so that GDB and other debuggers
know about them. */
enum yytokentype {
FILENAME = 258,
NAME = 259,
STRING = 260,
INTNUM = 261,
DECIMAL1 = 262,
BOOL1 = 263,
APPROXNUM = 264,
USERVAR = 265,
ASSIGN = 266,
EQUAL = 267,
OR = 268,
XOR = 269,
AND = 270,
REGEXP = 271,
LIKE = 272,
IS = 273,
IN = 274,
NOT = 275,
BETWEEN = 276,
COMPARISON = 277,
SHIFT = 278,
MOD = 279,
UMINUS = 280,
LOAD = 281,
STREAM = 282,
FILTER = 283,
BY = 284,
JOIN = 285,
STORE = 286,
INTO = 287,
GROUP = 288,
FROM = 289,
SELECT = 290,
AS = 291,
ORDER = 292,
ASC = 293,
DESC = 294,
COUNT = 295,
USING = 296,
SUM = 297,
AVG = 298,
MIN = 299,
MAX = 300,
LIMIT = 301,
ON = 302,
BINARY = 303
};
#endif
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
typedef union YYSTYPE
{
/* Line 214 of yacc.c */
#line 67 "bison.y"
int intval;
float floatval;
char *strval;
int subtok;
/* Line 214 of yacc.c */
#line 217 "bison.cu"
} YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
#endif
/* Copy the second part of user declarations. */
/* Line 264 of yacc.c */
#line 229 "bison.cu"
#ifdef short
# undef short
#endif
#ifdef YYTYPE_UINT8
typedef YYTYPE_UINT8 yytype_uint8;
#else
typedef unsigned char yytype_uint8;
#endif
#ifdef YYTYPE_INT8
typedef YYTYPE_INT8 yytype_int8;
#elif (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
typedef signed char yytype_int8;
#else
typedef short int yytype_int8;
#endif
#ifdef YYTYPE_UINT16
typedef YYTYPE_UINT16 yytype_uint16;
#else
typedef unsigned short int yytype_uint16;
#endif
#ifdef YYTYPE_INT16
typedef YYTYPE_INT16 yytype_int16;
#else
typedef short int yytype_int16;
#endif
#ifndef YYSIZE_T
# ifdef __SIZE_TYPE__
# define YYSIZE_T __SIZE_TYPE__
# elif defined size_t
# define YYSIZE_T size_t
# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
# define YYSIZE_T size_t
# else
# define YYSIZE_T unsigned int
# endif
#endif
#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
#ifndef YY_
# if YYENABLE_NLS
# if ENABLE_NLS
# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
# define YY_(msgid) dgettext ("bison-runtime", msgid)
# endif
# endif
# ifndef YY_
# define YY_(msgid) msgid
# endif
#endif
/* Suppress unused-variable warnings by "using" E. */
#if ! defined lint || defined __GNUC__
# define YYUSE(e) ((void) (e))
#else
# define YYUSE(e) /* empty */
#endif
/* Identity function, used to suppress warnings about constant conditions. */
#ifndef lint
# define YYID(n) (n)
#else
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static int
YYID (int yyi)
#else
static int
YYID (yyi)
int yyi;
#endif
{
return yyi;
}
#endif
#if ! defined yyoverflow || YYERROR_VERBOSE
/* The parser invokes alloca or malloc; define the necessary symbols. */
# ifdef YYSTACK_USE_ALLOCA
# if YYSTACK_USE_ALLOCA
# ifdef __GNUC__
# define YYSTACK_ALLOC __builtin_alloca
# elif defined __BUILTIN_VA_ARG_INCR
# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
# elif defined _AIX
# define YYSTACK_ALLOC __alloca
# elif defined _MSC_VER
# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
# define alloca _alloca
# else
# define YYSTACK_ALLOC alloca
# if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef _STDLIB_H
# define _STDLIB_H 1
# endif
# endif
# endif
# endif
# endif
# ifdef YYSTACK_ALLOC
/* Pacify GCC's `empty if-body' warning. */
# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
# ifndef YYSTACK_ALLOC_MAXIMUM
/* The OS might guarantee only one guard page at the bottom of the stack,
and a page size can be as small as 4096 bytes. So we cannot safely
invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
to allow for a few compiler-allocated temporary stack slots. */
# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
# endif
# else
# define YYSTACK_ALLOC YYMALLOC
# define YYSTACK_FREE YYFREE
# ifndef YYSTACK_ALLOC_MAXIMUM
# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
# endif
# if (defined __cplusplus && ! defined _STDLIB_H \
&& ! ((defined YYMALLOC || defined malloc) \
&& (defined YYFREE || defined free)))
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef _STDLIB_H
# define _STDLIB_H 1
# endif
# endif
# ifndef YYMALLOC
# define YYMALLOC malloc
# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# ifndef YYFREE
# define YYFREE free
# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
void free (void *); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# endif
#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
#if (! defined yyoverflow \
&& (! defined __cplusplus \
|| (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
/* A type that is properly aligned for any stack member. */
union yyalloc
{
yytype_int16 yyss_alloc;
YYSTYPE yyvs_alloc;
};
/* The size of the maximum gap between one aligned stack and the next. */
# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
/* The size of an array large enough to hold all stacks, each with
   N elements. */
# define YYSTACK_BYTES(N) \
((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ YYSTACK_GAP_MAXIMUM)
/* Copy COUNT objects from FROM to TO. The source and destination do
not overlap. */
# ifndef YYCOPY
# if defined __GNUC__ && 1 < __GNUC__
# define YYCOPY(To, From, Count) \
__builtin_memcpy (To, From, (Count) * sizeof (*(From)))
# else
# define YYCOPY(To, From, Count) \
do \
{ \
YYSIZE_T yyi; \
for (yyi = 0; yyi < (Count); yyi++) \
(To)[yyi] = (From)[yyi]; \
} \
while (YYID (0))
# endif
# endif
/* Relocate STACK from its old location to the new one. The
local variables YYSIZE and YYSTACKSIZE give the old and new number of
elements in the stack, and YYPTR gives the new location of the
stack. Advance YYPTR to a properly aligned location for the next
stack. */
# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
do \
{ \
YYSIZE_T yynewbytes; \
YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
Stack = &yyptr->Stack_alloc; \
yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
yyptr += yynewbytes / sizeof (*yyptr); \
} \
while (YYID (0))
#endif
/* YYFINAL -- State number of the termination state. */
#define YYFINAL 8
/* YYLAST -- Last index in YYTABLE. */
#define YYLAST 446
/* YYNTOKENS -- Number of terminals. */
#define YYNTOKENS 66
/* YYNNTS -- Number of nonterminals. */
#define YYNNTS 13
/* YYNRULES -- Number of rules. */
#define YYNRULES 62
/* YYNRULES -- Number of states. */
#define YYNSTATES 153
/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
#define YYUNDEFTOK 2
#define YYMAXUTOK 303
#define YYTRANSLATE(YYX) \
((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
static const yytype_uint8 yytranslate[] =
{
0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 20, 2, 2, 2, 31, 25, 2,
59, 60, 29, 27, 65, 28, 61, 30, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 64, 58,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 33, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 62, 24, 63, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 21, 22, 23, 26, 32,
34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57
};
#if YYDEBUG
/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
YYRHS. */
static const yytype_uint16 yyprhs[] =
{
0, 0, 3, 6, 10, 12, 20, 33, 43, 49,
56, 64, 74, 81, 83, 87, 89, 91, 93, 95,
97, 99, 109, 116, 119, 122, 127, 132, 137, 142,
147, 151, 155, 159, 163, 167, 171, 175, 179, 183,
187, 191, 194, 197, 201, 207, 211, 215, 220, 221,
225, 229, 235, 237, 241, 243, 247, 248, 250, 253,
258, 264, 265
};
/* YYRHS -- A `-1'-separated list of the rules' RHS. */
static const yytype_int8 yyrhs[] =
{
67, 0, -1, 68, 58, -1, 67, 68, 58, -1,
69, -1, 4, 11, 44, 72, 43, 4, 71, -1,
4, 11, 35, 3, 50, 59, 3, 60, 45, 59,
73, 60, -1, 4, 11, 35, 3, 57, 45, 59,
73, 60, -1, 4, 11, 37, 4, 76, -1, 4,
11, 46, 4, 38, 75, -1, 4, 11, 44, 72,
43, 4, 77, -1, 40, 4, 41, 3, 50, 59,
3, 60, 78, -1, 40, 4, 41, 3, 78, 57,
-1, 4, -1, 4, 61, 4, -1, 10, -1, 5,
-1, 6, -1, 9, -1, 7, -1, 8, -1, 4,
62, 6, 63, 64, 4, 59, 6, 60, -1, 4,
62, 6, 63, 64, 4, -1, 4, 47, -1, 4,
48, -1, 49, 59, 70, 60, -1, 51, 59, 70,
60, -1, 52, 59, 70, 60, -1, 53, 59, 70,
60, -1, 54, 59, 70, 60, -1, 70, 27, 70,
-1, 70, 28, 70, -1, 70, 29, 70, -1, 70,
30, 70, -1, 70, 31, 70, -1, 70, 32, 70,
-1, 70, 15, 70, -1, 70, 12, 70, -1, 70,
13, 70, -1, 70, 14, 70, -1, 70, 26, 70,
-1, 21, 70, -1, 20, 70, -1, 70, 23, 70,
-1, 70, 23, 59, 69, 60, -1, 59, 70, 60,
-1, 70, 18, 8, -1, 70, 18, 21, 8, -1,
-1, 42, 38, 74, -1, 70, 45, 4, -1, 72,
65, 70, 45, 4, -1, 70, -1, 73, 65, 70,
-1, 70, -1, 70, 65, 74, -1, -1, 74, -1,
38, 70, -1, 39, 4, 56, 70, -1, 39, 4,
56, 70, 77, -1, -1, 55, 6, -1
};
/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
static const yytype_uint8 yyrline[] =
{
0, 137, 137, 138, 142, 145, 147, 149, 151, 153,
155, 157, 159, 164, 165, 166, 167, 168, 169, 170,
171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
184, 185, 186, 187, 188, 189, 191, 192, 193, 194,
195, 196, 197, 198, 200, 201, 205, 206, 209, 212,
216, 217, 221, 222, 226, 227, 230, 232, 235, 238,
239, 241, 244
};
#endif
#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
First, the terminals, then, starting at YYNTOKENS, nonterminals. */
static const char *const yytname[] =
{
"$end", "error", "$undefined", "FILENAME", "NAME", "STRING", "INTNUM",
"DECIMAL1", "BOOL1", "APPROXNUM", "USERVAR", "ASSIGN", "EQUAL", "OR",
"XOR", "AND", "REGEXP", "LIKE", "IS", "IN", "'!'", "NOT", "BETWEEN",
"COMPARISON", "'|'", "'&'", "SHIFT", "'+'", "'-'", "'*'", "'/'", "'%'",
"MOD", "'^'", "UMINUS", "LOAD", "STREAM", "FILTER", "BY", "JOIN",
"STORE", "INTO", "GROUP", "FROM", "SELECT", "AS", "ORDER", "ASC", "DESC",
"COUNT", "USING", "SUM", "AVG", "MIN", "MAX", "LIMIT", "ON", "BINARY",
"';'", "'('", "')'", "'.'", "'{'", "'}'", "':'", "','", "$accept",
"stmt_list", "stmt", "select_stmt", "expr", "opt_group_list",
"expr_list", "load_list", "val_list", "opt_val_list", "opt_where",
"join_list", "opt_limit", 0
};
#endif
# ifdef YYPRINT
/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
token YYLEX-NUM. */
static const yytype_uint16 yytoknum[] =
{
0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
33, 275, 276, 277, 124, 38, 278, 43, 45, 42,
47, 37, 279, 94, 280, 281, 282, 283, 284, 285,
286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
296, 297, 298, 299, 300, 301, 302, 303, 59, 40,
41, 46, 123, 125, 58, 44
};
# endif
/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
static const yytype_uint8 yyr1[] =
{
0, 66, 67, 67, 68, 69, 69, 69, 69, 69,
69, 69, 69, 70, 70, 70, 70, 70, 70, 70,
70, 70, 70, 70, 70, 70, 70, 70, 70, 70,
70, 70, 70, 70, 70, 70, 70, 70, 70, 70,
70, 70, 70, 70, 70, 70, 70, 70, 71, 71,
72, 72, 73, 73, 74, 74, 75, 75, 76, 77,
77, 78, 78
};
/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
static const yytype_uint8 yyr2[] =
{
0, 2, 2, 3, 1, 7, 12, 9, 5, 6,
7, 9, 6, 1, 3, 1, 1, 1, 1, 1,
1, 9, 6, 2, 2, 4, 4, 4, 4, 4,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 2, 2, 3, 5, 3, 3, 4, 0, 3,
3, 5, 1, 3, 1, 3, 0, 1, 2, 4,
5, 0, 2
};
/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
STATE-NUM when YYTABLE doesn't specify something else to do. Zero
means the default is an error. */
static const yytype_uint8 yydefact[] =
{
0, 0, 0, 0, 0, 4, 0, 0, 1, 0,
2, 0, 0, 0, 0, 0, 3, 0, 0, 13,
16, 17, 19, 20, 18, 15, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 61, 0, 0,
0, 8, 23, 24, 0, 0, 42, 41, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
56, 0, 0, 0, 0, 0, 58, 14, 0, 0,
0, 0, 0, 0, 45, 37, 38, 39, 36, 46,
0, 0, 43, 40, 30, 31, 32, 33, 34, 35,
50, 48, 0, 54, 57, 9, 0, 62, 12, 0,
0, 0, 25, 26, 27, 28, 29, 47, 13, 0,
0, 0, 5, 10, 0, 0, 0, 0, 52, 0,
0, 44, 0, 0, 51, 55, 61, 0, 7, 0,
22, 0, 49, 11, 0, 53, 0, 59, 0, 0,
60, 6, 21
};
/* YYDEFGOTO[NTERM-NUM]. */
static const yytype_int16 yydefgoto[] =
{
-1, 3, 4, 5, 103, 122, 35, 129, 104, 105,
41, 123, 73
};
/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
STATE-NUM. */
#define YYPACT_NINF -122
static const yytype_int16 yypact[] =
{
14, -3, 7, 5, -34, -122, 50, 23, -122, 28,
-122, 52, 61, 62, 77, 85, -122, -35, 51, -45,
-122, -122, -122, -122, -122, -122, 62, 62, 33, 36,
44, 49, 58, 62, 300, -42, 71, -29, 59, 65,
62, -122, -122, -122, 115, 114, 2, 2, 62, 62,
62, 62, 62, 171, 62, 62, 62, 62, -2, 128,
62, 62, 62, 62, 62, 62, 62, 118, 119, 62,
62, 66, 121, 67, 126, 84, 364, -122, 81, 192,
214, 235, 257, 278, -122, 364, 383, 401, 142, -122,
122, 53, 408, 414, 69, 69, -122, -122, -122, -122,
-122, -32, 321, 127, -122, -122, 143, -122, -122, 87,
62, 88, -122, -122, -122, -122, -122, -122, 29, 91,
157, 124, -122, -122, 159, 62, 104, 130, 364, 15,
162, -122, 111, 62, -122, -122, 123, 117, -122, 62,
129, 62, -122, -122, 62, 364, 184, 342, 19, 131,
-122, -122, -122
};
/* YYPGOTO[NTERM-NUM]. */
static const yytype_int16 yypgoto[] =
{
-122, -122, 190, 105, -13, -122, -122, 64, -121, -122,
-122, 48, 73
};
/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
positive, shift that token. If negative, reduce the rule which
number is the opposite. If zero, do what YYDEFACT says.
If YYTABLE_NINF, syntax error. */
#define YYTABLE_NINF -1
static const yytype_uint8 yytable[] =
{
34, 68, 42, 43, 135, 8, 89, 120, 6, 1,
121, 7, 142, 46, 47, 38, 44, 45, 1, 90,
53, 71, 39, 69, 10, 59, 72, 76, 60, 61,
62, 63, 64, 65, 66, 79, 80, 81, 82, 83,
6, 85, 86, 87, 88, 2, 92, 93, 94, 95,
96, 97, 98, 99, 2, 17, 102, 118, 20, 21,
22, 23, 24, 25, 15, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 138, 42, 43, 53, 151,
139, 36, 26, 27, 139, 11, 16, 12, 37, 40,
44, 45, 48, 2, 13, 49, 14, 128, 63, 64,
65, 66, 28, 50, 29, 30, 31, 32, 51, 70,
75, 28, 33, 29, 30, 31, 32, 52, 74, 77,
78, 33, 100, 101, 108, 106, 145, 107, 147, 109,
117, 128, 19, 20, 21, 22, 23, 24, 25, 54,
55, 56, 57, 110, 111, 58, 126, 127, 26, 27,
59, 131, 130, 60, 61, 62, 63, 64, 65, 66,
58, 132, 133, 134, 136, 59, 140, 141, 60, 61,
62, 63, 64, 65, 66, 137, 144, 28, 72, 29,
30, 31, 32, 54, 55, 56, 57, 91, 146, 58,
149, 152, 125, 9, 59, 150, 119, 60, 61, 62,
63, 64, 65, 66, 54, 55, 56, 57, 148, 143,
58, 0, 0, 0, 0, 59, 0, 0, 60, 61,
62, 63, 64, 65, 66, 0, 54, 55, 56, 57,
0, 84, 58, 0, 0, 0, 0, 59, 0, 0,
60, 61, 62, 63, 64, 65, 66, 54, 55, 56,
57, 0, 112, 58, 0, 0, 0, 0, 59, 0,
0, 60, 61, 62, 63, 64, 65, 66, 0, 54,
55, 56, 57, 0, 113, 58, 0, 0, 0, 0,
59, 0, 0, 60, 61, 62, 63, 64, 65, 66,
54, 55, 56, 57, 0, 114, 58, 0, 0, 0,
0, 59, 0, 0, 60, 61, 62, 63, 64, 65,
66, 0, 54, 55, 56, 57, 0, 115, 58, 0,
0, 0, 0, 59, 0, 0, 60, 61, 62, 63,
64, 65, 66, 54, 55, 56, 57, 0, 116, 58,
0, 0, 0, 0, 59, 67, 0, 60, 61, 62,
63, 64, 65, 66, 54, 55, 56, 57, 0, 0,
58, 0, 0, 0, 0, 59, 124, 0, 60, 61,
62, 63, 64, 65, 66, 0, 54, 55, 56, 57,
0, 120, 58, 0, 0, 0, 0, 59, 0, 0,
60, 61, 62, 63, 64, 65, 66, 56, 57, 0,
0, 58, 0, 0, 0, 0, 59, 0, 0, 60,
61, 62, 63, 64, 65, 66, 57, 0, 0, 58,
0, 0, 0, 0, 59, 0, 0, 60, 61, 62,
63, 64, 65, 66, 60, 61, 62, 63, 64, 65,
66, 61, 62, 63, 64, 65, 66
};
static const yytype_int16 yycheck[] =
{
13, 43, 47, 48, 125, 0, 8, 39, 11, 4,
42, 4, 133, 26, 27, 50, 61, 62, 4, 21,
33, 50, 57, 65, 58, 23, 55, 40, 26, 27,
28, 29, 30, 31, 32, 48, 49, 50, 51, 52,
11, 54, 55, 56, 57, 40, 59, 60, 61, 62,
63, 64, 65, 66, 40, 3, 69, 4, 5, 6,
7, 8, 9, 10, 41, 4, 4, 5, 6, 7,
8, 9, 10, 20, 21, 60, 47, 48, 91, 60,
65, 4, 20, 21, 65, 35, 58, 37, 3, 38,
61, 62, 59, 40, 44, 59, 46, 110, 29, 30,
31, 32, 49, 59, 51, 52, 53, 54, 59, 38,
45, 49, 59, 51, 52, 53, 54, 59, 59, 4,
6, 59, 4, 4, 57, 59, 139, 6, 141, 3,
8, 144, 4, 5, 6, 7, 8, 9, 10, 12,
13, 14, 15, 59, 63, 18, 3, 60, 20, 21,
23, 60, 64, 26, 27, 28, 29, 30, 31, 32,
18, 4, 38, 4, 60, 23, 4, 56, 26, 27,
28, 29, 30, 31, 32, 45, 59, 49, 55, 51,
52, 53, 54, 12, 13, 14, 15, 59, 59, 18,
6, 60, 65, 3, 23, 147, 91, 26, 27, 28,
29, 30, 31, 32, 12, 13, 14, 15, 144, 136,
18, -1, -1, -1, -1, 23, -1, -1, 26, 27,
28, 29, 30, 31, 32, -1, 12, 13, 14, 15,
-1, 60, 18, -1, -1, -1, -1, 23, -1, -1,
26, 27, 28, 29, 30, 31, 32, 12, 13, 14,
15, -1, 60, 18, -1, -1, -1, -1, 23, -1,
-1, 26, 27, 28, 29, 30, 31, 32, -1, 12,
13, 14, 15, -1, 60, 18, -1, -1, -1, -1,
23, -1, -1, 26, 27, 28, 29, 30, 31, 32,
12, 13, 14, 15, -1, 60, 18, -1, -1, -1,
-1, 23, -1, -1, 26, 27, 28, 29, 30, 31,
32, -1, 12, 13, 14, 15, -1, 60, 18, -1,
-1, -1, -1, 23, -1, -1, 26, 27, 28, 29,
30, 31, 32, 12, 13, 14, 15, -1, 60, 18,
-1, -1, -1, -1, 23, 45, -1, 26, 27, 28,
29, 30, 31, 32, 12, 13, 14, 15, -1, -1,
18, -1, -1, -1, -1, 23, 45, -1, 26, 27,
28, 29, 30, 31, 32, -1, 12, 13, 14, 15,
-1, 39, 18, -1, -1, -1, -1, 23, -1, -1,
26, 27, 28, 29, 30, 31, 32, 14, 15, -1,
-1, 18, -1, -1, -1, -1, 23, -1, -1, 26,
27, 28, 29, 30, 31, 32, 15, -1, -1, 18,
-1, -1, -1, -1, 23, -1, -1, 26, 27, 28,
29, 30, 31, 32, 26, 27, 28, 29, 30, 31,
32, 27, 28, 29, 30, 31, 32
};
/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
symbol of state STATE-NUM. */
static const yytype_uint8 yystos[] =
{
0, 4, 40, 67, 68, 69, 11, 4, 0, 68,
58, 35, 37, 44, 46, 41, 58, 3, 4, 4,
5, 6, 7, 8, 9, 10, 20, 21, 49, 51,
52, 53, 54, 59, 70, 72, 4, 3, 50, 57,
38, 76, 47, 48, 61, 62, 70, 70, 59, 59,
59, 59, 59, 70, 12, 13, 14, 15, 18, 23,
26, 27, 28, 29, 30, 31, 32, 45, 43, 65,
38, 50, 55, 78, 59, 45, 70, 4, 6, 70,
70, 70, 70, 70, 60, 70, 70, 70, 70, 8,
21, 59, 70, 70, 70, 70, 70, 70, 70, 70,
4, 4, 70, 70, 74, 75, 59, 6, 57, 3,
59, 63, 60, 60, 60, 60, 60, 8, 4, 69,
39, 42, 71, 77, 45, 65, 3, 60, 70, 73,
64, 60, 4, 38, 4, 74, 60, 45, 60, 65,
4, 56, 74, 78, 59, 70, 59, 70, 73, 6,
77, 60, 60
};
#define yyerrok (yyerrstatus = 0)
#define yyclearin (yychar = YYEMPTY)
#define YYEMPTY (-2)
#define YYEOF 0
#define YYACCEPT goto yyacceptlab
#define YYABORT goto yyabortlab
#define YYERROR goto yyerrorlab
/* Like YYERROR except do call yyerror. This remains here temporarily
to ease the transition to the new meaning of YYERROR, for GCC.
Once GCC version 2 has supplanted version 1, this can go. */
#define YYFAIL goto yyerrlab
#define YYRECOVERING() (!!yyerrstatus)
#define YYBACKUP(Token, Value) \
do \
if (yychar == YYEMPTY && yylen == 1) \
{ \
yychar = (Token); \
yylval = (Value); \
yytoken = YYTRANSLATE (yychar); \
YYPOPSTACK (1); \
goto yybackup; \
} \
else \
{ \
yyerror (YY_("syntax error: cannot back up")); \
YYERROR; \
} \
while (YYID (0))
#define YYTERROR 1
#define YYERRCODE 256
/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
If N is 0, then set CURRENT to the empty location which ends
the previous symbol: RHS[0] (always defined). */
#define YYRHSLOC(Rhs, K) ((Rhs)[K])
#ifndef YYLLOC_DEFAULT
# define YYLLOC_DEFAULT(Current, Rhs, N) \
do \
if (YYID (N)) \
{ \
(Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
(Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
(Current).last_line = YYRHSLOC (Rhs, N).last_line; \
(Current).last_column = YYRHSLOC (Rhs, N).last_column; \
} \
else \
{ \
(Current).first_line = (Current).last_line = \
YYRHSLOC (Rhs, 0).last_line; \
(Current).first_column = (Current).last_column = \
YYRHSLOC (Rhs, 0).last_column; \
} \
while (YYID (0))
#endif
/* YY_LOCATION_PRINT -- Print the location on the stream.
This macro was not mandated originally: define only if we know
we won't break user code: when these are the locations we know. */
#ifndef YY_LOCATION_PRINT
# if YYLTYPE_IS_TRIVIAL
# define YY_LOCATION_PRINT(File, Loc) \
fprintf (File, "%d.%d-%d.%d", \
(Loc).first_line, (Loc).first_column, \
(Loc).last_line, (Loc).last_column)
# else
# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
# endif
#endif
/* YYLEX -- calling `yylex' with the right arguments. */
#ifdef YYLEX_PARAM
# define YYLEX yylex (YYLEX_PARAM)
#else
# define YYLEX yylex ()
#endif
/* Enable debugging if requested. */
#if YYDEBUG
# ifndef YYFPRINTF
# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
# define YYFPRINTF fprintf
# endif
# define YYDPRINTF(Args) \
do { \
if (yydebug) \
YYFPRINTF Args; \
} while (YYID (0))
# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
do { \
if (yydebug) \
{ \
YYFPRINTF (stderr, "%s ", Title); \
yy_symbol_print (stderr, \
Type, Value); \
YYFPRINTF (stderr, "\n"); \
} \
} while (YYID (0))
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_value_print (yyoutput, yytype, yyvaluep)
FILE *yyoutput;
int yytype;
YYSTYPE const * const yyvaluep;
#endif
{
if (!yyvaluep)
return;
# ifdef YYPRINT
if (yytype < YYNTOKENS)
YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
# else
YYUSE (yyoutput);
# endif
switch (yytype)
{
default:
break;
}
}
/*--------------------------------.
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_print (yyoutput, yytype, yyvaluep)
FILE *yyoutput;
int yytype;
YYSTYPE const * const yyvaluep;
#endif
{
if (yytype < YYNTOKENS)
YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
else
YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
yy_symbol_value_print (yyoutput, yytype, yyvaluep);
YYFPRINTF (yyoutput, ")");
}
/*------------------------------------------------------------------.
| yy_stack_print -- Print the state stack from its BOTTOM up to its |
| TOP (included). |
`------------------------------------------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
#else
static void
yy_stack_print (yybottom, yytop)
yytype_int16 *yybottom;
yytype_int16 *yytop;
#endif
{
YYFPRINTF (stderr, "Stack now");
for (; yybottom <= yytop; yybottom++)
{
int yybot = *yybottom;
YYFPRINTF (stderr, " %d", yybot);
}
YYFPRINTF (stderr, "\n");
}
# define YY_STACK_PRINT(Bottom, Top) \
do { \
if (yydebug) \
yy_stack_print ((Bottom), (Top)); \
} while (YYID (0))
/*------------------------------------------------.
| Report that the YYRULE is going to be reduced. |
`------------------------------------------------*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
#else
static void
yy_reduce_print (yyvsp, yyrule)
YYSTYPE *yyvsp;
int yyrule;
#endif
{
int yynrhs = yyr2[yyrule];
int yyi;
unsigned long int yylno = yyrline[yyrule];
YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
yyrule - 1, yylno);
/* The symbols being reduced. */
for (yyi = 0; yyi < yynrhs; yyi++)
{
YYFPRINTF (stderr, " $%d = ", yyi + 1);
yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
&(yyvsp[(yyi + 1) - (yynrhs)])
);
YYFPRINTF (stderr, "\n");
}
}
# define YY_REDUCE_PRINT(Rule) \
do { \
if (yydebug) \
yy_reduce_print (yyvsp, Rule); \
} while (YYID (0))
/* Nonzero means print parse trace. It is left uninitialized so that
multiple parsers can coexist. */
int yydebug;
#else /* !YYDEBUG */
# define YYDPRINTF(Args)
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
# define YY_STACK_PRINT(Bottom, Top)
# define YY_REDUCE_PRINT(Rule)
#endif /* !YYDEBUG */
/* YYINITDEPTH -- initial size of the parser's stacks. */
#ifndef YYINITDEPTH
# define YYINITDEPTH 200
#endif
/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
if the built-in stack extension method is used).
Do not make this value too large; the results are undefined if
YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
evaluated with infinite-precision integer arithmetic. */
#ifndef YYMAXDEPTH
# define YYMAXDEPTH 10000
#endif
#if YYERROR_VERBOSE
# ifndef yystrlen
# if defined __GLIBC__ && defined _STRING_H
# define yystrlen strlen
# else
/* Return the length of YYSTR. */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static YYSIZE_T
yystrlen (const char *yystr)
#else
static YYSIZE_T
yystrlen (yystr)
const char *yystr;
#endif
{
YYSIZE_T yylen;
for (yylen = 0; yystr[yylen]; yylen++)
continue;
return yylen;
}
# endif
# endif
# ifndef yystpcpy
# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
# define yystpcpy stpcpy
# else
/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
YYDEST. */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static char *
yystpcpy (char *yydest, const char *yysrc)
#else
static char *
yystpcpy (yydest, yysrc)
char *yydest;
const char *yysrc;
#endif
{
char *yyd = yydest;
const char *yys = yysrc;
while ((*yyd++ = *yys++) != '\0')
continue;
return yyd - 1;
}
# endif
# endif
# ifndef yytnamerr
/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
quotes and backslashes, so that it's suitable for yyerror. The
heuristic is that double-quoting is unnecessary unless the string
contains an apostrophe, a comma, or backslash (other than
backslash-backslash). YYSTR is taken from yytname. If YYRES is
null, do not copy; instead, return the length of what the result
would have been. */
static YYSIZE_T
yytnamerr (char *yyres, const char *yystr)
{
if (*yystr == '"')
{
YYSIZE_T yyn = 0;
char const *yyp = yystr;
for (;;)
switch (*++yyp)
{
case '\'':
case ',':
goto do_not_strip_quotes;
case '\\':
if (*++yyp != '\\')
goto do_not_strip_quotes;
/* Fall through. */
default:
if (yyres)
yyres[yyn] = *yyp;
yyn++;
break;
case '"':
if (yyres)
yyres[yyn] = '\0';
return yyn;
}
do_not_strip_quotes: ;
}
if (! yyres)
return yystrlen (yystr);
return yystpcpy (yyres, yystr) - yyres;
}
# endif
/* Copy into YYRESULT an error message about the unexpected token
YYCHAR while in state YYSTATE. Return the number of bytes copied,
including the terminating null byte. If YYRESULT is null, do not
copy anything; just return the number of bytes that would be
copied. As a special case, return 0 if an ordinary "syntax error"
message will do. Return YYSIZE_MAXIMUM if overflow occurs during
size calculation. */
static YYSIZE_T
yysyntax_error (char *yyresult, int yystate, int yychar)
{
int yyn = yypact[yystate];
if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
return 0;
else
{
int yytype = YYTRANSLATE (yychar);
YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
YYSIZE_T yysize = yysize0;
YYSIZE_T yysize1;
int yysize_overflow = 0;
enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
int yyx;
# if 0
/* This is so xgettext sees the translatable formats that are
constructed on the fly. */
YY_("syntax error, unexpected %s");
YY_("syntax error, unexpected %s, expecting %s");
YY_("syntax error, unexpected %s, expecting %s or %s");
YY_("syntax error, unexpected %s, expecting %s or %s or %s");
YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
# endif
char *yyfmt;
char const *yyf;
static char const yyunexpected[] = "syntax error, unexpected %s";
static char const yyexpecting[] = ", expecting %s";
static char const yyor[] = " or %s";
char yyformat[sizeof yyunexpected
+ sizeof yyexpecting - 1
+ ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
* (sizeof yyor - 1))];
char const *yyprefix = yyexpecting;
/* Start YYX at -YYN if negative to avoid negative indexes in
YYCHECK. */
int yyxbegin = yyn < 0 ? -yyn : 0;
/* Stay within bounds of both yycheck and yytname. */
int yychecklim = YYLAST - yyn + 1;
int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
int yycount = 1;
yyarg[0] = yytname[yytype];
yyfmt = yystpcpy (yyformat, yyunexpected);
for (yyx = yyxbegin; yyx < yyxend; ++yyx)
if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
{
if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
{
yycount = 1;
yysize = yysize0;
yyformat[sizeof yyunexpected - 1] = '\0';
break;
}
yyarg[yycount++] = yytname[yyx];
yysize1 = yysize + yytnamerr (0, yytname[yyx]);
yysize_overflow |= (yysize1 < yysize);
yysize = yysize1;
yyfmt = yystpcpy (yyfmt, yyprefix);
yyprefix = yyor;
}
yyf = YY_(yyformat);
yysize1 = yysize + yystrlen (yyf);
yysize_overflow |= (yysize1 < yysize);
yysize = yysize1;
if (yysize_overflow)
return YYSIZE_MAXIMUM;
if (yyresult)
{
/* Avoid sprintf, as that infringes on the user's name space.
Don't have undefined behavior even if the translation
produced a string with the wrong number of "%s"s. */
char *yyp = yyresult;
int yyi = 0;
while ((*yyp = *yyf) != '\0')
{
if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
{
yyp += yytnamerr (yyp, yyarg[yyi++]);
yyf += 2;
}
else
{
yyp++;
yyf++;
}
}
}
return yysize;
}
}
#endif /* YYERROR_VERBOSE */
/*-----------------------------------------------.
| Release the memory associated to this symbol. |
`-----------------------------------------------*/
/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
#else
static void
yydestruct (yymsg, yytype, yyvaluep)
const char *yymsg;
int yytype;
YYSTYPE *yyvaluep;
#endif
{
YYUSE (yyvaluep);
if (!yymsg)
yymsg = "Deleting";
YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
switch (yytype)
{
default:
break;
}
}
/* Prevent warnings from -Wmissing-prototypes. */
#ifdef YYPARSE_PARAM
#if defined __STDC__ || defined __cplusplus
int yyparse (void *YYPARSE_PARAM);
#else
int yyparse ();
#endif
#else /* ! YYPARSE_PARAM */
#if defined __STDC__ || defined __cplusplus
int yyparse (void);
#else
int yyparse ();
#endif
#endif /* ! YYPARSE_PARAM */
/* The lookahead symbol. */
int yychar;
/* The semantic value of the lookahead symbol. */
YYSTYPE yylval;
/* Number of syntax errors so far. */
int yynerrs;
/*-------------------------.
| yyparse or yypush_parse. |
`-------------------------*/
#ifdef YYPARSE_PARAM
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void *YYPARSE_PARAM)
#else
int
yyparse (YYPARSE_PARAM)
void *YYPARSE_PARAM;
#endif
#else /* ! YYPARSE_PARAM */
#if (defined __STDC__ || defined __C99__FUNC__ \
|| defined __cplusplus || defined _MSC_VER)
int
yyparse (void)
#else
int
yyparse ()
#endif
#endif
{
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
/* The stacks and their tools:
`yyss': related to states.
`yyvs': related to semantic values.
Refer to the stacks thru separate pointers, to allow yyoverflow
to reallocate them elsewhere. */
/* The state stack. */
yytype_int16 yyssa[YYINITDEPTH];
yytype_int16 *yyss;
yytype_int16 *yyssp;
/* The semantic value stack. */
YYSTYPE yyvsa[YYINITDEPTH];
YYSTYPE *yyvs;
YYSTYPE *yyvsp;
YYSIZE_T yystacksize;
int yyn;
int yyresult;
/* Lookahead token as an internal (translated) token number. */
int yytoken;
/* The variables used to return semantic value and location from the
action routines. */
YYSTYPE yyval;
#if YYERROR_VERBOSE
/* Buffer for error messages, and its allocated size. */
char yymsgbuf[128];
char *yymsg = yymsgbuf;
YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
#endif
#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
/* The number of symbols on the RHS of the reduced rule.
Keep to zero when no symbol should be popped. */
int yylen = 0;
yytoken = 0;
yyss = yyssa;
yyvs = yyvsa;
yystacksize = YYINITDEPTH;
YYDPRINTF ((stderr, "Starting parse\n"));
yystate = 0;
yyerrstatus = 0;
yynerrs = 0;
yychar = YYEMPTY; /* Cause a token to be read. */
/* Initialize stack pointers.
Waste one element of value and location stack
so that they stay on the same level as the state stack.
The wasted elements are never initialized. */
yyssp = yyss;
yyvsp = yyvs;
goto yysetstate;
/*------------------------------------------------------------.
| yynewstate -- Push a new state, which is found in yystate. |
`------------------------------------------------------------*/
yynewstate:
/* In all cases, when you get here, the value and location stacks
have just been pushed. So pushing a state here evens the stacks. */
yyssp++;
yysetstate:
*yyssp = yystate;
if (yyss + yystacksize - 1 <= yyssp)
{
/* Get the current used size of the three stacks, in elements. */
YYSIZE_T yysize = yyssp - yyss + 1;
#ifdef yyoverflow
{
/* Give user a chance to reallocate the stack. Use copies of
these so that the &'s don't force the real ones into
memory. */
YYSTYPE *yyvs1 = yyvs;
yytype_int16 *yyss1 = yyss;
/* Each stack pointer address is followed by the size of the
data in use in that stack, in bytes. This used to be a
conditional around just the two extra args, but that might
be undefined if yyoverflow is a macro. */
yyoverflow (YY_("memory exhausted"),
&yyss1, yysize * sizeof (*yyssp),
&yyvs1, yysize * sizeof (*yyvsp),
&yystacksize);
yyss = yyss1;
yyvs = yyvs1;
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
goto yyexhaustedlab;
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
yystacksize = YYMAXDEPTH;
{
yytype_int16 *yyss1 = yyss;
union yyalloc *yyptr =
(union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
if (! yyptr)
goto yyexhaustedlab;
YYSTACK_RELOCATE (yyss_alloc, yyss);
YYSTACK_RELOCATE (yyvs_alloc, yyvs);
# undef YYSTACK_RELOCATE
if (yyss1 != yyssa)
YYSTACK_FREE (yyss1);
}
# endif
#endif /* no yyoverflow */
yyssp = yyss + yysize - 1;
yyvsp = yyvs + yysize - 1;
YYDPRINTF ((stderr, "Stack size increased to %lu\n",
(unsigned long int) yystacksize));
if (yyss + yystacksize - 1 <= yyssp)
YYABORT;
}
YYDPRINTF ((stderr, "Entering state %d\n", yystate));
if (yystate == YYFINAL)
YYACCEPT;
goto yybackup;
/*-----------.
| yybackup. |
`-----------*/
yybackup:
/* Do appropriate processing given the current state. Read a
lookahead token if we need one and don't already have one. */
/* First try to decide what to do without reference to lookahead token. */
yyn = yypact[yystate];
if (yyn == YYPACT_NINF)
goto yydefault;
/* Not known => get a lookahead token if don't already have one. */
/* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
if (yychar == YYEMPTY)
{
YYDPRINTF ((stderr, "Reading a token: "));
yychar = YYLEX;
}
if (yychar <= YYEOF)
{
yychar = yytoken = YYEOF;
YYDPRINTF ((stderr, "Now at end of input.\n"));
}
else
{
yytoken = YYTRANSLATE (yychar);
YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
}
/* If the proper action on seeing token YYTOKEN is to reduce or to
detect an error, take that action. */
yyn += yytoken;
if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
goto yydefault;
yyn = yytable[yyn];
if (yyn <= 0)
{
if (yyn == 0 || yyn == YYTABLE_NINF)
goto yyerrlab;
yyn = -yyn;
goto yyreduce;
}
/* Count tokens shifted since error; after three, turn off error
status. */
if (yyerrstatus)
yyerrstatus--;
/* Shift the lookahead token. */
YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
/* Discard the shifted token. */
yychar = YYEMPTY;
yystate = yyn;
*++yyvsp = yylval;
goto yynewstate;
/*-----------------------------------------------------------.
| yydefault -- do the default action for the current state. |
`-----------------------------------------------------------*/
yydefault:
yyn = yydefact[yystate];
if (yyn == 0)
goto yyerrlab;
goto yyreduce;
/*-----------------------------.
| yyreduce -- Do a reduction. |
`-----------------------------*/
yyreduce:
/* yyn is the number of a rule to reduce with. */
yylen = yyr2[yyn];
/* If YYLEN is nonzero, implement the default value of the action:
`$$ = $1'.
Otherwise, the following line sets YYVAL to garbage.
This behavior is undocumented and Bison
users should not rely upon it. Assigning to YYVAL
unconditionally makes the parser a bit smaller, and it avoids a
GCC warning that YYVAL may be used uninitialized. */
yyval = yyvsp[1-yylen];
YY_REDUCE_PRINT (yyn);
switch (yyn)
{
case 4:
/* Line 1455 of yacc.c */
#line 142 "bison.y"
{ emit("STMT"); ;}
break;
case 5:
/* Line 1455 of yacc.c */
#line 146 "bison.y"
{ emit_select((yyvsp[(1) - (7)].strval), (yyvsp[(6) - (7)].strval), (yyvsp[(7) - (7)].intval)); ;}
break;
case 6:
/* Line 1455 of yacc.c */
#line 148 "bison.y"
{ emit_load((yyvsp[(1) - (12)].strval), (yyvsp[(4) - (12)].strval), (yyvsp[(11) - (12)].intval), (yyvsp[(7) - (12)].strval)); ;}
break;
case 7:
/* Line 1455 of yacc.c */
#line 150 "bison.y"
{ emit_load_binary((yyvsp[(1) - (9)].strval), (yyvsp[(4) - (9)].strval), (yyvsp[(8) - (9)].intval)); ;}
break;
case 8:
/* Line 1455 of yacc.c */
#line 152 "bison.y"
{ emit_filter((yyvsp[(1) - (5)].strval), (yyvsp[(4) - (5)].strval), (yyvsp[(5) - (5)].intval));;}
break;
case 9:
/* Line 1455 of yacc.c */
#line 154 "bison.y"
{ emit_order((yyvsp[(1) - (6)].strval), (yyvsp[(4) - (6)].strval), (yyvsp[(6) - (6)].intval));;}
break;
case 10:
/* Line 1455 of yacc.c */
#line 156 "bison.y"
{ emit_join((yyvsp[(1) - (7)].strval),(yyvsp[(6) - (7)].strval)); ;}
break;
case 11:
/* Line 1455 of yacc.c */
#line 158 "bison.y"
{ emit_store((yyvsp[(2) - (9)].strval),(yyvsp[(4) - (9)].strval),(yyvsp[(7) - (9)].strval)); ;}
break;
case 12:
/* Line 1455 of yacc.c */
#line 160 "bison.y"
{ emit_store_binary((yyvsp[(2) - (6)].strval),(yyvsp[(4) - (6)].strval)); ;}
break;
case 13:
/* Line 1455 of yacc.c */
#line 164 "bison.y"
{ emit_name((yyvsp[(1) - (1)].strval)); ;}
break;
case 14:
/* Line 1455 of yacc.c */
#line 165 "bison.y"
{ emit("FIELDNAME %s.%s", (yyvsp[(1) - (3)].strval), (yyvsp[(3) - (3)].strval)); ;}
break;
case 15:
/* Line 1455 of yacc.c */
#line 166 "bison.y"
{ emit("USERVAR %s", (yyvsp[(1) - (1)].strval)); ;}
break;
case 16:
/* Line 1455 of yacc.c */
#line 167 "bison.y"
{ emit_string((yyvsp[(1) - (1)].strval)); ;}
break;
case 17:
/* Line 1455 of yacc.c */
#line 168 "bison.y"
{ emit_number((yyvsp[(1) - (1)].intval)); ;}
break;
case 18:
/* Line 1455 of yacc.c */
#line 169 "bison.y"
{ emit_float((yyvsp[(1) - (1)].floatval)); ;}
break;
case 19:
/* Line 1455 of yacc.c */
#line 170 "bison.y"
{ emit_decimal((yyvsp[(1) - (1)].intval)); ;}
break;
case 20:
/* Line 1455 of yacc.c */
#line 171 "bison.y"
{ emit("BOOL %d", (yyvsp[(1) - (1)].intval)); ;}
break;
case 21:
/* Line 1455 of yacc.c */
#line 172 "bison.y"
{ emit_varchar((yyvsp[(1) - (9)].strval), (yyvsp[(3) - (9)].intval), (yyvsp[(6) - (9)].strval), (yyvsp[(8) - (9)].intval));;}
break;
case 22:
/* Line 1455 of yacc.c */
#line 173 "bison.y"
{ emit_var((yyvsp[(1) - (6)].strval), (yyvsp[(3) - (6)].intval), (yyvsp[(6) - (6)].strval));;}
break;
case 23:
/* Line 1455 of yacc.c */
#line 174 "bison.y"
{ emit_var_asc((yyvsp[(1) - (2)].strval));;}
break;
case 24:
/* Line 1455 of yacc.c */
#line 175 "bison.y"
{ emit_var_desc((yyvsp[(1) - (2)].strval));;}
break;
case 25:
/* Line 1455 of yacc.c */
#line 176 "bison.y"
{ emit_count(); ;}
break;
case 26:
/* Line 1455 of yacc.c */
#line 177 "bison.y"
{ emit_sum(); ;}
break;
case 27:
/* Line 1455 of yacc.c */
#line 178 "bison.y"
{ emit_average(); ;}
break;
case 28:
/* Line 1455 of yacc.c */
#line 179 "bison.y"
{ emit_min(); ;}
break;
case 29:
/* Line 1455 of yacc.c */
#line 180 "bison.y"
{ emit_max(); ;}
break;
case 30:
/* Line 1455 of yacc.c */
#line 184 "bison.y"
{ emit_add(); ;}
break;
case 31:
/* Line 1455 of yacc.c */
#line 185 "bison.y"
{ emit_minus(); ;}
break;
case 32:
/* Line 1455 of yacc.c */
#line 186 "bison.y"
{ emit_mul(); ;}
break;
case 33:
/* Line 1455 of yacc.c */
#line 187 "bison.y"
{ emit_div(); ;}
break;
case 34:
/* Line 1455 of yacc.c */
#line 188 "bison.y"
{ emit("MOD"); ;}
break;
case 35:
/* Line 1455 of yacc.c */
#line 189 "bison.y"
{ emit("MOD"); ;}
break;
case 36:
/* Line 1455 of yacc.c */
#line 191 "bison.y"
{ emit_and(); ;}
break;
case 37:
/* Line 1455 of yacc.c */
#line 192 "bison.y"
{ emit_eq(); ;}
break;
case 38:
/* Line 1455 of yacc.c */
#line 193 "bison.y"
{ emit_or(); ;}
break;
case 39:
/* Line 1455 of yacc.c */
#line 194 "bison.y"
{ emit("XOR"); ;}
break;
case 40:
/* Line 1455 of yacc.c */
#line 195 "bison.y"
{ emit("SHIFT %s", (yyvsp[(2) - (3)].subtok)==1?"left":"right"); ;}
break;
case 41:
/* Line 1455 of yacc.c */
#line 196 "bison.y"
{ emit("NOT"); ;}
break;
case 42:
/* Line 1455 of yacc.c */
#line 197 "bison.y"
{ emit("NOT"); ;}
break;
case 43:
/* Line 1455 of yacc.c */
#line 198 "bison.y"
{ emit_cmp((yyvsp[(2) - (3)].subtok)); ;}
break;
case 44:
/* Line 1455 of yacc.c */
#line 200 "bison.y"
{ emit("CMPSELECT %d", (yyvsp[(2) - (5)].subtok)); ;}
break;
case 45:
/* Line 1455 of yacc.c */
#line 201 "bison.y"
{emit("EXPR");;}
break;
case 46:
/* Line 1455 of yacc.c */
#line 205 "bison.y"
{ emit("ISBOOL %d", (yyvsp[(3) - (3)].intval)); ;}
break;
case 47:
/* Line 1455 of yacc.c */
#line 206 "bison.y"
{ emit("ISBOOL %d", (yyvsp[(4) - (4)].intval)); emit("NOT"); ;}
break;
case 48:
/* Line 1455 of yacc.c */
#line 209 "bison.y"
{ /* nil */
(yyval.intval) = 0;
;}
break;
case 49:
/* Line 1455 of yacc.c */
#line 212 "bison.y"
{ (yyval.intval) = (yyvsp[(3) - (3)].intval);}
break;
case 50:
/* Line 1455 of yacc.c */
#line 216 "bison.y"
{ (yyval.intval) = 1; emit_sel_name((yyvsp[(3) - (3)].strval));;}
break;
case 51:
/* Line 1455 of yacc.c */
#line 217 "bison.y"
{ (yyval.intval) = (yyvsp[(1) - (5)].intval) + 1; emit_sel_name((yyvsp[(5) - (5)].strval));;}
break;
case 52:
/* Line 1455 of yacc.c */
#line 221 "bison.y"
{ (yyval.intval) = 1; ;}
break;
case 53:
/* Line 1455 of yacc.c */
#line 222 "bison.y"
{(yyval.intval) = (yyvsp[(1) - (3)].intval) + 1; ;}
break;
case 54:
/* Line 1455 of yacc.c */
#line 226 "bison.y"
{ (yyval.intval) = 1; ;}
break;
case 55:
/* Line 1455 of yacc.c */
#line 227 "bison.y"
{ (yyval.intval) = 1 + (yyvsp[(3) - (3)].intval); ;}
break;
case 56:
/* Line 1455 of yacc.c */
#line 230 "bison.y"
{ /* nil */
(yyval.intval) = 0
;}
break;
case 58:
/* Line 1455 of yacc.c */
#line 235 "bison.y"
{ emit("FILTER BY"); ;}
break;
case 59:
/* Line 1455 of yacc.c */
#line 238 "bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(2) - (4)].strval));;}
break;
case 60:
/* Line 1455 of yacc.c */
#line 239 "bison.y"
{ (yyval.intval) = 1; emit_join_tab((yyvsp[(2) - (5)].strval)); ;}
break;
case 61:
/* Line 1455 of yacc.c */
#line 241 "bison.y"
{ /* nil */
(yyval.intval) = 0
;}
break;
case 62:
/* Line 1455 of yacc.c */
#line 244 "bison.y"
{ emit_limit((yyvsp[(2) - (2)].intval)); ;}
break;
/* Line 1455 of yacc.c */
#line 2023 "bison.cu"
default: break;
}
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
*++yyvsp = yyval;
/* Now `shift' the result of the reduction. Determine what state
that goes to, based on the state we popped back to and the rule
number reduced by. */
yyn = yyr1[yyn];
yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
yystate = yytable[yystate];
else
yystate = yydefgoto[yyn - YYNTOKENS];
goto yynewstate;
/*------------------------------------.
| yyerrlab -- here on detecting error |
`------------------------------------*/
yyerrlab:
/* If not already recovering from an error, report this error. */
if (!yyerrstatus)
{
++yynerrs;
#if ! YYERROR_VERBOSE
yyerror (YY_("syntax error"));
#else
{
YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
{
YYSIZE_T yyalloc = 2 * yysize;
if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
yyalloc = YYSTACK_ALLOC_MAXIMUM;
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
yymsg = (char *) YYSTACK_ALLOC (yyalloc);
if (yymsg)
yymsg_alloc = yyalloc;
else
{
yymsg = yymsgbuf;
yymsg_alloc = sizeof yymsgbuf;
}
}
if (0 < yysize && yysize <= yymsg_alloc)
{
(void) yysyntax_error (yymsg, yystate, yychar);
yyerror (yymsg);
}
else
{
yyerror (YY_("syntax error"));
if (yysize != 0)
goto yyexhaustedlab;
}
}
#endif
}
if (yyerrstatus == 3)
{
/* If just tried and failed to reuse lookahead token after an
error, discard it. */
if (yychar <= YYEOF)
{
/* Return failure if at end of input. */
if (yychar == YYEOF)
YYABORT;
}
else
{
yydestruct ("Error: discarding",
yytoken, &yylval);
yychar = YYEMPTY;
}
}
/* Else will try to reuse lookahead token after shifting the error
token. */
goto yyerrlab1;
/*---------------------------------------------------.
| yyerrorlab -- error raised explicitly by YYERROR. |
`---------------------------------------------------*/
yyerrorlab:
/* Pacify compilers like GCC when the user code never invokes
YYERROR and the label yyerrorlab therefore never appears in user
code. */
if (/*CONSTCOND*/ 0)
goto yyerrorlab;
/* Do not reclaim the symbols of the rule which action triggered
this YYERROR. */
YYPOPSTACK (yylen);
yylen = 0;
YY_STACK_PRINT (yyss, yyssp);
yystate = *yyssp;
goto yyerrlab1;
/*-------------------------------------------------------------.
| yyerrlab1 -- common code for both syntax error and YYERROR. |
`-------------------------------------------------------------*/
yyerrlab1:
yyerrstatus = 3; /* Each real token shifted decrements this. */
for (;;)
{
yyn = yypact[yystate];
if (yyn != YYPACT_NINF)
{
yyn += YYTERROR;
if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
{
yyn = yytable[yyn];
if (0 < yyn)
break;
}
}
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
YYABORT;
yydestruct ("Error: popping",
yystos[yystate], yyvsp);
YYPOPSTACK (1);
yystate = *yyssp;
YY_STACK_PRINT (yyss, yyssp);
}
*++yyvsp = yylval;
/* Shift the error token. */
YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
yystate = yyn;
goto yynewstate;
/*-------------------------------------.
| yyacceptlab -- YYACCEPT comes here. |
`-------------------------------------*/
yyacceptlab:
yyresult = 0;
goto yyreturn;
/*-----------------------------------.
| yyabortlab -- YYABORT comes here. |
`-----------------------------------*/
yyabortlab:
yyresult = 1;
goto yyreturn;
#if !defined(yyoverflow) || YYERROR_VERBOSE
/*-------------------------------------------------.
| yyexhaustedlab -- memory exhaustion comes here. |
`-------------------------------------------------*/
yyexhaustedlab:
yyerror (YY_("memory exhausted"));
yyresult = 2;
/* Fall through. */
#endif
yyreturn:
if (yychar != YYEMPTY)
yydestruct ("Cleanup: discarding lookahead",
yytoken, &yylval);
/* Do not reclaim the symbols of the rule which action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
YY_STACK_PRINT (yyss, yyssp);
while (yyssp != yyss)
{
yydestruct ("Cleanup: popping",
yystos[*yyssp], yyvsp);
YYPOPSTACK (1);
}
#ifndef yyoverflow
if (yyss != yyssa)
YYSTACK_FREE (yyss);
#endif
#if YYERROR_VERBOSE
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
#endif
/* Make sure YYID is used. */
return YYID (yyresult);
}
/* Line 1675 of yacc.c */
#line 247 "bison.y"
#include "filter.cu"
#include "select.cu"
#include "merge.cu"
#include "zone_map.cu"
FILE *file_pointer;
queue<string> namevars;
queue<string> typevars;
queue<int> sizevars;
queue<int> cols;
queue<unsigned int> j_col_count;
unsigned int sel_count = 0;
unsigned int join_cnt = 0;
int join_col_cnt = 0;
unsigned int eqq = 0;
stack<string> op_join;
unsigned int statement_count = 0;
map<string,unsigned int> stat;
bool scan_state = 0;
string separator, f_file;
CUDPPHandle theCudpp;
using namespace thrust::placeholders;
void emit_name(char *name)
{
op_type.push("NAME");
op_value.push(name);
}
void emit_limit(int val)
{
op_nums.push(val);
}
void emit_string(char *str)
{ // strip the enclosing quotes (first and last character of the literal)
string sss(str,1, strlen(str)-2);
op_type.push("STRING");
op_value.push(sss);
}
void emit_number(int_type val)
{
op_type.push("NUMBER");
op_nums.push(val);
}
void emit_float(float_type val)
{
op_type.push("FLOAT");
op_nums_f.push(val);
}
void emit_decimal(float_type val)
{
op_type.push("DECIMAL");
op_nums_f.push(val);
}
void emit_mul()
{
op_type.push("MUL");
}
void emit_add()
{
op_type.push("ADD");
}
void emit_div()
{
op_type.push("DIV");
}
void emit_and()
{
op_type.push("AND");
if (join_col_cnt == -1) {
join_col_cnt++;
}
join_col_cnt++;
eqq = 0;
}
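// emit_and()/emit_eq() together count how many equality predicates form the current join
// condition: every AND increments join_col_cnt and every '=' increments eqq; once eqq
// reaches join_col_cnt + 1 the total number of key columns is pushed onto j_col_count and
// the counter is reset, while a single '=' with no preceding AND pushes 1.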
void emit_eq()
{
//op_type.push("JOIN");
eqq++;
join_cnt++;
if(eqq == join_col_cnt+1) {
j_col_count.push(join_col_cnt+1);
join_col_cnt = -1;
}
else if (join_col_cnt == -1 )
j_col_count.push(1);
}
void emit_or()
{
op_type.push("OR");
}
void emit_minus()
{
op_type.push("MINUS");
}
void emit_cmp(int val)
{
op_type.push("CMP");
op_nums.push(val);
}
void emit(char *s, ...)
{
}
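// emit() is a no-op stub: grammar actions that carry state call the dedicated emit_*
// helpers below, which push operations onto the op_type/op_value/op_nums queues consumed
// by emit_select/emit_filter/emit_join/emit_order.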
void emit_var(char *s, int c, char *f)
{
namevars.push(s);
typevars.push(f);
sizevars.push(0);
cols.push(c);
}
void emit_var_asc(char *s)
{
op_type.push(s);
op_value.push("ASC");
}
void emit_var_desc(char *s)
{
op_type.push(s);
op_value.push("DESC");
}
void emit_varchar(char *s, int c, char *f, int d)
{
namevars.push(s);
typevars.push(f);
sizevars.push(d);
cols.push(c);
}
void emit_sel_name(char *s)
{
op_type.push("emit sel_name");
op_value.push(s);
sel_count++;
}
void emit_count()
{
op_type.push("COUNT");
}
void emit_sum()
{
op_type.push("SUM");
}
void emit_average()
{
op_type.push("AVG");
}
void emit_min()
{
op_type.push("MIN");
}
void emit_max()
{
op_type.push("MAX");
}
void emit_join_tab(char *s)
{
op_join.push(s);
};
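// order_inplace() sorts CudaSet `a` in place on the key columns in exe_type: it builds an
// identity permutation, refines it with update_permutation() for each key column (CudaChar
// columns are processed character-column by character-column, back to front), and finally
// reorders every column listed in field_names with apply_permutation(); `temp` is a shared
// scratch buffer sized from the largest participating record count.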
void order_inplace(CudaSet* a, stack<string> exe_type, set<string> field_names, unsigned int segment)
{
std::clock_t start1 = std::clock();
unsigned int sz = a->mRecCount;
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(sz);
thrust::sequence(permutation, permutation+sz,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
void* temp;
// find the largest mRecSize of all data sources exe_type.top()
unsigned int maxSize = 0;
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
CudaSet *t = varNames[setMap[*it]];
//cout << "MAX of " << setMap[*it] << " = " << t->mRecCount << endl;
if(t->mRecCount > maxSize)
maxSize = t->mRecCount;
};
//cout << "max size " << maxSize << endl;
//cout << "sort alloc " << maxSize << endl;
//cout << "order mem " << getFreeMem() << endl;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, maxSize*float_size));
for(int i=0; !exe_type.empty(); ++i, exe_type.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if ((a->type)[colInd] == 0)
update_permutation(a->d_columns_int[a->type_index[colInd]], raw_ptr, sz, "ASC", (int_type*)temp);
else if ((a->type)[colInd] == 1)
update_permutation(a->d_columns_float[a->type_index[colInd]], raw_ptr, sz,"ASC", (float_type*)temp);
else {
CudaChar* c = a->h_columns_cuda_char[a->type_index[colInd]];
for(int j=(c->mColumnCount)-1; j>=0 ; j--)
update_permutation((c->d_columns)[j], raw_ptr, sz, "ASC", (char*)temp);
};
};
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
int i = a->columnNames[*it];
if ((a->type)[i] == 0)
apply_permutation(a->d_columns_int[a->type_index[i]], raw_ptr, sz, (int_type*)temp);
else if ((a->type)[i] == 1)
apply_permutation(a->d_columns_float[a->type_index[i]], raw_ptr, sz, (float_type*)temp);
else {
CudaChar* c = a->h_columns_cuda_char[a->type_index[i]];
for(int j=(c->mColumnCount)-1; j>=0 ; j--)
apply_permutation((c->d_columns)[j], raw_ptr, sz, (char*)temp);
};
};
cudaFree(temp);
thrust::device_free(permutation);
}
void emit_join(char *s, char *j1)
{
string j2 = op_join.top();
op_join.pop();
statement_count++;
if (scan_state == 0) {
if (stat.find(j1) == stat.end()) {
cout << "Join : couldn't find variable " << j1 << endl;
exit(1);
};
if (stat.find(j2) == stat.end()) {
cout << "Join : couldn't find variable " << j2 << endl;
exit(1);
};
stat[s] = statement_count;
stat[j1] = statement_count;
stat[j2] = statement_count;
return;
};
if(varNames.find(j1) == varNames.end() || varNames.find(j2) == varNames.end()) {
clean_queues();
return;
};
CudaSet* left = varNames.find(j1)->second;
CudaSet* right = varNames.find(j2)->second;
queue<string> op_sel;
queue<string> op_sel_as;
for(int i=0; i < sel_count; i++) {
op_sel.push(op_value.front());
op_value.pop();
op_sel_as.push(op_value.front());
op_value.pop();
};
string f1 = op_value.front();
op_value.pop();
string f2 = op_value.front();
op_value.pop();
cout << "JOIN " << s << " " << getFreeMem() << endl;
std::clock_t start1 = std::clock();
CudaSet* c = new CudaSet(right,left,0,op_sel, op_sel_as);
if (left->mRecCount == 0 || right->mRecCount == 0) {
c = new CudaSet(left,right,0, op_sel, op_sel_as);
varNames[s] = c;
clean_queues();
return;
};
unsigned int colInd1 = (left->columnNames).find(f1)->second;
unsigned int colInd2 = (right->columnNames).find(f2)->second;
if ((left->type)[colInd1] != 0 || (right->type)[colInd2] != 0) {
cout << "Right now only integer joins are supported " << endl;
exit(0);
};
set<string> field_names;
stack<string> exe_type;
exe_type.push(f2);
field_names.insert(f2);
// need to allocate all right columns
queue<string> cc;
queue<string> c1(op_sel);
while(!c1.empty()) {
if(right->columnNames.find(c1.front()) != right->columnNames.end()) {
if(f2 != c1.front())
cc.push(c1.front());
};
c1.pop();
};
cc.push(f2);
if(right->prm.size())
allocColumns(right, cc);
unsigned int rcount;
if(!right->prm.empty()) {
rcount = std::accumulate(right->prm_count.begin(), right->prm_count.end(), 0 );
}
else
rcount = right->mRecCount;
//cout << "rcount = " << rcount << endl;
queue<string> ct(cc);
while(!ct.empty()) {
right->allocColumnOnDevice(right->columnNames[ct.front()], rcount);
ct.pop();
};
unsigned int cnt_r = 0;
if(right->prm.size() == 0) {
//copy all records
for(unsigned int i = 0; i < right->mColumnCount; i++)
right->CopyColumnToGpu(i);
cnt_r = right->mRecCount;
}
else {
//copy and gather all records
for(unsigned int i = 0; i < right->segCount; i++) {
copyColumns(right, cc, i, cnt_r);
cnt_r = cnt_r + right->prm_count[i];
};
};
unsigned int tt;
if(left->maxRecs > rcount)
tt = left->maxRecs;
else
tt = rcount;
//here we need to make sure that rr is ordered. If not then we order it and keep the permutation
bool sorted = thrust::is_sorted(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r);
thrust::device_vector<unsigned int> v(cnt_r);
thrust::sequence(v.begin(),v.end(),0,1);
thrust::device_ptr<int_type> d_tmp = thrust::device_malloc<int_type>(tt);
if(!sorted) {
thrust::sort_by_key(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r, v.begin());
for(unsigned int i = 0; i < right->mColumnCount; i++) {
if(i != colInd2) {
if(right->type[i] == 0) {
thrust::gather(v.begin(), v.end(), right->d_columns_int[right->type_index[i]].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt_r, right->d_columns_int[right->type_index[i]].begin());
}
else if(right->type[i] == 1) {
thrust::gather(v.begin(), v.end(), right->d_columns_float[right->type_index[i]].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt_r, right->d_columns_float[right->type_index[i]].begin());
}
};
};
thrust::sequence(v.begin(),v.end(),0,1);
};
thrust::device_free(d_tmp);
while(!cc.empty())
cc.pop();
cc.push(f1);
allocColumns(left, cc);
//cout << "successfully loaded l && r " << cnt_l << " " << cnt_r << " " << getFreeMem() << endl;
thrust::device_vector<unsigned int> d_res1;
thrust::device_vector<unsigned int> d_res2;
thrust::device_ptr<uint2> res = thrust::device_malloc<uint2>(left->maxRecs);
unsigned int cnt_l, res_count, tot_count = 0, offset = 0, k = 0;
queue<string> lc(cc);
curr_segment = 10000000;
CUDPPResult result;
// now for 64bit values we need to create several HashTables where each of them will keep a certain range of values
// lets find out how many tables we need
int_type max_val = right->d_columns_int[right->type_index[colInd2]][rcount-1];
unsigned int tab_count = (max_val / std::numeric_limits<unsigned int>::max()) + 1;
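// CUDPP hash tables take 32-bit keys, so the sorted 64-bit join column is split into
// tab_count ranges of UINT_MAX values each. One multivalue hash table is built per range;
// keys are rebased into 32 bits by subtracting i * UINT_MAX before insertion, and the
// stored values are the original row positions taken from v.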
vector<CUDPPHandle> tabs;
vector<unsigned int> tab_nums;
unsigned int v_offset = 0;
int_type min_v, max_v;
thrust::device_ptr<unsigned int> d_r = thrust::device_malloc<unsigned int>(tt);
for(unsigned int i = 0; i < tab_count; i ++) {
// find out rcount
min_v = i*std::numeric_limits<unsigned int>::max();
max_v = min_v + std::numeric_limits<unsigned int>::max();
unsigned int loc_count = thrust::count_if(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + rcount,
_1 > min_v && _1 <= max_v );
CUDPPHandle hash_table_handle;
CUDPPHashTableConfig config;
config.type = CUDPP_MULTIVALUE_HASH_TABLE;
config.kInputSize = loc_count;
config.space_usage = 1.5f;
//cout << "creating table with " << loc_count << " " << getFreeMem() << endl;
result = cudppHashTable(theCudpp, &hash_table_handle, &config);
//if (result == CUDPP_SUCCESS)
// cout << "hash table created " << getFreeMem() << endl;
//cout << "INSERT " << " " << loc_count << " " << getFreeMem() << endl;
if(i != 0)
thrust::transform(right->d_columns_int[right->type_index[colInd2]].begin() + v_offset, right->d_columns_int[right->type_index[colInd2]].begin() + v_offset + loc_count,
d_r, _1 - i*std::numeric_limits<unsigned int>::max());
else
thrust::copy(right->d_columns_int[right->type_index[colInd2]].begin() + v_offset, right->d_columns_int[right->type_index[colInd2]].begin() + v_offset + loc_count, d_r);
result = cudppHashInsert(hash_table_handle, thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(v.data() + v_offset), loc_count);
//if (result == CUDPP_SUCCESS)
// cout << "hash table inserted " << getFreeMem() << endl;
v_offset = v_offset + loc_count;
tabs.push_back(hash_table_handle);
tab_nums.push_back(loc_count);
};
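// Probe phase: for each left segment and each key range, left keys are rebased into the
// range's 32-bit space (trans_int), probed with cudppHashRetrieve() into per-row uint2
// {index, count} results, the counts are exclusive-scanned into output addresses, and
// join_functor expands the matches into d_res1 (left rows) and d_res2 (right rows), which
// are then used to gather the selected columns into the result set c.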
for (unsigned int i = 0; i < left->segCount; i++) {
cout << "segment " << i << " " << getFreeMem() << endl;
cnt_l = 0;
copyColumns(left, lc, i, cnt_l);
if(left->prm.size() == 0) {
//copy all records
cnt_l = left->mRecCount;
}
else {
cnt_l = left->prm_count[i];
};
if (cnt_l) {
unsigned int off = 0;
for(unsigned int j = 0; j < tab_count; j ++) {
if(j)
off = off + tab_nums[j-1];
thrust::device_vector<unsigned int> tc(1);
tc[0] = j;
//when copying to d_r need to make sure to set non-relevant values to zero otherwise they will get truncated to relevant values
thrust::counting_iterator<unsigned int, thrust::device_space_tag> begin(0);
trans_int t(thrust::raw_pointer_cast(tc.data()),thrust::raw_pointer_cast(left->d_columns_int[left->type_index[colInd1]].data()), thrust::raw_pointer_cast(d_r));
thrust::for_each(begin, begin + cnt_l, t);
result = cudppHashRetrieve(tabs[j], thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(res), cnt_l);
if (result != CUDPP_SUCCESS)
cout << "Failed retrieve " << endl;
uint2 rr = thrust::reduce(res, res+cnt_l, make_uint2(0,0), Uint2Sum());
res_count = rr.y;
if(res_count) {
uint2_split ff(thrust::raw_pointer_cast(res),thrust::raw_pointer_cast(d_r));
thrust::for_each(begin, begin + cnt_l, ff);
thrust::exclusive_scan(d_r, d_r+cnt_l, d_r ); // addresses
tot_count = tot_count + res_count;
d_res1.resize(res_count);
d_res2.resize(res_count);
join_functor ff1(thrust::raw_pointer_cast(res),
thrust::raw_pointer_cast(d_r),
thrust::raw_pointer_cast(d_res1.data()),
thrust::raw_pointer_cast(d_res2.data()));
thrust::for_each(begin, begin + cnt_l, ff1);
thrust::transform(d_res2.begin(), d_res2.end(), d_res2.begin(), _1 + off);
offset = c->mRecCount;
c->resize(res_count);
queue<string> op_sel1(op_sel);
while(!op_sel1.empty()) {
while(!cc.empty())
cc.pop();
cc.push(op_sel1.front());
if(left->columnNames.find(op_sel1.front()) != left->columnNames.end()) {
// copy field's segment to device, gather it and copy to the host
unsigned int colInd = left->columnNames[op_sel1.front()];
allocColumns(left, cc);
copyColumns(left, cc, i, k);
//gather
if(left->type[colInd] == 0) {
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter(left->d_columns_int[left->type_index[colInd]].begin(), d_res1.begin());
thrust::copy(iter, iter + res_count, c->h_columns_int[c->type_index[c->columnNames[op_sel1.front()]]].begin() + offset);
}
else if(left->type[colInd] == 1) {
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter(left->d_columns_float[left->type_index[colInd]].begin(), d_res1.begin());
thrust::copy(iter, iter + res_count, c->h_columns_float[c->type_index[c->columnNames[op_sel1.front()]]].begin() + offset);
};
}
else {
unsigned int colInd = right->columnNames[op_sel1.front()];
//gather
if(right->type[colInd] == 0) {
thrust::permutation_iterator<ElementIterator_int,IndexIterator> iter(right->d_columns_int[right->type_index[colInd]].begin(), d_res2.begin());
thrust::copy(iter, iter + res_count, c->h_columns_int[c->type_index[c->columnNames[op_sel1.front()]]].begin() + offset);
}
else if(right->type[colInd] == 1) {
thrust::permutation_iterator<ElementIterator_float,IndexIterator> iter(right->d_columns_float[right->type_index[colInd]].begin(), d_res2.begin());
thrust::copy(iter, iter + res_count, c->h_columns_float[c->type_index[c->columnNames[op_sel1.front()]]].begin() + offset);
};
};
op_sel1.pop();
};
};
};
};
};
for(unsigned int i = 0; i < tab_count; i ++)
cudppDestroyHashTable(theCudpp, tabs[i]);
thrust::device_free(res);
thrust::device_free(d_r);
d_res1.resize(0);
d_res1.shrink_to_fit();
d_res2.resize(0);
d_res2.shrink_to_fit();
left->deAllocOnDevice();
right->deAllocOnDevice();
c->deAllocOnDevice();
cout << "join final end " << tot_count << " " << getFreeMem() << endl;
varNames[s] = c;
c->mRecCount = tot_count;
clean_queues();
if(stat[s] == statement_count) {
c->free();
varNames.erase(s);
};
if(stat[j1] == statement_count) {
left->free();
varNames.erase(j1);
};
if(stat[j2] == statement_count && (strcmp(j1,j2.c_str()) != 0)) {
right->free();
varNames.erase(j2);
};
std::cout<< "join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
void emit_order(char *s, char *f, int e, int ll)
{
if(ll == 0)
statement_count++;
if (scan_state == 0 && ll == 0) {
if (stat.find(f) == stat.end()) {
cout << "Order : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
return;
};
if(varNames.find(f) == varNames.end() ) {
clean_queues();
return;
};
CudaSet* a = varNames.find(f)->second;
if (a->mRecCount == 0) {
if(varNames.find(s) == varNames.end())
varNames[s] = new CudaSet(0,1);
else {
CudaSet* c = varNames.find(s)->second;
c->mRecCount = 0;
};
return;
};
stack<string> exe_type, exe_value;
cout << "order: " << s << " " << f << endl;
for(int i=0; !op_type.empty(); ++i, op_type.pop(),op_value.pop()) {
if ((op_type.front()).compare("NAME") == 0) {
exe_type.push(op_value.front());
exe_value.push("ASC");
}
else {
exe_type.push(op_type.front());
exe_value.push(op_value.front());
};
};
// initialize permutation to [0, 1, 2, ... ,N-1]
thrust::device_ptr<unsigned int> permutation = thrust::device_malloc<unsigned int>(a->mRecCount);
thrust::sequence(permutation, permutation+(a->mRecCount));
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation);
CudaSet *b = a->copyDeviceStruct();
b->mRecCount = a->mRecCount;
// find the largest mRecSize of all data sources
stack<string> tp(exe_type);
queue<string> op_vx;
while (!tp.empty()) {
op_vx.push(tp.top());
tp.pop();
};
unsigned int maxSize = a->mRecCount, cnt = 0;
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, maxSize*float_size));
varNames[setMap[exe_type.top()]]->oldRecCount = varNames[setMap[exe_type.top()]]->mRecCount;
allocColumns(a, op_vx);
copyColumns(a, op_vx, 0, cnt);
varNames[setMap[exe_type.top()]]->mRecCount = varNames[setMap[exe_type.top()]]->oldRecCount;
for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) {
int colInd = (a->columnNames).find(exe_type.top())->second;
if ((a->type)[colInd] == 0)
update_permutation(a->d_columns_int[a->type_index[colInd]], raw_ptr, a->mRecCount, exe_value.top(), (int_type*)temp);
else if ((a->type)[colInd] == 1)
update_permutation(a->d_columns_float[a->type_index[colInd]], raw_ptr, a->mRecCount,exe_value.top(), (float_type*)temp);
else {
CudaChar* c = a->h_columns_cuda_char[a->type_index[colInd]];
for(int j=(c->mColumnCount)-1; j>=0 ; j--)
update_permutation((c->d_columns)[j], raw_ptr, a->mRecCount, exe_value.top(), (char*)temp);
};
};
// gather a's prm to b's prm
thrust::device_vector<unsigned int> p(a->mRecCount);
if(a->prm.size() != 0) {
thrust::device_vector<unsigned int> p_a(a->mRecCount);
b->prm.push_back(new unsigned int[a->mRecCount]);
b->prm_count.push_back(a->mRecCount);
b->prm_index.push_back('R');
cudaMemcpy((void**)(thrust::raw_pointer_cast(p_a.data())), (void**)a->prm[0], 4*a->mRecCount, cudaMemcpyHostToDevice);
thrust::gather(permutation, permutation+a->mRecCount, p_a.begin(), p.begin());
cudaMemcpy((void**)b->prm[0], (void**)(thrust::raw_pointer_cast(p.data())), 4*a->mRecCount, cudaMemcpyDeviceToHost);
}
else {
b->prm.push_back(new unsigned int[a->mRecCount]);
b->prm_count.push_back(a->mRecCount);
b->prm_index.push_back('R');
thrust::copy(permutation, permutation+a->mRecCount, p.begin());
cudaMemcpy((void**)b->prm[0], (void**)(thrust::raw_pointer_cast(p.data())), 4*a->mRecCount, cudaMemcpyDeviceToHost);
};
b->deAllocOnDevice();
a->deAllocOnDevice();
thrust::device_free(permutation);
cudaFree(temp);
varNames[s] = b;
b->segCount = 1;
if (a->fact_table == 1)
b->fact_table = 1;
else
b->fact_table = 0;
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(f);
};
}
void emit_select(char *s, char *f, int ll)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(f) == stat.end()) {
cout << "Select : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
return;
};
if(varNames.find(f) == varNames.end()) {
clean_queues();
return;
};
queue<string> op_v1(op_value);
while(op_v1.size() > ll)
op_v1.pop();
stack<string> op_v2;
queue<string> op_v3;
for(int i=0; i < ll; ++i) {
op_v2.push(op_v1.front());
op_v3.push(op_v1.front());
op_v1.pop();
};
CudaSet *a;
a = varNames.find(f)->second;
if(a->mRecCount == 0) {
CudaSet *c;
c = new CudaSet(0,1);
varNames[s] = c;
clean_queues();
return;
};
cout << "SELECT " << s << " " << f << endl;
std::clock_t start1 = std::clock();
// here we need to determine the column count and composition
queue<string> op_v(op_value);
queue<string> op_vx;
set<string> field_names;
map<string,string> aliases;
string tt;
for(int i=0; !op_v.empty(); ++i, op_v.pop()) {
if(a->columnNames.find(op_v.front()) != a->columnNames.end()) {
field_names.insert(op_v.front());
if(aliases.count(op_v.front()) == 0 && aliases.size() < ll) {
tt = op_v.front();
op_v.pop();
aliases[tt] = op_v.front();
};
};
};
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
op_vx.push(*it);
};
// find out how many columns a new set will have
queue<string> op_t(op_type);
int_type col_count = 0;
for(int i=0; !op_t.empty(); ++i, op_t.pop())
if((op_t.front()).compare("emit sel_name") == 0)
col_count++;
CudaSet* b, *c;
curr_segment = 10000000;
allocColumns(a, op_vx);
unsigned int cycle_count = 1;
if(a->prm.size())
cycle_count = varNames[setMap[op_value.front()]]->segCount;
unsigned int ol_count = a->mRecCount, cnt;
varNames[setMap[op_value.front()]]->oldRecCount = varNames[setMap[op_value.front()]]->mRecCount;
b = new CudaSet(0, col_count);
bool b_set = 0, c_set = 0;
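// Main per-segment cycle: copy the referenced columns of segment i to the GPU, and when
// grouping is requested (ll != 0) order and group them in place before running select()
// into the partial result b; grouped partials are accumulated into c and merged after the
// loop, otherwise b itself becomes the result.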
for(unsigned int i = 0; i < cycle_count; i++) { // MAIN CYCLE
cout << "cycle " << i << " select mem " << getFreeMem() << endl;
std::clock_t start2 = std::clock();
cnt = 0;
copyColumns(a, op_vx, i, cnt);
if(a->mRecCount) {
if (ll != 0) {
order_inplace(a,op_v2,field_names,i);
a->GroupBy(op_v3);
};
select(op_type,op_value,op_nums, op_nums_f,a,b, a->mRecCount);
if(!b_set) {
for ( map<string,int>::iterator it=b->columnNames.begin() ; it != b->columnNames.end(); ++it )
setMap[(*it).first] = s;
b_set = 1;
};
if (ll != 0) {
if (!c_set) {
c = new CudaSet(b->mRecCount, col_count);
c->fact_table = 1;
c->segCount = 1;
c_set = 1;
}
else {
c->resize(b->mRecCount);
};
add(c,b,op_v3);
};
};
};
a->mRecCount = ol_count;
varNames[setMap[op_value.front()]]->mRecCount = varNames[setMap[op_value.front()]]->oldRecCount;
a->deAllocOnDevice();
if (ll != 0) {
CudaSet *r = merge(c,op_v3, op_v2, aliases);
c->free();
c = r;
};
c->deAllocOnDevice();
c->maxRecs = c->mRecCount;
c->name = s;
c->keep = 1;
for ( map<string,int>::iterator it=c->columnNames.begin() ; it != c->columnNames.end(); ++it ) {
setMap[(*it).first] = s;
};
cout << "final select " << c->mRecCount << endl;
clean_queues();
if (ll != 0) {
varNames[s] = c;
b->free();
}
else
varNames[s] = b;
varNames[s]->keep = 1;
if(stat[s] == statement_count) {
varNames[s]->free();
varNames.erase(s);
};
if(stat[f] == statement_count && a->keep == 0) {
a->free();
varNames.erase(f);
};
std::cout<< "select time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
void emit_filter(char *s, char *f, int e)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(f) == stat.end()) {
cout << "Filter : couldn't find variable " << f << endl;
exit(1);
};
stat[s] = statement_count;
stat[f] = statement_count;
clean_queues();
return;
};
if(varNames.find(f) == varNames.end()) {
clean_queues();
return;
};
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
std::clock_t start1 = std::clock();
if(a->mRecCount == 0) {
b = new CudaSet(0,1);
}
else {
cout << "FILTER " << s << " " << f << " " << getFreeMem() << endl;
b = a->copyDeviceStruct();
b->name = s;
unsigned int cycle_count = 1, cnt = 0;
allocColumns(a, op_value);
varNames[setMap[op_value.front()]]->oldRecCount = varNames[setMap[op_value.front()]]->mRecCount;
if(a->segCount != 1)
cycle_count = varNames[setMap[op_value.front()]]->segCount;
oldCount = a->mRecCount;
thrust::device_vector<unsigned int> p(a->maxRecs);
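// Per-segment filtering guided by the zone map: zone_map_check() returns 'R' when the
// segment statistics cannot decide the predicate, so the columns are copied to the GPU and
// filtered; any other result lets setPrm() record the segment's row selection without
// reading the column data.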
for(unsigned int i = 0; i < cycle_count; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, a, i);
cout << "MAP CHECK " << map_check << endl;
if(map_check == 'R') {
copyColumns(a, op_value, i, cnt);
filter(op_type,op_value,op_nums, op_nums_f,a, b, i, p);
}
else {
setPrm(a,b,map_check,i);
}
};
a->mRecCount = oldCount;
varNames[setMap[op_value.front()]]->mRecCount = varNames[setMap[op_value.front()]]->oldRecCount;
cout << "filter is finished " << b->mRecCount << " " << getFreeMem() << endl;
a->deAllocOnDevice();
};
clean_queues();
if (varNames.count(s) > 0)
varNames[s]->free();
varNames[s] = b;
if(stat[s] == statement_count) {
b->free();
varNames.erase(s);
};
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(f);
};
std::cout<< "filter time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
}
void emit_store(char *s, char *f, char* sep)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(s) == stat.end()) {
cout << "Store : couldn't find variable " << s << endl;
exit(1);
};
stat[s] = statement_count;
return;
};
if(varNames.find(s) == varNames.end())
return;
CudaSet* a = varNames.find(s)->second;
cout << "STORE: " << s << " " << f << " " << sep << endl;
int limit = 0;
if(!op_nums.empty()) {
limit = op_nums.front();
op_nums.pop();
};
a->Store(f,sep, limit, 0);
if(stat[s] == statement_count && a->keep == 0) {
a->free();
varNames.erase(s);
};
};
void emit_store_binary(char *s, char *f)
{
statement_count++;
if (scan_state == 0) {
if (stat.find(s) == stat.end()) {
cout << "Store : couldn't find variable " << s << endl;
exit(1);
};
stat[s] = statement_count;
return;
};
if(varNames.find(s) == varNames.end())
return;
CudaSet* a = varNames.find(s)->second;
if(stat[f] == statement_count)
a->deAllocOnDevice();
printf("STORE: %s %s \n", s, f);
int limit = 0;
if(!op_nums.empty()) {
limit = op_nums.front();
op_nums.pop();
};
total_count = 0;
total_segments = 0;
fact_file_loaded = 0;
while(!fact_file_loaded) {
cout << "LOADING " << f_file << " " << separator << endl;
fact_file_loaded = a->LoadBigFile(f_file.c_str(), separator.c_str());
//cout << "STORING " << f << " " << limit << endl;
a->Store(f,"", limit, 1);
};
if(stat[f] == statement_count && !a->keep) {
a->free();
varNames.erase(s);
};
};
void emit_load_binary(char *s, char *f, int d)
{
statement_count++;
if (scan_state == 0) {
stat[s] = statement_count;
return;
};
printf("BINARY LOAD: %s %s \n", s, f);
CudaSet *a;
unsigned int segCount, maxRecs;
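// The per-column binary header "<table>.<col>.header" holds the total record count
// (8 bytes) followed by the segment count and the maximum records per segment (4 bytes each).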
char f1[100];
strcpy(f1, f);
strcat(f1,".");
char col_pos[3];
itoaa(cols.front(),col_pos);
strcat(f1,col_pos);
strcat(f1,".header");
FILE* ff = fopen(f1, "rb");
//fseeko(ff, -16, SEEK_END);
fread((char *)&totalRecs, 8, 1, ff);
fread((char *)&segCount, 4, 1, ff);
fread((char *)&maxRecs, 4, 1, ff);
fclose(ff);
queue<string> names(namevars);
while(!names.empty()) {
setMap[names.front()] = s;
names.pop();
};
a = new CudaSet(namevars, typevars, sizevars, cols,totalRecs, f);
a->segCount = segCount;
a->maxRecs = maxRecs;
a->keep = 1;
varNames[s] = a;
if(stat[s] == statement_count ) {
a->free();
varNames.erase(s);
};
}
void emit_load(char *s, char *f, int d, char* sep)
{
statement_count++;
if (scan_state == 0) {
stat[s] = statement_count;
return;
};
printf("LOAD: %s %s %d %s \n", s, f, d, sep);
CudaSet *a;
a = new CudaSet(namevars, typevars, sizevars, cols, process_count);
a->mRecCount = 0;
a->resize(process_count);
a->keep = true;
a->fact_table = 1;
string separator1(sep);
separator = separator1;
string ff(f);
f_file = ff;
a->maxRecs = a->mRecCount;
a->segCount = 0;
varNames[s] = a;
if(stat[s] == statement_count) {
a->free();
varNames.erase(s);
};
}
void yyerror(char *s, ...)
{
extern int yylineno;
va_list ap;
va_start(ap, s);
fprintf(stderr, "%d: error: ", yylineno);
vfprintf(stderr, s, ap);
fprintf(stderr, "\n");
}
void clean_queues()
{
while(!op_type.empty()) op_type.pop();
while(!op_value.empty()) op_value.pop();
while(!op_join.empty()) op_join.pop();
while(!op_nums.empty()) op_nums.pop();
while(!op_nums_f.empty()) op_nums_f.pop();
while(!j_col_count.empty()) j_col_count.pop();
while(!namevars.empty()) namevars.pop();
while(!typevars.empty()) typevars.pop();
while(!sizevars.empty()) sizevars.pop();
while(!cols.empty()) cols.pop();
sel_count = 0;
join_cnt = 0;
join_col_cnt = -1;
eqq = 0;
}
int main(int ac, char **av)
{
extern FILE *yyin;
//cudaDeviceProp deviceProp;
//cudaGetDeviceProperties(&deviceProp, 0);
//if (!deviceProp.canMapHostMemory)
// cout << "Device 0 cannot map host memory" << endl;
//cudaSetDeviceFlags(cudaDeviceMapHost);
cudppCreate(&theCudpp);
if (ac == 1) {
cout << "Usage : alenka -l process_count script.sql" << endl;
exit(1);
};
if(strcmp(av[1],"-l") == 0) {
process_count = atoff(av[2]);
cout << "Process count = " << process_count << endl;
}
else {
process_count = 6200000;
cout << "Process count = 6200000 " << endl;
};
if((yyin = fopen(av[ac-1], "r")) == NULL) {
perror(av[ac-1]);
exit(1);
};
if(yyparse()) {
printf("SQL scan parse failed\n");
exit(1);
};
fclose(yyin);
scan_state = 1;
std::clock_t start1 = std::clock();
statement_count = 0;
clean_queues();
if(ac > 1 && (yyin = fopen(av[ac-1], "r")) == NULL) {
perror(av[ac-1]);
exit(1);
}
PROC_FLUSH_BUF ( yyin );
statement_count = 0;
if(!yyparse())
cout << "SQL scan parse worked" << endl;
else
cout << "SQL scan parse failed" << endl;
fclose(yyin);
std::cout<< "cycle time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
cudppDestroy(theCudpp);
}
|
transpose_gpu.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/kernels/transpose/transpose_gpu.h"
#include <hip/hip_runtime.h>
#include <memory>
#include <vector>
#include "dali/core/util.h"
#include "dali/kernels/common/type_erasure.h"
#include "dali/kernels/transpose/transpose_gpu_def.h"
#include "dali/kernels/transpose/transpose_gpu_impl.cuh"
#include "dali/kernels/transpose/transpose_gpu_setup.cuh"
namespace dali {
namespace kernels {
namespace transpose_impl {
struct TransposeInfo {
int element_size;
TransposeMethod method;
TensorShape<> shape;
SmallVector<int, 6> perm;
};
constexpr int kMaxInterleaveSize = 32;
constexpr int kMaxDeinterleaveSize = kMaxInterleaveSize;
inline bool UseTiledTranspose(const int64_t *shape, const int *perm, int ndim, int element_size) {
if (perm[ndim-1] == ndim - 1) {
assert(ndim >= 3);
if (shape[ndim-1] * element_size >= kTiledTransposeMaxVectorSize)
return false;
ndim--; // ignore last dimension - it will be treated as vector lanes
}
int xdim = ndim-1;
int ydim = 0;
for (; ydim < xdim; ydim++) {
if (perm[ydim] == xdim)
break;
}
double tile_coverage = shape[xdim] * shape[ydim];
tile_coverage /= align_up(shape[xdim], kTileSize) * align_up(shape[ydim], kTileSize);
return tile_coverage > 0.4; // for now, it's an educated guess
}
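// Method selection: 1D shapes reduce to a plain copy; the tiled kernel is used when its tile
// coverage heuristic passes; otherwise, if the innermost dimension moves, a small innermost
// input dimension selects Deinterleave and a small post-permutation innermost dimension
// selects Interleave; everything else falls back to the generic strided kernel.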
TransposeMethod GetTransposeMethod(const int64_t *shape,
const int *perm,
int ndim,
int element_size) {
if (ndim == 1)
return TransposeMethod::Copy;
if (UseTiledTranspose(shape, perm, ndim, element_size))
return TransposeMethod::Tiled;
if (perm[ndim-1] != ndim - 1) {
if (shape[ndim-1] * element_size <= kMaxDeinterleaveSize)
return TransposeMethod::Deinterleave;
else if (shape[perm[ndim-1]] * element_size <= kMaxInterleaveSize)
return TransposeMethod::Interleave;
}
return TransposeMethod::Generic;
}
void GetTransposeInfo(TransposeInfo &info, int element_size,
span<const int64_t> in_shape, span<const int> perm) {
SimplifyPermute(info.shape, info.perm, in_shape.data(), perm.data(), in_shape.size());
info.element_size = element_size;
int ndim = info.shape.size();
if (ndim > kMaxNDim)
throw std::range_error("Transposition too complex");
info.method = GetTransposeMethod(info.shape.data(), info.perm.data(), ndim, element_size);
}
void GetTransposeInfo(TransposeInfo *infos, int element_size,
const TensorListShape<> &tls, span<const int> perm) {
int N = tls.num_samples();
for (int i = 0; i < N; i++) {
GetTransposeInfo(infos[i], element_size, tls.tensor_shape_span(i), perm);
}
}
} // namespace transpose_impl
using namespace transpose_impl; // NOLINT
class TransposeGPU::Impl {
public:
template <typename T>
KernelRequirements SetupTyped(
const TensorListShape<> &in_shape,
span<const int> permutation) {
int N = in_shape.num_samples();
int ndim = in_shape.sample_dim();
element_size_ = sizeof(T);
in_shape_ = in_shape;
permute_dims(out_shape_, in_shape_, permutation);
infos_.resize(N);
GetTransposeInfo(infos_.data(), sizeof(T), in_shape, permutation);
tiled_descs_.clear();
deinterleave_descs_.clear();
generic_descs_.clear();
idx_generic_.clear();
idx_tiled_.clear();
idx_deinterleave_.clear();
tiled_descs_.reserve(infos_.size());
deinterleave_descs_.reserve(infos_.size());
generic_descs_.reserve(infos_.size());
for (int i = 0; i < N; i++) {
auto &shape = infos_[i].shape;
auto perm = make_span(infos_[i].perm);
switch (infos_[i].method) {
case TransposeMethod::Tiled:
{
TiledTransposeDesc<T> desc;
InitTiledTranspose(desc, shape, perm);
AddDesc(desc);
idx_tiled_.push_back(i);
}
break;
case TransposeMethod::Deinterleave:
{
DeinterleaveDesc<T> desc;
InitDeinterleave(desc, shape, perm);
AddDesc(desc);
idx_deinterleave_.push_back(i);
}
break;
case TransposeMethod::Interleave: // no specialized implementation yet
case TransposeMethod::Copy: // generic kernel does a good job at just copying
default:
{
GenericTransposeDesc<T> desc;
InitGenericTranspose(desc, shape, perm);
AddDesc(desc);
idx_generic_.push_back(i);
}
break;
}
}
KernelRequirements req;
req.output_shapes = { out_shape_ };
ScratchpadEstimator se;
se.add<TiledTransposeDesc<T>>(AllocType::GPU, tiled_descs_.size());
se.add<DeinterleaveDesc<T>>(AllocType::GPU, deinterleave_descs_.size());
se.add<GenericTransposeDesc<T>>(AllocType::GPU, generic_descs_.size());
req.scratch_sizes = se.sizes;
return req;
}
KernelRequirements Setup(
const TensorListShape<> &in_shape,
span<const int> permutation,
int element_size) {
KernelRequirements req;
VALUE_SWITCH(element_size, static_el_size, (1, 2, 4, 8, 16),
(req = SetupTyped<type_of_size<static_el_size>>(in_shape, permutation)),
(throw std::range_error("Transpose: Unexpected tensor element size. "
"Must be one of (1,2,4,8,16)")));
return req;
}
template <typename T>
void RunTyped(KernelContext &ctx, T *const *out, const T *const *in) {
RunTiled(ctx, out, in);
RunDeinterleave(ctx, out, in);
RunGeneric(ctx, out, in);
}
void Run(KernelContext &ctx, void *const *out, const void *const *in) {
VALUE_SWITCH(element_size_, static_el_size, (1, 2, 4, 8, 16),
(
using T = type_of_size<static_el_size>;
RunTyped(ctx, reinterpret_cast<T*const*>(out), reinterpret_cast<const T*const*>(in))
), ( // NOLINT
throw std::range_error("Transpose: Unexpected tensor element size. "
"Must be one of (1,2,4,8,16)")
) // NOLINT
); // NOLINT
}
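// Descriptors are stored type-erased as Desc<void> so one vector per kernel family can hold
// entries for any element size; this relies on the descriptor layout not depending on T
// (the element type only appears behind pointers).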
template <typename T>
void AddDesc(const GenericTransposeDesc<T> &desc) {
generic_descs_.push_back(reinterpret_cast<const GenericTransposeDesc<void> &>(desc));
}
template <typename T>
void AddDesc(const DeinterleaveDesc<T> &desc) {
deinterleave_descs_.push_back(reinterpret_cast<const DeinterleaveDesc<void> &>(desc));
}
template <typename T>
void AddDesc(const TiledTransposeDesc<T> &desc) {
tiled_descs_.push_back(reinterpret_cast<const TiledTransposeDesc<void> &>(desc));
}
template <typename T>
void RunGeneric(KernelContext &ctx, T *const *out, const T *const *in) {
if (!generic_descs_.empty()) {
uint64_t max_size = 0;
int block_size = 256;
for (size_t i = 0; i < generic_descs_.size(); i++) {
generic_descs_[i].out = out[idx_generic_[i]];
generic_descs_[i].in = in[idx_generic_[i]];
if (generic_descs_[i].size > max_size)
max_size = generic_descs_[i].size;
}
auto *gpu_descs = reinterpret_cast<GenericTransposeDesc<T>*>(
ctx.scratchpad->ToGPU(ctx.gpu.stream, generic_descs_));
dim3 grid(div_ceil(max_size, block_size * 8), generic_descs_.size());
hipLaunchKernelGGL(( TransposeGenericBatch), dim3(grid), dim3(block_size), 0, ctx.gpu.stream, gpu_descs);
}
}
template <typename T>
void RunTiled(KernelContext &ctx, T *const *out, const T *const *in) {
if (!tiled_descs_.empty()) {
int64_t max_tiles = 0;
for (size_t i = 0; i < tiled_descs_.size(); i++) {
if (tiled_descs_[i].total_tiles > max_tiles)
max_tiles = tiled_descs_[i].total_tiles;
}
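// Damp the per-sample grid width: beyond roughly 64 / batch_size blocks, further tile growth
// only adds one block per four tiles, keeping the total block count across the batch bounded;
// the chosen grid_x is passed to UpdateTiledTranspose() so each descriptor maps its tiles
// onto that many blocks.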
int grid_x = max_tiles;
int threshold = 64 / tiled_descs_.size();
if (grid_x > threshold) {
grid_x = threshold + (grid_x - threshold) / 4;
}
for (size_t i = 0; i < tiled_descs_.size(); i++) {
UpdateTiledTranspose(tiled_descs_[i], out[idx_tiled_[i]], in[idx_tiled_[i]], grid_x);
}
auto *gpu_descs = reinterpret_cast<TiledTransposeDesc<T>*>(
ctx.scratchpad->ToGPU(ctx.gpu.stream, tiled_descs_));
int max_threads = MaxThreadsPerBlock(TransposeTiledBatch<T>);
assert(max_threads >= kTileSize);
int block_y = 16; // start with 32x16 block and try smaller until found
while (kTileSize * block_y > max_threads)
block_y >>= 1;
dim3 block(kTileSize, block_y);
dim3 grid(grid_x, tiled_descs_.size());
const int shm_size = kTiledTransposeMaxSharedMem;
hipLaunchKernelGGL(( TransposeTiledBatch), dim3(grid), dim3(block), shm_size, ctx.gpu.stream, gpu_descs);
}
}
template <typename T>
void RunDeinterleave(KernelContext &ctx, T *const *out, const T *const *in) {
if (!deinterleave_descs_.empty()) {
int64_t max_size = 0;
int block_size = 256;
for (size_t i = 0; i < deinterleave_descs_.size(); i++) {
auto &desc = deinterleave_descs_[i];
desc.out = out[idx_deinterleave_[i]];
desc.in = in[idx_deinterleave_[i]];
int64_t outer_size = desc.size / desc.in_strides[desc.ndim-2];
if (outer_size > max_size)
max_size = outer_size;
}
auto *gpu_descs = reinterpret_cast<DeinterleaveDesc<T>*>(
ctx.scratchpad->ToGPU(ctx.gpu.stream, deinterleave_descs_));
dim3 grid(div_ceil(max_size, 4*block_size), deinterleave_descs_.size());
hipLaunchKernelGGL(( TransposeDeinterleaveBatch), dim3(grid), dim3(block_size), 0, ctx.gpu.stream, gpu_descs);
}
}
int element_size_ = 0;
TensorListShape<> in_shape_, out_shape_;
std::vector<TransposeInfo> infos_;
std::vector<GenericTransposeDesc<void>> generic_descs_;
std::vector<TiledTransposeDesc<void>> tiled_descs_;
std::vector<DeinterleaveDesc<void>> deinterleave_descs_;
std::vector<int> idx_generic_, idx_tiled_, idx_deinterleave_; // sample indices
};
TransposeGPU::TransposeGPU() {
impl_ = std::make_unique<Impl>();
}
TransposeGPU::~TransposeGPU() = default;
void TransposeGPU::CheckShapes(const TensorListShape<> &in_shape,
const TensorListShape<> &out_shape,
int element_size) {
assert(impl_ != nullptr);
DALI_ENFORCE(impl_->in_shape_ == in_shape, "Input shape different than used in Setup");
DALI_ENFORCE(impl_->out_shape_ == out_shape,
"Output shape does not match the one produced in Setup");
DALI_ENFORCE(impl_->element_size_ == element_size,
"Different element size than used in Setup");
}
KernelRequirements TransposeGPU::Setup(
KernelContext &ctx,
const TensorListShape<> &in_shape,
span<const int> permutation,
int element_size) {
assert(impl_ != nullptr);
return impl_->Setup(in_shape, permutation, element_size);
}
void TransposeGPU::Run(KernelContext &ctx, void *const *out, const void *const *in) {
assert(impl_ != nullptr);
impl_->Run(ctx, out, in);
}
} // namespace kernels
} // namespace dali
|
transpose_gpu.cu
|
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/kernels/transpose/transpose_gpu.h"
#include <cuda_runtime.h>
#include <memory>
#include <vector>
#include "dali/core/util.h"
#include "dali/kernels/common/type_erasure.h"
#include "dali/kernels/transpose/transpose_gpu_def.h"
#include "dali/kernels/transpose/transpose_gpu_impl.cuh"
#include "dali/kernels/transpose/transpose_gpu_setup.cuh"
namespace dali {
namespace kernels {
namespace transpose_impl {
struct TransposeInfo {
int element_size;
TransposeMethod method;
TensorShape<> shape;
SmallVector<int, 6> perm;
};
constexpr int kMaxInterleaveSize = 32;
constexpr int kMaxDeinterleaveSize = kMaxInterleaveSize;
inline bool UseTiledTranspose(const int64_t *shape, const int *perm, int ndim, int element_size) {
if (perm[ndim-1] == ndim - 1) {
assert(ndim >= 3);
if (shape[ndim-1] * element_size >= kTiledTransposeMaxVectorSize)
return false;
ndim--; // ignore last dimension - it will be treated as vector lanes
}
int xdim = ndim-1;
int ydim = 0;
for (; ydim < xdim; ydim++) {
if (perm[ydim] == xdim)
break;
}
double tile_coverage = shape[xdim] * shape[ydim];
tile_coverage /= align_up(shape[xdim], kTileSize) * align_up(shape[ydim], kTileSize);
return tile_coverage > 0.4; // for now, it's an educated guess
}
TransposeMethod GetTransposeMethod(const int64_t *shape,
const int *perm,
int ndim,
int element_size) {
if (ndim == 1)
return TransposeMethod::Copy;
if (UseTiledTranspose(shape, perm, ndim, element_size))
return TransposeMethod::Tiled;
if (perm[ndim-1] != ndim - 1) {
if (shape[ndim-1] * element_size <= kMaxDeinterleaveSize)
return TransposeMethod::Deinterleave;
else if (shape[perm[ndim-1]] * element_size <= kMaxInterleaveSize)
return TransposeMethod::Interleave;
}
return TransposeMethod::Generic;
}
void GetTransposeInfo(TransposeInfo &info, int element_size,
span<const int64_t> in_shape, span<const int> perm) {
SimplifyPermute(info.shape, info.perm, in_shape.data(), perm.data(), in_shape.size());
info.element_size = element_size;
int ndim = info.shape.size();
if (ndim > kMaxNDim)
throw std::range_error("Transposition too complex");
info.method = GetTransposeMethod(info.shape.data(), info.perm.data(), ndim, element_size);
}
void GetTransposeInfo(TransposeInfo *infos, int element_size,
const TensorListShape<> &tls, span<const int> perm) {
int N = tls.num_samples();
for (int i = 0; i < N; i++) {
GetTransposeInfo(infos[i], element_size, tls.tensor_shape_span(i), perm);
}
}
} // namespace transpose_impl
using namespace transpose_impl; // NOLINT
class TransposeGPU::Impl {
public:
template <typename T>
KernelRequirements SetupTyped(
const TensorListShape<> &in_shape,
span<const int> permutation) {
int N = in_shape.num_samples();
int ndim = in_shape_.sample_dim();
element_size_ = sizeof(T);
in_shape_ = in_shape;
permute_dims(out_shape_, in_shape_, permutation);
infos_.resize(N);
GetTransposeInfo(infos_.data(), sizeof(T), in_shape, permutation);
tiled_descs_.clear();
deinterleave_descs_.clear();
generic_descs_.clear();
idx_generic_.clear();
idx_tiled_.clear();
idx_deinterleave_.clear();
tiled_descs_.reserve(infos_.size());
deinterleave_descs_.reserve(infos_.size());
generic_descs_.reserve(infos_.size());
for (int i = 0; i < N; i++) {
auto &shape = infos_[i].shape;
auto perm = make_span(infos_[i].perm);
switch (infos_[i].method) {
case TransposeMethod::Tiled:
{
TiledTransposeDesc<T> desc;
InitTiledTranspose(desc, shape, perm);
AddDesc(desc);
idx_tiled_.push_back(i);
}
break;
case TransposeMethod::Deinterleave:
{
DeinterleaveDesc<T> desc;
InitDeinterleave(desc, shape, perm);
AddDesc(desc);
idx_deinterleave_.push_back(i);
}
break;
case TransposeMethod::Interleave: // no specialized implementation yet
case TransposeMethod::Copy: // generic kernel does a good job at just copying
default:
{
GenericTransposeDesc<T> desc;
InitGenericTranspose(desc, shape, perm);
AddDesc(desc);
idx_generic_.push_back(i);
}
break;
}
}
KernelRequirements req;
req.output_shapes = { out_shape_ };
ScratchpadEstimator se;
se.add<TiledTransposeDesc<T>>(AllocType::GPU, tiled_descs_.size());
se.add<DeinterleaveDesc<T>>(AllocType::GPU, deinterleave_descs_.size());
se.add<GenericTransposeDesc<T>>(AllocType::GPU, generic_descs_.size());
req.scratch_sizes = se.sizes;
return req;
}
KernelRequirements Setup(
const TensorListShape<> &in_shape,
span<const int> permutation,
int element_size) {
KernelRequirements req;
VALUE_SWITCH(element_size, static_el_size, (1, 2, 4, 8, 16),
(req = SetupTyped<type_of_size<static_el_size>>(in_shape, permutation)),
(throw std::range_error("Transpose: Unexpected tensor element size."
"Must be one of (1,2,4,8,16)")));
return req;
}
template <typename T>
void RunTyped(KernelContext &ctx, T *const *out, const T *const *in) {
RunTiled(ctx, out, in);
RunDeinterleave(ctx, out, in);
RunGeneric(ctx, out, in);
}
void Run(KernelContext &ctx, void *const *out, const void *const *in) {
VALUE_SWITCH(element_size_, static_el_size, (1, 2, 4, 8, 16),
(
using T = type_of_size<static_el_size>;
RunTyped(ctx, reinterpret_cast<T*const*>(out), reinterpret_cast<const T*const*>(in))
), ( // NOLINT
throw std::range_error("Transpose: Unexpected tensor element size."
"Must be one of (1,2,4,8,16)")
) // NOLINT
); // NOLINT
}
template <typename T>
void AddDesc(const GenericTransposeDesc<T> &desc) {
generic_descs_.push_back(reinterpret_cast<const GenericTransposeDesc<void> &>(desc));
}
template <typename T>
void AddDesc(const DeinterleaveDesc<T> &desc) {
deinterleave_descs_.push_back(reinterpret_cast<const DeinterleaveDesc<void> &>(desc));
}
template <typename T>
void AddDesc(const TiledTransposeDesc<T> &desc) {
tiled_descs_.push_back(reinterpret_cast<const TiledTransposeDesc<void> &>(desc));
}
template <typename T>
void RunGeneric(KernelContext &ctx, T *const *out, const T *const *in) {
if (!generic_descs_.empty()) {
uint64_t max_size = 0;
int block_size = 256;
for (size_t i = 0; i < generic_descs_.size(); i++) {
generic_descs_[i].out = out[idx_generic_[i]];
generic_descs_[i].in = in[idx_generic_[i]];
if (generic_descs_[i].size > max_size)
max_size = generic_descs_[i].size;
}
auto *gpu_descs = reinterpret_cast<GenericTransposeDesc<T>*>(
ctx.scratchpad->ToGPU(ctx.gpu.stream, generic_descs_));
dim3 grid(div_ceil(max_size, block_size * 8), generic_descs_.size());
TransposeGenericBatch<<<grid, block_size, 0, ctx.gpu.stream>>>(gpu_descs);
}
}
template <typename T>
void RunTiled(KernelContext &ctx, T *const *out, const T *const *in) {
if (!tiled_descs_.empty()) {
int64_t max_tiles = 0;
for (size_t i = 0; i < tiled_descs_.size(); i++) {
if (tiled_descs_[i].total_tiles > max_tiles)
max_tiles = tiled_descs_[i].total_tiles;
}
int grid_x = max_tiles;
int threshold = 64 / tiled_descs_.size();
if (grid_x > threshold) {
grid_x = threshold + (grid_x - threshold) / 4;
}
for (size_t i = 0; i < tiled_descs_.size(); i++) {
UpdateTiledTranspose(tiled_descs_[i], out[idx_tiled_[i]], in[idx_tiled_[i]], grid_x);
}
auto *gpu_descs = reinterpret_cast<TiledTransposeDesc<T>*>(
ctx.scratchpad->ToGPU(ctx.gpu.stream, tiled_descs_));
int max_threads = MaxThreadsPerBlock(TransposeTiledBatch<T>);
assert(max_threads >= kTileSize);
int block_y = 16; // start with a 32x16 block and halve block_y until it fits
while (kTileSize * block_y > max_threads)
block_y >>= 1;
dim3 block(kTileSize, block_y);
dim3 grid(grid_x, tiled_descs_.size());
const int shm_size = kTiledTransposeMaxSharedMem;
TransposeTiledBatch<<<grid, block, shm_size, ctx.gpu.stream>>>(gpu_descs);
}
}
template <typename T>
void RunDeinterleave(KernelContext &ctx, T *const *out, const T *const *in) {
if (!deinterleave_descs_.empty()) {
int64_t max_size = 0;
int block_size = 256;
for (size_t i = 0; i < deinterleave_descs_.size(); i++) {
auto &desc = deinterleave_descs_[i];
desc.out = out[idx_deinterleave_[i]];
desc.in = in[idx_deinterleave_[i]];
int64_t outer_size = desc.size / desc.in_strides[desc.ndim-2];
if (outer_size > max_size)
max_size = outer_size;
}
auto *gpu_descs = reinterpret_cast<DeinterleaveDesc<T>*>(
ctx.scratchpad->ToGPU(ctx.gpu.stream, deinterleave_descs_));
dim3 grid(div_ceil(max_size, 4*block_size), deinterleave_descs_.size());
TransposeDeinterleaveBatch<<<grid, block_size, 0, ctx.gpu.stream>>>(gpu_descs);
}
}
int element_size_ = 0;
TensorListShape<> in_shape_, out_shape_;
std::vector<TransposeInfo> infos_;
std::vector<GenericTransposeDesc<void>> generic_descs_;
std::vector<TiledTransposeDesc<void>> tiled_descs_;
std::vector<DeinterleaveDesc<void>> deinterleave_descs_;
std::vector<int> idx_generic_, idx_tiled_, idx_deinterleave_; // sample indices
};
TransposeGPU::TransposeGPU() {
impl_ = std::make_unique<Impl>();
}
TransposeGPU::~TransposeGPU() = default;
void TransposeGPU::CheckShapes(const TensorListShape<> &in_shape,
const TensorListShape<> &out_shape,
int element_size) {
assert(impl_ != nullptr);
DALI_ENFORCE(impl_->in_shape_ == in_shape, "Input shape different than used in Setup");
DALI_ENFORCE(impl_->out_shape_ == out_shape,
"Output shape does not match the one produced in Setup");
DALI_ENFORCE(impl_->element_size_ == element_size,
"Different element size than used in Setup");
}
KernelRequirements TransposeGPU::Setup(
KernelContext &ctx,
const TensorListShape<> &in_shape,
span<const int> permutation,
int element_size) {
assert(impl_ != nullptr);
return impl_->Setup(in_shape, permutation, element_size);
}
void TransposeGPU::Run(KernelContext &ctx, void *const *out, const void *const *in) {
assert(impl_ != nullptr);
impl_->Run(ctx, out, in);
}
} // namespace kernels
} // namespace dali
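// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the DALI sources above): a minimal
// shared-memory tiled transpose of a single 2D matrix, showing the idea
// behind TransposeMethod::Tiled. The tile size, kernel name and the +1
// padding column are assumptions chosen for this example, not DALI's actual
// TransposeTiledBatch implementation.
// Launch as: dim3 block(kSketchTile, kSketchTile);
//            dim3 grid(div_ceil(cols, kSketchTile), div_ceil(rows, kSketchTile));
// ---------------------------------------------------------------------------
constexpr int kSketchTile = 32;
__global__ void TransposeTiledSketch(float *out, const float *in, int rows, int cols) {
  // +1 column of padding avoids shared-memory bank conflicts on the transposed read
  __shared__ float tile[kSketchTile][kSketchTile + 1];
  int x = blockIdx.x * kSketchTile + threadIdx.x;   // column in the input
  int y = blockIdx.y * kSketchTile + threadIdx.y;   // row in the input
  if (x < cols && y < rows)
    tile[threadIdx.y][threadIdx.x] = in[y * cols + x];
  __syncthreads();
  x = blockIdx.y * kSketchTile + threadIdx.x;       // column in the transposed output
  y = blockIdx.x * kSketchTile + threadIdx.y;       // row in the transposed output
  if (x < rows && y < cols)
    out[y * rows + x] = tile[threadIdx.x][threadIdx.y];
}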
|
4a07db9b53a6d3ae41bfb2ca5f5595addd804847.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zset_pointer.cu normal z -> c, Tue Feb 9 16:05:38 2016
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
///////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_cset_pointer(
magmaFloatComplex **output_array,
magmaFloatComplex *input,
magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batch_offset)
{
output_array[blockIdx.x] = input + blockIdx.x * batch_offset + row + column * lda;
//printf("==> kernel_set_pointer input_array %p output_array %p \n",input+ blockIdx.x * batch_offset,output_array[blockIdx.x]);
}
/*
Purpose
-------
convert a consecutively stored batch of matrices into an array of pointers;
for example, if A has size N*batchCount, where N (= batch_offset) is the size of each matrix,
produce dA_array[0], dA_array[1], ..., dA_array[batchCount-1], where each dA_array[i] has size N
Arguments
----------
@param[out]
output_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array A of DIMENSION ( lda, column ) on the GPU
@param[in]
input COMPLEX array of dimension ( LDDA, N*batchCount ) on the GPU.
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
row INTEGER
On entry, row specifies the number of rows of the matrix A.
@param[in]
column INTEGER
On entry, column specifies the number of columns of the matrix A
@param[in]
batch_offset INTEGER
The starting offset of each matrix A in the input array
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
*/
extern "C"
void magma_cset_pointer(
magmaFloatComplex **output_array,
magmaFloatComplex *input,
magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batch_offset,
magma_int_t batchCount,
magma_queue_t queue)
{
hipLaunchKernelGGL(( kernel_cset_pointer)
, dim3(batchCount), dim3(1), 0, queue->cuda_stream() ,
output_array, input, lda, row, column, batch_offset);
}
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void zdisplace_pointers_kernel(magmaFloatComplex **output_array,
magmaFloatComplex **input_array, magma_int_t lda,
magma_int_t row, magma_int_t column)
{
magmaFloatComplex *inpt = input_array[blockIdx.x];
output_array[blockIdx.x] = &inpt[row + column * lda];
}
/*
Purpose
-------
compute the offset for all the matrices and save the displacement of the new pointer in output_array.
input_array contains the pointers to the initial position.
output_array[i] = input_array[i] + row + lda * column;
Arguments
----------
@param[out]
output_array Array of pointers, dimension (batchCount).
Each pointer points to the new displacement of array A in input_array on the GPU
@param[in]
input_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array A of DIMENSION ( lda, column ) on the GPU
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
row INTEGER
On entry, row specifies the number of rows of the matrix A.
@param[in]
column INTEGER
On entry, column specifies the number of columns of the matrix A
@param[in]
batch_offset INTEGER
The starting offset of each matrix A in the input array
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
*/
extern "C"
void magma_cdisplace_pointers(magmaFloatComplex **output_array,
magmaFloatComplex **input_array, magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batchCount, magma_queue_t queue)
{
hipLaunchKernelGGL(( zdisplace_pointers_kernel)
, dim3(batchCount), dim3(1), 0, queue->cuda_stream() ,
output_array, input_array, lda, row, column);
}
|
4a07db9b53a6d3ae41bfb2ca5f5595addd804847.cu
|
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zset_pointer.cu normal z -> c, Tue Feb 9 16:05:38 2016
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
///////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_cset_pointer(
magmaFloatComplex **output_array,
magmaFloatComplex *input,
magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batch_offset)
{
output_array[blockIdx.x] = input + blockIdx.x * batch_offset + row + column * lda;
//printf("==> kernel_set_pointer input_array %p output_array %p \n",input+ blockIdx.x * batch_offset,output_array[blockIdx.x]);
}
/*
Purpose
-------
convert a consecutively stored batch of matrices into an array of pointers;
for example, if A has size N*batchCount, where N (= batch_offset) is the size of each matrix,
produce dA_array[0], dA_array[1], ..., dA_array[batchCount-1], where each dA_array[i] has size N
Arguments
----------
@param[out]
output_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array A of DIMENSION ( lda, column ) on the GPU
@param[in]
input COMPLEX array of dimension ( LDDA, N*batchCount ) on the GPU.
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
row INTEGER
On entry, row specifies the number of rows of the matrix A.
@param[in]
column INTEGER
On entry, column specifies the number of columns of the matrix A
@param[in]
batch_offset INTEGER
The starting offset of each matrix A in the input array
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
*/
extern "C"
void magma_cset_pointer(
magmaFloatComplex **output_array,
magmaFloatComplex *input,
magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batch_offset,
magma_int_t batchCount,
magma_queue_t queue)
{
kernel_cset_pointer
<<< batchCount, 1, 0, queue->cuda_stream() >>>
(output_array, input, lda, row, column, batch_offset);
}
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void zdisplace_pointers_kernel(magmaFloatComplex **output_array,
magmaFloatComplex **input_array, magma_int_t lda,
magma_int_t row, magma_int_t column)
{
magmaFloatComplex *inpt = input_array[blockIdx.x];
output_array[blockIdx.x] = &inpt[row + column * lda];
}
/*
Purpose
-------
compute the offset for all the matrices and save the displacement of the new pointer in output_array.
input_array contains the pointers to the initial position.
output_array[i] = input_array[i] + row + lda * column;
Arguments
----------
@param[out]
output_array Array of pointers, dimension (batchCount).
Each pointer points to the new displacement of array A in input_array on the GPU
@param[in]
input_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array A of DIMENSION ( lda, column ) on the GPU
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
row INTEGER
On entry, row specifies the number of rows of the matrix A.
@param[in]
column INTEGER
On entry, column specifies the number of columns of the matrix A
@param[in]
batch_offset INTEGER
The starting offset of each matrix A in the input array
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
*/
extern "C"
void magma_cdisplace_pointers(magmaFloatComplex **output_array,
magmaFloatComplex **input_array, magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batchCount, magma_queue_t queue)
{
zdisplace_pointers_kernel
<<< batchCount, 1, 0, queue->cuda_stream() >>>
(output_array, input_array, lda, row, column);
}
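/*
    Illustrative sketch (not part of the MAGMA sources above): the same
    "consecutive storage -> array of per-matrix pointers" computation done on
    the host, which is often the easiest way to see what magma_cset_pointer
    produces. The function name and the use of std::vector are assumptions
    made for this example only.
*/
#include <cstddef>
#include <vector>
std::vector<float*> host_batch_pointers_sketch(
    float *base, int lda, int row, int column,
    int batch_offset, int batchCount)
{
    std::vector<float*> ptrs(batchCount);
    for (int i = 0; i < batchCount; ++i) {
        // pointer i addresses element (row, column) of the i-th matrix,
        // where matrices are stored back to back every batch_offset elements
        ptrs[i] = base + (std::ptrdiff_t) i * batch_offset + row + (std::ptrdiff_t) column * lda;
    }
    return ptrs;
}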
|
61b85c241ec9823bded8aed0539ed362ff9eb268.hip
|
// !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace arithm
{
struct VAbsDiff4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff4(a, b);
}
__host__ __device__ __forceinline__ VAbsDiff4() {}
__host__ __device__ __forceinline__ VAbsDiff4(const VAbsDiff4&) {}
};
struct VAbsDiff2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff2(a, b);
}
__host__ __device__ __forceinline__ VAbsDiff2() {}
__host__ __device__ __forceinline__ VAbsDiff2(const VAbsDiff2&) {}
};
__device__ __forceinline__ int _abs(int a)
{
return ::abs(a);
}
__device__ __forceinline__ float _abs(float a)
{
return ::fabsf(a);
}
__device__ __forceinline__ double _abs(double a)
{
return ::fabs(a);
}
template <typename T> struct AbsDiffMat : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(T a, T b) const
{
return saturate_cast<T>(_abs(a - b));
}
__host__ __device__ __forceinline__ AbsDiffMat() {}
__host__ __device__ __forceinline__ AbsDiffMat(const AbsDiffMat&) {}
};
}
namespace cv { namespace gpu { namespace cudev
{
template <> struct TransformFunctorTraits< arithm::VAbsDiff4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VAbsDiff2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T> struct TransformFunctorTraits< arithm::AbsDiffMat<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void absDiffMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
cudev::transform(src1, src2, dst, VAbsDiff4(), WithOutMask(), stream);
}
void absDiffMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
cudev::transform(src1, src2, dst, VAbsDiff2(), WithOutMask(), stream);
}
template <typename T>
void absDiffMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, AbsDiffMat<T>(), WithOutMask(), stream);
}
template void absDiffMat<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
}
#endif // CUDA_DISABLER
|
61b85c241ec9823bded8aed0539ed362ff9eb268.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace arithm
{
struct VAbsDiff4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff4(a, b);
}
__host__ __device__ __forceinline__ VAbsDiff4() {}
__host__ __device__ __forceinline__ VAbsDiff4(const VAbsDiff4&) {}
};
struct VAbsDiff2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff2(a, b);
}
__host__ __device__ __forceinline__ VAbsDiff2() {}
__host__ __device__ __forceinline__ VAbsDiff2(const VAbsDiff2&) {}
};
__device__ __forceinline__ int _abs(int a)
{
return ::abs(a);
}
__device__ __forceinline__ float _abs(float a)
{
return ::fabsf(a);
}
__device__ __forceinline__ double _abs(double a)
{
return ::fabs(a);
}
template <typename T> struct AbsDiffMat : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(T a, T b) const
{
return saturate_cast<T>(_abs(a - b));
}
__host__ __device__ __forceinline__ AbsDiffMat() {}
__host__ __device__ __forceinline__ AbsDiffMat(const AbsDiffMat&) {}
};
}
namespace cv { namespace gpu { namespace cudev
{
template <> struct TransformFunctorTraits< arithm::VAbsDiff4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VAbsDiff2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T> struct TransformFunctorTraits< arithm::AbsDiffMat<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void absDiffMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VAbsDiff4(), WithOutMask(), stream);
}
void absDiffMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VAbsDiff2(), WithOutMask(), stream);
}
template <typename T>
void absDiffMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, AbsDiffMat<T>(), WithOutMask(), stream);
}
template void absDiffMat<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
}
#endif // CUDA_DISABLER
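// Illustrative sketch (not part of the OpenCV sources above): the _v4 path
// packs four 8-bit pixels into one uint and takes a per-byte absolute
// difference in a single SIMD instruction. CUDA exposes this directly as the
// __vabsdiff4 intrinsic; the raw kernel below shows the idea without the
// cudev::transform machinery. It assumes the buffers are contiguous,
// 4-byte aligned, and that n_words = total_pixels / 4.
__global__ void absdiff_u8x4_sketch(const unsigned int *a, const unsigned int *b,
                                    unsigned int *dst, int n_words)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n_words)
        dst[i] = __vabsdiff4(a[i], b[i]);  // |a - b| computed independently in each byte lane
}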
|
8f1f6909fff9e44f01528d6aaaabe210237870e1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/data_norm_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/nccl_helper.h"
#endif
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using DataLayout = framework::DataLayout;
using platform::PADDLE_CUDA_NUM_THREADS;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
template <typename T>
__global__ void KernelDataNormFF(int N, int C, const T *x, T *y, const T *mean,
const T *scale) {
CUDA_KERNEL_LOOP(i, N * C) {
int col = i % C;
y[i] = (x[i] - mean[col]) * scale[col];
}
}
template <typename T>
__global__ void KernelMeanScale(int C, const T *batch_size, const T *batch_sum,
const T *batch_square_sum, T *mean, T *scale) {
CUDA_KERNEL_LOOP(i, C) {
mean[i] = batch_sum[i] / batch_size[i];
scale[i] = sqrt(batch_size[i] / batch_square_sum[i]);
}
}
template <typename T>
__global__ void KernelDataNormBP(int N, int C, const T *y_grad, const T *scale,
T *x_grad) {
CUDA_KERNEL_LOOP(i, N * C) { x_grad[i] = y_grad[i] * scale[i % C]; }
}
template <typename T>
__global__ void KernelDataNormBPStat(int N, int C, const T *x_val,
const T *means,
const float squared_sum_epsilon,
T *batch_size, T *batch_sum,
T *batch_square_sum) {
CUDA_KERNEL_LOOP(i, C) {
T val_sum = 0;
T square_sum = 0;
for (int j = 0; j < N; j++) {
val_sum += x_val[j * C + i];
square_sum +=
(x_val[j * C + i] - means[i]) * (x_val[j * C + i] - means[i]);
}
batch_size[i] = 1;
batch_sum[i] = val_sum / N;
batch_square_sum[i] = square_sum / N + squared_sum_epsilon;
}
}
template <typename T>
__global__ void KernelUpdateParam(int C, const T *d_batch_size,
const T *d_batch_sum,
const T *d_batch_square_sum, T *batch_size,
T *batch_sum, T *batch_square_sum,
const float decay_rate) {
CUDA_KERNEL_LOOP(i, C) {
batch_size[i] = batch_size[i] * decay_rate + d_batch_size[i];
batch_sum[i] = batch_sum[i] * decay_rate + d_batch_sum[i];
batch_square_sum[i] =
batch_square_sum[i] * decay_rate + d_batch_square_sum[i];
}
}
template <typename T>
class DataNormKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const auto *x = ctx.Input<Tensor>("X");
const auto &x_dims = x->dims();
// Align with CPU version, but should we add this restriction?
PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::PreconditionNotMet(
"The Input dim size should be 2"));
const int N = x_dims[0];
const int C = x_dims[1];
const T *batch_size_in = ctx.Input<Tensor>("BatchSize")->data<T>();
const T *batch_sum_in = ctx.Input<Tensor>("BatchSum")->data<T>();
const T *batch_square_sum_in =
ctx.Input<Tensor>("BatchSquareSum")->data<T>();
auto *x_data = x->data<T>();
// alloc memory
T *y_data = ctx.Output<Tensor>("Y")->mutable_data<T>(ctx.GetPlace());
T *mean_out_data =
ctx.Output<Tensor>("Means")->mutable_data<T>(ctx.GetPlace());
T *scale_out_data =
ctx.Output<Tensor>("Scales")->mutable_data<T>(ctx.GetPlace());
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
hipLaunchKernelGGL(( KernelMeanScale), dim3(GET_BLOCKS(C)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
C, batch_size_in, batch_sum_in, batch_square_sum_in, mean_out_data,
scale_out_data);
hipLaunchKernelGGL(( KernelDataNormFF), dim3(GET_BLOCKS(C * N)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
N, C, x_data, y_data, mean_out_data, scale_out_data);
}
};
template <typename T>
class DataNormGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const auto *x = ctx.Input<Tensor>("X");
const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
const auto *scales = ctx.Input<Tensor>("Scales");
const auto *means = ctx.Input<Tensor>("Means");
const float epsilon = ctx.Attr<float>("epsilon");
const float dr = ctx.Attr<float>("summary_decay_rate");
const bool need_sync_stats = ctx.Attr<bool>("sync_stats");
const auto &x_dims = x->dims();
// Align with CPU version, but should we add this restriction?
PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::PreconditionNotMet(
"The Input dim size should be 2"));
const int N = x_dims[0];
const int C = x_dims[1];
// init output
Tensor *d_x = nullptr;
if (ctx.HasOutput(framework::GradVarName("X"))) {
d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
}
T *d_batch_size = ctx.Output<Tensor>(framework::GradVarName("BatchSize"))
->mutable_data<T>(ctx.GetPlace());
T *d_batch_sum = ctx.Output<Tensor>(framework::GradVarName("BatchSum"))
->mutable_data<T>(ctx.GetPlace());
T *d_batch_square_sum =
ctx.Output<Tensor>(framework::GradVarName("BatchSquareSum"))
->mutable_data<T>(ctx.GetPlace());
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
if (d_x != nullptr) {
hipLaunchKernelGGL(( KernelDataNormBP), dim3(GET_BLOCKS(C * N)), dim3(PADDLE_CUDA_NUM_THREADS), 0,
stream, N, C, d_y->data<T>(), scales->data<T>(),
d_x->mutable_data<T>(ctx.GetPlace()));
}
hipLaunchKernelGGL(( KernelDataNormBPStat), dim3(GET_BLOCKS(C)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
N, C, x->data<T>(), means->data<T>(), epsilon, d_batch_size,
d_batch_sum, d_batch_square_sum);
if (need_sync_stats) {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
auto comm = platform::NCCLCommContext::Instance().Get(0, ctx.GetPlace());
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce(
reinterpret_cast<const void *>(d_batch_size),
reinterpret_cast<void *>(d_batch_size), C,
platform::ToNCCLDataType(x->type()), ncclSum, comm->comm(), stream));
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce(
reinterpret_cast<const void *>(d_batch_sum),
reinterpret_cast<void *>(d_batch_sum), C,
platform::ToNCCLDataType(x->type()), ncclSum, comm->comm(), stream));
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce(
reinterpret_cast<const void *>(d_batch_square_sum),
reinterpret_cast<void *>(d_batch_square_sum), C,
platform::ToNCCLDataType(x->type()), ncclSum, comm->comm(), stream));
hipError_t e_sync = hipStreamSynchronize(stream);
if (e_sync != 0) {
LOG(FATAL) << "Fail to sync nccl stream: "
<< hipGetErrorString(e_sync);
}
#else
PADDLE_THROW(platform::errors::PreconditionNotMet(
"PaddlePaddle should compile with GPU, and need_sync_stats connot be "
"supported on windows now."));
#endif
}
T *batch_size_data =
ctx.Output<Tensor>("BatchSize")->mutable_data<T>(ctx.GetPlace());
T *batch_sum_data =
ctx.Output<Tensor>("BatchSum")->mutable_data<T>(ctx.GetPlace());
T *batch_square_sum_data =
ctx.Output<Tensor>("BatchSquareSum")->mutable_data<T>(ctx.GetPlace());
hipLaunchKernelGGL(( KernelUpdateParam), dim3(GET_BLOCKS(C)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
C, d_batch_size, d_batch_sum, d_batch_square_sum, batch_size_data,
batch_sum_data, batch_square_sum_data, dr);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
data_norm, ops::DataNormKernel<paddle::platform::CUDADeviceContext, float>,
ops::DataNormKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
data_norm_grad,
ops::DataNormGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::DataNormGradKernel<paddle::platform::CUDADeviceContext, double>);
|
8f1f6909fff9e44f01528d6aaaabe210237870e1.cu
|
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/data_norm_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/nccl_helper.h"
#endif
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using DataLayout = framework::DataLayout;
using platform::PADDLE_CUDA_NUM_THREADS;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
template <typename T>
__global__ void KernelDataNormFF(int N, int C, const T *x, T *y, const T *mean,
const T *scale) {
CUDA_KERNEL_LOOP(i, N * C) {
int col = i % C;
y[i] = (x[i] - mean[col]) * scale[col];
}
}
template <typename T>
__global__ void KernelMeanScale(int C, const T *batch_size, const T *batch_sum,
const T *batch_square_sum, T *mean, T *scale) {
CUDA_KERNEL_LOOP(i, C) {
mean[i] = batch_sum[i] / batch_size[i];
scale[i] = sqrt(batch_size[i] / batch_square_sum[i]);
}
}
template <typename T>
__global__ void KernelDataNormBP(int N, int C, const T *y_grad, const T *scale,
T *x_grad) {
CUDA_KERNEL_LOOP(i, N * C) { x_grad[i] = y_grad[i] * scale[i % C]; }
}
template <typename T>
__global__ void KernelDataNormBPStat(int N, int C, const T *x_val,
const T *means,
const float squared_sum_epsilon,
T *batch_size, T *batch_sum,
T *batch_square_sum) {
CUDA_KERNEL_LOOP(i, C) {
T val_sum = 0;
T square_sum = 0;
for (int j = 0; j < N; j++) {
val_sum += x_val[j * C + i];
square_sum +=
(x_val[j * C + i] - means[i]) * (x_val[j * C + i] - means[i]);
}
batch_size[i] = 1;
batch_sum[i] = val_sum / N;
batch_square_sum[i] = square_sum / N + squared_sum_epsilon;
}
}
template <typename T>
__global__ void KernelUpdateParam(int C, const T *d_batch_size,
const T *d_batch_sum,
const T *d_batch_square_sum, T *batch_size,
T *batch_sum, T *batch_square_sum,
const float decay_rate) {
CUDA_KERNEL_LOOP(i, C) {
batch_size[i] = batch_size[i] * decay_rate + d_batch_size[i];
batch_sum[i] = batch_sum[i] * decay_rate + d_batch_sum[i];
batch_square_sum[i] =
batch_square_sum[i] * decay_rate + d_batch_square_sum[i];
}
}
template <typename T>
class DataNormKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const auto *x = ctx.Input<Tensor>("X");
const auto &x_dims = x->dims();
// Align with CPU version, but should we add this restriction?
PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::PreconditionNotMet(
"The Input dim size should be 2"));
const int N = x_dims[0];
const int C = x_dims[1];
const T *batch_size_in = ctx.Input<Tensor>("BatchSize")->data<T>();
const T *batch_sum_in = ctx.Input<Tensor>("BatchSum")->data<T>();
const T *batch_square_sum_in =
ctx.Input<Tensor>("BatchSquareSum")->data<T>();
auto *x_data = x->data<T>();
// alloc memory
T *y_data = ctx.Output<Tensor>("Y")->mutable_data<T>(ctx.GetPlace());
T *mean_out_data =
ctx.Output<Tensor>("Means")->mutable_data<T>(ctx.GetPlace());
T *scale_out_data =
ctx.Output<Tensor>("Scales")->mutable_data<T>(ctx.GetPlace());
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
KernelMeanScale<<<GET_BLOCKS(C), PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
C, batch_size_in, batch_sum_in, batch_square_sum_in, mean_out_data,
scale_out_data);
KernelDataNormFF<<<GET_BLOCKS(C * N), PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
N, C, x_data, y_data, mean_out_data, scale_out_data);
}
};
template <typename T>
class DataNormGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const auto *x = ctx.Input<Tensor>("X");
const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
const auto *scales = ctx.Input<Tensor>("Scales");
const auto *means = ctx.Input<Tensor>("Means");
const float epsilon = ctx.Attr<float>("epsilon");
const float dr = ctx.Attr<float>("summary_decay_rate");
const bool need_sync_stats = ctx.Attr<bool>("sync_stats");
const auto &x_dims = x->dims();
// Align with CPU version, but should we add this restriction?
PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::PreconditionNotMet(
"The Input dim size should be 2"));
const int N = x_dims[0];
const int C = x_dims[1];
// init output
Tensor *d_x = nullptr;
if (ctx.HasOutput(framework::GradVarName("X"))) {
d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
}
T *d_batch_size = ctx.Output<Tensor>(framework::GradVarName("BatchSize"))
->mutable_data<T>(ctx.GetPlace());
T *d_batch_sum = ctx.Output<Tensor>(framework::GradVarName("BatchSum"))
->mutable_data<T>(ctx.GetPlace());
T *d_batch_square_sum =
ctx.Output<Tensor>(framework::GradVarName("BatchSquareSum"))
->mutable_data<T>(ctx.GetPlace());
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
if (d_x != nullptr) {
KernelDataNormBP<<<GET_BLOCKS(C * N), PADDLE_CUDA_NUM_THREADS, 0,
stream>>>(N, C, d_y->data<T>(), scales->data<T>(),
d_x->mutable_data<T>(ctx.GetPlace()));
}
KernelDataNormBPStat<<<GET_BLOCKS(C), PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
N, C, x->data<T>(), means->data<T>(), epsilon, d_batch_size,
d_batch_sum, d_batch_square_sum);
if (need_sync_stats) {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
auto comm = platform::NCCLCommContext::Instance().Get(0, ctx.GetPlace());
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce(
reinterpret_cast<const void *>(d_batch_size),
reinterpret_cast<void *>(d_batch_size), C,
platform::ToNCCLDataType(x->type()), ncclSum, comm->comm(), stream));
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce(
reinterpret_cast<const void *>(d_batch_sum),
reinterpret_cast<void *>(d_batch_sum), C,
platform::ToNCCLDataType(x->type()), ncclSum, comm->comm(), stream));
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce(
reinterpret_cast<const void *>(d_batch_square_sum),
reinterpret_cast<void *>(d_batch_square_sum), C,
platform::ToNCCLDataType(x->type()), ncclSum, comm->comm(), stream));
cudaError_t e_sync = cudaStreamSynchronize(stream);
if (e_sync != 0) {
LOG(FATAL) << "Fail to sync nccl stream: "
<< cudaGetErrorString(e_sync);
}
#else
PADDLE_THROW(platform::errors::PreconditionNotMet(
"PaddlePaddle should compile with GPU, and need_sync_stats connot be "
"supported on windows now."));
#endif
}
T *batch_size_data =
ctx.Output<Tensor>("BatchSize")->mutable_data<T>(ctx.GetPlace());
T *batch_sum_data =
ctx.Output<Tensor>("BatchSum")->mutable_data<T>(ctx.GetPlace());
T *batch_square_sum_data =
ctx.Output<Tensor>("BatchSquareSum")->mutable_data<T>(ctx.GetPlace());
KernelUpdateParam<<<GET_BLOCKS(C), PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
C, d_batch_size, d_batch_sum, d_batch_square_sum, batch_size_data,
batch_sum_data, batch_square_sum_data, dr);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
data_norm, ops::DataNormKernel<paddle::platform::CUDADeviceContext, float>,
ops::DataNormKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
data_norm_grad,
ops::DataNormGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::DataNormGradKernel<paddle::platform::CUDADeviceContext, double>);
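// Illustrative sketch (not part of the Paddle sources above): a plain C++
// reference of the forward pass that KernelMeanScale + KernelDataNormFF
// compute, handy for checking the GPU result on small inputs. The function
// and parameter names are assumptions made for this example.
#include <cmath>
void data_norm_forward_ref(int N, int C, const float *x,
                           const float *batch_size, const float *batch_sum,
                           const float *batch_square_sum, float *y) {
  for (int c = 0; c < C; ++c) {
    float mean = batch_sum[c] / batch_size[c];                    // running mean per column
    float scale = std::sqrt(batch_size[c] / batch_square_sum[c]); // inverse-stddev-like scale
    for (int i = 0; i < N; ++i)
      y[i * C + c] = (x[i * C + c] - mean) * scale;               // same as KernelDataNormFF
  }
}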
|
c4e063b4587db769bd53c94bf8a5fa3b7246afe6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SimpleMOC-kernel_header.h"
/* My parallelization scheme here is to basically have a single
* block be a geometrical segment, with each thread within the
* block represent a single energy phase. On the CPU, the
* inner SIMD-ized loop is over energy (i.e, 100 energy groups).
* This should allow for each BLOCK to have:
* - A single state variable for the RNG
* - A set of __shared__ SIMD vectors, each thread id being its idx
*/
__global__ void run_kernel( Input I, Source * S,
Source_Arrays SA, Table * table, hiprandState_t * state,
float * state_fluxes, int N_state_fluxes)
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x; // geometric segment
if( blockId >= I.segments / I.seg_per_thread )
return;
// Assign RNG state
hiprandState_t * localState = &state[blockId % I.streams];
blockId *= I.seg_per_thread;
blockId--;
int g = threadIdx.x; // Each energy group (g) is one thread in a block
// Thread Local (i.e., specific to E group) variables
// Similar to SIMD vectors in CPU code
float q0 ;
float q1 ;
float q2 ;
float sigT ;
float tau ;
float sigT2 ;
float expVal ;
float reuse ;
float flux_integral;
float tally ;
float t1 ;
float t2 ;
float t3 ;
float t4 ;
// Randomized variables (common across all threads within the block)
extern __shared__ int shm[];
int * state_flux_id = &shm[0];
int * QSR_id = &shm[I.seg_per_thread];
int * FAI_id = &shm[I.seg_per_thread * 2];
if( threadIdx.x == 0 )
{
for( int i = 0; i < I.seg_per_thread; i++ )
{
state_flux_id[i] = hiprand(localState) % N_state_fluxes;
QSR_id[i] = hiprand(localState) % I.source_regions;
FAI_id[i] = hiprand(localState) % I.fine_axial_intervals;
}
}
__syncthreads();
for( int i = 0; i < I.seg_per_thread; i++ )
{
blockId++;
float * state_flux = &state_fluxes[state_flux_id[i]];
__syncthreads();
//////////////////////////////////////////////////////////
// Attenuate Segment
//////////////////////////////////////////////////////////
// Some placeholder constants - In the full app some of these are
// calculated based off position in geometry. This treatment
// shaves off a few FLOPS, but is not significant compared to the
// rest of the function.
float dz = 0.1f;
float zin = 0.3f;
float weight = 0.5f;
float mu = 0.9f;
float mu2 = 0.3f;
float ds = 0.7f;
const int egroups = I.egroups;
// load fine source region flux vector
float * FSR_flux = &SA.fine_flux_arr[ S[QSR_id[i]].fine_flux_id + FAI_id[i] * egroups];
if( FAI_id[i] == 0 )
{
float * f2 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i])*egroups];
float * f3 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]+1)*egroups];
// cycle over energy groups
// load neighboring sources
float y2 = __ldg(&f2[g]);
float y3 = __ldg(&f3[g]);
// do linear "fitting"
float c0 = y2;
float c1 = (y3 - y2) / dz;
// calculate q0, q1, q2
q0 = c0 + c1*zin;
q1 = c1;
q2 = 0;
}
else if ( FAI_id[i] == I.fine_axial_intervals - 1 )
{
float * f1 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]-1)*egroups];
float * f2 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i])*egroups];
// cycle over energy groups
// load neighboring sources
float y1 = __ldg(&f1[g]);
float y2 = __ldg(&f2[g]);
// do linear "fitting"
float c0 = y2;
float c1 = (y2 - y1) / dz;
// calculate q0, q1, q2
q0 = c0 + c1*zin;
q1 = c1;
q2 = 0;
}
else
{
float * f1 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]-1)*egroups];
float * f2 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i])*egroups];
float * f3 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]+1)*egroups];
// cycle over energy groups
// load neighboring sources
float y1 = __ldg(&f1[g]);
float y2 = __ldg(&f2[g]);
float y3 = __ldg(&f3[g]);
// do quadratic "fitting"
float c0 = y2;
float c1 = (y1 - y3) / (2.f*dz);
float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz);
// calculate q0, q1, q2
q0 = c0 + c1*zin + c2*zin*zin;
q1 = c1 + 2.f*c2*zin;
q2 = c2;
}
// load total cross section
sigT = __ldg(&SA.sigT_arr[ S[QSR_id[i]].sigT_id + g]);
// calculate common values for efficiency
tau = sigT * ds;
sigT2 = sigT * sigT;
//interpolateTable( table, tau, &expVal );
expVal = 1.f - exp( -tau); // EXP function is faster than table lookup
// Flux Integral
// Re-used Term
reuse = tau * (tau - 2.f) + 2.f * expVal
/ (sigT * sigT2);
// add contribution to new source flux
flux_integral = (q0 * tau + (sigT * __ldg(&state_flux[g]) - q0)
* expVal) / sigT2 + q1 * mu * reuse + q2 * mu2
* (tau * (tau * (tau - 3.f) + 6.f) - 6.f * expVal)
/ (3.f * sigT2 * sigT2);
// Prepare tally
tally = weight * flux_integral;
// SHOULD BE ATOMIC HERE!
//FSR_flux[g] += tally;
atomicAdd(&FSR_flux[g], (float) tally);
// Term 1
t1 = q0 * expVal / sigT;
// Term 2
t2 = q1 * mu * (tau - expVal) / sigT2;
// Term 3
t3 = q2 * mu2 * reuse;
// Term 4
t4 = state_flux[g] * (1.f - expVal);
// Total psi
state_flux[g] = t1 + t2 + t3 + t4;
}
}
/* Interpolates a precomputed exponential table to compute ( 1 - exp(-x) )
* at the desired x value */
__device__ void interpolateTable(Table * table, float x, float * out)
{
// check to ensure value is in domain
if( x > table->maxVal )
*out = 1.0f;
else
{
int interval = (int) ( x / table->dx + 0.5f * table->dx );
interval = interval * 2;
float slope = table->values[ interval ];
float intercept = table->values[ interval + 1 ];
float val = slope * x + intercept;
*out = val;
}
}
|
c4e063b4587db769bd53c94bf8a5fa3b7246afe6.cu
|
#include "SimpleMOC-kernel_header.h"
/* My parallelization scheme here is to basically have a single
* block be a geometrical segment, with each thread within the
* block represent a single energy phase. On the CPU, the
* inner SIMD-ized loop is over energy (i.e, 100 energy groups).
* This should allow for each BLOCK to have:
* - A single state variable for the RNG
* - A set of __shared__ SIMD vectors, each thread id being its idx
*/
__global__ void run_kernel( Input I, Source * S,
Source_Arrays SA, Table * table, curandState * state,
float * state_fluxes, int N_state_fluxes)
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x; // geometric segment
if( blockId >= I.segments / I.seg_per_thread )
return;
// Assign RNG state
curandState * localState = &state[blockId % I.streams];
blockId *= I.seg_per_thread;
blockId--;
int g = threadIdx.x; // Each energy group (g) is one thread in a block
// Thread Local (i.e., specific to E group) variables
// Similar to SIMD vectors in CPU code
float q0 ;
float q1 ;
float q2 ;
float sigT ;
float tau ;
float sigT2 ;
float expVal ;
float reuse ;
float flux_integral;
float tally ;
float t1 ;
float t2 ;
float t3 ;
float t4 ;
// Randomized variables (common across all threads within the block)
extern __shared__ int shm[];
int * state_flux_id = &shm[0];
int * QSR_id = &shm[I.seg_per_thread];
int * FAI_id = &shm[I.seg_per_thread * 2];
if( threadIdx.x == 0 )
{
for( int i = 0; i < I.seg_per_thread; i++ )
{
state_flux_id[i] = curand(localState) % N_state_fluxes;
QSR_id[i] = curand(localState) % I.source_regions;
FAI_id[i] = curand(localState) % I.fine_axial_intervals;
}
}
__syncthreads();
for( int i = 0; i < I.seg_per_thread; i++ )
{
blockId++;
float * state_flux = &state_fluxes[state_flux_id[i]];
__syncthreads();
//////////////////////////////////////////////////////////
// Attenuate Segment
//////////////////////////////////////////////////////////
// Some placeholder constants - In the full app some of these are
// calculated based off position in geometry. This treatment
// shaves off a few FLOPS, but is not significant compared to the
// rest of the function.
float dz = 0.1f;
float zin = 0.3f;
float weight = 0.5f;
float mu = 0.9f;
float mu2 = 0.3f;
float ds = 0.7f;
const int egroups = I.egroups;
// load fine source region flux vector
float * FSR_flux = &SA.fine_flux_arr[ S[QSR_id[i]].fine_flux_id + FAI_id[i] * egroups];
if( FAI_id[i] == 0 )
{
float * f2 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i])*egroups];
float * f3 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]+1)*egroups];
// cycle over energy groups
// load neighboring sources
float y2 = __ldg(&f2[g]);
float y3 = __ldg(&f3[g]);
// do linear "fitting"
float c0 = y2;
float c1 = (y3 - y2) / dz;
// calculate q0, q1, q2
q0 = c0 + c1*zin;
q1 = c1;
q2 = 0;
}
else if ( FAI_id[i] == I.fine_axial_intervals - 1 )
{
float * f1 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]-1)*egroups];
float * f2 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i])*egroups];
// cycle over energy groups
// load neighboring sources
float y1 = __ldg(&f1[g]);
float y2 = __ldg(&f2[g]);
// do linear "fitting"
float c0 = y2;
float c1 = (y2 - y1) / dz;
// calculate q0, q1, q2
q0 = c0 + c1*zin;
q1 = c1;
q2 = 0;
}
else
{
float * f1 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]-1)*egroups];
float * f2 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i])*egroups];
float * f3 = &SA.fine_source_arr[ S[QSR_id[i]].fine_source_id + (FAI_id[i]+1)*egroups];
// cycle over energy groups
// load neighboring sources
float y1 = __ldg(&f1[g]);
float y2 = __ldg(&f2[g]);
float y3 = __ldg(&f3[g]);
// do quadratic "fitting"
float c0 = y2;
float c1 = (y1 - y3) / (2.f*dz);
float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz);
// calculate q0, q1, q2
q0 = c0 + c1*zin + c2*zin*zin;
q1 = c1 + 2.f*c2*zin;
q2 = c2;
}
// load total cross section
sigT = __ldg(&SA.sigT_arr[ S[QSR_id[i]].sigT_id + g]);
// calculate common values for efficiency
tau = sigT * ds;
sigT2 = sigT * sigT;
//interpolateTable( table, tau, &expVal );
expVal = 1.f - exp( -tau); // EXP function is faster than table lookup
// Flux Integral
// Re-used Term
reuse = tau * (tau - 2.f) + 2.f * expVal
/ (sigT * sigT2);
// add contribution to new source flux
flux_integral = (q0 * tau + (sigT * __ldg(&state_flux[g]) - q0)
* expVal) / sigT2 + q1 * mu * reuse + q2 * mu2
* (tau * (tau * (tau - 3.f) + 6.f) - 6.f * expVal)
/ (3.f * sigT2 * sigT2);
// Prepare tally
tally = weight * flux_integral;
// SHOULD BE ATOMIC HERE!
//FSR_flux[g] += tally;
atomicAdd(&FSR_flux[g], (float) tally);
// Term 1
t1 = q0 * expVal / sigT;
// Term 2
t2 = q1 * mu * (tau - expVal) / sigT2;
// Term 3
t3 = q2 * mu2 * reuse;
// Term 4
t4 = state_flux[g] * (1.f - expVal);
// Total psi
state_flux[g] = t1 + t2 + t3 + t4;
}
}
/* Interpolates a precomputed exponential table to compute ( 1 - exp(-x) )
* at the desired x value */
__device__ void interpolateTable(Table * table, float x, float * out)
{
// check to ensure value is in domain
if( x > table->maxVal )
*out = 1.0f;
else
{
int interval = (int) ( x / table->dx + 0.5f * table->dx );
interval = interval * 2;
float slope = table->values[ interval ];
float intercept = table->values[ interval + 1 ];
float val = slope * x + intercept;
*out = val;
}
}
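// Illustrative sketch (not part of the SimpleMOC sources above): how the
// launch configuration implied by run_kernel fits together -- one block per
// group of seg_per_thread segments, one thread per energy group, and three
// shared ints per segment (state_flux_id, QSR_id, FAI_id). The numbers below
// are made-up example values, not SimpleMOC defaults.
#include <cstdio>
#include <cuda_runtime.h>
int main() {
  int segments = 100000, seg_per_thread = 10, egroups = 100;
  int n_blocks = segments / seg_per_thread;           // blockId before the *= seg_per_thread rescale
  dim3 grid(n_blocks);                                // the kernel also accepts a 2D split of this
  dim3 block(egroups);                                // threadIdx.x == energy group g
  size_t shmem = 3u * seg_per_thread * sizeof(int);   // backs the extern __shared__ int shm[]
  std::printf("grid = %u blocks, block = %u threads, %zu bytes of dynamic shared memory\n",
              grid.x, block.x, shmem);
  return 0;
}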
|
27604eaa271442b41657b6cec9b9b6c7f3ffa791.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
#include "utils/math_functions.h"
namespace dragon {
namespace kernel {
/*! PRelu <T = float32, Device = CUDA> */
template <typename T>
__global__ void _PRelu(
const int count,
const int channels,
const int dim,
const T* x,
const T* w,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
y[idx] = (x[idx] > 0) * x[idx] +
(x[idx] < 0) * x[idx] * w[0];
}
}
template <typename T>
__global__ void _PReluNCHW(
const int count,
const int channels,
const int dim,
const T* x,
const T* w,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int c = (idx / dim) % channels;
y[idx] = (x[idx] > 0) * x[idx] +
(x[idx] < 0) * x[idx] * w[c];
}
}
template <typename T>
__global__ void _PReluNHWC(
const int count,
const int channels,
const int dim,
const T* x,
const T* w,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int c = idx % channels;
y[idx] = (x[idx] > 0) * x[idx] +
(x[idx] < 0) * x[idx] * w[c];
}
}
template<> void PRelu<float, CUDAContext>(
const int count,
const int channels,
const int dim,
const bool channel_shared,
const string& data_format,
const float* x,
const float* w,
float* y,
CUDAContext* ctx) {
if (channel_shared) {
_PRelu<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, channels, dim, x, w, y);
} else {
if (data_format == "NCHW") {
_PReluNCHW<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, channels, dim, x, w, y);
} else if (data_format == "NHWC") {
_PReluNHWC<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, channels, dim, x, w, y);
} else LOG(FATAL) << "Unknown data format: " << data_format;
}
}
/*! PReluGrad <T = float32, Device = CUDA> */
template <typename T>
__global__ void _PReluGrad(
const int count,
const int channels,
const int dim,
const T* dy,
const T* x,
const T* w,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
dx[idx] = dy[idx] * (
(x[idx] > 0) + (x[idx] <= 0) * w[0]
);
}
}
template <typename T>
__global__ void _PReluGradNCHW(
const int count,
const int channels,
const int dim,
const T* dy,
const T* x,
const T* w,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int c = (idx / dim) % channels;
dx[idx] = dy[idx] * (
(x[idx] > 0) + (x[idx] <= 0) * w[c]
);
}
}
template <typename T>
__global__ void _PReluGradNHWC(
const int count,
const int channels,
const int dim,
const T* dy,
const T* x,
const T* w,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int c = idx % channels;
dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]);
}
}
template<> void PReluGrad<float, CUDAContext>(
const int count,
const int channels,
const int dim,
const bool channel_shared,
const string& data_format,
const float* dy,
const float* x,
const float* w,
float* dx,
CUDAContext* ctx) {
if (channel_shared) {
_PReluGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, channels, dim, dy, x, w, dx);
} else {
if (data_format == "NCHW") {
_PReluGradNCHW<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, channels, dim, dy, x, w, dx);
} else if (data_format == "NHWC") {
_PReluGradNHWC<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, channels, dim, dy, x, w, dx);
} else LOG(FATAL) << "Unknown data format: " << data_format;
}
}
/*! PReluWGrad <T = float32, Device = CUDA> */
template <typename T>
__global__ void _PReluWGradBcast(
const int count,
const int rows,
const int row_offset,
const T* dy,
const T* x,
T* bcast_dw) {
CUDA_1D_KERNEL_LOOP(idx, count) {
bcast_dw[idx] = dy[idx] * x[idx] * (x[idx] <= 0);
for (int n = 1; n < rows; n++) {
const int cur_idx = idx + n * row_offset;
bcast_dw[idx] +=
dy[cur_idx] * x[cur_idx] * (x[cur_idx] <= 0);
}
}
}
template<> void PReluWGrad<float, CUDAContext>(
const int rows,
const int row_offset,
const int channels,
const int dim,
const bool channel_shared,
const string& data_format,
const float* dy,
const float* x,
const float* multiplier,
float* bcast_dw,
float* dw,
CUDAContext* ctx) {
const int cdim = channels * dim;
_PReluWGradBcast<float>
<< < CUDA_BLOCKS(cdim), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(cdim, rows, row_offset, dy, x, bcast_dw);
if (channel_shared) {
math::Dot<float, CUDAContext>(channels * dim,
bcast_dw, multiplier, dw, ctx);
} else {
if (data_format == "NCHW") {
math::Gemv<float, CUDAContext>(
CblasNoTrans, channels, dim,
1.f, bcast_dw, multiplier,
0.f, dw, ctx);
} else if (data_format == "NHWC") {
math::Gemv<float, CUDAContext>(
CblasTrans, dim, channels,
1.f, bcast_dw, multiplier,
0.f, dw, ctx);
} else LOG(FATAL) << "Unknown data format: " << data_format;
}
}
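/*
 * Summary of the weight-gradient path above: _PReluWGradBcast first forms, for
 * each position j of one (channels x dim) slice, the partial sum over the
 * batch rows
 *     bcast_dw[j] = sum_n dy[n, j] * x[n, j] * (x[n, j] <= 0),
 * and the host-side reduction then collapses bcast_dw with the "multiplier"
 * vector (typically all ones): a Dot over channels*dim when the slope is
 * channel-shared, otherwise a Gemv that sums over `dim` (NCHW) or over the
 * `dim` leading rows (NHWC), yielding one dw entry per channel.
 */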
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA
|
27604eaa271442b41657b6cec9b9b6c7f3ffa791.cu
|
#ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
#include "utils/math_functions.h"
namespace dragon {
namespace kernel {
/*! PRelu <T = float32, Device = CUDA> */
template <typename T>
__global__ void _PRelu(
const int count,
const int channels,
const int dim,
const T* x,
const T* w,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
y[idx] = (x[idx] > 0) * x[idx] +
(x[idx] < 0) * x[idx] * w[0];
}
}
template <typename T>
__global__ void _PReluNCHW(
const int count,
const int channels,
const int dim,
const T* x,
const T* w,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int c = (idx / dim) % channels;
y[idx] = (x[idx] > 0) * x[idx] +
(x[idx] < 0) * x[idx] * w[c];
}
}
template <typename T>
__global__ void _PReluNHWC(
const int count,
const int channels,
const int dim,
const T* x,
const T* w,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int c = idx % channels;
y[idx] = (x[idx] > 0) * x[idx] +
(x[idx] < 0) * x[idx] * w[c];
}
}
template<> void PRelu<float, CUDAContext>(
const int count,
const int channels,
const int dim,
const bool channel_shared,
const string& data_format,
const float* x,
const float* w,
float* y,
CUDAContext* ctx) {
if (channel_shared) {
_PRelu<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, channels, dim, x, w, y);
} else {
if (data_format == "NCHW") {
_PReluNCHW<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, channels, dim, x, w, y);
} else if (data_format == "NHWC") {
_PReluNHWC<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, channels, dim, x, w, y);
} else LOG(FATAL) << "Unknown data format: " << data_format;
}
}
/*! PReluGrad <T = float32, Device = CUDA> */
template <typename T>
__global__ void _PReluGrad(
const int count,
const int channels,
const int dim,
const T* dy,
const T* x,
const T* w,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
dx[idx] = dy[idx] * (
(x[idx] > 0) + (x[idx] <= 0) * w[0]
);
}
}
template <typename T>
__global__ void _PReluGradNCHW(
const int count,
const int channels,
const int dim,
const T* dy,
const T* x,
const T* w,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int c = (idx / dim) % channels;
dx[idx] = dy[idx] * (
(x[idx] > 0) + (x[idx] <= 0) * w[c]
);
}
}
template <typename T>
__global__ void _PReluGradNHWC(
const int count,
const int channels,
const int dim,
const T* dy,
const T* x,
const T* w,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int c = idx % channels;
dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]);
}
}
template<> void PReluGrad<float, CUDAContext>(
const int count,
const int channels,
const int dim,
const bool channel_shared,
const string& data_format,
const float* dy,
const float* x,
const float* w,
float* dx,
CUDAContext* ctx) {
if (channel_shared) {
_PReluGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, channels, dim, dy, x, w, dx);
} else {
if (data_format == "NCHW") {
_PReluGradNCHW<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, channels, dim, dy, x, w, dx);
} else if (data_format == "NHWC") {
_PReluGradNHWC<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, channels, dim, dy, x, w, dx);
} else LOG(FATAL) << "Unknown data format: " << data_format;
}
}
/*! PReluWGrad <T = float32, Device = CUDA> */
template <typename T>
__global__ void _PReluWGradBcast(
const int count,
const int rows,
const int row_offset,
const T* dy,
const T* x,
T* bcast_dw) {
CUDA_1D_KERNEL_LOOP(idx, count) {
bcast_dw[idx] = dy[idx] * x[idx] * (x[idx] <= 0);
for (int n = 1; n < rows; n++) {
const int cur_idx = idx + n * row_offset;
bcast_dw[idx] +=
dy[cur_idx] * x[cur_idx] * (x[cur_idx] <= 0);
}
}
}
template<> void PReluWGrad<float, CUDAContext>(
const int rows,
const int row_offset,
const int channels,
const int dim,
const bool channel_shared,
const string& data_format,
const float* dy,
const float* x,
const float* multiplier,
float* bcast_dw,
float* dw,
CUDAContext* ctx) {
const int cdim = channels * dim;
_PReluWGradBcast<float>
<< < CUDA_BLOCKS(cdim), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(cdim, rows, row_offset, dy, x, bcast_dw);
if (channel_shared) {
math::Dot<float, CUDAContext>(channels * dim,
bcast_dw, multiplier, dw, ctx);
} else {
if (data_format == "NCHW") {
math::Gemv<float, CUDAContext>(
CblasNoTrans, channels, dim,
1.f, bcast_dw, multiplier,
0.f, dw, ctx);
} else if (data_format == "NHWC") {
math::Gemv<float, CUDAContext>(
CblasTrans, dim, channels,
1.f, bcast_dw, multiplier,
0.f, dw, ctx);
} else LOG(FATAL) << "Unknown data format: " << data_format;
}
}
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA
|
7514a0e8960de81d55f6ec5a557a81e5315c6db8.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include "utilities.h"
#include "parasort.h"
#include <stdio.h>
#include <iostream>
#include <algorithm>
#include <thread>
#include <boost\thread\barrier.hpp>
#include <boost\sort\sort.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include <thrust\device_vector.h>
using namespace std;
void performKnapsackDynamicCudaCalculations(const string& dirName, vector<string>& fileNames);
void performKnapsackDynamicCPUCalculations(const string& dirName, vector<string>& fileNames);
void performKnapsackParallelDynamicCPUCalculations(const string& dirName, vector<string>& fileNames, unsigned threadCount);
hipError_t knapsackCudaDynamic(int *output, const int *val, const int *wt, unsigned int n, unsigned int W);
void performKnapsackSortingCudaCalculations(const string& dirName, vector<string>& fileNames);
void performKnapsackSortingCPUCalculations(const string& dirName, vector<string>& fileNames, unsigned threadCount);
__device__ int maxi(int a, int b) {
return (a > b) ? a : b;
}
__global__ void knapsackDynamicKernelPrepare(int *output, int n, int W) {
int w = blockIdx.x * blockDim.x + threadIdx.x;
if (w > W) return;
output[w] = -1;
if (w == 0)
output[w] = 0;
}
__global__ void knapsackDynamicKernel(int *wt, int *val, int *output, int i, int n, int W) {
int w = blockIdx.x * blockDim.x + threadIdx.x;
if (w > W) return;
int currentIndex = (i % 2)*(W + 1) + w;
int previousIndex = ((i - 1) % 2)*(W + 1) + w;
if (w - wt[i - 1] < 0 || output[previousIndex - wt[i - 1]] < 0)
output[currentIndex] = output[previousIndex];
else
output[currentIndex] = maxi(val[i - 1] + output[previousIndex - wt[i - 1]], output[previousIndex]);
__syncthreads();
}
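/*
 * The kernel above evaluates one row of the 0/1 knapsack recurrence on a
 * two-row rolling table of width W + 1 (the active row is selected by i % 2):
 *     dp[i][w] = dp[i-1][w]                                        if w < wt[i-1] or dp[i-1][w - wt[i-1]] < 0
 *              = max(dp[i-1][w], val[i-1] + dp[i-1][w - wt[i-1]])  otherwise
 * where dp[i][w] is the best value of a subset of the first i items whose
 * weights sum to exactly w, and -1 marks unreachable weights (set by
 * knapsackDynamicKernelPrepare). Launching the kernel once per item keeps the
 * row-to-row dependency on the host side; within one launch threads only read
 * the previous row, so no intra-kernel synchronization is required.
 */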
int main() {
//generateRandomDataFile("myDataSet1", 100000, 5000000);
//generateRandomDataFile("myDataSet2", 1000000, 500000);
//generateRandomDataFile("myDataSet3", 100000, 2000000);
//generateRandomDataFile("myDataSet4", 1000000, 2000000);
vector<string> lowDimensional, largeScale, hugeScale;
read_directory("low_dimensional", lowDimensional);
read_directory("large_scale", largeScale);
read_directory("huge_scale", hugeScale);
std::cout << "===DANE MALEJ SKALI - PODEJSCIE DYNAMICZNE - CUDA===" << endl;
performKnapsackDynamicCudaCalculations("low_dimensional", lowDimensional);
std::cout << endl << "===DANE DUZEJ SKALI - PODEJSCIE DYNAMICZNE - CUDA===" << endl;
performKnapsackDynamicCudaCalculations("large_scale", largeScale);
std::cout << endl << "===WASNE DANE DUZEJ SKALI - PODEJSCIE DYNAMICZNE - CUDA===" << endl;
performKnapsackDynamicCudaCalculations("huge_scale", hugeScale);
std::cout << "===DANE MALEJ SKALI - PODEJSCIE DYNAMICZNE - CPU===" << endl;
performKnapsackDynamicCPUCalculations("low_dimensional", lowDimensional);
std::cout << endl << "===DANE DUZEJ SKALI - PODEJSCIE DYNAMICZNE - CPU===" << endl;
performKnapsackDynamicCPUCalculations("large_scale", largeScale);
//std::cout << endl << "===WASNE DANE DUZEJ SKALI - PODEJSCIE DYNAMICZNE - CPU===" << endl;
//performKnapsackDynamicCPUCalculations("huge_scale", hugeScale);
for (unsigned i = 2; i <= 4; i *= 2) {
std::cout << "===DANE MALEJ SKALI - PODEJSCIE DYNAMICZNE - CPU "<< i << "===" << endl;
performKnapsackParallelDynamicCPUCalculations("low_dimensional", lowDimensional, i);
std::cout << endl << "===DANE DUZEJ SKALI - PODEJSCIE DYNAMICZNE - CPU " << i << "===" << endl;
performKnapsackParallelDynamicCPUCalculations("large_scale", largeScale, i);
}
for (unsigned i = 4; i <= 4; i *= 2) {
std::cout << endl << "===WASNE DANE DUZEJ SKALI - PODEJSCIE DYNAMICZNE - CPU " << i << "===" << endl;
performKnapsackParallelDynamicCPUCalculations("huge_scale", hugeScale, i);
}
std::cout << endl << "===DANE MALEJ SKALI - PODEJSCIE APROKSYMACYJNE - CUDA===" << endl;
performKnapsackSortingCudaCalculations("low_dimensional", lowDimensional);
std::cout << endl << "===DANE DUZEJ SKALI - PODEJSCIE APROKSYMACYJNE - CUDA===" << endl;
performKnapsackSortingCudaCalculations("large_scale", largeScale);
std::cout << endl << "===WASNE DANE DUZEJ SKALI - PODEJSCIE APROKSYMACYJNE - CUDA===" << endl;
performKnapsackSortingCudaCalculations("huge_scale", hugeScale);
for (unsigned i = 1; i <= 4; i *= 2) {
std::cout << endl << "===DANE MALEJ SKALI - PODEJSCIE APROKSYMACYJNE - CPU " << i << "===" << endl;
performKnapsackSortingCPUCalculations("low_dimensional", lowDimensional, i);
std::cout << endl << "===DANE DUZEJ SKALI - PODEJSCIE APROKSYMACYJNE - CPU " << i << "===" << endl;
performKnapsackSortingCPUCalculations("large_scale", largeScale, i);
std::cout << endl << "===WASNE DANE DUZEJ SKALI - PODEJSCIE APROKSYMACYJNE - CPU " << i << "===" << endl;
performKnapsackSortingCPUCalculations("huge_scale", hugeScale, i);
}
system("pause");
return 0;
}
void performKnapsackSortingCudaCalculations(const string& dirName, vector<string>& fileNames) {
std::cout << StringPadding("file", 25) << StringPadding("n", 8) << StringPadding("W", 10)
<< StringPadding("time(ms)", 14) << StringPadding("expected", 10) << StringPadding("obtained", 10)
<< StringPadding("error(\%)", 10) << endl;
for (auto it = fileNames.begin(); it != fileNames.end(); it++) {
unsigned int n, W, expectedResult;
int *values, *weights;
auto ret = loadData(dirName, (*it), n, W, expectedResult);
values = ret.first;
weights = ret.second;
std::cout << StringPadding((*it), 25) << StringPadding(to_string(n), 8) << StringPadding(to_string(W), 10);
auto start = std::chrono::system_clock::now();
thrust::device_vector<int> dev_values(values, values + n);
thrust::device_vector<int> dev_weights(weights, weights + n);
thrust::device_vector<float> dev_output(n);
thrust::device_vector<int> indexes(n);
thrust::transform(dev_values.begin(), dev_values.end(), dev_weights.begin(), dev_output.begin(),
thrust::divides<float>());
thrust::sequence(indexes.begin(), indexes.end());
thrust::sort_by_key(dev_output.begin(), dev_output.end(), indexes.begin(), thrust::greater<float>());
thrust::host_vector<int> h_indexes(indexes);
unsigned int weight = 0, maxValue = 0;
for (auto it2 = h_indexes.begin(); it2 != h_indexes.end(); it2++) {
if (weight + weights[*it2] <= W) {
weight += weights[*it2];
maxValue += values[*it2];
}
}
auto end = std::chrono::system_clock::now();
auto elapsed = chrono::duration_cast<chrono::microseconds>(end - start).count();
std::cout << StringPadding(to_string(elapsed), 14);
std::cout << StringPadding(to_string(expectedResult), 10) << StringPadding(to_string(maxValue), 10) << StringPadding(to_string(((float)((int)expectedResult - maxValue) / (float)expectedResult)*100.0), 10) << std::endl;
}
}
bool wayToSort(pair<float, int> i, pair<float, int> j) { return i.first > j.first; }
void performKnapsackSortingCPUCalculations(const string& dirName, vector<string>& fileNames, unsigned threadCount) {
std::cout << StringPadding("file", 25) << StringPadding("n", 8) << StringPadding("W", 10)
<< StringPadding("time(ms)", 14) << StringPadding("expected", 10) << StringPadding("obtained", 10)
<< StringPadding("error(\%)", 10) << endl;
for (auto it = fileNames.begin(); it != fileNames.end(); it++) {
unsigned int n, W, expectedResult;
int *values, *weights;
auto ret = loadData(dirName, (*it), n, W, expectedResult);
values = ret.first;
weights = ret.second;
std::cout << StringPadding((*it), 25) << StringPadding(to_string(n), 8) << StringPadding(to_string(W), 10);
auto start = std::chrono::system_clock::now();
pair<float, int> *output = new pair<float, int>[n];
for (int i = 0; i < n; ++i) {
output[i] = pair<float, int>(float(values[i]) / float(weights[i]), i);
}
if (threadCount == 1) {
std::sort(output, output + n, wayToSort);
}
else {
parasort(n, output, threadCount);
std::reverse(output, output + n);
}
unsigned int weight = 0, maxValue = 0;
for (auto i = 0; i < n; ++i) {
//cout << output[i].first << " ";
if (weight + weights[output[i].second] <= W) {
weight += weights[output[i].second];
maxValue += values[output[i].second];
}
}
auto end = std::chrono::system_clock::now();
auto elapsed = chrono::duration_cast<chrono::microseconds>(end - start).count();
std::cout << StringPadding(to_string(elapsed), 14);
std::cout << StringPadding(to_string(expectedResult), 10) << StringPadding(to_string(maxValue), 10) << StringPadding(to_string(((float)((int)expectedResult - maxValue) / (float)expectedResult)*100.0), 10) << std::endl;
}
}
void performKnapsackDynamicCudaCalculations(const string& dirName, vector<string>& fileNames) {
std::cout << StringPadding("file", 25) << StringPadding("n", 8) << StringPadding("W", 10)
<< StringPadding("time(ms)", 14) << StringPadding("expected", 10) << StringPadding("obtained", 10)
<< StringPadding("error(\%)", 10) << endl;
for (auto it = fileNames.begin(); it != fileNames.end(); it++) {
unsigned int n, W, expectedResult;
int *values, *weights;
auto ret = loadData(dirName, (*it), n, W, expectedResult);
values = ret.first;
weights = ret.second;
int *output = new int[2 * (W + 1) * sizeof(int)];
std::cout << StringPadding((*it), 25) << StringPadding(to_string(n), 8) << StringPadding(to_string(W), 10);
auto start = std::chrono::system_clock::now();
hipError_t cudaStatus = knapsackCudaDynamic(output, values, weights, n, W);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "knapsackCuda failed!");
return;
}
int max = -1;
for (int j = 0; j <= W; j++) {
//std::cout << output[(n % 2)*W + j] << " ";
if (max < output[(n % 2)*(W + 1) + j]) { // row stride of the two-row DP table is W + 1
max = output[(n % 2)*(W + 1) + j];
}
}
auto end = std::chrono::system_clock::now();
auto elapsed = chrono::duration_cast<chrono::microseconds>(end - start).count();
std::cout << StringPadding(to_string(elapsed), 14);
std::cout << StringPadding(to_string(expectedResult), 10) << StringPadding(to_string(max), 10) << StringPadding(to_string((int)expectedResult - max), 10) << std::endl;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return;
}
delete[] values;
delete[] weights;
delete[] output;
}
}
void performKnapsackDynamicCPUCalculations(const string& dirName, vector<string>& fileNames) {
std::cout << StringPadding("file", 25) << StringPadding("n", 8) << StringPadding("W", 10)
<< StringPadding("time(ms)", 14) << StringPadding("expected", 10) << StringPadding("obtained", 10)
<< StringPadding("error(\%)", 10) << endl;
for (auto it = fileNames.begin(); it != fileNames.end(); it++) {
unsigned int n, W, expectedResult;
int *values, *weights;
auto ret = loadData(dirName, (*it), n, W, expectedResult);
values = ret.first;
weights = ret.second;
int *output = new int[2 * (W + 1) * sizeof(int)];
std::cout << StringPadding((*it), 25) << StringPadding(to_string(n), 8) << StringPadding(to_string(W), 10);
auto start = std::chrono::system_clock::now();
for (int w = 0; w <= W; ++w) {
output[w] = -1;
}
output[0] = 0;
for (int i = 1; i <= n; ++i) {
for (int w = 0; w < W + 1; ++w) {
int currentIndex = (i % 2)*(W + 1) + w;
int previousIndex = ((i - 1) % 2)*(W + 1) + w;
if (w - weights[i - 1] < 0 || output[previousIndex - weights[i - 1]] < 0)
output[currentIndex] = output[previousIndex];
else
output[currentIndex] = max(values[i - 1] + output[previousIndex - weights[i - 1]], output[previousIndex]);
}
}
int max = -1;
for (int j = 0; j <= W; j++) {
//std::cout << output[(n % 2)*W + j] << " ";
if (max < output[(n % 2)*(W + 1) + j]) {
max = output[(n % 2)*(W + 1) + j];
}
}
auto end = std::chrono::system_clock::now();
auto elapsed = chrono::duration_cast<chrono::microseconds>(end - start).count();
std::cout << StringPadding(to_string(elapsed), 14);
std::cout << StringPadding(to_string(expectedResult), 10) << StringPadding(to_string(max), 10) << StringPadding(to_string((int)expectedResult - max), 10) << std::endl;
delete[] values;
delete[] weights;
delete[] output;
}
}
boost::mutex io_mutex;
void dynamicCPUThread(boost::barrier &b, int* values, int* weights, int* output, const unsigned &n, const unsigned &W, const unsigned &threadCount, const int start, const int end) {
for (int i = 1; i <= n; ++i) {
//cout << i << " ";
b.wait();
for (int w = start; w < end; ++w) {
int currentIndex = (i % 2)*(W + 1) + w;
int previousIndex = ((i - 1) % 2)*(W + 1) + w;
if (w - weights[i - 1] < 0 || output[previousIndex - weights[i - 1]] < 0)
output[currentIndex] = output[previousIndex];
else
output[currentIndex] = max(values[i - 1] + output[previousIndex - weights[i - 1]], output[previousIndex]);
}
}
}
void performKnapsackParallelDynamicCPUCalculations(const string& dirName, vector<string>& fileNames, unsigned threadCount) {
std::cout << StringPadding("file", 25) << StringPadding("n", 8) << StringPadding("W", 10)
<< StringPadding("time(ms)", 14) << StringPadding("expected", 10) << StringPadding("obtained", 10)
<< StringPadding("error(\%)", 10) << endl;
for (auto it = fileNames.begin(); it != fileNames.end(); it++) {
unsigned int n, W, expectedResult;
int *values, *weights;
auto ret = loadData(dirName, (*it), n, W, expectedResult);
values = ret.first;
weights = ret.second;
int *output = new int[2 * (W + 1) * sizeof(int)];
std::cout << StringPadding((*it), 25) << StringPadding(to_string(n), 8) << StringPadding(to_string(W), 10);
auto start = std::chrono::system_clock::now();
for (int w = 0; w <= W; ++w) {
output[w] = -1;
}
output[0] = 0;
vector<thread> threads;
boost::barrier b(threadCount);
//dynamicCPUThread(int* values, int* weights, int* output, int &i, int &W, int start, int end)
for (int j = 0; j < threadCount-1; ++j) {
thread t(&dynamicCPUThread, ref(b), values, weights, output, ref(n), ref(W), ref(threadCount),
int(W/threadCount)*j, int(W / threadCount)*(j+1));
//cout << "Starts " << int(W / threadCount)*j << "-" << int(W / threadCount)*(j + 1) << endl;
threads.push_back(move(t));
}
thread t(&dynamicCPUThread, ref(b), values, weights, output, ref(n), ref(W), ref(threadCount),
int(W / threadCount)*(threadCount - 1), int(W + 1));
//cout << "Starts " << int(W / threadCount)*(threadCount - 1) << "-" << int(W + 2) << endl;
threads.push_back(move(t));
for (auto it = threads.begin(); it != threads.end(); ++it) {
(*it).join();
}
int max = -1;
for (int j = 0; j <= W; j++) {
//std::cout << output[(n % 2)*W + j] << " ";
if (max < output[(n % 2)*(W + 1) + j]) {
max = output[(n % 2)*(W + 1) + j];
}
}
auto end = std::chrono::system_clock::now();
auto elapsed = chrono::duration_cast<chrono::microseconds>(end - start).count();
std::cout << StringPadding(to_string(elapsed), 14);
std::cout << StringPadding(to_string(expectedResult), 10) << StringPadding(to_string(max), 10) << StringPadding(to_string((int)expectedResult - max), 10) << std::endl;
delete[] values;
delete[] weights;
delete[] output;
}
}
hipError_t knapsackCudaDynamic(int *output, const int *values, const int *weights, unsigned int n, unsigned int W) {
int *dev_values = 0;
int *dev_weights = 0;
int *dev_output = 0;
int i = 1;
hipError_t cudaStatus;
int *h_output = 0;
int *h_values = 0;
int *h_weights = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipHostMalloc((void**)&h_output, 2 * (W + 1) * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc 1 failed!");
goto Error;
}
cudaStatus = hipHostMalloc((void**)&h_values, n * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc 2 failed!");
goto Error;
}
memcpy(h_values, values, n * sizeof(int));
cudaStatus = hipHostMalloc((void**)&h_weights, n * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc 3 failed!");
goto Error;
}
memcpy(h_weights, weights, n * sizeof(int));
cudaStatus = hipMalloc((void**)&dev_output, 2 * (W + 1) * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc 1 failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_values, n * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc 2 failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_weights, n * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc 3 failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_values, h_values, n * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy 1 failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_weights, h_weights, n * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy 2 failed!");
goto Error;
}
hipEventRecord(start);
knapsackDynamicKernelPrepare << <int((W + 1) / 1024) + 1, 1024 >> >(dev_output, n, W);
while (i <= n) {
knapsackDynamicKernel << <int((W + 1) / 1024) + 1, 1024 >> >(dev_weights, dev_values, dev_output, i, n, W);
i++;
}
hipEventRecord(stop);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "knapsackKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching knapsackKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(h_output, dev_output, 2 * (W + 1) * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy 4 failed!");
goto Error;
}
memcpy(output, h_output, 2 * (W + 1) * sizeof(int));
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
//std::cout << "Execution Time : " << milliseconds << " milliseconds" << std::endl;
Error:
hipFree(dev_output);
hipFree(dev_values);
hipFree(dev_weights);
hipHostFree(h_output);
hipHostFree(h_values);
hipHostFree(h_weights);
return cudaStatus;
}
|
7514a0e8960de81d55f6ec5a557a81e5315c6db8.cu
|
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "utilities.h"
#include "parasort.h"
#include <stdio.h>
#include <iostream>
#include <algorithm>
#include <thread>
#include <boost\thread\barrier.hpp>
#include <boost\sort\sort.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <thrust\device_vector.h>
using namespace std;
void performKnapsackDynamicCudaCalculations(const string& dirName, vector<string>& fileNames);
void performKnapsackDynamicCPUCalculations(const string& dirName, vector<string>& fileNames);
void performKnapsackParallelDynamicCPUCalculations(const string& dirName, vector<string>& fileNames, unsigned threadCount);
cudaError_t knapsackCudaDynamic(int *output, const int *val, const int *wt, unsigned int n, unsigned int W);
void performKnapsackSortingCudaCalculations(const string& dirName, vector<string>& fileNames);
void performKnapsackSortingCPUCalculations(const string& dirName, vector<string>& fileNames, unsigned threadCount);
__device__ int maxi(int a, int b) {
return (a > b) ? a : b;
}
__global__ void knapsackDynamicKernelPrepare(int *output, int n, int W) {
int w = blockIdx.x * blockDim.x + threadIdx.x;
if (w > W) return;
output[w] = -1;
if (w == 0)
output[w] = 0;
}
__global__ void knapsackDynamicKernel(int *wt, int *val, int *output, int i, int n, int W) {
int w = blockIdx.x * blockDim.x + threadIdx.x;
if (w > W) return;
int currentIndex = (i % 2)*(W + 1) + w;
int previousIndex = ((i - 1) % 2)*(W + 1) + w;
if (w - wt[i - 1] < 0 || output[previousIndex - wt[i - 1]] < 0)
output[currentIndex] = output[previousIndex];
else
output[currentIndex] = maxi(val[i - 1] + output[previousIndex - wt[i - 1]], output[previousIndex]);
__syncthreads();
}
int main() {
//generateRandomDataFile("myDataSet1", 100000, 5000000);
//generateRandomDataFile("myDataSet2", 1000000, 500000);
//generateRandomDataFile("myDataSet3", 100000, 2000000);
//generateRandomDataFile("myDataSet4", 1000000, 2000000);
vector<string> lowDimensional, largeScale, hugeScale;
read_directory("low_dimensional", lowDimensional);
read_directory("large_scale", largeScale);
read_directory("huge_scale", hugeScale);
std::cout << "===DANE MALEJ SKALI - PODEJSCIE DYNAMICZNE - CUDA===" << endl;
performKnapsackDynamicCudaCalculations("low_dimensional", lowDimensional);
std::cout << endl << "===DANE DUZEJ SKALI - PODEJSCIE DYNAMICZNE - CUDA===" << endl;
performKnapsackDynamicCudaCalculations("large_scale", largeScale);
std::cout << endl << "===WŁASNE DANE DUZEJ SKALI - PODEJSCIE DYNAMICZNE - CUDA===" << endl;
performKnapsackDynamicCudaCalculations("huge_scale", hugeScale);
std::cout << "===DANE MALEJ SKALI - PODEJSCIE DYNAMICZNE - CPU===" << endl;
performKnapsackDynamicCPUCalculations("low_dimensional", lowDimensional);
std::cout << endl << "===DANE DUZEJ SKALI - PODEJSCIE DYNAMICZNE - CPU===" << endl;
performKnapsackDynamicCPUCalculations("large_scale", largeScale);
//std::cout << endl << "===WŁASNE DANE DUZEJ SKALI - PODEJSCIE DYNAMICZNE - CPU===" << endl;
//performKnapsackDynamicCPUCalculations("huge_scale", hugeScale);
for (unsigned i = 2; i <= 4; i *= 2) {
std::cout << "===DANE MALEJ SKALI - PODEJSCIE DYNAMICZNE - CPU "<< i << "===" << endl;
performKnapsackParallelDynamicCPUCalculations("low_dimensional", lowDimensional, i);
std::cout << endl << "===DANE DUZEJ SKALI - PODEJSCIE DYNAMICZNE - CPU " << i << "===" << endl;
performKnapsackParallelDynamicCPUCalculations("large_scale", largeScale, i);
}
for (unsigned i = 4; i <= 4; i *= 2) {
std::cout << endl << "===WŁASNE DANE DUZEJ SKALI - PODEJSCIE DYNAMICZNE - CPU " << i << "===" << endl;
performKnapsackParallelDynamicCPUCalculations("huge_scale", hugeScale, i);
}
std::cout << endl << "===DANE MALEJ SKALI - PODEJSCIE APROKSYMACYJNE - CUDA===" << endl;
performKnapsackSortingCudaCalculations("low_dimensional", lowDimensional);
std::cout << endl << "===DANE DUZEJ SKALI - PODEJSCIE APROKSYMACYJNE - CUDA===" << endl;
performKnapsackSortingCudaCalculations("large_scale", largeScale);
std::cout << endl << "===WŁASNE DANE DUZEJ SKALI - PODEJSCIE APROKSYMACYJNE - CUDA===" << endl;
performKnapsackSortingCudaCalculations("huge_scale", hugeScale);
for (unsigned i = 1; i <= 4; i *= 2) {
std::cout << endl << "===DANE MALEJ SKALI - PODEJSCIE APROKSYMACYJNE - CPU " << i << "===" << endl;
performKnapsackSortingCPUCalculations("low_dimensional", lowDimensional, i);
std::cout << endl << "===DANE DUZEJ SKALI - PODEJSCIE APROKSYMACYJNE - CPU " << i << "===" << endl;
performKnapsackSortingCPUCalculations("large_scale", largeScale, i);
std::cout << endl << "===WŁASNE DANE DUZEJ SKALI - PODEJSCIE APROKSYMACYJNE - CPU " << i << "===" << endl;
performKnapsackSortingCPUCalculations("huge_scale", hugeScale, i);
}
system("pause");
return 0;
}
void performKnapsackSortingCudaCalculations(const string& dirName, vector<string>& fileNames) {
std::cout << StringPadding("file", 25) << StringPadding("n", 8) << StringPadding("W", 10)
<< StringPadding("time(ms)", 14) << StringPadding("expected", 10) << StringPadding("obtained", 10)
<< StringPadding("error(\%)", 10) << endl;
for (auto it = fileNames.begin(); it != fileNames.end(); it++) {
unsigned int n, W, expectedResult;
int *values, *weights;
auto ret = loadData(dirName, (*it), n, W, expectedResult);
values = ret.first;
weights = ret.second;
std::cout << StringPadding((*it), 25) << StringPadding(to_string(n), 8) << StringPadding(to_string(W), 10);
auto start = std::chrono::system_clock::now();
thrust::device_vector<int> dev_values(values, values + n);
thrust::device_vector<int> dev_weights(weights, weights + n);
thrust::device_vector<float> dev_output(n);
thrust::device_vector<int> indexes(n);
thrust::transform(dev_values.begin(), dev_values.end(), dev_weights.begin(), dev_output.begin(),
thrust::divides<float>());
thrust::sequence(indexes.begin(), indexes.end());
thrust::sort_by_key(dev_output.begin(), dev_output.end(), indexes.begin(), thrust::greater<float>());
thrust::host_vector<int> h_indexes(indexes);
unsigned int weight = 0, maxValue = 0;
for (auto it2 = h_indexes.begin(); it2 != h_indexes.end(); it2++) {
if (weight + weights[*it2] <= W) {
weight += weights[*it2];
maxValue += values[*it2];
}
}
auto end = std::chrono::system_clock::now();
auto elapsed = chrono::duration_cast<chrono::microseconds>(end - start).count();
std::cout << StringPadding(to_string(elapsed), 14);
std::cout << StringPadding(to_string(expectedResult), 10) << StringPadding(to_string(maxValue), 10) << StringPadding(to_string(((float)((int)expectedResult - maxValue) / (float)expectedResult)*100.0), 10) << std::endl;
}
}
bool wayToSort(pair<float, int> i, pair<float, int> j) { return i.first > j.first; }
void performKnapsackSortingCPUCalculations(const string& dirName, vector<string>& fileNames, unsigned threadCount) {
std::cout << StringPadding("file", 25) << StringPadding("n", 8) << StringPadding("W", 10)
<< StringPadding("time(ms)", 14) << StringPadding("expected", 10) << StringPadding("obtained", 10)
<< StringPadding("error(\%)", 10) << endl;
for (auto it = fileNames.begin(); it != fileNames.end(); it++) {
unsigned int n, W, expectedResult;
int *values, *weights;
auto ret = loadData(dirName, (*it), n, W, expectedResult);
values = ret.first;
weights = ret.second;
std::cout << StringPadding((*it), 25) << StringPadding(to_string(n), 8) << StringPadding(to_string(W), 10);
auto start = std::chrono::system_clock::now();
pair<float, int> *output = new pair<float, int>[n];
for (int i = 0; i < n; ++i) {
output[i] = pair<float, int>(float(values[i]) / float(weights[i]), i);
}
if (threadCount == 1) {
std::sort(output, output + n, wayToSort);
}
else {
parasort(n, output, threadCount);
std::reverse(output, output + n);
}
unsigned int weight = 0, maxValue = 0;
for (auto i = 0; i < n; ++i) {
//cout << output[i].first << " ";
if (weight + weights[output[i].second] <= W) {
weight += weights[output[i].second];
maxValue += values[output[i].second];
}
}
auto end = std::chrono::system_clock::now();
auto elapsed = chrono::duration_cast<chrono::microseconds>(end - start).count();
std::cout << StringPadding(to_string(elapsed), 14);
std::cout << StringPadding(to_string(expectedResult), 10) << StringPadding(to_string(maxValue), 10) << StringPadding(to_string(((float)((int)expectedResult - maxValue) / (float)expectedResult)*100.0), 10) << std::endl;
}
}
void performKnapsackDynamicCudaCalculations(const string& dirName, vector<string>& fileNames) {
std::cout << StringPadding("file", 25) << StringPadding("n", 8) << StringPadding("W", 10)
<< StringPadding("time(ms)", 14) << StringPadding("expected", 10) << StringPadding("obtained", 10)
<< StringPadding("error(\%)", 10) << endl;
for (auto it = fileNames.begin(); it != fileNames.end(); it++) {
unsigned int n, W, expectedResult;
int *values, *weights;
auto ret = loadData(dirName, (*it), n, W, expectedResult);
values = ret.first;
weights = ret.second;
int *output = new int[2 * (W + 1) * sizeof(int)];
std::cout << StringPadding((*it), 25) << StringPadding(to_string(n), 8) << StringPadding(to_string(W), 10);
auto start = std::chrono::system_clock::now();
cudaError_t cudaStatus = knapsackCudaDynamic(output, values, weights, n, W);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "knapsackCuda failed!");
return;
}
int max = -1;
for (int j = 0; j <= W; j++) {
//std::cout << output[(n % 2)*W + j] << " ";
if (max < output[(n % 2)*(W + 1) + j]) { // row stride of the two-row DP table is W + 1
max = output[(n % 2)*(W + 1) + j];
}
}
auto end = std::chrono::system_clock::now();
auto elapsed = chrono::duration_cast<chrono::microseconds>(end - start).count();
std::cout << StringPadding(to_string(elapsed), 14);
std::cout << StringPadding(to_string(expectedResult), 10) << StringPadding(to_string(max), 10) << StringPadding(to_string((int)expectedResult - max), 10) << std::endl;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return;
}
delete[] values;
delete[] weights;
delete[] output;
}
}
void performKnapsackDynamicCPUCalculations(const string& dirName, vector<string>& fileNames) {
std::cout << StringPadding("file", 25) << StringPadding("n", 8) << StringPadding("W", 10)
<< StringPadding("time(ms)", 14) << StringPadding("expected", 10) << StringPadding("obtained", 10)
<< StringPadding("error(\%)", 10) << endl;
for (auto it = fileNames.begin(); it != fileNames.end(); it++) {
unsigned int n, W, expectedResult;
int *values, *weights;
auto ret = loadData(dirName, (*it), n, W, expectedResult);
values = ret.first;
weights = ret.second;
int *output = new int[2 * (W + 1) * sizeof(int)];
std::cout << StringPadding((*it), 25) << StringPadding(to_string(n), 8) << StringPadding(to_string(W), 10);
auto start = std::chrono::system_clock::now();
for (int w = 0; w <= W; ++w) {
output[w] = -1;
}
output[0] = 0;
for (int i = 1; i <= n; ++i) {
for (int w = 0; w < W + 1; ++w) {
int currentIndex = (i % 2)*(W + 1) + w;
int previousIndex = ((i - 1) % 2)*(W + 1) + w;
if (w - weights[i - 1] < 0 || output[previousIndex - weights[i - 1]] < 0)
output[currentIndex] = output[previousIndex];
else
output[currentIndex] = max(values[i - 1] + output[previousIndex - weights[i - 1]], output[previousIndex]);
}
}
int max = -1;
for (int j = 0; j <= W; j++) {
//std::cout << output[(n % 2)*W + j] << " ";
if (max < output[(n % 2)*(W + 1) + j]) {
max = output[(n % 2)*(W + 1) + j];
}
}
auto end = std::chrono::system_clock::now();
auto elapsed = chrono::duration_cast<chrono::microseconds>(end - start).count();
std::cout << StringPadding(to_string(elapsed), 14);
std::cout << StringPadding(to_string(expectedResult), 10) << StringPadding(to_string(max), 10) << StringPadding(to_string((int)expectedResult - max), 10) << std::endl;
delete[] values;
delete[] weights;
delete[] output;
}
}
boost::mutex io_mutex;
void dynamicCPUThread(boost::barrier &b, int* values, int* weights, int* output, const unsigned &n, const unsigned &W, const unsigned &threadCount, const int start, const int end) {
for (int i = 1; i <= n; ++i) {
//cout << i << " ";
b.wait();
for (int w = start; w < end; ++w) {
int currentIndex = (i % 2)*(W + 1) + w;
int previousIndex = ((i - 1) % 2)*(W + 1) + w;
if (w - weights[i - 1] < 0 || output[previousIndex - weights[i - 1]] < 0)
output[currentIndex] = output[previousIndex];
else
output[currentIndex] = max(values[i - 1] + output[previousIndex - weights[i - 1]], output[previousIndex]);
}
}
}
void performKnapsackParallelDynamicCPUCalculations(const string& dirName, vector<string>& fileNames, unsigned threadCount) {
std::cout << StringPadding("file", 25) << StringPadding("n", 8) << StringPadding("W", 10)
<< StringPadding("time(ms)", 14) << StringPadding("expected", 10) << StringPadding("obtained", 10)
<< StringPadding("error(\%)", 10) << endl;
for (auto it = fileNames.begin(); it != fileNames.end(); it++) {
unsigned int n, W, expectedResult;
int *values, *weights;
auto ret = loadData(dirName, (*it), n, W, expectedResult);
values = ret.first;
weights = ret.second;
int *output = new int[2 * (W + 1) * sizeof(int)];
std::cout << StringPadding((*it), 25) << StringPadding(to_string(n), 8) << StringPadding(to_string(W), 10);
auto start = std::chrono::system_clock::now();
for (int w = 0; w <= W; ++w) {
output[w] = -1;
}
output[0] = 0;
vector<thread> threads;
boost::barrier b(threadCount);
//dynamicCPUThread(int* values, int* weights, int* output, int &i, int &W, int start, int end)
for (int j = 0; j < threadCount-1; ++j) {
thread t(&dynamicCPUThread, ref(b), values, weights, output, ref(n), ref(W), ref(threadCount),
int(W/threadCount)*j, int(W / threadCount)*(j+1));
//cout << "Starts " << int(W / threadCount)*j << "-" << int(W / threadCount)*(j + 1) << endl;
threads.push_back(move(t));
}
thread t(&dynamicCPUThread, ref(b), values, weights, output, ref(n), ref(W), ref(threadCount),
int(W / threadCount)*(threadCount - 1), int(W + 1));
//cout << "Starts " << int(W / threadCount)*(threadCount - 1) << "-" << int(W + 2) << endl;
threads.push_back(move(t));
for (auto it = threads.begin(); it != threads.end(); ++it) {
(*it).join();
}
int max = -1;
for (int j = 0; j <= W; j++) {
//std::cout << output[(n % 2)*W + j] << " ";
if (max < output[(n % 2)*(W + 1) + j]) {
max = output[(n % 2)*(W + 1) + j];
}
}
auto end = std::chrono::system_clock::now();
auto elapsed = chrono::duration_cast<chrono::microseconds>(end - start).count();
std::cout << StringPadding(to_string(elapsed), 14);
std::cout << StringPadding(to_string(expectedResult), 10) << StringPadding(to_string(max), 10) << StringPadding(to_string((int)expectedResult - max), 10) << std::endl;
delete[] values;
delete[] weights;
delete[] output;
}
}
cudaError_t knapsackCudaDynamic(int *output, const int *values, const int *weights, unsigned int n, unsigned int W) {
int *dev_values = 0;
int *dev_weights = 0;
int *dev_output = 0;
int i = 1;
cudaError_t cudaStatus;
int *h_output = 0;
int *h_values = 0;
int *h_weights = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMallocHost((void**)&h_output, 2 * (W + 1) * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc 1 failed!");
goto Error;
}
cudaStatus = cudaMallocHost((void**)&h_values, n * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc 2 failed!");
goto Error;
}
memcpy(h_values, values, n * sizeof(int));
cudaStatus = cudaMallocHost((void**)&h_weights, n * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc 3 failed!");
goto Error;
}
memcpy(h_weights, weights, n * sizeof(int));
cudaStatus = cudaMalloc((void**)&dev_output, 2 * (W + 1) * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc 1 failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_values, n * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc 2 failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_weights, n * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc 3 failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_values, h_values, n * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy 1 failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_weights, h_weights, n * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy 2 failed!");
goto Error;
}
cudaEventRecord(start);
knapsackDynamicKernelPrepare << <int((W + 1) / 1024) + 1, 1024 >> >(dev_output, n, W);
while (i <= n) {
knapsackDynamicKernel << <int((W + 1) / 1024) + 1, 1024 >> >(dev_weights, dev_values, dev_output, i, n, W);
i++;
}
cudaEventRecord(stop);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "knapsackKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching knapsackKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(h_output, dev_output, 2 * (W + 1) * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy 4 failed!");
goto Error;
}
memcpy(output, h_output, 2 * (W + 1) * sizeof(int));
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
//std::cout << "Execution Time : " << milliseconds << " milliseconds" << std::endl;
Error:
cudaFree(dev_output);
cudaFree(dev_values);
cudaFree(dev_weights);
cudaFreeHost(h_output);
cudaFreeHost(h_values);
cudaFreeHost(h_weights);
return cudaStatus;
}
|
86733610ee92d80e42d23d2a30c2433c2a496f6a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <iostream>
#include <chrono>
#include <omp.h>
static long STEPS = 1000000000;
static int THREADS_PER_BLOCK = 1024;
static double STEP = 1.0 / (double) STEPS;
__global__ void pi_cuda(double *a, double step, long n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return; // the grid is rounded up, so guard the extra threads of the last block
double x = (i + 0.5) * step;
a[i] = 4.0 / (1.0 + x * x);
}
double pi_omp_parallel_for() {
double sum = 0;
double x = 0;
#pragma omp parallel for reduction(+:sum) firstprivate(x)
for (int i = 0; i < STEPS; i++) {
x = (i + 0.5) * STEP;
sum = sum + 4.0 / (1.0 + x * x);
}
return STEP * sum;
}
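/*
 * Both the CUDA kernel and the OpenMP loop above approximate
 *     pi = integral from 0 to 1 of 4 / (1 + x^2) dx
 * with the midpoint rule: x_i = (i + 0.5) * STEP for i = 0..STEPS-1 and
 * pi ~= STEP * sum_i 4 / (1 + x_i^2). The GPU path (see main below) stores the
 * per-sample integrand in a device vector and reduces it with thrust::reduce.
 */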
int main(int argc, char **argv) {
thrust::device_vector<double> v(STEPS);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, NULL);
hipLaunchKernelGGL(( pi_cuda), dim3(ceil((double) STEPS/THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, 0, thrust::raw_pointer_cast(v.data()), STEP, STEPS);
double sum = thrust::reduce(v.begin(), v.end(), 0.0, thrust::plus<double>());
double pi = sum * STEP;
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
float msecTotal = 0.0f;
hipEventElapsedTime(&msecTotal, start, stop);
// Compute and print the performance
printf("Time= %2.5f\n",msecTotal);
std::cout << pi << std::endl;
auto start_time = std::chrono::high_resolution_clock::now();
pi = pi_omp_parallel_for();
auto end_time = std::chrono::high_resolution_clock::now();
auto runtime = std::chrono::duration_cast<std::chrono::milliseconds> (end_time - start_time);
std::cout << "Time(omp)" << runtime.count() << std::endl;
std::cout << pi << std::endl;
}
|
86733610ee92d80e42d23d2a30c2433c2a496f6a.cu
|
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <iostream>
#include <chrono>
#include <omp.h>
static long STEPS = 1000000000;
static int THREADS_PER_BLOCK = 1024;
static double STEP = 1.0 / (double) STEPS;
__global__ void pi_cuda(double *a, double step, long n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return; // the grid is rounded up, so guard the extra threads of the last block
double x = (i + 0.5) * step;
a[i] = 4.0 / (1.0 + x * x);
}
double pi_omp_parallel_for() {
double sum = 0;
double x = 0;
#pragma omp parallel for reduction(+:sum) firstprivate(x)
for (int i = 0; i < STEPS; i++) {
x = (i + 0.5) * STEP;
sum = sum + 4.0 / (1.0 + x * x);
}
return STEP * sum;
}
int main(int argc, char **argv) {
thrust::device_vector<double> v(STEPS);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);
pi_cuda<<<ceil((double) STEPS/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(thrust::raw_pointer_cast(v.data()), STEP, STEPS);
double sum = thrust::reduce(v.begin(), v.end(), 0.0, thrust::plus<double>());
double pi = sum * STEP;
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
// Compute and print the performance
printf("Time= %2.5f\n",msecTotal);
std::cout << pi << std::endl;
auto start_time = std::chrono::high_resolution_clock::now();
pi = pi_omp_parallel_for();
auto end_time = std::chrono::high_resolution_clock::now();
auto runtime = std::chrono::duration_cast<std::chrono::milliseconds> (end_time - start_time);
std::cout << "Time(omp)" << runtime.count() << std::endl;
std::cout << pi << std::endl;
}
|
fc3094b8c6844e78e871b63ef1ef2cc9e0a87a6d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "erl_nif.h"
#include "rocblas.h"
#include "stdio.h"
#include "time.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define IDX3C(c,i,j,in_h,in_w) ((c)*((in_h)*(in_w)) + (i)*(in_w) +(j))
#define IDX4C(n,c,i,j,in_c,in_h,in_w) ((n)*((in_c)*(in_h)*(in_w)) + (c)*((in_h)*(in_w)) + (i)*(in_w) +(j))
#define BREAK return(enif_make_int(env, 0));
#define PI 3.14159265358979323846
#define SIGMOID(x) (1 / (1+exp(-1*x)))
#define DEBUG 0
#define DISP(x) if(DEBUG){printf(x);fflush(stdout);}
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
return enif_make_int(env,10000+(int)error); \
} \
}
#define CUBLAS(call) \
{ \
const cublasStatus error = call; \
if (error != HIPBLAS_STATUS_SUCCESS) \
{ \
return enif_make_int(env,11000+(int)error); \
} \
}
__global__ void pooling_kernel(float *a, float *b, float *c, int st, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,in_h2,in_w2,start_h1,end_h1,start_w1,end_w1,max_h,max_w;
float max,fmax_h,fmax_w;
if(tid < n)
{
n1 = tid;
in_h2 = in_h / st;
in_w2 = in_w / st;
for(c1=0;c1<in_c;c1++){
for(w2=0;w2<in_w2;w2++){
for(h2=0;h2<in_h2;h2++){
max = 0.0;
start_h1 = st*h2;
end_h1 = st*(h2+1);
start_w1 = st*w2;
end_w1 = st*(w2+1);
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)] >= max){
max = a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)];
max_h = h1;
max_w = w1;
}
}
}
b[IDX4C(n1,c1,h2,w2,in_c,in_h2,in_w2)] = max;
fmax_h = (float)max_h;
fmax_w = (float)max_w;
c[IDX4C(n1,c1,h2,w2,in_c,in_h2,in_w2)] = fmax_h * 1000.0 + fmax_w;
}
}
}
}
}
/*
1st arg in_n of tensor
2nd arg in_c of tensor
3rd arg in_h of tensor
4th arg in_w of tensor
5th arg binary of tensor
6th arg stride
return list [ts1,ts2]
ts1 is result data for forward
ts2 is result data for backward. this is a sparse matrix
e.g.
|0.1,0.2,0.3,0.4|
|0.5,0.6,0.7,0.8|
|0.9,1.0,1.1,1.2|
|1.3,1.4,1.5,1.6|
ts1
|0.6,0.8|
|1.4,1.6|
ts2
each element is row*1000+col
|1.0*1000+1.0,1.0*1000*3.0|
|3.0*1000+1.0,3.0*1000+3.0|
*/
static ERL_NIF_TERM
pooling1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin,c_bin,tuple;
int in_n,in_c,in_h,in_w,st, n1, n2;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
DISP("pooling1")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &st)) return enif_make_int(env,6);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * in_c * (in_h / st) * (in_w / st);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n2 * sizeof(float), &b_bin);
c = (float *) enif_make_new_binary(env, n2 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n2 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n2 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n2 * sizeof(float), hipMemcpyHostToDevice));
pooling_kernel << <1, in_n>> >(dev_a, dev_b, dev_c, st, in_c, in_h, in_w, in_n);
// copy to host b,c from GPU dev_b,dev_c
CHECK(hipMemcpy(b, dev_b, n2 * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipMemcpy(c, dev_c, n2 * sizeof(float), hipMemcpyDeviceToHost));
// return forward data and backward data with tuple {b_bin,c_bin}
tuple = enif_make_tuple2(env,b_bin,c_bin);
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(tuple);
}
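/*
 * Worked example for the layout above (the 4x4 single-channel input from the
 * comment, st = 2): the top-left 2x2 window {0.1, 0.2, 0.5, 0.6} has its
 * maximum 0.6 at (h, w) = (1, 1), so b gets 0.6 and c gets 1*1000 + 1 = 1001.0
 * at IDX4C(0, 0, 0, 0, in_c, in_h/st, in_w/st). Note that the row*1000 + col
 * encoding assumes in_w < 1000, because unpooling decodes the column with
 * fmodf(elt, 1000.0).
 */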
__global__ void unpooling_kernel(float *a, float *b, float *c, int st, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,start_h1,end_h1,start_w1,end_w1,max_h,max_w,in_h1,in_w1;
float loss,elt;
if(tid < n)
{
n1 = tid;
in_h1 = in_h * st;
in_w1 = in_w * st;
for(c1=0;c1<in_c;c1++){
for(h2=0;h2<in_h;h2++){
for(w2=0;w2<in_w;w2++){
start_h1 = st*h2;
end_h1 = st*(h2+1);
start_w1 = st*w2;
end_w1 = st*(w2+1);
elt = a[IDX4C(n1,c1,h2,w2,in_c,in_h,in_w)];
loss = b[IDX4C(n1,c1,h2,w2,in_c,in_h,in_w)];
max_h = (int) floor(elt / 1000.0);
max_w = (int) fmodf(elt,1000.0);
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 == max_h && w1 == max_w){
c[IDX4C(n1,c1,h1,w1,in_c,in_h1,in_w1)] = loss;
}
else{
c[IDX4C(n1,c1,h1,w1,in_c,in_h1,in_w1)] = 0.0;
}
}
}
}
}
}
}
}
/*
1st arg in_n of sparse-tensor
2nd arg in_c of sparse-tensor
3rd arg in_h of sparse-tensor
4th arg in_w of sparse-tensor
5th arg binary of sparse-tensor
6th arg binary of loss-tensor
7th arg stride
return gradient tensor
e.g.
ts1 index-tensor
each element is row*1000+col
|1.0*1000+1.0,1.0*1000+3.0|
|3.0*1000+1.0,3.0*1000+3.0|
ts2 loss-tensor
|0.1,0.2|
|0.3,0.4|
return
|0.0,0.0,0.0,0.0|
|0.0,0.1,0.0,0.2|
|0.0,0.0,0.0,0.0|
|0.0,0.3,0.0,0.4|
*/
static ERL_NIF_TERM
unpooling1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,st, n1, n2;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
DISP("unpooling")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin )) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &st)) return enif_make_int(env,7);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * in_c * (in_h * st) * (in_w * st);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n2 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n2 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n2 * sizeof(float), hipMemcpyHostToDevice));
unpooling_kernel << <1, in_n>> >(dev_a, dev_b, dev_c, st, in_c, in_h, in_w, in_n);
// copy to host d from GPU dev_d
CHECK(hipMemcpy(c, dev_c, n2 * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
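/*
 * Worked example for the comment above: an index entry elt = 3.0*1000 + 1.0
 * decodes to max_h = floor(3001.0 / 1000.0) = 3 and max_w = fmodf(3001.0, 1000.0) = 1,
 * so the corresponding loss value 0.3 is written to position (3, 1) of the
 * st-times-larger output while every other cell of that 2x2 window is set to 0.0.
 */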
__global__ void convolute_kernel(float *a, float *b, float *c, int filt_h, int filt_w, int st, int pad, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,oh,ow,start_h1,end_h1,start_w1,end_w1;
float sum,elt1,elt2;
if(tid < n)
{
n1 = tid;
oh = (in_h+2*pad-filt_h)/st + 1;
ow = (in_w+2*pad-filt_w)/st + 1;
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
sum = 0.0;
start_h1 = st*h2-pad;
end_h1 = start_h1 + filt_h;
start_w1 = st*w2-pad;
end_w1 = start_w1 + filt_w;
for(c1=0;c1<in_c;c1++){
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)];
elt2 = b[IDX3C(c1,h1-start_h1,w1-start_w1,filt_h,filt_w)];
sum = sum + elt1*elt2;
}
}
}
}
c[IDX4C(n1,0,h2,w2,in_c,oh,ow)] = sum;
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_h of filter tensor
6th arg filt_w of filter tensor
7th arg binary of input tensor
8th arg binary of filter tensor
9th arg stride
10th arg padding
*/
static ERL_NIF_TERM
convolute1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,filt_h, filt_w, st,pad, n1, n2, n3, oh, ow;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
DISP("convolute1")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_h)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_w)) return enif_make_int(env,6);
if (!enif_inspect_binary(env, argv[6], &a_bin )) return enif_make_int(env,7);
if (!enif_inspect_binary(env, argv[7], &b_bin )) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &st)) return enif_make_int(env,9);
if (!enif_get_int(env, argv[9], &pad)) return enif_make_int(env,10);
n1 = in_n * in_c * in_h * in_w;
n2 = in_c * filt_h * filt_w;
oh = (in_h+2*pad-filt_h)/st + 1;
ow = (in_w+2*pad-filt_w)/st + 1;
n3 = in_n * oh * ow;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b,c to GPU dev_a, dev_b, dev_c
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n2 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n3 * sizeof(float), hipMemcpyHostToDevice));
convolute_kernel << <1, in_n>> >(dev_a, dev_b, dev_c, filt_h, filt_w, st, pad, in_c, in_h, in_w, in_n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n3 * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
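/*
 * Output-size note for convolute1: oh = (in_h + 2*pad - filt_h)/st + 1 and
 * ow = (in_w + 2*pad - filt_w)/st + 1. As a hypothetical example, a 4x4 input
 * with a 3x3 filter, st = 1 and pad = 0 gives oh = ow = 2, so n3 = in_n * 2 * 2
 * floats are returned. The filter here has an input-channel dimension but no
 * output-channel dimension, so each sample produces a single output map (the
 * c1 loop sums over input channels).
 */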
__global__ void deconvolute1_kernel(float *a, float *b, float *c, int filt_h, int filt_w, int st, int pad, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,oh,ow,start_h1,end_h1,start_w1,end_w1;
float sum,elt1,elt2;
if(tid < n)
{
n1 = tid;
oh = (in_h+2*pad-filt_h)/st + 1;
ow = (in_w+2*pad-filt_w)/st + 1;
//full convolute. stride=1 always
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
start_h1 = h2-pad;
end_h1 = start_h1 + filt_h;
start_w1 = w2-pad;
end_w1 = start_w1 + filt_w;
sum = 0.0;
for(c1=0;c1<in_c;c1++){
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a[IDX4C(n1,0,h1,w1,in_c,in_h,in_w)];
elt2 = b[IDX3C(c1,h1-start_h1,w1-start_w1,filt_h,filt_w)];
sum = sum + elt1*elt2;
}
}
}
}
c[IDX4C(n1,0,h2,w2,1,oh,ow)] = sum; // output has a single channel (c holds in_n*oh*ow floats)
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_h of filter tensor
6th arg filt_w of filter tensor
7th arg binary of input loss tensor
8th arg binary of filter tensor
9th arg stride
10th arg padding
*/
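/*
  note: deconvolute1 computes a full convolution with the 180-degree rotated
  filter; the padding is enlarged on the host to
    pad1 = (filt_h - 1) + pad
  e.g. filt_h=3, pad=0 -> pad1 = 2 (the padding of a full convolution).
*/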
static ERL_NIF_TERM
deconvolute1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,filt_h, filt_w, st,pad, pad1, n1, n2, n3, oh, ow, i,j,k;
float *a,*b, *b1, *c;
float *dev_a, *dev_b, *dev_c;
DISP("deconvolute1")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_h)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_w)) return enif_make_int(env,6);
if (!enif_inspect_binary(env, argv[6], &a_bin )) return enif_make_int(env,7);
if (!enif_inspect_binary(env, argv[7], &b_bin )) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &st)) return enif_make_int(env,9);
if (!enif_get_int(env, argv[9], &pad)) return enif_make_int(env,10);
n1 = in_n * in_c * in_h * in_w;
n2 = in_c * filt_h * filt_w;
pad1 = filt_h - 1 + pad;
oh = (in_h+2*pad1-filt_h)/st + 1;
ow = (in_w+2*pad1-filt_w)/st + 1;
n3 = in_n * 1 * oh * ow;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
b1 = (float *) enif_alloc(n2 * sizeof(float));
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
//rotate 180 degree
for(i=0;i<in_c;i++){
for(j=0;j<filt_h;j++){
for(k=0;k<filt_w;k++){
//if(IDX3C(i,filt_h-j-1,filt_w-k-1,filt_h,filt_w) >= n2) return enif_make_int(env,11001);
b1[IDX3C(i,filt_h-j-1,filt_w-k-1,filt_h,filt_w)] = b[IDX3C(i,j,k,filt_h,filt_w)];
}
}
}
/*
for(i=0;i<in_c;i++){
for(j=0;j<filt_h;j++){
for(k=0;k<filt_w;k++){
printf("%f", b1[IDX3C(i,j,k,filt_h,filt_w)]);
}
}
}
*/
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b1,c to GPU dev_a, dev_b, dev_c
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b1, n2 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n3 * sizeof(float), hipMemcpyHostToDevice));
deconvolute1_kernel << <1, in_n>> >(dev_a, dev_b, dev_c, filt_h, filt_w, st, pad1, in_c, in_h, in_w, in_n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n3 * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
enif_free(b1);
return(c_bin);
}
__global__ void deconvolute2_kernel(float *a1, float *a, float *b, float *c, int filt_h, int filt_w, int st, int pad, int in_c, int in_h, int in_w, int loss_h, int loss_w, int n)
{
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,oh,ow,start_h1,end_h1,start_w1,end_w1;
int k,l,k1,l1;
float sum,elt1,elt2;
if(tid < n)
{
n1 = tid;
// caution! stride=1
oh = (in_h+2*pad-filt_h) + 1;
ow = (in_w+2*pad-filt_w) + 1;
//dilate loss tensor. loss tensor is 1 channel
for(k=0;k<loss_h;k++){
for(l=0;l<loss_w;l++){
elt1 = a[IDX4C(n1,0,k,l,1,loss_h,loss_w)];
k1 = st*k;
l1 = st*l;
a1[IDX4C(n1,0,k1,l1,1,in_h,in_w)] = elt1;
}
}
//full convolute. stride=1
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
start_h1 = h2-pad;
end_h1 = start_h1 + filt_h;
start_w1 = w2-pad;
end_w1 = start_w1 + filt_w;
sum = 0.0;
for(c1=0;c1<in_c;c1++){
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a1[IDX4C(n1,0,h1,w1,1,in_h,in_w)];
elt2 = b[IDX3C(c1,h1-start_h1,w1-start_w1,filt_h,filt_w)];
sum = sum + elt1*elt2;
}
}
}
}
c[IDX4C(n1,0,h2,w2,1,oh,ow)] = sum;
}
}
}
}
/*
dilate loss tensor
e.g.
|1.0,2.0|
|3.0,4.0|
dilated stride=2
|1.0,0.0,2.0|
|0.0,0.0,0.0|
|3.0,0.0,4.0|
*/
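/*
  the dilated size used below follows from the example above:
    in_h = loss_h + (loss_h - 1)*(st - 1)
  e.g. loss_h=2, st=2 -> in_h = 2 + 1*1 = 3
*/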
/*
1st arg in_n of input loss tensor
2nd arg in_c of input loss tensor
3rd arg in_h of input loss tensor
4th arg in_w of input loss tensor
5th arg filt_h of filter tensor
6th arg filt_w of filter tensor
7th arg binary of input loss tensor
8th arg binary of filter tensor
9th arg stride
10th arg padding
*/
static ERL_NIF_TERM
deconvolute2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,filt_h, filt_w, st,pad, pad1, n1, n2, n3, oh, ow, i,j,k, loss_h, loss_w;
float *a, *a1, *b, *b1, *c;
float *dev_a, *dev_a1, *dev_b, *dev_c;
DISP("deconvolute2")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &loss_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &loss_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_h)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_w)) return enif_make_int(env,6);
if (!enif_inspect_binary(env, argv[6], &a_bin )) return enif_make_int(env,7);
if (!enif_inspect_binary(env, argv[7], &b_bin )) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &st)) return enif_make_int(env,9);
if (!enif_get_int(env, argv[9], &pad)) return enif_make_int(env,10);
// size for dilate
in_h = loss_h + (loss_h - 1)*(st - 1);
in_w = loss_w + (loss_w - 1)*(st - 1);
n1 = in_n * 1 * in_h * in_w; //loss tensor size
n2 = in_c * filt_h * filt_w; //filter tensor size
pad1 = (filt_h - 1) + pad; //padding size with dilate
oh = (in_h+2*pad1-filt_h) + 1; //output deconvolute tensor size. caution stride=1.
ow = (in_w+2*pad1-filt_w) + 1; //
n3 = in_n * 1 * oh * ow; //
a = (float *) a_bin.data;
b = (float *) b_bin.data;
a1 = (float *) enif_alloc(n1 * sizeof(float));
b1 = (float *) enif_alloc(n2 * sizeof(float));
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
//rotate 180 degree
for(i=0;i<in_c;i++){
for(j=0;j<filt_h;j++){
for(k=0;k<filt_w;k++){
//if(IDX3C(i,filt_h-j-1,filt_w-k-1,filt_h,filt_w) >= n2) return enif_make_int(env,11001);
b1[IDX3C(i,filt_h-j-1,filt_w-k-1,filt_h,filt_w)] = b[IDX3C(i,j,k,filt_h,filt_w)];
}
}
}
// dilate
for(i=0;i<n1;i++){
a1[i] = 0.0;
}
CHECK(hipMalloc((void**)&dev_a1, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_a, in_n*1*loss_h*loss_w * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n3 * sizeof(float)));
CHECK(hipMemcpy(dev_a1, a1, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_a, a, in_n*1*loss_h*loss_w * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b1, n2 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n3 * sizeof(float), hipMemcpyHostToDevice));
deconvolute2_kernel << <1, in_n>> >(dev_a1, dev_a, dev_b, dev_c, filt_h, filt_w, st, pad1, in_c, in_h, in_w, loss_h, loss_w, in_n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n3 * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_a1);
hipFree(dev_b);
hipFree(dev_c);
enif_free(a1);
enif_free(b1);
return(c_bin);
}
__global__ void gradfilter_kernel(float *a, float *b, float *c, int filt_h, int filt_w, int loss_h, int loss_w, int st, int pad, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,h3,w3;
float sum,elt1,elt2;
if(tid < n)
{
n1 = tid;
for(c1=0;c1<in_c;c1++){
//h1,w1 is index of filter
for(h1=0;h1<filt_h;h1++){
for(w1=0;w1<filt_w;w1++){
//h2,w2 is index of loss tensor
sum = 0.0;
for(h2=0;h2<loss_h;h2++){
for(w2=0;w2<loss_w;w2++){
//h3,w3 is index of input tensor
h3 = h1*st-pad + h2;
w3 = w1*st-pad + w2;
if(h3>=0 && h3<in_h && w3>=0 && w3<in_w){
elt1 = a[IDX4C(n1,c1,h3,w3,in_c,in_h,in_w)]; //input tensor
elt2 = b[IDX4C(n1,0,h2,w2,1,loss_h,loss_w)]; //loss tensor (1 channel, b holds in_n*loss_h*loss_w floats)
sum = sum + elt1*elt2;
}
}
}
//set filter tensor
c[IDX3C(c1,h1,w1,filt_h,filt_w)] = c[IDX3C(c1,h1,w1,filt_h,filt_w)] + sum;
}
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_h of filter tensor
6th arg filt_w of filter tensor
7th arg loss_h of loss tensor
8th arg loss_w of loss tensor
9th arg binary of input tensor
10th arg binary of loss tensor
11th arg stride
12th arg padding
*/
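/*
  index mapping used in gradfilter_kernel (example values only):
  for filter position (h1,w1) and loss position (h2,w2) the input position is
    h3 = h1*st - pad + h2,  w3 = w1*st - pad + w2
  e.g. h1=1, st=1, pad=0, h2=2 -> h3 = 3
*/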
static ERL_NIF_TERM
gradfilter1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,filt_h,filt_w,loss_h,loss_w,st,pad,n1,n2,n3,i;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
float count;
DISP("gradfilter1")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_h)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_w)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &loss_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &loss_w)) return enif_make_int(env,8);
if (!enif_inspect_binary(env, argv[8], &a_bin )) return enif_make_int(env,9);
if (!enif_inspect_binary(env, argv[9], &b_bin )) return enif_make_int(env,10);
if (!enif_get_int(env, argv[10], &st)) return enif_make_int(env,10);
if (!enif_get_int(env, argv[11], &pad)) return enif_make_int(env,11);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * loss_h * loss_w;
n3 = in_c * filt_h * filt_w;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
//initialize c
for(i=0;i<n3;i++){
c[i] = 0.0;
}
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b,c to GPU dev_a, dev_b, dev_c
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n2 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n3 * sizeof(float), hipMemcpyHostToDevice));
gradfilter_kernel << <1, in_n>> >(dev_a, dev_b, dev_c, filt_h, filt_w, loss_h, loss_w, st, pad, in_c, in_h, in_w, in_n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n3 * sizeof(float), hipMemcpyDeviceToHost));
//average
count = (float) in_n;
if(in_n != 0){
for(i=0;i<n3;i++){
c[i] = c[i] / count;
}
}
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
__global__ void full_kernel(float *a, float *b, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,i,j;
float elt;
if(tid < n)
{
n1 = tid;
for(i=0;i<in_h;i++){
for(j=0;j<in_w;j++){
elt = a[IDX4C(n1,0,i,j,1,in_h,in_w)];
b[IDX2C(n1,i*in_w + j,n)] = elt;
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_h of input tensor
3rd arg in_w of input tensor
4th arg binary of input tensor
*/
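/*
  note: full1 flattens an (in_n,1,in_h,in_w) tensor into an in_n x (in_h*in_w)
  matrix; element (n1,0,i,j) is written to row n1, column i*in_w + j.
*/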
static ERL_NIF_TERM
full1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_h,in_w,n1;
float *a,*b;
float *dev_a, *dev_b;
DISP("full1")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_w)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
//printf("%d %d %d \n\r", in_n, in_h, in_w);
// in_c is always 1
n1 = in_n * in_h * in_w;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n1 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n1 * sizeof(float), hipMemcpyHostToDevice));
full_kernel << <1, in_n>> >(dev_a, dev_b, in_h, in_w, in_n);
// copy to host b from GPU dev_b
CHECK(hipMemcpy(b, dev_b, n1 * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
return(b_bin);
}
__global__ void unfull_kernel(float *a, float *b, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,i,j;
float elt;
if(tid < n)
{
n1 = tid;
for(i=0;i<in_h;i++){
for(j=0;j<in_w;j++){
elt = a[IDX2C(n1,i*in_w + j,n)];
b[IDX4C(n1,0,i,j,1,in_h,in_w)] = elt;
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_h of input tensor
3rd arg in_w of input tensor
4th arg binary of input tensor
*/
static ERL_NIF_TERM
unfull1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_h,in_w,n1;
float *a,*b;
float *dev_a, *dev_b;
DISP("unfull1")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_w)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
// in_c is always 1
n1 = in_n * in_h * in_w;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n1 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n1 * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n1 * sizeof(float), hipMemcpyHostToDevice));
unfull_kernel << <1, in_n>> >(dev_a, dev_b, in_h, in_w, in_n);
// copy to host b from GPU dev_b
CHECK(hipMemcpy(b, dev_b, n1 * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
print1(ErlNifEnv *env, int argc, const ERL_NIF_TERM *argv) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
float *a;
int r,c,i,j;
DISP("print1")
if (!enif_get_int(env, argv[0], &r )) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
for(i=0;i<r;i++){
for(j=0;j<c;j++){
printf("%f ", a[IDX2C(i,j,r)]);
}
printf("\n\r");
}
printf("\n\r");
result = enif_make_atom(env,"true");
return result;
}
static ERL_NIF_TERM
new1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i;
ERL_NIF_TERM a_bin;
float *a;
double d;
DISP("new1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_double(env, argv[1], &d)) return enif_make_int(env,2);
a = (float *) enif_make_new_binary(env, n * sizeof(float), &a_bin);
// Set matrix data
for(i=0;i<n;i++){
a[i] = (float)d;
}
return(a_bin);
}
static ERL_NIF_TERM
new2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int r1,c1,i,j;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
DISP("new2")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
a = (float *) enif_make_new_binary(env, r1 * c1 * sizeof(float), &a_bin);
// Set matrix data
list = argv[2]; /* matrix1 */
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX2C(i,j,r1)] = (float)d;
}
}
return(a_bin);
}
static ERL_NIF_TERM
new3(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int c,h,w,i,j,k;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
DISP("new3")
if (!enif_get_int(env, argv[0], &c)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &w)) return enif_make_int(env,3);
a = (float *) enif_make_new_binary(env, c * h * w * sizeof(float), &a_bin);
// Set matrix data
list = argv[3]; /* matrix1 */
for(i=0;i<c;i++){
for(j=0;j<h;j++){
for(k=0;k<w;k++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX3C(i,j,k,h,w)] = (float)d;
}
}
}
return(a_bin);
}
static ERL_NIF_TERM
new4(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,c,h,w,i,j,k,l;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
DISP("new4")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w)) return enif_make_int(env,4);
a = (float *) enif_make_new_binary(env, n * c * h * w * sizeof(float), &a_bin);
// Set matrix data
list = argv[4]; /* matrix1 */
for(i=0;i<n;i++){
for(j=0;j<c;j++){
for(k=0;k<h;k++){
for(l=0;l<w;l++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX4C(i,j,k,l,c,h,w)] = (float)d;
}
}
}
}
return(a_bin);
}
static ERL_NIF_TERM
rand1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i;
float x,y,val;
float *result_data;
ERL_NIF_TERM result;
DISP("rand1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
result_data = (float *) enif_make_new_binary(env, n * sizeof(float), &result);
srand((unsigned) time(NULL));
for(i=0;i<n;i++){
//box_muller
x = (float)rand()/(float)RAND_MAX;
y = (float)rand()/(float)RAND_MAX;
val = sqrt(-2.0 * log(x)) * cos(2.0 * PI * y);
result_data[i] = val;
}
return(result);
}
static ERL_NIF_TERM
mult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int r1, c1, r2, c2, n, i, j;
float *a,*b,*c;
float* devPtrA;
float* devPtrB;
float* devPtrC;
DISP("mult1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &r2)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &c2)) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin)) return enif_make_int(env,6);
n = r1*c2;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
for(j=0;j<c2;j++)
for(i=0;i<r1;i++)
c[IDX2C(i,j,r1)] = 0.0;
// Initialize CUBLAS
hipblasInit();
CUBLAS(hipblasAlloc (r1*c1, sizeof(*a), (void**)&devPtrA));
CUBLAS(hipblasAlloc (r2*c2, sizeof(*b), (void**)&devPtrB));
CUBLAS(hipblasAlloc (r1*c2, sizeof(*c), (void**)&devPtrC));
CUBLAS(hipblasSetMatrix (r1, c1, sizeof(*a), a, r1, devPtrA, r1));
CUBLAS(hipblasSetMatrix (r2, c2, sizeof(*b), b, r2, devPtrB, r2));
CUBLAS(hipblasSetMatrix (r1, c2, sizeof(*c), c, r1, devPtrC, r1));
//Sgemm
hipblasSgemm('N', 'N', r1, c2, c1, 1.0, devPtrA, r1, devPtrB, r2, 0.0, devPtrC, r1);
CUBLAS(hipblasGetMatrix (r1, c2, sizeof(*c), devPtrC, r1, c, r1));
// Shutdown CUBLAS
hipblasFree(devPtrA);
hipblasFree(devPtrB);
hipblasFree(devPtrC);
hipblasShutdown();
return(c_bin);
}
__global__ void add1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
add1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
DISP("add1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
add1_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
__global__ void sub1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] - b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
sub1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
DISP("sub1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
sub1_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
__global__ void emult1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
emult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int r1, c1, n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
DISP("emult1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &b_bin)) return enif_make_int(env,4);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
emult1_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
static ERL_NIF_TERM
transpose1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j;
float *a,*b;
DISP("transpose1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
b[IDX2C(j,i,c1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
ident1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i,j;
ERL_NIF_TERM a_bin;
float *a;
DISP("ident1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
a = (float *) enif_make_new_binary(env, n * n * sizeof(float), &a_bin);
// Set matrix data
for(i=0;i<n;i++){
for(j=0;j<n;j++){
if(i==j)
a[IDX2C(i,j,n)] = 1.0;
else
a[IDX2C(i,j,n)] = 0.0;
}
}
return(a_bin);
}
__global__ void sigmoid_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = SIGMOID(a[tid]);
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_sigmoid(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
DISP("activate_sigmoid")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
sigmoid_kernel << <128, 128 >> >(dev_a, dev_b, n);
// copy to host b from GPU dev_b
CHECK(hipMemcpy(b, dev_b, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
return(b_bin);
}
__global__ void tanh_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = tanh(a[tid]);
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_tanh(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
DISP("activate_tanh")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
tanh_kernel << <128, 128 >> >(dev_a, dev_b, n);
// copy to host b from GPU dev_b
CHECK(hipMemcpy(b, dev_b, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
return(b_bin);
}
__global__ void relu_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
if(a[tid] >= 0)
b[tid] = a[tid];
else
b[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_relu(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
DISP("activate_relu")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
relu_kernel << <128, 128 >> >(dev_a, dev_b, n);
// copy to host b from GPU dev_b
CHECK(hipMemcpy(b, dev_b, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
activate_softmax(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, k;
float *a,*b;
float max,sum,delta;
DISP("activate_softmax")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
//calculate softmax
delta = 0.01;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
max = -3.402823e38;
for(k=0;k<c1;k++){
if(a[IDX2C(i,k,r1)] > max)
max = a[IDX2C(i,k,r1)];
}
sum = 0.0;
for(k=0;k<c1;k++){
sum = sum + exp(a[IDX2C(i,k,r1)] - max);
}
b[IDX2C(i,j,r1)] = exp(a[IDX2C(i,j,r1)] - max) / (sum+delta);
}
}
return(b_bin);
}
__global__ void differ_sigmoid_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * ((1 - SIGMOID(b[tid])) * SIGMOID(b[tid]));
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_sigmoid(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
DISP("differ_sigmoid")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
differ_sigmoid_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
__global__ void differ_tanh_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * (1/(cosh(b[tid]) * cosh(b[tid])));
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_tanh(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
DISP("differ_tanh")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
differ_tanh_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
__global__ void differ_relu_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
if(b[tid] >= 0)
c[tid] = a[tid];
else
c[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_relu(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
DISP("differ_relu")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
differ_relu_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
__global__ void smult_kernel(float d, float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = d * a[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
smult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
double s;
DISP("smult1")
if (!enif_get_double(env, argv[0], &s)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &n)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
smult_kernel << <128, 128 >> >((float)s,dev_a, dev_b, n);
// copy to host b from GPU dev_b
CHECK(hipMemcpy(b, dev_b, n * sizeof(float), hipMemcpyDeviceToHost));
// free
hipFree(dev_a);
hipFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
trace1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
float trace;
DISP("trace1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
trace = 0.0;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==j)
trace = trace + a[IDX2C(i,j,r1)];
}
}
result = enif_make_double(env,trace);
return(result);
}
static ERL_NIF_TERM
mean_square(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a, *b;
float d,s;
DISP("mean_square")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &b_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
s = 0.0;
for(i=0;i<r1;i++){
for (j=0;j<c1;j++){
d = a[IDX2C(i,j,r1)] - b[IDX2C(i,j,r1)];
s = s + d*d;
}
}
s = s / (2.0*(float(r1)));
result = enif_make_double(env,s);
return(result);
}
static ERL_NIF_TERM
cross_entropy(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a, *b;
float d,s,delta;
DISP("cross_entropy")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &b_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
delta = 1e-7;
s = 0.0;
for(i=0;i<r1;i++){
for (j=0;j<c1;j++){
d = fabsf(a[IDX2C(i,j,r1)]) + delta;
s = s + b[IDX2C(i,j,r1)] * log(d);
}
}
s = -1.0 * s / (float)r1;
result = enif_make_double(env,s);
return(result);
}
static ERL_NIF_TERM
elt1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
DISP("elt1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &i)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &j)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
a = (float *) a_bin.data;
result = enif_make_double(env,(double)a[IDX2C(i,j,r1)]);
return(result);
}
static ERL_NIF_TERM
set1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, x, y;
float *a,*b;
double val;
DISP("set1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &x)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &y)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &val)) return enif_make_int(env,6);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==x && j==y)
b[IDX2C(i,j,r1)] = (float)val;
else
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
add_diff1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, x, y;
float *a,*b;
double val;
DISP("add_diff1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &x)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &y)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &val)) return enif_make_int(env,6);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==x && j==y)
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)] + (float)val;
else
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
add_diff2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int c1, h1, w1, n, i, j, k, x, y, z;
float *a,*b;
double val;
DISP("add_diff2")
if (!enif_get_int(env, argv[0], &c1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &h1)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &w1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[4], &x)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[5], &y)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[6], &z)) return enif_make_int(env,6);
if (!enif_get_double(env, argv[7], &val)) return enif_make_int(env,7);
n = c1*h1*w1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<c1;i++){
for(j=0;j<h1;j++){
for(k=0;k<w1;k++){
if(i==x && j==y && k==z)
b[IDX3C(i,j,k,h1,w1)] = a[IDX3C(i,j,k,h1,w1)] + (float)val;
else
b[IDX3C(i,j,k,h1,w1)] = a[IDX3C(i,j,k,h1,w1)];
}
}
}
return(b_bin);
}
static ERL_NIF_TERM
average1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, i, j;
float *a,*b;
float sum;
DISP("average1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, c1 * sizeof(float), &b_bin);
for(j=0;j<c1;j++){
sum = 0.0;
for(i=0;i<r1;i++){
sum = sum + a[IDX2C(i,j,r1)];
}
b[j] = sum / (float)r1;
}
return(b_bin);
}
/*
1st arg row-size of matrix
2nd arg col-size of matrix
3rd arg matrix data binary
*/
static ERL_NIF_TERM
sum1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
float sum;
DISP("sum1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
sum = 0.0;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
sum = sum + a[IDX2C(i,j,r1)];
}
}
result = enif_make_double(env,sum);
return(result);
}
/*
transfer 2-dim matrix to list
*/
static ERL_NIF_TERM
to_list1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int r1, c1, i, j;
float *a;
DISP("to_list1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=r1-1;i>=0;i--){
for(j=c1-1;j>=0;j--){
head = enif_make_double(env,(double)a[IDX2C(i,j,r1)]);
list = enif_make_list_cell(env,head,list);
}
}
return(list);
}
/*
transfer 3-dim matrix to list
*/
static ERL_NIF_TERM
to_list2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int c, h, w, i, j, k;
float *a;
DISP("to_list2")
if (!enif_get_int(env, argv[0], &c)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &w)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=c-1;i>=0;i--){
for(j=h-1;j>=0;j--){
for(k=w-1;k>=0;k--){
head = enif_make_double(env,(double)a[IDX3C(i,j,k,h,w)]);
list = enif_make_list_cell(env,head,list);
}
}
}
return(list);
}
/*
transfer 4-dim matrix to list
*/
static ERL_NIF_TERM
to_list3(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int n, c, h, w, i, j, k, l;
float *a;
DISP("to_list3")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_badarg(env);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=n-1;i>=0;i--){
for(j=c-1;j>=0;j--){
for(k=h-1;k>=0;k--){
for(l=w-1;l>=0;l--){
head = enif_make_double(env,(double)a[IDX4C(i,j,k,l,c,h,w)]);
list = enif_make_list_cell(env,head,list);
}
}
}
}
return(list);
}
__global__ void sgd1_kernel(float *a, float *b, float *c, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
if(a[tid] != 0.0)
c[tid] = a[tid] - b[tid]*lr;
else
c[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
/*
w - g*lr |> dropout()
for sgd
w is the weight matrix.
g is the gradient matrix.
when an element of w is zero the result is zero. This implements dropout.
return the updated weight matrix.
1st arg is size of vectorized matrix
2nd arg is weight matrix or tensor
3rd arg is gradient matrix or tensor
4th arg is learning rate
5th arg is dropout rate
*/
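/*
  worked example with hypothetical values: w=0.5, g=0.2, lr=0.1 gives
  w - g*lr = 0.5 - 0.02 = 0.48; a weight that is exactly 0.0 stays 0.0.
*/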
static ERL_NIF_TERM
sgd1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n,r;
float *a,*b,*c,*dev_a, *dev_b, *dev_c;
float lr,dr,randfloat;
double learning_rate,dropout_rate;
DISP("sgd1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_get_double(env, argv[3], &learning_rate)) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &dropout_rate)) return enif_make_int(env,5);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
lr = (float) learning_rate;
dr = (float) dropout_rate;
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
sgd1_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, lr, n);
// copy to host c from GPU dev_c
CHECK(hipMemcpy(c, dev_c, n * sizeof(float), hipMemcpyDeviceToHost));
// dropout
randfloat = (double)(rand() % 100) / 100.0;
if(dr != 0.0 && dr < randfloat){
r = rand() % n;
c[r] = 0.0;
}
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(c_bin);
}
/*
def momentum(v, g, lr) do
Matrex.apply(v, g, fn v, g -> 0.5 * v - lr * g end)
end
*/
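/*
  per element the kernel below computes
    d = 0.5*v - lr*g   and   e = w + d  (forced to 0.0 where w == 0.0)
  e.g. (hypothetical values) v=0.2, g=0.1, lr=0.1, w=1.0 -> d=0.09, e=1.09
*/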
__global__ void momentum_kernel(float *a, float *b, float *c, float *d, float *e, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
d[tid] = (0.5 * b[tid]) - (lr * c[tid]);
if(a[tid] != 0.0)
e[tid] = a[tid] + d[tid];
else
e[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg size of each vectorized matrix
2nd arg weight-matrix
3rd arg v-matrix
4th arg g-matrix
5th arg learning rate
6th arg dropout rate
return tuple
*/
static ERL_NIF_TERM
momentum1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin;
ERL_NIF_TERM d_bin,e_bin,tuple;
int n,r;
float *a,*b,*c,*d,*e;
float *dev_a, *dev_b, *dev_c ,*dev_d, *dev_e;
float lr,dr,randfloat;
double learning_rate,dropout_rate;
DISP("momentum1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin )) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &learning_rate)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &dropout_rate)) return enif_make_int(env,6);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
lr = (float) learning_rate;
dr = (float) dropout_rate;
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_e, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_d, d, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_e, e, n * sizeof(float), hipMemcpyHostToDevice));
momentum_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, dev_d, dev_e, lr, n);
// copy to host d,e from GPU dev_d, dev_e
CHECK(hipMemcpy(d, dev_d, n * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipMemcpy(e, dev_e, n * sizeof(float), hipMemcpyDeviceToHost));
// dropout
randfloat = (double)(rand() % 100) / 100.0;
if(dr != 0.0 && dr < randfloat){
r = rand() % n;
e[r] = 0.0;
}
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipFree(dev_d);
hipFree(dev_e);
tuple = enif_make_tuple2(env,d_bin,e_bin);
return(tuple);
}
/*
h1 = h + grad*grad
w1 = w - lr * 1/sqrt(h1) * grad
*/
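/*
  worked example with hypothetical values: h=0.0, grad=0.5 gives
    h1 = 0.0 + 0.25 = 0.25 and w1 = w - lr*(1/sqrt(0.25))*0.5 = w - lr;
  the kernel below falls back to w - lr*grad when h1 == 0.
*/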
__global__ void adagrad_kernel(float *a, float *b, float *c, float *d, float *e, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
d[tid] = b[tid] + c[tid]*c[tid];
if(d[tid] != 0)
e[tid] = a[tid] - (lr * (1 / sqrt(d[tid])) * c[tid]);
else
e[tid] = a[tid] - (lr * c[tid]);
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg size of each vectorized matrix
2nd arg weight-matrix (a_bin)
3rd arg h-matrix (b_bin)
4th arg grad-matrix (c_bin)
5th arg learning rate
6th arg dropout rate
return tuple {new-h,new-w}
*/
static ERL_NIF_TERM
adagrad1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin;
ERL_NIF_TERM d_bin,e_bin,tuple;
int n,r;
float *a,*b,*c,*d,*e;
float *dev_a, *dev_b, *dev_c, *dev_d, *dev_e;
float lr,dr,randfloat;
double learning_rate,dropout_rate;
DISP("adagrad1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin)) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &learning_rate)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &dropout_rate)) return enif_make_int(env,6);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
lr = (float) learning_rate;
dr = (float) dropout_rate;
// Allocate for GPU
CHECK(hipMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(hipMalloc((void**)&dev_e, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(hipMemcpy(dev_a, a, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_b, b, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_c, c, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_d, d, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(dev_e, e, n * sizeof(float), hipMemcpyHostToDevice));
adagrad_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, dev_d, dev_e, lr, n);
// copy to host d,e from GPU dev_d, dev_e
CHECK(hipMemcpy(d, dev_d, n * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipMemcpy(e, dev_e, n * sizeof(float), hipMemcpyDeviceToHost));
// dropout
randfloat = (double)(rand() % 100) / 100.0;
if(dr != 0.0 && dr < randfloat){
r = rand() % n;
e[r] = 0.0;
}
// free
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipFree(dev_d);
hipFree(dev_e);
tuple = enif_make_tuple2(env,d_bin,e_bin);
return(tuple);
}
/*
1st arg row-size of matrix
2nd arg col-size of matrix
3rd arg predicted matrix
4th arg list of label. each element is integer
*/
static ERL_NIF_TERM
accuracy1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list,result;
int r1, c1, i, j, n, index,sum;
float *a;
double max,rate;
DISP("accuracy1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
// calculate accuracy
sum = 0;
list = argv[3];
for(i=0;i<r1;i++){
max = 0.0;
index = 0;
enif_get_list_cell(env, list, &head, &list);
enif_get_int(env,head,&n);
for(j=0;j<c1;j++){
if(a[IDX2C(i,j,r1)] > max){
max = a[IDX2C(i,j,r1)];
index = j;
}
}
if(index == n)
sum++;
}
rate = (double)sum / (double)r1;
result = enif_make_double(env,rate);
return(result);
}
static ERL_NIF_TERM
random_select1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin,tuple;
int r1, c1, r2, c2, i, j, n, r;
float *a, *b, *c, *d;
DISP("random_select1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &r2)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &c2)) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin )) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &n)) return enif_make_int(env,7);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n*c1 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n*c2 * sizeof(float), &d_bin);
// random-select
for(i=0;i<n;i++){
r = rand() % r1;
for(j=0;j<c1;j++){
c[IDX2C(i,j,n)] = a[IDX2C(r,j,r1)];
}
for(j=0;j<c2;j++){
d[IDX2C(i,j,n)] = b[IDX2C(r,j,r2)];
}
}
tuple = enif_make_tuple2(env,c_bin,d_bin);
return(tuple);
}
static ERL_NIF_TERM
random_select2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin,tuple;
int n1,c1,h1,w1,r2,c2, i, j, k, l, n, r;
float *a, *b, *c, *d;
DISP("random_select2")
if (!enif_get_int(env, argv[0], &n1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h1)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w1)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &r2)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &c2)) return enif_make_int(env,7);
if (!enif_inspect_binary(env, argv[7], &b_bin )) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &n)) return enif_make_int(env,9);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n*c1*h1*w1 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n*r2*c2 * sizeof(float), &d_bin);
// random-select
for(i=0;i<n;i++){
r = rand() % n1;
for(j=0;j<c1;j++){
for(k=0;k<h1;k++){
for(l=0;l<w1;l++){
c[IDX4C(i,j,k,l,c1,h1,w1)] = a[IDX4C(r,j,k,l,c1,h1,w1)];
}
}
}
for(j=0;j<c2;j++){
d[IDX2C(i,j,n)] = b[IDX2C(r,j,r2)];
}
}
tuple = enif_make_tuple2(env,c_bin,d_bin);
return(tuple);
}
// define the array of ErlNifFunc
static ErlNifFunc nif_funcs[] = {
// {erl_function_name, erl_function_arity, c_function}
{"print1", 3, print1},
{"mult1", 6, mult1},
{"new1", 2, new1},
{"new2", 3, new2},
{"new3", 4, new3},
{"new4", 5, new4},
{"rand1", 1, rand1},
{"add1", 3, add1},
{"sub1", 3, sub1},
{"emult1", 4, emult1},
{"transpose1", 3, transpose1},
{"ident1", 1, ident1},
{"activate_sigmoid", 2 ,activate_sigmoid},
{"activate_tanh", 2 , activate_tanh},
{"activate_relu", 2, activate_relu},
{"activate_softmax", 3, activate_softmax},
{"differ_sigmoid", 3, differ_sigmoid},
{"differ_tanh", 3, differ_tanh},
{"differ_relu", 3, differ_relu},
{"smult1", 3, smult1},
{"trace1", 3, trace1},
{"mean_square", 4, mean_square},
{"cross_entropy", 4, cross_entropy},
{"elt1", 5, elt1},
{"set1", 6, set1},
{"add_diff1", 6, add_diff1},
{"add_diff2", 8, add_diff2},
{"average1", 3, average1},
{"sum1", 3, sum1},
{"to_list1", 3, to_list1},
{"to_list2", 4, to_list2},
{"to_list3", 5, to_list3},
{"momentum1", 6, momentum1},
{"adagrad1", 6, adagrad1},
{"accuracy1", 4, accuracy1},
{"pooling1", 6, pooling1},
{"unpooling1", 7, unpooling1},
{"convolute1", 10, convolute1},
{"deconvolute1", 10, deconvolute1},
{"deconvolute2", 10, deconvolute2},
{"gradfilter1", 12, gradfilter1},
{"full1", 4, full1},
{"unfull1", 4, unfull1},
{"sgd1", 5, sgd1},
{"random_select1", 7, random_select1},
{"random_select2", 9, random_select2}
};
ERL_NIF_INIT(Elixir.Cumatrix, nif_funcs, NULL, NULL, NULL, NULL)
|
fc3094b8c6844e78e871b63ef1ef2cc9e0a87a6d.cu
|
#include "erl_nif.h"
#include "cublas.h"
#include "stdio.h"
#include "time.h"
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
#define IDX3C(c,i,j,in_h,in_w) ((c)*((in_h)*(in_w)) + (i)*(in_w) +(j))
#define IDX4C(n,c,i,j,in_c,in_h,in_w) ((n)*((in_c)*(in_h)*(in_w)) + (c)*((in_h)*(in_w)) + (i)*(in_w) +(j))
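/*
  IDX2C addresses a column-major matrix (cuBLAS convention);
  IDX3C/IDX4C address row-major tensors.
  e.g. a 2x3 matrix with ld=2: element (i=1,j=2) -> IDX2C = 2*2 + 1 = 5
*/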
#define BREAK return(enif_make_int(env, 0));
#define PI 3.14159265358979323846
#define SIGMOID(x) (1 / (1+exp(-1*x)))
#define DEBUG 0
#define DISP(x) if(DEBUG){printf(x);fflush(stdout);}
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
return enif_make_int(env,10000+(int)error); \
} \
}
#define CUBLAS(call) \
{ \
const cublasStatus error = call; \
if (error != CUBLAS_STATUS_SUCCESS) \
{ \
return enif_make_int(env,11000+(int)error); \
} \
}
__global__ void pooling_kernel(float *a, float *b, float *c, int st, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,in_h2,in_w2,start_h1,end_h1,start_w1,end_w1,max_h,max_w;
float max,fmax_h,fmax_w;
if(tid < n)
{
n1 = tid;
in_h2 = in_h / st;
in_w2 = in_w / st;
for(c1=0;c1<in_c;c1++){
for(w2=0;w2<in_w2;w2++){
for(h2=0;h2<in_h2;h2++){
max = 0.0;
start_h1 = st*h2;
end_h1 = st*(h2+1);
start_w1 = st*w2;
end_w1 = st*(w2+1);
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)] >= max){
max = a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)];
max_h = h1;
max_w = w1;
}
}
}
b[IDX4C(n1,c1,h2,w2,in_c,in_h2,in_w2)] = max;
fmax_h = (float)max_h;
fmax_w = (float)max_w;
c[IDX4C(n1,c1,h2,w2,in_c,in_h2,in_w2)] = fmax_h * 1000.0 + fmax_w;
}
}
}
}
}
/*
1st arg in_n of tensor
2nd arg in_c of tensor
3rd arg in_h of tensor
4th arg in_w of tensor
5th arg binary of tensor
6th arg stride
return tuple {ts1,ts2}
ts1 is result data for forward
ts2 is result data for backward. this is a sparse matrix
e.g.
|0.1,0.2,0.3,0.4|
|0.5,0.6,0.7,0.8|
|0.9,1.0,1.1,1.2|
|1.3,1.4,1.5,1.6|
ts1
|0.6,0.8|
|1.4,1.6|
ts2
each element is row*1000+col
|1.0*1000+1.0,1.0*1000+3.0|
|3.0*1000+1.0,3.0*1000+3.0|
*/
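/*
illustrative note (added; not part of the NIF contract): pooling_kernel packs the
argmax position of each window into a single float as row*1000.0 + col.
e.g. a maximum found at (row,col) = (3,1) is stored as 3.0*1000.0 + 1.0 = 3001.0.
this assumes row and col stay below 1000; unpooling_kernel decodes it again below.
*/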
static ERL_NIF_TERM
pooling1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin,c_bin,tuple;
int in_n,in_c,in_h,in_w,st, n1, n2;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
DISP("pooling1")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &st)) return enif_make_int(env,6);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * in_c * (in_h / st) * (in_w / st);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n2 * sizeof(float), &b_bin);
c = (float *) enif_make_new_binary(env, n2 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n2 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n2 * sizeof(float), cudaMemcpyHostToDevice));
pooling_kernel << <1, in_n>> >(dev_a, dev_b, dev_c, st, in_c, in_h, in_w, in_n);
// copy to host b,c from GPU dev_b,dev_c
CHECK(cudaMemcpy(b, dev_b, n2 * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(c, dev_c, n2 * sizeof(float), cudaMemcpyDeviceToHost));
// return forward data and backward data with tuple {b_bin,c_bin}
tuple = enif_make_tuple2(env,b_bin,c_bin);
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(tuple);
}
__global__ void unpooling_kernel(float *a, float *b, float *c, int st, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,start_h1,end_h1,start_w1,end_w1,max_h,max_w,in_h1,in_w1;
float loss,elt;
if(tid < n)
{
n1 = tid;
in_h1 = in_h * st;
in_w1 = in_w * st;
for(c1=0;c1<in_c;c1++){
for(h2=0;h2<in_h;h2++){
for(w2=0;w2<in_w;w2++){
start_h1 = st*h2;
end_h1 = st*(h2+1);
start_w1 = st*w2;
end_w1 = st*(w2+1);
elt = a[IDX4C(n1,c1,h2,w2,in_c,in_h,in_w)];
loss = b[IDX4C(n1,c1,h2,w2,in_c,in_h,in_w)];
max_h = (int) floor(elt / 1000.0);
max_w = (int) fmodf(elt,1000.0);
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 == max_h && w1 == max_w){
c[IDX4C(n1,c1,h1,w1,in_c,in_h1,in_w1)] = loss;
}
else{
c[IDX4C(n1,c1,h1,w1,in_c,in_h1,in_w1)] = 0.0;
}
}
}
}
}
}
}
}
/*
1st arg in_n of sparse-tensor
2nd arg in_c of sparse-tensor
3rd arg in_h of sparse-tensor
4th arg in_w of sparse-tensor
5th arg binary of sparse-tensor
6th arg binary of loss-tensor
7th arg stride
return gradient tensor
e.g.
ts1 index-tensor
each element is row*1000+col
|1.0*1000+1.0,1.0*1000+3.0|
|3.0*1000+1.0,3.0*1000+3.0|
ts2 loss-tensor
|0.1,0.2|
|0.3,0.4|
return
|0.0,0.0,0.0,0.0|
|0.0,0.1,0.0,0.2|
|0.0,0.0,0.0,0.0|
|0.0,0.3,0.0,0.4|
*/
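/*
worked example (added for clarity): for a window whose packed index is
1.0*1000+3.0 = 1003.0 and whose loss value is 0.2, the kernel decodes
max_h = floor(1003.0/1000.0) = 1 and max_w = fmodf(1003.0,1000.0) = 3,
writes 0.2 at (1,3) inside that window and 0.0 everywhere else,
which is exactly how the 4x4 gradient above is produced.
*/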
static ERL_NIF_TERM
unpooling1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,st, n1, n2;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
DISP("unpooling")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin )) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &st)) return enif_make_int(env,7);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * in_c * (in_h * st) * (in_w * st);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n2 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n2 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n2 * sizeof(float), cudaMemcpyHostToDevice));
unpooling_kernel << <1, in_n>> >(dev_a, dev_b, dev_c, st, in_c, in_h, in_w, in_n);
  // copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n2 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void convolute_kernel(float *a, float *b, float *c, int filt_h, int filt_w, int st, int pad, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,oh,ow,start_h1,end_h1,start_w1,end_w1;
float sum,elt1,elt2;
if(tid < n)
{
n1 = tid;
oh = (in_h+2*pad-filt_h)/st + 1;
ow = (in_w+2*pad-filt_w)/st + 1;
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
sum = 0.0;
start_h1 = st*h2-pad;
end_h1 = start_h1 + filt_h;
start_w1 = st*w2-pad;
end_w1 = start_w1 + filt_w;
for(c1=0;c1<in_c;c1++){
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a[IDX4C(n1,c1,h1,w1,in_c,in_h,in_w)];
elt2 = b[IDX3C(c1,h1-start_h1,w1-start_w1,filt_h,filt_w)];
sum = sum + elt1*elt2;
}
}
}
}
c[IDX4C(n1,0,h2,w2,in_c,oh,ow)] = sum;
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_h of filter tensor
6th arg filt_w of filter tensor
7th arg binary of input tensor
8th arg binary of filter tensor
9th arg stride
10th arg padding
*/
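/*
size note (added; values are only an example): the output of convolute1 is
oh = (in_h + 2*pad - filt_h)/st + 1 and ow = (in_w + 2*pad - filt_w)/st + 1.
e.g. in_h = in_w = 28, filt_h = filt_w = 5, st = 1, pad = 0 gives
oh = ow = (28 + 0 - 5)/1 + 1 = 24.
*/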
static ERL_NIF_TERM
convolute1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,filt_h, filt_w, st,pad, n1, n2, n3, oh, ow;
float *a,*b, *c;
float *dev_a, *dev_b, *dev_c;
DISP("convolute1")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_h)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_w)) return enif_make_int(env,6);
if (!enif_inspect_binary(env, argv[6], &a_bin )) return enif_make_int(env,7);
if (!enif_inspect_binary(env, argv[7], &b_bin )) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &st)) return enif_make_int(env,9);
if (!enif_get_int(env, argv[9], &pad)) return enif_make_int(env,10);
n1 = in_n * in_c * in_h * in_w;
n2 = in_c * filt_h * filt_w;
oh = (in_h+2*pad-filt_h)/st + 1;
ow = (in_w+2*pad-filt_w)/st + 1;
n3 = in_n * oh * ow;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b,c to GPU dev_a, dev_b, dev_c
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
convolute_kernel << <1, in_n>> >(dev_a, dev_b, dev_c, filt_h, filt_w, st, pad, in_c, in_h, in_w, in_n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void deconvolute1_kernel(float *a, float *b, float *c, int filt_h, int filt_w, int st, int pad, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,oh,ow,start_h1,end_h1,start_w1,end_w1;
float sum,elt1,elt2;
if(tid < n)
{
n1 = tid;
oh = (in_h+2*pad-filt_h)/st + 1;
ow = (in_w+2*pad-filt_w)/st + 1;
//full convolute. stride=1 always
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
start_h1 = h2-pad;
end_h1 = start_h1 + filt_h;
start_w1 = w2-pad;
end_w1 = start_w1 + filt_w;
sum = 0.0;
for(c1=0;c1<in_c;c1++){
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a[IDX4C(n1,0,h1,w1,in_c,in_h,in_w)];
elt2 = b[IDX3C(c1,h1-start_h1,w1-start_w1,filt_h,filt_w)];
sum = sum + elt1*elt2;
}
}
}
}
c[IDX4C(n1,0,h2,w2,in_c,oh,ow)] = sum;
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_h of filter tensor
6th arg filt_w of filter tensor
7th arg binary of input loss tensor
8th arg binary of filter tensor
9th arg stride
10th arg padding
*/
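/*
padding note (added; sizes are only an example): deconvolute1 rotates the filter
180 degrees and runs a full convolution with pad1 = (filt_h - 1) + pad.
e.g. a 3x3 filter with pad = 0 gives pad1 = 2, so a 24x24 loss tensor expands to
(24 + 2*2 - 3) + 1 = 26, matching a 26x26 forward input. stride is fixed to 1 in the kernel.
*/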
static ERL_NIF_TERM
deconvolute1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,filt_h, filt_w, st,pad, pad1, n1, n2, n3, oh, ow, i,j,k;
float *a,*b, *b1, *c;
float *dev_a, *dev_b, *dev_c;
DISP("deconvolute1")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_h)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_w)) return enif_make_int(env,6);
if (!enif_inspect_binary(env, argv[6], &a_bin )) return enif_make_int(env,7);
if (!enif_inspect_binary(env, argv[7], &b_bin )) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &st)) return enif_make_int(env,9);
if (!enif_get_int(env, argv[9], &pad)) return enif_make_int(env,10);
n1 = in_n * in_c * in_h * in_w;
n2 = in_c * filt_h * filt_w;
pad1 = filt_h - 1 + pad;
oh = (in_h+2*pad1-filt_h)/st + 1;
ow = (in_w+2*pad1-filt_w)/st + 1;
n3 = in_n * 1 * oh * ow;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
b1 = (float *) enif_alloc(n2 * sizeof(float));
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
//rotate 180 degree
for(i=0;i<in_c;i++){
for(j=0;j<filt_h;j++){
for(k=0;k<filt_w;k++){
//if(IDX3C(i,filt_h-j-1,filt_w-k-1,filt_h,filt_w) >= n2) return enif_make_int(env,11001);
b1[IDX3C(i,filt_h-j-1,filt_w-k-1,filt_h,filt_w)] = b[IDX3C(i,j,k,filt_h,filt_w)];
}
}
}
/*
for(i=0;i<in_c;i++){
for(j=0;j<filt_h;j++){
for(k=0;k<filt_w;k++){
printf("%f", b1[IDX3C(i,j,k,filt_h,filt_w)]);
}
}
}
*/
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b1,c to GPU dev_a, dev_b, dev_c
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b1, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
deconvolute1_kernel << <1, in_n>> >(dev_a, dev_b, dev_c, filt_h, filt_w, st, pad1, in_c, in_h, in_w, in_n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
enif_free(b1);
return(c_bin);
}
__global__ void deconvolute2_kernel(float *a1, float *a, float *b, float *c, int filt_h, int filt_w, int st, int pad, int in_c, int in_h, int in_w, int loss_h, int loss_w, int n)
{
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,oh,ow,start_h1,end_h1,start_w1,end_w1;
int k,l,k1,l1;
float sum,elt1,elt2;
if(tid < n)
{
n1 = tid;
// caution! stride=1
oh = (in_h+2*pad-filt_h) + 1;
ow = (in_w+2*pad-filt_w) + 1;
//dilate loss tensor. loss tensor is 1 channel
for(k=0;k<loss_h;k++){
for(l=0;l<loss_w;l++){
elt1 = a[IDX4C(n1,0,k,l,1,loss_h,loss_w)];
k1 = st*k;
l1 = st*l;
a1[IDX4C(n1,0,k1,l1,1,in_h,in_w)] = elt1;
}
}
    //full convolute. stride=1
for(w2=0;w2<ow;w2++){
for(h2=0;h2<oh;h2++){
start_h1 = h2-pad;
end_h1 = start_h1 + filt_h;
start_w1 = w2-pad;
end_w1 = start_w1 + filt_w;
sum = 0.0;
for(c1=0;c1<in_c;c1++){
for(h1=start_h1;h1<end_h1;h1++){
for(w1=start_w1;w1<end_w1;w1++){
if(h1 >= 0 && h1 < in_h && w1 >= 0 && w1 < in_w){
elt1 = a1[IDX4C(n1,0,h1,w1,1,in_h,in_w)];
elt2 = b[IDX3C(c1,h1-start_h1,w1-start_w1,filt_h,filt_w)];
sum = sum + elt1*elt2;
}
}
}
}
c[IDX4C(n1,0,h2,w2,1,oh,ow)] = sum;
}
}
}
}
/*
dilate loss tensor
e.g.
|1.0,2.0|
|3.0,4.0|
dilated stride=2
|1.0,0.0,2.0|
|0.0,0.0,0.0|
|3.0,0.0,4.0|
*/
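/*
size note (added): dilation inserts st-1 zeros between neighbouring elements, so
in_h = loss_h + (loss_h - 1)*(st - 1) and in_w = loss_w + (loss_w - 1)*(st - 1).
the 2x2 example above with st = 2 becomes 2 + (2-1)*(2-1) = 3, i.e. the 3x3
dilated tensor shown, which is then convolved with the rotated filter.
*/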
/*
1st arg in_n of input loss tensor
2nd arg in_c of input loss tensor
3rd arg in_h of input loss tensor
4th arg in_w of input loss tensor
5th arg filt_h of filter tensor
6th arg filt_w of filter tensor
7th arg binary of input loss tensor
8th arg binary of filter tensor
9th arg stride
10th arg padding
*/
static ERL_NIF_TERM
deconvolute2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,filt_h, filt_w, st,pad, pad1, n1, n2, n3, oh, ow, i,j,k, loss_h, loss_w;
float *a, *a1, *b, *b1, *c;
float *dev_a, *dev_a1, *dev_b, *dev_c;
DISP("deconvolute2")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &loss_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &loss_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_h)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_w)) return enif_make_int(env,6);
if (!enif_inspect_binary(env, argv[6], &a_bin )) return enif_make_int(env,7);
if (!enif_inspect_binary(env, argv[7], &b_bin )) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &st)) return enif_make_int(env,9);
if (!enif_get_int(env, argv[9], &pad)) return enif_make_int(env,10);
// size for dilate
in_h = loss_h + (loss_h - 1)*(st - 1);
in_w = loss_w + (loss_w - 1)*(st - 1);
n1 = in_n * 1 * in_h * in_w; //loss tensor size
n2 = in_c * filt_h * filt_w; //filter tensor size
pad1 = (filt_h - 1) + pad; //padding size with dilate
oh = (in_h+2*pad1-filt_h) + 1; //output deconvolute tensor size. caution stride=1.
ow = (in_w+2*pad1-filt_w) + 1; //
n3 = in_n * 1 * oh * ow; //
a = (float *) a_bin.data;
b = (float *) b_bin.data;
a1 = (float *) enif_alloc(n1 * sizeof(float));
b1 = (float *) enif_alloc(n2 * sizeof(float));
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
//rotate 180 degree
for(i=0;i<in_c;i++){
for(j=0;j<filt_h;j++){
for(k=0;k<filt_w;k++){
//if(IDX3C(i,filt_h-j-1,filt_w-k-1,filt_h,filt_w) >= n2) return enif_make_int(env,11001);
b1[IDX3C(i,filt_h-j-1,filt_w-k-1,filt_h,filt_w)] = b[IDX3C(i,j,k,filt_h,filt_w)];
}
}
}
// dilate
for(i=0;i<n1;i++){
a1[i] = 0.0;
}
CHECK(cudaMalloc((void**)&dev_a1, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_a, in_n*1*loss_h*loss_w * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
CHECK(cudaMemcpy(dev_a1, a1, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_a, a, in_n*1*loss_h*loss_w * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b1, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
deconvolute2_kernel << <1, in_n>> >(dev_a1, dev_a, dev_b, dev_c, filt_h, filt_w, st, pad1, in_c, in_h, in_w, loss_h, loss_w, in_n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_a1);
cudaFree(dev_b);
cudaFree(dev_c);
enif_free(a1);
enif_free(b1);
return(c_bin);
}
__global__ void gradfilter_kernel(float *a, float *b, float *c, int filt_h, int filt_w, int loss_h, int loss_w, int st, int pad, int in_c, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,c1,h1,w1,h2,w2,h3,w3;
float sum,elt1,elt2;
if(tid < n)
{
n1 = tid;
for(c1=0;c1<in_c;c1++){
//h1,w1 is index of filter
for(h1=0;h1<filt_h;h1++){
for(w1=0;w1<filt_w;w1++){
//h2,w2 is index of loss tensor
sum = 0.0;
for(h2=0;h2<loss_h;h2++){
for(w2=0;w2<loss_w;w2++){
//h3,w3 is index of input tensor
h3 = h1*st-pad + h2;
w3 = w1*st-pad + w2;
if(h3>=0 && h3<in_h && w3>=0 && w3<in_w){
elt1 = a[IDX4C(n1,c1,h3,w3,in_c,in_h,in_w)]; //input tensor
elt2 = b[IDX4C(n1,0,h2,w2,in_c,loss_h,loss_w)]; //loss tensor
sum = sum + elt1*elt2;
}
}
}
          //accumulate into the filter gradient; atomicAdd because every batch thread (tid) updates the same filter cell
          atomicAdd(&c[IDX3C(c1,h1,w1,filt_h,filt_w)], sum);
}
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_c of input tensor
3rd arg in_h of input tensor
4th arg in_w of input tensor
5th arg filt_h of filter tensor
6th arg filt_w of filter tensor
7th arg loss_h of loss tensor
8th arg loss_w of loss tensor
9th arg binary of input tensor
10th arg binary of loss tensor
11th arg stride
12th arg padding
*/
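/*
gradient note (added for clarity): for each filter cell (c1,h1,w1) the kernel
accumulates
  grad[c1][h1][w1] += sum over (h2,w2) of input[n1][c1][h1*st - pad + h2][w1*st - pad + w2] * loss[n1][h2][w2]
over all samples n1, and the host code below divides the result by in_n to
average the gradient over the batch.
*/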
static ERL_NIF_TERM
gradfilter1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin;
int in_n,in_c,in_h,in_w,filt_h,filt_w,loss_h,loss_w,st,pad,n1,n2,n3,i;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
float count;
DISP("gradfilter1")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &in_w)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &filt_h)) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &filt_w)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &loss_h)) return enif_make_int(env,7);
if (!enif_get_int(env, argv[7], &loss_w)) return enif_make_int(env,8);
if (!enif_inspect_binary(env, argv[8], &a_bin )) return enif_make_int(env,9);
if (!enif_inspect_binary(env, argv[9], &b_bin )) return enif_make_int(env,10);
    if (!enif_get_int(env, argv[10], &st)) return enif_make_int(env,11);
    if (!enif_get_int(env, argv[11], &pad)) return enif_make_int(env,12);
n1 = in_n * in_c * in_h * in_w;
n2 = in_n * loss_h * loss_w;
n3 = in_c * filt_h * filt_w;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n3 * sizeof(float), &c_bin);
//initialize c
for(i=0;i<n3;i++){
c[i] = 0.0;
}
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n2 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n3 * sizeof(float)));
// copy from host a,b,c to GPU dev_a, dev_b, dev_c
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n2 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n3 * sizeof(float), cudaMemcpyHostToDevice));
gradfilter_kernel << <1, in_n>> >(dev_a, dev_b, dev_c, filt_h, filt_w, loss_h, loss_w, st, pad, in_c, in_h, in_w, in_n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n3 * sizeof(float), cudaMemcpyDeviceToHost));
//average
count = (float) in_n;
if(in_n != 0){
for(i=0;i<n3;i++){
c[i] = c[i] / count;
}
}
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void full_kernel(float *a, float *b, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,i,j;
float elt;
if(tid < n)
{
n1 = tid;
for(i=0;i<in_h;i++){
for(j=0;j<in_w;j++){
elt = a[IDX4C(n1,0,i,j,1,in_h,in_w)];
b[IDX2C(n1,i*in_w + j,n)] = elt;
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_h of input tensor
3rd arg in_w of input tensor
4th arg binary of input tensor
*/
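/*
layout note (added; indices are only an example): full_kernel flattens each
(1,in_h,in_w) feature map of sample n1 into row n1 of a column-major matrix:
b[IDX2C(n1, i*in_w + j, in_n)] = a[IDX4C(n1,0,i,j,1,in_h,in_w)].
e.g. with in_h = in_w = 2, element (i,j) = (1,0) of sample 0 lands in column
1*2 + 0 = 2 of row 0. unfull_kernel below applies the inverse mapping.
*/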
static ERL_NIF_TERM
full1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_h,in_w,n1;
float *a,*b;
float *dev_a, *dev_b;
DISP("full1")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_w)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
//printf("%d %d %d \n\r", in_n, in_h, in_w);
  // in_c is always 1
n1 = in_n * in_h * in_w;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n1 * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n1 * sizeof(float), cudaMemcpyHostToDevice));
full_kernel << <1, in_n>> >(dev_a, dev_b, in_h, in_w, in_n);
  // copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n1 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
__global__ void unfull_kernel(float *a, float *b, int in_h, int in_w, int n)
{
int tid = threadIdx.x;
int n1,i,j;
float elt;
if(tid < n)
{
n1 = tid;
for(i=0;i<in_h;i++){
for(j=0;j<in_w;j++){
        //inverse of full_kernel: read the flattened matrix and restore the (n,1,in_h,in_w) tensor layout
        elt = a[IDX2C(n1,i*in_w + j,n)];
        b[IDX4C(n1,0,i,j,1,in_h,in_w)] = elt;
}
}
}
}
/*
1st arg in_n of input tensor
2nd arg in_h of input tensor
3rd arg in_w of input tensor
4th arg binary of input tensor
*/
static ERL_NIF_TERM
unfull1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int in_n,in_h,in_w,n1;
float *a,*b;
float *dev_a, *dev_b;
DISP("unfull1")
if (!enif_get_int(env, argv[0], &in_n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &in_h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &in_w)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
  // in_c is always 1
n1 = in_n * in_h * in_w;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n1 * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n1 * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n1 * sizeof(float)));
  // copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n1 * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n1 * sizeof(float), cudaMemcpyHostToDevice));
unfull_kernel << <1, in_n>> >(dev_a, dev_b, in_h, in_w, in_n);
  // copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n1 * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
print1(ErlNifEnv *env, int argc, const ERL_NIF_TERM *argv) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
float *a;
int r,c,i,j;
DISP("print1")
if (!enif_get_int(env, argv[0], &r )) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
for(i=0;i<r;i++){
for(j=0;j<c;j++){
printf("%f ", a[IDX2C(i,j,r)]);
}
printf("\n\r");
}
printf("\n\r");
result = enif_make_atom(env,"true");
return result;
}
static ERL_NIF_TERM
new1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i;
ERL_NIF_TERM a_bin;
float *a;
double d;
DISP("new1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_double(env, argv[1], &d)) return enif_make_int(env,2);
a = (float *) enif_make_new_binary(env, n * sizeof(float), &a_bin);
// Set matrix data
for(i=0;i<n;i++){
a[i] = (float)d;
}
return(a_bin);
}
static ERL_NIF_TERM
new2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int r1,c1,i,j;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
DISP("new2")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
a = (float *) enif_make_new_binary(env, r1 * c1 * sizeof(float), &a_bin);
// Set matrix data
list = argv[2]; /* matrix1 */
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX2C(i,j,r1)] = (float)d;
}
}
return(a_bin);
}
static ERL_NIF_TERM
new3(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int c,h,w,i,j,k;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
DISP("new3")
if (!enif_get_int(env, argv[0], &c)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &w)) return enif_make_int(env,3);
a = (float *) enif_make_new_binary(env, c * h * w * sizeof(float), &a_bin);
// Set matrix data
list = argv[3]; /* matrix1 */
for(i=0;i<c;i++){
for(j=0;j<h;j++){
for(k=0;k<w;k++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX3C(i,j,k,h,w)] = (float)d;
}
}
}
return(a_bin);
}
static ERL_NIF_TERM
new4(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,c,h,w,i,j,k,l;
ERL_NIF_TERM head, list, a_bin;
float *a;
double d;
DISP("new4")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w)) return enif_make_int(env,4);
a = (float *) enif_make_new_binary(env, n * c * h * w * sizeof(float), &a_bin);
// Set matrix data
list = argv[4]; /* matrix1 */
for(i=0;i<n;i++){
for(j=0;j<c;j++){
for(k=0;k<h;k++){
for(l=0;l<w;l++){
enif_get_list_cell(env, list, &head, &list);
enif_get_double(env,head,&d);
a[IDX4C(i,j,k,l,c,h,w)] = (float)d;
}
}
}
}
return(a_bin);
}
static ERL_NIF_TERM
rand1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i;
float x,y,val;
float *result_data;
ERL_NIF_TERM result;
DISP("rand1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
result_data = (float *) enif_make_new_binary(env, n * sizeof(float), &result);
srand((unsigned) time(NULL));
for(i=0;i<n;i++){
//box_muller
x = (float)rand()/(float)RAND_MAX;
y = (float)rand()/(float)RAND_MAX;
val = sqrt(-2.0 * log(x)) * cos(2.0 * PI * y);
result_data[i] = val;
}
return(result);
}
static ERL_NIF_TERM
mult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int r1, c1, r2, c2, n, i, j;
float *a,*b,*c;
float* devPtrA;
float* devPtrB;
float* devPtrC;
DISP("mult1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &r2)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &c2)) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin)) return enif_make_int(env,6);
n = r1*c2;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
for(j=0;j<c2;j++)
for(i=0;i<r1;i++)
c[IDX2C(i,j,r1)] = 0.0;
// Initialize CUBLAS
cublasInit();
CUBLAS(cublasAlloc (r1*c1, sizeof(*a), (void**)&devPtrA));
CUBLAS(cublasAlloc (r2*c2, sizeof(*b), (void**)&devPtrB));
CUBLAS(cublasAlloc (r1*c2, sizeof(*c), (void**)&devPtrC));
CUBLAS(cublasSetMatrix (r1, c1, sizeof(*a), a, r1, devPtrA, r1));
CUBLAS(cublasSetMatrix (r2, c2, sizeof(*b), b, r2, devPtrB, r2));
CUBLAS(cublasSetMatrix (r1, c2, sizeof(*c), c, r1, devPtrC, r1));
//Sgemm
cublasSgemm('N', 'N', r1, c2, c1, 1.0, devPtrA, r1, devPtrB, r2, 0.0, devPtrC, r1);
CUBLAS(cublasGetMatrix (r1, c2, sizeof(*c), devPtrC, r1, c, r1));
// Shutdown CUBLAS
cublasFree(devPtrA);
cublasFree(devPtrB);
cublasFree(devPtrC);
cublasShutdown();
return(c_bin);
}
__global__ void add1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
add1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
DISP("add1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
add1_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void sub1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] - b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
sub1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
DISP("sub1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
sub1_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void emult1_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
emult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int r1, c1, n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
DISP("emult1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &b_bin)) return enif_make_int(env,4);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
emult1_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
static ERL_NIF_TERM
transpose1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j;
float *a,*b;
DISP("transpose1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
b[IDX2C(j,i,c1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
ident1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
int n,i,j;
ERL_NIF_TERM a_bin;
float *a;
DISP("ident1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
a = (float *) enif_make_new_binary(env, n * n * sizeof(float), &a_bin);
// Set matrix data
for(i=0;i<n;i++){
for(j=0;j<n;j++){
if(i==j)
a[IDX2C(i,j,n)] = 1.0;
else
a[IDX2C(i,j,n)] = 0.0;
}
}
return(a_bin);
}
__global__ void sigmoid_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = SIGMOID(a[tid]);
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_sigmoid(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
DISP("activate_sigmoid")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
sigmoid_kernel << <128, 128 >> >(dev_a, dev_b, n);
  // copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
__global__ void tanh_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = tanh(a[tid]);
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_tanh(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
DISP("activate_tanh")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
tanh_kernel << <128, 128 >> >(dev_a, dev_b, n);
  // copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
__global__ void relu_kernel(float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
if(a[tid] >= 0)
b[tid] = a[tid];
else
b[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
activate_relu(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
DISP("activate_relu")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
relu_kernel << <128, 128 >> >(dev_a, dev_b, n);
  // copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
activate_softmax(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, k;
float *a,*b;
float max,sum,delta;
DISP("activate_softmax")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
//calculate softmax
delta = 0.01;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
max = -3.402823e38;
for(k=0;k<c1;k++){
if(a[IDX2C(i,k,r1)] > max)
max = a[IDX2C(i,k,r1)];
}
sum = 0.0;
for(k=0;k<c1;k++){
sum = sum + exp(a[IDX2C(i,k,r1)] - max);
}
b[IDX2C(i,j,r1)] = exp(a[IDX2C(i,j,r1)] - max) / (sum+delta);
}
}
return(b_bin);
}
__global__ void differ_sigmoid_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * ((1 - SIGMOID(b[tid])) * SIGMOID(b[tid]));
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_sigmoid(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
DISP("differ_sigmoid")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
differ_sigmoid_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void differ_tanh_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
c[tid] = a[tid] * (1/(cosh(b[tid]) * cosh(b[tid])));
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_tanh(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
DISP("differ_tanh")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
differ_tanh_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void differ_relu_kernel(float *a, float *b, float *c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
if(b[tid] >= 0)
c[tid] = a[tid];
else
c[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
differ_relu(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n;
float *a,*b,*c;
float *dev_a, *dev_b, *dev_c;
DISP("differ_relu")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
differ_relu_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
__global__ void smult_kernel(float d, float *a, float *b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
b[tid] = d * a[tid];
tid += blockDim.x * gridDim.x;
}
}
static ERL_NIF_TERM
smult1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int n;
float *a,*b;
float *dev_a, *dev_b;
double s;
DISP("smult1")
if (!enif_get_double(env, argv[0], &s)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &n)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
smult_kernel << <128, 128 >> >((float)s,dev_a, dev_b, n);
  // copy to host b from GPU dev_b
CHECK(cudaMemcpy(b, dev_b, n * sizeof(float), cudaMemcpyDeviceToHost));
// free
cudaFree(dev_a);
cudaFree(dev_b);
return(b_bin);
}
static ERL_NIF_TERM
trace1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
float trace;
DISP("trace1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
trace = 0.0;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==j)
trace = trace + a[IDX2C(i,j,r1)];
}
}
result = enif_make_double(env,trace);
return(result);
}
static ERL_NIF_TERM
mean_square(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a, *b;
float d,s;
DISP("mean_square")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &b_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
s = 0.0;
for(i=0;i<r1;i++){
for (j=0;j<c1;j++){
d = a[IDX2C(i,j,r1)] - b[IDX2C(i,j,r1)];
s = s + d*d;
}
}
s = s / (2.0*(float(r1)));
result = enif_make_double(env,s);
return(result);
}
static ERL_NIF_TERM
cross_entropy(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a, *b;
float d,s,delta;
DISP("cross_entropy")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &b_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
delta = 1e-7;
s = 0.0;
for(i=0;i<r1;i++){
for (j=0;j<c1;j++){
d = fabsf(a[IDX2C(i,j,r1)]) + delta;
s = s + b[IDX2C(i,j,r1)] * log(d);
}
}
s = -1.0 * s / (float)r1;
result = enif_make_double(env,s);
return(result);
}
static ERL_NIF_TERM
elt1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
DISP("elt1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
  if (!enif_get_int(env, argv[2], &i)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &j)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
a = (float *) a_bin.data;
result = enif_make_double(env,(double)a[IDX2C(i,j,r1)]);
return(result);
}
static ERL_NIF_TERM
set1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, x, y;
float *a,*b;
double val;
DISP("set1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &x)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &y)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &val)) return enif_make_int(env,6);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==x && j==y)
b[IDX2C(i,j,r1)] = (float)val;
else
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
add_diff1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, n, i, j, x, y;
float *a,*b;
double val;
DISP("add_diff1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &x)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &y)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &val)) return enif_make_int(env,6);
n = r1*c1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
if(i==x && j==y)
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)] + (float)val;
else
b[IDX2C(i,j,r1)] = a[IDX2C(i,j,r1)];
}
}
return(b_bin);
}
static ERL_NIF_TERM
add_diff2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int c1, h1, w1, n, i, j, k, x, y, z;
float *a,*b;
double val;
DISP("add_diff2")
if (!enif_get_int(env, argv[0], &c1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &h1)) return enif_make_int(env,2);
  if (!enif_get_int(env, argv[2], &w1)) return enif_make_int(env,3);
  if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
  if (!enif_get_int(env, argv[4], &x)) return enif_make_int(env,5);
  if (!enif_get_int(env, argv[5], &y)) return enif_make_int(env,6);
  if (!enif_get_int(env, argv[6], &z)) return enif_make_int(env,7);
  if (!enif_get_double(env, argv[7], &val)) return enif_make_int(env,8);
n = c1*h1*w1;
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, n * sizeof(float), &b_bin);
for(i=0;i<c1;i++){
for(j=0;j<h1;j++){
for(k=0;k<w1;k++){
if(i==x && j==y && k==z)
b[IDX3C(i,j,k,h1,w1)] = a[IDX3C(i,j,k,h1,w1)] + (float)val;
else
b[IDX3C(i,j,k,h1,w1)] = a[IDX3C(i,j,k,h1,w1)];
}
}
}
return(b_bin);
}
static ERL_NIF_TERM
average1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM b_bin;
int r1, c1, i, j;
float *a,*b;
float sum;
DISP("average1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
b = (float *) enif_make_new_binary(env, c1 * sizeof(float), &b_bin);
for(j=0;j<c1;j++){
sum = 0.0;
for(i=0;i<r1;i++){
sum = sum + a[IDX2C(i,j,r1)];
}
b[j] = sum / (float)r1;
}
return(b_bin);
}
/*
1st arg row-size of matrix
2nd arg col-size of matrix
3rd arg matrix data binary
*/
static ERL_NIF_TERM
sum1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM result;
int r1, c1, i, j;
float *a;
float sum;
DISP("sum1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
sum = 0.0;
for(i=0;i<r1;i++){
for(j=0;j<c1;j++){
sum = sum + a[IDX2C(i,j,r1)];
}
}
result = enif_make_double(env,sum);
return(result);
}
/*
transfer 2-dim matrix to list
*/
static ERL_NIF_TERM
to_list1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int r1, c1, i, j;
float *a;
DISP("to_list1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=r1-1;i>=0;i--){
for(j=c1-1;j>=0;j--){
head = enif_make_double(env,(double)a[IDX2C(i,j,r1)]);
list = enif_make_list_cell(env,head,list);
}
}
return(list);
}
/*
transfer 3-dim matrix to list
*/
static ERL_NIF_TERM
to_list2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int c, h, w, i, j, k;
float *a;
DISP("to_list2")
if (!enif_get_int(env, argv[0], &c)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &h)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &w)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &a_bin )) return enif_make_int(env,4);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=c-1;i>=0;i--){
for(j=h-1;j>=0;j--){
for(k=w-1;k>=0;k--){
head = enif_make_double(env,(double)a[IDX3C(i,j,k,h,w)]);
list = enif_make_list_cell(env,head,list);
}
}
}
return(list);
}
/*
transfer 4-dim matrix to list
*/
static ERL_NIF_TERM
to_list3(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list;
int n, c, h, w, i, j, k, l;
float *a;
DISP("to_list3")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_badarg(env);
a = (float *) a_bin.data;
list = enif_make_list(env, 0);
for(i=n-1;i>=0;i--){
for(j=c-1;j>=0;j--){
for(k=h-1;k>=0;k--){
for(l=w-1;l>=0;l--){
head = enif_make_double(env,(double)a[IDX4C(i,j,k,l,c,h,w)]);
list = enif_make_list_cell(env,head,list);
}
}
}
}
return(list);
}
__global__ void sgd1_kernel(float *a, float *b, float *c, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
if(a[tid] != 0.0)
c[tid] = a[tid] - b[tid]*lr;
else
c[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
/*
w - g*lr |> dropout()
for sgd
w is weight matrix.
g is gradient matrix.
when an element of w is zero the result is zero. This implements dropout.
return updated weight matrix.
1st arg is size of vectorized matrix
2nd arg is weight matrix or tensor
3rd arg is gradient matrix or tensor
4th arg is learning rate
5th arg is dropout rate
*/
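/*
worked example (added; numbers are only an example): with w = 0.5, g = 0.2 and
lr = 0.1 the kernel computes 0.5 - 0.2*0.1 = 0.48. if the stored weight is
exactly 0.0 the result stays 0.0, so dropped-out connections remain inactive.
*/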
static ERL_NIF_TERM
sgd1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin, b_bin;
ERL_NIF_TERM c_bin;
int n,r;
float *a,*b,*c,*dev_a, *dev_b, *dev_c;
float lr,dr,randfloat;
double learning_rate,dropout_rate;
DISP("sgd1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin )) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_get_double(env, argv[3], &learning_rate)) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &dropout_rate)) return enif_make_int(env,5);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n * sizeof(float), &c_bin);
lr = (float) learning_rate;
dr = (float) dropout_rate;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
sgd1_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, lr, n);
// copy to host c from GPU dev_c
CHECK(cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost));
// dropout
randfloat = (double)(rand() % 100) / 100.0;
if(dr != 0.0 && dr < randfloat){
r = rand() % n;
c[r] = 0.0;
}
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(c_bin);
}
/*
def momentum(v, g, lr) do
Matrex.apply(v, g, fn v, g -> 0.5 * v - lr * g end)
end
*/
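/*
worked example (added; numbers are only an example): with v = 0.4, g = 0.2 and
lr = 0.1 the kernel produces the new velocity d = 0.5*0.4 - 0.1*0.2 = 0.18 and,
for a non-zero weight w = 1.0, the updated weight e = 1.0 + 0.18 = 1.18.
*/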
__global__ void momentum_kernel(float *a, float *b, float *c, float *d, float *e, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
d[tid] = (0.5 * b[tid]) - (lr * c[tid]);
if(a[tid] != 0.0)
e[tid] = a[tid] + d[tid];
else
e[tid] = 0.0;
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg row-size of vectorized each-matrix
2nd arg weight-matrix
3rd arg v-matrix
4th arg g-matrix
5th arg learning rate
6th arg dropout rate
return tuple
*/
static ERL_NIF_TERM
momentum1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin;
ERL_NIF_TERM d_bin,e_bin,tuple;
int n,r;
float *a,*b,*c,*d,*e;
float *dev_a, *dev_b, *dev_c ,*dev_d, *dev_e;
float lr,dr,randfloat;
double learning_rate,dropout_rate;
DISP("momentum1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin )) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin )) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &learning_rate)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &dropout_rate)) return enif_make_int(env,6);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
lr = (float) learning_rate;
dr = (float) dropout_rate;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_e, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_d, d, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_e, e, n * sizeof(float), cudaMemcpyHostToDevice));
momentum_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, dev_d, dev_e, lr, n);
  // copy to host d,e from GPU dev_d,dev_e
CHECK(cudaMemcpy(d, dev_d, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(e, dev_e, n * sizeof(float), cudaMemcpyDeviceToHost));
// dropout
randfloat = (double)(rand() % 100) / 100.0;
if(dr != 0.0 && dr < randfloat){
r = rand() % n;
e[r] = 0.0;
}
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_d);
cudaFree(dev_e);
tuple = enif_make_tuple2(env,d_bin,e_bin);
return(tuple);
}
/*
h1 = h + grad*grad
w1 = w - lr * 1/sqrt(h1) * grad
*/
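/*
worked example (added; numbers are only an example): with h = 0.0, grad = 0.5,
w = 1.0 and lr = 0.1 the kernel computes h1 = 0.0 + 0.5*0.5 = 0.25 and
w1 = 1.0 - 0.1 * (1/sqrt(0.25)) * 0.5 = 1.0 - 0.1*2.0*0.5 = 0.9.
*/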
__global__ void adagrad_kernel(float *a, float *b, float *c, float *d, float *e, float lr, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n)
{
d[tid] = b[tid] + c[tid]*c[tid];
if(d[tid] != 0)
e[tid] = a[tid] - (lr * (1 / sqrt(d[tid])) * c[tid]);
else
e[tid] = a[tid] - (lr * c[tid]);
tid += blockDim.x * gridDim.x;
}
}
/*
1st arg row-size of vectorized each-matrix
2nd arg weight-matrix (a_bin)
3rd arg h-matrix (b_bin)
4th arg grad-matrix (c_bin)
5th arg learning rate
6th arg dropout rate
return tuple {new-h,new-w}
*/
static ERL_NIF_TERM
adagrad1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin,c_bin;
ERL_NIF_TERM d_bin,e_bin,tuple;
int n,r;
float *a,*b,*c,*d,*e;
float *dev_a, *dev_b, *dev_c, *dev_d, *dev_e;
float lr,dr,randfloat;
double learning_rate,dropout_rate;
DISP("adagrad1")
if (!enif_get_int(env, argv[0], &n)) return enif_make_int(env,1);
if (!enif_inspect_binary(env, argv[1], &a_bin)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &b_bin)) return enif_make_int(env,3);
if (!enif_inspect_binary(env, argv[3], &c_bin)) return enif_make_int(env,4);
if (!enif_get_double(env, argv[4], &learning_rate)) return enif_make_int(env,5);
if (!enif_get_double(env, argv[5], &dropout_rate)) return enif_make_int(env,6);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) c_bin.data;
d = (float *) enif_make_new_binary(env, n * sizeof(float), &d_bin);
e = (float *) enif_make_new_binary(env, n * sizeof(float), &e_bin);
lr = (float) learning_rate;
dr = (float) dropout_rate;
// Allocate for GPU
CHECK(cudaMalloc((void**)&dev_a, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_b, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_c, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_d, n * sizeof(float)));
CHECK(cudaMalloc((void**)&dev_e, n * sizeof(float)));
// copy from host a,b to GPU dev_a, dev_b
CHECK(cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_c, c, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_d, d, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dev_e, e, n * sizeof(float), cudaMemcpyHostToDevice));
adagrad_kernel << <128, 128 >> >(dev_a, dev_b, dev_c, dev_d, dev_e, lr, n);
// copy results d,e back to host from GPU dev_d, dev_e
CHECK(cudaMemcpy(d, dev_d, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(e, dev_e, n * sizeof(float), cudaMemcpyDeviceToHost));
// dropout
randfloat = (double)(rand() % 100) / 100.0;
if(dr != 0.0 && dr < randfloat){
r = rand() % n;
e[r] = 0.0;
}
// free
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_d);
cudaFree(dev_e);
tuple = enif_make_tuple2(env,d_bin,e_bin);
return(tuple);
}
/*
1st arg row-size of matrix
2nd arg col-size of matrix
3rd arg predicted matrix
4th arg list of label. each element is integer
*/
static ERL_NIF_TERM
accuracy1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin;
ERL_NIF_TERM head,list,result;
int r1, c1, i, j, n, index,sum;
float *a;
double max,rate;
DISP("accuracy1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
a = (float *) a_bin.data;
// calculate accuracy
sum = 0;
list = argv[3];
for(i=0;i<r1;i++){
max = 0.0;
index = 0;
enif_get_list_cell(env, list, &head, &list);
enif_get_int(env,head,&n);
for(j=0;j<c1;j++){
if(a[IDX2C(i,j,r1)] > max){
max = a[IDX2C(i,j,r1)];
index = j;
}
}
if(index == n)
sum++;
}
rate = (double)sum / (double)r1;
result = enif_make_double(env,rate);
return(result);
}
static ERL_NIF_TERM
random_select1(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin,tuple;
int r1, c1, r2, c2, i, j, n, r;
float *a, *b, *c, *d;
DISP("random_select1")
if (!enif_get_int(env, argv[0], &r1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_inspect_binary(env, argv[2], &a_bin )) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &r2)) return enif_make_int(env,4);
if (!enif_get_int(env, argv[4], &c2)) return enif_make_int(env,5);
if (!enif_inspect_binary(env, argv[5], &b_bin )) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &n)) return enif_make_int(env,7);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n*c1 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n*c2 * sizeof(float), &d_bin);
// random-select
for(i=0;i<n;i++){
r = rand() % r1;
for(j=0;j<c1;j++){
c[IDX2C(i,j,n)] = a[IDX2C(r,j,r1)];
}
for(j=0;j<c2;j++){
d[IDX2C(i,j,n)] = b[IDX2C(r,j,r2)];
}
}
tuple = enif_make_tuple2(env,c_bin,d_bin);
return(tuple);
}
static ERL_NIF_TERM
random_select2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
ErlNifBinary a_bin,b_bin;
ERL_NIF_TERM c_bin,d_bin,tuple;
int n1,c1,h1,w1,r2,c2, i, j, k, l, n, r;
float *a, *b, *c, *d;
DISP("random_select2")
if (!enif_get_int(env, argv[0], &n1)) return enif_make_int(env,1);
if (!enif_get_int(env, argv[1], &c1)) return enif_make_int(env,2);
if (!enif_get_int(env, argv[2], &h1)) return enif_make_int(env,3);
if (!enif_get_int(env, argv[3], &w1)) return enif_make_int(env,4);
if (!enif_inspect_binary(env, argv[4], &a_bin )) return enif_make_int(env,5);
if (!enif_get_int(env, argv[5], &r2)) return enif_make_int(env,6);
if (!enif_get_int(env, argv[6], &c2)) return enif_make_int(env,7);
if (!enif_inspect_binary(env, argv[7], &b_bin )) return enif_make_int(env,8);
if (!enif_get_int(env, argv[8], &n)) return enif_make_int(env,9);
a = (float *) a_bin.data;
b = (float *) b_bin.data;
c = (float *) enif_make_new_binary(env, n*c1*h1*w1 * sizeof(float), &c_bin);
d = (float *) enif_make_new_binary(env, n*r2*c2 * sizeof(float), &d_bin);
// random-select
for(i=0;i<n;i++){
r = rand() % n1;
for(j=0;j<c1;j++){
for(k=0;k<h1;k++){
for(l=0;l<w1;l++){
c[IDX4C(i,j,k,l,c1,h1,w1)] = a[IDX4C(r,j,k,l,c1,h1,w1)];
}
}
}
for(j=0;j<c2;j++){
d[IDX2C(i,j,n)] = b[IDX2C(r,j,r2)];
}
}
tuple = enif_make_tuple2(env,c_bin,d_bin);
return(tuple);
}
// define the array of ErlNifFunc
static ErlNifFunc nif_funcs[] = {
// {erl_function_name, erl_function_arity, c_function}
{"print1", 3, print1},
{"mult1", 6, mult1},
{"new1", 2, new1},
{"new2", 3, new2},
{"new3", 4, new3},
{"new4", 5, new4},
{"rand1", 1, rand1},
{"add1", 3, add1},
{"sub1", 3, sub1},
{"emult1", 4, emult1},
{"transpose1", 3, transpose1},
{"ident1", 1, ident1},
{"activate_sigmoid", 2 ,activate_sigmoid},
{"activate_tanh", 2 , activate_tanh},
{"activate_relu", 2, activate_relu},
{"activate_softmax", 3, activate_softmax},
{"differ_sigmoid", 3, differ_sigmoid},
{"differ_tanh", 3, differ_tanh},
{"differ_relu", 3, differ_relu},
{"smult1", 3, smult1},
{"trace1", 3, trace1},
{"mean_square", 4, mean_square},
{"cross_entropy", 4, cross_entropy},
{"elt1", 5, elt1},
{"set1", 6, set1},
{"add_diff1", 6, add_diff1},
{"add_diff2", 8, add_diff2},
{"average1", 3, average1},
{"sum1", 3, sum1},
{"to_list1", 3, to_list1},
{"to_list2", 4, to_list2},
{"to_list3", 5, to_list3},
{"momentum1", 6, momentum1},
{"adagrad1", 6, adagrad1},
{"accuracy1", 4, accuracy1},
{"pooling1", 6, pooling1},
{"unpooling1", 7, unpooling1},
{"convolute1", 10, convolute1},
{"deconvolute1", 10, deconvolute1},
{"deconvolute2", 10, deconvolute2},
{"gradfilter1", 12, gradfilter1},
{"full1", 4, full1},
{"unfull1", 4, unfull1},
{"sgd1", 5, sgd1},
{"random_select1", 7, random_select1},
{"random_select2", 9, random_select2}
};
ERL_NIF_INIT(Elixir.Cumatrix, nif_funcs, NULL, NULL, NULL, NULL)
|
83aba964c0bd7e5dbf06cd9bf5d560e3d483363b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __NVCC__
template < class U >
__global__ void extractMin(unsigned int* PQ, unsigned int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K);
template < class T, class U >
__global__ void A_star_expand(int* off,int* edge,T* W, U* Hx,int* parent,volatile U* Cx,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
int N,int E, int K,int dest,int* nVFlag );
template < class U >
__global__ void keepHeapPQ(unsigned int* PQ, unsigned int* PQ_size,U* Cx,int N,int K);
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N);
template <class U >
__global__ void insertPQ(unsigned int* PQ,unsigned int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList);
template < class U >
__global__ void checkMIN(unsigned int* PQ, unsigned int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K);
template <class U>
__global__ void getCx(U* Cx,int dest,U* val);
#include "kernels/a_star_kernels.cu"
#include "a_star.cuh"
#ifdef DEBUG
#include <cstdio>
#endif
template <class T, class U >
GPU_A_Star< T, U> :: GPU_A_Star(GPU_Dynamic_Graph<T> *graph, unsigned int start_node,unsigned int end_node, unsigned int K )
{
this->graph = graph;
this->num_pq = K;
this->start_node = start_node;
this->end_node = end_node;
int N = this->graph->get_graph().get_num_nodes();
this->PQ = (unsigned int*)malloc(sizeof(unsigned int)*N );
this->PQ_size = (unsigned int*)malloc(sizeof(unsigned int)*K);
this->Cx = (U*)malloc(sizeof(U)*N);
this->Hx = (U*)malloc(sizeof(U)*N);
this->parent = (int*)malloc(sizeof(int)*N);
this->open_list = (int*)malloc(sizeof(int)*N);
memset(this->parent,-1,sizeof(int)*N);
memset(this->open_list,-1,sizeof(int)*N);
memset(this->PQ_size,0,sizeof(int)*K);
is_set_hx = false;
//todo make it memset
for(int i=0;i<N;i++){
this->Cx[i] = INT_MAX;
}
}
template <class T, class U >
void GPU_A_Star< T, U> :: __alloc_gpu()
{
int N = this->graph->get_graph().get_num_nodes();
gpuErrchk ( hipMalloc(&d_Cx,sizeof(U)*N ) );
gpuErrchk ( hipMalloc(&d_parent,sizeof(int)*N ) );
gpuErrchk ( hipMalloc(&d_open_list,sizeof(int)*N ) );
gpuErrchk ( hipMalloc(&d_PQ,sizeof(unsigned int)*N ) );
gpuErrchk ( hipMalloc(&d_PQ_size,sizeof(unsigned int)*num_pq ) );
gpuErrchk ( hipMemcpy(d_Cx,Cx,sizeof(U)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(d_PQ_size,PQ_size,sizeof(unsigned int)*num_pq,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(d_parent,parent,sizeof(int)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(d_open_list,open_list,sizeof(int)*N,hipMemcpyHostToDevice) );
}
template <class T, class U >
void GPU_A_Star< T, U> :: set_huiristics(U* hx)
{
this->Hx = hx;
is_set_hx = true;
int N = this->graph->get_graph().get_num_nodes();
gpuErrchk ( hipMalloc(&d_Hx,sizeof(U)*N ) );
gpuErrchk ( hipMemcpy(d_Hx,Hx,sizeof(U)*N,hipMemcpyHostToDevice) );
}
template <class T, class U >
std::vector<int> GPU_A_Star< T, U>:: get_path()
{
int N = this->graph->get_graph().get_num_nodes();
int E = this->graph->get_graph().get_num_edges();
int K = this->num_pq;
//init Host var
int* flag_end = (int*)malloc(sizeof(int));
int* flag_found = (int*)malloc(sizeof(int));
int* __a0 = (int*)malloc(sizeof(int));
*__a0 = 0;
//flag array needed because several threads may try to add the same vertex to the queues
int* next_vertices_flag = (int*)malloc(sizeof(int)*N);
memset(next_vertices_flag,-1,sizeof(int)*N);
*flag_end = 0;
*flag_found = 0;
//insert startNode in PQ[0]
Cx[this->start_node] = Hx[this->start_node];
PQ[0] = this->start_node;
PQ_size[0]=1;
open_list[this->start_node]=0;
//alloc
__alloc_gpu();
//next nodes flag
int* d_next_vertices_flag;
//next nodes array to insert PQ
int* d_next_vertices;
int* d_next_vertices_size;
//nodes to be expanded ( extracted from PQ )
int* d_expand_nodes;
int* d_expand_nodes_size;
//flag to end while loop and found the destination
int* d_flag_end;
int* d_flag_found;
//cost of endNode
U* d_dest_cost;
//lock for nodes
int* d_lock;
gpuErrchk ( hipMalloc(&d_lock,sizeof(int)*N) );
gpuErrchk ( hipMalloc(&d_dest_cost,sizeof(U)) );
//for next set of vertices to add in PQ
gpuErrchk ( hipMalloc(&d_next_vertices,sizeof(int)*N) );
gpuErrchk ( hipMalloc(&d_next_vertices_size,sizeof(int)) );
gpuErrchk ( hipMalloc(&d_next_vertices_flag,sizeof(int)*N) );
//next nodes to expand
gpuErrchk ( hipMalloc(&d_expand_nodes,sizeof(int)*K) ); //changed to K
gpuErrchk ( hipMalloc(&d_expand_nodes_size,sizeof(int)) );
//flag to end search
gpuErrchk( hipMalloc(&d_flag_end,sizeof(int)) );
gpuErrchk( hipMalloc(&d_flag_found,sizeof(int)) );
gpuErrchk ( hipMemcpy(d_flag_end,flag_end,sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(d_flag_found,flag_found,sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(d_next_vertices_flag,next_vertices_flag,sizeof(int)*N,hipMemcpyHostToDevice) );
// gpuErrchk ( hipMemcpy(d_next_vertices_size,__a0,sizeof(int),hipMemcpyHostToDevice) );
// gpuErrchk ( hipMemcpy(d_expand_nodes_size,__a0,sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk ( hipMemset(d_next_vertices_size,0,sizeof(int)) );
gpuErrchk ( hipMemset(d_expand_nodes_size,0,sizeof(int)) );
gpuErrchk ( hipMemset(d_lock,0,sizeof(int)*N) );
int flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(PQ_size[i]>0)
flag_PQ_not_empty=1;
}
int numThreads = 512;
int numBlocks = (K+numThreads-1)/numThreads;
int N_numBlocks = (N+numThreads-1)/numThreads;
#ifdef DEBUG
printf("[INFO] A* started\n");
#endif
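//each iteration: extract candidates from the K priority queues, expand them and
//relax neighbour costs, restore the heap property, gather newly touched vertices,
//insert them into the queues, and finally test whether the destination can be settled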
//do A* initially on the whole graph
while(*flag_end==0 && flag_PQ_not_empty==1){
//extract min
hipLaunchKernelGGL(( extractMin <U>) , dim3(numBlocks),dim3(numThreads), 0, 0, d_PQ, d_PQ_size, d_expand_nodes, d_expand_nodes_size, d_Cx, d_open_list, N, K );
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipLaunchKernelGGL(( A_star_expand < T, U >) , dim3(numBlocks),dim3(numThreads), 0, 0,
this->graph->get_graph().get_offsets(),this->graph->get_graph().get_edges(),this->graph->get_graph().get_weight(),
d_Hx,d_parent,d_Cx,
d_expand_nodes,d_expand_nodes_size, d_lock ,d_flag_found,d_open_list,
N,E,K,this->end_node,d_next_vertices_flag
);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipLaunchKernelGGL(( keepHeapPQ < U >) , dim3(numBlocks),dim3(numThreads), 0, 0, d_PQ, d_PQ_size, d_Cx, N, K );
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
//gen from flag d_next_vertices
//for N in parallel
hipLaunchKernelGGL(( setNV), dim3(N_numBlocks),dim3(numThreads), 0, 0, d_next_vertices_flag, d_next_vertices, d_next_vertices_size, N );
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipLaunchKernelGGL(( insertPQ < U >) , dim3(numBlocks),dim3(numThreads), 0, 0, d_PQ, d_PQ_size, d_next_vertices, d_next_vertices_size, d_Cx, K, N, d_open_list );
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
//cpy flagend and flagEmpty
gpuErrchk( hipMemcpy(flag_found,d_flag_found, sizeof(int),hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(PQ_size,d_PQ_size, sizeof(int)*K,hipMemcpyDeviceToHost) );
//reset nVFlag
gpuErrchk( hipMemcpy(d_next_vertices_flag,next_vertices_flag,sizeof(int)*N,hipMemcpyHostToDevice) );
//reset next insert array
gpuErrchk( hipMemcpy(d_next_vertices_size, __a0,sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_expand_nodes_size, __a0,sizeof(int),hipMemcpyHostToDevice) );
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//check for mins
if( *flag_found==1 && flag_PQ_not_empty==1){
//end
gpuErrchk( hipMemcpy(d_flag_end,flag_found,sizeof(int),hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( checkMIN < U >) , dim3(numBlocks),dim3(numThreads) , 0, 0, d_PQ, d_PQ_size, d_flag_end, d_Cx, this->end_node, N, K );
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
gpuErrchk( hipMemcpy(flag_end,d_flag_end, sizeof(int),hipMemcpyDeviceToHost) );
}
}
hipLaunchKernelGGL(( getCx < U >) , dim3(1),dim3(1), 0, 0, d_Cx, this->end_node,d_dest_cost);
U dest_cost;
gpuErrchk( hipMemcpy(&dest_cost,d_dest_cost, sizeof(U),hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(parent,d_parent, sizeof(int)*N,hipMemcpyDeviceToHost) );
std::vector<int> Path;
if(dest_cost != INT_MAX){
int p = this->end_node;
while(parent[p]!=-1){
Path.push_back(p);
p = parent[p];
}
Path.push_back(p);
}
std::reverse(Path.begin(),Path.end());
return Path;
}
#endif
|
83aba964c0bd7e5dbf06cd9bf5d560e3d483363b.cu
|
#ifdef __NVCC__
template < class U >
__global__ void extractMin(unsigned int* PQ, unsigned int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K);
template < class T, class U >
__global__ void A_star_expand(int* off,int* edge,T* W, U* Hx,int* parent,volatile U* Cx,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
int N,int E, int K,int dest,int* nVFlag );
template < class U >
__global__ void keepHeapPQ(unsigned int* PQ, unsigned int* PQ_size,U* Cx,int N,int K);
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N);
template <class U >
__global__ void insertPQ(unsigned int* PQ,unsigned int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList);
template < class U >
__global__ void checkMIN(unsigned int* PQ, unsigned int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K);
template <class U>
__global__ void getCx(U* Cx,int dest,U* val);
#include "kernels/a_star_kernels.cu"
#include "a_star.cuh"
#ifdef DEBUG
#include <cstdio>
#endif
template <class T, class U >
GPU_A_Star< T, U> :: GPU_A_Star(GPU_Dynamic_Graph<T> *graph, unsigned int start_node,unsigned int end_node, unsigned int K )
{
this->graph = graph;
this->num_pq = K;
this->start_node = start_node;
this->end_node = end_node;
int N = this->graph->get_graph().get_num_nodes();
this->PQ = (unsigned int*)malloc(sizeof(unsigned int)*N );
this->PQ_size = (unsigned int*)malloc(sizeof(unsigned int)*K);
this->Cx = (U*)malloc(sizeof(U)*N);
this->Hx = (U*)malloc(sizeof(U)*N);
this->parent = (int*)malloc(sizeof(int)*N);
this->open_list = (int*)malloc(sizeof(int)*N);
memset(this->parent,-1,sizeof(int)*N);
memset(this->open_list,-1,sizeof(int)*N);
memset(this->PQ_size,0,sizeof(int)*K);
is_set_hx = false;
//todo make it memset
for(int i=0;i<N;i++){
this->Cx[i] = INT_MAX;
}
}
template <class T, class U >
void GPU_A_Star< T, U> :: __alloc_gpu()
{
int N = this->graph->get_graph().get_num_nodes();
gpuErrchk ( cudaMalloc(&d_Cx,sizeof(U)*N ) );
gpuErrchk ( cudaMalloc(&d_parent,sizeof(int)*N ) );
gpuErrchk ( cudaMalloc(&d_open_list,sizeof(int)*N ) );
gpuErrchk ( cudaMalloc(&d_PQ,sizeof(unsigned int)*N ) );
gpuErrchk ( cudaMalloc(&d_PQ_size,sizeof(unsigned int)*num_pq ) );
gpuErrchk ( cudaMemcpy(d_Cx,Cx,sizeof(U)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(d_PQ_size,PQ_size,sizeof(unsigned int)*num_pq,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(d_parent,parent,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(d_open_list,open_list,sizeof(int)*N,cudaMemcpyHostToDevice) );
}
template <class T, class U >
void GPU_A_Star< T, U> :: set_huiristics(U* hx)
{
this->Hx = hx;
is_set_hx = true;
int N = this->graph->get_graph().get_num_nodes();
gpuErrchk ( cudaMalloc(&d_Hx,sizeof(U)*N ) );
gpuErrchk ( cudaMemcpy(d_Hx,Hx,sizeof(U)*N,cudaMemcpyHostToDevice) );
}
template <class T, class U >
std::vector<int> GPU_A_Star< T, U>:: get_path()
{
int N = this->graph->get_graph().get_num_nodes();
int E = this->graph->get_graph().get_num_edges();
int K = this->num_pq;
//init Host var
int* flag_end = (int*)malloc(sizeof(int));
int* flag_found = (int*)malloc(sizeof(int));
int* __a0 = (int*)malloc(sizeof(int));
*__a0 = 0;
//flag array needed because several threads may try to add the same vertex to the queues
int* next_vertices_flag = (int*)malloc(sizeof(int)*N);
memset(next_vertices_flag,-1,sizeof(int)*N);
*flag_end = 0;
*flag_found = 0;
//insert startNode in PQ[0]
Cx[this->start_node] = Hx[this->start_node];
PQ[0] = this->start_node;
PQ_size[0]=1;
open_list[this->start_node]=0;
//alloc
__alloc_gpu();
//next nodes flag
int* d_next_vertices_flag;
//next nodes array to insert PQ
int* d_next_vertices;
int* d_next_vertices_size;
//nodes to be expanded ( extracted from PQ )
int* d_expand_nodes;
int* d_expand_nodes_size;
//flag to end while loop and found the destination
int* d_flag_end;
int* d_flag_found;
//cost of endNode
U* d_dest_cost;
//lock for nodes
int* d_lock;
gpuErrchk ( cudaMalloc(&d_lock,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&d_dest_cost,sizeof(U)) );
//for next set of vertices to add in PQ
gpuErrchk ( cudaMalloc(&d_next_vertices,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&d_next_vertices_size,sizeof(int)) );
gpuErrchk ( cudaMalloc(&d_next_vertices_flag,sizeof(int)*N) );
//next nodes to expand
gpuErrchk ( cudaMalloc(&d_expand_nodes,sizeof(int)*K) ); //changed to K
gpuErrchk ( cudaMalloc(&d_expand_nodes_size,sizeof(int)) );
//flag to end search
gpuErrchk( cudaMalloc(&d_flag_end,sizeof(int)) );
gpuErrchk( cudaMalloc(&d_flag_found,sizeof(int)) );
gpuErrchk ( cudaMemcpy(d_flag_end,flag_end,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(d_flag_found,flag_found,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(d_next_vertices_flag,next_vertices_flag,sizeof(int)*N,cudaMemcpyHostToDevice) );
// gpuErrchk ( cudaMemcpy(d_next_vertices_size,__a0,sizeof(int),cudaMemcpyHostToDevice) );
// gpuErrchk ( cudaMemcpy(d_expand_nodes_size,__a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemset(d_next_vertices_size,0,sizeof(int)) );
gpuErrchk ( cudaMemset(d_expand_nodes_size,0,sizeof(int)) );
gpuErrchk ( cudaMemset(d_lock,0,sizeof(int)*N) );
int flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(PQ_size[i]>0)
flag_PQ_not_empty=1;
}
int numThreads = 512;
int numBlocks = (K+numThreads-1)/numThreads;
int N_numBlocks = (N+numThreads-1)/numThreads;
#ifdef DEBUG
printf("[INFO] A* started\n");
#endif
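//each iteration: extract candidates from the K priority queues, expand them and
//relax neighbour costs, restore the heap property, gather newly touched vertices,
//insert them into the queues, and finally test whether the destination can be settled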
//do A* initially on the whole graph
while(*flag_end==0 && flag_PQ_not_empty==1){
//extract min
extractMin <U> <<<numBlocks,numThreads>>>( d_PQ, d_PQ_size, d_expand_nodes, d_expand_nodes_size, d_Cx, d_open_list, N, K );
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
A_star_expand < T, U > <<<numBlocks,numThreads>>> (
this->graph->get_graph().get_offsets(),this->graph->get_graph().get_edges(),this->graph->get_graph().get_weight(),
d_Hx,d_parent,d_Cx,
d_expand_nodes,d_expand_nodes_size, d_lock ,d_flag_found,d_open_list,
N,E,K,this->end_node,d_next_vertices_flag
);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
keepHeapPQ < U > <<<numBlocks,numThreads>>>( d_PQ, d_PQ_size, d_Cx, N, K );
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
//gen from flag d_next_vertices
//for N in parallel
setNV<<<N_numBlocks,numThreads>>>(d_next_vertices_flag, d_next_vertices, d_next_vertices_size, N );
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
insertPQ < U > <<<numBlocks,numThreads>>>( d_PQ, d_PQ_size, d_next_vertices, d_next_vertices_size, d_Cx, K, N, d_open_list );
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
//cpy flagend and flagEmpty
gpuErrchk( cudaMemcpy(flag_found,d_flag_found, sizeof(int),cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(PQ_size,d_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost) );
//reset nVFlag
gpuErrchk( cudaMemcpy(d_next_vertices_flag,next_vertices_flag,sizeof(int)*N,cudaMemcpyHostToDevice) );
//reset next insert array
gpuErrchk( cudaMemcpy(d_next_vertices_size, __a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_expand_nodes_size, __a0,sizeof(int),cudaMemcpyHostToDevice) );
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//check for mins
if( *flag_found==1 && flag_PQ_not_empty==1){
//end
gpuErrchk( cudaMemcpy(d_flag_end,flag_found,sizeof(int),cudaMemcpyHostToDevice) );
checkMIN < U > <<< numBlocks,numThreads >>>(d_PQ, d_PQ_size, d_flag_end, d_Cx, this->end_node, N, K );
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
gpuErrchk( cudaMemcpy(flag_end,d_flag_end, sizeof(int),cudaMemcpyDeviceToHost) );
}
}
getCx < U > <<<1,1>>>( d_Cx, this->end_node,d_dest_cost);
U dest_cost;
gpuErrchk( cudaMemcpy(&dest_cost,d_dest_cost, sizeof(U),cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(parent,d_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) );
std::vector<int> Path;
if(dest_cost != INT_MAX){
int p = this->end_node;
while(parent[p]!=-1){
Path.push_back(p);
p = parent[p];
}
Path.push_back(p);
}
std::reverse(Path.begin(),Path.end());
return Path;
}
#endif
|
8185321bc3c01a691cfaef349b00e352d23c7dfa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cudaFunctions.h"
int maxThreadsPerSM(hipDeviceProp_t * prop) {
if (prop->major >= 3)
return 2048;
if (prop->major == 2)
return 1536;
if (prop->minor >= 2)
return 1024;
return 768;
}
int maxBlocksPerSM(hipDeviceProp_t * prop) {
if (prop->major >= 5)
return 32;
if (prop->major == 3)
return 16;
return 8;
}
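// Helpers that flatten 3D block/thread coordinates into a linear id,
// numbering x fastest, then y, then z.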
__device__ int getBlockId(dim3 blockIdx, dim3 gridDim) {
return blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
}
__device__ int getThreadId(int blockId, dim3 threadIdx, dim3 blockDim) {
return blockId * (blockDim.x * blockDim.y * blockDim.z) +
(threadIdx.z * (blockDim.x * blockDim.y)) +
(threadIdx.y * blockDim.x) + threadIdx.x;
}
|
8185321bc3c01a691cfaef349b00e352d23c7dfa.cu
|
#include "cudaFunctions.h"
int maxThreadsPerSM(cudaDeviceProp * prop) {
if (prop->major >= 3)
return 2048;
if (prop->major == 2)
return 1536;
if (prop->minor >= 2)
return 1024;
return 768;
}
int maxBlocksPerSM(cudaDeviceProp * prop) {
if (prop->major >= 5)
return 32;
if (prop->major == 3)
return 16;
return 8;
}
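// Helpers that flatten 3D block/thread coordinates into a linear id,
// numbering x fastest, then y, then z.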
__device__ int getBlockId(dim3 blockIdx, dim3 gridDim) {
return blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
}
__device__ int getThreadId(int blockId, dim3 threadIdx, dim3 blockDim) {
return blockId * (blockDim.x * blockDim.y * blockDim.z) +
(threadIdx.z * (blockDim.x * blockDim.y)) +
(threadIdx.y * blockDim.x) + threadIdx.x;
}
|
c307b9a9ff2eca3b07cd078de2181905def5db1c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <chrono>
using namespace std::chrono;
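// Shared-memory tree reduction: each thread sums two inputs while loading into
// dynamic shared memory, then partial sums are combined with a halving stride;
// the per-block result ends up in sdata[0] (it is not written to g_odata here).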
__global__ void reduce4(float *g_idata, float *g_odata) {
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
sdata[tid] = g_idata[i] + g_idata[i + blockDim.x];
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) { sdata[tid] += sdata[tid + s]; }
__syncthreads();
}
}
int main(void) {
int N = 100000000;
float *g_indata_host, *g_indata_device, *g_outdata_host, *g_outdata_device;
g_indata_host = (float *) malloc(N * sizeof(float));
g_outdata_host = (float *) malloc(sizeof(float));
hipMalloc(&g_indata_device, N * sizeof(float));
hipMalloc(&g_outdata_device, sizeof(float));
for (auto i = 0; i < N; i++) {
g_indata_host[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);;
}
hipMemcpy(g_indata_device, g_indata_host, N * sizeof(float), hipMemcpyHostToDevice);
// This is where the code is run
auto start = high_resolution_clock::now();
// each block reduces 2*blockDim.x inputs, so halve the grid and pass blockDim.x floats of dynamic shared memory
hipLaunchKernelGGL(( reduce4), dim3((N + 511) / 512), dim3(256), 256 * sizeof(float), 0, g_indata_device, g_outdata_device);
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
std::cout << "Time taken by function: "
<< duration.count() << " microseconds" << std::endl;
hipFree(g_indata_device);
hipFree(g_outdata_device);
free(g_indata_host);
free(g_outdata_host);
}
|
c307b9a9ff2eca3b07cd078de2181905def5db1c.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <chrono>
using namespace std::chrono;
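// Shared-memory tree reduction: each thread sums two inputs while loading into
// dynamic shared memory, then partial sums are combined with a halving stride;
// the per-block result ends up in sdata[0] (it is not written to g_odata here).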
__global__ void reduce4(float *g_idata, float *g_odata) {
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
sdata[tid] = g_idata[i] + g_idata[i + blockDim.x];
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) { sdata[tid] += sdata[tid + s]; }
__syncthreads();
}
}
int main(void) {
int N = 100000000;
float *g_indata_host, *g_indata_device, *g_outdata_host, *g_outdata_device;
g_indata_host = (float *) malloc(N * sizeof(float));
g_outdata_host = (float *) malloc(sizeof(float));
cudaMalloc(&g_indata_device, N * sizeof(float));
cudaMalloc(&g_outdata_device, sizeof(float));
for (auto i = 0; i < N; i++) {
g_indata_host[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);;
}
cudaMemcpy(g_indata_device, g_indata_host, N * sizeof(float), cudaMemcpyHostToDevice);
// This is where the code is run
auto start = high_resolution_clock::now();
// each block reduces 2*blockDim.x inputs, so halve the grid and pass blockDim.x floats of dynamic shared memory
reduce4<<<(N + 511) / 512, 256, 256 * sizeof(float)>>>(g_indata_device, g_outdata_device);
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
std::cout << "Time taken by function: "
<< duration.count() << " microseconds" << std::endl;
cudaFree(g_indata_device);
cudaFree(g_outdata_device);
free(g_indata_host);
free(g_outdata_host);
}
|
03668ee9afe838ac0e37b735ebd8f80279e0eb19.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utilities.h"
#include "diffuse.h"
#include "metal.h"
#include "dielectric.h"
#ifndef __HIPCC__
#include "device_launch_parameters.h"
#endif
/*
#define checkCudaErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
}*/
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) {
if (result) {
const char* stringError = hipGetErrorString(result);
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
std::cerr << "CUDA error string " << stringError <<"\n";
// Make sure we call CUDA Device Reset before exiting
hipDeviceReset();
exit(99);
}
}
//whether the ray intersects with the sphere or not
/*float hit_sphere(const vec3& center, float radius, const Ray& r)
{
vec3 oc = r.origin() - center;
float a = dot(r.direction(), r.direction());
float b = 2 * dot(oc, r.direction());
float c = dot(oc, oc) - radius*radius;
float delta = b*b - 4*a*c;
if (delta < 0)
return -1;
else
return (-b - sqrt(delta)) / (2 * a);
}*/
//get a random scene made of sphere
//this kernel creates the scene composed of hitables
__global__ void create_world(Hitable* d_list,hiprandState_t* world_rand_states, int limit)
{
int a = blockIdx.x - limit;
int b = threadIdx.x - limit;
if (a >= limit && b >= limit)
return;
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index == 0)
d_list[0] = Sphere(vec3(0, -1000, 0), 1000, new diffuse(vec3(0.5, 0.5, 0.5), world_rand_states[index]));
float choose_mat = hiprand_uniform(&world_rand_states[index]);
float x_offset = hiprand_uniform(&world_rand_states[index]);
float z_offset = hiprand_uniform(&world_rand_states[index]);
vec3 center = vec3(a + 0.9*x_offset, 0.2, b + 0.9 * z_offset);
if ((center - vec3(4.0, 0.2, 0.0)).length() > 0.9)
{
if (choose_mat < 0.8) //choose diffuse
{
d_list[index] = Sphere(center, 0.2, new diffuse(vec3(hiprand_uniform(&world_rand_states[index])*hiprand_uniform(&world_rand_states[index]), hiprand_uniform(&world_rand_states[index])*hiprand_uniform(&world_rand_states[index]), hiprand_uniform(&world_rand_states[index])*hiprand_uniform(&world_rand_states[index])),world_rand_states[index]));
}
else if (choose_mat < 0.95)//choose metal
{
d_list[index] = Sphere(center, 0.2, new metal(vec3(0.5*(hiprand_uniform(&world_rand_states[index]) + 1), 0.5*(hiprand_uniform(&world_rand_states[index]) + 1), 0.5*(hiprand_uniform(&world_rand_states[index]) + 1)), 0.5*hiprand_uniform(&world_rand_states[index]),world_rand_states[index]));
}
else//choose glass
{
d_list[index] = Sphere(center, 0.2, new dielectric(1.5, world_rand_states[index]));
}
}
}
//initialize random states for hitables in world creation
__global__ void world_init(hiprandState_t* world_rand_states, int n_objects)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index >= n_objects)
return;
//Each thread gets same seed, a different sequence number, no offset
hiprand_init(1984, index, 0, &world_rand_states[index]);
}
//initialize rendering and random states for pixels
__global__ void render_init(int max_x, int max_y, hiprandState_t* pixel_rand_states, camera* cam)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y))
return;
int index = j * max_x + i;
//Each thread gets same seed, a different sequence number, no offset
hiprand_init(1984, index, 0, &pixel_rand_states[index]);
//creating the camera
vec3 lookfrom(2, 2, 2);
vec3 lookat(0, 1, 0);
float dist_to_focus = (lookat - lookfrom).length();
float aperture = 2.0;
*cam = camera(lookfrom, lookat, vec3(0, 1, 0), 60, float(max_x) / float(max_y), aperture, dist_to_focus);
}
/*
__device__ Hitable *generate_random_scene(int n)
{
Hitable** d_list;
checkCudaError(hipMalloc((void**)&d_list, 2 * sizeof(Hitable*)));
d_list[0] = new Sphere(vec3(0, -1000, 0), 1000, new diffuse(vec3(0.5, 0.5, 0.5)));
int i = 1;
for (int a = -11; a < 11; a++)
{
for (int b = -11; b < 11; b++)
{
float choose_mat = drand48();
vec3 center(a + 0.9*drand48(), 0.2, b + 0.9*drand48());
if ((center - vec3(4.0, 0.2, 0.0)).length() > 0.9)
{
if (choose_mat < 0.8) //choose diffuse
{
list[i++] = new Sphere(center, 0.2, new diffuse(vec3(drand48()*drand48(), drand48()*drand48(), drand48()*drand48())));
}
else if (choose_mat < 0.95)//choose metal
{
list[i++] = new Sphere(center, 0.2, new metal(vec3(0.5*(drand48() + 1), 0.5*(drand48() + 1), 0.5*(drand48() + 1)), 0.5*drand48()));
}
else//choose glass
{
list[i++] = new Sphere(center, 0.2, new dielectric(1.5));
}
}
}
}
list[i++] = new Sphere(vec3(0, 1, 0), 1, new dielectric(1.5));
list[i++] = new Sphere(vec3(-4, 1, 0), 1, new diffuse(vec3(0.4, 0.2, 0.1)));
list[i++] = new Sphere(vec3(4, 1, 0), 1, new metal(vec3(0.7, 0.6, 0.5), 0.0));
return new Hitable_list(list, i);
}
*/
__global__ void render(float* fb, int max_x, int max_y, int ns,camera* cam, Hitable* world, hiprandState_t* pixel_rand_states)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y))
return;
//do stuff here
int index = j * max_x + i;
hiprandState_t local_rand_state = pixel_rand_states[index];
vec3 col(0, 0, 0);
for (int s = 0; s < ns; s++)
{
float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y);
Ray r = (cam)->get_ray(u, v);
col += d_color(r,world,50,local_rand_state);
}
// average over the samples and write the RGB triple for this pixel
fb[3 * index] = col.x() / float(ns);
fb[3 * index + 1] = col.y() / float(ns);
fb[3 * index + 2] = col.z() / float(ns);
}
int main()
{
/*int devicesCount;
hipGetDeviceCount(&devicesCount);
for (int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex)
{
hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, deviceIndex);
std::cout << deviceProperties.name << std::endl;
}
*/
//initializing the world rand states
hiprandState_t* world_rand_states = nullptr;
int limit = 5;
//number of object hitables that will need random in their creation
int n_objects = limit * limit;
size_t world_rand_states_size = n_objects * sizeof(hiprandState_t);
checkCudaErrors(hipMalloc((void**)&world_rand_states, world_rand_states_size));
dim3 blocks(n_objects / 8 + 1);
dim3 threads(8);
hipLaunchKernelGGL(( world_init), dim3(blocks),dim3(threads), 0, 0, world_rand_states, n_objects);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
//creating the hitables in random positions and with random materials
Hitable* d_list;
//allocating memory
checkCudaErrors(hipMalloc((void**)&d_list, (n_objects + 4) * sizeof(Hitable)));
//assigning the first sphere
blocks.x = n_objects/limit;
threads.x = limit;
hipLaunchKernelGGL(( create_world), dim3(blocks), dim3(threads), 0, 0, d_list, world_rand_states, limit);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
std::cout << "world created";
//initializing height, width and number of samples
int nx = 600;
int ny = 300;
int ns = 100;
camera* cam;
hipMalloc((void**)&cam, sizeof(camera));
//frame buffer size
size_t fb_size = 3 * nx * ny * sizeof(float); // three floats (RGB) per pixel
//allocating frame buffer
float* fb;
checkCudaErrors(hipMallocManaged((void**)&fb, fb_size));
//initializing the pixel rand states
hiprandState_t* pixel_rand_states = nullptr;
//allocate one state per pixel before render_init fills them in
checkCudaErrors(hipMalloc((void**)&pixel_rand_states, nx * ny * sizeof(hiprandState_t)));
int tx = 8;
int ty = 8;
dim3 b(nx / tx + 1, ny / ty + 1);
dim3 t(tx, ty);
hipLaunchKernelGGL(( render_init), dim3(b),dim3(t), 0, 0, nx, ny, pixel_rand_states, cam);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
//call the kernel
hipLaunchKernelGGL(( render), dim3(b),dim3(t), 0, 0, fb, nx, ny, ns, cam, d_list, pixel_rand_states);
checkCudaErrors(hipGetLastError());
//block until the job is done on the GPU
checkCudaErrors(hipDeviceSynchronize());
//print out the rendered frame to the ppm file
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
int total = nx * ny * 3;
for (int j = ny - 1; j >= 0; j--)
{
for (int i = 0; i < nx; i++)
{
size_t index = (i + j * nx)*3;
if (index < total)
{
float r = fb[index];
float g = fb[index + 1];
float b = fb[index + 2];
int ir = int(255.99*r);
int ig = int(255.99*g);
int ib = int(255.99*b);
std::cout << ir << " " << ig << " " << ib << "\n";
}
}
}
//freeing the frame buffer memory
checkCudaErrors(hipFree(fb));
checkCudaErrors(hipFree(d_list));
//checkCudaError(hipFree(fb));
//checkCudaError(hipFree(d_list));
//checkCudaError(hipFree(fb));
//file header
}
|
03668ee9afe838ac0e37b735ebd8f80279e0eb19.cu
|
#include "utilities.h"
#include "diffuse.h"
#include "metal.h"
#include "dielectric.h"
#ifndef __CUDACC__
#include "device_launch_parameters.h"
#endif
/*
#define checkCudaErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
}*/
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
if (result) {
const char* stringError = cudaGetErrorString(result);
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
std::cerr << "CUDA error string " << stringError <<"\n";
// Make sure we call CUDA Device Reset before exiting
cudaDeviceReset();
exit(99);
}
}
//whether the ray intersects with the sphere or not
/*float hit_sphere(const vec3& center, float radius, const Ray& r)
{
vec3 oc = r.origin() - center;
float a = dot(r.direction(), r.direction());
float b = 2 * dot(oc, r.direction());
float c = dot(oc, oc) - radius*radius;
float delta = b*b - 4*a*c;
if (delta < 0)
return -1;
else
return (-b - sqrt(delta)) / (2 * a);
}*/
//get a random scene made of sphere
//this kernel creates the scene composed of hitables
__global__ void create_world(Hitable* d_list,curandState* world_rand_states, int limit)
{
int a = blockIdx.x - limit;
int b = threadIdx.x - limit;
if (a >= limit && b >= limit)
return;
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index == 0)
d_list[0] = Sphere(vec3(0, -1000, 0), 1000, new diffuse(vec3(0.5, 0.5, 0.5), world_rand_states[index]));
float choose_mat = curand_uniform(&world_rand_states[index]);
float x_offset = curand_uniform(&world_rand_states[index]);
float z_offset = curand_uniform(&world_rand_states[index]);
vec3 center = vec3(a + 0.9*x_offset, 0.2, b + 0.9 * z_offset);
if ((center - vec3(4.0, 0.2, 0.0)).length() > 0.9)
{
if (choose_mat < 0.8) //choose diffuse
{
d_list[index] = Sphere(center, 0.2, new diffuse(vec3(curand_uniform(&world_rand_states[index])*curand_uniform(&world_rand_states[index]), curand_uniform(&world_rand_states[index])*curand_uniform(&world_rand_states[index]), curand_uniform(&world_rand_states[index])*curand_uniform(&world_rand_states[index])),world_rand_states[index]));
}
else if (choose_mat < 0.95)//choose metal
{
d_list[index] = Sphere(center, 0.2, new metal(vec3(0.5*(curand_uniform(&world_rand_states[index]) + 1), 0.5*(curand_uniform(&world_rand_states[index]) + 1), 0.5*(curand_uniform(&world_rand_states[index]) + 1)), 0.5*curand_uniform(&world_rand_states[index]),world_rand_states[index]));
}
else//choose glass
{
d_list[index] = Sphere(center, 0.2, new dielectric(1.5, world_rand_states[index]));
}
}
}
//initialize random states for hitables in world creation
__global__ void world_init(curandState* world_rand_states, int n_objects)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index >= n_objects)
return;
//Each thread gets same seed, a different sequence number, no offset
curand_init(1984, index, 0, &world_rand_states[index]);
}
//initialize rendering and random states for pixels
__global__ void render_init(int max_x, int max_y, curandState* pixel_rand_states, camera* cam)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y))
return;
int index = j * max_x + i;
//Each thread gets same seed, a different sequence number, no offset
curand_init(1984, index, 0, &pixel_rand_states[index]);
//creating the camera
vec3 lookfrom(2, 2, 2);
vec3 lookat(0, 1, 0);
float dist_to_focus = (lookat - lookfrom).length();
float aperture = 2.0;
*cam = camera(lookfrom, lookat, vec3(0, 1, 0), 60, float(max_x) / float(max_y), aperture, dist_to_focus);
}
/*
__device__ Hitable *generate_random_scene(int n)
{
Hitable** d_list;
checkCudaError(cudaMalloc((void**)&d_list, 2 * sizeof(Hitable*)));
d_list[0] = new Sphere(vec3(0, -1000, 0), 1000, new diffuse(vec3(0.5, 0.5, 0.5)));
int i = 1;
for (int a = -11; a < 11; a++)
{
for (int b = -11; b < 11; b++)
{
float choose_mat = drand48();
vec3 center(a + 0.9*drand48(), 0.2, b + 0.9*drand48());
if ((center - vec3(4.0, 0.2, 0.0)).length() > 0.9)
{
if (choose_mat < 0.8) //choose diffuse
{
list[i++] = new Sphere(center, 0.2, new diffuse(vec3(drand48()*drand48(), drand48()*drand48(), drand48()*drand48())));
}
else if (choose_mat < 0.95)//choose metal
{
list[i++] = new Sphere(center, 0.2, new metal(vec3(0.5*(drand48() + 1), 0.5*(drand48() + 1), 0.5*(drand48() + 1)), 0.5*drand48()));
}
else//choose glass
{
list[i++] = new Sphere(center, 0.2, new dielectric(1.5));
}
}
}
}
list[i++] = new Sphere(vec3(0, 1, 0), 1, new dielectric(1.5));
list[i++] = new Sphere(vec3(-4, 1, 0), 1, new diffuse(vec3(0.4, 0.2, 0.1)));
list[i++] = new Sphere(vec3(4, 1, 0), 1, new metal(vec3(0.7, 0.6, 0.5), 0.0));
return new Hitable_list(list, i);
}
*/
__global__ void render(float* fb, int max_x, int max_y, int ns,camera* cam, Hitable* world, curandState* pixel_rand_states)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y))
return;
//do stuff here
int index = j * max_x + i;
curandState local_rand_state = pixel_rand_states[index];
vec3 col(0, 0, 0);
for (int s = 0; s < ns; s++)
{
float u = float(i + curand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + curand_uniform(&local_rand_state)) / float(max_y);
Ray r = (cam)->get_ray(u, v);
col += d_color(r,world,50,local_rand_state);
}
// average over the samples and write the RGB triple for this pixel
fb[3 * index] = col.x() / float(ns);
fb[3 * index + 1] = col.y() / float(ns);
fb[3 * index + 2] = col.z() / float(ns);
}
int main()
{
/*int devicesCount;
cudaGetDeviceCount(&devicesCount);
for (int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex)
{
cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, deviceIndex);
std::cout << deviceProperties.name << std::endl;
}
*/
//initializing the world rand states
curandState* world_rand_states = nullptr;
int limit = 5;
//number of object hitables that will need random in their creation
int n_objects = limit * limit;
size_t world_rand_states_size = n_objects * sizeof(curandState);
checkCudaErrors(cudaMalloc((void**)&world_rand_states, world_rand_states_size));
dim3 blocks(n_objects / 8 + 1);
dim3 threads(8);
world_init<<<blocks,threads>>>(world_rand_states, n_objects);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
//creating the hitables in random positions and with random materials
Hitable* d_list;
//allocating memory
checkCudaErrors(cudaMalloc((void**)&d_list, (n_objects + 4) * sizeof(Hitable)));
//assigning the first sphere
blocks.x = n_objects/limit;
threads.x = limit;
create_world<<<blocks, threads>>>(d_list, world_rand_states, limit);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "world created";
//initializing height, width and number of samples
int nx = 600;
int ny = 300;
int ns = 100;
camera* cam;
cudaMalloc((void**)&cam, sizeof(camera));
//frame buffer size
size_t fb_size = 3 * nx * ny * sizeof(float); // three floats (RGB) per pixel
//allocating frame buffer
float* fb;
checkCudaErrors(cudaMallocManaged((void**)&fb, fb_size));
//initializing the pixel rand states
curandState* pixel_rand_states = nullptr;
//allocate one state per pixel before render_init fills them in
checkCudaErrors(cudaMalloc((void**)&pixel_rand_states, nx * ny * sizeof(curandState)));
int tx = 8;
int ty = 8;
dim3 b(nx / tx + 1, ny / ty + 1);
dim3 t(tx, ty);
render_init<<<b,t>>>(nx, ny, pixel_rand_states, cam);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
//call the kernel
render<<<b,t>>>(fb, nx, ny, ns, cam, d_list, pixel_rand_states);
checkCudaErrors(cudaGetLastError());
//block until the job is done on the GPU
checkCudaErrors(cudaDeviceSynchronize());
//print out the rendered frame to the ppm file
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
int total = nx * ny * 3;
for (int j = ny - 1; j >= 0; j--)
{
for (int i = 0; i < nx; i++)
{
size_t index = (i + j * nx)*3;
if (index < total)
{
float r = fb[index];
float g = fb[index + 1];
float b = fb[index + 2];
int ir = int(255.99*r);
int ig = int(255.99*g);
int ib = int(255.99*b);
std::cout << ir << " " << ig << " " << ib << "\n";
}
}
}
//freeing the frame buffer memory
checkCudaErrors(cudaFree(fb));
checkCudaErrors(cudaFree(d_list));
//checkCudaError(cudaFree(fb));
//checkCudaError(cudaFree(d_list));
//checkCudaError(cudaFree(fb));
//file header
}
|
2f404ca4b7da55e68a4db5876df5e81e713d2ebc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include "timer.h"
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(hipGetLastError());
*/
static void checkCudaCall(hipError_t result) {
if (result != hipSuccess) {
cerr << "cuda error: " << hipGetErrorString(result) << endl;
exit(1);
}
}
__global__ void histogramKernel(unsigned char* image, long img_size, unsigned int* histogram, int hist_size) {
// insert operation here
}
void histogramCuda(unsigned char* image, long img_size, unsigned int* histogram, int hist_size) {
int threadBlockSize = 512;
float kernelTime = 0;
float h2dTime, d2hTime, memTime = 0;
hipEvent_t s1,s2,s3,s4,s5,s6;
// allocate the vectors on the GPU
unsigned char* deviceImage = NULL;
checkCudaCall(hipMalloc((void **) &deviceImage, img_size * sizeof(unsigned char)));
if (deviceImage == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
unsigned int* deviceHisto = NULL;
checkCudaCall(hipMalloc((void **) &deviceHisto, hist_size * sizeof(unsigned int)));
if (deviceHisto == NULL) {
checkCudaCall(hipFree(deviceImage));
cout << "could not allocate memory!" << endl;
return;
}
hipEventCreate(&s1);
hipEventCreate(&s2);
hipEventCreate(&s3);
hipEventCreate(&s4);
hipEventCreate(&s5);
hipEventCreate(&s6);
// copy the original vectors to the GPU
hipEventRecord(s1,0);
checkCudaCall(hipMemcpy(deviceImage, image, img_size*sizeof(unsigned char), hipMemcpyHostToDevice));
hipEventRecord(s2,0);
// execute kernel
hipEventRecord(s3,0);
hipLaunchKernelGGL(( histogramKernel), dim3(img_size/threadBlockSize), dim3(threadBlockSize), 0, 0, deviceImage, img_size, deviceHisto, hist_size);
hipEventRecord(s4,0);
// check whether the kernel invocation was successful
checkCudaCall(hipGetLastError());
// copy result back
hipEventRecord(s5,0);
checkCudaCall(hipMemcpy(histogram, deviceHisto, hist_size * sizeof(unsigned int), hipMemcpyDeviceToHost));
hipEventRecord(s6,0);
checkCudaCall(hipFree(deviceImage));
checkCudaCall(hipFree(deviceHisto));
hipEventSynchronize(s6);
hipEventElapsedTime(&h2dTime, s1, s2);
hipEventElapsedTime(&kernelTime, s3, s4);
hipEventElapsedTime(&d2hTime, s5, s6);
cout << "histogram (kernel): \t\t" << kernelTime / 1000 << " seconds." << endl;
cout << "histogram (memory): \t\t" << (h2dTime+d2hTime)/1000 << " seconds." << endl;
/*
hipEventDestroy(s1);
hipEventDestroy(s2);
hipEventDestroy(s3);
hipEventDestroy(s4);
hipEventDestroy(s5);
hipEventDestroy(s6);
*/
}
void histogramSeq(unsigned char* image, long img_size, unsigned int* histogram, int hist_size) {
int i;
timer sequentialTime = timer("Sequential");
for (i=0; i<hist_size; i++) histogram[i]=0;
sequentialTime.start();
for (i=0; i<img_size; i++) {
histogram[image[i]]++;
}
sequentialTime.stop();
cout << "histogram (sequential): \t\t" << sequentialTime << endl;
}
int main(int argc, char* argv[]) {
long img_size = 655360;
int hist_size = 256;
if (argc > 1) img_size = atoi(argv[1]);
if (img_size < 1024) {
cout << "Error in parameter" << endl;
exit(-1);
}
unsigned char *image = (unsigned char *)malloc(img_size * sizeof(unsigned char));
unsigned int *histogramS = (unsigned int *)malloc(hist_size * sizeof(unsigned int));
unsigned int *histogram = (unsigned int *)malloc(hist_size * sizeof(unsigned int));
// initialize the vectors.
for(long i=0; i<img_size; i++) {
image[i] = (unsigned char) (i % hist_size);
}
cout << "Compute the histogram of a gray image with " << img_size << " pixels." << endl;
histogramSeq(image, img_size, histogramS, hist_size);
histogramCuda(image, img_size, histogram, hist_size);
// verify the results
for(int i=0; i<hist_size; i++) {
if (histogram[i]!=histogramS[i]) {
cout << "error in results! Bin " << i << " is "<< histogram[i] << ", but should be " << histogramS[i] << endl;
exit(1);
}
}
cout << "results OK!" << endl;
free(image);
free(histogram);
free(histogramS);
return 0;
}
|
2f404ca4b7da55e68a4db5876df5e81e713d2ebc.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include "timer.h"
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(cudaGetLastError());
*/
static void checkCudaCall(cudaError_t result) {
if (result != cudaSuccess) {
cerr << "cuda error: " << cudaGetErrorString(result) << endl;
exit(1);
}
}
__global__ void histogramKernel(unsigned char* image, long img_size, unsigned int* histogram, int hist_size) {
// insert operation here
}
void histogramCuda(unsigned char* image, long img_size, unsigned int* histogram, int hist_size) {
int threadBlockSize = 512;
float kernelTime = 0;
float h2dTime, d2hTime, memTime = 0;
cudaEvent_t s1,s2,s3,s4,s5,s6;
// allocate the vectors on the GPU
unsigned char* deviceImage = NULL;
checkCudaCall(cudaMalloc((void **) &deviceImage, img_size * sizeof(unsigned char)));
if (deviceImage == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
unsigned int* deviceHisto = NULL;
checkCudaCall(cudaMalloc((void **) &deviceHisto, hist_size * sizeof(unsigned int)));
if (deviceHisto == NULL) {
checkCudaCall(cudaFree(deviceImage));
cout << "could not allocate memory!" << endl;
return;
}
cudaEventCreate(&s1);
cudaEventCreate(&s2);
cudaEventCreate(&s3);
cudaEventCreate(&s4);
cudaEventCreate(&s5);
cudaEventCreate(&s6);
// copy the original vectors to the GPU
cudaEventRecord(s1,0);
checkCudaCall(cudaMemcpy(deviceImage, image, img_size*sizeof(unsigned char), cudaMemcpyHostToDevice));
cudaEventRecord(s2,0);
// execute kernel
cudaEventRecord(s3,0);
histogramKernel<<<img_size/threadBlockSize, threadBlockSize>>>(deviceImage, img_size, deviceHisto, hist_size);
cudaEventRecord(s4,0);
// check whether the kernel invocation was successful
checkCudaCall(cudaGetLastError());
// copy result back
cudaEventRecord(s5,0);
checkCudaCall(cudaMemcpy(histogram, deviceHisto, hist_size * sizeof(unsigned int), cudaMemcpyDeviceToHost));
cudaEventRecord(s6,0);
checkCudaCall(cudaFree(deviceImage));
checkCudaCall(cudaFree(deviceHisto));
cudaEventSynchronize(s6);
cudaEventElapsedTime(&h2dTime, s1, s2);
cudaEventElapsedTime(&kernelTime, s3, s4);
cudaEventElapsedTime(&d2hTime, s5, s6);
cout << "histogram (kernel): \t\t" << kernelTime / 1000 << " seconds." << endl;
cout << "histogram (memory): \t\t" << (h2dTime+d2hTime)/1000 << " seconds." << endl;
/*
cudaEventDestroy(s1);
cudaEventDestroy(s2);
cudaEventDestroy(s3);
cudaEventDestroy(s4);
cudaEventDestroy(s5);
cudaEventDestroy(s6);
*/
}
void histogramSeq(unsigned char* image, long img_size, unsigned int* histogram, int hist_size) {
int i;
timer sequentialTime = timer("Sequential");
for (i=0; i<hist_size; i++) histogram[i]=0;
sequentialTime.start();
for (i=0; i<img_size; i++) {
histogram[image[i]]++;
}
sequentialTime.stop();
cout << "histogram (sequential): \t\t" << sequentialTime << endl;
}
int main(int argc, char* argv[]) {
long img_size = 655360;
int hist_size = 256;
if (argc > 1) img_size = atoi(argv[1]);
if (img_size < 1024) {
cout << "Error in parameter" << endl;
exit(-1);
}
unsigned char *image = (unsigned char *)malloc(img_size * sizeof(unsigned char));
unsigned int *histogramS = (unsigned int *)malloc(hist_size * sizeof(unsigned int));
unsigned int *histogram = (unsigned int *)malloc(hist_size * sizeof(unsigned int));
// initialize the vectors.
for(long i=0; i<img_size; i++) {
image[i] = (unsigned char) (i % hist_size);
}
cout << "Compute the histogram of a gray image with " << img_size << " pixels." << endl;
histogramSeq(image, img_size, histogramS, hist_size);
histogramCuda(image, img_size, histogram, hist_size);
// verify the results
for(int i=0; i<hist_size; i++) {
if (histogram[i]!=histogramS[i]) {
cout << "error in results! Bin " << i << " is "<< histogram[i] << ", but should be " << histogramS[i] << endl;
exit(1);
}
}
cout << "results OK!" << endl;
free(image);
free(histogram);
free(histogramS);
return 0;
}
|
25958fdc993aae6d8d7c616d3cb59f695145d1c6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<assert.h>
#include<cuda.h>
#define N 1000000
#define HANDLE_ERROR( err )(handleCudaError( err, __FILE__, __LINE__ ) )
int handleCudaError(hipError_t cut,const char* file, int line)
{
if(cut != hipSuccess)
{
printf("%s : File: %s Line: %d \n",hipGetErrorString(cut),file,line);
return -1 ;
}
return 0;
}
__global__ void parallel_add(int n, int *a ,int *b , int *c)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // Calculate current Thread
if(i < n)
{
c[i] = a[i] + b[i]; // simple add
}
}
__global__ void parallel_scalar_product(int n, int *a,int *b, int *erg)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // Calculate current Thread
if(i < n)
{
int product = a[i] * b[i];
atomicAdd(erg,product); // simple add
}
}
int main(int argc, char* argv[])
{
// ============= INIT =====================
int *a_host= NULL;
int *b_host=NULL;
int erg_host=0;
int *a_device_ptr = NULL ;
int *b_device_ptr = NULL ;
int *erg_device_ptr = NULL ;
a_host=(int*)malloc(sizeof(int)*N);
b_host=(int*)malloc(sizeof(int)*N);
for(unsigned int i = 0; i < N; ++i)
{
a_host[i] = 1 ;
b_host[i] = 1;
}
//============TRANSFER======================
HANDLE_ERROR(hipMalloc(&a_device_ptr, sizeof(int)*N)); // malloc of a_device
HANDLE_ERROR(hipMalloc(&b_device_ptr, sizeof(int)*N)); // malloc of b_device
HANDLE_ERROR(hipMalloc(&erg_device_ptr, sizeof(int))); // malloc of erg_device
//Transfer a_host to a_device
HANDLE_ERROR(hipMemcpy(a_device_ptr, a_host, sizeof(int)*N, hipMemcpyHostToDevice));
//Transfer b_host to b_device
HANDLE_ERROR(hipMemcpy(b_device_ptr, b_host, sizeof(int)*N, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(erg_device_ptr, &erg_host, sizeof(int), hipMemcpyHostToDevice));
//=============Calculation ==================
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( parallel_scalar_product), dim3(ceil((float)N/(float)256)),dim3(256), 0, 0, N,a_device_ptr,b_device_ptr,erg_device_ptr);
hipEventRecord(stop);
//===========CHECK============================
HANDLE_ERROR(hipMemcpy(&erg_host,erg_device_ptr, sizeof(int), hipMemcpyDeviceToHost));
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Time %f milliseconds \n", milliseconds) ;
/* int erg = 0
for(unsigned int i = 0; i < N; ++i)
{
//correct_transfer = correct_transfer & (a_host[i] == b_host[i]);
erg += a[i] *b[i] ;
} */
if(erg_host == N)
{
printf("Correct Calculation \n");
}
else
{
printf(" Non Correct Calculation %d %d \n", erg_host , N);
}
//============CLEAN==============================
HANDLE_ERROR(hipFree(a_device_ptr));
HANDLE_ERROR(hipFree(b_device_ptr));
HANDLE_ERROR(hipFree(erg_device_ptr));
free(a_host);
free(b_host);
a_host= NULL;
b_host= NULL;
return 0 ;
}
|
25958fdc993aae6d8d7c616d3cb59f695145d1c6.cu
|
#include<stdio.h>
#include<assert.h>
#include<cuda.h>
#define N 1000000
#define HANDLE_ERROR( err )(handleCudaError( err, __FILE__, __LINE__ ) )
int handleCudaError(cudaError_t cut,const char* file, int line)
{
if(cut != cudaSuccess)
{
printf("%s : File: %s Line: %d \n",cudaGetErrorString(cut),file,line);
return -1 ;
}
return 0;
}
__global__ void parallel_add(int n, int *a ,int *b , int *c)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // Calculate current Thread
if(i < n)
{
c[i] = a[i] + b[i]; // simple add
}
}
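// Dot product: each thread forms one elementwise product and accumulates it into *erg with atomicAdd, since all threads write to the same result.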
__global__ void parallel_scalar_product(int n, int *a,int *b, int *erg)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // Calculate current Thread
if(i < n)
{
int product = a[i] * b[i];
atomicAdd(erg,product); // atomic accumulation into the shared result
}
}
int main(int argc, char* argv[])
{
// ============= INIT =====================
int *a_host= NULL;
int *b_host=NULL;
int erg_host=0;
int *a_device_ptr = NULL ;
int *b_device_ptr = NULL ;
int *erg_device_ptr = NULL ;
a_host=(int*)malloc(sizeof(int)*N);
b_host=(int*)malloc(sizeof(int)*N);
for(unsigned int i = 0; i < N; ++i)
{
a_host[i] = 1 ;
b_host[i] = 1;
}
//============TRANSFER======================
HANDLE_ERROR(cudaMalloc(&a_device_ptr, sizeof(int)*N)); // malloc of a_device
HANDLE_ERROR(cudaMalloc(&b_device_ptr, sizeof(int)*N)); // malloc of b_device
HANDLE_ERROR(cudaMalloc(&erg_device_ptr, sizeof(int))); // malloc of erg_device
//Transfer a_host to a_device
HANDLE_ERROR(cudaMemcpy(a_device_ptr, a_host, sizeof(int)*N, cudaMemcpyHostToDevice));
//Transfer b_host to b_device
HANDLE_ERROR(cudaMemcpy(b_device_ptr, b_host, sizeof(int)*N, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(erg_device_ptr, &erg_host, sizeof(int), cudaMemcpyHostToDevice));
//=============Calculation ==================
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
parallel_scalar_product<<<ceil((float)N/(float)256),256>>>(N,a_device_ptr,b_device_ptr,erg_device_ptr);
cudaEventRecord(stop);
//===========CHECK============================
HANDLE_ERROR(cudaMemcpy(&erg_host,erg_device_ptr, sizeof(int), cudaMemcpyDeviceToHost));
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Time %f milliseconds \n", milliseconds) ;
/* int erg = 0
for(unsigned int i = 0; i < N; ++i)
{
//correct_transfer = correct_transfer & (a_host[i] == b_host[i]);
erg += a[i] *b[i] ;
} */
if(erg_host == N)
{
printf("Correct Calculation \n");
}
else
{
printf(" Non Correct Calculation %d %d \n", erg_host , N);
}
//============CLEAN==============================
HANDLE_ERROR(cudaFree(a_device_ptr));
HANDLE_ERROR(cudaFree(b_device_ptr));
HANDLE_ERROR(cudaFree(erg_device_ptr));
free(a_host);
free(b_host);
a_host= NULL;
b_host= NULL;
return 0 ;
}
|
40a7e1d14ad169f8d7216f37365af179385048ad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "kNN-brute-force-reduce.cuh"
#include <knn_gpgpu.h>
#include "kNN-brute-force-reduce.cuh"
#include "test-common.cuh"
TEST(knn_brute_force_reduce, correctness)
{
float *ref, *dist;
float *query;
int *ind;
unsigned int ref_nb = 131072;
unsigned int query_nb = 1;
unsigned int dim = 3;
unsigned int k = 100;
unsigned int iterations = 1;
unsigned int i;
ref = (float *) malloc(ref_nb * dim * sizeof(float));
query = (float *) malloc(query_nb * dim * sizeof(float));
dist = (float *) malloc(k * sizeof(float));
ind = (int *) malloc(k * sizeof(int));
for (unsigned int count = 0; count < ref_nb * dim; count++)
{
ref[count] = (float)ref_nb * dim - count;
}
for (unsigned int count = 0; count < query_nb * dim; count++)
{
query[count] = 0;
}
for (i = 0; i < iterations; i++)
{
knn_brute_force(ref, ref_nb, query, dim, k, dist, ind);
}
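// ref values decrease with index and the query is the origin, so the nearest neighbours are the last points: ind[i] should equal ref_nb - 1 - i.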
for (unsigned int i = 0; i < k; ++i)
{
ASSERT_EQ(ind[i], ref_nb - 1 - i) << "Failed with i = " << i << " and n = " << ref_nb;
}
free(dist);
free(ind);
free(query);
free(ref);
hipDeviceSynchronize();
hipDeviceReset();
}
TEST(knn_brute_force_reduce, timing)
{
float *ref, *dist;
float *query;
int *ind;
unsigned int ref_nb = 8388608;
unsigned int query_nb = 1;
unsigned int dim = 3;
unsigned int k = 100;
ref = (float *) malloc(ref_nb * dim * sizeof(float));
query = (float *) malloc(query_nb * dim * sizeof(float));
dist = (float *) malloc(k * sizeof(float));
ind = (int *) malloc(k * sizeof(int));
for (unsigned int count = 0; count < ref_nb * dim; count++)
{
ref[count] = (float)ref_nb * dim - count;
}
for (unsigned int count = 0; count < query_nb * dim; count++)
{
query[count] = 0;
}
hipEvent_t start, stop;
unsigned int bytes = ref_nb * (sizeof(float));
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
float elapsed_time = 0;
checkCudaErrors(hipEventRecord(start, 0));
knn_brute_force(ref, ref_nb, query, dim, k, dist, ind);
checkCudaErrors(hipEventRecord(stop, 0));
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
double throughput = 1.0e-9 * ((double)bytes) / (elapsed_time * 1e-3);
printf("kNN-brute-force-reduce, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u Elements, k = %u NumDevsUsed = %d\n",
throughput, elapsed_time, ref_nb, k, 1);
free(dist);
free(ind);
free(query);
free(ref);
hipDeviceSynchronize();
hipDeviceReset();
}
|
40a7e1d14ad169f8d7216f37365af179385048ad.cu
|
#include "kNN-brute-force-reduce.cuh"
#include <knn_gpgpu.h>
#include "kNN-brute-force-reduce.cuh"
#include "test-common.cuh"
TEST(knn_brute_force_reduce, correctness)
{
float *ref, *dist;
float *query;
int *ind;
unsigned int ref_nb = 131072;
unsigned int query_nb = 1;
unsigned int dim = 3;
unsigned int k = 100;
unsigned int iterations = 1;
unsigned int i;
ref = (float *) malloc(ref_nb * dim * sizeof(float));
query = (float *) malloc(query_nb * dim * sizeof(float));
dist = (float *) malloc(k * sizeof(float));
ind = (int *) malloc(k * sizeof(int));
for (unsigned int count = 0; count < ref_nb * dim; count++)
{
ref[count] = (float)ref_nb * dim - count;
}
for (unsigned int count = 0; count < query_nb * dim; count++)
{
query[count] = 0;
}
for (i = 0; i < iterations; i++)
{
knn_brute_force(ref, ref_nb, query, dim, k, dist, ind);
}
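// ref values decrease with index and the query is the origin, so the nearest neighbours are the last points: ind[i] should equal ref_nb - 1 - i.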
for (unsigned int i = 0; i < k; ++i)
{
ASSERT_EQ(ind[i], ref_nb - 1 - i) << "Failed with i = " << i << " and n = " << ref_nb;
}
free(dist);
free(ind);
free(query);
free(ref);
cudaDeviceSynchronize();
cudaDeviceReset();
}
TEST(knn_brute_force_reduce, timing)
{
float *ref, *dist;
float *query;
int *ind;
unsigned int ref_nb = 8388608;
unsigned int query_nb = 1;
unsigned int dim = 3;
unsigned int k = 100;
ref = (float *) malloc(ref_nb * dim * sizeof(float));
query = (float *) malloc(query_nb * dim * sizeof(float));
dist = (float *) malloc(k * sizeof(float));
ind = (int *) malloc(k * sizeof(int));
for (unsigned int count = 0; count < ref_nb * dim; count++)
{
ref[count] = (float)ref_nb * dim - count;
}
for (unsigned int count = 0; count < query_nb * dim; count++)
{
query[count] = 0;
}
cudaEvent_t start, stop;
unsigned int bytes = ref_nb * (sizeof(float));
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
float elapsed_time = 0;
checkCudaErrors(cudaEventRecord(start, 0));
knn_brute_force(ref, ref_nb, query, dim, k, dist, ind);
checkCudaErrors(cudaEventRecord(stop, 0));
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
double throughput = 1.0e-9 * ((double)bytes) / (elapsed_time * 1e-3);
printf("kNN-brute-force-reduce, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u Elements, k = %u NumDevsUsed = %d\n",
throughput, elapsed_time, ref_nb, k, 1);
free(dist);
free(ind);
free(query);
free(ref);
cudaDeviceSynchronize();
cudaDeviceReset();
}
|
77b87f52daa748391a18e10a5082b7be4093ed0a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by jglrxavpok on 05/09/2020.
//
#include "Texture.h"
#include "stb_image.h"
#include "cudautils.h"
__device__ Texture::Texture(int width, int height, Color *pixels): width(width), height(height), pixels(pixels) {}
__device__ Color Texture::at(Point3 position) {
Color black{};
// TODO: wrapping?
if(position.x() < 0.0 || position.x() > 1.0)
return black;
if(position.y() < 0.0 || position.y() > 1.0)
return black;
// TODO: filtering
int px = static_cast<int>(position.x() * (width-1));
int py = static_cast<int>(position.y() * (height-1));
return pixels[py*width+px];
}
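// The Texture constructor is __device__, so the object has to be built on the GPU: compileTexture news it up in a kernel and stores the pointer in managed memory.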
__global__
void compileTexture(Texture** destination, int w, int h, Color* pixels) {
*destination = new Texture(w, h, pixels);
}
__host__ Texture** Texture::loadFromFile(const std::string& name) {
std::cout << "Loading texture " << name << std::endl;
int width;
int height;
int channels;
uint8_t* pixels = stbi_load(name.c_str(), &width, &height, &channels, 3);
if(!pixels) {
std::cerr << "Failed to load texture " << name << std::endl;
exit(-1);
}
Color* colors;
checkCudaErrors(hipMallocManaged(&colors, sizeof(Color)*width*height));
for(int y = 0; y < height; y++) {
for(int x = 0; x < width; x++) {
size_t ptr = (x + width*y)*3;
int red = pixels[ptr+0];
int green = pixels[ptr+1];
int blue = pixels[ptr+2];
colors[ptr/3] = Color(double(red)/255.0, double(green)/255.0, double(blue)/255.0);
}
}
stbi_image_free(pixels); // release the CPU-side image now that it has been converted into managed memory
Texture** result;
checkCudaErrors(hipMallocManaged(&result, sizeof(Texture*)));
hipLaunchKernelGGL(( compileTexture), dim3(1), dim3(1), 0, 0, result, width, height, colors);
checkCudaErrors(hipDeviceSynchronize());
return result;
}
|
77b87f52daa748391a18e10a5082b7be4093ed0a.cu
|
//
// Created by jglrxavpok on 05/09/2020.
//
#include "Texture.h"
#include "stb_image.h"
#include "cudautils.h"
__device__ Texture::Texture(int width, int height, Color *pixels): width(width), height(height), pixels(pixels) {}
__device__ Color Texture::at(Point3 position) {
Color black{};
// TODO: wrapping?
if(position.x() < 0.0 || position.x() > 1.0)
return black;
if(position.y() < 0.0 || position.y() > 1.0)
return black;
// TODO: filtering
int px = static_cast<int>(position.x() * (width-1));
int py = static_cast<int>(position.y() * (height-1));
return pixels[py*width+px];
}
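// The Texture constructor is __device__, so the object has to be built on the GPU: compileTexture news it up in a kernel and stores the pointer in managed memory.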
__global__
void compileTexture(Texture** destination, int w, int h, Color* pixels) {
*destination = new Texture(w, h, pixels);
}
__host__ Texture** Texture::loadFromFile(const std::string& name) {
std::cout << "Loading texture " << name << std::endl;
int width;
int height;
int channels;
uint8_t* pixels = stbi_load(name.c_str(), &width, &height, &channels, 3);
if(!pixels) {
std::cerr << "Failed to load texture " << name << std::endl;
exit(-1);
}
Color* colors;
checkCudaErrors(cudaMallocManaged(&colors, sizeof(Color)*width*height));
for(int y = 0; y < height; y++) {
for(int x = 0; x < width; x++) {
size_t ptr = (x + width*y)*3;
int red = pixels[ptr+0];
int green = pixels[ptr+1];
int blue = pixels[ptr+2];
colors[ptr/3] = Color(double(red)/255.0, double(green)/255.0, double(blue)/255.0);
}
}
stbi_image_free(pixels); // release the CPU-side image now that it has been converted into managed memory
Texture** result;
checkCudaErrors(cudaMallocManaged(&result, sizeof(Texture*)));
compileTexture<<<1, 1>>>(result, width, height, colors);
checkCudaErrors(cudaDeviceSynchronize());
return result;
}
|
e8508fb8599233f258828d5798b89c413e851a49.hip
|
// !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.hip"
#include "support.cu"
const unsigned int numStream = 3;
int main (int argc, char *argv[])
{
//set standard seed
srand(217);
Timer timer;
hipError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d[numStream], *B_d[numStream], *C_d[numStream];
size_t A_sz, B_sz, C_sz;
unsigned VecSize;
// Initialize streams
hipStream_t streams[numStream];
for (int i = 0; i < numStream; i++)
hipStreamCreate(&streams[i]);
dim3 dim_grid, dim_block;
if (argc == 1) {
VecSize = 1000000;
} else if (argc == 2) {
VecSize = atoi(argv[1]);
} else {
printf("\nOh no!\nUsage: ./vecAdd <Size>");
exit(0);
}
A_sz = VecSize;
B_sz = VecSize;
C_sz = VecSize;
const int segmentLen = VecSize / numStream;
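// Each stream handles a contiguous chunk of segmentLen elements; the last stream also picks up the remainder (VecSize % numStream).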
// A_h = (float*) malloc( sizeof(float)*A_sz );
hipHostMalloc((void**)&A_h, A_sz*sizeof(float), hipHostMallocDefault);
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
// B_h = (float*) malloc( sizeof(float)*B_sz );
hipHostMalloc((void**)&B_h, B_sz*sizeof(float), hipHostMallocDefault);
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
// C_h = (float*) malloc( sizeof(float)*C_sz );
hipHostMalloc((void**)&C_h, C_sz*sizeof(float), hipHostMallocDefault);
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" size Of vector: %u x %u\n ", VecSize);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
// hipMalloc((float**) &A_d, sizeof(float) * VecSize);
// hipMalloc((float**) &B_d, sizeof(float) * VecSize);
// hipMalloc((float**) &C_d, sizeof(float) * VecSize);
for (int i = 0; i < numStream; i++)
{
if (i != numStream-1)
{
hipMalloc((float**) &A_d[i], sizeof(float) * segmentLen);
hipMalloc((float**) &B_d[i], sizeof(float) * segmentLen);
hipMalloc((float**) &C_d[i], sizeof(float) * segmentLen);
}
else // remainder
{
hipMalloc((float**) &A_d[i], sizeof(float) * (segmentLen + VecSize % numStream));
hipMalloc((float**) &B_d[i], sizeof(float) * (segmentLen + VecSize % numStream));
hipMalloc((float**) &C_d[i], sizeof(float) * (segmentLen + VecSize % numStream));
}
}
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device...\n"); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
// hipMemcpy(A_d, A_h, sizeof(float) * VecSize, hipMemcpyHostToDevice);
// hipMemcpy(B_d, B_h, sizeof(float) * VecSize, hipMemcpyHostToDevice);
for (int i = 0; i < numStream; i++)
{
if (i != numStream-1)
{
hipMemcpyAsync(A_d[i], A_h + i*segmentLen, sizeof(float)*segmentLen, hipMemcpyHostToDevice, streams[i]);
hipMemcpyAsync(B_d[i], B_h + i*segmentLen, sizeof(float)*segmentLen, hipMemcpyHostToDevice, streams[i]);
}
else
{
hipMemcpyAsync(A_d[i], A_h + i*segmentLen, sizeof(float)*(segmentLen + VecSize % numStream), hipMemcpyHostToDevice, streams[i]);
hipMemcpyAsync(B_d[i], B_h + i*segmentLen, sizeof(float)*(segmentLen + VecSize % numStream), hipMemcpyHostToDevice, streams[i]);
}
}
// hipDeviceSynchronize();
// stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ---------------------------
printf("Launching kernel...\n"); fflush(stdout);
// startTime(&timer);
// basicVecAdd(A_d, B_d, C_d, VecSize); //In kernel.cu
for (int i = 0; i < numStream; i++)
{
if (i != numStream-1)
{
basicVecAdd(A_d[i], B_d[i], C_d[i], segmentLen, streams[i]);
}
else
{
basicVecAdd(A_d[i], B_d[i], C_d[i], segmentLen + VecSize % numStream, streams[i]);
}
}
// cuda_ret = hipDeviceSynchronize();
// if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
// stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
// startTime(&timer);
//INSERT CODE HERE
// hipMemcpy(C_h, C_d, sizeof(float) * VecSize, hipMemcpyDeviceToHost);
for (int i = 0; i < numStream; i++)
{
if (i != numStream-1)
{
hipMemcpyAsync(C_h + i*segmentLen, C_d[i], sizeof(float)*segmentLen, hipMemcpyDeviceToHost, streams[i]);
}
else
{
hipMemcpyAsync(C_h + i*segmentLen, C_d[i], sizeof(float)*(segmentLen + VecSize % numStream), hipMemcpyDeviceToHost, streams[i]);
}
}
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, VecSize);
// Free memory ------------------------------------------------------------
// free(A_h);
// free(B_h);
// free(C_h);
hipHostFree(A_h);
hipHostFree(B_h);
hipHostFree(C_h);
//INSERT CODE HERE
// hipFree(A_d);
// hipFree(B_d);
// hipFree(C_d);
for (int i = 0; i < numStream; i++)
{
hipFree(A_d[i]);
hipFree(B_d[i]);
hipFree(C_d[i]);
hipStreamDestroy(streams[i]);
}
return 0;
}
|
e8508fb8599233f258828d5798b89c413e851a49.cu
|
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.cu"
#include "support.cu"
const unsigned int numStream = 3;
int main (int argc, char *argv[])
{
//set standard seed
srand(217);
Timer timer;
cudaError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d[numStream], *B_d[numStream], *C_d[numStream];
size_t A_sz, B_sz, C_sz;
unsigned VecSize;
// Initialize streams
cudaStream_t streams[numStream];
for (int i = 0; i < numStream; i++)
cudaStreamCreate(&streams[i]);
dim3 dim_grid, dim_block;
if (argc == 1) {
VecSize = 1000000;
} else if (argc == 2) {
VecSize = atoi(argv[1]);
} else {
printf("\nOh no!\nUsage: ./vecAdd <Size>");
exit(0);
}
A_sz = VecSize;
B_sz = VecSize;
C_sz = VecSize;
const int segmentLen = VecSize / numStream;
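// Each stream handles a contiguous chunk of segmentLen elements; the last stream also picks up the remainder (VecSize % numStream).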
// A_h = (float*) malloc( sizeof(float)*A_sz );
cudaHostAlloc((void**)&A_h, A_sz*sizeof(float), cudaHostAllocDefault);
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
// B_h = (float*) malloc( sizeof(float)*B_sz );
cudaHostAlloc((void**)&B_h, B_sz*sizeof(float), cudaHostAllocDefault);
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
// C_h = (float*) malloc( sizeof(float)*C_sz );
cudaHostAlloc((void**)&C_h, C_sz*sizeof(float), cudaHostAllocDefault);
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" size Of vector: %u x %u\n ", VecSize);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
// cudaMalloc((float**) &A_d, sizeof(float) * VecSize);
// cudaMalloc((float**) &B_d, sizeof(float) * VecSize);
// cudaMalloc((float**) &C_d, sizeof(float) * VecSize);
for (int i = 0; i < numStream; i++)
{
if (i != numStream-1)
{
cudaMalloc((float**) &A_d[i], sizeof(float) * segmentLen);
cudaMalloc((float**) &B_d[i], sizeof(float) * segmentLen);
cudaMalloc((float**) &C_d[i], sizeof(float) * segmentLen);
}
else // remainder
{
cudaMalloc((float**) &A_d[i], sizeof(float) * (segmentLen + VecSize % numStream));
cudaMalloc((float**) &B_d[i], sizeof(float) * (segmentLen + VecSize % numStream));
cudaMalloc((float**) &C_d[i], sizeof(float) * (segmentLen + VecSize % numStream));
}
}
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device...\n"); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
// cudaMemcpy(A_d, A_h, sizeof(float) * VecSize, cudaMemcpyHostToDevice);
// cudaMemcpy(B_d, B_h, sizeof(float) * VecSize, cudaMemcpyHostToDevice);
for (int i = 0; i < numStream; i++)
{
if (i != numStream-1)
{
cudaMemcpyAsync(A_d[i], A_h + i*segmentLen, sizeof(float)*segmentLen, cudaMemcpyHostToDevice, streams[i]);
cudaMemcpyAsync(B_d[i], B_h + i*segmentLen, sizeof(float)*segmentLen, cudaMemcpyHostToDevice, streams[i]);
}
else
{
cudaMemcpyAsync(A_d[i], A_h + i*segmentLen, sizeof(float)*(segmentLen + VecSize % numStream), cudaMemcpyHostToDevice, streams[i]);
cudaMemcpyAsync(B_d[i], B_h + i*segmentLen, sizeof(float)*(segmentLen + VecSize % numStream), cudaMemcpyHostToDevice, streams[i]);
}
}
// cudaDeviceSynchronize();
// stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ---------------------------
printf("Launching kernel...\n"); fflush(stdout);
// startTime(&timer);
// basicVecAdd(A_d, B_d, C_d, VecSize); //In kernel.cu
for (int i = 0; i < numStream; i++)
{
if (i != numStream-1)
{
basicVecAdd(A_d[i], B_d[i], C_d[i], segmentLen, streams[i]);
}
else
{
basicVecAdd(A_d[i], B_d[i], C_d[i], segmentLen + VecSize % numStream, streams[i]);
}
}
// cuda_ret = cudaDeviceSynchronize();
// if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
// stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
// startTime(&timer);
//INSERT CODE HERE
// cudaMemcpy(C_h, C_d, sizeof(float) * VecSize, cudaMemcpyDeviceToHost);
for (int i = 0; i < numStream; i++)
{
if (i != numStream-1)
{
cudaMemcpyAsync(C_h + i*segmentLen, C_d[i], sizeof(float)*segmentLen, cudaMemcpyDeviceToHost, streams[i]);
}
else
{
cudaMemcpyAsync(C_h + i*segmentLen, C_d[i], sizeof(float)*(segmentLen + VecSize % numStream), cudaMemcpyDeviceToHost, streams[i]);
}
}
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, VecSize);
// Free memory ------------------------------------------------------------
// free(A_h);
// free(B_h);
// free(C_h);
cudaFreeHost(A_h);
cudaFreeHost(B_h);
cudaFreeHost(C_h);
//INSERT CODE HERE
// cudaFree(A_d);
// cudaFree(B_d);
// cudaFree(C_d);
for (int i = 0; i < numStream; i++)
{
cudaFree(A_d[i]);
cudaFree(B_d[i]);
cudaFree(C_d[i]);
cudaStreamDestroy(streams[i]);
}
return 0;
}
|
06ce6870aa572a03f07053d6573c228f3055b265.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 Microsoft Corporation
// Licensed under the MIT license.
// Author: Paul Koch <[email protected]>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <type_traits>
#include "ebm_native.h"
#include "logging.h"
#include "common_c.h"
#include "bridge_c.h"
#include "zones.h"
#include "common_cpp.hpp"
#include "bridge_cpp.hpp"
#include "Registration.hpp"
#include "Loss.hpp"
namespace DEFINED_ZONE_NAME {
#ifndef DEFINED_ZONE_NAME
#error DEFINED_ZONE_NAME must be defined
#endif // DEFINED_ZONE_NAME
template <typename TLoss>
GPU_GLOBAL void TestGpuAdd(const Loss * const pLoss, const int * const pVal1, const int * const pVal2, int * const pResult) {
TLoss * const pLossSpecific = static_cast<TLoss *>(pLoss);
const size_t iGpuThread = threadIdx.x;
pResult[iGpuThread] = static_cast<int>(static_cast<float>(pLossSpecific->CalculateGradient(static_cast<float>(pVal1[iGpuThread]), static_cast<float>(pVal2[iGpuThread]))));
}
struct Cuda_32_Operators final {
// https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html#group__CUDA__MATH__SINGLE
// https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__DOUBLE.html#group__CUDA__MATH__DOUBLE
constexpr static size_t countPackedItems = 1; // the number of Unpacked items in a Packed structure
typedef float Unpacked;
typedef float Packed;
private:
Packed m_data;
public:
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators() {
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators(const float data) : m_data(static_cast<Unpacked>(data)) {
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators(const double data) : m_data(static_cast<Unpacked>(data)) {
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators(const int data) : m_data(static_cast<Unpacked>(data)) {
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator+ (const Cuda_32_Operators & other) const {
return Cuda_32_Operators(m_data + other.m_data);
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator- (const Cuda_32_Operators & other) const {
return Cuda_32_Operators(m_data - other.m_data);
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator* (const Cuda_32_Operators & other) const {
return Cuda_32_Operators(m_data * other.m_data);
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator/ (const Cuda_32_Operators & other) const {
return Cuda_32_Operators(m_data / other.m_data);
}
GPU_BOTH INLINE_ALWAYS bool IsAnyEqual(const Cuda_32_Operators & other) const {
return m_data == other.m_data;
}
GPU_BOTH INLINE_ALWAYS operator float() const {
return m_data;
}
GPU_BOTH INLINE_ALWAYS operator double() const {
return m_data;
}
GPU_BOTH INLINE_ALWAYS bool IsAnyInf() const {
return isinf(m_data);
}
GPU_BOTH INLINE_ALWAYS bool IsAnyNaN() const {
return isnan(m_data);
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators Sqrt() const {
return Cuda_32_Operators(sqrtf(m_data));
}
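// ApplyTraining also runs TestGpuAdd on small fixed arrays as a device-side sanity check of the loss object; any CUDA error jumps to the goto-based cleanup at exit_error.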
template<template <typename, typename, ptrdiff_t, ptrdiff_t, bool> class TExecute, typename TLoss, typename TFloat, ptrdiff_t cCompilerScores, ptrdiff_t cCompilerPack, bool bHessian>
INLINE_RELEASE_TEMPLATED static ErrorEbmType ApplyTraining(const Loss * const pLoss, ApplyTrainingData * const pData) {
constexpr size_t k_cItems = 5;
bool bExitError = true;
const int aVal1[k_cItems] = { 5, 4, 3, 2, 1 };
const int aVal2[k_cItems] = { 100, 200, 300, 400, 500 };
int aResult[k_cItems];
static_assert(std::is_standard_layout<TLoss>::value &&
std::is_trivially_copyable<TLoss>::value,
"This allows offsetof, memcpy, memset, inter-language, GPU and cross-machine use where needed");
int * aDeviceVal1 = nullptr;
int * aDeviceVal2 = nullptr;
int * aDeviceResult = nullptr;
void * pDeviceLoss = nullptr;
hipError_t error;
error = hipSetDevice(0);
if(hipSuccess != error) {
goto exit_error;
}
error = hipMalloc((void **)&aDeviceVal1, k_cItems * sizeof(int));
if(hipSuccess != error) {
goto exit_error;
}
error = hipMalloc((void **)&aDeviceVal2, k_cItems * sizeof(int));
if(hipSuccess != error) {
goto exit_error;
}
error = hipMalloc((void **)&aDeviceResult, k_cItems * sizeof(int));
if(hipSuccess != error) {
goto exit_error;
}
if(!std::is_empty<TLoss>::value) {
error = hipMalloc((void **)&pDeviceLoss, sizeof(TLoss));
if(hipSuccess != error) {
goto exit_error;
}
error = hipMemcpy(pDeviceLoss, pLoss, sizeof(TLoss), hipMemcpyHostToDevice);
if(hipSuccess != error) {
goto exit_error;
}
}
error = hipMemcpy(aDeviceVal1, aVal1, k_cItems * sizeof(int), hipMemcpyHostToDevice);
if(hipSuccess != error) {
goto exit_error;
}
error = hipMemcpy(aDeviceVal2, aVal2, k_cItems * sizeof(int), hipMemcpyHostToDevice);
if(hipSuccess != error) {
goto exit_error;
}
hipLaunchKernelGGL(( TestGpuAdd<TLoss>), dim3(1), dim3(k_cItems), 0, 0, static_cast<Loss *>(pDeviceLoss), aDeviceVal1, aDeviceVal2, aDeviceResult);
hipLaunchKernelGGL(( ExecuteApplyTraining<TExecute, TLoss, TFloat, cCompilerScores, cCompilerPack, bHessian>), dim3(1), dim3(k_cItems), 0, 0,
pLoss,
pData->m_cRuntimeScores,
pData->m_cRuntimePack
);
error = hipGetLastError();
if(hipSuccess != error) {
goto exit_error;
}
error = hipDeviceSynchronize();
if(hipSuccess != error) {
goto exit_error;
}
error = hipMemcpy(aResult, aDeviceResult, k_cItems * sizeof(int), hipMemcpyDeviceToHost);
if(hipSuccess != error) {
goto exit_error;
}
bExitError = false;
exit_error:
bool bExitHard = false;
if(nullptr != pDeviceLoss) {
error = hipFree(pDeviceLoss);
if(hipSuccess != error) {
bExitHard = true;
}
}
if(nullptr != aDeviceResult) {
error = hipFree(aDeviceResult);
if(hipSuccess != error) {
bExitHard = true;
}
}
if(nullptr != aDeviceVal2) {
error = hipFree(aDeviceVal2);
if(hipSuccess != error) {
bExitHard = true;
}
}
if(nullptr != aDeviceVal1) {
error = hipFree(aDeviceVal1);
if(hipSuccess != error) {
bExitHard = true;
}
}
if(bExitHard) {
bExitError = true;
// not much to do with the error if we fail hipDeviceReset after failing hipFree
error = hipDeviceReset();
}
return bExitError ? Error_UnexpectedInternal : Error_None;
}
template<template <typename, typename, ptrdiff_t, ptrdiff_t, bool> class TExecute, typename TLoss, typename TFloat, ptrdiff_t cCompilerScores, ptrdiff_t cCompilerPack, bool bHessian>
INLINE_RELEASE_TEMPLATED static ErrorEbmType ApplyValidation(const Loss * const pLoss, ApplyValidationData * const pData) {
// this allows us to switch execution onto GPU, FPGA, or other local computation
// TODO: use something other than <<<1, 1>>>
hipLaunchKernelGGL(( ExecuteApplyValidation<TExecute, TLoss, TFloat, cCompilerScores, cCompilerPack, bHessian>), dim3(1), dim3(1), 0, 0,
pLoss,
pData->m_cRuntimeScores,
pData->m_cRuntimePack,
nullptr
);
return Error_None;
}
};
static_assert(std::is_standard_layout<Cuda_32_Operators>::value &&
std::is_trivially_copyable<Cuda_32_Operators>::value,
"This allows offsetof, memcpy, memset, inter-language, GPU and cross-machine use where needed");
// FIRST, define the RegisterLoss function that we'll be calling from our registrations. This is a static
// function, so we can have duplicate named functions in other files and they'll refer to different functions
template<template <typename> class TRegistrable, typename... Args>
static INLINE_ALWAYS std::shared_ptr<const Registration> RegisterLoss(const char * const sRegistrationName, const Args...args) {
return Register<TRegistrable, Cuda_32_Operators>(sRegistrationName, args...);
}
// now include all our special loss registrations which will use the RegisterLoss function we defined above!
#include "loss_registrations.hpp"
INTERNAL_IMPORT_EXPORT_BODY ErrorEbmType CreateLoss_Cuda_32(
const Config * const pConfig,
const char * const sLoss,
const char * const sLossEnd,
LossWrapper * const pLossWrapperOut
) {
return Loss::CreateLoss(&RegisterLosses, pConfig, sLoss, sLossEnd, pLossWrapperOut);
}
} // DEFINED_ZONE_NAME
|
06ce6870aa572a03f07053d6573c228f3055b265.cu
|
// Copyright (c) 2018 Microsoft Corporation
// Licensed under the MIT license.
// Author: Paul Koch <[email protected]>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <type_traits>
#include "ebm_native.h"
#include "logging.h"
#include "common_c.h"
#include "bridge_c.h"
#include "zones.h"
#include "common_cpp.hpp"
#include "bridge_cpp.hpp"
#include "Registration.hpp"
#include "Loss.hpp"
namespace DEFINED_ZONE_NAME {
#ifndef DEFINED_ZONE_NAME
#error DEFINED_ZONE_NAME must be defined
#endif // DEFINED_ZONE_NAME
template <typename TLoss>
GPU_GLOBAL void TestGpuAdd(const Loss * const pLoss, const int * const pVal1, const int * const pVal2, int * const pResult) {
TLoss * const pLossSpecific = static_cast<TLoss *>(pLoss);
const size_t iGpuThread = threadIdx.x;
pResult[iGpuThread] = static_cast<int>(static_cast<float>(pLossSpecific->CalculateGradient(static_cast<float>(pVal1[iGpuThread]), static_cast<float>(pVal2[iGpuThread]))));
}
struct Cuda_32_Operators final {
// https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html#group__CUDA__MATH__SINGLE
// https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__DOUBLE.html#group__CUDA__MATH__DOUBLE
constexpr static size_t countPackedItems = 1; // the number of Unpacked items in a Packed structure
typedef float Unpacked;
typedef float Packed;
private:
Packed m_data;
public:
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators() {
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators(const float data) : m_data(static_cast<Unpacked>(data)) {
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators(const double data) : m_data(static_cast<Unpacked>(data)) {
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators(const int data) : m_data(static_cast<Unpacked>(data)) {
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator+ (const Cuda_32_Operators & other) const {
return Cuda_32_Operators(m_data + other.m_data);
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator- (const Cuda_32_Operators & other) const {
return Cuda_32_Operators(m_data - other.m_data);
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator* (const Cuda_32_Operators & other) const {
return Cuda_32_Operators(m_data * other.m_data);
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator/ (const Cuda_32_Operators & other) const {
return Cuda_32_Operators(m_data / other.m_data);
}
GPU_BOTH INLINE_ALWAYS bool IsAnyEqual(const Cuda_32_Operators & other) const {
return m_data == other.m_data;
}
GPU_BOTH INLINE_ALWAYS operator float() const {
return m_data;
}
GPU_BOTH INLINE_ALWAYS operator double() const {
return m_data;
}
GPU_BOTH INLINE_ALWAYS bool IsAnyInf() const {
return isinf(m_data);
}
GPU_BOTH INLINE_ALWAYS bool IsAnyNaN() const {
return isnan(m_data);
}
GPU_BOTH INLINE_ALWAYS Cuda_32_Operators Sqrt() const {
return Cuda_32_Operators(sqrtf(m_data));
}
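// ApplyTraining also runs TestGpuAdd on small fixed arrays as a device-side sanity check of the loss object; any CUDA error jumps to the goto-based cleanup at exit_error.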
template<template <typename, typename, ptrdiff_t, ptrdiff_t, bool> class TExecute, typename TLoss, typename TFloat, ptrdiff_t cCompilerScores, ptrdiff_t cCompilerPack, bool bHessian>
INLINE_RELEASE_TEMPLATED static ErrorEbmType ApplyTraining(const Loss * const pLoss, ApplyTrainingData * const pData) {
constexpr size_t k_cItems = 5;
bool bExitError = true;
const int aVal1[k_cItems] = { 5, 4, 3, 2, 1 };
const int aVal2[k_cItems] = { 100, 200, 300, 400, 500 };
int aResult[k_cItems];
static_assert(std::is_standard_layout<TLoss>::value &&
std::is_trivially_copyable<TLoss>::value,
"This allows offsetof, memcpy, memset, inter-language, GPU and cross-machine use where needed");
int * aDeviceVal1 = nullptr;
int * aDeviceVal2 = nullptr;
int * aDeviceResult = nullptr;
void * pDeviceLoss = nullptr;
cudaError_t error;
error = cudaSetDevice(0);
if(cudaSuccess != error) {
goto exit_error;
}
error = cudaMalloc((void **)&aDeviceVal1, k_cItems * sizeof(int));
if(cudaSuccess != error) {
goto exit_error;
}
error = cudaMalloc((void **)&aDeviceVal2, k_cItems * sizeof(int));
if(cudaSuccess != error) {
goto exit_error;
}
error = cudaMalloc((void **)&aDeviceResult, k_cItems * sizeof(int));
if(cudaSuccess != error) {
goto exit_error;
}
if(!std::is_empty<TLoss>::value) {
error = cudaMalloc((void **)&pDeviceLoss, sizeof(TLoss));
if(cudaSuccess != error) {
goto exit_error;
}
error = cudaMemcpy(pDeviceLoss, pLoss, sizeof(TLoss), cudaMemcpyHostToDevice);
if(cudaSuccess != error) {
goto exit_error;
}
}
error = cudaMemcpy(aDeviceVal1, aVal1, k_cItems * sizeof(int), cudaMemcpyHostToDevice);
if(cudaSuccess != error) {
goto exit_error;
}
error = cudaMemcpy(aDeviceVal2, aVal2, k_cItems * sizeof(int), cudaMemcpyHostToDevice);
if(cudaSuccess != error) {
goto exit_error;
}
TestGpuAdd<TLoss><<<1, k_cItems>>>(static_cast<Loss *>(pDeviceLoss), aDeviceVal1, aDeviceVal2, aDeviceResult);
ExecuteApplyTraining<TExecute, TLoss, TFloat, cCompilerScores, cCompilerPack, bHessian><<<1, k_cItems>>>(
pLoss,
pData->m_cRuntimeScores,
pData->m_cRuntimePack
);
error = cudaGetLastError();
if(cudaSuccess != error) {
goto exit_error;
}
error = cudaDeviceSynchronize();
if(cudaSuccess != error) {
goto exit_error;
}
error = cudaMemcpy(aResult, aDeviceResult, k_cItems * sizeof(int), cudaMemcpyDeviceToHost);
if(cudaSuccess != error) {
goto exit_error;
}
bExitError = false;
exit_error:
bool bExitHard = false;
if(nullptr != pDeviceLoss) {
error = cudaFree(pDeviceLoss);
if(cudaSuccess != error) {
bExitHard = true;
}
}
if(nullptr != aDeviceResult) {
error = cudaFree(aDeviceResult);
if(cudaSuccess != error) {
bExitHard = true;
}
}
if(nullptr != aDeviceVal2) {
error = cudaFree(aDeviceVal2);
if(cudaSuccess != error) {
bExitHard = true;
}
}
if(nullptr != aDeviceVal1) {
error = cudaFree(aDeviceVal1);
if(cudaSuccess != error) {
bExitHard = true;
}
}
if(bExitHard) {
bExitError = true;
// not much to do with the error if we fail cudaDeviceReset after failing cudaFree
error = cudaDeviceReset();
}
return bExitError ? Error_UnexpectedInternal : Error_None;
}
template<template <typename, typename, ptrdiff_t, ptrdiff_t, bool> class TExecute, typename TLoss, typename TFloat, ptrdiff_t cCompilerScores, ptrdiff_t cCompilerPack, bool bHessian>
INLINE_RELEASE_TEMPLATED static ErrorEbmType ApplyValidation(const Loss * const pLoss, ApplyValidationData * const pData) {
// this allows us to switch execution onto GPU, FPGA, or other local computation
// TODO: use something other than <<<1, 1>>>
ExecuteApplyValidation<TExecute, TLoss, TFloat, cCompilerScores, cCompilerPack, bHessian><<<1, 1>>>(
pLoss,
pData->m_cRuntimeScores,
pData->m_cRuntimePack,
nullptr
);
return Error_None;
}
};
static_assert(std::is_standard_layout<Cuda_32_Operators>::value &&
std::is_trivially_copyable<Cuda_32_Operators>::value,
"This allows offsetof, memcpy, memset, inter-language, GPU and cross-machine use where needed");
// FIRST, define the RegisterLoss function that we'll be calling from our registrations. This is a static
// function, so we can have duplicate named functions in other files and they'll refer to different functions
template<template <typename> class TRegistrable, typename... Args>
static INLINE_ALWAYS std::shared_ptr<const Registration> RegisterLoss(const char * const sRegistrationName, const Args...args) {
return Register<TRegistrable, Cuda_32_Operators>(sRegistrationName, args...);
}
// now include all our special loss registrations which will use the RegisterLoss function we defined above!
#include "loss_registrations.hpp"
INTERNAL_IMPORT_EXPORT_BODY ErrorEbmType CreateLoss_Cuda_32(
const Config * const pConfig,
const char * const sLoss,
const char * const sLossEnd,
LossWrapper * const pLossWrapperOut
) {
return Loss::CreateLoss(&RegisterLosses, pConfig, sLoss, sLossEnd, pLossWrapperOut);
}
} // DEFINED_ZONE_NAME
|
233e94f15a9018e94af93027ca431e92ff87520d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <float.h>
#include <cstdlib>
#include "./lib/read_csv.cpp"
#include "../lib/types.cpp"
#include "../lib/helpers.cu"
using namespace std;
__device__
float euclidean_distance_array(const float * x,const float * y, int n) {
float sum = 0;
for (int i=0; i < n; i++) {
sum += pow(x[i] - y[i], 2);
}
return sqrt(sum);
}
void calculate_cluster_size(int k, int *cluster_assignment,int n, int * cluster_size){
for (int i=0; i<k; i++)
cluster_size[i] = 0;
for (int i=0; i<n; i++){
cluster_size[cluster_assignment[i]]++;
}
}
// cluster assignment using randomization
__global__
void init_cluster_assignment(int k, int size, int * cluster_size, int * cluster_assignment){
for (int i=0; i<k; i++)
cluster_size[i] = 0;
int group = 0;
// srand(static_cast<unsigned int>(clock()));
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int random = index % k ;
// int random = hiprandGenerate();
// std::printf("block id : %d \t block dim: %d \t thread id : %d \t index : %d \t random : %d \n",blockIdx.x, blockDim.x, threadIdx.x, index, random);
// cluster_assignment = new int[size];
for (int i=index; i<size; i+=stride){
group = (int) random;
// group = (int) (rand() % k);
cluster_assignment[i] = group;
cluster_size[group] += 1;
}
}
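// Recompute every centroid as the mean of its assigned points; response[0] is set to 1 if any centroid coordinate changed.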
__global__
void update_clusters(int k, float ** cluster, const int * cluster_assignment, int data_size, \
int dimensions, float ** feature_vector,const int * cluster_size, int * response){
response[0] = 0;
float ** temp;
temp = new float* [k];
for (int i=0; i<k; i++)
temp[i] = new float[dimensions];
for (int i=0; i<k; i++){
for (int j=0; j<dimensions; j++){
temp[i][j] = (float) 0;
}
}
for (int i=0; i<data_size; i++){
for (int j=0; j<dimensions; j++){
temp[cluster_assignment[i]][j] += feature_vector[i][j];
}
}
for (int i=0; i<k; i++){
if (cluster_size[i] == 0){
std::printf("ZERO ::: %d \n", i);
// cout << "ZERO ::: " << i << endl;
continue;
}
for (int j=0; j<dimensions; j++){
if (cluster[i][j] != temp[i][j]/cluster_size[i]){
response[0] = 1;
}
cluster[i][j] = temp[i][j]/cluster_size[i];
}
}
// free the per-launch scratch buffers allocated with new above
for (int i=0; i<k; i++)
delete [] temp[i];
delete [] temp;
}
__device__
int find_nearest_center(int k, const float * features, int dimensions,float ** cluster){
float minDist = FLT_MAX;
int minIndex = 0;
float dist = 0;
for (int i=0; i<k; i++){
dist = euclidean_distance_array(features, cluster[i], dimensions);
if (dist < minDist) {
minDist = dist;
minIndex = i;
}
}
return minIndex;
}
__global__
void update_cluster_assignment(int k, int * cluster_assignment, int * cluster_size, float ** cluster, int size, int dimension, float ** features){
for (int i=0; i<k; i++){
cluster_size[i] = 0;
}
for (int i=0; i<size; i++){
cluster_assignment[i] = find_nearest_center(k, features[i], dimension, cluster);
cluster_size[cluster_assignment[i]]++;
}
}
void parse_data(const data_map &data, int &size, int &dimensions, string ** data_title, float *** data_features){
vector<float> sample_map_data = data.begin()->second;
size = data.size();
dimensions = sample_map_data.size();
hipMallocManaged(data_title, size*sizeof(string));
hipMallocManaged(data_features, size*sizeof(float*));
float ** data_v = *data_features;
for (int i=0; i<size; i++)
hipMallocManaged(&data_v[i], dimensions*sizeof(float));
int index = 0;
for (data_map::const_iterator it = data.begin(); it != data.end(); it++) {
(*data_title)[index] = it->first;
for (int j=0; j<dimensions; j++)
data_v[index][j] = (it->second)[j];
index++;
}
}
int * find_clusters(int k, const data_map data, int max_iter) {
// int iter = 0;
int * cluster_size;
int * cluster_assignment;
// college data parsing
int data_dimensions;
int data_size;
string * data_title;
float ** data_features;
parse_data(data, data_size, data_dimensions, &data_title, &data_features);
cout << "Size : " << data_size << " dim : " << data_dimensions << endl;
hipMallocManaged(&cluster_size, k*sizeof(int));
hipMallocManaged(&cluster_assignment, data_size*sizeof(int));
int blockSize = 256;
int numBlocks = (data_size + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( init_cluster_assignment), dim3(numBlocks) ,dim3(blockSize) , 0, 0, k, data_size, cluster_size, cluster_assignment);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
calculate_cluster_size(k, cluster_assignment, data_size, cluster_size);
float ** cluster;
hipMallocManaged(&cluster, k*sizeof(float*));
for (int i=0; i<k; i++)
hipMallocManaged(&cluster[i], data_dimensions*sizeof(float));
int * did_change;
hipMallocManaged(&did_change, sizeof(int));
for (int i=0; i < max_iter; i++) {
// update_clusters has no thread indexing, so run it on a single thread to avoid races on cluster and did_change
hipLaunchKernelGGL(( update_clusters), dim3(1), dim3(1), 0, 0, k, cluster, cluster_assignment, data_size, data_dimensions, data_features, cluster_size, did_change);
hipDeviceSynchronize();
if (did_change[0] == 1){
// update_cluster_assignment(k, cluster_assignment, cluster_size, cluster, data);
hipLaunchKernelGGL(( update_cluster_assignment), dim3(1),dim3(1), 0, 0, k, cluster_assignment, cluster_size, cluster, data_size, data_dimensions, data_features);
hipDeviceSynchronize();
}
else{
print_cluster_size(k, cluster_assignment,data_size);
return cluster_assignment;
}
}
print_cluster_size(k, cluster_assignment,data_size);
return cluster_assignment;
}
// // CUDA Kernel function to add the elements of two arrays on the GPU
// __global__
// void add(int n, float *x, float *y)
// {
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// int stride = blockDim.x * gridDim.x;
// for (int i = index; i < n; i += stride)
// y[i] =i;
// }
int main(){
data_map data = read_csv("./datasets/College.csv");
int k = 10;
int * cluster = find_clusters(k, data, 1);
// hipFree(x);
// hipFree(y);
}
|
233e94f15a9018e94af93027ca431e92ff87520d.cu
|
#include <float.h>
#include <cstdlib>
#include "./lib/read_csv.cpp"
#include "../lib/types.cpp"
#include "../lib/helpers.cu"
using namespace std;
__device__
float euclidean_distance_array(const float * x,const float * y, int n) {
float sum = 0;
for (int i=0; i < n; i++) {
sum += pow(x[i] - y[i], 2);
}
return sqrt(sum);
}
void calculate_cluster_size(int k, int *cluster_assignment,int n, int * cluster_size){
for (int i=0; i<k; i++)
cluster_size[i] = 0;
for (int i=0; i<n; i++){
cluster_size[cluster_assignment[i]]++;
}
}
// cluster assignment using randomization
__global__
void init_cluster_assignment(int k, int size, int * cluster_size, int * cluster_assignment){
for (int i=0; i<k; i++)
cluster_size[i] = 0;
int group = 0;
// srand(static_cast<unsigned int>(clock()));
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int random = index % k ;
// int random = curandGenerate();
// std::printf("block id : %d \t block dim: %d \t thread id : %d \t index : %d \t random : %d \n",blockIdx.x, blockDim.x, threadIdx.x, index, random);
// cluster_assignment = new int[size];
for (int i=index; i<size; i+=stride){
group = (int) random;
// group = (int) (rand() % k);
cluster_assignment[i] = group;
cluster_size[group] += 1;
}
}
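// Recompute every centroid as the mean of its assigned points; response[0] is set to 1 if any centroid coordinate changed.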
__global__
void update_clusters(int k, float ** cluster, const int * cluster_assignment, int data_size, \
int dimensions, float ** feature_vector,const int * cluster_size, int * response){
response[0] = 0;
float ** temp;
temp = new float* [k];
for (int i=0; i<k; i++)
temp[i] = new float[dimensions];
for (int i=0; i<k; i++){
for (int j=0; j<dimensions; j++){
temp[i][j] = (float) 0;
}
}
for (int i=0; i<data_size; i++){
for (int j=0; j<dimensions; j++){
temp[cluster_assignment[i]][j] += feature_vector[i][j];
}
}
for (int i=0; i<k; i++){
if (cluster_size[i] == 0){
std::printf("ZERO ::: %d \n", i);
// cout << "ZERO ::: " << i << endl;
continue;
}
for (int j=0; j<dimensions; j++){
if (cluster[i][j] != temp[i][j]/cluster_size[i]){
response[0] = 1;
}
cluster[i][j] = temp[i][j]/cluster_size[i];
}
}
// free the per-launch scratch buffers allocated with new above
for (int i=0; i<k; i++)
delete [] temp[i];
delete [] temp;
}
__device__
int find_nearest_center(int k, const float * features, int dimensions,float ** cluster){
float minDist = FLT_MAX;
int minIndex = 0;
float dist = 0;
for (int i=0; i<k; i++){
dist = euclidean_distance_array(features, cluster[i], dimensions);
if (dist < minDist) {
minDist = dist;
minIndex = i;
}
}
return minIndex;
}
__global__
void update_cluster_assignment(int k, int * cluster_assignment, int * cluster_size, float ** cluster, int size, int dimension, float ** features){
for (int i=0; i<k; i++){
cluster_size[i] = 0;
}
for (int i=0; i<size; i++){
cluster_assignment[i] = find_nearest_center(k, features[i], dimension, cluster);
cluster_size[cluster_assignment[i]]++;
}
}
void parse_data(const data_map &data, int &size, int &dimensions, string ** data_title, float *** data_features){
vector<float> sample_map_data = data.begin()->second;
size = data.size();
dimensions = sample_map_data.size();
cudaMallocManaged(data_title, size*sizeof(string));
cudaMallocManaged(data_features, size*sizeof(float*));
float ** data_v = *data_features;
for (int i=0; i<size; i++)
cudaMallocManaged(&data_v[i], dimensions*sizeof(float));
int index = 0;
for (data_map::const_iterator it = data.begin(); it != data.end(); it++) {
(*data_title)[index] = it->first;
for (int j=0; j<dimensions; j++)
data_v[index][j] = (it->second)[j];
index++;
}
}
int * find_clusters(int k, const data_map data, int max_iter) {
// int iter = 0;
int * cluster_size;
int * cluster_assignment;
// college data parsing
int data_dimensions;
int data_size;
string * data_title;
float ** data_features;
parse_data(data, data_size, data_dimensions, &data_title, &data_features);
cout << "Size : " << data_size << " dim : " << data_dimensions << endl;
cudaMallocManaged(&cluster_size, k*sizeof(int));
cudaMallocManaged(&cluster_assignment, data_size*sizeof(int));
int blockSize = 256;
int numBlocks = (data_size + blockSize - 1) / blockSize;
init_cluster_assignment<<<numBlocks ,blockSize >>>(k, data_size, cluster_size, cluster_assignment);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
calculate_cluster_size(k, cluster_assignment, data_size, cluster_size);
float ** cluster;
cudaMallocManaged(&cluster, k*sizeof(float*));
for (int i=0; i<k; i++)
cudaMallocManaged(&cluster[i], data_dimensions*sizeof(float));
int * did_change;
cudaMallocManaged(&did_change, sizeof(int));
for (int i=0; i < max_iter; i++) {
// update_clusters has no thread indexing, so run it on a single thread to avoid races on cluster and did_change
update_clusters<<<1, 1>>>(k, cluster, cluster_assignment, data_size, data_dimensions, data_features, cluster_size, did_change);
cudaDeviceSynchronize();
if (did_change[0] == 1){
// update_cluster_assignment(k, cluster_assignment, cluster_size, cluster, data);
update_cluster_assignment<<<1,1>>>(k, cluster_assignment, cluster_size, cluster, data_size, data_dimensions, data_features);
cudaDeviceSynchronize();
}
else{
print_cluster_size(k, cluster_assignment,data_size);
return cluster_assignment;
}
}
print_cluster_size(k, cluster_assignment,data_size);
return cluster_assignment;
}
// // CUDA Kernel function to add the elements of two arrays on the GPU
// __global__
// void add(int n, float *x, float *y)
// {
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// int stride = blockDim.x * gridDim.x;
// for (int i = index; i < n; i += stride)
// y[i] =i;
// }
int main(){
data_map data = read_csv("./datasets/College.csv");
int k = 10;
int * cluster = find_clusters(k, data, 1);
// cudaFree(x);
// cudaFree(y);
}
|
99079e484cd56d8503ea842646bf4ad2352e3f73.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* SumSquares.cu
*
* Copyright 2021 mike <mike@fedora33>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*
*
*/
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
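// For each index i, record the largest perfect square in d_squares that divides i; indices 0..3 simply map to themselves.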
__global__ void kernel(ulong* d_squares, const ulong n_squares, ulong* d_results, ulong N) {
ulong i = threadIdx.x + (blockIdx.x * blockDim.x);
if(i < N) {
// scan in reverse the squares array
// save first square which divides i in results[i]
if(i > 3) {
for(int x = n_squares-1; x > 0; x -= 1) {
if((i % d_squares[x]) == 0) {
d_results[i] = d_squares[x];
break;
}
} // for...
} else {
d_results[i] = i;
}
} //
}
int main(int argc, char **argv)
{
hipError_t error_id;
// Allocate and set the host 'squares' array
ulong N = 1024*1024*2*2*2;
ulong root_max = (ulong)floor(sqrt((double)N));
const ulong n_squares = root_max + 1;
ulong h_squares[n_squares];
for(int x = 0; x < n_squares; x += 1) h_squares[x] = x*x;
// Allocate host results array
ulong *h_results = (ulong*)malloc(sizeof(ulong)*(N+1));
if(h_results == NULL) {
printf("malloc failed\n");
exit(1);
}
// Allocate memory on device for 'squares'
ulong *d_squares;
error_id = hipMalloc((void**)&d_squares, sizeof(ulong)*n_squares);
if(error_id != hipSuccess) {
printf("hipMalloc squares failed with %d\n", error_id);
exit(1);
}
// Copy squares to device
error_id = hipMemcpy(d_squares, h_squares, sizeof(ulong)*n_squares,
hipMemcpyHostToDevice);
if(error_id != hipSuccess) {
printf("hipMemcpy squares to device failed with %d\n", error_id);
exit(1);
}
// Allocate memory on device for N results
ulong *d_results;
error_id = hipMalloc((void**)&d_results, sizeof(ulong)*(N+1));
if(error_id != hipSuccess) {
printf("hipMalloc results failed with %d\n", error_id);
exit(1);
}
// Set configuration parameters
const ulong Nthreads = 1024; // max number threads/block
const ulong Nblocks = (N/Nthreads)+1;
dim3 grid_size=(Nblocks); dim3 block_size=Nthreads;
// launch kernel
hipLaunchKernelGGL(( kernel), dim3(grid_size), dim3(block_size), 0, 0, d_squares, n_squares, d_results, (N+1));
// Wait for device to finish?
//hipDeviceSynchronize();
// copy N results back to host
error_id = hipMemcpy(h_results, d_results, sizeof(ulong)*(N+1),
hipMemcpyDeviceToHost);
if(error_id != hipSuccess) {
printf("hipMemcpy to host failed with %d\n", error_id);
exit(1);
}
// Print results array
// for(int x = 0; x < N+1; ++x) printf("%d:%ld ", x, h_results[x]);
// printf("\n");
// Cleanup
hipFree(d_squares);
hipFree(d_results);
free(h_results); // h_results was allocated with malloc on the host, not hipMalloc
return 0;
}
|
99079e484cd56d8503ea842646bf4ad2352e3f73.cu
|
/*
* SumSquares.cu
*
* Copyright 2021 mike <mike@fedora33>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*
*
*/
#include <stdio.h>
#include <math.h>
#include <cuda.h>
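// For each index i, record the largest perfect square in d_squares that divides i; indices 0..3 simply map to themselves.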
__global__ void kernel(ulong* d_squares, const ulong n_squares, ulong* d_results, ulong N) {
ulong i = threadIdx.x + (blockIdx.x * blockDim.x);
if(i < N) {
// scan in reverse the squares array
// save first square which divides i in results[i]
if(i > 3) {
for(int x = n_squares-1; x > 0; x -= 1) {
if((i % d_squares[x]) == 0) {
d_results[i] = d_squares[x];
break;
}
} // for...
} else {
d_results[i] = i;
}
} //
}
int main(int argc, char **argv)
{
cudaError_t error_id;
// Allocate and set the host 'squares' array
ulong N = 1024*1024*2*2*2;
ulong root_max = (ulong)floor(sqrt((double)N));
const ulong n_squares = root_max + 1;
ulong h_squares[n_squares];
for(int x = 0; x < n_squares; x += 1) h_squares[x] = x*x;
// Allocate host results array
ulong *h_results = (ulong*)malloc(sizeof(ulong)*(N+1));
if(h_results == NULL) {
printf("malloc failed\n");
exit(1);
}
// Allocate memory on device for 'squares'
ulong *d_squares;
error_id = cudaMalloc((void**)&d_squares, sizeof(ulong)*n_squares);
if(error_id != cudaSuccess) {
printf("cudaMalloc squares failed with %d\n", error_id);
exit(1);
}
// Copy squares to device
error_id = cudaMemcpy(d_squares, h_squares, sizeof(ulong)*n_squares,
cudaMemcpyHostToDevice);
if(error_id != cudaSuccess) {
printf("cudaMemcpy squares to device failed with %d\n", error_id);
exit(1);
}
// Allocate memory on device for N results
ulong *d_results;
error_id = cudaMalloc((void**)&d_results, sizeof(ulong)*(N+1));
if(error_id != cudaSuccess) {
printf("cudaMalloc results failed with %d\n", error_id);
exit(1);
}
// Set configuration parameters
const ulong Nthreads = 1024; // max number threads/block
const ulong Nblocks = (N/Nthreads)+1;
dim3 grid_size=(Nblocks); dim3 block_size=Nthreads;
// launch kernel
kernel<<<grid_size, block_size>>>(d_squares, n_squares, d_results, (N+1));
// Wait for device to finish?
//cudaDeviceSynchronize();
// copy N results back to host
error_id = cudaMemcpy(h_results, d_results, sizeof(ulong)*(N+1),
cudaMemcpyDeviceToHost);
if(error_id != cudaSuccess) {
printf("cudaMemcpy to host failed with %d\n", error_id);
exit(1);
}
// Print results array
// for(int x = 0; x < N+1; ++x) printf("%d:%ld ", x, h_results[x]);
// printf("\n");
// Cleanup
cudaFree(d_squares);
cudaFree(d_results);
free(h_results); // h_results was allocated with malloc on the host, not cudaMalloc
return 0;
}
|
b7985054bd8a87f9ab1199a72df049c2c7990582.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
/*
* Configuration
*/
static const std::string output_path = "output.ppm";
static const int N = 1024;
/*
* Image output
*/
void write_image(const std::string& file_name, const unsigned char *data, std::size_t width, std::size_t height)
{
std::ostringstream hdr_oss;
hdr_oss << "P6\n";
hdr_oss << width << " " << height << "\n";
hdr_oss << 255 << "\n";
std::string hdr = hdr_oss.str();
std::ofstream ofs(file_name);
ofs.write(hdr.data(), hdr.size());
ofs.write(reinterpret_cast<const char*>(data), width * height * 3);
}
/*
* Complex arithmetic
*/
struct cu_complex
{
float r; // real part
float i; // imaginary part
__device__ cu_complex() : r{0}, i{0} { }
__device__ cu_complex(float r, float i) : r{r}, i{i} { }
__device__ float modulus2() { return r * r + i * i; }
};
__device__ cu_complex operator+(const cu_complex &a, const cu_complex &b)
{
return cu_complex{a.r + b.r, a.i + b.i};
}
__device__ cu_complex operator*(const cu_complex &a, const cu_complex &b)
{
return cu_complex{a.r * b.r - a.i * b.i, a.r * b.i + a.i * b.r};
}
/*
* Mandelbrot logic
*/
__device__ int mandelbrot(int x, int y)
{
cu_complex z{static_cast<float>(x - N/2) / (N / 4), -static_cast<float>(y - N/2) * 4/N};
cu_complex a;
for (int i = 0; i < 255; ++i)
{
a = a * a + z;
if (a.modulus2() > 4)
{
// diverges
return 255 - i;
}
}
// probably converges
return 0;
}
/*
* Kernel and main function
*/
__global__ void kernel(unsigned char *img)
{
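	// One pixel per block: the launch uses an N x N grid with a single thread per block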
const int x = blockIdx.x;
const int y = blockIdx.y;
const int result = mandelbrot(x, y);
img[0 + 3 * x + 3 * N * y] = result;
img[1 + 3 * x + 3 * N * y] = result;
img[2 + 3 * x + 3 * N * y] = result;
}
int main(int argc, char **argv)
{
unsigned char *img = new unsigned char[3 * N * N];
unsigned char *dev_img;
hipMalloc(&dev_img, 3 * N * N);
dim3 grid_size{N, N};
hipLaunchKernelGGL(( kernel), dim3(grid_size), dim3(1), 0, 0, dev_img);
hipMemcpy(img, dev_img, 3 * N * N, hipMemcpyDeviceToHost);
write_image(output_path, img, N, N);
    hipFree(dev_img);
    delete[] img; // release the host-side image buffer
}
|
b7985054bd8a87f9ab1199a72df049c2c7990582.cu
|
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
/*
* Configuration
*/
static const std::string output_path = "output.ppm";
static const int N = 1024;
/*
* Image output
*/
void write_image(const std::string& file_name, const unsigned char *data, std::size_t width, std::size_t height)
{
std::ostringstream hdr_oss;
hdr_oss << "P6\n";
hdr_oss << width << " " << height << "\n";
hdr_oss << 255 << "\n";
std::string hdr = hdr_oss.str();
    std::ofstream ofs(file_name, std::ios::binary); // binary mode: P6 pixel data must not be newline-translated
ofs.write(hdr.data(), hdr.size());
ofs.write(reinterpret_cast<const char*>(data), width * height * 3);
}
/*
* Complex arithmetic
*/
struct cu_complex
{
float r; // real part
float i; // imaginary part
__device__ cu_complex() : r{0}, i{0} { }
__device__ cu_complex(float r, float i) : r{r}, i{i} { }
__device__ float modulus2() { return r * r + i * i; }
};
__device__ cu_complex operator+(const cu_complex &a, const cu_complex &b)
{
return cu_complex{a.r + b.r, a.i + b.i};
}
__device__ cu_complex operator*(const cu_complex &a, const cu_complex &b)
{
return cu_complex{a.r * b.r - a.i * b.i, a.r * b.i + a.i * b.r};
}
/*
* Mandelbrot logic
*/
__device__ int mandelbrot(int x, int y)
{
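	// Map pixel (x, y) onto the complex plane so the image spans roughly [-2, 2] on both axes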
cu_complex z{static_cast<float>(x - N/2) / (N / 4), -static_cast<float>(y - N/2) * 4/N};
cu_complex a;
for (int i = 0; i < 255; ++i)
{
a = a * a + z;
if (a.modulus2() > 4)
{
// diverges
return 255 - i;
}
}
// probably converges
return 0;
}
/*
* Kernel and main function
*/
__global__ void kernel(unsigned char *img)
{
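	// One pixel per block: the launch uses an N x N grid with a single thread per block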
const int x = blockIdx.x;
const int y = blockIdx.y;
const int result = mandelbrot(x, y);
img[0 + 3 * x + 3 * N * y] = result;
img[1 + 3 * x + 3 * N * y] = result;
img[2 + 3 * x + 3 * N * y] = result;
}
int main(int argc, char **argv)
{
unsigned char *img = new unsigned char[3 * N * N];
unsigned char *dev_img;
cudaMalloc(&dev_img, 3 * N * N);
dim3 grid_size{N, N};
kernel<<<grid_size, 1>>>(dev_img);
cudaMemcpy(img, dev_img, 3 * N * N, cudaMemcpyDeviceToHost);
write_image(output_path, img, N, N);
    cudaFree(dev_img);
    delete[] img; // release the host-side image buffer
}
|
899c499e8e49e01eeab170a0d61e34b1b834b3e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "taso/cuda_helper.h"
using namespace taso;
__global__
void assign_kernel(float* ptr, int size, float value)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = value;
}
}
__global__
void copy_kernel(float* dst, const float* src, int size)
{
CUDA_KERNEL_LOOP(i, size)
{
dst[i] = src[i];
}
}
cudnnActivationMode_t get_activation_mode(ActiMode activation)
{
switch (activation) {
case AC_MODE_SIGMOID:
return CUDNN_ACTIVATION_SIGMOID;
case AC_MODE_RELU:
return CUDNN_ACTIVATION_RELU;
case AC_MODE_TANH:
return CUDNN_ACTIVATION_TANH;
default:
assert(false);
}
// return RELU as default
return CUDNN_ACTIVATION_RELU;
}
void helperSetTensorDescriptor(const Tensor& tensor,
cudnnTensorDescriptor_t tensorDesc)
{
switch(tensor.numDim) {
case 1:
{
int dims[] = {tensor.dim[0], 1, 1, 1};
int strides[] = {tensor.stride[0], 1, 1, 1};
checkCUDNN(cudnnSetTensorNdDescriptor(tensorDesc, CUDNN_DATA_FLOAT,
4, dims, strides));
break;
}
case 2:
{
int dims[] = {tensor.dim[0], tensor.dim[1], 1, 1};
int strides[] = {tensor.stride[0], tensor.stride[1], 1, 1};
checkCUDNN(cudnnSetTensorNdDescriptor(tensorDesc, CUDNN_DATA_FLOAT,
4, dims, strides));
break;
}
default:
{
assert(tensor.numDim >= 3);
checkCUDNN(cudnnSetTensorNdDescriptor(tensorDesc, CUDNN_DATA_FLOAT,
tensor.numDim, tensor.dim, tensor.stride));
}
}
}
void helperSetBroadcastableTensorDescriptor(const Tensor& input,
const Tensor& output,
cudnnTensorDescriptor_t tensorDesc)
{
int dims[16], strides[16];
assert(output.numDim <= 16);
assert(input.numDim <= output.numDim);
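  // Build dims/strides right-aligned (innermost dimension last). A broadcast dimension would
  // need a zero input stride, which cuDNN rejects, hence the assert in the else branch below.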
for (int i = 0; i < output.numDim; i++) {
dims[output.numDim-1-i] = output.dim[output.numDim-1-i];
if (i < input.numDim && input.dim[input.numDim-1-i] > 0) {
strides[output.numDim-1-i] = input.stride[input.numDim-1-i];
} else {
if (dims[output.numDim-1-i] > 1) {
        fprintf(stderr, "cuDNN does not support zero stride for broadcast\n"
                "Consider switching to another library for broadcastable operators.\n");
assert(false);
}
strides[output.numDim-1-i] = 1;
}
}
int num_dim = output.numDim;
if (output.numDim < 4) {
num_dim = 4;
for (int i = output.numDim; i < num_dim; i++) {
dims[i] = 1;
strides[i] = 1;
}
}
//for (int i = 0; i < num_dim; i++)
// printf("dims[%d] = %d input.dim(%d) output.dim(%d)\n", i, dims[i], input.dim[i], output.dim[i]);
//for (int i = 0; i < num_dim; i++)
// printf("strides[%d] = %d input.stride(%d) output.stride(%d)\n", i, strides[i], input.stride[i], output.stride[i]);
checkCUDNN(cudnnSetTensorNdDescriptor(tensorDesc, CUDNN_DATA_FLOAT,
num_dim, dims, strides));
}
|
899c499e8e49e01eeab170a0d61e34b1b834b3e5.cu
|
#include "taso/cuda_helper.h"
using namespace taso;
__global__
void assign_kernel(float* ptr, int size, float value)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = value;
}
}
__global__
void copy_kernel(float* dst, const float* src, int size)
{
CUDA_KERNEL_LOOP(i, size)
{
dst[i] = src[i];
}
}
cudnnActivationMode_t get_activation_mode(ActiMode activation)
{
switch (activation) {
case AC_MODE_SIGMOID:
return CUDNN_ACTIVATION_SIGMOID;
case AC_MODE_RELU:
return CUDNN_ACTIVATION_RELU;
case AC_MODE_TANH:
return CUDNN_ACTIVATION_TANH;
default:
assert(false);
}
// return RELU as default
return CUDNN_ACTIVATION_RELU;
}
void helperSetTensorDescriptor(const Tensor& tensor,
cudnnTensorDescriptor_t tensorDesc)
{
switch(tensor.numDim) {
case 1:
{
int dims[] = {tensor.dim[0], 1, 1, 1};
int strides[] = {tensor.stride[0], 1, 1, 1};
checkCUDNN(cudnnSetTensorNdDescriptor(tensorDesc, CUDNN_DATA_FLOAT,
4, dims, strides));
break;
}
case 2:
{
int dims[] = {tensor.dim[0], tensor.dim[1], 1, 1};
int strides[] = {tensor.stride[0], tensor.stride[1], 1, 1};
checkCUDNN(cudnnSetTensorNdDescriptor(tensorDesc, CUDNN_DATA_FLOAT,
4, dims, strides));
break;
}
default:
{
assert(tensor.numDim >= 3);
checkCUDNN(cudnnSetTensorNdDescriptor(tensorDesc, CUDNN_DATA_FLOAT,
tensor.numDim, tensor.dim, tensor.stride));
}
}
}
void helperSetBroadcastableTensorDescriptor(const Tensor& input,
const Tensor& output,
cudnnTensorDescriptor_t tensorDesc)
{
int dims[16], strides[16];
assert(output.numDim <= 16);
assert(input.numDim <= output.numDim);
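  // Build dims/strides right-aligned (innermost dimension last). A broadcast dimension would
  // need a zero input stride, which cuDNN rejects, hence the assert in the else branch below.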
for (int i = 0; i < output.numDim; i++) {
dims[output.numDim-1-i] = output.dim[output.numDim-1-i];
if (i < input.numDim && input.dim[input.numDim-1-i] > 0) {
strides[output.numDim-1-i] = input.stride[input.numDim-1-i];
} else {
if (dims[output.numDim-1-i] > 1) {
        fprintf(stderr, "cuDNN does not support zero stride for broadcast\n"
                "Consider switching to another library for broadcastable operators.\n");
assert(false);
}
strides[output.numDim-1-i] = 1;
}
}
int num_dim = output.numDim;
if (output.numDim < 4) {
num_dim = 4;
for (int i = output.numDim; i < num_dim; i++) {
dims[i] = 1;
strides[i] = 1;
}
}
//for (int i = 0; i < num_dim; i++)
// printf("dims[%d] = %d input.dim(%d) output.dim(%d)\n", i, dims[i], input.dim[i], output.dim[i]);
//for (int i = 0; i < num_dim; i++)
// printf("strides[%d] = %d input.stride(%d) output.stride(%d)\n", i, strides[i], input.stride[i], output.stride[i]);
checkCUDNN(cudnnSetTensorNdDescriptor(tensorDesc, CUDNN_DATA_FLOAT,
num_dim, dims, strides));
}
|
b8fee604e52236ebf32b3a621f117e3c5b807b2e.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
#define NOMINMAX
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
// int readFile(filename:str, data: array);
#include "file_io.h"
// includes, kernels
#include <vector_reduction_kernel.cu>
//#include <vector_reduction_kernel_adv.cu>
// For simplicity, just to get the idea in this MP, we're fixing the problem size to 512 elements.
#define NUM_ELEMENTS 512
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
float computeOnDevice(float* h_data, int array_mem_size);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run naive scan test
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
	// problem size under test (hard-coded here rather than read from the command line)
	int num_elements = 2047;
int errorM = 0;
const unsigned int array_mem_size = sizeof( float) * num_elements;
// allocate host memory to store the input data
float* h_data = (float*) malloc( array_mem_size);
// * No arguments: Randomly generate input data and compare against the
// host's result.
// * One argument: Read the input data array from the given file.
switch(argc-1)
{
case 1: // One Argument
errorM = readFile(argv[1], h_data);
if(errorM != 1)
{
printf("Error reading input file!\n");
exit(1);
}
break;
		default: // No arguments (or more than one): generate random input
// initialize the input data on the host to be integer values
// between 0 and 1000
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = floorf(1000*(rand()/(float)RAND_MAX));
}
break;
}
// compute reference solution
float reference = 0.0f;
computeGold(&reference , h_data, num_elements);
// **===-------- Modify the body of this function -----------===**
float result = computeOnDevice(h_data, num_elements);
// **===-----------------------------------------------------------===**
// We can use an epsilon of 0 since values are integral and in a range
// that can be exactly represented
float epsilon = 0.0f;
	unsigned int result_regtest = (fabs(result - reference) <= epsilon);
printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
printf( "device: %f host: %f\n", result, reference);
// cleanup memory
free( h_data);
}
// **===----------------- Modify this function ---------------------===**
// Takes h_data from the host, copies it to the device, sets up the grid and thread
// dimensions, executes the kernel function, and copies the result back
// to h_data.
// Note: float* h_data is both the input and the output of this function.
float computeOnDevice(float* h_data, int num_elements)
{
// num is zero or 1 just return solution
if(num_elements == 0)
return 0.0f;
else if (num_elements == 1)
return h_data[0];
else if (num_elements % 2 != 0)
num_elements += 1; //will just have a 0 at the end
// declare device vector
float *d_data;
// calc number of bytes
size_t bytes = (num_elements * sizeof(float));
// malloc on device
hipMalloc(&d_data, bytes);
// copy data to device
hipMemcpy( d_data, h_data, bytes, hipMemcpyHostToDevice);
int block_size, grid_size;
block_size = 512;
//send to appropriate function
if(num_elements == 512){
hipLaunchKernelGGL(( reduction), dim3(1), dim3(block_size) , 0, 0, d_data, num_elements);
} else if (num_elements > 512){
		//find the number of reduction passes: each pass shrinks the data by a factor of block_size (512 = 2^9)
int layer = -1, temp = 1;
while(temp < num_elements){
temp <<= 9;
layer++;
}
temp >>= 9;
printf("temp: %d, layer: %d \n", temp, layer);
grid_size = num_elements;
for(int i = 0; i < layer; i++){
//resize grid every layer
grid_size /= block_size;
//run kernel on decreasing layers, increasing the stride
hipLaunchKernelGGL(( reduction_adv), dim3(grid_size), dim3(block_size) , 0, 0, d_data, num_elements, i, temp);
}
} else{
int exp_less_2 = 1;
while(exp_less_2 < num_elements){
exp_less_2 <<= 1;
}
exp_less_2 >>= 1;
printf("%d\n", exp_less_2);
hipLaunchKernelGGL(( reduction_less), dim3(1), dim3(block_size) , 0, 0, d_data, num_elements, exp_less_2);
}
// Copy result back to host
hipMemcpy( h_data, d_data, bytes, hipMemcpyDeviceToHost );
// print out result
for(int i = 0; i < num_elements; i++){
printf("%lf ", h_data[i]);
}
printf("\n");
// release memory
hipFree(d_data);
// return single point
return h_data[0];
}
|
b8fee604e52236ebf32b3a621f117e3c5b807b2e.cu
|
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
#define NOMINMAX
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
// int readFile(filename:str, data: array);
#include "file_io.h"
// includes, kernels
#include <vector_reduction_kernel.cu>
//#include <vector_reduction_kernel_adv.cu>
// For simplicity, just to get the idea in this MP, we're fixing the problem size to 512 elements.
#define NUM_ELEMENTS 512
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
float computeOnDevice(float* h_data, int array_mem_size);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run naive scan test
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
	// problem size under test (hard-coded here rather than read from the command line)
	int num_elements = 2047;
int errorM = 0;
const unsigned int array_mem_size = sizeof( float) * num_elements;
// allocate host memory to store the input data
float* h_data = (float*) malloc( array_mem_size);
// * No arguments: Randomly generate input data and compare against the
// host's result.
// * One argument: Read the input data array from the given file.
switch(argc-1)
{
case 1: // One Argument
errorM = readFile(argv[1], h_data);
if(errorM != 1)
{
printf("Error reading input file!\n");
exit(1);
}
break;
		default: // No arguments (or more than one): generate random input
// initialize the input data on the host to be integer values
// between 0 and 1000
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = floorf(1000*(rand()/(float)RAND_MAX));
}
break;
}
// compute reference solution
float reference = 0.0f;
computeGold(&reference , h_data, num_elements);
// **===-------- Modify the body of this function -----------===**
float result = computeOnDevice(h_data, num_elements);
// **===-----------------------------------------------------------===**
// We can use an epsilon of 0 since values are integral and in a range
// that can be exactly represented
float epsilon = 0.0f;
	unsigned int result_regtest = (fabs(result - reference) <= epsilon);
printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
printf( "device: %f host: %f\n", result, reference);
// cleanup memory
free( h_data);
}
// **===----------------- Modify this function ---------------------===**
// Takes h_data from the host, copies it to the device, sets up the grid and thread
// dimensions, executes the kernel function, and copies the result back
// to h_data.
// Note: float* h_data is both the input and the output of this function.
float computeOnDevice(float* h_data, int num_elements)
{
// num is zero or 1 just return solution
if(num_elements == 0)
return 0.0f;
else if (num_elements == 1)
return h_data[0];
else if (num_elements % 2 != 0)
num_elements += 1; //will just have a 0 at the end
// declare device vector
float *d_data;
// calc number of bytes
size_t bytes = (num_elements * sizeof(float));
// malloc on device
cudaMalloc(&d_data, bytes);
// copy data to device
cudaMemcpy( d_data, h_data, bytes, cudaMemcpyHostToDevice);
int block_size, grid_size;
block_size = 512;
//send to appropriate function
if(num_elements == 512){
reduction<<<1, block_size >>>(d_data, num_elements);
} else if (num_elements > 512){
		//find the number of reduction passes: each pass shrinks the data by a factor of block_size (512 = 2^9)
int layer = -1, temp = 1;
while(temp < num_elements){
temp <<= 9;
layer++;
}
temp >>= 9;
printf("temp: %d, layer: %d \n", temp, layer);
grid_size = num_elements;
for(int i = 0; i < layer; i++){
//resize grid every layer
grid_size /= block_size;
//run kernel on decreasing layers, increasing the stride
reduction_adv<<<grid_size, block_size >>>(d_data, num_elements, i, temp);
}
} else{
int exp_less_2 = 1;
while(exp_less_2 < num_elements){
exp_less_2 <<= 1;
}
exp_less_2 >>= 1;
printf("%d\n", exp_less_2);
reduction_less<<<1, block_size >>>(d_data, num_elements, exp_less_2);
}
// Copy result back to host
cudaMemcpy( h_data, d_data, bytes, cudaMemcpyDeviceToHost );
// print out result
for(int i = 0; i < num_elements; i++){
printf("%lf ", h_data[i]);
}
printf("\n");
// release memory
cudaFree(d_data);
// return single point
return h_data[0];
}
|
47e2e0558a2f54bf5ff54ea8efecf14bb931b28a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "WTAddKernel.cuh"
void WTAdditionKernel(WTAll &argWT, Document &argDoc, hipStream_t &stream) {
int blockCounter = 0;
int iterBlock = (argWT.numOfWordS - 1) / GridDim + 1;
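	// ceiling of numOfWordS / GridDim: the loop below launches the kernel once per GridDim-sized slice of words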
int* deviceWordLength;
int numOfWordD = argWT.wordLength-argWT.numOfWordS;
hipSetDevice(0);
/*hipMalloc((void**)&deviceWordLength, (1) * sizeof(int));
hipMemcpy(deviceWordLength, &argWT.numOfWordS, sizeof(int),hipMemcpyHostToDevice);*/
for (int i = 0; i < iterBlock; i++) {
/*hipMemcpy(argDoc.d_blockCounter, &blockCounter, (1) * sizeof(int), hipMemcpyHostToDevice);*/
sparseMatrixAdd << <GridDim, BlockDim>> >(argWT.WTGPUChunkVec[0].deviceWTCount, argWT.WTGPUChunkVec[0].deviceWTOffset, argWT.WTGPUChunkVec[0].deviceNZWTCount, argWT.WTGPUChunkVec[0].deviceWTIndex, argWT.WTGPUChunkVec[0].deviceWTValue, argWT.deviceZeroChunkWTCount, argWT.deviceZeroChunkWTOffset, argWT.deviceZeroChunkNZWTCount, argWT.deviceZeroChunkWTIndex, argWT.deviceZeroChunkWTValue, argDoc.GPUChunkVec[0].d_dense, argWT.numOfWordS, blockCounter, argWT.WTGPUChunkVec[0].deviceWTRowSum, numOfWordD);
H_ERR(hipDeviceSynchronize());
blockCounter++;
}
}
void WTDenAdditionKernel(WTD &argWTDen, WTAll &argWT, Document &argDoc, hipStream_t &stream) {
hipSetDevice(0);
denseMatrixAddKernel << <GridDim, BlockDim >> > (argWTDen.WTDenseGPUChunkVec[0].deviceWTDense, argWTDen.deviceZeroWTDense, argWT.WTGPUChunkVec[0].deviceWTOffset, argWTDen.numOfWordD);
H_ERR(hipDeviceSynchronize());
}
|
47e2e0558a2f54bf5ff54ea8efecf14bb931b28a.cu
|
#include "WTAddKernel.cuh"
void WTAdditionKernel(WTAll &argWT, Document &argDoc, cudaStream_t &stream) {
int blockCounter = 0;
int iterBlock = (argWT.numOfWordS - 1) / GridDim + 1;
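	// ceiling of numOfWordS / GridDim: the loop below launches the kernel once per GridDim-sized slice of words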
int* deviceWordLength;
int numOfWordD = argWT.wordLength-argWT.numOfWordS;
cudaSetDevice(0);
/*cudaMalloc((void**)&deviceWordLength, (1) * sizeof(int));
cudaMemcpy(deviceWordLength, &argWT.numOfWordS, sizeof(int),cudaMemcpyHostToDevice);*/
for (int i = 0; i < iterBlock; i++) {
/*cudaMemcpy(argDoc.d_blockCounter, &blockCounter, (1) * sizeof(int), cudaMemcpyHostToDevice);*/
sparseMatrixAdd << <GridDim, BlockDim>> >(argWT.WTGPUChunkVec[0].deviceWTCount, argWT.WTGPUChunkVec[0].deviceWTOffset, argWT.WTGPUChunkVec[0].deviceNZWTCount, argWT.WTGPUChunkVec[0].deviceWTIndex, argWT.WTGPUChunkVec[0].deviceWTValue, argWT.deviceZeroChunkWTCount, argWT.deviceZeroChunkWTOffset, argWT.deviceZeroChunkNZWTCount, argWT.deviceZeroChunkWTIndex, argWT.deviceZeroChunkWTValue, argDoc.GPUChunkVec[0].d_dense, argWT.numOfWordS, blockCounter, argWT.WTGPUChunkVec[0].deviceWTRowSum, numOfWordD);
H_ERR(cudaDeviceSynchronize());
blockCounter++;
}
}
void WTDenAdditionKernel(WTD &argWTDen, WTAll &argWT, Document &argDoc, cudaStream_t &stream) {
cudaSetDevice(0);
denseMatrixAddKernel << <GridDim, BlockDim >> > (argWTDen.WTDenseGPUChunkVec[0].deviceWTDense, argWTDen.deviceZeroWTDense, argWT.WTGPUChunkVec[0].deviceWTOffset, argWTDen.numOfWordD);
H_ERR(cudaDeviceSynchronize());
}
|
64a1bfdeb811e375afa97775f164d0673066cbd6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "head.h"
void read_data(const char *filename, char *buffer, int num){
FILE *fh;
fh = fopen(filename, "r");
fread(buffer, 1, num, fh);
buffer[num] = '\0';
fclose(fh);
}
// shin
__global__ void bwt(char* T, char* BWT, int* SA, int n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) return;
BWT[idx] = SA[idx] == 0 ? 'A' : T[SA[idx] - 1];
}
int main(int argc, char* argv[])
{
float milliseconds = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
	const char* filename = "genome.txt"; //load the local data set
int n; //input size
char *data; //data set pointer
int i = 0; //index
int *SA; //Suffix Array pointer
printf("Please input the size of dataset you want to evaluate (10 - 1000000): \t");
scanf("%d", &n);
data = (char *)malloc((n + 1)*sizeof(char));
read_data(filename, data, n); //read data set from the local file
data[n - 1] = 'A'; // shin
thrust::host_vector<int> h_inp(n + 3);
thrust::host_vector<int> h_SA(n + 3, 0);
thrust::device_vector<int>d_inp;
thrust::device_vector<int>d_SA;
for (i = 0; i<n; i++) //Ascii 'A' -> integer 0 by 'A' - 65
{
h_inp[i] = to_i(data[i]);
}
h_inp[i] = 0; h_inp[i + 1] = 0; h_inp[i + 2] = 0; //prepare for triples
d_inp = h_inp;
d_SA = h_SA;
hipEventRecord(start);
suffixArray(d_inp, d_SA, n, MAX_ALPHA); //dc3/skew algorithm
h_SA = d_SA;
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("GPU construct Suffix Array\nNUM: %d \t Time: %f Sec\n", n, milliseconds / 1000);
// shin bwt parallel ---------------------------
puts("aaaaa");
thrust::device_vector<char> d_T(data, data + n + 1);
thrust::device_vector<char> d_BWT(n + 1);
char *pd_T = thrust::raw_pointer_cast(&d_T[0]);
char *pd_BWT = thrust::raw_pointer_cast(&d_BWT[0]);
int *pd_SA = thrust::raw_pointer_cast(&d_SA[0]);
dim3 block(8, 1);
dim3 grid((n + block.x - 1) / block.x, 1);
hipLaunchKernelGGL(( bwt), dim3(grid), dim3(block), 0, 0, pd_T, pd_BWT, pd_SA, n);
thrust::host_vector<char> h_BWT = d_BWT;
printf("T: %s\n", data);
printf("SA:\n");
for (i = 0; i < n; i++) {
printf("%d ", h_SA[i]);
}
printf("\nBWT:\n");
for (i = 0; i < n; i++) {
printf("%c", h_BWT[i]);
}
putchar('\n');
// ---------------------------------------------
hipEventDestroy(start);
hipEventDestroy(stop);
free(data); //free allocated memory
return 0;
}
void suffixArray(thrust::device_vector<int>& s, thrust::device_vector<int>& SA, int n, int K) {
int n0 = (n + 2) / 3, n1 = (n + 1) / 3, n2 = n / 3, n02 = n0 + n2;
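	// DC3/skew: n0, n1, n2 count positions with index mod 3 == 0, 1, 2; n02 = n0 + n2 is the (padded) size of the mod-1/mod-2 sample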
thrust::device_vector<int>d_s12(n02 + 3, 0);
int *pd_s12 = thrust::raw_pointer_cast(&d_s12[0]);
thrust::device_vector<int>d_SA12(n02 + 3, 0);
int *pd_SA12 = thrust::raw_pointer_cast(&d_SA12[0]);
thrust::device_vector<int>d_s0(n0, 0);
int *pd_s0 = thrust::raw_pointer_cast(&d_s0[0]);
thrust::device_vector<int>d_SA0(n0, 0);
int *pd_SA0 = thrust::raw_pointer_cast(&d_SA0[0]);
thrust::device_vector<int>d_scan(n02 + 3);
int *pd_scan = thrust::raw_pointer_cast(&d_scan[0]);
int *pd_s = thrust::raw_pointer_cast(&s[0]);
int *pd_SA = thrust::raw_pointer_cast(&SA[0]);
dim3 numThreads(1024, 1, 1);
dim3 numBlocks((n02 - 1) / 1024 + 1, 1, 1);
// S12 initialization:
//thrust::sequence(d_s12.begin(), d_s12.begin() + n02);
//thrust::transform(d_s12.begin(), d_s12.begin() + n02, d_s12.begin(), mapping());
hipLaunchKernelGGL(( Init_d_s12) , dim3(numBlocks), dim3(numThreads) , 0, 0, pd_s12, n02);
//radix sort - using SA12 to store keys
hipLaunchKernelGGL(( keybits) , dim3(numBlocks), dim3(numThreads) , 0, 0, pd_SA12, pd_s12, pd_s, n02, 2);
thrust::sort_by_key(d_SA12.begin(), d_SA12.begin() + n02, d_s12.begin());
hipLaunchKernelGGL(( keybits) , dim3(numBlocks), dim3(numThreads) , 0, 0, pd_SA12, pd_s12, pd_s, n02, 1);
thrust::sort_by_key(d_SA12.begin(), d_SA12.begin() + n02, d_s12.begin());
hipLaunchKernelGGL(( keybits) , dim3(numBlocks), dim3(numThreads) , 0, 0, pd_SA12, pd_s12, pd_s, n02, 0);
thrust::sort_by_key(d_SA12.begin(), d_SA12.begin() + n02, d_s12.begin());
d_SA12 = d_s12;
// stably sort the mod 0 suffixes from SA12 by their first character
// find lexicographic names of triples
hipLaunchKernelGGL(( InitScan) , dim3(numBlocks), dim3(numThreads), 0, 0, pd_s, pd_SA12, pd_scan, n02);
thrust::exclusive_scan(d_scan.begin(), d_scan.begin() + n02 + 1, d_scan.begin());
hipLaunchKernelGGL(( Set_suffix_rank) , dim3(numBlocks), dim3(numThreads) , 0, 0, pd_s12, pd_SA12, pd_scan, n02, n0);
int max_rank = d_scan[n02];
// if max_rank is less than the size of s12, we have a repeat. repeat dc3.
// else generate the suffix array of s12 directly
if (max_rank < n02)
{
suffixArray(d_s12, d_SA12, n02, max_rank);
hipLaunchKernelGGL(( Store_unique_ranks) , dim3(numBlocks), dim3(numThreads) , 0, 0, pd_s12, pd_SA12, n02);
}
else{
hipLaunchKernelGGL(( Compute_SA_From_UniqueRank) , dim3(numBlocks), dim3(numThreads) , 0, 0, pd_s12, pd_SA12, n02);
}
hipLaunchKernelGGL(( InitScan2) , dim3(numBlocks), dim3(numThreads) , 0, 0, pd_SA12, pd_scan, n0, n02);
thrust::exclusive_scan(d_scan.begin(), d_scan.begin() + n02, d_scan.begin());
hipLaunchKernelGGL(( Set_S0) , dim3(numBlocks), dim3(numThreads) , 0, 0, pd_s0, pd_SA12, pd_scan, n0, n02);
dim3 numBlocks3((n0 - 1) / 1024 + 1);
hipLaunchKernelGGL(( keybits) , dim3(numBlocks3), dim3(numThreads) , 0, 0, pd_SA0, pd_s0, pd_s, n0, 0);
thrust::sort_by_key(d_SA0.begin(), d_SA0.begin() + n0, d_s0.begin());
d_SA0 = d_s0;
// merge sorted SA0 suffixes and sorted SA12 suffixes
dim3 numBlocks2((n - 1) / 1024 + 1);
	hipLaunchKernelGGL(( merge_suffixes) , dim3(numBlocks2), dim3(numThreads) , 0, 0, pd_SA0, pd_SA12, pd_SA, pd_s, pd_s12, n0, n02, n);
}
|
64a1bfdeb811e375afa97775f164d0673066cbd6.cu
|
#include "head.h"
void read_data(const char *filename, char *buffer, int num){
FILE *fh;
fh = fopen(filename, "r");
fread(buffer, 1, num, fh);
buffer[num] = '\0';
fclose(fh);
}
// shin
__global__ void bwt(char* T, char* BWT, int* SA, int n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) return;
BWT[idx] = SA[idx] == 0 ? 'A' : T[SA[idx] - 1];
}
int main(int argc, char* argv[])
{
float milliseconds = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
	const char* filename = "genome.txt"; //load the local data set
int n; //input size
char *data; //data set pointer
int i = 0; //index
int *SA; //Suffix Array pointer
printf("Please input the size of dataset you want to evaluate (10 - 1000000): \t");
scanf("%d", &n);
data = (char *)malloc((n + 1)*sizeof(char));
read_data(filename, data, n); //read data set from the local file
data[n - 1] = 'A'; // shin
thrust::host_vector<int> h_inp(n + 3);
thrust::host_vector<int> h_SA(n + 3, 0);
thrust::device_vector<int>d_inp;
thrust::device_vector<int>d_SA;
for (i = 0; i<n; i++) //Ascii 'A' -> integer 0 by 'A' - 65
{
h_inp[i] = to_i(data[i]);
}
h_inp[i] = 0; h_inp[i + 1] = 0; h_inp[i + 2] = 0; //prepare for triples
d_inp = h_inp;
d_SA = h_SA;
cudaEventRecord(start);
suffixArray(d_inp, d_SA, n, MAX_ALPHA); //dc3/skew algorithm
h_SA = d_SA;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("GPU construct Suffix Array\nNUM: %d \t Time: %f Sec\n", n, milliseconds / 1000);
// shin bwt parallel ---------------------------
puts("aaaaa");
thrust::device_vector<char> d_T(data, data + n + 1);
thrust::device_vector<char> d_BWT(n + 1);
char *pd_T = thrust::raw_pointer_cast(&d_T[0]);
char *pd_BWT = thrust::raw_pointer_cast(&d_BWT[0]);
int *pd_SA = thrust::raw_pointer_cast(&d_SA[0]);
dim3 block(8, 1);
dim3 grid((n + block.x - 1) / block.x, 1);
bwt<<<grid, block>>>(pd_T, pd_BWT, pd_SA, n);
thrust::host_vector<char> h_BWT = d_BWT;
printf("T: %s\n", data);
printf("SA:\n");
for (i = 0; i < n; i++) {
printf("%d ", h_SA[i]);
}
printf("\nBWT:\n");
for (i = 0; i < n; i++) {
printf("%c", h_BWT[i]);
}
putchar('\n');
// ---------------------------------------------
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(data); //free allocated memory
return 0;
}
void suffixArray(thrust::device_vector<int>& s, thrust::device_vector<int>& SA, int n, int K) {
int n0 = (n + 2) / 3, n1 = (n + 1) / 3, n2 = n / 3, n02 = n0 + n2;
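	// DC3/skew: n0, n1, n2 count positions with index mod 3 == 0, 1, 2; n02 = n0 + n2 is the (padded) size of the mod-1/mod-2 sample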
thrust::device_vector<int>d_s12(n02 + 3, 0);
int *pd_s12 = thrust::raw_pointer_cast(&d_s12[0]);
thrust::device_vector<int>d_SA12(n02 + 3, 0);
int *pd_SA12 = thrust::raw_pointer_cast(&d_SA12[0]);
thrust::device_vector<int>d_s0(n0, 0);
int *pd_s0 = thrust::raw_pointer_cast(&d_s0[0]);
thrust::device_vector<int>d_SA0(n0, 0);
int *pd_SA0 = thrust::raw_pointer_cast(&d_SA0[0]);
thrust::device_vector<int>d_scan(n02 + 3);
int *pd_scan = thrust::raw_pointer_cast(&d_scan[0]);
int *pd_s = thrust::raw_pointer_cast(&s[0]);
int *pd_SA = thrust::raw_pointer_cast(&SA[0]);
dim3 numThreads(1024, 1, 1);
dim3 numBlocks((n02 - 1) / 1024 + 1, 1, 1);
// S12 initialization:
//thrust::sequence(d_s12.begin(), d_s12.begin() + n02);
//thrust::transform(d_s12.begin(), d_s12.begin() + n02, d_s12.begin(), mapping());
Init_d_s12 <<<numBlocks, numThreads >>>(pd_s12, n02);
//radix sort - using SA12 to store keys
keybits <<<numBlocks, numThreads >>>(pd_SA12, pd_s12, pd_s, n02, 2);
thrust::sort_by_key(d_SA12.begin(), d_SA12.begin() + n02, d_s12.begin());
keybits <<<numBlocks, numThreads >>>(pd_SA12, pd_s12, pd_s, n02, 1);
thrust::sort_by_key(d_SA12.begin(), d_SA12.begin() + n02, d_s12.begin());
keybits <<<numBlocks, numThreads >>>(pd_SA12, pd_s12, pd_s, n02, 0);
thrust::sort_by_key(d_SA12.begin(), d_SA12.begin() + n02, d_s12.begin());
d_SA12 = d_s12;
// stably sort the mod 0 suffixes from SA12 by their first character
// find lexicographic names of triples
InitScan <<<numBlocks, numThreads>>>(pd_s, pd_SA12, pd_scan, n02);
thrust::exclusive_scan(d_scan.begin(), d_scan.begin() + n02 + 1, d_scan.begin());
Set_suffix_rank <<<numBlocks, numThreads >>>(pd_s12, pd_SA12, pd_scan, n02, n0);
int max_rank = d_scan[n02];
// if max_rank is less than the size of s12, we have a repeat. repeat dc3.
// else generate the suffix array of s12 directly
if (max_rank < n02)
{
suffixArray(d_s12, d_SA12, n02, max_rank);
Store_unique_ranks <<<numBlocks, numThreads >>>(pd_s12, pd_SA12, n02);
}
else{
Compute_SA_From_UniqueRank <<<numBlocks, numThreads >>>(pd_s12, pd_SA12, n02);
}
InitScan2 <<<numBlocks, numThreads >>>(pd_SA12, pd_scan, n0, n02);
thrust::exclusive_scan(d_scan.begin(), d_scan.begin() + n02, d_scan.begin());
Set_S0 <<<numBlocks, numThreads >>>(pd_s0, pd_SA12, pd_scan, n0, n02);
dim3 numBlocks3((n0 - 1) / 1024 + 1);
keybits <<<numBlocks3, numThreads >>>(pd_SA0, pd_s0, pd_s, n0, 0);
thrust::sort_by_key(d_SA0.begin(), d_SA0.begin() + n0, d_s0.begin());
d_SA0 = d_s0;
// merge sorted SA0 suffixes and sorted SA12 suffixes
dim3 numBlocks2((n - 1) / 1024 + 1);
	merge_suffixes <<<numBlocks2, numThreads >>>(pd_SA0, pd_SA12, pd_SA, pd_s, pd_s12, n0, n02, n);
}
|
6d0bd993a16f99f3bc8d7ddf21f7c48f8e3e742f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>
//
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
#include <NDArrayFactory.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Segment ops linear kernels
// -------------------------------------------------------------------------------------------------------------- //
template<typename T, typename I>
static __global__ void
segmentSumLinearKernel(void *input, Nd4jLong *inputShape, int *starts, int *lengths, Nd4jLong numOfClasses,
void *output, Nd4jLong *outputShape) {
__shared__
T *val;
__shared__
Nd4jLong xLen, zLen, segment, zIndex;
__shared__
T *x;
__shared__
T *z;
__shared__ int threadsPerSegment, start, finish;
if (threadIdx.x == 0) {
threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
segment = blockIdx.x / threadsPerSegment;
x = reinterpret_cast<T *>(input);
z = reinterpret_cast<T *>(output);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape, zLen);
start = starts[segment];
finish = start + lengths[segment];
//val[segment] = ;
z[zIndex] = x[shape::getIndexOffset(start, inputShape, xLen)];
}
}
__syncthreads();
for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape, xLen);
nd4j::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
// -------------------------------------------------------------------------------------------------------------- //
template<typename T, typename I>
static __global__ void
unsortedSegmentSumLinearKernel(void *input, Nd4jLong *inputShape, void *indices, Nd4jLong *indicesShape,
int *starts, int *lengths, Nd4jLong numOfClasses, void *output,
Nd4jLong *outputShape) {
__shared__
T *val;
__shared__
Nd4jLong xLen, zLen, segment, zIndex;
__shared__
T *x;
__shared__
T *z;
__shared__
I *y; //int threadsPerSegment, start, finish;
if (threadIdx.x == 0) {
segment = blockIdx.x;
x = reinterpret_cast<T *>(input);
z = reinterpret_cast<T *>(output);
y = reinterpret_cast<I *>(indices);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
zIndex = shape::getIndexOffset(segment, outputShape, zLen);
if (lengths[segment] > 0)
z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape, xLen)];
else
z[zIndex] = 0; //DataTypeUtils::max<T>();
}
__syncthreads();
if (lengths[segment] > 0)
for (auto e = threadIdx.x; e < xLen; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape, xLen);
auto yIndex = shape::getIndexOffset(e, indicesShape, xLen);
if (y[yIndex] == segment && e != starts[segment]) {
nd4j::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// SegmentSum kernel
template <typename T, typename I>
static __global__ void segmentSumTadKernel(void* inputBuf, Nd4jLong* inputShape, Nd4jLong* inputTads, Nd4jLong* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf, Nd4jLong* outputShape, Nd4jLong* outputTads, Nd4jLong* outputTadOffsets) {
__shared__ T* val;
__shared__ Nd4jLong len, zIndex, total;
__shared__ T* z;
__shared__ int start, finish;
if (threadIdx.x == 0) {
auto segment = indices[blockIdx.x]; // / threadsPerSegment;
z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment];
len = shape::length(inputTads);
start = starts[segment];
finish = start + lengths[segment];
total = shape::sizeAt(inputShape, 0);
}
__syncthreads();
auto idx = blockIdx.x;
if (blockIdx.x <= total) {
auto x = reinterpret_cast<T *>(inputBuf) + inputTadOffsets[idx];
if (blockIdx.x == start) {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads, len);
auto zIndex = shape::getIndexOffset(e, outputTads, len);
nd4j::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
else {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads, len);
auto zIndex = shape::getIndexOffset(e, outputTads, len);
if (lengths[indices[idx]])
nd4j::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static void segmentSumFunctor_(nd4j::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
auto stream = context->getCudaStream();
Nd4jLong numClasses = indices->e<Nd4jLong>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses});
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses});
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
hipLaunchKernelGGL(( segmentSumLinearKernel<T,I>), dim3(numClasses), dim3(input->lengthOf()), numClasses * 32 + 32, *stream, input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
Nd4jLong* inputTads = packX.specialShapeInfo();
Nd4jLong* inputTadOffsets = packX.specialOffsets();
Nd4jLong* outputTads = packZ.specialShapeInfo();
Nd4jLong* outputTadOffsets = packZ.specialOffsets();
hipLaunchKernelGGL(( segmentSumTadKernel<T,I>), dim3(input->sizeAt(0)), dim3(512), 2048, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void segmentSumFunctor(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
output->nullify();
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentSumFunctor_, (context, input, indices, output), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static void unsortedSegmentSumFunctor_(nd4j::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
// NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses});
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses});
// NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
// classes.applyTrueBroadcast(nd4j::BroadcastOpsTuple::Assign(), &row, &classes);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numOfClasses, indices->lengthOf(), (numOfClasses + 1) * 64);
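        // dims packs (grid size, block size, dynamic shared-memory bytes) for the kernel launches below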
// int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
hipLaunchKernelGGL(( unsortedSegmentSumLinearKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
output->assign(0);
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
Nd4jLong* inputTads = packX.specialShapeInfo();
Nd4jLong* inputTadOffsets = packX.specialOffsets();
Nd4jLong* outputTads = packZ.specialShapeInfo();
Nd4jLong* outputTadOffsets = packZ.specialOffsets();
dims.x = input->sizeAt(0);
hipLaunchKernelGGL(( segmentSumTadKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void unsortedSegmentSumFunctor(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
output->nullify();
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSumFunctor_, (context, input, indices, numOfClasses, output),
NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Backpropagate ops
// -------------------------------------------------------------------------------------------------------------- //
// Sorted sum backpropagate
template <typename T, typename I>
static __global__ void segmentSumBPLinearKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape,
void* indicesBuf, Nd4jLong* indicesShape, void* outputBuf, Nd4jLong* outputShape) {
__shared__ T* x;
__shared__ T* gradIn;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, gradLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (auto e = start; e < xLen; e += step) {
auto zOffset = shape::getIndexOffset(e, outputShape, xLen);
auto xOffset = shape::getIndexOffset(e, inputShape, xLen);
auto yOffset = shape::getIndexOffset(e, indicesShape, xLen);
auto classIndex = y[yOffset];
auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape, gradLen);
z[zOffset] = gradOut[gradOffsetO];
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static __global__ void segmentSumBPTadKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape,
void* indicesBuf, Nd4jLong* indicesShape, void* outputBuf, Nd4jLong* outputShape, Nd4jLong* inputTad,
Nd4jLong* inputOffsets, Nd4jLong* gradOutTad, Nd4jLong* gradOutOffsets, Nd4jLong* outTad, Nd4jLong* outOffsets) {
__shared__ T* x;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, yLen, gradLen, currentLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
yLen = shape::length(indicesShape);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
currentLen = shape::length(outTad);
}
__syncthreads();
for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
auto yIndex = shape::getIndexOffset(i, indicesShape, yLen);
auto segment = y[yIndex];
T* currentOut = z + outOffsets[i];
T* outGrad = gradOut + gradOutOffsets[segment];
for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
currentOut[e] = outGrad[e];
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
int segmentSumFunctorBP_(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
hipLaunchKernelGGL(( segmentSumBPLinearKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
auto packGradOut = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->getShapeInfo(), dimensions);
Nd4jLong* inputTads = packX.specialShapeInfo();
Nd4jLong* inputTadOffsets = packX.specialOffsets();
Nd4jLong* outputTads = packZ.specialShapeInfo();
Nd4jLong* outputTadOffsets = packZ.specialOffsets();
Nd4jLong* gradOutTads = packGradOut.specialShapeInfo();
Nd4jLong* gradOutTadOffsets = packGradOut.specialOffsets();
hipLaunchKernelGGL(( segmentSumBPTadKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
int segmentSumFunctorBP(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentSumFunctorBP_, (context, input,
indices, gradOut, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
template <typename T, typename I>
static int unsortedSegmentSumFunctorBP_(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
hipLaunchKernelGGL(( segmentSumBPLinearKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
auto packGradOut = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->getShapeInfo(), dimensions);
Nd4jLong* inputTads = packX.specialShapeInfo();
Nd4jLong* inputTadOffsets = packX.specialOffsets();
Nd4jLong* outputTads = packZ.specialShapeInfo();
Nd4jLong* outputTadOffsets = packZ.specialOffsets();
Nd4jLong* gradOutTads = packGradOut.specialShapeInfo();
Nd4jLong* gradOutTadOffsets = packGradOut.specialOffsets();
hipLaunchKernelGGL(( segmentSumBPTadKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
int unsortedSegmentSumFunctorBP(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentSumFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
}
}
}
|
6d0bd993a16f99f3bc8d7ddf21f7c48f8e3e742f.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>
//
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
#include <NDArrayFactory.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Segment ops linear kernels
// -------------------------------------------------------------------------------------------------------------- //
template<typename T, typename I>
static __global__ void
segmentSumLinearKernel(void *input, Nd4jLong *inputShape, int *starts, int *lengths, Nd4jLong numOfClasses,
void *output, Nd4jLong *outputShape) {
__shared__
T *val;
__shared__
Nd4jLong xLen, zLen, segment, zIndex;
__shared__
T *x;
__shared__
T *z;
__shared__ int threadsPerSegment, start, finish;
if (threadIdx.x == 0) {
threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
segment = blockIdx.x / threadsPerSegment;
x = reinterpret_cast<T *>(input);
z = reinterpret_cast<T *>(output);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape, zLen);
start = starts[segment];
finish = start + lengths[segment];
//val[segment] = ;
z[zIndex] = x[shape::getIndexOffset(start, inputShape, xLen)];
}
}
__syncthreads();
for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape, xLen);
nd4j::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
// -------------------------------------------------------------------------------------------------------------- //
template<typename T, typename I>
static __global__ void
unsortedSegmentSumLinearKernel(void *input, Nd4jLong *inputShape, void *indices, Nd4jLong *indicesShape,
int *starts, int *lengths, Nd4jLong numOfClasses, void *output,
Nd4jLong *outputShape) {
__shared__
T *val;
__shared__
Nd4jLong xLen, zLen, segment, zIndex;
__shared__
T *x;
__shared__
T *z;
__shared__
I *y; //int threadsPerSegment, start, finish;
if (threadIdx.x == 0) {
segment = blockIdx.x;
x = reinterpret_cast<T *>(input);
z = reinterpret_cast<T *>(output);
y = reinterpret_cast<I *>(indices);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
zIndex = shape::getIndexOffset(segment, outputShape, zLen);
if (lengths[segment] > 0)
z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape, xLen)];
else
z[zIndex] = 0; //DataTypeUtils::max<T>();
}
__syncthreads();
if (lengths[segment] > 0)
for (auto e = threadIdx.x; e < xLen; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape, xLen);
auto yIndex = shape::getIndexOffset(e, indicesShape, xLen);
if (y[yIndex] == segment && e != starts[segment]) {
nd4j::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// SegmentSum kernel
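// Note: segmentSumTadKernel handles rank > 1 inputs. Each block processes one TAD (slice along
// dimension 0) and accumulates it element-wise into the output TAD of its segment via atomicAdd.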
template <typename T, typename I>
static __global__ void segmentSumTadKernel(void* inputBuf, Nd4jLong* inputShape, Nd4jLong* inputTads, Nd4jLong* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf, Nd4jLong* outputShape, Nd4jLong* outputTads, Nd4jLong* outputTadOffsets) {
__shared__ T* val;
__shared__ Nd4jLong len, zIndex, total;
__shared__ T* z;
__shared__ int start, finish;
if (threadIdx.x == 0) {
auto segment = indices[blockIdx.x]; // / threadsPerSegment;
z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment];
len = shape::length(inputTads);
start = starts[segment];
finish = start + lengths[segment];
total = shape::sizeAt(inputShape, 0);
}
__syncthreads();
auto idx = blockIdx.x;
if (blockIdx.x <= total) {
auto x = reinterpret_cast<T *>(inputBuf) + inputTadOffsets[idx];
if (blockIdx.x == start) {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads, len);
auto zIndex = shape::getIndexOffset(e, outputTads, len);
nd4j::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
else {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads, len);
auto zIndex = shape::getIndexOffset(e, outputTads, len);
if (lengths[indices[idx]])
nd4j::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
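// Note: host dispatcher for the sorted segment sum. numClasses is taken from the last (sorted)
// index, per-segment start/length arrays are filled by fillUpSegments, and then either the linear
// kernel (vector input) or the TAD kernel is launched.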
template <typename T, typename I>
static void segmentSumFunctor_(nd4j::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
auto stream = context->getCudaStream();
Nd4jLong numClasses = indices->e<Nd4jLong>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses});
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses});
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
segmentSumLinearKernel<T,I><<<numClasses, input->lengthOf(), numClasses * 32 + 32, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
Nd4jLong* inputTads = packX.specialShapeInfo();
Nd4jLong* inputTadOffsets = packX.specialOffsets();
Nd4jLong* outputTads = packZ.specialShapeInfo();
Nd4jLong* outputTadOffsets = packZ.specialOffsets();
segmentSumTadKernel<T,I><<<input->sizeAt(0), 512, 2048, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void segmentSumFunctor(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
output->nullify();
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentSumFunctor_, (context, input, indices, output), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
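// Note: host dispatcher for the unsorted segment sum. numOfClasses is supplied by the caller and
// the indices need not be sorted; the TAD branch zeroes the output first since segments may be
// empty or appear in any order.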
template <typename T, typename I>
static void unsortedSegmentSumFunctor_(nd4j::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
// NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses});
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses});
// NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
// classes.applyTrueBroadcast(nd4j::BroadcastOpsTuple::Assign(), &row, &classes);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numOfClasses, indices->lengthOf(), (numOfClasses + 1) * 64);
// int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
unsortedSegmentSumLinearKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
output->assign(0);
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
Nd4jLong* inputTads = packX.specialShapeInfo();
Nd4jLong* inputTadOffsets = packX.specialOffsets();
Nd4jLong* outputTads = packZ.specialShapeInfo();
Nd4jLong* outputTadOffsets = packZ.specialOffsets();
dims.x = input->sizeAt(0);
segmentSumTadKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void unsortedSegmentSumFunctor(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
output->nullify();
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSumFunctor_, (context, input, indices, numOfClasses, output),
NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Backpropagate ops
// -------------------------------------------------------------------------------------------------------------- //
// Sorted sum backpropagate
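// Since d(sum)/dx = 1 for every element of a segment, backprop simply copies the segment's
// output gradient to each corresponding input position.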
template <typename T, typename I>
static __global__ void segmentSumBPLinearKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape,
void* indicesBuf, Nd4jLong* indicesShape, void* outputBuf, Nd4jLong* outputShape) {
__shared__ T* x;
__shared__ T* gradIn;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, gradLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (auto e = start; e < xLen; e += step) {
auto zOffset = shape::getIndexOffset(e, outputShape, xLen);
auto xOffset = shape::getIndexOffset(e, inputShape, xLen);
auto yOffset = shape::getIndexOffset(e, indicesShape, xLen);
auto classIndex = y[yOffset];
auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape, gradLen);
z[zOffset] = gradOut[gradOffsetO];
}
}
// -------------------------------------------------------------------------------------------------------------- //
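// Note: TAD variant of the sum backprop. Each block copies the gradOut TAD of its segment into
// the matching output TAD.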
template <typename T, typename I>
static __global__ void segmentSumBPTadKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape,
void* indicesBuf, Nd4jLong* indicesShape, void* outputBuf, Nd4jLong* outputShape, Nd4jLong* inputTad,
Nd4jLong* inputOffsets, Nd4jLong* gradOutTad, Nd4jLong* gradOutOffsets, Nd4jLong* outTad, Nd4jLong* outOffsets) {
__shared__ T* x;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, yLen, gradLen, currentLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
yLen = shape::length(indicesShape);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
currentLen = shape::length(outTad);
}
__syncthreads();
for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
auto yIndex = shape::getIndexOffset(i, indicesShape, yLen);
auto segment = y[yIndex];
T* currentOut = z + outOffsets[i];
T* outGrad = gradOut + gradOutOffsets[segment];
for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
currentOut[e] = outGrad[e];
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
int segmentSumFunctorBP_(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
segmentSumBPLinearKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
auto packGradOut = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->getShapeInfo(), dimensions);
Nd4jLong* inputTads = packX.specialShapeInfo();
Nd4jLong* inputTadOffsets = packX.specialOffsets();
Nd4jLong* outputTads = packZ.specialShapeInfo();
Nd4jLong* outputTadOffsets = packZ.specialOffsets();
Nd4jLong* gradOutTads = packGradOut.specialShapeInfo();
Nd4jLong* gradOutTadOffsets = packGradOut.specialOffsets();
segmentSumBPTadKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
int segmentSumFunctorBP(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentSumFunctorBP_, (context, input,
indices, gradOut, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
template <typename T, typename I>
static int unsortedSegmentSumFunctorBP_(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
segmentSumBPLinearKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
auto packGradOut = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->getShapeInfo(), dimensions);
Nd4jLong* inputTads = packX.specialShapeInfo();
Nd4jLong* inputTadOffsets = packX.specialOffsets();
Nd4jLong* outputTads = packZ.specialShapeInfo();
Nd4jLong* outputTadOffsets = packZ.specialOffsets();
Nd4jLong* gradOutTads = packGradOut.specialShapeInfo();
Nd4jLong* gradOutTadOffsets = packGradOut.specialOffsets();
segmentSumBPTadKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
int unsortedSegmentSumFunctorBP(nd4j::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentSumFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
}
}
}
|
6bfd9fd86ca688140c893f9c8fc8dfdb4517d953.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include <iostream>
#include "caffe/layer.hpp"
#include "caffe/common.cuh"
#include "caffe/layers/duplicate_layer.hpp"
using namespace std;
namespace caffe {
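// Note: DuplicateForward broadcasts each per-(n, c) scalar of the bottom blob across the
// height x width spatial plane of the top blob.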
template <typename Dtype>
__global__ void DuplicateForward(const int n,
const int channels, const int height, const int width,
const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
out[index] = in[n * channels + c];
}
}
template <typename Dtype>
void DuplicateLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = top[0]->count();
hipLaunchKernelGGL(( DuplicateForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels_, height_, width_, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
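// Note: DuplicateBackward reduces the top gradient back to per-(n, c) scalars by atomically
// accumulating all spatial positions into out_diff[n * channels + c].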
template <typename Dtype>
__global__ void DuplicateBackward(const int n,
const int channels, const int height, const int width,
const Dtype* in_diff, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
atomicAdd(&out_diff[n * channels + c], in_diff[index]);
}
}
template <typename Dtype>
void DuplicateLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = top[0]->count();
caffe_gpu_set(bottom[0]->count() , Dtype(0), bottom_diff);
hipLaunchKernelGGL(( DuplicateBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels_, height_, width_, top_diff, bottom_diff);
CUDA_POST_KERNEL_CHECK;
bottom_diff = bottom[0]->mutable_cpu_diff();
//cout << bottom_diff[0] << endl;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DuplicateLayer);
} // namespace caffe
|
6bfd9fd86ca688140c893f9c8fc8dfdb4517d953.cu
|
#include <algorithm>
#include <vector>
#include <iostream>
#include "caffe/layer.hpp"
#include "caffe/common.cuh"
#include "caffe/layers/duplicate_layer.hpp"
using namespace std;
namespace caffe {
template <typename Dtype>
__global__ void DuplicateForward(const int n,
const int channels, const int height, const int width,
const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
out[index] = in[n * channels + c];
}
}
template <typename Dtype>
void DuplicateLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = top[0]->count();
DuplicateForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels_, height_, width_, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void DuplicateBackward(const int n,
const int channels, const int height, const int width,
const Dtype* in_diff, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
atomicAdd(&out_diff[n * channels + c], in_diff[index]);
}
}
template <typename Dtype>
void DuplicateLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = top[0]->count();
caffe_gpu_set(bottom[0]->count() , Dtype(0), bottom_diff);
DuplicateBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels_, height_, width_, top_diff, bottom_diff);
CUDA_POST_KERNEL_CHECK;
bottom_diff = bottom[0]->mutable_cpu_diff();
//cout << bottom_diff[0] << endl;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DuplicateLayer);
} // namespace caffe
|