hip_filename (5-84 chars) | hip_content (79-9.69M chars) | cuda_filename (4-83 chars) | cuda_content (19-9.69M chars)
---|---|---|---
aee5065de942d00491e9b6f8b02cd0068924f43f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <curd_lib_host.h>
#ifndef _BUCKETSORT_KERNEL_H_
#define _BUCKETSORT_KERNEL_H_
#include <stdio.h>
#define BUCKET_WARP_LOG_SIZE 5
#define BUCKET_WARP_N 1
#ifdef BUCKET_WG_SIZE_1
#define BUCKET_THREAD_N BUCKET_WG_SIZE_1
#else
#define BUCKET_THREAD_N (BUCKET_WARP_N << BUCKET_WARP_LOG_SIZE)
#endif
#define BUCKET_BLOCK_MEMORY (DIVISIONS * BUCKET_WARP_N)
#define BUCKET_BAND 128
texture<float, 1, hipReadModeElementType> texPivot;
__device__ int addOffset(volatile unsigned int *s_offset, unsigned int data, unsigned int threadTag){
unsigned int count;
do{
count = s_offset[data] & 0x07FFFFFFU;
count = threadTag | (count + 1);
s_offset[data] = count;
}while(s_offset[data] != count);
return (count & 0x07FFFFFFU) - 1;
}
__global__ void
bucketcount( float *input, int *indice, unsigned int *d_prefixoffsets, int size)
{
volatile __shared__ unsigned int s_offset[BUCKET_BLOCK_MEMORY];
const unsigned int threadTag = threadIdx.x << (32 - BUCKET_WARP_LOG_SIZE);
const int warpBase = (threadIdx.x >> BUCKET_WARP_LOG_SIZE) * DIVISIONS;
const int numThreads = blockDim.x * gridDim.x;
for (int i = threadIdx.x; i < BUCKET_BLOCK_MEMORY; i += blockDim.x)
s_offset[i] = 0;
__syncthreads();
for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < size; tid += numThreads) {
float elem = input[tid];
int idx = DIVISIONS/2 - 1;
int jump = DIVISIONS/4;
float piv = tex1Dfetch(texPivot, idx); //s_pivotpoints[idx];
while(jump >= 1){
idx = (elem < piv) ? (idx - jump) : (idx + jump);
piv = tex1Dfetch(texPivot, idx); //s_pivotpoints[idx];
jump /= 2;
}
idx = (elem < piv) ? idx : (idx + 1);
indice[tid] = (addOffset(s_offset + warpBase, idx, threadTag) << LOG_DIVISIONS) + idx; //atomicInc(&offsets[idx], size + 1);
}
__syncthreads();
int prefixBase = blockIdx.x * BUCKET_BLOCK_MEMORY;
for (int i = threadIdx.x; i < BUCKET_BLOCK_MEMORY; i += blockDim.x)
d_prefixoffsets[prefixBase + i] = s_offset[i] & 0x07FFFFFFU;
}
__global__ void bucketprefixoffset(unsigned int *d_prefixoffsets, unsigned int *d_offsets, int blocks) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int size = blocks * BUCKET_BLOCK_MEMORY;
int sum = 0;
for (int i = tid; i < size; i += DIVISIONS) {
int x = d_prefixoffsets[i];
d_prefixoffsets[i] = sum;
sum += x;
}
d_offsets[tid] = sum;
}
__global__ void
bucketsort(float *input, int *indice, float *output, int size, unsigned int *d_prefixoffsets,
unsigned int *l_offsets)
{
volatile __shared__ unsigned int s_offset[BUCKET_BLOCK_MEMORY];
int prefixBase = blockIdx.x * BUCKET_BLOCK_MEMORY;
const int warpBase = (threadIdx.x >> BUCKET_WARP_LOG_SIZE) * DIVISIONS;
const int numThreads = blockDim.x * gridDim.x;
for (int i = threadIdx.x; i < BUCKET_BLOCK_MEMORY; i += blockDim.x)
s_offset[i] = l_offsets[i & (DIVISIONS - 1)] + d_prefixoffsets[prefixBase + i];
__syncthreads();
for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < size; tid += numThreads) {
float elem = input[tid];
int id = indice[tid];
output[s_offset[warpBase + (id & (DIVISIONS - 1))] + (id >> LOG_DIVISIONS)] = elem;
int test = s_offset[warpBase + (id & (DIVISIONS - 1))] + (id >> LOG_DIVISIONS);
}
}
#endif
|
aee5065de942d00491e9b6f8b02cd0068924f43f.cu
|
#include <curd_lib_host.h>
#ifndef _BUCKETSORT_KERNEL_H_
#define _BUCKETSORT_KERNEL_H_
#include <stdio.h>
#define BUCKET_WARP_LOG_SIZE 5
#define BUCKET_WARP_N 1
#ifdef BUCKET_WG_SIZE_1
#define BUCKET_THREAD_N BUCKET_WG_SIZE_1
#else
#define BUCKET_THREAD_N (BUCKET_WARP_N << BUCKET_WARP_LOG_SIZE)
#endif
#define BUCKET_BLOCK_MEMORY (DIVISIONS * BUCKET_WARP_N)
#define BUCKET_BAND 128
texture<float, 1, cudaReadModeElementType> texPivot;
__device__ int addOffset(volatile unsigned int *s_offset, unsigned int data, unsigned int threadTag){
unsigned int count;
do{
count = s_offset[data] & 0x07FFFFFFU;
count = threadTag | (count + 1);
s_offset[data] = count;
}while(s_offset[data] != count);
return (count & 0x07FFFFFFU) - 1;
}
__global__ void
bucketcount( float *input, int *indice, unsigned int *d_prefixoffsets, int size)
{
volatile __shared__ unsigned int s_offset[BUCKET_BLOCK_MEMORY];
const unsigned int threadTag = threadIdx.x << (32 - BUCKET_WARP_LOG_SIZE);
const int warpBase = (threadIdx.x >> BUCKET_WARP_LOG_SIZE) * DIVISIONS;
const int numThreads = blockDim.x * gridDim.x;
for (int i = threadIdx.x; i < BUCKET_BLOCK_MEMORY; i += blockDim.x)
s_offset[i] = 0;
__syncthreads();
for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < size; tid += numThreads) {
float elem = input[tid];
int idx = DIVISIONS/2 - 1;
int jump = DIVISIONS/4;
float piv = tex1Dfetch(texPivot, idx); //s_pivotpoints[idx];
while(jump >= 1){
idx = (elem < piv) ? (idx - jump) : (idx + jump);
piv = tex1Dfetch(texPivot, idx); //s_pivotpoints[idx];
jump /= 2;
}
idx = (elem < piv) ? idx : (idx + 1);
indice[tid] = (addOffset(s_offset + warpBase, idx, threadTag) << LOG_DIVISIONS) + idx; //atomicInc(&offsets[idx], size + 1);
}
__syncthreads();
int prefixBase = blockIdx.x * BUCKET_BLOCK_MEMORY;
for (int i = threadIdx.x; i < BUCKET_BLOCK_MEMORY; i += blockDim.x)
d_prefixoffsets[prefixBase + i] = s_offset[i] & 0x07FFFFFFU;
}
__global__ void bucketprefixoffset(unsigned int *d_prefixoffsets, unsigned int *d_offsets, int blocks) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int size = blocks * BUCKET_BLOCK_MEMORY;
int sum = 0;
for (int i = tid; i < size; i += DIVISIONS) {
int x = d_prefixoffsets[i];
d_prefixoffsets[i] = sum;
sum += x;
}
d_offsets[tid] = sum;
}
__global__ void
bucketsort(float *input, int *indice, float *output, int size, unsigned int *d_prefixoffsets,
unsigned int *l_offsets)
{
volatile __shared__ unsigned int s_offset[BUCKET_BLOCK_MEMORY];
int prefixBase = blockIdx.x * BUCKET_BLOCK_MEMORY;
const int warpBase = (threadIdx.x >> BUCKET_WARP_LOG_SIZE) * DIVISIONS;
const int numThreads = blockDim.x * gridDim.x;
for (int i = threadIdx.x; i < BUCKET_BLOCK_MEMORY; i += blockDim.x)
s_offset[i] = l_offsets[i & (DIVISIONS - 1)] + d_prefixoffsets[prefixBase + i];
__syncthreads();
for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < size; tid += numThreads) {
float elem = input[tid];
int id = indice[tid];
output[s_offset[warpBase + (id & (DIVISIONS - 1))] + (id >> LOG_DIVISIONS)] = elem;
int test = s_offset[warpBase + (id & (DIVISIONS - 1))] + (id >> LOG_DIVISIONS);
}
}
#endif
|
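The two kernel files above assume that DIVISIONS and LOG_DIVISIONS are supplied by the surrounding Rodinia bucketsort host code, which is not part of this row. The following is a minimal host-side sketch (CUDA spelling; the .hip column would use the hip* equivalents) of how the three kernels might be driven. The buffer names, the grid size of 14 blocks, and DIVISIONS = 1024 are assumptions for illustration, not taken from the original sources.

```cuda
// Hypothetical driver; assumes it is compiled in the same translation unit as
// the kernels above, with -DDIVISIONS=1024 -DLOG_DIVISIONS=10 passed to nvcc.
void run_bucketsort(const float* h_input, float* h_output, int size,
                    const float* h_pivots /* DIVISIONS pivot points */) {
    const int blocks  = 14;               // assumed grid size
    const int threads = BUCKET_THREAD_N;  // one 32-thread warp per block by default

    float *d_input, *d_output, *d_pivots;
    int *d_indice;
    unsigned int *d_prefixoffsets, *d_offsets;
    cudaMalloc(&d_input,  size * sizeof(float));
    cudaMalloc(&d_output, size * sizeof(float));
    cudaMalloc(&d_indice, size * sizeof(int));
    cudaMalloc(&d_pivots, DIVISIONS * sizeof(float));
    cudaMalloc(&d_prefixoffsets, blocks * BUCKET_BLOCK_MEMORY * sizeof(unsigned int));
    cudaMalloc(&d_offsets, DIVISIONS * sizeof(unsigned int));

    cudaMemcpy(d_input,  h_input,  size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_pivots, h_pivots, DIVISIONS * sizeof(float), cudaMemcpyHostToDevice);
    // Legacy texture-reference binding, matching tex1Dfetch(texPivot, ...) above.
    cudaBindTexture(0, texPivot, d_pivots, DIVISIONS * sizeof(float));

    // 1. Per-block bucket histograms plus a per-element rank within its bucket.
    bucketcount<<<blocks, threads>>>(d_input, d_indice, d_prefixoffsets, size);
    // 2. Prefix sum over the per-block counters, one thread per bucket.
    bucketprefixoffset<<<DIVISIONS / threads, threads>>>(d_prefixoffsets, d_offsets, blocks);
    // (The original host code additionally scans d_offsets into global bucket
    //  base offsets before the scatter; that step is omitted here.)
    // 3. Scatter every element to its final position.
    bucketsort<<<blocks, threads>>>(d_input, d_indice, d_output, size,
                                    d_prefixoffsets, d_offsets);

    cudaMemcpy(h_output, d_output, size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaUnbindTexture(texPivot);
    cudaFree(d_input);  cudaFree(d_output);  cudaFree(d_indice);
    cudaFree(d_pivots); cudaFree(d_prefixoffsets); cudaFree(d_offsets);
}
```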
5440e0962af4d1268bd274c9232129b6f8ce7f6c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string>
#include <device_launch_parameters.h>
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Gtype, typename Wtype, typename Htype>
__global__ void SGDRegUpdateAllAndClear(int N,
Gtype* g, Wtype* w, Htype* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
CUDA_KERNEL_LOOP(i, N) {
Wtype reg = reg_L2 ? w[i] : Wtype((Wtype(0) < w[i]) - (w[i] < Wtype(0)));
Wtype gr = Wtype(g[i]) + reg * local_decay;
gr = h[i] = momentum * h[i] + local_rate * gr;
w[i] -= gr;
g[i] = clear_grads ? Gtype(0) : Gtype(gr);
}
}
template<>
__global__ void SGDRegUpdateAllAndClear<half, half, half>(int N,
half* g, half* w, half* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
half hz;
CUDA_KERNEL_LOOP(i, N) {
float wf = __half2float(w[i]);
float gf = __half2float(g[i]);
float hf = __half2float(h[i]);
float reg = reg_L2 ? wf : float((0.F < wf)-(wf < 0.F));
gf += reg * local_decay;
gf = hf = momentum * hf + local_rate * gf;
wf -= gf;
h[i] = float2half_clip(hf);
w[i] = float2half_clip(wf);
g[i] = clear_grads ? hz : float2half_clip(gf);
}
}
template<>
__global__ void SGDRegUpdateAllAndClear<float, float, half>(int N,
float* g, float* w, half* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
half hz;
CUDA_KERNEL_LOOP(i, N) {
float wf = w[i];
float gf = g[i];
float hf = __half2float(h[i]);
float reg = reg_L2 ? wf : float((0.F < wf)-(wf < 0.F));
gf += reg * local_decay;
gf = hf = momentum * hf + local_rate * gf;
wf -= gf;
h[i] = float2half_clip(hf);
w[i] = wf;
g[i] = clear_grads ? 0.F : gf;
}
}
template<>
__global__ void SGDRegUpdateAllAndClear<half, float, float>(int N,
half* g, float* w, float* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
half hz;
CUDA_KERNEL_LOOP(i, N) {
float reg = reg_L2 ? w[i] : (0.F < w[i]) - (w[i] < 0.F);
float gr = __half2float(g[i]) + reg * local_decay;
gr = h[i] = momentum * h[i] + local_rate * gr;
w[i] -= gr;
g[i] = clear_grads ? hz : float2half_clip(h[i]);
}
}
template<typename Gtype, typename Wtype, typename Htype>
void sgd_reg_update_all_and_clear_gpu(int N,
Gtype* g, Wtype* w, Htype* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
hipblasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<hipblasHandle_t>(handle);
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
bool reg_L2 = (reg_type == "L2") || (reg_type == "L2_unitary");
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SGDRegUpdateAllAndClear), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N,
g, w, h,
momentum, local_rate, local_decay, reg_L2, clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template void sgd_reg_update_all_and_clear_gpu<float16, double, double>(
int, float16*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, float, float>(
int, float*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, double, double>(
int, float*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, float16, float16>(
int, float*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<double, float, float>(
int, double*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<double, double, double>(
int, double*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<double, float16, float16>(
int, double*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, float, float16>(
int, float*, float*, float16*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, float, double>(
int, float*, float*, double*,
float, float, const std::string&, float, void*, bool);
template<>
void
sgd_reg_update_all_and_clear_gpu<float16, float16>(int N,
float16* g, float16* w, float16* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
hipblasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<hipblasHandle_t>(handle);
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SGDRegUpdateAllAndClear), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N,
reinterpret_cast<half*>(g), reinterpret_cast<half*>(w), reinterpret_cast<half*>(h),
momentum, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void
sgd_reg_update_all_and_clear_gpu<float16, float>(int N,
float16* g, float* w, float* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
hipblasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<hipblasHandle_t>(handle);
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SGDRegUpdateAllAndClear), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
N, reinterpret_cast<half*>(g), w, h, momentum, local_rate,
local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
} // namespace caffe
|
5440e0962af4d1268bd274c9232129b6f8ce7f6c.cu
|
#include <string>
#include <device_launch_parameters.h>
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Gtype, typename Wtype, typename Htype>
__global__ void SGDRegUpdateAllAndClear(int N,
Gtype* g, Wtype* w, Htype* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
CUDA_KERNEL_LOOP(i, N) {
Wtype reg = reg_L2 ? w[i] : Wtype((Wtype(0) < w[i]) - (w[i] < Wtype(0)));
Wtype gr = Wtype(g[i]) + reg * local_decay;
gr = h[i] = momentum * h[i] + local_rate * gr;
w[i] -= gr;
g[i] = clear_grads ? Gtype(0) : Gtype(gr);
}
}
template<>
__global__ void SGDRegUpdateAllAndClear<half, half, half>(int N,
half* g, half* w, half* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
half hz;
CUDA_KERNEL_LOOP(i, N) {
float wf = __half2float(w[i]);
float gf = __half2float(g[i]);
float hf = __half2float(h[i]);
float reg = reg_L2 ? wf : float((0.F < wf)-(wf < 0.F));
gf += reg * local_decay;
gf = hf = momentum * hf + local_rate * gf;
wf -= gf;
h[i] = float2half_clip(hf);
w[i] = float2half_clip(wf);
g[i] = clear_grads ? hz : float2half_clip(gf);
}
}
template<>
__global__ void SGDRegUpdateAllAndClear<float, float, half>(int N,
float* g, float* w, half* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
half hz;
CUDA_KERNEL_LOOP(i, N) {
float wf = w[i];
float gf = g[i];
float hf = __half2float(h[i]);
float reg = reg_L2 ? wf : float((0.F < wf)-(wf < 0.F));
gf += reg * local_decay;
gf = hf = momentum * hf + local_rate * gf;
wf -= gf;
h[i] = float2half_clip(hf);
w[i] = wf;
g[i] = clear_grads ? 0.F : gf;
}
}
template<>
__global__ void SGDRegUpdateAllAndClear<half, float, float>(int N,
half* g, float* w, float* h,
float momentum, float local_rate, float local_decay, bool reg_L2, bool clear_grads) {
half hz;
CUDA_KERNEL_LOOP(i, N) {
float reg = reg_L2 ? w[i] : (0.F < w[i]) - (w[i] < 0.F);
float gr = __half2float(g[i]) + reg * local_decay;
gr = h[i] = momentum * h[i] + local_rate * gr;
w[i] -= gr;
g[i] = clear_grads ? hz : float2half_clip(h[i]);
}
}
template<typename Gtype, typename Wtype, typename Htype>
void sgd_reg_update_all_and_clear_gpu(int N,
Gtype* g, Wtype* w, Htype* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
cublasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<cublasHandle_t>(handle);
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
bool reg_L2 = (reg_type == "L2") || (reg_type == "L2_unitary");
// NOLINT_NEXT_LINE(whitespace/operators)
SGDRegUpdateAllAndClear<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N,
g, w, h,
momentum, local_rate, local_decay, reg_L2, clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template void sgd_reg_update_all_and_clear_gpu<float16, double, double>(
int, float16*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, float, float>(
int, float*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, double, double>(
int, float*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, float16, float16>(
int, float*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<double, float, float>(
int, double*, float*, float*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<double, double, double>(
int, double*, double*, double*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<double, float16, float16>(
int, double*, float16*, float16*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, float, float16>(
int, float*, float*, float16*,
float, float, const std::string&, float, void*, bool);
template void sgd_reg_update_all_and_clear_gpu<float, float, double>(
int, float*, float*, double*,
float, float, const std::string&, float, void*, bool);
template<>
void
sgd_reg_update_all_and_clear_gpu<float16, float16>(int N,
float16* g, float16* w, float16* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
cublasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<cublasHandle_t>(handle);
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
SGDRegUpdateAllAndClear<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N,
reinterpret_cast<half*>(g), reinterpret_cast<half*>(w), reinterpret_cast<half*>(h),
momentum, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void
sgd_reg_update_all_and_clear_gpu<float16, float>(int N,
float16* g, float* w, float* h,
float momentum, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
cublasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<cublasHandle_t>(handle);
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
SGDRegUpdateAllAndClear<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>
(N, reinterpret_cast<half*>(g), w, h, momentum, local_rate,
local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
} // namespace caffe
|
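All of the SGDRegUpdateAllAndClear specializations above apply the same per-element update; the half variants only add float conversions and clipping. A scalar CPU reference (an illustrative sketch, not part of Caffe) may make the arithmetic easier to follow:

```cpp
#include <string>

// Illustrative CPU mirror of the per-element update done by SGDRegUpdateAllAndClear
// (float case, no half conversions or clipping).
void sgd_reg_update_cpu(int N, float* g, float* w, float* h,
                        float momentum, float local_rate,
                        const std::string& reg_type, float local_decay,
                        bool clear_grads) {
  const bool reg_L2 = (reg_type == "L2") || (reg_type == "L2_unitary");
  for (int i = 0; i < N; ++i) {
    // L2 decay uses the weight itself; otherwise L1 decay uses its sign.
    float reg = reg_L2 ? w[i] : float((0.f < w[i]) - (w[i] < 0.f));
    float gr  = g[i] + reg * local_decay;           // regularized gradient
    gr = h[i] = momentum * h[i] + local_rate * gr;  // momentum history update
    w[i] -= gr;                                     // apply the step
    g[i]  = clear_grads ? 0.f : gr;                 // optionally clear the gradient
  }
}
```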
e1deeba2e442af72050e3d1e0536b5a09f5ecc10.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "hipcub/hipcub.hpp"
#include <limits>
inline int up2(int len, int th) { return (len - 1) / th + 1; }
// Implementations
template <typename scalar_t>
__global__
void flex_pool_forward_kernel_cuda_impl(
const int B, const int N, const int K, const int D,
const scalar_t* features,
const int* neighborhood,
scalar_t* output,
int* argmax,
scalar_t float_min_value)
{
const int b = blockIdx.z;
for (int d = blockIdx.y * blockDim.y + threadIdx.y; d < D;
d += blockDim.y * gridDim.y)
{
for (int n = blockIdx.x * blockDim.x + threadIdx.x; n < N;
n += blockDim.x * gridDim.x)
{
scalar_t best_value = float_min_value;
int best_id = 0;
const int current_flat = b * D * N + d * N + n;
for (int k_ = 0; k_ < K; ++k_)
{
const int other_global_id = neighborhood[b * K * N + k_ * N + n];
const scalar_t v = features[b * D * N + d * N + other_global_id];
if (best_value < v)
{
best_id = other_global_id;
best_value = v;
}
}
output[current_flat] = best_value;
argmax[current_flat] = best_id;
}
}
}
template <typename scalar_t>
__global__
void flex_pool_backward_kernel_cuda_impl(
const int B, const int N, const int K, const int D,
const scalar_t* features,
const int* neighborhood,
const scalar_t* topdiff,
const int* argmax,
scalar_t* grad_features)
{
const int b = blockIdx.z;
for (int d = blockIdx.y * blockDim.y + threadIdx.y; d < D;
d += blockDim.y * gridDim.y)
{
for (int n = blockIdx.x * blockDim.x + threadIdx.x; n < N;
n += blockDim.x * gridDim.x)
{
const int top_id_flat = b * D * N + d * N + n;
const int argmax_id = argmax[top_id_flat];
const int bottom_id_flat = b * D * N + d * N + argmax_id;
// TODO(patwie): scattered write, yeah :-(
atomicAdd(&grad_features[bottom_id_flat], topdiff[top_id_flat]);
}
}
}
// Interface
void flex_pool_forward_kernel_cuda(
at::Tensor features,
at::Tensor neighborhood,
at::Tensor output,
at::Tensor argmax)
{
// get dimensions
const int B = neighborhood.size(0);
const int K = neighborhood.size(1);
const int N = neighborhood.size(2);
const int D = features.size(1);
const int threads = 32;
dim3 block(threads, threads, 1);
dim3 grid(up2(N, threads), up2(D, threads), B);
argmax.zero_();
AT_DISPATCH_FLOATING_TYPES(
features.type(), "flex_pool_forward_kernel_cuda", ([&]
{
output.fill_(std::numeric_limits<scalar_t>::lowest());
hipLaunchKernelGGL(( flex_pool_forward_kernel_cuda_impl<scalar_t>), dim3(grid), dim3(block), 0, 0,
B, N, K, D,
features.data<scalar_t>(),
neighborhood.data<int>(),
output.data<scalar_t>(),
argmax.data<int>(),
std::numeric_limits<scalar_t>::lowest());
}));
}
void flex_pool_backward_kernel_cuda(
at::Tensor features,
at::Tensor neighborhood,
at::Tensor topdiff,
at::Tensor argmax,
at::Tensor grad_features)
{
// get dimensions
const int B = neighborhood.size(0);
const int K = neighborhood.size(1);
const int N = neighborhood.size(2);
const int D = features.size(1);
const int threads = 32;
dim3 block(threads, threads, 1);
dim3 grid(up2(N, threads), up2(D, threads), B);
grad_features.zero_();
AT_DISPATCH_FLOATING_TYPES(
features.type(), "flex_pool_backward_kernel_cuda", ([&]
{
hipLaunchKernelGGL(( flex_pool_backward_kernel_cuda_impl<scalar_t>), dim3(grid), dim3(block), 0, 0,
B, N, K, D,
features.data<scalar_t>(),
neighborhood.data<int>(),
topdiff.data<scalar_t>(),
argmax.data<int>(),
grad_features.data<scalar_t>());
}));
}
|
e1deeba2e442af72050e3d1e0536b5a09f5ecc10.cu
|
#include "ATen/ATen.h"
#include "cub/cub.cuh"
#include <limits>
inline int up2(int len, int th) { return (len - 1) / th + 1; }
// Implementations
template <typename scalar_t>
__global__
void flex_pool_forward_kernel_cuda_impl(
const int B, const int N, const int K, const int D,
const scalar_t* features,
const int* neighborhood,
scalar_t* output,
int* argmax,
scalar_t float_min_value)
{
const int b = blockIdx.z;
for (int d = blockIdx.y * blockDim.y + threadIdx.y; d < D;
d += blockDim.y * gridDim.y)
{
for (int n = blockIdx.x * blockDim.x + threadIdx.x; n < N;
n += blockDim.x * gridDim.x)
{
scalar_t best_value = float_min_value;
int best_id = 0;
const int current_flat = b * D * N + d * N + n;
for (int k_ = 0; k_ < K; ++k_)
{
const int other_global_id = neighborhood[b * K * N + k_ * N + n];
const scalar_t v = features[b * D * N + d * N + other_global_id];
if (best_value < v)
{
best_id = other_global_id;
best_value = v;
}
}
output[current_flat] = best_value;
argmax[current_flat] = best_id;
}
}
}
template <typename scalar_t>
__global__
void flex_pool_backward_kernel_cuda_impl(
const int B, const int N, const int K, const int D,
const scalar_t* features,
const int* neighborhood,
const scalar_t* topdiff,
const int* argmax,
scalar_t* grad_features)
{
const int b = blockIdx.z;
for (int d = blockIdx.y * blockDim.y + threadIdx.y; d < D;
d += blockDim.y * gridDim.y)
{
for (int n = blockIdx.x * blockDim.x + threadIdx.x; n < N;
n += blockDim.x * gridDim.x)
{
const int top_id_flat = b * D * N + d * N + n;
const int argmax_id = argmax[top_id_flat];
const int bottom_id_flat = b * D * N + d * N + argmax_id;
// TODO(patwie): scattered write, yeah :-(
atomicAdd(&grad_features[bottom_id_flat], topdiff[top_id_flat]);
}
}
}
// Interface
void flex_pool_forward_kernel_cuda(
at::Tensor features,
at::Tensor neighborhood,
at::Tensor output,
at::Tensor argmax)
{
// get dimensions
const int B = neighborhood.size(0);
const int K = neighborhood.size(1);
const int N = neighborhood.size(2);
const int D = features.size(1);
const int threads = 32;
dim3 block(threads, threads, 1);
dim3 grid(up2(N, threads), up2(D, threads), B);
argmax.zero_();
AT_DISPATCH_FLOATING_TYPES(
features.type(), "flex_pool_forward_kernel_cuda", ([&]
{
output.fill_(std::numeric_limits<scalar_t>::lowest());
flex_pool_forward_kernel_cuda_impl<scalar_t><<<grid, block>>>(
B, N, K, D,
features.data<scalar_t>(),
neighborhood.data<int>(),
output.data<scalar_t>(),
argmax.data<int>(),
std::numeric_limits<scalar_t>::lowest());
}));
}
void flex_pool_backward_kernel_cuda(
at::Tensor features,
at::Tensor neighborhood,
at::Tensor topdiff,
at::Tensor argmax,
at::Tensor grad_features)
{
// get dimensions
const int B = neighborhood.size(0);
const int K = neighborhood.size(1);
const int N = neighborhood.size(2);
const int D = features.size(1);
const int threads = 32;
dim3 block(threads, threads, 1);
dim3 grid(up2(N, threads), up2(D, threads), B);
grad_features.zero_();
AT_DISPATCH_FLOATING_TYPES(
features.type(), "flex_pool_backward_kernel_cuda", ([&]
{
flex_pool_backward_kernel_cuda_impl<scalar_t><<<grid, block>>>(
B, N, K, D,
features.data<scalar_t>(),
neighborhood.data<int>(),
topdiff.data<scalar_t>(),
argmax.data<int>(),
grad_features.data<scalar_t>());
}));
}
|
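The forward kernel above is a max pool over the K neighbors of each point, with features laid out as [B, D, N] and the neighborhood as [B, K, N]; the backward kernel routes each gradient to the winning neighbor via atomicAdd. A plain C++ reference of the forward indexing (an illustrative sketch, not part of the extension) is:

```cpp
#include <limits>
#include <vector>

// CPU mirror of flex_pool_forward_kernel_cuda_impl: for every (b, d, n),
// take the maximum feature value over the K neighbor indices of point n.
void flex_pool_forward_cpu(int B, int N, int K, int D,
                           const std::vector<float>& features,    // [B, D, N]
                           const std::vector<int>& neighborhood,  // [B, K, N]
                           std::vector<float>& output,            // [B, D, N]
                           std::vector<int>& argmax) {            // [B, D, N]
  for (int b = 0; b < B; ++b)
    for (int d = 0; d < D; ++d)
      for (int n = 0; n < N; ++n) {
        float best_value = std::numeric_limits<float>::lowest();
        int best_id = 0;
        for (int k = 0; k < K; ++k) {
          const int other = neighborhood[b * K * N + k * N + n];
          const float v = features[b * D * N + d * N + other];
          if (best_value < v) { best_value = v; best_id = other; }
        }
        output[b * D * N + d * N + n] = best_value;
        argmax[b * D * N + d * N + n] = best_id;
      }
}
```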
d686277efa001faa402bd18418da74a6000469e7.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/label/classlabels.cuh>
#include <hipcub/hipcub.hpp>
#include <cuml/common/logger.hpp>
#include <raft/core/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/op/sort.cuh>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/functional.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
#include <thrust/tuple.h>
#include <cuml/cluster/hdbscan.hpp>
namespace ML {
namespace HDBSCAN {
namespace Common {
struct TupleComp {
template <typename one, typename two>
__host__ __device__ bool operator()(const one& t1, const two& t2)
{
// sort first by each parent,
if (thrust::get<0>(t1) < thrust::get<0>(t2)) return true;
if (thrust::get<0>(t1) > thrust::get<0>(t2)) return false;
// within each parent, sort by each child,
if (thrust::get<1>(t1) < thrust::get<1>(t2)) return true;
if (thrust::get<1>(t1) > thrust::get<1>(t2)) return false;
// then sort by value in descending order
return thrust::get<2>(t1) < thrust::get<2>(t2);
}
};
template <typename value_idx, typename value_t>
CondensedHierarchy<value_idx, value_t>::CondensedHierarchy(const raft::handle_t& handle_,
size_t n_leaves_)
: handle(handle_),
n_leaves(n_leaves_),
parents(0, handle.get_stream()),
children(0, handle.get_stream()),
lambdas(0, handle.get_stream()),
sizes(0, handle.get_stream())
{
}
template <typename value_idx, typename value_t>
CondensedHierarchy<value_idx, value_t>::CondensedHierarchy(const raft::handle_t& handle_,
size_t n_leaves_,
int n_edges_,
value_idx* parents_,
value_idx* children_,
value_t* lambdas_,
value_idx* sizes_)
: handle(handle_),
n_leaves(n_leaves_),
n_edges(n_edges_),
parents(n_edges_, handle.get_stream()),
children(n_edges_, handle.get_stream()),
lambdas(n_edges_, handle.get_stream()),
sizes(n_edges_, handle.get_stream())
{
raft::copy(parents.begin(), parents_, n_edges_, handle.get_stream());
raft::copy(children.begin(), children_, n_edges_, handle.get_stream());
raft::copy(lambdas.begin(), lambdas_, n_edges_, handle.get_stream());
raft::copy(sizes.begin(), sizes_, n_edges_, handle.get_stream());
auto parents_ptr = thrust::device_pointer_cast(parents.data());
auto parents_min_max = thrust::minmax_element(
thrust::hip::par.on(handle.get_stream()), parents_ptr, parents_ptr + n_edges);
auto min_cluster = *parents_min_max.first;
auto max_cluster = *parents_min_max.second;
n_clusters = max_cluster - min_cluster + 1;
auto sort_keys =
thrust::make_zip_iterator(thrust::make_tuple(parents.begin(), children.begin(), sizes.begin()));
auto sort_values = thrust::make_zip_iterator(thrust::make_tuple(lambdas.begin()));
thrust::sort_by_key(thrust::hip::par.on(handle.get_stream()),
sort_keys,
sort_keys + n_edges,
sort_values,
TupleComp());
}
template <typename value_idx, typename value_t>
CondensedHierarchy<value_idx, value_t>::CondensedHierarchy(
const raft::handle_t& handle_,
size_t n_leaves_,
int n_edges_,
int n_clusters_,
rmm::device_uvector<value_idx>&& parents_,
rmm::device_uvector<value_idx>&& children_,
rmm::device_uvector<value_t>&& lambdas_,
rmm::device_uvector<value_idx>&& sizes_)
: handle(handle_),
n_leaves(n_leaves_),
n_edges(n_edges_),
n_clusters(n_clusters_),
parents(std::move(parents_)),
children(std::move(children_)),
lambdas(std::move(lambdas_)),
sizes(std::move(sizes_))
{
}
/**
* Populates the condensed hierarchy object with the output
* from Condense::condense_hierarchy
* @param full_parents
* @param full_children
* @param full_lambdas
* @param full_sizes
*/
template <typename value_idx, typename value_t>
void CondensedHierarchy<value_idx, value_t>::condense(value_idx* full_parents,
value_idx* full_children,
value_t* full_lambdas,
value_idx* full_sizes,
value_idx size)
{
auto stream = handle.get_stream();
if (size == -1) size = 4 * (n_leaves - 1) + 2;
n_edges = thrust::transform_reduce(
thrust::hip::par.on(stream),
full_sizes,
full_sizes + size,
[=] __device__(value_idx a) { return a != -1; },
0,
thrust::plus<value_idx>());
parents.resize(n_edges, stream);
children.resize(n_edges, stream);
lambdas.resize(n_edges, stream);
sizes.resize(n_edges, stream);
auto in = thrust::make_zip_iterator(
thrust::make_tuple(full_parents, full_children, full_lambdas, full_sizes));
auto out = thrust::make_zip_iterator(
thrust::make_tuple(parents.data(), children.data(), lambdas.data(), sizes.data()));
thrust::copy_if(thrust::hip::par.on(stream),
in,
in + size,
out,
[=] __device__(thrust::tuple<value_idx, value_idx, value_t, value_idx> tup) {
return thrust::get<3>(tup) != -1;
});
// TODO: Avoid the copies here by updating kernel
rmm::device_uvector<value_idx> parent_child(n_edges * 2, stream);
raft::copy_async(parent_child.begin(), children.begin(), n_edges, stream);
raft::copy_async(parent_child.begin() + n_edges, parents.begin(), n_edges, stream);
// find n_clusters
auto parents_ptr = thrust::device_pointer_cast(parents.data());
auto max_parent =
*(thrust::max_element(thrust::hip::par.on(stream), parents_ptr, parents_ptr + n_edges));
// now invert labels
auto invert_op = [max_parent, n_leaves = n_leaves] __device__(auto& x) {
return x >= n_leaves ? max_parent - x + n_leaves : x;
};
thrust::transform(thrust::hip::par.on(stream),
parent_child.begin(),
parent_child.end(),
parent_child.begin(),
invert_op);
raft::label::make_monotonic(
parent_child.data(), parent_child.data(), parent_child.size(), stream, true);
raft::copy_async(children.begin(), parent_child.begin(), n_edges, stream);
raft::copy_async(parents.begin(), parent_child.begin() + n_edges, n_edges, stream);
auto parents_min_max =
thrust::minmax_element(thrust::hip::par.on(stream), parents_ptr, parents_ptr + n_edges);
auto min_cluster = *parents_min_max.first;
auto max_cluster = *parents_min_max.second;
n_clusters = max_cluster - min_cluster + 1;
auto sort_keys =
thrust::make_zip_iterator(thrust::make_tuple(parents.begin(), children.begin(), sizes.begin()));
auto sort_values = thrust::make_zip_iterator(thrust::make_tuple(lambdas.begin()));
thrust::sort_by_key(
thrust::hip::par.on(stream), sort_keys, sort_keys + n_edges, sort_values, TupleComp());
}
}; // namespace Common
}; // namespace HDBSCAN
}; // namespace ML
|
d686277efa001faa402bd18418da74a6000469e7.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/label/classlabels.cuh>
#include <cub/cub.cuh>
#include <cuml/common/logger.hpp>
#include <raft/core/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/op/sort.cuh>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/functional.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
#include <thrust/tuple.h>
#include <cuml/cluster/hdbscan.hpp>
namespace ML {
namespace HDBSCAN {
namespace Common {
struct TupleComp {
template <typename one, typename two>
__host__ __device__ bool operator()(const one& t1, const two& t2)
{
// sort first by each parent,
if (thrust::get<0>(t1) < thrust::get<0>(t2)) return true;
if (thrust::get<0>(t1) > thrust::get<0>(t2)) return false;
// within each parent, sort by each child,
if (thrust::get<1>(t1) < thrust::get<1>(t2)) return true;
if (thrust::get<1>(t1) > thrust::get<1>(t2)) return false;
// then sort by value in descending order
return thrust::get<2>(t1) < thrust::get<2>(t2);
}
};
template <typename value_idx, typename value_t>
CondensedHierarchy<value_idx, value_t>::CondensedHierarchy(const raft::handle_t& handle_,
size_t n_leaves_)
: handle(handle_),
n_leaves(n_leaves_),
parents(0, handle.get_stream()),
children(0, handle.get_stream()),
lambdas(0, handle.get_stream()),
sizes(0, handle.get_stream())
{
}
template <typename value_idx, typename value_t>
CondensedHierarchy<value_idx, value_t>::CondensedHierarchy(const raft::handle_t& handle_,
size_t n_leaves_,
int n_edges_,
value_idx* parents_,
value_idx* children_,
value_t* lambdas_,
value_idx* sizes_)
: handle(handle_),
n_leaves(n_leaves_),
n_edges(n_edges_),
parents(n_edges_, handle.get_stream()),
children(n_edges_, handle.get_stream()),
lambdas(n_edges_, handle.get_stream()),
sizes(n_edges_, handle.get_stream())
{
raft::copy(parents.begin(), parents_, n_edges_, handle.get_stream());
raft::copy(children.begin(), children_, n_edges_, handle.get_stream());
raft::copy(lambdas.begin(), lambdas_, n_edges_, handle.get_stream());
raft::copy(sizes.begin(), sizes_, n_edges_, handle.get_stream());
auto parents_ptr = thrust::device_pointer_cast(parents.data());
auto parents_min_max = thrust::minmax_element(
thrust::cuda::par.on(handle.get_stream()), parents_ptr, parents_ptr + n_edges);
auto min_cluster = *parents_min_max.first;
auto max_cluster = *parents_min_max.second;
n_clusters = max_cluster - min_cluster + 1;
auto sort_keys =
thrust::make_zip_iterator(thrust::make_tuple(parents.begin(), children.begin(), sizes.begin()));
auto sort_values = thrust::make_zip_iterator(thrust::make_tuple(lambdas.begin()));
thrust::sort_by_key(thrust::cuda::par.on(handle.get_stream()),
sort_keys,
sort_keys + n_edges,
sort_values,
TupleComp());
}
template <typename value_idx, typename value_t>
CondensedHierarchy<value_idx, value_t>::CondensedHierarchy(
const raft::handle_t& handle_,
size_t n_leaves_,
int n_edges_,
int n_clusters_,
rmm::device_uvector<value_idx>&& parents_,
rmm::device_uvector<value_idx>&& children_,
rmm::device_uvector<value_t>&& lambdas_,
rmm::device_uvector<value_idx>&& sizes_)
: handle(handle_),
n_leaves(n_leaves_),
n_edges(n_edges_),
n_clusters(n_clusters_),
parents(std::move(parents_)),
children(std::move(children_)),
lambdas(std::move(lambdas_)),
sizes(std::move(sizes_))
{
}
/**
* Populates the condensed hierarchy object with the output
* from Condense::condense_hierarchy
* @param full_parents
* @param full_children
* @param full_lambdas
* @param full_sizes
*/
template <typename value_idx, typename value_t>
void CondensedHierarchy<value_idx, value_t>::condense(value_idx* full_parents,
value_idx* full_children,
value_t* full_lambdas,
value_idx* full_sizes,
value_idx size)
{
auto stream = handle.get_stream();
if (size == -1) size = 4 * (n_leaves - 1) + 2;
n_edges = thrust::transform_reduce(
thrust::cuda::par.on(stream),
full_sizes,
full_sizes + size,
[=] __device__(value_idx a) { return a != -1; },
0,
thrust::plus<value_idx>());
parents.resize(n_edges, stream);
children.resize(n_edges, stream);
lambdas.resize(n_edges, stream);
sizes.resize(n_edges, stream);
auto in = thrust::make_zip_iterator(
thrust::make_tuple(full_parents, full_children, full_lambdas, full_sizes));
auto out = thrust::make_zip_iterator(
thrust::make_tuple(parents.data(), children.data(), lambdas.data(), sizes.data()));
thrust::copy_if(thrust::cuda::par.on(stream),
in,
in + size,
out,
[=] __device__(thrust::tuple<value_idx, value_idx, value_t, value_idx> tup) {
return thrust::get<3>(tup) != -1;
});
// TODO: Avoid the copies here by updating kernel
rmm::device_uvector<value_idx> parent_child(n_edges * 2, stream);
raft::copy_async(parent_child.begin(), children.begin(), n_edges, stream);
raft::copy_async(parent_child.begin() + n_edges, parents.begin(), n_edges, stream);
// find n_clusters
auto parents_ptr = thrust::device_pointer_cast(parents.data());
auto max_parent =
*(thrust::max_element(thrust::cuda::par.on(stream), parents_ptr, parents_ptr + n_edges));
// now invert labels
auto invert_op = [max_parent, n_leaves = n_leaves] __device__(auto& x) {
return x >= n_leaves ? max_parent - x + n_leaves : x;
};
thrust::transform(thrust::cuda::par.on(stream),
parent_child.begin(),
parent_child.end(),
parent_child.begin(),
invert_op);
raft::label::make_monotonic(
parent_child.data(), parent_child.data(), parent_child.size(), stream, true);
raft::copy_async(children.begin(), parent_child.begin(), n_edges, stream);
raft::copy_async(parents.begin(), parent_child.begin() + n_edges, n_edges, stream);
auto parents_min_max =
thrust::minmax_element(thrust::cuda::par.on(stream), parents_ptr, parents_ptr + n_edges);
auto min_cluster = *parents_min_max.first;
auto max_cluster = *parents_min_max.second;
n_clusters = max_cluster - min_cluster + 1;
auto sort_keys =
thrust::make_zip_iterator(thrust::make_tuple(parents.begin(), children.begin(), sizes.begin()));
auto sort_values = thrust::make_zip_iterator(thrust::make_tuple(lambdas.begin()));
thrust::sort_by_key(
thrust::cuda::par.on(stream), sort_keys, sort_keys + n_edges, sort_values, TupleComp());
}
}; // namespace Common
}; // namespace HDBSCAN
}; // namespace ML
|
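The condense() routine above uses a common Thrust pattern: count the surviving edges with transform_reduce, resize the output vectors, then compact with copy_if. A small standalone sketch of that pattern (illustrative only, using plain device vectors instead of the cuML types; build with nvcc --extended-lambda):

```cuda
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <cstdio>

int main() {
  // -1 marks an invalid entry, as in the full_sizes buffer above.
  const int h_sizes[] = {3, -1, 5, -1, 2, 7};
  thrust::device_vector<int> sizes(h_sizes, h_sizes + 6);

  // 1. Count the valid entries.
  int n_valid = thrust::transform_reduce(
      sizes.begin(), sizes.end(),
      [] __device__(int s) { return s != -1 ? 1 : 0; },
      0, thrust::plus<int>());

  // 2. Compact them into a right-sized output, preserving order.
  thrust::device_vector<int> compacted(n_valid);
  thrust::copy_if(sizes.begin(), sizes.end(), compacted.begin(),
                  [] __device__(int s) { return s != -1; });

  printf("kept %d of %d entries\n", n_valid, (int)sizes.size());
  return 0;
}
```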
17560494b584bc39f93f2e6353ebc5000ed9d61a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//gpubpm.cu
/*
Team: Seahawks
Program Description: Determines how many beats per minute are in a song using
a list of frequencies that must be provided by a file in
the command-line.
The algorithm to determine the beats per minute was provided
by Marco Ziccardi. His beat detection algorithm can be found in
the link below:
http://mziccard.me/2015/05/28/beats-detection-algorithms-1/
*/
#include <emmintrin.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <math.h>   /* pow() */
#include <time.h>   /* clock(), CLOCKS_PER_SEC */
#define SAMPS_IN_SONG 9281536
#define SAMPLE_RATE 44100
#define SAMPLES_PER_MIN SAMPLE_RATE * 60
#define UNCALCULATED_SAMPS 68
#define SAMPLES_PER_BLOCK 1024
#define BLOCKS_PER_SECOND SAMPLE_RATE / SAMPLES_PER_BLOCK
#define C_MULTIPLIER -0.0000015
#define C_ADDER 1.5142857
#define BLOCKS 43
/*Prototypes*/
int initialize(float *, int, char**);
__device__ void gpuSquared(float *, int);
__device__ void gpuCalcInstantEnergies(float *, float *);
__global__ void getInstantEnergies(float *, float *, int);
void calcBPM(float*, int);
float getAvgEnergy(float *, int);
double getVariance(float, float *, int);
double getSoil(float, float);
int calcBeats(float *, float, int);
int getBeats(float * ejs, int totalFrequencies);
int initialize(float *frequency, int argc, char** argv) {
/*Variables*/
FILE * file;
file = fopen(argv[argc - 1], "r");
int totalFrequencies = 0;
while (fscanf(file, "%f", &frequency[totalFrequencies]) != EOF) {
totalFrequencies++;
}
return totalFrequencies;
}
/*
The GPU kernel gpuSquared(float* frequency, int totalFrequencies) accepts
two parameters for the input vector. Each thread squares the given array's
elements and multiplies it by two in its respective place. The size of the
array is passed into the kernel as numOfFrequencies. The result is saved
into the array's original position.
The original equation provided by Marco Ziccardi takes the left and right
frequencies at the same index, squares each of the elements and adds them
together. However, that is when the song is in stereo. This kernel only
supports songs in mono.
@parameters array is an array of float elements that are to be squared.
@parameters numOfFrequencies is the number of elements in the array.
*/
__device__ void gpuSquared(float frequency[], int totalFrequencies) {
/*Variables*/
int element = blockIdx.x * blockDim.x + threadIdx.x;
if (element < totalFrequencies)
frequency[element] = 2 * (frequency[element] * frequency[element]);
}
/*
Function getBlocks(float ej[], float sampleArray[], long samplesPerBlock,
long samples) computes the energy of a block. A block is made up of 1024
samples in mono. The energy in a block is computed by summing a block
of sampleArray and returning the result into the given array ej.
The equation provided by Marco Ziccardi is:
1024
Ej = ∑ sampleArray[i]
i = 0
@parameters ej is an array of floats that returns the result of the
summation.
@parameters sampleArray is an array of floats that contains the results
of each left and right frequency squared and multiplied by two
in each index.
@parameters samplesPerBlock indicates which block we are at, must
be a multiple of 1024.
@parameters samples is the current sample per second.
@return ej which is an array containing the energies of the blocks.
*/
__device__ void gpuCalcInstantEnergies(float frequency[], float instantEnergy[]) {
/*Variables*/
unsigned int tid = threadIdx.x;
unsigned int element = blockIdx.x * blockDim.x + tid;
/*The last 68 samples of a second don't get computed*/
unsigned int offset = blockIdx.x / BLOCKS_PER_SECOND;
offset *= UNCALCULATED_SAMPS;
instantEnergy[element] = frequency[element];
__syncthreads();
for (unsigned int s = 1; s < SAMPLES_PER_BLOCK; s *= 2) {
if (tid % (2 * s) == 0) {
instantEnergy[element + offset] += instantEnergy[element + s
+ offset];
}
__syncthreads();
}
if (tid == 0) {
frequency[blockIdx.x] = instantEnergy[element + offset];
}
}
__global__ void getInstantEnergies(float * frequencies, float * energy, int samples) {
gpuSquared(frequencies, samples);
__syncthreads();
gpuCalcInstantEnergies(frequencies, energy);
__syncthreads();
}
/*
Function calcBPM (float *samples, int totalFrequencies) allocates
GPU memory and transfers the data between the CPU and GPU to get the instant
energy of each block. Once it has the instant energies of all the elements it
transfers the data from the GPU back to the CPU and calls the following functions to
get the beat count of the song. A simple formula is then applied to this beat
count to calculate the BPM of the song.
@parameters samples is an array of float elements that are to be squared.
@parameters totalFrequencies is the number of elements in the array.
*/
void calcBPM(float* samples, int totalFrequencies) {
/*Variables*/
int numThreads = 1024;
int numCores = totalFrequencies / 1024 + 1;
int bpm = 0;
int beats = 0;
float* gpuA;
hipMalloc(&gpuA, totalFrequencies * sizeof(float));
hipMemcpy(gpuA, samples, totalFrequencies * sizeof(float),
hipMemcpyHostToDevice);
float* gpuB;
hipMalloc(&gpuB, totalFrequencies * sizeof(float));
hipLaunchKernelGGL(( getInstantEnergies), dim3(numCores), dim3(numThreads), 0, 0, gpuA, gpuB, totalFrequencies);
hipMemcpy(samples, gpuA, totalFrequencies * sizeof(float),
hipMemcpyDeviceToHost);
hipFree(gpuA);
hipFree(gpuB);
/*Samples contain the instant energies*/
beats = getBeats(samples, totalFrequencies);
bpm = (int) ((beats * SAMPLES_PER_MIN) / totalFrequencies);
printf("BPM = %d\n", bpm);
}
/*
Function getAvgEnergy(float ej[]) computes the average window energy with
a sample rate of 44100 and 43 blocks per current window, which slightly
more than 1 second of music. The equation provided by Marco Ziccardi is:
42
avg(E) = (1/43) ∑ ej[i]
i = 0
@parameters ej is an array of floats containing the energy computed in
each block.
@return avg is the computed average energy in the current window made
up of 43 blocks.
*/
float getAvgEnergy(float * ejs, int currentSec) {
/*Variables*/
int currentEnergy = BLOCKS_PER_SECOND * currentSec;
int lastEnergy = currentEnergy + BLOCKS_PER_SECOND;
float avg = 0;
while (currentEnergy < lastEnergy) {
avg += ejs[currentEnergy];
currentEnergy++;
}
avg = avg / BLOCKS;
return avg;
}
/*
Function getVariance(float ej[], float avg) computes the variance inside
a window of blocks. The bigger the variance, the more likely a block will
be considered a beat. The equation provided by Marco Ziccardi is:
42
var(E) = (1/43) ∑ (avg(E) - Ej)^2
i = 0
@parameters ej is an array of floats containing the energy computed in
each block.
@parameters avg is the average energy in the current window made up of
43 blocks.
@return variance is the calculated variance of a window of blocks.
*/
double getVariance(float avg, float * ejs, int currentSec) {
/*Variables*/
float var = 0.0;
int currentEnergy = BLOCKS_PER_SECOND * currentSec;
int lastEnergy = currentEnergy + BLOCKS_PER_SECOND;
float temp;
while (currentEnergy < lastEnergy) {
temp = avg - ejs[currentEnergy];
var += pow(temp, 2.0);
currentEnergy++;
}
var /= BLOCKS;
return var;
}
/*
Function getSoil(float var, float avg) computes the linear regression of the
energy variance that lowers the impact of the variance provided by
Marco Ziccardi that is used to determine if a beat has occurred.
C = -0.0000015 * var(E) + 1.5142857
@parameters var is a float that contains the current variance.
@parameters avg is a float that contains the average energy of the current Ej.
@return soil returns the computed linear regression of the current energy
variance.
*/
double getSoil(float var, float avg) {
/*Variables*/
float soil = 0.0;
soil = (var * C_MULTIPLIER) + C_ADDER;
soil *= avg;
return soil;
}
/*
Function calcBeats(float * ejs, float soil, int currentSec) detects
a peak if the instant energy is bigger than c * avg(E).
If a peak is detected 4 times in a row it is counted as a beat.
@parameters ejs is a pointer to an array of floats that contains
the computed ejs.
@parameters soil contains the linear regression of the current
energy variance.
*/
int calcBeats(float * ejs, float soil, int currentSec) {
/*Variables*/
int beats = 0;
int peakCounter = 0;
float energy = 0.0;
int currentEnergy = currentSec * BLOCKS_PER_SECOND;
float lastEnergy = currentEnergy + BLOCKS_PER_SECOND;
while (currentEnergy < lastEnergy) {
energy = ejs[currentEnergy];
if (energy > soil) {
peakCounter++;
if (peakCounter == 4) {
beats++;
peakCounter = 0;
}
} else {
peakCounter = 0;
}
currentEnergy++;
}
return beats;
}
/*
Function getBeats(float * ejs, int totalFrequencies) computes per
second the necessary parameters to determine whether
a beat has occurred, and increments the beat count if beats are found.
@parameters ejs is the array holding the instant energies
@parameters totalFrequencies is the number of samples in the song
*/
int getBeats(float * ejs, int totalFrequencies) {
/*Variables*/
int beats = 0;
int secsInSong = totalFrequencies / SAMPLE_RATE;
float avg = 0.0;
float var = 0.0;
float soil = 0.0;
int currentSec = 0;
while (currentSec <= secsInSong) {
avg = getAvgEnergy(ejs, currentSec);
var = getVariance(avg, ejs, currentSec);
soil = getSoil(var, avg);
beats += calcBeats(ejs, soil, currentSec);
currentSec++;
}
return beats;
}
int main(int argc, char** argv) {
/*Start clock*/
int msec = 0;
clock_t start = clock(), diff;
/*Variables*/
int frequenciesInSong = 0;
float* frequencies = (float*) malloc(INT_MAX * sizeof(float));
printf("Starting\n");
frequenciesInSong = initialize(frequencies, argc, argv);
calcBPM(frequencies, frequenciesInSong);
free(frequencies);
/*Calculate Time*/
diff = clock() - start;
msec = diff * 1000 / CLOCKS_PER_SEC;
printf("Time taken %d seconds %d milliseconds\n", msec / 1000, msec % 1000);
}
|
17560494b584bc39f93f2e6353ebc5000ed9d61a.cu
|
//gpubpm.cu
/*
Team: Seahawks
Program Description: Determines how many beats per minute are in a song using
a list of frequencies that must be provided by a file in
the command-line.
The algorithm to determine the beats per minute was provided
by Marco Ziccardi. His beat detection algorithm can be found in
the link below:
http://mziccard.me/2015/05/28/beats-detection-algorithms-1/
*/
#include <emmintrin.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <math.h>   /* pow() */
#include <time.h>   /* clock(), CLOCKS_PER_SEC */
#define SAMPS_IN_SONG 9281536
#define SAMPLE_RATE 44100
#define SAMPLES_PER_MIN SAMPLE_RATE * 60
#define UNCALCULATED_SAMPS 68
#define SAMPLES_PER_BLOCK 1024
#define BLOCKS_PER_SECOND SAMPLE_RATE / SAMPLES_PER_BLOCK
#define C_MULTIPLIER -0.0000015
#define C_ADDER 1.5142857
#define BLOCKS 43
/*Prototypes*/
int initialize(float *, int, char**);
__device__ void gpuSquared(float *, int);
__device__ void gpuCalcInstantEnergies(float *, float *);
__global__ void getInstantEnergies(float *, float *, int);
void calcBPM(float*, int);
float getAvgEnergy(float *, int);
double getVariance(float, float *, int);
double getSoil(float, float);
int calcBeats(float *, float, int);
int getBeats(float * ejs, int totalFrequencies);
int initialize(float *frequency, int argc, char** argv) {
/*Variables*/
FILE * file;
file = fopen(argv[argc - 1], "r");
int totalFrequencies = 0;
while (fscanf(file, "%f", &frequency[totalFrequencies]) != EOF) {
totalFrequencies++;
}
return totalFrequencies;
}
/*
The GPU kernel gpuSquared(float* frequency, int totalFrequencies) accepts
two parameters for the input vector. Each thread squares the given array's
elements and multiplies it by two in its respective place. The size of the
array is passed into the kernel as numOfFrequencies. The result is saved
into the array's original position.
The original equation provided by Marco Ziccardi takes the left and right
frequencies at the same index, squares each of the elements and adds them
together. However, that is when the song is in stereo. This kernel only
supports songs in mono.
@parameters array is an array of float elements that are to be squared.
@parameters numOfFrequencies is the number of elements in the array.
*/
__device__ void gpuSquared(float frequency[], int totalFrequencies) {
/*Variables*/
int element = blockIdx.x * blockDim.x + threadIdx.x;
if (element < totalFrequencies)
frequency[element] = 2 * (frequency[element] * frequency[element]);
}
/*
Function getBlocks(float ej[], float sampleArray[], long samplesPerBlock,
long samples) computes the energy of a block. A block is made up of 1024
samples in mono. The energy in a block is computed by summing a block
of sampleArray and returning the result into the given array ej.
The equation provided by Marco Ziccardi is:
1024
Ej = ∑ sampleArray[i]
i = 0
@parameters ej is an array of floats that returns the result of the
summation.
@parameters sampleArray is an array of floats that contains the results
of each left and right frequency squared and multiplied by two
in each index.
@parameters samplesPerBlock indicates which block we are at, must
be a multiple of 1024.
@parameters samples is the current sample per second.
@return ej which is an array containing the energies of the blocks.
*/
__device__ void gpuCalcInstantEnergies(float frequency[], float instantEnergy[]) {
/*Variables*/
unsigned int tid = threadIdx.x;
unsigned int element = blockIdx.x * blockDim.x + tid;
/*The last 68 samples of a second don't get computed*/
unsigned int offset = blockIdx.x / BLOCKS_PER_SECOND;
offset *= UNCALCULATED_SAMPS;
instantEnergy[element] = frequency[element];
__syncthreads();
for (unsigned int s = 1; s < SAMPLES_PER_BLOCK; s *= 2) {
if (tid % (2 * s) == 0) {
instantEnergy[element + offset] += instantEnergy[element + s
+ offset];
}
__syncthreads();
}
if (tid == 0) {
frequency[blockIdx.x] = instantEnergy[element + offset];
}
}
__global__ void getInstantEnergies(float * frequencies, float * energy, int samples) {
gpuSquared(frequencies, samples);
__syncthreads();
gpuCalcInstantEnergies(frequencies, energy);
__syncthreads();
}
/*
Function calcBPM (float *samples, int totalFrequencies) allocates
GPU memory and transfers the data between the CPU and GPU to get the instant
energy of each block. Once it has the instant energies of all the elements it
transfers the data from the GPU back to the CPU and calls the following functions to
get the beat count of the song. A simple formula is then applied to this beat
count to calculate the BPM of the song.
@parameters samples is an array of float elements that are to be squared.
@parameters totalFrequencies is the number of elements in the array.
*/
void calcBPM(float* samples, int totalFrequencies) {
/*Variables*/
int numThreads = 1024;
int numCores = totalFrequencies / 1024 + 1;
int bpm = 0;
int beats = 0;
float* gpuA;
cudaMalloc(&gpuA, totalFrequencies * sizeof(float));
cudaMemcpy(gpuA, samples, totalFrequencies * sizeof(float),
cudaMemcpyHostToDevice);
float* gpuB;
cudaMalloc(&gpuB, totalFrequencies * sizeof(float));
getInstantEnergies<<<numCores, numThreads>>> (gpuA, gpuB, totalFrequencies);
cudaMemcpy(samples, gpuA, totalFrequencies * sizeof(float),
cudaMemcpyDeviceToHost);
cudaFree(gpuA);
cudaFree(gpuB);
/*Samples contain the instant energies*/
beats = getBeats(samples, totalFrequencies);
bpm = (int) ((beats * SAMPLES_PER_MIN) / totalFrequencies);
printf("BPM = %d\n", bpm);
}
/*
Function getAvgEnergy(float ej[]) computes the average window energy with
a sample rate of 44100 and 43 blocks per current window, which is slightly
more than 1 second of music. The equation provided by Marco Ziccardi is:
42
avg(E) = (1/43) ∑ ej[i]
i = 0
@parameters ej is an array of floats containing the energy computed in
each block.
@return avg is the computed average energy in the current window made
up of 43 blocks.
*/
float getAvgEnergy(float * ejs, int currentSec) {
/*Variables*/
int currentEnergy = BLOCKS_PER_SECOND * currentSec;
int lastEnergy = currentEnergy + BLOCKS_PER_SECOND;
float avg = 0;
while (currentEnergy < lastEnergy) {
avg += ejs[currentEnergy];
currentEnergy++;
}
avg = avg / BLOCKS;
return avg;
}
/*
Function getVariance(float ej[], float avg) computes the variance inside
a window of blocks. The bigger the variance, the more likely a block will
be considered a beat. The equation provided by Marco Ziccardi is:
42
var(E) = (1/43) ∑ (avg(E) - Ej)^2
i = 0
@parameters ej is an array of floats containing the energy computed in
each block.
@parameters avg is the average energy in the current window made up of
43 blocks.
@return variance is the calculated variance of a window of blocks.
*/
double getVariance(float avg, float * ejs, int currentSec) {
/*Variables*/
float var = 0.0;
int currentEnergy = BLOCKS_PER_SECOND * currentSec;
int lastEnergy = currentEnergy + BLOCKS_PER_SECOND;
float temp;
while (currentEnergy < lastEnergy) {
temp = avg - ejs[currentEnergy];
var += pow(temp, 2.0);
currentEnergy++;
}
var /= BLOCKS;
return var;
}
/*
Function getSoil(float var, float avg) computes the linear regression of the
energy variance that lowers the impact of the variance provided by
Marco Ziccardi that is used to determine if a beat has occurred.
C = −0.0000015 * var(E) + 1.5142857
@parameters var is a float that contains the current variance.
@parameters avg is a float that contains the average energy of the current Ej.
@return soil returns the computed linear regression of the current energy
variance.
*/
double getSoil(float var, float avg) {
/*Variables*/
float soil = 0.0;
soil = (var * C_MULTIPLIER) + C_ADDER;
soil *= avg;
return soil;
}
/*
Function calcBeats(float * ejs, float soil, int currentSec) detects
a peak whenever the instant energy is bigger than the threshold c * avg(E).
If a peak is detected 4 times in a row it is counted as a beat.
@parameters ejs is a pointer to an array of floats that contains
the computed instant energies.
@parameters soil contains the beat-detection threshold derived from the
current energy variance.
@parameters currentSec is the index of the one-second window to scan.
@return beats is the number of beats detected in the current window.
*/
int calcBeats(float * ejs, float soil, int currentSec) {
/*Variables*/
int beats = 0;
int peakCounter = 0;
float energy = 0.0;
int currentEnergy = currentSec * BLOCKS_PER_SECOND;
int lastEnergy = currentEnergy + BLOCKS_PER_SECOND;
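/* A block whose instant energy exceeds the threshold counts as a peak; four consecutive peaks register one beat. */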
while (currentEnergy < lastEnergy) {
energy = ejs[currentEnergy];
if (energy > soil) {
peakCounter++;
if (peakCounter == 4) {
beats++;
peakCounter = 0;
}
} else {
peakCounter = 0;
}
currentEnergy++;
}
return beats;
}
/*
Function getBeats(float * ejs, int totalFrequencies) computes, for each
second of the song, the necessary parameters to determine whether
a beat has occurred, and increments the beat count if beats are found.
@parameters ejs is the array holding the instant energies
@parameters totalFrequencies is the number of samples in the song
*/
int getBeats(float * ejs, int totalFrequencies) {
/*Variables*/
int beats = 0;
int secsInSong = totalFrequencies / SAMPLE_RATE;
float avg = 0.0;
float var = 0.0;
float soil = 0.0;
int currentSec = 0;
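/* For every second of the song: average energy -> variance -> threshold -> beat count. */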
while (currentSec <= secsInSong) {
avg = getAvgEnergy(ejs, currentSec);
var = getVariance(avg, ejs, currentSec);
soil = getSoil(var, avg);
beats += calcBeats(ejs, soil, currentSec);
currentSec++;
}
return beats;
}
int main(int argc, char** argv) {
/*Start clock*/
int msec = 0;
clock_t start = clock(), diff;
/*Variables*/
int frequenciesInSong = 0;
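/* Reserve room for up to INT_MAX samples (roughly 8 GiB of floats); initialize() presumably fills it with the song data and returns the sample count. */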
float* frequencies = (float*) malloc(INT_MAX * sizeof(float));
printf("Starting\n");
frequenciesInSong = initialize(frequencies, argc, argv);
calcBPM(frequencies, frequenciesInSong);
free(frequencies);
/*Calculate Time*/
diff = clock() - start;
msec = diff * 1000 / CLOCKS_PER_SEC;
printf("Time taken %d seconds %d milliseconds\n", msec / 1000, msec % 1000);
}
|
343209214d912e8ca6db5d6ef79a56af5faa313c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Example pinnedMen.cu
Juan Méndez for MNC, [email protected]
*/
#include <cstdio>
#include <random>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "eTimer.h"
#define N 4*1024
#define PINNED_MEM
#undef PINNED_MEM
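/* With PINNED_MEM undefined the host buffers are pageable (_aligned_malloc); remove the #undef above to benchmark page-locked memory allocated with hipHostMalloc. */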
__global__ void miKernel(double *C, const double *A, const double *B)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = y*N + x;
if (index % 2 == 0)
C[index] = A[index] * A[index] + B[index] * B[index];
else
C[index] = 0.0;
}
int main(int argc, char *argv[])
{
std::default_random_engine generador;
std::normal_distribution<double> distribucion(0.0, 1.0);
double *host_A, *host_B, *host_C;
unsigned int size = N*N*sizeof(double);
hipError_t status;
status = hipSetDevice(0);
#if defined PINNED_MEM
printf("\nPinned Memory\n");
status = hipHostMalloc((void**)&host_A, size);
status = hipHostMalloc((void**)&host_B, size);
status = hipHostMalloc((void**)&host_C, size);
#else
printf("\nPaged Memory\n");
host_A = (double*)_aligned_malloc(size, 64);
host_B = (double*)_aligned_malloc(size, 64);
host_C = (double*)_aligned_malloc(size, 64);
#endif
for (int y = 0; y < N; y++){
for (int x = 0; x < N; x++){
host_A[y*N + x] = distribucion(generador);
host_B[y*N + x] = distribucion(generador);
}
}
eTimer *Tcpu = new eTimer();
eTimer *Thd = new eTimer();
eTimer *Tkernel = new eTimer();
eTimer *Tdh = new eTimer();
Tcpu->start();
for (int y = 0; y < N; y++){
for (int x = 0; x < N; x++){
int index = y*N + x;
if (index % 2 == 0)
host_C[index] = host_A[index] * host_A[index] + host_B[index] * host_B[index];
else
host_C[index] = 0.0;
}
}
Tcpu->stop();
Tcpu->report("CPU");
// test cases
for (int i = 0; i < 5; i++) printf("%lf ", host_C[i]);
printf("\n\n");
memset(host_C, 0, size);
double *dev_A, *dev_B, *dev_C;
status = hipMalloc((void**)&dev_A, size);
status = hipMalloc((void**)&dev_B, size);
status = hipMalloc((void**)&dev_C, size);
Thd->start();
status = hipMemcpy(dev_A, host_A, size, hipMemcpyHostToDevice);
status = hipMemcpy(dev_B, host_B, size, hipMemcpyHostToDevice);
Thd->stop();
Thd->report("HostToDevice");
Tkernel->start();
dim3 Block(32, 16, 1);
dim3 Grid(N / 32, N / 16, 1);
hipLaunchKernelGGL(( miKernel) , dim3(Grid), dim3(Block) , 0, 0, dev_C, dev_A, dev_B);
status = hipDeviceSynchronize();
Tkernel->stop();
Tkernel->report("Kernel");
Tdh->start();
status = hipMemcpy(host_C, dev_C, size, hipMemcpyDeviceToHost);
Tdh->stop();
Tdh->report("DeviceToHost");
// test cases
for (int i = 0; i < 5; i++) printf("%lf ", host_C[i]);
printf("\n\n");
status = hipFree(dev_A);
status = hipFree(dev_B);
status = hipFree(dev_C);
status = hipDeviceReset();
#if defined PINNED_MEM
status = hipHostFree(host_A);
status = hipHostFree(host_B);
status = hipHostFree(host_C);
#else
_aligned_free(host_A);
_aligned_free(host_B);
_aligned_free(host_C);
#endif
delete Tcpu;
delete Thd;
delete Tkernel;
delete Tdh;
return 0;
}
|
343209214d912e8ca6db5d6ef79a56af5faa313c.cu
|
/*
Example pinnedMen.cu
Juan Méndez for MNC, [email protected]
*/
#include <cstdio>
#include <random>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "eTimer.h"
#define N 4*1024
#define PINNED_MEM
#undef PINNED_MEM
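/* With PINNED_MEM undefined the host buffers are pageable (_aligned_malloc); remove the #undef above to benchmark page-locked memory allocated with cudaMallocHost. */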
__global__ void miKernel(double *C, const double *A, const double *B)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = y*N + x;
if (index % 2 == 0)
C[index] = A[index] * A[index] + B[index] * B[index];
else
C[index] = 0.0;
}
int main(int argc, char *argv[])
{
std::default_random_engine generador;
std::normal_distribution<double> distribucion(0.0, 1.0);
double *host_A, *host_B, *host_C;
unsigned int size = N*N*sizeof(double);
cudaError_t status;
status = cudaSetDevice(0);
#if defined PINNED_MEM
printf("\nPinned Memory\n");
status = cudaMallocHost((void**)&host_A, size);
status = cudaMallocHost((void**)&host_B, size);
status = cudaMallocHost((void**)&host_C, size);
#else
printf("\nPaged Memory\n");
host_A = (double*)_aligned_malloc(size, 64);
host_B = (double*)_aligned_malloc(size, 64);
host_C = (double*)_aligned_malloc(size, 64);
#endif
for (int y = 0; y < N; y++){
for (int x = 0; x < N; x++){
host_A[y*N + x] = distribucion(generador);
host_B[y*N + x] = distribucion(generador);
}
}
eTimer *Tcpu = new eTimer();
eTimer *Thd = new eTimer();
eTimer *Tkernel = new eTimer();
eTimer *Tdh = new eTimer();
Tcpu->start();
for (int y = 0; y < N; y++){
for (int x = 0; x < N; x++){
int index = y*N + x;
if (index % 2 == 0)
host_C[index] = host_A[index] * host_A[index] + host_B[index] * host_B[index];
else
host_C[index] = 0.0;
}
}
Tcpu->stop();
Tcpu->report("CPU");
// test cases
for (int i = 0; i < 5; i++) printf("%lf ", host_C[i]);
printf("\n\n");
memset(host_C, 0, size);
double *dev_A, *dev_B, *dev_C;
status = cudaMalloc((void**)&dev_A, size);
status = cudaMalloc((void**)&dev_B, size);
status = cudaMalloc((void**)&dev_C, size);
Thd->start();
status = cudaMemcpy(dev_A, host_A, size, cudaMemcpyHostToDevice);
status = cudaMemcpy(dev_B, host_B, size, cudaMemcpyHostToDevice);
Thd->stop();
Thd->report("HostToDevice");
Tkernel->start();
dim3 Block(32, 16, 1);
dim3 Grid(N / 32, N / 16, 1);
miKernel <<< Grid, Block >>>(dev_C, dev_A, dev_B);
status = cudaDeviceSynchronize();
Tkernel->stop();
Tkernel->report("Kernel");
Tdh->start();
status = cudaMemcpy(host_C, dev_C, size, cudaMemcpyDeviceToHost);
Tdh->stop();
Tdh->report("DeviceToHost");
// test cases
for (int i = 0; i < 5; i++) printf("%lf ", host_C[i]);
printf("\n\n");
status = cudaFree(dev_A);
status = cudaFree(dev_B);
status = cudaFree(dev_C);
status = cudaDeviceReset();
#if defined PINNED_MEM
status = cudaFreeHost(host_A);
status = cudaFreeHost(host_B);
status = cudaFreeHost(host_C);
#else
_aligned_free(host_A);
_aligned_free(host_B);
_aligned_free(host_C);
#endif
delete Tcpu;
delete Thd;
delete Tkernel;
delete Tdh;
return 0;
}
|
c06bb38698c2f9e272016697abf3ccc7fe61634e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaSReduceIndex_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const unsigned int inputSize = 1;
const unsigned int inputBatchOffset = 1;
const unsigned int outputBatchOffset = 1;
const unsigned int channelsWidth = 1;
const unsigned int channelsHeight = 1;
const unsigned int nbAnchors = 1;
const float *valueThreshold = NULL;
hipMalloc(&valueThreshold, XSIZE*YSIZE);
const float *inputs = NULL;
hipMalloc(&inputs, XSIZE*YSIZE);
int *outputMap = NULL;
hipMalloc(&outputMap, XSIZE*YSIZE);
float *scores = NULL;
hipMalloc(&scores, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
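// Round the problem size up to the next multiple of the block dimensions so the grid covers every element.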
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cudaSReduceIndex_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, inputSize,inputBatchOffset,outputBatchOffset,channelsWidth,channelsHeight,nbAnchors,valueThreshold,inputs,outputMap,scores);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cudaSReduceIndex_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, inputSize,inputBatchOffset,outputBatchOffset,channelsWidth,channelsHeight,nbAnchors,valueThreshold,inputs,outputMap,scores);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cudaSReduceIndex_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, inputSize,inputBatchOffset,outputBatchOffset,channelsWidth,channelsHeight,nbAnchors,valueThreshold,inputs,outputMap,scores);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
c06bb38698c2f9e272016697abf3ccc7fe61634e.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaSReduceIndex_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const unsigned int inputSize = 1;
const unsigned int inputBatchOffset = 1;
const unsigned int outputBatchOffset = 1;
const unsigned int channelsWidth = 1;
const unsigned int channelsHeight = 1;
const unsigned int nbAnchors = 1;
const float *valueThreshold = NULL;
cudaMalloc(&valueThreshold, XSIZE*YSIZE);
const float *inputs = NULL;
cudaMalloc(&inputs, XSIZE*YSIZE);
int *outputMap = NULL;
cudaMalloc(&outputMap, XSIZE*YSIZE);
float *scores = NULL;
cudaMalloc(&scores, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
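// Round the problem size up to the next multiple of the block dimensions so the grid covers every element.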
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cudaSReduceIndex_kernel<<<gridBlock,threadBlock>>>(inputSize,inputBatchOffset,outputBatchOffset,channelsWidth,channelsHeight,nbAnchors,valueThreshold,inputs,outputMap,scores);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cudaSReduceIndex_kernel<<<gridBlock,threadBlock>>>(inputSize,inputBatchOffset,outputBatchOffset,channelsWidth,channelsHeight,nbAnchors,valueThreshold,inputs,outputMap,scores);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cudaSReduceIndex_kernel<<<gridBlock,threadBlock>>>(inputSize,inputBatchOffset,outputBatchOffset,channelsWidth,channelsHeight,nbAnchors,valueThreshold,inputs,outputMap,scores);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
3b4797fc1574ce04a1f55e91c1b35da14e85436c.hip
|
// !!! This is a file automatically generated by hipify!!!
// *************************************************************************
//
// PARALUTION www.paralution.com
//
// Copyright (C) 2012-2014 Dimitar Lukarski
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// *************************************************************************
// PARALUTION version 0.7.0b
#include <cmath>
#include "gpu_allocate_free.hpp"
#include <hip/hip_runtime.h>
#include <assert.h>
#include "gpu_utils.hpp"
#include "cuda_kernels_general.hpp"
#include "../../utils/allocate_free.hpp"
namespace paralution {
#ifdef PARALUTION_CUDA_PINNED_MEMORY
template <typename DataType>
void allocate_host(const int size, DataType **ptr) {
LOG_DEBUG(0, "allocate_host()",
size);
if (size > 0) {
assert(*ptr == NULL);
// *ptr = new DataType[size];
hipHostMalloc((void **)ptr, size*sizeof(DataType));
CHECK_CUDA_ERROR(__FILE__, __LINE__);
LOG_DEBUG(0, "allocate_host()",
*ptr);
assert(*ptr != NULL);
}
}
template <typename DataType>
void free_host(DataType **ptr) {
LOG_DEBUG(0, "free_host()",
*ptr);
assert(*ptr != NULL);
// delete[] *ptr;
hipHostFree(*ptr);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*ptr = NULL;
}
#endif
template <typename DataType>
void allocate_gpu(const int size, DataType **ptr) {
LOG_DEBUG(0, "allocate_gpu()",
size);
if (size > 0) {
assert(*ptr == NULL);
hipMalloc( (void **)ptr, size*sizeof(DataType));
CHECK_CUDA_ERROR(__FILE__, __LINE__);
assert(*ptr != NULL);
}
}
template <typename DataType>
void free_gpu(DataType **ptr) {
LOG_DEBUG(0, "free_gpu()",
*ptr);
assert(*ptr != NULL);
hipFree(*ptr);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*ptr = NULL;
}
template <typename DataType>
void set_to_zero_gpu(const int blocksize,
const int max_threads,
const int size, DataType *ptr) {
LOG_DEBUG(0, "set_to_zero_gpu()",
"size =" << size <<
" ptr=" << ptr);
if (size > 0) {
assert(ptr != NULL);
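// A single hipMemset over the whole buffer replaces the element-wise kernel kept commented out below.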
hipMemset(ptr, 0, size*sizeof(DataType));
CHECK_CUDA_ERROR(__FILE__, __LINE__);
/*
int s = size ;
int k = (size/blocksize)/max_threads + 1;
if (k > 1) s = size / k;
dim3 BlockSize(blocksize);
dim3 GridSize(s / blocksize + 1);
kernel_set_to_zeros<DataType, int> <<<GridSize, BlockSize>>> (size, ptr);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
/*
// 1D accessing, no stride
dim3 BlockSize(blocksize);
dim3 GridSize(size / blocksize + 1);
kernel_set_to_zeros<DataType, int> <<<GridSize, BlockSize>>> (size, ptr);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
}
}
template <typename DataType>
void set_to_one_gpu(const int blocksize,
const int max_threads,
const int size, DataType *ptr) {
LOG_DEBUG(0, "set_to_zero_gpu()",
"size =" << size <<
" ptr=" << ptr);
if (size > 0) {
assert(ptr != NULL);
int s = size ;
int k = (size/blocksize)/max_threads + 1;
if (k > 1) s = size / k;
dim3 BlockSize(blocksize);
dim3 GridSize(s / blocksize + 1);
hipLaunchKernelGGL(( kernel_set_to_ones<DataType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, size, ptr);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
/*
// 1D accessing, no stride
dim3 BlockSize(blocksize);
dim3 GridSize(size / blocksize + 1);
hipLaunchKernelGGL(( kernel_set_to_ones<DataType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, size, ptr);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
}
}
#ifdef PARALUTION_CUDA_PINNED_MEMORY
template void allocate_host<float >(const int size, float **ptr);
template void allocate_host<double >(const int size, double **ptr);
template void allocate_host<int >(const int size, int **ptr);
template void allocate_host<unsigned int>(const int size, unsigned int **ptr);
template void allocate_host<char >(const int size, char **ptr);
template void free_host<float >(float **ptr);
template void free_host<double >(double **ptr);
template void free_host<int >(int **ptr);
template void free_host<unsigned int>(unsigned int **ptr);
template void free_host<char >(char **ptr);
#endif
template void allocate_gpu<float >(const int size, float **ptr);
template void allocate_gpu<double >(const int size, double **ptr);
template void allocate_gpu<int >(const int size, int **ptr);
template void allocate_gpu<unsigned int>(const int size, unsigned int **ptr);
template void allocate_gpu<char >(const int size, char **ptr);
template void free_gpu<float >(float **ptr);
template void free_gpu<double >(double **ptr);
template void free_gpu<int >(int **ptr);
template void free_gpu<unsigned int>(unsigned int **ptr);
template void free_gpu<char >(char **ptr);
template void set_to_zero_gpu<float >(const int blocksize, const int max_threads, const int size, float *ptr);
template void set_to_zero_gpu<double >(const int blocksize, const int max_threads, const int size, double *ptr);
template void set_to_zero_gpu<int >(const int blocksize, const int max_threads, const int size, int *ptr);
template void set_to_zero_gpu<unsigned int>(const int blocksize, const int max_threads, const int size, unsigned int *ptr);
template void set_to_zero_gpu<char >(const int blocksize, const int max_threads, const int size, char *ptr);
template void set_to_one_gpu<float >(const int blocksize, const int max_threads, const int size, float *ptr);
template void set_to_one_gpu<double >(const int blocksize, const int max_threads, const int size, double *ptr);
template void set_to_one_gpu<int >(const int blocksize, const int max_threads, const int size, int *ptr);
template void set_to_one_gpu<unsigned int>(const int blocksize, const int max_threads, const int size, unsigned int *ptr);
template void set_to_one_gpu<char >(const int blocksize, const int max_threads, const int size, char *ptr);
}
|
3b4797fc1574ce04a1f55e91c1b35da14e85436c.cu
|
// *************************************************************************
//
// PARALUTION www.paralution.com
//
// Copyright (C) 2012-2014 Dimitar Lukarski
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// *************************************************************************
// PARALUTION version 0.7.0b
#include <cmath>
#include "gpu_allocate_free.hpp"
#include <cuda.h>
#include <assert.h>
#include "gpu_utils.hpp"
#include "cuda_kernels_general.hpp"
#include "../../utils/allocate_free.hpp"
namespace paralution {
#ifdef PARALUTION_CUDA_PINNED_MEMORY
template <typename DataType>
void allocate_host(const int size, DataType **ptr) {
LOG_DEBUG(0, "allocate_host()",
size);
if (size > 0) {
assert(*ptr == NULL);
// *ptr = new DataType[size];
cudaMallocHost((void **)ptr, size*sizeof(DataType));
CHECK_CUDA_ERROR(__FILE__, __LINE__);
LOG_DEBUG(0, "allocate_host()",
*ptr);
assert(*ptr != NULL);
}
}
template <typename DataType>
void free_host(DataType **ptr) {
LOG_DEBUG(0, "free_host()",
*ptr);
assert(*ptr != NULL);
// delete[] *ptr;
cudaFreeHost(*ptr);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*ptr = NULL;
}
#endif
template <typename DataType>
void allocate_gpu(const int size, DataType **ptr) {
LOG_DEBUG(0, "allocate_gpu()",
size);
if (size > 0) {
assert(*ptr == NULL);
cudaMalloc( (void **)ptr, size*sizeof(DataType));
CHECK_CUDA_ERROR(__FILE__, __LINE__);
assert(*ptr != NULL);
}
}
template <typename DataType>
void free_gpu(DataType **ptr) {
LOG_DEBUG(0, "free_gpu()",
*ptr);
assert(*ptr != NULL);
cudaFree(*ptr);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*ptr = NULL;
}
template <typename DataType>
void set_to_zero_gpu(const int blocksize,
const int max_threads,
const int size, DataType *ptr) {
LOG_DEBUG(0, "set_to_zero_gpu()",
"size =" << size <<
" ptr=" << ptr);
if (size > 0) {
assert(ptr != NULL);
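// A single cudaMemset over the whole buffer replaces the element-wise kernel kept commented out below.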
cudaMemset(ptr, 0, size*sizeof(DataType));
CHECK_CUDA_ERROR(__FILE__, __LINE__);
/*
int s = size ;
int k = (size/blocksize)/max_threads + 1;
if (k > 1) s = size / k;
dim3 BlockSize(blocksize);
dim3 GridSize(s / blocksize + 1);
kernel_set_to_zeros<DataType, int> <<<GridSize, BlockSize>>> (size, ptr);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
/*
// 1D accessing, no stride
dim3 BlockSize(blocksize);
dim3 GridSize(size / blocksize + 1);
kernel_set_to_zeros<DataType, int> <<<GridSize, BlockSize>>> (size, ptr);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
}
}
template <typename DataType>
void set_to_one_gpu(const int blocksize,
const int max_threads,
const int size, DataType *ptr) {
LOG_DEBUG(0, "set_to_zero_gpu()",
"size =" << size <<
" ptr=" << ptr);
if (size > 0) {
assert(ptr != NULL);
int s = size ;
int k = (size/blocksize)/max_threads + 1;
if (k > 1) s = size / k;
dim3 BlockSize(blocksize);
dim3 GridSize(s / blocksize + 1);
kernel_set_to_ones<DataType, int> <<<GridSize, BlockSize>>> (size, ptr);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
/*
// 1D accessing, no stride
dim3 BlockSize(blocksize);
dim3 GridSize(size / blocksize + 1);
kernel_set_to_ones<DataType, int> <<<GridSize, BlockSize>>> (size, ptr);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
}
}
#ifdef PARALUTION_CUDA_PINNED_MEMORY
template void allocate_host<float >(const int size, float **ptr);
template void allocate_host<double >(const int size, double **ptr);
template void allocate_host<int >(const int size, int **ptr);
template void allocate_host<unsigned int>(const int size, unsigned int **ptr);
template void allocate_host<char >(const int size, char **ptr);
template void free_host<float >(float **ptr);
template void free_host<double >(double **ptr);
template void free_host<int >(int **ptr);
template void free_host<unsigned int>(unsigned int **ptr);
template void free_host<char >(char **ptr);
#endif
template void allocate_gpu<float >(const int size, float **ptr);
template void allocate_gpu<double >(const int size, double **ptr);
template void allocate_gpu<int >(const int size, int **ptr);
template void allocate_gpu<unsigned int>(const int size, unsigned int **ptr);
template void allocate_gpu<char >(const int size, char **ptr);
template void free_gpu<float >(float **ptr);
template void free_gpu<double >(double **ptr);
template void free_gpu<int >(int **ptr);
template void free_gpu<unsigned int>(unsigned int **ptr);
template void free_gpu<char >(char **ptr);
template void set_to_zero_gpu<float >(const int blocksize, const int max_threads, const int size, float *ptr);
template void set_to_zero_gpu<double >(const int blocksize, const int max_threads, const int size, double *ptr);
template void set_to_zero_gpu<int >(const int blocksize, const int max_threads, const int size, int *ptr);
template void set_to_zero_gpu<unsigned int>(const int blocksize, const int max_threads, const int size, unsigned int *ptr);
template void set_to_zero_gpu<char >(const int blocksize, const int max_threads, const int size, char *ptr);
template void set_to_one_gpu<float >(const int blocksize, const int max_threads, const int size, float *ptr);
template void set_to_one_gpu<double >(const int blocksize, const int max_threads, const int size, double *ptr);
template void set_to_one_gpu<int >(const int blocksize, const int max_threads, const int size, int *ptr);
template void set_to_one_gpu<unsigned int>(const int blocksize, const int max_threads, const int size, unsigned int *ptr);
template void set_to_one_gpu<char >(const int blocksize, const int max_threads, const int size, char *ptr);
}
|
53c30983af8f448aaf9bd66d00b77677fde2ea58.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "absolute_layer_updater_cuda.h"
#include <hip/hip_runtime.h>
#include "../neural_network_exception.h"
#include "util_cuda.h"
__global__ void absolute_upd_kernel(
const float4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input[elem_id];
val.x = fabsf(val.x);
val.y = fabsf(val.y);
val.z = fabsf(val.z);
val.w = fabsf(val.w);
output[elem_id] = val;
}
}
__global__ void absolute_deriviative_upd_kernel(
float4 * __restrict errors,
const float4 * __restrict input_neurons,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
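// d|x|/dx = sign(x): negate the propagated error wherever the forward-pass input was negative.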
float4 inp = input_neurons[elem_id];
float4 current_error = errors[elem_id];
if (inp.x < 0.0F)
current_error.x = -current_error.x;
if (inp.y < 0.0F)
current_error.y = -current_error.y;
if (inp.z < 0.0F)
current_error.z = -current_error.z;
if (inp.w < 0.0F)
current_error.w = -current_error.w;
errors[elem_id] = current_error;
}
}
namespace nnforge
{
namespace cuda
{
absolute_layer_updater_cuda::absolute_layer_updater_cuda()
{
}
absolute_layer_updater_cuda::~absolute_layer_updater_cuda()
{
}
void absolute_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
if (offset_input_entry_id > 0)
throw neural_network_exception("absolute_layer_updater_cuda is not able to run using offset");
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( absolute_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_neurons_buffer,
*output_neurons_buffer,
elem_count);
}
void absolute_layer_updater_cuda::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( absolute_deriviative_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*output_errors_buffer,
*input_neurons_buffer,
elem_count);
}
bool absolute_layer_updater_cuda::is_in_place_backprop() const
{
return true;
}
}
}
|
53c30983af8f448aaf9bd66d00b77677fde2ea58.cu
|
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "absolute_layer_updater_cuda.h"
#include <cuda_runtime.h>
#include "../neural_network_exception.h"
#include "util_cuda.h"
__global__ void absolute_upd_kernel(
const float4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input[elem_id];
val.x = fabsf(val.x);
val.y = fabsf(val.y);
val.z = fabsf(val.z);
val.w = fabsf(val.w);
output[elem_id] = val;
}
}
__global__ void absolute_deriviative_upd_kernel(
float4 * __restrict errors,
const float4 * __restrict input_neurons,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
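// d|x|/dx = sign(x): negate the propagated error wherever the forward-pass input was negative.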
float4 inp = input_neurons[elem_id];
float4 current_error = errors[elem_id];
if (inp.x < 0.0F)
current_error.x = -current_error.x;
if (inp.y < 0.0F)
current_error.y = -current_error.y;
if (inp.z < 0.0F)
current_error.z = -current_error.z;
if (inp.w < 0.0F)
current_error.w = -current_error.w;
errors[elem_id] = current_error;
}
}
namespace nnforge
{
namespace cuda
{
absolute_layer_updater_cuda::absolute_layer_updater_cuda()
{
}
absolute_layer_updater_cuda::~absolute_layer_updater_cuda()
{
}
void absolute_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
if (offset_input_entry_id > 0)
throw neural_network_exception("absolute_layer_updater_cuda is not able to run using offset");
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
absolute_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_neurons_buffer,
*output_neurons_buffer,
elem_count);
}
void absolute_layer_updater_cuda::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
absolute_deriviative_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*output_errors_buffer,
*input_neurons_buffer,
elem_count);
}
bool absolute_layer_updater_cuda::is_in_place_backprop() const
{
return true;
}
}
}
|
a8cd9bf2ce411c3a3dfa7732a3f41f7a7760c21b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include "ppl/cv/cuda/copymakeborder.h"
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
__DEVICE__
int borderInterpolate0(int index, int range, BorderType border_type) {
if (border_type == BORDER_TYPE_DEFAULT ||
border_type == BORDER_TYPE_REFLECT_101) {
if (index < 0) {
return 0 - index;
}
else if (index < range) {
return index;
}
else {
return (range << 1) - index - 2;
}
}
else if (border_type == BORDER_TYPE_CONSTANT) {
if (index < 0) {
return -1;
}
else if (index < range) {
return index;
}
else {
return -1;
}
}
else if (border_type == BORDER_TYPE_REPLICATE) {
if (index < 0) {
return 0;
}
else if (index < range) {
return index;
}
else {
return range - 1;
}
}
else if (border_type == BORDER_TYPE_REFLECT) {
if (index < 0) {
return -1 - index;
}
else if (index < range) {
return index;
}
else {
return (range << 1) - index - 1;
}
}
else if (border_type == BORDER_TYPE_WRAP) {
if (index < 0) {
return index + range;
}
else if (index < range) {
return index;
}
else {
return index - range;
}
}
else {
return -2;
}
}
__DEVICE__
int borderInterpolate1(int index, int range, BorderType border_type) {
if (border_type == BORDER_TYPE_DEFAULT ||
border_type == BORDER_TYPE_REFLECT_101) {
if (index >= 0 && index < range) {
return index;
}
else {
if (range == 1) {
index = 0;
}
else {
do {
if (index < 0)
index = 0 - index;
else
index = (range << 1) - index - 2;
} while (index >= range || index < 0);
}
return index;
}
}
else if (border_type == BORDER_TYPE_CONSTANT) {
if (index < 0) {
return -1;
}
else if (index < range) {
return index;
}
else {
return -1;
}
}
else if (border_type == BORDER_TYPE_REPLICATE) {
if (index < 0) {
return 0;
}
else if (index < range) {
return index;
}
else {
return range - 1;
}
}
else if (border_type == BORDER_TYPE_REFLECT) {
if (index >= 0 && index < range) {
return index;
}
else {
if (range == 1) {
index = 0;
}
else {
do {
if (index < 0)
index = -1 - index;
else
index = (range << 1) - index - 1;
} while (index >= range || index < 0);
}
return index;
}
}
else if (border_type == BORDER_TYPE_WRAP) {
if (index >= 0 && index < range) {
return index;
}
else {
if (range == 1) {
index = 0;
}
else {
do {
if (index < 0)
index += range;
else
index -= range;
} while (index >= range || index < 0);
}
return index;
}
}
else {
return -2;
}
}
template <typename T0, typename T1>
__DEVICE__
T0 makeValuen(T1 value);
template <>
__DEVICE__
uchar makeValuen<uchar, uchar>(uchar value) {
return value;
}
template <>
__DEVICE__
uchar3 makeValuen<uchar3, uchar>(uchar value) {
return make_uchar3(value, value, value);
}
template <>
__DEVICE__
uchar4 makeValuen<uchar4, uchar>(uchar value) {
return make_uchar4(value, value, value, value);
}
template <>
__DEVICE__
float makeValuen<float, float>(float value) {
return value;
}
template <>
__DEVICE__
float3 makeValuen<float3, float>(float value) {
return make_float3(value, value, value);
}
template <>
__DEVICE__
float4 makeValuen<float4, float>(float value) {
return make_float4(value, value, value, value);
}
template <typename T0, typename T1>
__global__
void copyMakeBorderKernel(const T0* src, int rows, int cols, int src_stride,
T0* dst, int dst_stride, int top, int bottom,
int left, int right, BorderType border_type,
T1 border_value, bool small_border) {
int element_x, element_y;
if (sizeof(T1) == 1) {
element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
}
else {
element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
}
if (element_y >= (rows + top + bottom) ||
element_x >= (cols + left + right)) {
return;
}
int src_x = element_x - left;
int src_y = element_y - top;
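// small_border means every border is narrower than the image, so one reflection/wrap step (borderInterpolate0) suffices; otherwise the iterative borderInterpolate1 is used.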
if (small_border == true) {
src_x = borderInterpolate0(src_x, cols, border_type);
src_y = borderInterpolate0(src_y, rows, border_type);
}
else {
src_x = borderInterpolate1(src_x, cols, border_type);
src_y = borderInterpolate1(src_y, rows, border_type);
}
T0 value;
T0 *input, *output;
if (border_type != BORDER_TYPE_CONSTANT) {
input = (T0*)((uchar*)src + src_y * src_stride);
value = input[src_x];
}
else {
if (src_x != -1 && src_y != -1) {
input = (T0*)((uchar*)src + src_y * src_stride);
value = input[src_x];
}
else {
value = makeValuen<T0, T1>(border_value);
}
}
output = (T0*)((uchar*)dst + element_y * dst_stride);
output[element_x] = value;
}
RetCode copyMakeBorder(const uchar* src, int rows, int cols, int channels,
int src_stride, uchar* dst, int dst_stride, int top,
int bottom, int left, int right, BorderType border_type,
uchar border_value, hipStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows > 0 && cols > 0);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(uchar));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar));
PPL_ASSERT(top >= 0);
PPL_ASSERT(bottom >= 0);
PPL_ASSERT(left >= 0);
PPL_ASSERT(right >= 0);
PPL_ASSERT(border_type == BORDER_TYPE_CONSTANT ||
border_type == BORDER_TYPE_REPLICATE ||
border_type == BORDER_TYPE_REFLECT ||
border_type == BORDER_TYPE_WRAP ||
border_type == BORDER_TYPE_REFLECT_101 ||
border_type == BORDER_TYPE_DEFAULT);
hipError_t code;
if (top == 0 && bottom == 0 && left == 0 && right == 0 &&
src_stride == dst_stride) {
if (src != dst) {
code = hipMemcpyAsync(dst, src, rows * src_stride,
hipMemcpyDeviceToDevice);
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp((cols + left + right), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp((rows + top + bottom), kBlockDimY0, kBlockShiftY0);
bool small_border = false;
if (rows > top && rows > bottom && cols > left && cols > right) {
small_border = true;
}
if (channels == 1) {
hipLaunchKernelGGL(( copyMakeBorderKernel<uchar, uchar>), dim3(grid), dim3(block), 0, stream, src, rows,
cols, src_stride, dst, dst_stride, top, bottom, left, right,
border_type, border_value, small_border);
}
else if (channels == 3) {
hipLaunchKernelGGL(( copyMakeBorderKernel<uchar3, uchar>), dim3(grid), dim3(block), 0, stream,
(uchar3*)src, rows, cols, src_stride, (uchar3*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
else { // channels == 4
hipLaunchKernelGGL(( copyMakeBorderKernel<uchar4, uchar>), dim3(grid), dim3(block), 0, stream,
(uchar4*)src, rows, cols, src_stride, (uchar4*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
code = hipGetLastError();
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
RetCode copyMakeBorder(const float* src, int rows, int cols, int channels,
int src_stride, float* dst, int dst_stride, int top,
int bottom, int left, int right, BorderType border_type,
float border_value, hipStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows > 0 && cols > 0);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(top >= 0);
PPL_ASSERT(bottom >= 0);
PPL_ASSERT(left >= 0);
PPL_ASSERT(right >= 0);
PPL_ASSERT(border_type == BORDER_TYPE_CONSTANT ||
border_type == BORDER_TYPE_REPLICATE ||
border_type == BORDER_TYPE_REFLECT ||
border_type == BORDER_TYPE_WRAP ||
border_type == BORDER_TYPE_REFLECT_101 ||
border_type == BORDER_TYPE_DEFAULT);
hipError_t code;
if (top == 0 && bottom == 0 && left == 0 && right == 0 &&
src_stride == dst_stride) {
if (src != dst) {
code = hipMemcpyAsync(dst, src, rows * src_stride,
hipMemcpyDeviceToDevice);
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
dim3 block, grid;
block.x = kBlockDimX1;
block.y = kBlockDimY1;
grid.x = divideUp((cols + left + right), kBlockDimX1, kBlockShiftX1);
grid.y = divideUp((rows + top + bottom), kBlockDimY1, kBlockShiftY1);
bool small_border = false;
if (rows > top && rows > bottom && cols > left && cols > right) {
small_border = true;
}
if (channels == 1) {
hipLaunchKernelGGL(( copyMakeBorderKernel<float, float>), dim3(grid), dim3(block), 0, stream, src, rows,
cols, src_stride, dst, dst_stride, top, bottom, left, right,
border_type, border_value, small_border);
}
else if (channels == 3) {
hipLaunchKernelGGL(( copyMakeBorderKernel<float3, float>), dim3(grid), dim3(block), 0, stream,
(float3*)src, rows, cols, src_stride, (float3*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
else { // channels == 4
hipLaunchKernelGGL(( copyMakeBorderKernel<float4, float>), dim3(grid), dim3(block), 0, stream,
(float4*)src, rows, cols, src_stride, (float4*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
code = hipGetLastError();
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode CopyMakeBorder<uchar, 1>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
uchar border_value) {
RetCode code = copyMakeBorder(inData, height, width, 1, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<uchar, 3>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
uchar border_value) {
RetCode code = copyMakeBorder(inData, height, width, 3, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<uchar, 4>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
uchar border_value) {
RetCode code = copyMakeBorder(inData, height, width, 4, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<float, 1>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
float border_value) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = copyMakeBorder(inData, height, width, 1, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<float, 3>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
float border_value) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = copyMakeBorder(inData, height, width, 3, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<float, 4>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
float border_value) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = copyMakeBorder(inData, height, width, 4, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
} // namespace cuda
} // namespace cv
} // namespace ppl
|
a8cd9bf2ce411c3a3dfa7732a3f41f7a7760c21b.cu
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include "ppl/cv/cuda/copymakeborder.h"
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
__DEVICE__
int borderInterpolate0(int index, int range, BorderType border_type) {
if (border_type == BORDER_TYPE_DEFAULT ||
border_type == BORDER_TYPE_REFLECT_101) {
if (index < 0) {
return 0 - index;
}
else if (index < range) {
return index;
}
else {
return (range << 1) - index - 2;
}
}
else if (border_type == BORDER_TYPE_CONSTANT) {
if (index < 0) {
return -1;
}
else if (index < range) {
return index;
}
else {
return -1;
}
}
else if (border_type == BORDER_TYPE_REPLICATE) {
if (index < 0) {
return 0;
}
else if (index < range) {
return index;
}
else {
return range - 1;
}
}
else if (border_type == BORDER_TYPE_REFLECT) {
if (index < 0) {
return -1 - index;
}
else if (index < range) {
return index;
}
else {
return (range << 1) - index - 1;
}
}
else if (border_type == BORDER_TYPE_WRAP) {
if (index < 0) {
return index + range;
}
else if (index < range) {
return index;
}
else {
return index - range;
}
}
else {
return -2;
}
}
__DEVICE__
int borderInterpolate1(int index, int range, BorderType border_type) {
if (border_type == BORDER_TYPE_DEFAULT ||
border_type == BORDER_TYPE_REFLECT_101) {
if (index >= 0 && index < range) {
return index;
}
else {
if (range == 1) {
index = 0;
}
else {
do {
if (index < 0)
index = 0 - index;
else
index = (range << 1) - index - 2;
} while (index >= range || index < 0);
}
return index;
}
}
else if (border_type == BORDER_TYPE_CONSTANT) {
if (index < 0) {
return -1;
}
else if (index < range) {
return index;
}
else {
return -1;
}
}
else if (border_type == BORDER_TYPE_REPLICATE) {
if (index < 0) {
return 0;
}
else if (index < range) {
return index;
}
else {
return range - 1;
}
}
else if (border_type == BORDER_TYPE_REFLECT) {
if (index >= 0 && index < range) {
return index;
}
else {
if (range == 1) {
index = 0;
}
else {
do {
if (index < 0)
index = -1 - index;
else
index = (range << 1) - index - 1;
} while (index >= range || index < 0);
}
return index;
}
}
else if (border_type == BORDER_TYPE_WRAP) {
if (index >= 0 && index < range) {
return index;
}
else {
if (range == 1) {
index = 0;
}
else {
do {
if (index < 0)
index += range;
else
index -= range;
} while (index >= range || index < 0);
}
return index;
}
}
else {
return -2;
}
}
template <typename T0, typename T1>
__DEVICE__
T0 makeValuen(T1 value);
template <>
__DEVICE__
uchar makeValuen<uchar, uchar>(uchar value) {
return value;
}
template <>
__DEVICE__
uchar3 makeValuen<uchar3, uchar>(uchar value) {
return make_uchar3(value, value, value);
}
template <>
__DEVICE__
uchar4 makeValuen<uchar4, uchar>(uchar value) {
return make_uchar4(value, value, value, value);
}
template <>
__DEVICE__
float makeValuen<float, float>(float value) {
return value;
}
template <>
__DEVICE__
float3 makeValuen<float3, float>(float value) {
return make_float3(value, value, value);
}
template <>
__DEVICE__
float4 makeValuen<float4, float>(float value) {
return make_float4(value, value, value, value);
}
template <typename T0, typename T1>
__global__
void copyMakeBorderKernel(const T0* src, int rows, int cols, int src_stride,
T0* dst, int dst_stride, int top, int bottom,
int left, int right, BorderType border_type,
T1 border_value, bool small_border) {
int element_x, element_y;
if (sizeof(T1) == 1) {
element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
}
else {
element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
}
if (element_y >= (rows + top + bottom) ||
element_x >= (cols + left + right)) {
return;
}
int src_x = element_x - left;
int src_y = element_y - top;
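// small_border means every border is narrower than the image, so one reflection/wrap step (borderInterpolate0) suffices; otherwise the iterative borderInterpolate1 is used.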
if (small_border == true) {
src_x = borderInterpolate0(src_x, cols, border_type);
src_y = borderInterpolate0(src_y, rows, border_type);
}
else {
src_x = borderInterpolate1(src_x, cols, border_type);
src_y = borderInterpolate1(src_y, rows, border_type);
}
T0 value;
T0 *input, *output;
if (border_type != BORDER_TYPE_CONSTANT) {
input = (T0*)((uchar*)src + src_y * src_stride);
value = input[src_x];
}
else {
if (src_x != -1 && src_y != -1) {
input = (T0*)((uchar*)src + src_y * src_stride);
value = input[src_x];
}
else {
value = makeValuen<T0, T1>(border_value);
}
}
output = (T0*)((uchar*)dst + element_y * dst_stride);
output[element_x] = value;
}
RetCode copyMakeBorder(const uchar* src, int rows, int cols, int channels,
int src_stride, uchar* dst, int dst_stride, int top,
int bottom, int left, int right, BorderType border_type,
uchar border_value, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows > 0 && cols > 0);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(uchar));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar));
PPL_ASSERT(top >= 0);
PPL_ASSERT(bottom >= 0);
PPL_ASSERT(left >= 0);
PPL_ASSERT(right >= 0);
PPL_ASSERT(border_type == BORDER_TYPE_CONSTANT ||
border_type == BORDER_TYPE_REPLICATE ||
border_type == BORDER_TYPE_REFLECT ||
border_type == BORDER_TYPE_WRAP ||
border_type == BORDER_TYPE_REFLECT_101 ||
border_type == BORDER_TYPE_DEFAULT);
cudaError_t code;
if (top == 0 && bottom == 0 && left == 0 && right == 0 &&
src_stride == dst_stride) {
if (src != dst) {
code = cudaMemcpyAsync(dst, src, rows * src_stride,
cudaMemcpyDeviceToDevice);
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp((cols + left + right), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp((rows + top + bottom), kBlockDimY0, kBlockShiftY0);
bool small_border = false;
if (rows > top && rows > bottom && cols > left && cols > right) {
small_border = true;
}
if (channels == 1) {
copyMakeBorderKernel<uchar, uchar><<<grid, block, 0, stream>>>(src, rows,
cols, src_stride, dst, dst_stride, top, bottom, left, right,
border_type, border_value, small_border);
}
else if (channels == 3) {
copyMakeBorderKernel<uchar3, uchar><<<grid, block, 0, stream>>>(
(uchar3*)src, rows, cols, src_stride, (uchar3*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
else { // channels == 4
copyMakeBorderKernel<uchar4, uchar><<<grid, block, 0, stream>>>(
(uchar4*)src, rows, cols, src_stride, (uchar4*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
RetCode copyMakeBorder(const float* src, int rows, int cols, int channels,
int src_stride, float* dst, int dst_stride, int top,
int bottom, int left, int right, BorderType border_type,
float border_value, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows > 0 && cols > 0);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(top >= 0);
PPL_ASSERT(bottom >= 0);
PPL_ASSERT(left >= 0);
PPL_ASSERT(right >= 0);
PPL_ASSERT(border_type == BORDER_TYPE_CONSTANT ||
border_type == BORDER_TYPE_REPLICATE ||
border_type == BORDER_TYPE_REFLECT ||
border_type == BORDER_TYPE_WRAP ||
border_type == BORDER_TYPE_REFLECT_101 ||
border_type == BORDER_TYPE_DEFAULT);
cudaError_t code;
if (top == 0 && bottom == 0 && left == 0 && right == 0 &&
src_stride == dst_stride) {
if (src != dst) {
code = cudaMemcpyAsync(dst, src, rows * src_stride,
cudaMemcpyDeviceToDevice);
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
dim3 block, grid;
block.x = kBlockDimX1;
block.y = kBlockDimY1;
grid.x = divideUp((cols + left + right), kBlockDimX1, kBlockShiftX1);
grid.y = divideUp((rows + top + bottom), kBlockDimY1, kBlockShiftY1);
bool small_border = false;
if (rows > top && rows > bottom && cols > left && cols > right) {
small_border = true;
}
if (channels == 1) {
copyMakeBorderKernel<float, float><<<grid, block, 0, stream>>>(src, rows,
cols, src_stride, dst, dst_stride, top, bottom, left, right,
border_type, border_value, small_border);
}
else if (channels == 3) {
copyMakeBorderKernel<float3, float><<<grid, block, 0, stream>>>(
(float3*)src, rows, cols, src_stride, (float3*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
else { // channels == 4
copyMakeBorderKernel<float4, float><<<grid, block, 0, stream>>>(
(float4*)src, rows, cols, src_stride, (float4*)dst, dst_stride, top,
bottom, left, right, border_type, border_value, small_border);
}
code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode CopyMakeBorder<uchar, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
uchar border_value) {
RetCode code = copyMakeBorder(inData, height, width, 1, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<uchar, 3>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
uchar border_value) {
RetCode code = copyMakeBorder(inData, height, width, 3, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<uchar, 4>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
uchar border_value) {
RetCode code = copyMakeBorder(inData, height, width, 4, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<float, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
float border_value) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = copyMakeBorder(inData, height, width, 1, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<float, 3>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
float border_value) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = copyMakeBorder(inData, height, width, 3, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
template <>
RetCode CopyMakeBorder<float, 4>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int top,
int bottom,
int left,
int right,
BorderType border_type,
float border_value) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = copyMakeBorder(inData, height, width, 4, inWidthStride,
outData, outWidthStride, top, bottom, left,
right, border_type, border_value, stream);
return code;
}
} // namespace cuda
} // namespace cv
} // namespace ppl
|
27daf90655cd55207d954af166c7f02cd3522526.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <iostream>
#include <cmath>
#include <ctime>
#define mu 0.05f
#define sigma .2f
#define timespan 252.0f
#define TRIALS 10000
#define numThreads 512
__global__ void europeanOption(
int size, int iterations,
float *d_price, float initialPrice, float strikePrice,
hiprandState_t *d_state)
{
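// Descriptive sketch of what the loop below does: with dt = 1/timespan, each step applies
// the Euler update S *= 1 + mu*dt + sigma*sqrt(dt)*Z with Z ~ N(0,1), and the value stored
// at the end is the call payoff max(S_T - strikePrice, 0).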
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < size)
{
for (int i = 0; i < iterations; i++)
{
initialPrice *= 1 + mu / timespan + hiprand_normal(&d_state[tid])*sigma/sqrt(timespan);
}
d_price[tid] = initialPrice - strikePrice;
if (d_price[tid] < 0)
{
d_price[tid] = 0;
}
}
}
__global__ void init(
unsigned int seed,
hiprandState_t *d_state)
{
hiprand_init(
seed,
threadIdx.x + blockDim.x * blockIdx.x,
0,
&d_state[threadIdx.x + blockDim.x * blockIdx.x]);
}
int main()
{
float *h_prices, *d_prices;
h_prices = new float[TRIALS];
hipMalloc((void**)&d_prices, TRIALS*sizeof(float));
hiprandState_t *d_state;
hipMalloc((void**)&d_state, TRIALS * sizeof(hiprandState_t));
hipLaunchKernelGGL((init), dim3((TRIALS + numThreads - 1)/numThreads), dim3(numThreads), 0, 0, time(0), d_state);
europeanOption << <(TRIALS + numThreads - 1) / numThreads, numThreads >> >(
TRIALS, 252,
d_prices, 100.0f, 100.0f,
d_state);
hipMemcpy(h_prices, d_prices, TRIALS*sizeof(float), hipMemcpyDeviceToHost);
float price = 0;
int count = 0;
for (int i = 0; i < TRIALS; i++)
{
price += h_prices[i];
if (h_prices[i] != 0)
{
count += 1;
}
}
price /= TRIALS;
std::cout << "The Theoretical Price of the Option is " << price << "." << std::endl;
std::cout << "Count is " << count << "." << std::endl;
delete[] h_prices;
hipFree(d_state); hipFree(d_prices);
hipDeviceReset();
return 0;
}
|
27daf90655cd55207d954af166c7f02cd3522526.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
#include <cmath>
#include <ctime>
#define mu 0.05f
#define sigma .2f
#define timespan 252.0f
#define TRIALS 10000
#define numThreads 512
__global__ void europeanOption(
int size, int iterations,
float *d_price, float initialPrice, float strikePrice,
curandState_t *d_state)
{
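// Descriptive sketch of what the loop below does: with dt = 1/timespan, each step applies
// the Euler update S *= 1 + mu*dt + sigma*sqrt(dt)*Z with Z ~ N(0,1), and the value stored
// at the end is the call payoff max(S_T - strikePrice, 0).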
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < size)
{
for (int i = 0; i < iterations; i++)
{
initialPrice *= 1 + mu / timespan + curand_normal(&d_state[tid])*sigma/sqrt(timespan);
}
d_price[tid] = initialPrice - strikePrice;
if (d_price[tid] < 0)
{
d_price[tid] = 0;
}
}
}
__global__ void init(
unsigned int seed,
curandState_t *d_state)
{
curand_init(
seed,
threadIdx.x + blockDim.x * blockIdx.x,
0,
&d_state[threadIdx.x + blockDim.x * blockIdx.x]);
}
int main()
{
float *h_prices, *d_prices;
h_prices = new float[TRIALS];
cudaMalloc((void**)&d_prices, TRIALS*sizeof(float));
curandState_t *d_state;
cudaMalloc((void**)&d_state, TRIALS * sizeof(curandState_t));
init <<< (TRIALS + numThreads - 1)/numThreads, numThreads >>>(time(0), d_state);
europeanOption << <(TRIALS + numThreads - 1) / numThreads, numThreads >> >(
TRIALS, 252,
d_prices, 100.0f, 100.0f,
d_state);
cudaMemcpy(h_prices, d_prices, TRIALS*sizeof(float), cudaMemcpyDeviceToHost);
float price = 0;
int count = 0;
for (int i = 0; i < TRIALS; i++)
{
price += h_prices[i];
if (h_prices[i] != 0)
{
count += 1;
}
}
price /= TRIALS;
std::cout << "The Theoretical Price of the Option is " << price << "." << std::endl;
std::cout << "Count is " << count << "." << std::endl;
delete[] h_prices;
cudaFree(d_state); cudaFree(d_prices);
cudaDeviceReset();
return 0;
}
|
fe2f92c0c53e6203aba8a8cc778742259d6b2b05.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
//implement one grid with 4 blocks and 256 threads in total, 8x8 threads for each block
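//e.g. one launch matching the comment above (not used in main below) could be:
// dim3 block(8, 8); // 8x8 = 64 threads per block
// dim3 grid(2, 2); // 2x2 = 4 blocks -> 256 threads in total
// hipLaunchKernelGGL((print_threadIds), dim3(grid), dim3(block), 0, 0);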
__global__ void print_threadIds()
{
printf("blockIdx,x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, blockDim.z : %d gridDim.x : %d, gridDim.y : %d, gridDim.z : %d \n",blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
__global__ void unique_idx_calc_threadIdx(int * input)
{
int tid = threadIdx.x;
int offset = (blockIdx.x>0)? 4:0;
printf("blockIdx : %d, threadIdx : %d, value : %d\n", blockIdx.x, tid, input[tid+offset]);
}
__global__ void unique_gid_calculation(int * input){
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int offset = blockIdx.y * gridDim.x * (blockDim.x * blockDim.y) + blockIdx.x * (blockDim.x * blockDim.y);
//number of threads in one row = gridDim.x * blockDim.x
//row offset: gridDim.x * blockDim.x * blockIdx.y
//int offset = blockIdx.x * (blockDim.x * blockDim.y) + blockIdx.y * (blockDim.x * blockDim.y);
int gid = tid + offset;
printf("gid: %d, input[gid]: %d \n",gid, input[gid]);
printf("threadIdx.x : %d, blockIdx.x : %d, blockIdx.y : %d, blockDim.x : %d, blockDim.y : %d, gridDim.x : %d gid : %d value : %d\n",
threadIdx.x, blockIdx.x, blockIdx.y, blockDim.x, blockDim.y, gridDim.x, gid, input[gid]);
}
__global__ void mem_trs_test(int * input)
{
int n_per_block = blockDim.x * blockDim.y * blockDim.z;
int offset_grid = blockIdx.z * (gridDim.x*gridDim.y) * n_per_block + blockIdx.y * gridDim.x * n_per_block + blockIdx.x * n_per_block;
int offset_block = threadIdx.z * (blockDim.x*blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;
int gid = offset_grid + offset_block;
printf("tid : %d, gid : %d, value : %d \n", threadIdx.x, gid, input[gid]);
}
__global__ void mem_trs_test1(int * input,int size)
{
int gid = blockIdx.y * (blockDim.x*blockDim.y)*gridDim.x + blockIdx.x * (blockDim.x*blockDim.y) + threadIdx.x;
//if(gid<size){
printf("tid : %d, gid : %d, value : %d \n", threadIdx.x, gid, input[gid]);
//}
}
int main()
{
int size = 64;
int byte_size = size * sizeof(int);
int *h_input;
h_input = (int*)malloc(byte_size);
time_t t;
srand((unsigned)time(&t));
for(int i=0;i<size;i++)
{
h_input[i] = (int)(rand() &0xff);
}
int * d_input;
hipMalloc((void**)&d_input, byte_size);
hipMemcpy(d_input,h_input,byte_size,hipMemcpyHostToDevice);
dim3 block(2,2,2);
dim3 grid(2,2,2);
hipLaunchKernelGGL(( mem_trs_test), dim3(grid),dim3(block), 0, 0, d_input);
//data transfer between host and device
//direction:
//host to device - cudamemcpyhtod
//device to host - cudamemcpydtoh
//device to device - cudamemcpydtod
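//e.g. (illustrative calls only; d_copy would be a second, hypothetical device buffer):
//hipMemcpy(d_input, h_input, byte_size, hipMemcpyHostToDevice); // host to device
//hipMemcpy(h_input, d_input, byte_size, hipMemcpyDeviceToHost); // device to host
//hipMemcpy(d_copy, d_input, byte_size, hipMemcpyDeviceToDevice); // device to device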
hipDeviceSynchronize();
hipFree(d_input);
free(h_input);
hipDeviceReset();
return 0;
}
|
fe2f92c0c53e6203aba8a8cc778742259d6b2b05.cu
|
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
//implement one grid with 4 blocks and 256 threads in total, 8x8 threads for each block
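//e.g. one launch matching the comment above (not used in main below) could be:
// dim3 block(8, 8); // 8x8 = 64 threads per block
// dim3 grid(2, 2); // 2x2 = 4 blocks -> 256 threads in total
// print_threadIds<<<grid, block>>>();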
__global__ void print_threadIds()
{
printf("blockIdx,x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, blockDim.z : %d gridDim.x : %d, gridDim.y : %d, gridDim.z : %d \n",blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
__global__ void unique_idx_calc_threadIdx(int * input)
{
int tid = threadIdx.x;
int offset = (blockIdx.x>0)? 4:0;
printf("blockIdx : %d, threadIdx : %d, value : %d\n", blockIdx.x, tid, input[tid+offset]);
}
__global__ void unique_gid_calculation(int * input){
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int offset = blockIdx.y * gridDim.x * (blockDim.x * blockDim.y) + blockIdx.x * (blockDim.x * blockDim.y);
//number of threads in one row = gridDim.x * blockDim.x
//row offset: gridDim.x * blockDim.x * blockIdx.y
//int offset = blockIdx.x * (blockDim.x * blockDim.y) + blockIdx.y * (blockDim.x * blockDim.y);
int gid = tid + offset;
printf("gid: %d, input[gid]: %d \n",gid, input[gid]);
printf("threadIdx.x : %d, blockIdx.x : %d, blockIdx.y : %d, blockDim.x : %d, blockDim.y : %d, gridDim.x : %d gid : %d value : %d\n",
threadIdx.x, blockIdx.x, blockIdx.y, blockDim.x, blockDim.y, gridDim.x, gid, input[gid]);
}
__global__ void mem_trs_test(int * input)
{
int n_per_block = blockDim.x * blockDim.y * blockDim.z;
int offset_grid = blockIdx.z * (gridDim.x*gridDim.y) * n_per_block + blockIdx.y * gridDim.x * n_per_block + blockIdx.x * n_per_block;
int offset_block = threadIdx.z * (blockDim.x*blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;
int gid = offset_grid + offset_block;
printf("tid : %d, gid : %d, value : %d \n", threadIdx.x, gid, input[gid]);
}
__global__ void mem_trs_test1(int * input,int size)
{
int gid = blockIdx.y * (blockDim.x*blockDim.y)*gridDim.x + blockIdx.x * (blockDim.x*blockDim.y) + threadIdx.x;
//if(gid<size){
printf("tid : %d, gid : %d, value : %d \n", threadIdx.x, gid, input[gid]);
//}
}
int main()
{
int size = 64;
int byte_size = size * sizeof(int);
int *h_input;
h_input = (int*)malloc(byte_size);
time_t t;
srand((unsigned)time(&t));
for(int i=0;i<size;i++)
{
h_input[i] = (int)(rand() &0xff);
}
int * d_input;
cudaMalloc((void**)&d_input, byte_size);
cudaMemcpy(d_input,h_input,byte_size,cudaMemcpyHostToDevice);
dim3 block(2,2,2);
dim3 grid(2,2,2);
mem_trs_test<<<grid,block>>>(d_input);
//data transfer between host and device
//direction:
//host to device - cudamemcpyhtod
//device to host - cudamemcpydtoh
//device to device - cudamemcpydtod
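//e.g. (illustrative calls only; d_copy would be a second, hypothetical device buffer):
//cudaMemcpy(d_input, h_input, byte_size, cudaMemcpyHostToDevice); // host to device
//cudaMemcpy(h_input, d_input, byte_size, cudaMemcpyDeviceToHost); // device to host
//cudaMemcpy(d_copy, d_input, byte_size, cudaMemcpyDeviceToDevice); // device to device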
cudaDeviceSynchronize();
cudaFree(d_input);
free(h_input);
cudaDeviceReset();
return 0;
}
|
237a282d56348bfdb8f6ea9bceacb96a22c21915.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#undef NDEBUG
#include "global_cuda.h"
#include <hip/device_functions.h>
#include "cell_connection_cuda.h"
#include <stdio.h>
#include "utils_hip.cuh"
#include <cassert>
#include <vector>
#include <fstream>
#include <string>
//#include <thrust/sort.h>
#define AREA_GRID_ORIGINAL 2.0f
#define AREA_GRID_ORIGINAL_D 2.0
#define AREA_GRID ((float)(AREA_GRID_ORIGINAL_D + 1e-7))
#define ANX ((int)(LX / AREA_GRID_ORIGINAL_D + 0.5))
#define ANY ((int)(LY / AREA_GRID_ORIGINAL_D + 0.5))
#define ANZ ((int)(LZ / AREA_GRID_ORIGINAL_D))
#define N3 64
#define N2 400
#define SEARCH_GRID_DIM 5
#define SEARCH_GRID_NUM (SEARCH_GRID_DIM*SEARCH_GRID_DIM*SEARCH_GRID_DIM)
#define SEARCH_PAR_NUM 8
#define SEARCH_THREAD_UPPER_LIMIT 128
#define EXTRACT_STATE_CS(cs) (*(unsigned int*)(&(cs).w))
__global__ void init_grid(int ncell, unsigned int* cstate, cell_pos_set* cs, connected_index_set*cis, unsigned int* aindx_3d, float4* area_info){
unsigned int index = blockIdx.x*blockDim.x + threadIdx.x;
//while (true){ index++; }
//exit(1);
if (index < ncell){
cell_pos_set c = cs[index];
bool ismemb = cstate[index] == MEMB;
using namespace cont;
int aix = (int)((0.5f*LX - p_diff_x(0.5f*LX, c.x)) / (float)AREA_GRID);
int aiy = (int)((0.5f*LY - p_diff_y(0.5f*LY, c.y)) / (float)AREA_GRID);
int aiz = (int)((min0(c.z)) / (float)AREA_GRID);
if ((aix >= ANX || aiy >= ANY || aiz >= ANZ || aix < 0 || aiy < 0 || aiz < 0)) {
printf("err\n");
printf("cx:%lf cy:%lf cz:%lf\n", c.x, c.y, c.z);
printf("aix:%d aiy:%d aiz:%d\n", aix, aiy, aiz);
assert(false);
}
/*
area_info:
x=x pos
y=y pos
z=z pos
w=
head 8bit =connection num(unsigned char)
next 1bit = is memb
rest 23bit = index(as unsigned32bit int)
*/
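// Example with hypothetical values: if the first cell stored in a grid slot is membrane
// cell index 42 and that slot ends up holding 5 cells, the first entry's w becomes
// (5u << 24) | (1u << 23) | 42u once complete_area_info has merged in the count; the
// fields are read back as count = w >> 24, is_memb = (w >> 23) & 1u, index = w & 0x007fffffu.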
int idx3d = aix*ANY*ANZ + aiy*ANZ + aiz;
unsigned int listidx = atomicAdd(&aindx_3d[idx3d], 1);
int areaidx = idx3d*N3 + listidx;
area_info[areaidx] = c;
unsigned int wvalue = ismemb ? index | 1u << 23 : index&(0x007fffff);
*(unsigned int*)&area_info[areaidx].w = wvalue;
cis[index].connected_num = ismemb ? 8 : 0;
/*
This assert can be removed.
*/
if (listidx >= N3){
printf("error:aaaaaa\n");
}
//assert(aindx[aix][aiy][aiz] < (cint)N3);
//c->connected_cell.force_set_count(c->state == MEMB ? MEMB_ADJ_CONN_NUM : 0);
}
}
#define ADD_CELL_CONNECT(cis,me,add) cis[me].index[atomicAdd(&cis[me].connected_num,1)]=add
__global__ void connect_proc(int n_cell, int n_memb, cell_pos_set* cs, connected_index_set* cis, float4* area_info){
using namespace cont;
__shared__ int4 an;
__shared__ cell_pos_set my_pos;
__shared__ float my_radius;
//__shared__ int conn_num[128];
//__shared__ int conn_count;
int my_index = n_memb + blockIdx.x;
if (threadIdx.x == 0){
/*
for (int jj = 0; jj < 128; jj++){
conn_num[jj] = -1;
}
*/
//conn_count = 0;
my_pos = cs[my_index];
my_radius = get_radius(EXTRACT_STATE_CS(my_pos));
an.x = my_pos.x / (float)AREA_GRID;
an.y = my_pos.y / (float)AREA_GRID;
an.z = my_pos.z / (float)AREA_GRID;
//if(my_index>=n_cell-1)
}
#define DIV_NUM 8
int virt_id = threadIdx.x / DIV_NUM;
int divflg = threadIdx.x % DIV_NUM;
int cz = virt_id;
int cy = ((int)(cz / (int)SEARCH_GRID_DIM)); cz = cz%SEARCH_GRID_DIM;
int cx = ((int)(cy / (int)SEARCH_GRID_DIM)) % SEARCH_GRID_DIM; cy = cy%SEARCH_GRID_DIM;
__syncthreads();//do work before sync as much as possible
if (virt_id <= SEARCH_GRID_DIM*SEARCH_GRID_DIM*SEARCH_GRID_DIM){
int aix = an.x - (SEARCH_GRID_DIM / 2) + cx; aix = (aix + ANX) % ANX;
int aiy = an.y - (SEARCH_GRID_DIM / 2) + cy; aiy = (aiy + ANY) % ANY;
int aiz = an.z - (SEARCH_GRID_DIM / 2) + cz; aiz = (aiz + ANZ) % ANZ;
//printf("aix:%d aiy:%d aiz:%d\n", aix, aiy, aiz);
int idx3d = (aix*ANY*ANZ + aiy*ANZ + aiz)*N3;
int m = *(unsigned int*)&area_info[idx3d + 0].w >> 24;
for (int i = divflg; i < m; i += DIV_NUM){
float4 cpos = area_info[idx3d + i];
//__syncthreads();
int cidx = (*(unsigned int*)&cpos.w)&(0x007fffff);
//if (threadIdx.x == 25)printf("idx:%d\n",cidx);
bool op_memb = ((*(unsigned int*)&cpos.w) & 0x00800000) != 0;
if (my_index > cidx){
float diffx = p_diff_x(my_pos.x, cpos.x);
float diffy = p_diff_y(my_pos.y, cpos.y);
float diffz = my_pos.z - cpos.z;
float rad_sum = my_radius + (op_memb ? R_memb : R_max);
if (diffx*diffx + diffy*diffy + diffz*diffz < LJ_THRESH*LJ_THRESH*rad_sum*rad_sum){
ADD_CELL_CONNECT(cis, my_index, cidx);
ADD_CELL_CONNECT(cis, cidx, my_index);
}
}
}
}
}
__global__ void connect_proc2(int n_cell, int n_memb, cell_pos_set* cs, unsigned int* cstate, connected_index_set* cis, float4* area_info){
using namespace cont;
int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ int conn_num[256];
if (index < n_cell){
int mylist[128];
int mylist_count = 0;
cell_pos_set my_pos = cs[index];
bool me_memb = cstate[index] == MEMB;
__syncthreads();
float my_radius = get_radius(EXTRACT_STATE_CS(my_pos));
int4 an = { (int)(my_pos.x / (float)AREA_GRID),
(int)(my_pos.y / (float)AREA_GRID),
(int)(my_pos.z / (float)AREA_GRID), 0 };
for (int i = an.x - 2; i <= an.x + 2; i++){
int aix = (i + ANX) % ANX;
for (int j = an.y - 2; j <= an.y + 2; j++){
int aiy = (j + ANY) % ANY;
for (int k = an.z - 2; k <= an.z + 2; k++){
int aiz = (k + ANZ) % ANZ;
int idx3d = (aix*ANY*ANZ + aiy*ANZ + aiz)*N3;
int m = *(unsigned int*)&area_info[idx3d + 0].w >> 24;
for (int l = 0; l < m; l++){
float4 cpos = area_info[idx3d + l];
int cidx = (*(unsigned int*)&cpos.w)&(0x007fffff);
bool op_memb = ((*(unsigned int*)&cpos.w) & 0x00800000) != 0;
if (index != cidx && !(op_memb&&me_memb)){
float diffx = p_diff_x(my_pos.x, cpos.x);
float diffy = p_diff_y(my_pos.y, cpos.y);
float diffz = my_pos.z - cpos.z;
float rad_sum = my_radius + (op_memb ? R_memb : R_max);
if (diffx*diffx + diffy*diffy + diffz*diffz < LJ_THRESH*LJ_THRESH*rad_sum*rad_sum){
//ADD_CELL_CONNECT(cis, index, cidx);
//mylist[mylist_count++] = cidx;
//ADD_CELL_CONNECT(cis, cidx, index);
mylist[mylist_count] = cidx;
mylist_count++;
}
}
}
}
}
}
cis[index].connected_num = mylist_count;
/*
for (int kk = 0; kk < mylist_count; kk++){
cis[index].index[kk] = mylist[kk];
}
*/
}
}
__global__ void complete_area_info(unsigned int* aindx_3d, float4* area_info){
int idx = blockIdx.x*ANY*ANZ + blockIdx.y*ANZ + threadIdx.x;
unsigned int w = *(unsigned int*)&area_info[idx*N3 + 0].w;
unsigned int num = aindx_3d[idx];
*(unsigned int*)&area_info[idx*N3 + 0].w = ((unsigned int)(0x00ffffff) & w) | (num << 24);
}
__global__ void set_dermis(int offset, int ncell, cell_pos_set* pos, connected_index_set* cis, int* dermis_arr){
int index = blockIdx.x*blockDim.x + threadIdx.x + offset;
if (index < ncell){
cell_pos_set cme = pos[index];
int cnum = cis[index].connected_num;
int dermis_index = -1;
float distSq = FLT_MAX;
float tmpdistSq = 0;
for (int i = 0; i < cnum; i++){
int opidx = cis[index].index[i];
cell_pos_set opcs = pos[cis[index].index[i]];
if (*(unsigned int*)&opcs.w == MEMB){
tmpdistSq = p_cell_dist_sq_d(cme, opcs);
if (tmpdistSq < distSq){
distSq = tmpdistSq;
dermis_index = opidx;
}
}
}
dermis_arr[index] = dermis_index;
}
}
__global__ void conn_sort(int ncell, int nmemb, unsigned int* cstate, connected_index_set* cis){
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < ncell){
int offset = cstate[index] == MEMB ? 8 : 0;
int bucket[10][128] = {}; int bucket_count[10] = { 0 };
for (int i = offset; i < cis[index].connected_num; i++) {
int idx = cis[index].index[i];
unsigned int state = cstate[idx];
//bucket[state][bucket_count[state]++] = idx;
}
/*
int count = 0;
for (int i = 0; i < 10; i++){
for (int j = 0; j < bucket_count[i]; j++){
cis[index].index[offset + (count++)] = bucket[i][j];
}
}
*/
}
}
void dbg_conn_info(unsigned int* aindx, int count){
static std::vector<unsigned int> tmp(ANX*ANY*ANZ);
hipMemcpy(&tmp[0], aindx, sizeof(unsigned int)*ANX*ANY*ANZ, hipMemcpyDeviceToHost);
std::ofstream ofs("dbg_ci" + std::to_string(count));
for (int i = 0; i < ANX; i++){
for (int j = 0; j < ANY; j++){
for (int k = 0; k < ANZ; k++){
ofs << i << "," << j << "," << k << " " << tmp[i*ANY*ANZ + j*ANZ + k] << std::endl;
}
}
}
}
void connect_cell(DeviceData* d){
struct uint_alloc{
unsigned int* ptr;
uint_alloc(size_t size){
checkCudaErrors(hipMalloc((void**)&ptr, size));
}
~uint_alloc(){
hipFree(ptr);
}
};
struct f4_alloc{
float4* ptr;
f4_alloc(size_t size){
checkCudaErrors(hipMalloc((void**)&ptr, size));
}
~f4_alloc(){
hipFree(ptr);
}
};
static int count = 0;
static uint_alloc aindx(sizeof(unsigned int)*ANX*ANY*ANZ);
//static uint_alloc area(sizeof(unsigned int)*ANX*ANY*ANZ*N3);
static f4_alloc area_info(sizeof(float4)*ANX*ANY*ANZ*N3);
hipMemset(aindx.ptr, 0, sizeof(unsigned int)*ANX*ANY*ANZ);
hipMemset(area_info.ptr, 0, sizeof(float4)*ANX*ANY*ANZ*N3);
init_grid << <256, 256 >> >(d->ncell, (unsigned int*)d->c_state_d, d->c_pos_d[d->current], d->c_connected_index_d, aindx.ptr, area_info.ptr);
hipDeviceSynchronize();
dim3 grd(ANX, ANY);
printf("LAST ERROR:%d\n", hipGetLastError());
complete_area_info << <grd, ANZ >> >(aindx.ptr, area_info.ptr);
hipDeviceSynchronize();
printf("LAST ERROR:%d\n", hipGetLastError());
connect_proc << <d->ncell - d->nmemb, 128*8 >> >(d->ncell, d->nmemb, d->c_pos_d[d->current], d->c_connected_index_d, area_info.ptr);
hipDeviceSynchronize();
printf("LAST ERROR:%d\n", hipGetLastError());
//conn_sort << < d->ncell / 256 + 1, 256 >>> (d->ncell, d->nmemb, (unsigned int*)d->c_state_d, d->c_connected_index_d);
//hipDeviceSynchronize();
printf("LAST ERROR:%d\n", hipGetLastError());
set_dermis << <(d->ncell - d->nmemb - d->nder) / 256 + 1, 256 >> >(d->nmemb + d->nder, d->ncell, d->c_pos_d[d->current], d->c_connected_index_d, d->c_dermis_index_d);
hipDeviceSynchronize();
printf("LAST ERROR:%d\n", hipGetLastError());
//dbg_conn_info(aindx.ptr, count++);
}
|
237a282d56348bfdb8f6ea9bceacb96a22c21915.cu
|
#pragma once
#undef NDEBUG
#include "global_cuda.h"
#include <device_functions.h>
#include "cell_connection_cuda.h"
#include <stdio.h>
#include "utils_cuda.cuh"
#include <cassert>
#include <vector>
#include <fstream>
#include <string>
//#include <thrust/sort.h>
#define AREA_GRID_ORIGINAL 2.0f
#define AREA_GRID_ORIGINAL_D 2.0
#define AREA_GRID ((float)(AREA_GRID_ORIGINAL_D + 1e-7))
#define ANX ((int)(LX / AREA_GRID_ORIGINAL_D + 0.5))
#define ANY ((int)(LY / AREA_GRID_ORIGINAL_D + 0.5))
#define ANZ ((int)(LZ / AREA_GRID_ORIGINAL_D))
#define N3 64
#define N2 400
#define SEARCH_GRID_DIM 5
#define SEARCH_GRID_NUM (SEARCH_GRID_DIM*SEARCH_GRID_DIM*SEARCH_GRID_DIM)
#define SEARCH_PAR_NUM 8
#define SEARCH_THREAD_UPPER_LIMIT 128
#define EXTRACT_STATE_CS(cs) (*(unsigned int*)(&(cs).w))
__global__ void init_grid(int ncell, unsigned int* cstate, cell_pos_set* cs, connected_index_set*cis, unsigned int* aindx_3d, float4* area_info){
unsigned int index = blockIdx.x*blockDim.x + threadIdx.x;
//while (true){ index++; }
//exit(1);
if (index < ncell){
cell_pos_set c = cs[index];
bool ismemb = cstate[index] == MEMB;
using namespace cont;
int aix = (int)((0.5f*LX - p_diff_x(0.5f*LX, c.x)) / (float)AREA_GRID);
int aiy = (int)((0.5f*LY - p_diff_y(0.5f*LY, c.y)) / (float)AREA_GRID);
int aiz = (int)((min0(c.z)) / (float)AREA_GRID);
if ((aix >= ANX || aiy >= ANY || aiz >= ANZ || aix < 0 || aiy < 0 || aiz < 0)) {
printf("err\n");
printf("cx:%lf cy:%lf cz:%lf\n", c.x, c.y, c.z);
printf("aix:%d aiy:%d aiz:%d\n", aix, aiy, aiz);
assert(false);
}
/*
area_info:
x=x pos
y=y pos
z=z pos
w=
head 8bit =connection num(unsigned char)
next 1bit = is memb
rest 23bit = index(as unsigned32bit int)
*/
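// Example with hypothetical values: if the first cell stored in a grid slot is membrane
// cell index 42 and that slot ends up holding 5 cells, the first entry's w becomes
// (5u << 24) | (1u << 23) | 42u once complete_area_info has merged in the count; the
// fields are read back as count = w >> 24, is_memb = (w >> 23) & 1u, index = w & 0x007fffffu.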
int idx3d = aix*ANY*ANZ + aiy*ANZ + aiz;
unsigned int listidx = atomicAdd(&aindx_3d[idx3d], 1);
int areaidx = idx3d*N3 + listidx;
area_info[areaidx] = c;
unsigned int wvalue = ismemb ? index | 1u << 23 : index&(0x007fffff);
*(unsigned int*)&area_info[areaidx].w = wvalue;
cis[index].connected_num = ismemb ? 8 : 0;
/*
This assert can be removed.
*/
if (listidx >= N3){
printf("error:aaaaaa\n");
}
//assert(aindx[aix][aiy][aiz] < (cint)N3);
//c->connected_cell.force_set_count(c->state == MEMB ? MEMB_ADJ_CONN_NUM : 0);
}
}
#define ADD_CELL_CONNECT(cis,me,add) cis[me].index[atomicAdd(&cis[me].connected_num,1)]=add
__global__ void connect_proc(int n_cell, int n_memb, cell_pos_set* cs, connected_index_set* cis, float4* area_info){
using namespace cont;
__shared__ int4 an;
__shared__ cell_pos_set my_pos;
__shared__ float my_radius;
//__shared__ int conn_num[128];
//__shared__ int conn_count;
int my_index = n_memb + blockIdx.x;
if (threadIdx.x == 0){
/*
for (int jj = 0; jj < 128; jj++){
conn_num[jj] = -1;
}
*/
//conn_count = 0;
my_pos = cs[my_index];
my_radius = get_radius(EXTRACT_STATE_CS(my_pos));
an.x = my_pos.x / (float)AREA_GRID;
an.y = my_pos.y / (float)AREA_GRID;
an.z = my_pos.z / (float)AREA_GRID;
//if(my_index>=n_cell-1)
}
#define DIV_NUM 8
int virt_id = threadIdx.x / DIV_NUM;
int divflg = threadIdx.x % DIV_NUM;
int cz = virt_id;
int cy = ((int)(cz / (int)SEARCH_GRID_DIM)); cz = cz%SEARCH_GRID_DIM;
int cx = ((int)(cy / (int)SEARCH_GRID_DIM)) % SEARCH_GRID_DIM; cy = cy%SEARCH_GRID_DIM;
__syncthreads();//do work before sync as much as possible
if (virt_id <= SEARCH_GRID_DIM*SEARCH_GRID_DIM*SEARCH_GRID_DIM){
int aix = an.x - (SEARCH_GRID_DIM / 2) + cx; aix = (aix + ANX) % ANX;
int aiy = an.y - (SEARCH_GRID_DIM / 2) + cy; aiy = (aiy + ANY) % ANY;
int aiz = an.z - (SEARCH_GRID_DIM / 2) + cz; aiz = (aiz + ANZ) % ANZ;
//printf("aix:%d aiy:%d aiz:%d\n", aix, aiy, aiz);
int idx3d = (aix*ANY*ANZ + aiy*ANZ + aiz)*N3;
int m = *(unsigned int*)&area_info[idx3d + 0].w >> 24;
for (int i = divflg; i < m; i += DIV_NUM){
float4 cpos = area_info[idx3d + i];
//__syncthreads();
int cidx = (*(unsigned int*)&cpos.w)&(0x007fffff);
//if (threadIdx.x == 25)printf("idx:%d\n",cidx);
bool op_memb = ((*(unsigned int*)&cpos.w) & 0x00800000) != 0;
if (my_index > cidx){
float diffx = p_diff_x(my_pos.x, cpos.x);
float diffy = p_diff_y(my_pos.y, cpos.y);
float diffz = my_pos.z - cpos.z;
float rad_sum = my_radius + (op_memb ? R_memb : R_max);
if (diffx*diffx + diffy*diffy + diffz*diffz < LJ_THRESH*LJ_THRESH*rad_sum*rad_sum){
ADD_CELL_CONNECT(cis, my_index, cidx);
ADD_CELL_CONNECT(cis, cidx, my_index);
}
}
}
}
}
__global__ void connect_proc2(int n_cell, int n_memb, cell_pos_set* cs, unsigned int* cstate, connected_index_set* cis, float4* area_info){
using namespace cont;
int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ int conn_num[256];
if (index < n_cell){
int mylist[128];
int mylist_count = 0;
cell_pos_set my_pos = cs[index];
bool me_memb = cstate[index] == MEMB;
__syncthreads();
float my_radius = get_radius(EXTRACT_STATE_CS(my_pos));
int4 an = { (int)(my_pos.x / (float)AREA_GRID),
(int)(my_pos.y / (float)AREA_GRID),
(int)(my_pos.z / (float)AREA_GRID), 0 };
for (int i = an.x - 2; i <= an.x + 2; i++){
int aix = (i + ANX) % ANX;
for (int j = an.y - 2; j <= an.y + 2; j++){
int aiy = (j + ANY) % ANY;
for (int k = an.z - 2; k <= an.z + 2; k++){
int aiz = (k + ANZ) % ANZ;
int idx3d = (aix*ANY*ANZ + aiy*ANZ + aiz)*N3;
int m = *(unsigned int*)&area_info[idx3d + 0].w >> 24;
for (int l = 0; l < m; l++){
float4 cpos = area_info[idx3d + l];
int cidx = (*(unsigned int*)&cpos.w)&(0x007fffff);
bool op_memb = ((*(unsigned int*)&cpos.w) & 0x00800000) != 0;
if (index != cidx && !(op_memb&&me_memb)){
float diffx = p_diff_x(my_pos.x, cpos.x);
float diffy = p_diff_y(my_pos.y, cpos.y);
float diffz = my_pos.z - cpos.z;
float rad_sum = my_radius + (op_memb ? R_memb : R_max);
if (diffx*diffx + diffy*diffy + diffz*diffz < LJ_THRESH*LJ_THRESH*rad_sum*rad_sum){
//ADD_CELL_CONNECT(cis, index, cidx);
//mylist[mylist_count++] = cidx;
//ADD_CELL_CONNECT(cis, cidx, index);
mylist[mylist_count] = cidx;
mylist_count++;
}
}
}
}
}
}
cis[index].connected_num = mylist_count;
/*
for (int kk = 0; kk < mylist_count; kk++){
cis[index].index[kk] = mylist[kk];
}
*/
}
}
__global__ void complete_area_info(unsigned int* aindx_3d, float4* area_info){
int idx = blockIdx.x*ANY*ANZ + blockIdx.y*ANZ + threadIdx.x;
unsigned int w = *(unsigned int*)&area_info[idx*N3 + 0].w;
unsigned int num = aindx_3d[idx];
*(unsigned int*)&area_info[idx*N3 + 0].w = ((unsigned int)(0x00ffffff) & w) | (num << 24);
}
__global__ void set_dermis(int offset, int ncell, cell_pos_set* pos, connected_index_set* cis, int* dermis_arr){
int index = blockIdx.x*blockDim.x + threadIdx.x + offset;
if (index < ncell){
cell_pos_set cme = pos[index];
int cnum = cis[index].connected_num;
int dermis_index = -1;
float distSq = FLT_MAX;
float tmpdistSq = 0;
for (int i = 0; i < cnum; i++){
int opidx = cis[index].index[i];
cell_pos_set opcs = pos[cis[index].index[i]];
if (*(unsigned int*)&opcs.w == MEMB){
tmpdistSq = p_cell_dist_sq_d(cme, opcs);
if (tmpdistSq < distSq){
distSq = tmpdistSq;
dermis_index = opidx;
}
}
}
dermis_arr[index] = dermis_index;
}
}
__global__ void conn_sort(int ncell, int nmemb, unsigned int* cstate, connected_index_set* cis){
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < ncell){
int offset = cstate[index] == MEMB ? 8 : 0;
int bucket[10][128] = {}; int bucket_count[10] = { 0 };
for (int i = offset; i < cis[index].connected_num; i++) {
int idx = cis[index].index[i];
unsigned int state = cstate[idx];
//bucket[state][bucket_count[state]++] = idx;
}
/*
int count = 0;
for (int i = 0; i < 10; i++){
for (int j = 0; j < bucket_count[i]; j++){
cis[index].index[offset + (count++)] = bucket[i][j];
}
}
*/
}
}
void dbg_conn_info(unsigned int* aindx, int count){
static std::vector<unsigned int> tmp(ANX*ANY*ANZ);
cudaMemcpy(&tmp[0], aindx, sizeof(unsigned int)*ANX*ANY*ANZ, cudaMemcpyDeviceToHost);
std::ofstream ofs("dbg_ci" + std::to_string(count));
for (int i = 0; i < ANX; i++){
for (int j = 0; j < ANY; j++){
for (int k = 0; k < ANZ; k++){
ofs << i << "," << j << "," << k << " " << tmp[i*ANY*ANZ + j*ANZ + k] << std::endl;
}
}
}
}
void connect_cell(DeviceData* d){
struct uint_alloc{
unsigned int* ptr;
uint_alloc(size_t size){
checkCudaErrors(cudaMalloc((void**)&ptr, size));
}
~uint_alloc(){
cudaFree(ptr);
}
};
struct f4_alloc{
float4* ptr;
f4_alloc(size_t size){
checkCudaErrors(cudaMalloc((void**)&ptr, size));
}
~f4_alloc(){
cudaFree(ptr);
}
};
static int count = 0;
static uint_alloc aindx(sizeof(unsigned int)*ANX*ANY*ANZ);
//static uint_alloc area(sizeof(unsigned int)*ANX*ANY*ANZ*N3);
static f4_alloc area_info(sizeof(float4)*ANX*ANY*ANZ*N3);
cudaMemset(aindx.ptr, 0, sizeof(unsigned int)*ANX*ANY*ANZ);
cudaMemset(area_info.ptr, 0, sizeof(float4)*ANX*ANY*ANZ*N3);
init_grid << <256, 256 >> >(d->ncell, (unsigned int*)d->c_state_d, d->c_pos_d[d->current], d->c_connected_index_d, aindx.ptr, area_info.ptr);
cudaDeviceSynchronize();
dim3 grd(ANX, ANY);
printf("LAST ERROR:%d\n", cudaGetLastError());
complete_area_info << <grd, ANZ >> >(aindx.ptr, area_info.ptr);
cudaDeviceSynchronize();
printf("LAST ERROR:%d\n", cudaGetLastError());
connect_proc << <d->ncell - d->nmemb, 128*8 >> >(d->ncell, d->nmemb, d->c_pos_d[d->current], d->c_connected_index_d, area_info.ptr);
cudaDeviceSynchronize();
printf("LAST ERROR:%d\n", cudaGetLastError());
//conn_sort << < d->ncell / 256 + 1, 256 >>> (d->ncell, d->nmemb, (unsigned int*)d->c_state_d, d->c_connected_index_d);
//cudaDeviceSynchronize();
printf("LAST ERROR:%d\n", cudaGetLastError());
set_dermis << <(d->ncell - d->nmemb - d->nder) / 256 + 1, 256 >> >(d->nmemb + d->nder, d->ncell, d->c_pos_d[d->current], d->c_connected_index_d, d->c_dermis_index_d);
cudaDeviceSynchronize();
printf("LAST ERROR:%d\n", cudaGetLastError());
//dbg_conn_info(aindx.ptr, count++);
}
|
a8ae40d21cd01b9d221d17a7ecf139ff71fc16ff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bfscuda_local.h"
__global__ void selectVariables(
int* sort_buffer, int* sequence_ids,
int* d_data, int* d_vars,
int n, int n_vars, int pitch, int offset,
int vars_width, int y_width, int sort_buffer_n) {
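// Descriptive note: d_vars holds (shl_width, var_index) pairs per sequence and column;
// the top bit of shl_width_raw (0x80000000) marks a selected column, the low 31 bits give
// the left shift used to pack the variable's value into the projection word OR-ed into
// sort_buffer, and rows with tmp odd write the y part (flagged via the bit at vars_width).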
int row_index = blockIdx.x * blockDim.x + threadIdx.x + offset;
int col_index = blockIdx.y;
int tmp = row_index / n;
int sequence_id = tmp / 2;
int shl_width_raw = d_vars[sequence_id * n_vars * 2 + col_index * 2];
if(row_index < sort_buffer_n && (0x80000000 & shl_width_raw)) {
int write_y = tmp & 0x00000001;
int shl_width = (0x7FFFFFFF & shl_width_raw);
int var_index = d_vars[sequence_id * n_vars * 2 + col_index * 2 + 1];
int projection = (d_data[(row_index % n) * pitch + var_index] << shl_width);
projection >>= (1 - write_y) * y_width;
projection |= (0x01 << vars_width) * write_y;
atomicOr(&sort_buffer[row_index], projection);
sequence_ids[row_index] = sequence_id;
}
}
|
a8ae40d21cd01b9d221d17a7ecf139ff71fc16ff.cu
|
#include "bfscuda_local.h"
__global__ void selectVariables(
int* sort_buffer, int* sequence_ids,
int* d_data, int* d_vars,
int n, int n_vars, int pitch, int offset,
int vars_width, int y_width, int sort_buffer_n) {
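// Descriptive note: d_vars holds (shl_width, var_index) pairs per sequence and column;
// the top bit of shl_width_raw (0x80000000) marks a selected column, the low 31 bits give
// the left shift used to pack the variable's value into the projection word OR-ed into
// sort_buffer, and rows with tmp odd write the y part (flagged via the bit at vars_width).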
int row_index = blockIdx.x * blockDim.x + threadIdx.x + offset;
int col_index = blockIdx.y;
int tmp = row_index / n;
int sequence_id = tmp / 2;
int shl_width_raw = d_vars[sequence_id * n_vars * 2 + col_index * 2];
if(row_index < sort_buffer_n && (0x80000000 & shl_width_raw)) {
int write_y = tmp & 0x00000001;
int shl_width = (0x7FFFFFFF & shl_width_raw);
int var_index = d_vars[sequence_id * n_vars * 2 + col_index * 2 + 1];
int projection = (d_data[(row_index % n) * pitch + var_index] << shl_width);
projection >>= (1 - write_y) * y_width;
projection |= (0x01 << vars_width) * write_y;
atomicOr(&sort_buffer[row_index], projection);
sequence_ids[row_index] = sequence_id;
}
}
|
f1e836c9560cf92b2c758622cb0675bfbff648ed.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Device/PreDeal_KernelFunc.cuh>
#include <Device/GNR_KernelFunc.cuh>
#include <Device/cudaOverlayGraph.cuh>
#include <../config.cuh>
#include <vector>
#include <XLib.hpp>
#include <ctime>
using std::vector;
using namespace PreDeal;
using namespace cuda_graph;
using namespace graph;
using namespace Kernels;
void PreDealGraph(GraphPreDeal &g)
{
cudaGraphPreDeal cg(g);
int supLevel = 0;
EdgeType iSdirected = g.gw.Direction;
while (supLevel < ALL_LEVELS - 1)
{
cg.copyOut();
if (iSdirected == EdgeType::DIRECTED)
{
cg.copyIn();
}
cg.copyCandidate();
cg.copyOrders();
hostPrintfSmall("edgeDiffrence");
hipLaunchKernelGGL(( EdgeDifference<DEFAULT_VW>)
, dim3(GRIDDIM), dim3(BLOCKDIM), SMem_Per_Block<char, BLOCKDIM>::value, 0, cg.devOutNodes, cg.devOutEdges, cg.devOrders,
cg.devCandidates, cg.devCandidateSize,
cg.devOutLoads, cg.devOutDones);
if (iSdirected == EdgeType::DIRECTED)
{
hipLaunchKernelGGL(( EdgeDifference<DEFAULT_VW>)
, dim3(GRIDDIM), dim3(BLOCKDIM), SMem_Per_Block<char, BLOCKDIM>::value, 0, cg.devInNodes, cg.devInEdges, cg.devOrders,
cg.devCandidates, cg.devCandidateSize,
cg.devInLoads, cg.devInDones);
}
hostPrintfSmall("NodeMarksCompute");
hipLaunchKernelGGL(( NodeMarksCompute), dim3(GRIDDIM), dim3(BLOCKDIM), SMem_Per_Block<char, BLOCKDIM>::value, 0, cg.devCandidates, cg.devCandidateSize,
cg.devOutLoads, cg.devOutDones,
cg.devInLoads, cg.devInDones,
cg.devMarks);
__CUDA_ERROR("NodeMarksCompute Kernel");
cg.copyMarks();
//printVector(g.Marks,20);
hostPrintfSmall("SelectNodesThresholdOrTOP");
SelectNodesThresholdOrTOP(g.Candidates, g.Marks,
g.Orders, g.ContractedNodes, supLevel, selectNodesNum[supLevel], selectNodesStrategy);
hostPrintfSmall("ShortCut");
g.AddEdges.clear();
ShortCutCPU(g.ContractedNodes, g.Orders, g.AddEdges, g.OutNodesVec, g.OutEdgesVec, g.InNodesVec, g.InEdgesVec);
hostPrintfSmall("CSRMerge");
hostPrintfSmall(g.AddEdges.size(), "newSize: ");
CSRMerge(g.AddEdges, g.OutNodesVec, g.OutEdgesVec, 0);
if (iSdirected == EdgeType::DIRECTED)
{
CSRMerge(g.AddEdges, g.InNodesVec, g.InEdgesVec, 1);
}
supLevel++;
}
}
namespace cuda_graph
{
void cudaGNRGraph::GNRSearchMain(int source)
{
cudaClear();
int f1Size = cudaInit(source), f2Size, f3Size, f4Size;
f2Size = f3Size = f4Size = 0;
int zero = 0;
int maxCB = (V % DistanceSplitSize == 0) ? (V / DistanceSplitSize) : (V / DistanceSplitSize + 1);
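// maxCB = ceil(V / DistanceSplitSize): the number of chunks the CoalescedWrite loops below iterate over.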
// __CUDA_ERROR("GNRSearchMain Kernel");
#if LOGFRONTIER
//LogHostPrintSmall("NEXT");
#endif
// printfInt(i,"bucketIndex");
int *devF1 = upF1;
int *devF1Size = upF1Size;
int *devF2 = upF2;
int *devF2Size = upF2Size;
int *devF3 = downF1;
int *devF3Size = downF1Size;
int *devF4 = downF2;
int *devF4Size = downF2Size;
int level = 1;
int iter = 0;
while (1)
{
// __CUDA_ERROR("GNRSearchMain Kernel");
#if LOGFRONTIER
if (level < 0)
{
LogPrintfFrotier(devF1, devF1Size, "up_devF1");
LogPrintfFrotier(devF3, devF3Size, "up_devF3");
}
#endif
iter++;
printf("%d:%d\n", iter, f1Size);
// hostPrintfSmall(f1Size, "f1Size: ");
// hostPrintfSmall(f3Size, "f3Size: ");
if (f1Size == 0)
{
level++;
break;
}
#if DISTANCESPLIT
if (f1Size < DSNodes)
{
hipLaunchKernelGGL(( GNRSearchNorm<DEFAULT_VW>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, devUpOutNodes, devUpOutEdges, devDistances, devF1, devF2, f1Size, devF2Size, level);
__CUDA_ERROR("GNRSearchMain Kernel");
}
else
{
cudaClearBucket();
// BucketDebug();
hipLaunchKernelGGL(( GNRSearchForWrite<DEFAULT_VW>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, devUpOutNodes, devUpOutEdges, devDistances, devF1, f1Size);
// copyIntFrom(f1Size, devF1Size);
// copyIntFrom(f2Size, devF2Size);
// hostPrintfSmall(f1Size, "f1Size: ");
// hostPrintfSmall(f2Size, "f2Size: ");
// BucketDebug();
// __CUDA_ERROR("GNRSearchMain Kernel");
for (int kb = 0; kb < maxCB; kb += 1)
{
hipLaunchKernelGGL(( CoalescedWrite<DEFAULT_VW>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, devDistances, kb, devF2, devF2Size, level);
// __CUDA_ERROR("GNRSearchMain Kernel");
}
// copyIntFrom(f1Size, devF1Size);
// copyIntFrom(f2Size, devF2Size);
}
#else
hipLaunchKernelGGL(( GNRSearchNorm<DEFAULT_VW>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, devUpOutNodes, devUpOutEdges, devDistances, devF1, devF2, f1Size, devF2Size, level);
#endif
std::swap<int *>(devF1, devF2);
std::swap<int *>(devF1Size, devF2Size);
copyIntFrom(f1Size, devF1Size);
copyIntTo(zero, devF2Size);
level++;
}
if (ALL_LEVELS != 0)
{
#if DISTANCESPLIT
if (V < DSNodes)
{
hipLaunchKernelGGL(( GNRSearchDownFirstTime<DEFAULT_VW>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, devDownOutNodes, devDownOutEdges, devDistances, V, devF3, devF3Size, level);
}
else
{
cudaClearBucket();
hipLaunchKernelGGL(( GNRSearchDownFirstTimeForWrite<DEFAULT_VW>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, devDownOutNodes, devDownOutEdges, devDistances, V, level);
for (int kb = 0; kb < maxCB; kb += 1)
hipLaunchKernelGGL(( CoalescedWrite<DEFAULT_VW>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, devDistances, kb, devF3, devF3Size, level);
}
#else
hipLaunchKernelGGL(( GNRSearchDownFirstTime<DEFAULT_VW>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, devDownOutNodes, devDownOutEdges, devDistances, V, devF3, devF3Size, level);
#endif
level++;
copyIntFrom(f3Size, devF3Size);
while (1)
{
if (f3Size == 0)
{
level++;
break;
}
#if DISTANCESPLIT
if (f1Size < DSNodes)
{
hipLaunchKernelGGL(( GNRSearchNorm<DEFAULT_VW>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, devDownOutNodes, devDownOutEdges, devDistances, devF3, devF4, f3Size, devF4Size, level);
}
else
{
cudaClearBucket();
hipLaunchKernelGGL(( GNRSearchForWrite<DEFAULT_VW>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, devDownOutNodes, devDownOutEdges, devDistances, devF3, f3Size);
for (int kb = 0; kb < maxCB; kb += 1)
hipLaunchKernelGGL(( CoalescedWrite<DEFAULT_VW>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, devDistances, kb, devF4, devF4Size, level);
}
#else
hipLaunchKernelGGL(( GNRSearchNorm<DEFAULT_VW>), dim3(GRIDDIM), dim3(BLOCKDIM), 0, 0, devDownOutNodes, devDownOutEdges, devDistances, devF3, devF4, f3Size, devF4Size, level);
#endif
std::swap<int *>(devF3, devF4);
std::swap<int *>(devF3Size, devF4Size);
copyIntFrom(f3Size, devF3Size);
copyIntTo(zero, devF4Size);
level++;
}
__CUDA_ERROR("GNRSearchMain Kernel");
}
// printf("iter times:%d", iter);
}
} // namespace cuda_graph
|
f1e836c9560cf92b2c758622cb0675bfbff648ed.cu
|
#include <Device/PreDeal_KernelFunc.cuh>
#include <Device/GNR_KernelFunc.cuh>
#include <Device/cudaOverlayGraph.cuh>
#include <../config.cuh>
#include <vector>
#include <XLib.hpp>
#include <ctime>
using std::vector;
using namespace PreDeal;
using namespace cuda_graph;
using namespace graph;
using namespace Kernels;
void PreDealGraph(GraphPreDeal &g)
{
cudaGraphPreDeal cg(g);
int supLevel = 0;
EdgeType iSdirected = g.gw.Direction;
while (supLevel < ALL_LEVELS - 1)
{
cg.copyOut();
if (iSdirected == EdgeType::DIRECTED)
{
cg.copyIn();
}
cg.copyCandidate();
cg.copyOrders();
hostPrintfSmall("edgeDiffrence");
EdgeDifference<DEFAULT_VW>
<<<GRIDDIM, BLOCKDIM, SMem_Per_Block<char, BLOCKDIM>::value>>>(cg.devOutNodes, cg.devOutEdges, cg.devOrders,
cg.devCandidates, cg.devCandidateSize,
cg.devOutLoads, cg.devOutDones);
if (iSdirected == EdgeType::DIRECTED)
{
EdgeDifference<DEFAULT_VW>
<<<GRIDDIM, BLOCKDIM, SMem_Per_Block<char, BLOCKDIM>::value>>>(cg.devInNodes, cg.devInEdges, cg.devOrders,
cg.devCandidates, cg.devCandidateSize,
cg.devInLoads, cg.devInDones);
}
hostPrintfSmall("NodeMarksCompute");
NodeMarksCompute<<<GRIDDIM, BLOCKDIM, SMem_Per_Block<char, BLOCKDIM>::value>>>(cg.devCandidates, cg.devCandidateSize,
cg.devOutLoads, cg.devOutDones,
cg.devInLoads, cg.devInDones,
cg.devMarks);
__CUDA_ERROR("NodeMarksCompute Kernel");
cg.copyMarks();
//printVector(g.Marks,20);
hostPrintfSmall("SelectNodesThresholdOrTOP");
SelectNodesThresholdOrTOP(g.Candidates, g.Marks,
g.Orders, g.ContractedNodes, supLevel, selectNodesNum[supLevel], selectNodesStrategy);
hostPrintfSmall("ShortCut");
g.AddEdges.clear();
ShortCutCPU(g.ContractedNodes, g.Orders, g.AddEdges, g.OutNodesVec, g.OutEdgesVec, g.InNodesVec, g.InEdgesVec);
hostPrintfSmall("CSRMerge");
hostPrintfSmall(g.AddEdges.size(), "newSize: ");
CSRMerge(g.AddEdges, g.OutNodesVec, g.OutEdgesVec, 0);
if (iSdirected == EdgeType::DIRECTED)
{
CSRMerge(g.AddEdges, g.InNodesVec, g.InEdgesVec, 1);
}
supLevel++;
}
}
namespace cuda_graph
{
void cudaGNRGraph::GNRSearchMain(int source)
{
cudaClear();
int f1Size = cudaInit(source), f2Size, f3Size, f4Size;
f2Size = f3Size = f4Size = 0;
int zero = 0;
int maxCB = (V % DistanceSplitSize == 0) ? (V / DistanceSplitSize) : (V / DistanceSplitSize + 1);
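// maxCB = ceil(V / DistanceSplitSize): the number of chunks the CoalescedWrite loops below iterate over.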
// __CUDA_ERROR("GNRSearchMain Kernel");
#if LOGFRONTIER
//LogHostPrintSmall("NEXT");
#endif
// printfInt(i,"bucketIndex");
int *devF1 = upF1;
int *devF1Size = upF1Size;
int *devF2 = upF2;
int *devF2Size = upF2Size;
int *devF3 = downF1;
int *devF3Size = downF1Size;
int *devF4 = downF2;
int *devF4Size = downF2Size;
int level = 1;
int iter = 0;
while (1)
{
// __CUDA_ERROR("GNRSearchMain Kernel");
#if LOGFRONTIER
if (level < 0)
{
LogPrintfFrotier(devF1, devF1Size, "up_devF1");
LogPrintfFrotier(devF3, devF3Size, "up_devF3");
}
#endif
iter++;
printf("%d:%d\n", iter, f1Size);
// hostPrintfSmall(f1Size, "f1Size: ");
// hostPrintfSmall(f3Size, "f3Size: ");
if (f1Size == 0)
{
level++;
break;
}
#if DISTANCESPLIT
if (f1Size < DSNodes)
{
GNRSearchNorm<DEFAULT_VW><<<GRIDDIM, BLOCKDIM>>>(devUpOutNodes, devUpOutEdges, devDistances, devF1, devF2, f1Size, devF2Size, level);
__CUDA_ERROR("GNRSearchMain Kernel");
}
else
{
cudaClearBucket();
// BucketDebug();
GNRSearchForWrite<DEFAULT_VW><<<GRIDDIM, BLOCKDIM>>>(devUpOutNodes, devUpOutEdges, devDistances, devF1, f1Size);
// copyIntFrom(f1Size, devF1Size);
// copyIntFrom(f2Size, devF2Size);
// hostPrintfSmall(f1Size, "f1Size: ");
// hostPrintfSmall(f2Size, "f2Size: ");
// BucketDebug();
// __CUDA_ERROR("GNRSearchMain Kernel");
for (int kb = 0; kb < maxCB; kb += 1)
{
CoalescedWrite<DEFAULT_VW><<<GRIDDIM, BLOCKDIM>>>(devDistances, kb, devF2, devF2Size, level);
// __CUDA_ERROR("GNRSearchMain Kernel");
}
// copyIntFrom(f1Size, devF1Size);
// copyIntFrom(f2Size, devF2Size);
}
#else
GNRSearchNorm<DEFAULT_VW><<<GRIDDIM, BLOCKDIM>>>(devUpOutNodes, devUpOutEdges, devDistances, devF1, devF2, f1Size, devF2Size, level);
#endif
std::swap<int *>(devF1, devF2);
std::swap<int *>(devF1Size, devF2Size);
copyIntFrom(f1Size, devF1Size);
copyIntTo(zero, devF2Size);
level++;
}
if (ALL_LEVELS != 0)
{
#if DISTANCESPLIT
if (V < DSNodes)
{
GNRSearchDownFirstTime<DEFAULT_VW><<<GRIDDIM, BLOCKDIM>>>(devDownOutNodes, devDownOutEdges, devDistances, V, devF3, devF3Size, level);
}
else
{
cudaClearBucket();
GNRSearchDownFirstTimeForWrite<DEFAULT_VW><<<GRIDDIM, BLOCKDIM>>>(devDownOutNodes, devDownOutEdges, devDistances, V, level);
for (int kb = 0; kb < maxCB; kb += 1)
CoalescedWrite<DEFAULT_VW><<<GRIDDIM, BLOCKDIM>>>(devDistances, kb, devF3, devF3Size, level);
}
#else
GNRSearchDownFirstTime<DEFAULT_VW><<<GRIDDIM, BLOCKDIM>>>(devDownOutNodes, devDownOutEdges, devDistances, V, devF3, devF3Size, level);
#endif
level++;
copyIntFrom(f3Size, devF3Size);
while (1)
{
if (f3Size == 0)
{
level++;
break;
}
#if DISTANCESPLIT
if (f1Size < DSNodes)
{
GNRSearchNorm<DEFAULT_VW><<<GRIDDIM, BLOCKDIM>>>(devDownOutNodes, devDownOutEdges, devDistances, devF3, devF4, f3Size, devF4Size, level);
}
else
{
cudaClearBucket();
GNRSearchForWrite<DEFAULT_VW><<<GRIDDIM, BLOCKDIM>>>(devDownOutNodes, devDownOutEdges, devDistances, devF3, f3Size);
for (int kb = 0; kb < maxCB; kb += 1)
CoalescedWrite<DEFAULT_VW><<<GRIDDIM, BLOCKDIM>>>(devDistances, kb, devF4, devF4Size, level);
}
#else
GNRSearchNorm<DEFAULT_VW><<<GRIDDIM, BLOCKDIM>>>(devDownOutNodes, devDownOutEdges, devDistances, devF3, devF4, f3Size, devF4Size, level);
#endif
std::swap<int *>(devF3, devF4);
std::swap<int *>(devF3Size, devF4Size);
copyIntFrom(f3Size, devF3Size);
copyIntTo(zero, devF4Size);
level++;
}
__CUDA_ERROR("GNRSearchMain Kernel");
}
// printf("iter times:%d", iter);
}
} // namespace cuda_graph
|
a87fd1fe0b32089cfd1e614703e494edf5699717.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void naiveHistKernel(int* bins, int nbins, int* in, int nrows) {
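// Grid-stride histogram: blockIdx.y picks one batch of nrows values (and its own nbins-wide
// histogram), each value is clamped into [0, nbins-1], written back to in, and the matching
// bin is incremented atomically.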
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
auto offset = blockIdx.y * nrows;
auto binOffset = blockIdx.y * nbins;
for (; tid < nrows; tid += stride) {
int id = in[offset + tid];
if (id < 0)
id = 0;
else if (id >= nbins)
id = nbins - 1;
in[offset + tid] = id;
atomicAdd(bins + binOffset + id, 1);
}
}
|
a87fd1fe0b32089cfd1e614703e494edf5699717.cu
|
#include "includes.h"
__global__ void naiveHistKernel(int* bins, int nbins, int* in, int nrows) {
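// Grid-stride histogram: blockIdx.y picks one batch of nrows values (and its own nbins-wide
// histogram), each value is clamped into [0, nbins-1], written back to in, and the matching
// bin is incremented atomically.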
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
auto offset = blockIdx.y * nrows;
auto binOffset = blockIdx.y * nbins;
for (; tid < nrows; tid += stride) {
int id = in[offset + tid];
if (id < 0)
id = 0;
else if (id >= nbins)
id = nbins - 1;
in[offset + tid] = id;
atomicAdd(bins + binOffset + id, 1);
}
}
|
bde2b25595e7f1c4a77096682e63108abe7a44b0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/pow.hpp"
template <typename T>
__global__ void pow_yx_kernel(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = alpha * pow(y[incy * index], x[incx * index]);
}
}
template <>
__global__ void pow_yx_kernel(size_t n, hipComplex alpha, const hipComplex* x, size_t incx, hipComplex* y, size_t incy) {
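// Complex power via logarithms: y^x = exp(x * log(y)), with log(y) = log|y| + i*arg(y).
// c_abs/c_arg below are |y| and arg(y); the final exp(ylogx) is rebuilt from its magnitude
// exp(Re) and phase (cos, sin) before scaling by alpha.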
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
auto x_ = x[incx * index];
auto y_ = y[incy * index];
float c_abs = hypot(y_.x, y_.y);
float c_arg = atan2(y_.y, y_.x);
auto logx = make_cuComplex(log(c_abs), c_arg);
auto ylogx = cuCmulf(x_, logx);
float e = exp(ylogx.x);
auto res = make_cuComplex(e * cos(ylogx.y), e * sin(ylogx.y));
y[incy * index] = cuCmulf(alpha, res);
}
}
template <>
__global__ void pow_yx_kernel(size_t n, hipDoubleComplex alpha, const hipDoubleComplex* x, size_t incx, hipDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
auto x_ = x[incx * index];
auto y_ = y[incy * index];
double c_abs = hypot(y_.x, y_.y);
double c_arg = atan2(y_.y, y_.x);
auto logx = make_cuDoubleComplex(log(c_abs), c_arg);
auto ylogx = cuCmul(x_, logx);
double e = exp(ylogx.x);
auto res = make_cuDoubleComplex(e * cos(ylogx.y), e * sin(ylogx.y));
y[incy * index] = cuCmul(alpha, res);
}
}
template <typename T>
__global__ void pow_yx_kernel1(size_t n, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = pow(y[incy * index], x[incx * index]);
}
}
template <>
__global__ void pow_yx_kernel1(size_t n, const hipComplex* x, size_t incx, hipComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
auto x_ = x[incx * index];
auto y_ = y[incy * index];
float c_abs = hypot(y_.x, y_.y);
float c_arg = atan2(y_.y, y_.x);
auto logx = make_cuComplex(log(c_abs), c_arg);
auto ylogx = cuCmulf(x_, logx);
float e = exp(ylogx.x);
auto res = make_cuComplex(e * cos(ylogx.y), e * sin(ylogx.y));
y[incy * index] = res;
}
}
template <>
__global__ void pow_yx_kernel1(size_t n, const hipDoubleComplex* x, size_t incx, hipDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
auto x_ = x[incx * index];
auto y_ = y[incy * index];
double c_abs = hypot(y_.x, y_.y);
double c_arg = atan2(y_.y, y_.x);
auto logx = make_cuDoubleComplex(log(c_abs), c_arg);
auto ylogx = cuCmul(x_, logx);
double e = exp(ylogx.x);
auto res = make_cuDoubleComplex(e * cos(ylogx.y), e * sin(ylogx.y));
y[incy * index] = res;
}
}
template <typename T>
__global__ void pow_yx_kernel0(size_t n, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = T(0);
}
}
template <>
__global__ void pow_yx_kernel0(size_t n, hipComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = make_cuComplex(0, 0);
}
}
template <>
__global__ void pow_yx_kernel0(size_t n, hipDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = make_cuDoubleComplex(0, 0);
}
}
template <typename T>
void pow_yx_kernel_run(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, pow_yx_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( pow_yx_kernel<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, alpha, x, incx, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
hipDeviceSynchronize();
#endif
}
template <typename T>
void pow_yx_kernel1_run(size_t n, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, pow_yx_kernel1<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( pow_yx_kernel1<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, x, incx, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
hipDeviceSynchronize();
#endif
}
template <typename T>
void pow_yx_kernel0_run(size_t n, T* y, size_t incy) {
int blockSize;
int minGridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, pow_yx_kernel0<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( pow_yx_kernel0<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
hipDeviceSynchronize();
#endif
}
void egblas_spow_yx(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy) {
if (alpha == 1.0f) {
pow_yx_kernel1_run(n, x, incx, y, incy);
} else if (alpha == 0.0f) {
pow_yx_kernel0_run(n, y, incy);
} else {
pow_yx_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_dpow_yx(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy) {
if (alpha == 1.0) {
pow_yx_kernel1_run(n, x, incx, y, incy);
} else if (alpha == 0.0) {
pow_yx_kernel0_run(n, y, incy);
} else {
pow_yx_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_cpow_yx(size_t n, hipComplex alpha, const hipComplex* x, size_t incx, hipComplex* y, size_t incy) {
if (alpha.x == 1.0f && alpha.y == 0.0f) {
pow_yx_kernel1_run(n, x, incx, y, incy);
} else if (alpha.x == 0.0f && alpha.y == 0.0f) {
pow_yx_kernel0_run(n, y, incy);
} else {
pow_yx_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_zpow_yx(size_t n, hipDoubleComplex alpha, const hipDoubleComplex* x, size_t incx, hipDoubleComplex* y, size_t incy) {
if (alpha.x == 1.0 && alpha.y == 0.0) {
pow_yx_kernel1_run(n, x, incx, y, incy);
} else if (alpha.x == 0.0 && alpha.y == 0.0) {
pow_yx_kernel0_run(n, y, incy);
} else {
pow_yx_kernel_run(n, alpha, x, incx, y, incy);
}
}
|
bde2b25595e7f1c4a77096682e63108abe7a44b0.cu
|
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include "egblas/pow.hpp"
template <typename T>
__global__ void pow_yx_kernel(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = alpha * pow(y[incy * index], x[incx * index]);
}
}
template <>
__global__ void pow_yx_kernel(size_t n, cuComplex alpha, const cuComplex* x, size_t incx, cuComplex* y, size_t incy) {
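// Complex power via logarithms: y^x = exp(x * log(y)), with log(y) = log|y| + i*arg(y).
// c_abs/c_arg below are |y| and arg(y); the final exp(ylogx) is rebuilt from its magnitude
// exp(Re) and phase (cos, sin) before scaling by alpha.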
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
auto x_ = x[incx * index];
auto y_ = y[incy * index];
float c_abs = hypot(y_.x, y_.y);
float c_arg = atan2(y_.y, y_.x);
auto logx = make_cuComplex(log(c_abs), c_arg);
auto ylogx = cuCmulf(x_, logx);
float e = exp(ylogx.x);
auto res = make_cuComplex(e * cos(ylogx.y), e * sin(ylogx.y));
y[incy * index] = cuCmulf(alpha, res);
}
}
template <>
__global__ void pow_yx_kernel(size_t n, cuDoubleComplex alpha, const cuDoubleComplex* x, size_t incx, cuDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
auto x_ = x[incx * index];
auto y_ = y[incy * index];
double c_abs = hypot(y_.x, y_.y);
double c_arg = atan2(y_.y, y_.x);
auto logx = make_cuDoubleComplex(log(c_abs), c_arg);
auto ylogx = cuCmul(x_, logx);
double e = exp(ylogx.x);
auto res = make_cuDoubleComplex(e * cos(ylogx.y), e * sin(ylogx.y));
y[incy * index] = cuCmul(alpha, res);
}
}
template <typename T>
__global__ void pow_yx_kernel1(size_t n, const T* x, size_t incx, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = pow(y[incy * index], x[incx * index]);
}
}
template <>
__global__ void pow_yx_kernel1(size_t n, const cuComplex* x, size_t incx, cuComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
auto x_ = x[incx * index];
auto y_ = y[incy * index];
float c_abs = hypot(y_.x, y_.y);
float c_arg = atan2(y_.y, y_.x);
auto logx = make_cuComplex(log(c_abs), c_arg);
auto ylogx = cuCmulf(x_, logx);
float e = exp(ylogx.x);
auto res = make_cuComplex(e * cos(ylogx.y), e * sin(ylogx.y));
y[incy * index] = res;
}
}
template <>
__global__ void pow_yx_kernel1(size_t n, const cuDoubleComplex* x, size_t incx, cuDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
auto x_ = x[incx * index];
auto y_ = y[incy * index];
double c_abs = hypot(y_.x, y_.y);
double c_arg = atan2(y_.y, y_.x);
auto logx = make_cuDoubleComplex(log(c_abs), c_arg);
auto ylogx = cuCmul(x_, logx);
double e = exp(ylogx.x);
auto res = make_cuDoubleComplex(e * cos(ylogx.y), e * sin(ylogx.y));
y[incy * index] = res;
}
}
template <typename T>
__global__ void pow_yx_kernel0(size_t n, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = T(0);
}
}
template <>
__global__ void pow_yx_kernel0(size_t n, cuComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = make_cuComplex(0, 0);
}
}
template <>
__global__ void pow_yx_kernel0(size_t n, cuDoubleComplex* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
auto stride = blockDim.x * gridDim.x;
for (; index < n; index += stride) {
y[incy * index] = make_cuDoubleComplex(0, 0);
}
}
template <typename T>
void pow_yx_kernel_run(size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, pow_yx_kernel<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
pow_yx_kernel<T><<<gridSize, blockSize>>>(n, alpha, x, incx, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
cudaDeviceSynchronize();
#endif
}
template <typename T>
void pow_yx_kernel1_run(size_t n, const T* x, size_t incx, T* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, pow_yx_kernel1<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
pow_yx_kernel1<T><<<gridSize, blockSize>>>(n, x, incx, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
cudaDeviceSynchronize();
#endif
}
template <typename T>
void pow_yx_kernel0_run(size_t n, T* y, size_t incy) {
int blockSize;
int minGridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, pow_yx_kernel0<T>, 0, 0);
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
pow_yx_kernel0<T><<<gridSize, blockSize>>>(n, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
cudaDeviceSynchronize();
#endif
}
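// Entry points: alpha == 1 dispatches to the kernel without the final scaling multiply,
// and alpha == 0 simply zeroes y.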
void egblas_spow_yx(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy) {
if (alpha == 1.0f) {
pow_yx_kernel1_run(n, x, incx, y, incy);
} else if (alpha == 0.0f) {
pow_yx_kernel0_run(n, y, incy);
} else {
pow_yx_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_dpow_yx(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy) {
if (alpha == 1.0) {
pow_yx_kernel1_run(n, x, incx, y, incy);
} else if (alpha == 0.0) {
pow_yx_kernel0_run(n, y, incy);
} else {
pow_yx_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_cpow_yx(size_t n, cuComplex alpha, const cuComplex* x, size_t incx, cuComplex* y, size_t incy) {
if (alpha.x == 1.0f && alpha.y == 0.0f) {
pow_yx_kernel1_run(n, x, incx, y, incy);
} else if (alpha.x == 0.0f && alpha.y == 0.0f) {
pow_yx_kernel0_run(n, y, incy);
} else {
pow_yx_kernel_run(n, alpha, x, incx, y, incy);
}
}
void egblas_zpow_yx(size_t n, cuDoubleComplex alpha, const cuDoubleComplex* x, size_t incx, cuDoubleComplex* y, size_t incy) {
if (alpha.x == 1.0 && alpha.y == 0.0) {
pow_yx_kernel1_run(n, x, incx, y, incy);
} else if (alpha.x == 0.0 && alpha.y == 0.0) {
pow_yx_kernel0_run(n, y, incy);
} else {
pow_yx_kernel_run(n, alpha, x, incx, y, incy);
}
}
|
fe287704537996a7e534e7ef186c7f1c440b833a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "visualization.cuh"
#include <fstream>
#include <iostream>
#include <string>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <ctime>
#define INFINITY (2<<27) // sentinel weight marking blocked cells (shadows the math.h INFINITY macro)
#define BLOCK_SIZE 512
#define GRID_SIZE 8192
//CUDA kernel for expansion of each field in matrix to square with edge = square_dimen_pixels
__global__
void expandMatrix(int* transformed, int* source, int n, int m, int square_dimen_pixels) {
int tIdx = threadIdx.x + blockDim.x*blockIdx.x;
while (tIdx < n*m) {
for (int i = 0; i < square_dimen_pixels; i++) {
for (int j = 0; j < square_dimen_pixels; j++) {
transformed[(tIdx / n)*n*square_dimen_pixels*square_dimen_pixels + (tIdx%n)*square_dimen_pixels + i * n*square_dimen_pixels + j] = source[tIdx];
}
}
tIdx += blockDim.x*gridDim.x;
}
}
//CUDA kernel assigning RGB values to each weight in matrix
__global__
void assignRGB(unsigned char* dest, int* src, int minVal, int maxVal, int pathMark, int blockedMark, int size) {
#define c( x ) (255 * x)
double granularity = 360.0 / ((double)(maxVal - minVal) + 1);
int tIdx = threadIdx.x + blockDim.x*blockIdx.x;
while (tIdx < size) {
unsigned char red, green, blue;
if (src[tIdx] != pathMark && src[tIdx] != blockedMark) {
double hue = (src[tIdx] - minVal) * granularity;
int H = (int)(hue / 60) % 6;
double F = (hue / 60) - H;
double Q = 1.0 - F;
switch (H)
{
case 0: red = c(1); green = c(F); blue = c(0); break;
case 1: red = c(Q); green = c(1); blue = c(0); break;
case 2: red = c(0); green = c(1); blue = c(F); break;
case 3: red = c(0); green = c(Q); blue = c(1); break;
case 4: red = c(F); green = c(0); blue = c(1); break;
default: red = c(1); green = c(0); blue = c(Q);
}
}
else {
if (src[tIdx] == blockedMark) {
blue = green = red = c(0);
}
else {
blue = green = red = c(0.5);
}
}
dest[tIdx * 3] = blue;
dest[tIdx*3 + 1] = green;
dest[tIdx * 3 + 2] = red;
tIdx += blockDim.x*gridDim.x;
}
#undef c
}
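// Helper that streams a value as `size` bytes, least-significant byte first (little-endian),
// as required by the BMP file and info headers.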
struct writeToStream
{
unsigned long value;
unsigned size;
writeToStream(unsigned long value, unsigned size) :
value(value), size(size)
{ }
};
inline std::ostream& operator << (std::ostream& outs, const writeToStream& v)
{
unsigned long value = v.value;
for (unsigned cntr = 0; cntr < v.size; cntr++, value >>= 8)
outs.put(static_cast <char> (value & 0xFF));
return outs;
}
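// Writes an uncompressed 24-bit BMP. Rows are emitted without the usual 4-byte padding,
// so columns*3 should be a multiple of 4 for a strictly conforming file.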
bool makeBMP(const std::string& filename, unsigned char* RGBMatrix, int rows, int columns) {
std::ofstream f(filename.c_str(),
std::ios::out | std::ios::trunc | std::ios::binary);
if (!f) return false;
unsigned long headers_size = 14 + 40;
unsigned long pixel_data_size = rows * columns*3;
// Write the BITMAPFILEHEADER
f.put('B').put('M');
f << writeToStream(headers_size + pixel_data_size, 4);
f << writeToStream(0, 2);
f << writeToStream(0, 2);
f << writeToStream(headers_size, 4);
// Write the BITMAPINFOHEADER
f << writeToStream(40, 4);
f << writeToStream(columns, 4);
f << writeToStream(rows, 4);
f << writeToStream(1, 2);
f << writeToStream(24, 2);
f << writeToStream(0, 4);
f << writeToStream(pixel_data_size, 4);
f << writeToStream(0, 4);
f << writeToStream(0, 4);
f << writeToStream(0, 4);
f << writeToStream(0, 4);
// Write RGB matrix to stream
for (unsigned long i = 0; i < rows*columns*3; i++) {
f.put(static_cast <char> (RGBMatrix[i]));
}
return f.good();
}
//Adding found path to matrix
void addPathToMatrix(int* matrix, int* path, int size) {
int idx = size - 1;
while (idx > 0) {
matrix[idx] = -1;
idx = path[idx];
}
matrix[0] = -1;
}
//Wrapper function producing a color bitmap from the weight matrix
void visualizeMatrix(const std::string& filename, int* matrix, int n, int m, int pixel_dimension, int minWeight, int maxWeight) {
int *mGPU, *tGPU;
unsigned char *rgb, *rgbGPU;
//Memory allocation
int* transformed = (int*)malloc(n*m * sizeof(int) * pixel_dimension * pixel_dimension);
rgb = (unsigned char*)malloc(sizeof(unsigned char)*n*m*pixel_dimension*pixel_dimension * 3);
hipMalloc(&rgbGPU, sizeof(unsigned char)*n*m*pixel_dimension*pixel_dimension * 3);
hipMalloc(&tGPU, sizeof(int)*n*m*pixel_dimension*pixel_dimension);
hipMalloc(&mGPU, sizeof(int)*n*m);
hipMemcpy(mGPU, matrix, sizeof(int)*n*m, hipMemcpyHostToDevice);
//Actual work
expandMatrix << <GRID_SIZE, BLOCK_SIZE >> > (tGPU, mGPU, n, m, pixel_dimension);
assignRGB << <GRID_SIZE, BLOCK_SIZE >> > (rgbGPU, tGPU, minWeight, maxWeight, -1, INFINITY, n*m*pixel_dimension*pixel_dimension);
hipMemcpy(rgb, rgbGPU, sizeof(unsigned char)*n*m*pixel_dimension*pixel_dimension * 3, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
makeBMP(filename, rgb, n*pixel_dimension, m*pixel_dimension);
//Memory deallocation
free(transformed);
free(rgb);
hipFree(rgbGPU);
hipFree(tGPU);
hipFree(mGPU);
}
#undef GRID_SIZE
#undef BLOCK_SIZE
|
fe287704537996a7e534e7ef186c7f1c440b833a.cu
|
#include "visualization.cuh"
#include <fstream>
#include <iostream>
#include <string>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <ctime>
#define INFINITY (2<<27) // sentinel weight marking blocked cells (shadows the math.h INFINITY macro)
#define BLOCK_SIZE 512
#define GRID_SIZE 8192
//CUDA kernel for expansion of each field in matrix to square with edge = square_dimen_pixels
__global__
void expandMatrix(int* transformed, int* source, int n, int m, int square_dimen_pixels) {
int tIdx = threadIdx.x + blockDim.x*blockIdx.x;
while (tIdx < n*m) {
for (int i = 0; i < square_dimen_pixels; i++) {
for (int j = 0; j < square_dimen_pixels; j++) {
transformed[(tIdx / n)*n*square_dimen_pixels*square_dimen_pixels + (tIdx%n)*square_dimen_pixels + i * n*square_dimen_pixels + j] = source[tIdx];
}
}
tIdx += blockDim.x*gridDim.x;
}
}
//CUDA kernel assigning RGB values to each weight in matrix
__global__
void assignRGB(unsigned char* dest, int* src, int minVal, int maxVal, int pathMark, int blockedMark, int size) {
#define c( x ) (255 * x)
double granularity = 360.0 / ((double)(maxVal - minVal) + 1);
int tIdx = threadIdx.x + blockDim.x*blockIdx.x;
while (tIdx < size) {
unsigned char red, green, blue;
if (src[tIdx] != pathMark && src[tIdx] != blockedMark) {
double hue = (src[tIdx] - minVal) * granularity;
int H = (int)(hue / 60) % 6;
double F = (hue / 60) - H;
double Q = 1.0 - F;
switch (H)
{
case 0: red = c(1); green = c(F); blue = c(0); break;
case 1: red = c(Q); green = c(1); blue = c(0); break;
case 2: red = c(0); green = c(1); blue = c(F); break;
case 3: red = c(0); green = c(Q); blue = c(1); break;
case 4: red = c(F); green = c(0); blue = c(1); break;
default: red = c(1); green = c(0); blue = c(Q);
}
}
else {
if (src[tIdx] == blockedMark) {
blue = green = red = c(0);
}
else {
blue = green = red = c(0.5);
}
}
dest[tIdx * 3] = blue;
dest[tIdx*3 + 1] = green;
dest[tIdx * 3 + 2] = red;
tIdx += blockDim.x*gridDim.x;
}
#undef c
}
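// Helper that streams a value as `size` bytes, least-significant byte first (little-endian),
// as required by the BMP file and info headers.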
struct writeToStream
{
unsigned long value;
unsigned size;
writeToStream(unsigned long value, unsigned size) :
value(value), size(size)
{ }
};
inline std::ostream& operator << (std::ostream& outs, const writeToStream& v)
{
unsigned long value = v.value;
for (unsigned cntr = 0; cntr < v.size; cntr++, value >>= 8)
outs.put(static_cast <char> (value & 0xFF));
return outs;
}
bool makeBMP(const std::string& filename, unsigned char* RGBMatrix, int rows, int columns) {
std::ofstream f(filename.c_str(),
std::ios::out | std::ios::trunc | std::ios::binary);
if (!f) return false;
unsigned long headers_size = 14 + 40;
unsigned long pixel_data_size = rows * columns*3;
// Write the BITMAPFILEHEADER
f.put('B').put('M');
f << writeToStream(headers_size + pixel_data_size, 4);
f << writeToStream(0, 2);
f << writeToStream(0, 2);
f << writeToStream(headers_size, 4);
// Write the BITMAPINFOHEADER
f << writeToStream(40, 4);
f << writeToStream(columns, 4);
f << writeToStream(rows, 4);
f << writeToStream(1, 2);
f << writeToStream(24, 2);
f << writeToStream(0, 4);
f << writeToStream(pixel_data_size, 4);
f << writeToStream(0, 4);
f << writeToStream(0, 4);
f << writeToStream(0, 4);
f << writeToStream(0, 4);
// Write RGB matrix to stream
for (unsigned long i = 0; i < rows*columns*3; i++) {
f.put(static_cast <char> (RGBMatrix[i]));
}
return f.good();
}
//Adding found path to matrix
void addPathToMatrix(int* matrix, int* path, int size) {
int idx = size - 1;
while (idx > 0) {
matrix[idx] = -1;
idx = path[idx];
}
matrix[0] = -1;
}
//Wrapper function producing a color bitmap from the weight matrix
void visualizeMatrix(const std::string& filename, int* matrix, int n, int m, int pixel_dimension, int minWeight, int maxWeight) {
int *mGPU, *tGPU;
unsigned char *rgb, *rgbGPU;
//Memory allocation
int* transformed = (int*)malloc(n*m * sizeof(int) * pixel_dimension * pixel_dimension);
rgb = (unsigned char*)malloc(sizeof(unsigned char)*n*m*pixel_dimension*pixel_dimension * 3);
cudaMalloc(&rgbGPU, sizeof(unsigned char)*n*m*pixel_dimension*pixel_dimension * 3);
cudaMalloc(&tGPU, sizeof(int)*n*m*pixel_dimension*pixel_dimension);
cudaMalloc(&mGPU, sizeof(int)*n*m);
cudaMemcpy(mGPU, matrix, sizeof(int)*n*m, cudaMemcpyHostToDevice);
//Actual work
expandMatrix << <GRID_SIZE, BLOCK_SIZE >> > (tGPU, mGPU, n, m, pixel_dimension);
assignRGB << <GRID_SIZE, BLOCK_SIZE >> > (rgbGPU, tGPU, minWeight, maxWeight, -1, INFINITY, n*m*pixel_dimension*pixel_dimension);
cudaMemcpy(rgb, rgbGPU, sizeof(unsigned char)*n*m*pixel_dimension*pixel_dimension * 3, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
makeBMP(filename, rgb, n*pixel_dimension, m*pixel_dimension);
//Memory deallocation
free(transformed);
free(rgb);
cudaFree(rgbGPU);
cudaFree(tGPU);
cudaFree(mGPU);
}
#undef GRID_SIZE
#undef BLOCK_SIZE
|
e47ebf73c539c9f308070a1890b60c325b6d7504.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/histogram_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
using IndexType = int64_t;
using phi::PADDLE_CUDA_NUM_THREADS;
inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
template <typename T, typename IndexType>
__device__ static IndexType GetBin(T input_value,
T min_value,
T max_value,
int64_t nbins) {
IndexType bin = static_cast<int>((input_value - min_value) * nbins /
(max_value - min_value));
IndexType output_index = bin < nbins - 1 ? bin : nbins - 1;
return output_index;
}
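// Each block accumulates a private histogram in dynamic shared memory (buf_hist),
// then merges it into the global output with atomic adds.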
template <typename T, typename IndexType>
__global__ void KernelHistogram(const T* input,
const int total_elements,
const int64_t nbins,
const T min_value,
const T max_value,
int64_t* output) {
extern __shared__ int64_t buf_hist[];
for (int i = threadIdx.x; i < nbins; i += blockDim.x) {
buf_hist[i] = 0;
}
__syncthreads();
CUDA_KERNEL_LOOP(input_index, total_elements) {
// const IndexType input_index = threadIdx.x + blockIdx.x * blockDim.x;
const auto input_value = input[input_index];
if (input_value >= min_value && input_value <= max_value) {
const IndexType output_index =
GetBin<T, IndexType>(input_value, min_value, max_value, nbins);
phi::CudaAtomicAdd(&buf_hist[output_index], 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < nbins; i += blockDim.x) {
phi::CudaAtomicAdd(&output[i], buf_hist[i]);
}
}
template <typename T, typename Context>
void HistogramKernel(const Context& dev_ctx,
const DenseTensor& input,
int64_t bins,
int min,
int max,
DenseTensor* output) {
auto& nbins = bins;
auto& minval = min;
auto& maxval = max;
const T* input_data = input.data<T>();
const int input_numel = input.numel();
int64_t* out_data = dev_ctx.template Alloc<int64_t>(output);
phi::funcs::SetConstant<Context, int64_t>()(
dev_ctx, output, static_cast<int64_t>(0));
if (input_data == nullptr) return;
T output_min = static_cast<T>(minval);
T output_max = static_cast<T>(maxval);
if (output_min == output_max) {
auto input_x = phi::EigenVector<T>::Flatten(input);
DenseTensor input_min_t, input_max_t;
input_min_t.Resize({1});
input_max_t.Resize({1});
auto* input_min_data = dev_ctx.template Alloc<T>(&input_min_t);
auto* input_max_data = dev_ctx.template Alloc<T>(&input_max_t);
auto input_min_scala = phi::EigenScalar<T>::From(input_min_t);
auto input_max_scala = phi::EigenScalar<T>::From(input_max_t);
auto* place = dev_ctx.eigen_device();
input_min_scala.device(*place) = input_x.minimum();
input_max_scala.device(*place) = input_x.maximum();
DenseTensor input_min_cpu, input_max_cpu;
phi::Copy(dev_ctx, input_min_t, phi::CPUPlace(), true, &input_min_cpu);
phi::Copy(dev_ctx, input_max_t, phi::CPUPlace(), true, &input_max_cpu);
output_min = input_min_cpu.data<T>()[0];
output_max = input_max_cpu.data<T>()[0];
}
if (output_min == output_max) {
output_min = output_min - 1;
output_max = output_max + 1;
}
  PADDLE_ENFORCE_EQ((std::isinf(static_cast<float>(output_min)) ||
                     std::isnan(static_cast<float>(output_min)) ||
                     std::isinf(static_cast<float>(output_max)) ||
                     std::isnan(static_cast<float>(output_max))),
false,
phi::errors::OutOfRange("range of min, max is not finite"));
PADDLE_ENFORCE_GE(
output_max,
output_min,
phi::errors::InvalidArgument(
"max must be larger or equal to min. If min and max are both zero, "
"the minimum and maximum values of the data are used. "
"But received max is %d, min is %d",
maxval,
minval));
auto stream = dev_ctx.stream();
hipLaunchKernelGGL(( KernelHistogram<T, IndexType>), dim3(GET_BLOCKS(input_numel)),
dim3(PADDLE_CUDA_NUM_THREADS),
nbins * sizeof(int64_t),
stream,
input_data, input_numel, nbins, output_min, output_max, out_data);
}
} // namespace phi
PD_REGISTER_KERNEL(histogram,
GPU,
ALL_LAYOUT,
phi::HistogramKernel,
float,
double,
int,
int64_t) {
kernel->OutputAt(0).SetDataType(paddle::DataType::INT64);
}
|
e47ebf73c539c9f308070a1890b60c325b6d7504.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/histogram_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
using IndexType = int64_t;
using phi::PADDLE_CUDA_NUM_THREADS;
inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
template <typename T, typename IndexType>
__device__ static IndexType GetBin(T input_value,
T min_value,
T max_value,
int64_t nbins) {
IndexType bin = static_cast<int>((input_value - min_value) * nbins /
(max_value - min_value));
IndexType output_index = bin < nbins - 1 ? bin : nbins - 1;
return output_index;
}
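// Each block accumulates a private histogram in dynamic shared memory (buf_hist),
// then merges it into the global output with atomic adds.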
template <typename T, typename IndexType>
__global__ void KernelHistogram(const T* input,
const int total_elements,
const int64_t nbins,
const T min_value,
const T max_value,
int64_t* output) {
extern __shared__ int64_t buf_hist[];
for (int i = threadIdx.x; i < nbins; i += blockDim.x) {
buf_hist[i] = 0;
}
__syncthreads();
CUDA_KERNEL_LOOP(input_index, total_elements) {
// const IndexType input_index = threadIdx.x + blockIdx.x * blockDim.x;
const auto input_value = input[input_index];
if (input_value >= min_value && input_value <= max_value) {
const IndexType output_index =
GetBin<T, IndexType>(input_value, min_value, max_value, nbins);
phi::CudaAtomicAdd(&buf_hist[output_index], 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < nbins; i += blockDim.x) {
phi::CudaAtomicAdd(&output[i], buf_hist[i]);
}
}
template <typename T, typename Context>
void HistogramKernel(const Context& dev_ctx,
const DenseTensor& input,
int64_t bins,
int min,
int max,
DenseTensor* output) {
auto& nbins = bins;
auto& minval = min;
auto& maxval = max;
const T* input_data = input.data<T>();
const int input_numel = input.numel();
int64_t* out_data = dev_ctx.template Alloc<int64_t>(output);
phi::funcs::SetConstant<Context, int64_t>()(
dev_ctx, output, static_cast<int64_t>(0));
if (input_data == nullptr) return;
T output_min = static_cast<T>(minval);
T output_max = static_cast<T>(maxval);
if (output_min == output_max) {
auto input_x = phi::EigenVector<T>::Flatten(input);
DenseTensor input_min_t, input_max_t;
input_min_t.Resize({1});
input_max_t.Resize({1});
auto* input_min_data = dev_ctx.template Alloc<T>(&input_min_t);
auto* input_max_data = dev_ctx.template Alloc<T>(&input_max_t);
auto input_min_scala = phi::EigenScalar<T>::From(input_min_t);
auto input_max_scala = phi::EigenScalar<T>::From(input_max_t);
auto* place = dev_ctx.eigen_device();
input_min_scala.device(*place) = input_x.minimum();
input_max_scala.device(*place) = input_x.maximum();
DenseTensor input_min_cpu, input_max_cpu;
phi::Copy(dev_ctx, input_min_t, phi::CPUPlace(), true, &input_min_cpu);
phi::Copy(dev_ctx, input_max_t, phi::CPUPlace(), true, &input_max_cpu);
output_min = input_min_cpu.data<T>()[0];
output_max = input_max_cpu.data<T>()[0];
}
if (output_min == output_max) {
output_min = output_min - 1;
output_max = output_max + 1;
}
  PADDLE_ENFORCE_EQ((std::isinf(static_cast<float>(output_min)) ||
                     std::isnan(static_cast<float>(output_min)) ||
                     std::isinf(static_cast<float>(output_max)) ||
                     std::isnan(static_cast<float>(output_max))),
false,
phi::errors::OutOfRange("range of min, max is not finite"));
PADDLE_ENFORCE_GE(
output_max,
output_min,
phi::errors::InvalidArgument(
"max must be larger or equal to min. If min and max are both zero, "
"the minimum and maximum values of the data are used. "
"But received max is %d, min is %d",
maxval,
minval));
auto stream = dev_ctx.stream();
KernelHistogram<T, IndexType><<<GET_BLOCKS(input_numel),
PADDLE_CUDA_NUM_THREADS,
nbins * sizeof(int64_t),
stream>>>(
input_data, input_numel, nbins, output_min, output_max, out_data);
}
} // namespace phi
PD_REGISTER_KERNEL(histogram,
GPU,
ALL_LAYOUT,
phi::HistogramKernel,
float,
double,
int,
int64_t) {
kernel->OutputAt(0).SetDataType(paddle::DataType::INT64);
}
|
8aa0b560f00ce2ad9abd38b2f7668dd5d282e193.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include<limits>
double diff(timespec start, timespec end)
{
double a=0;
if((end.tv_nsec-start.tv_nsec)<0)
{
a=end.tv_sec-start.tv_sec-1;
a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0;
}
else
{
a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0;
}
return a;
}
struct NUM_ADD
{
short2 read_reference_number;
int address_array;
};
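// Pairwise read-vs-reference alignment with affine gap penalties, computed along anti-diagonals:
// one thread per read base; the shared arrays MM / gap_h / gap_size_h pass scores between
// neighboring threads, and per-cell backtrack codes are written to direction_index
// (x > 0: deletion run, x < 0: insertion run, x == 0: diagonal step with run length in y).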
__global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction) //, char * result
{
int offset=blockIdx.x;
__shared__ short2 read_reference_number;
__shared__ char * read_base_array;
__shared__ char4 * reference_base_array;
__shared__ int mismatch;
__shared__ int match;
__shared__ int open;
__shared__ int extend;
__shared__ short2 * direction_index;
while(offset<size)
{
if( threadIdx.x==0)
{
read_reference_number=num_add[offset].read_reference_number;
read_base_array=(char *) (data+num_add[offset].address_array);
reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128);
direction_index=(short2 *) (direction+offset*640*1100);
}
__syncthreads();
__shared__ char reference_base_in_char[600];
int hh=(read_reference_number.y+4-1)/4;
int tt=(hh+blockDim.x-1)/blockDim.x;
for(int ii=0;ii<tt;ii++)
{
int aa=threadIdx.x+ii*blockDim.x;
if(aa< hh)
{
char4 reference_base_in_thread;
reference_base_in_thread=reference_base_array[aa]; // load four packed reference bases from global memory
reference_base_in_char[aa*4]=reference_base_in_thread.x;
reference_base_in_char[aa*4+1]=reference_base_in_thread.y;
reference_base_in_char[aa*4+2]=reference_base_in_thread.z;
reference_base_in_char[aa*4+3]=reference_base_in_thread.w;
}
}
__shared__ int MM[193];
__shared__ int gap_h[193]; //insertion
__shared__ short2 gap_size_h[193]; //insertion
__shared__ int result_col;
__shared__ int result_row;
__shared__ int result_col_index;
__shared__ int result_row_index;
//__shared__ char cigar_m[128];
//__shared__ int cigar_int_m[128];
//int final_result;
//int final_i;
//int final_j;
if(threadIdx.x==0)
{
MM[0]=0;
gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2;
gap_size_h[0].x=0;
gap_size_h[0].y=0;
match=200;
mismatch=-150;
open=-260;
extend=-11;
result_col=-1000000000;//std::numeric_limits<int>::min()/2;
result_row=-1000000000;//std::numeric_limits<int>::min()/2;
// for(int i=0;i<read_reference_number.y;i++)
// printf("%c",reference_base_in_char[i]);
// printf("\n");
// for(int i=0;i<read_reference_number.x;i++)
// printf("%c",read_base_array[i]);
}
__syncthreads();
int read_number=read_reference_number.x;
{
char read_base;
read_base=read_base_array[threadIdx.x];
int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;;
int gap_size_v=0; //Deletion
int M=0; //now
int step_right; //now
int ki=0;//insertion (horizontal gap), encoded as negative in the backtrack
//deletion (vertical gap) is encoded as positive
int MMM=0;
short mt=0;
short2 curmt;
curmt.x=0;
curmt.y=0;
int current_reference_id=0;
for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++)
{
int aa=j-threadIdx.x;
if( aa>=0 && (current_reference_id<read_reference_number.y))
{
int prev_gap=M+open; //M was calculated in the previous step by the same thread
gap_v+=extend;
if(prev_gap>gap_v)
{
gap_v=prev_gap;
gap_size_v=1;
}
else
gap_size_v++;
char reference_base_each=reference_base_in_char[current_reference_id];
M=MMM+(read_base==reference_base_each? match:mismatch);
prev_gap=MM[threadIdx.x]+open;
step_right=gap_h[threadIdx.x]+extend;
if(prev_gap>step_right)
{
step_right=prev_gap;
ki=1;
}
else
ki=gap_size_h[threadIdx.x].x+1;
bool diag=(M>=gap_v)&&(M>=step_right);
curmt.y=0;
if(diag)
{
curmt.x=0;
//if(threadIdx.x==0||current_reference_id==0)
// curmt.y=0;
// else
curmt.y=mt+1;
// curBtrack=0;
}
else
if(step_right>=gap_v)
{
M=step_right;
curmt.x=0-ki;
// curBtrack=0-ki;
}
else
{
M=gap_v;
curmt.x=gap_size_v;
//curBtrack=gap_size_v;
}
MMM=MM[threadIdx.x];
mt=gap_size_h[threadIdx.x].y;
direction_index[640*j+threadIdx.x]=curmt;
//if(threadIdx.x==read_reference_number.x-3)
//printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack);
if(current_reference_id==read_reference_number.y-1)
{
if(M>=result_row)
{
result_row=M;
result_row_index=threadIdx.x; //
}
//printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x);
}
if(threadIdx.x==read_reference_number.x-1)
{
if(M>=result_col)
{
result_col=M;
result_col_index=current_reference_id; // +1
}
}
current_reference_id++;
}
__syncthreads(); //make sure the old values of MM[] have been read by every thread before they are overwritten below.
MM[threadIdx.x+1]=M;
gap_h[threadIdx.x+1]=step_right;
gap_size_h[threadIdx.x+1].x=ki;
gap_size_h[threadIdx.x+1].y=curmt.y;
__syncthreads(); // second barrier: make sure every MM[threadIdx.x+1] holds its new value before the next iteration reads it.
}
}
// char state;//0 match; 1 mismatch; 2 insertion; 3 deletion
// __shared__ int cigar_index;
// int segment_length;
// short2 btr;
// char new_state;
// int step_length;
int4 result4;
if(threadIdx.x==read_reference_number.x-1)
{
//printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index);
if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1))
{
// final_result=result_row;
result4.x=read_reference_number.y-1;
result4.y=result_row_index;
result4.z=read_reference_number.x-1-result_row_index;
}
else
{
// final_result=result_col;
result4.x=result_col_index;
result4.y=read_reference_number.x-1;
result4.z=0;
}
//result[offset*3]=final_result;
//printf("%d\n",final_result);
//result4.x=fina_i;
//result4.y=fina_j;
//result4.z=segment_length;
result[offset]=result4;
}
__syncthreads();
offset+=gridDim.x;
}
}
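// Traceback kernel: thread 0 walks direction_index back from the endpoint chosen above and
// emits the CIGAR runs (S/M/I/D) in reverse; the whole block then copies them out in order.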
__global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result
{
int offset=blockIdx.x;
int4 result4;
short2 * direction_index;
__shared__ char * cigar_store;
__shared__ int *cigar_int_store;
__shared__ char cigar_m[128];
__shared__ int cigar_int_m[128];
while(offset<size)
{
char state;//0 match; 1 mismatch; 2 insertion; 3 deletion
__shared__ int cigar_index;
int segment_length;
short2 btr;
char new_state;
int step_length;
if( threadIdx.x==0)
{
result4=result[offset];
direction_index=(short2 *) (direction+offset*640*1100);
cigar_store=(char *) (cigar+offset*sizeof(char)*128);
cigar_int_store=(int *) (cigar_int+offset*128);
//printf("\n %d %d\n", final_i,final_j);
cigar_index=0;
if(result4.z>0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=result4.z;
cigar_index++;
}
segment_length=0;
state='N';
do
{
btr=direction_index[(result4.x+result4.y)*640+result4.y];
if(btr.x>0)
{
new_state='D';
step_length=btr.x;
result4.x-=step_length;
}
else
if(btr.x<0)
{
new_state='I';
step_length=0-btr.x;
result4.y-=step_length;
}
else
{
new_state='M';
step_length=btr.y;
result4.x-=step_length;
result4.y-=step_length;
}
if(state=='N') state=new_state;
if(state==new_state)
{
segment_length+=step_length;
}
else
{
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
segment_length=step_length;
cigar_index++;
state=new_state;
}
}while(result4.x>=0&&result4.y>=0);
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
cigar_index++;
if(result4.y>=0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=result4.y+1;
cigar_index++;
}
result4.z=result4.x+1;
result4.w=cigar_index;
result[offset]=result4;
/* for(int i=cigar_index-1;i>=0;i--)
{
printf("%d%c",cigar_int_m[i],cigar_m[i]);
}
*/
}
__syncthreads();
if(threadIdx.x<cigar_index && cigar_index<=blockDim.x)
{
// if(threadIdx.x==0)
// printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]);
cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x];
cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x];
// if(threadIdx.x==0)
// printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]);
}
offset+=gridDim.x;
}
}
struct InputData
{
char read_base[600];
char reference_base[600];
};
int main(int artc, char* args[])
{
int total_size=0;
FILE * file;
file=fopen(args[1],"r");
int size;
double computation_time=0;//total_time=0;
timespec start,finish;
/* char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[1]);
strcpy(inputdata[index].read_base,data[1]);
index++;
}
*/
/* fscanf(file,"%d",&size);
while(!feof(file))
{
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<size;i++)
{
fscanf(file,"%s ",inputdata[i].reference_base);
fscanf(file,"%s ",inputdata[i].read_base);
}
*/
char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[i]);
strcpy(inputdata[index].read_base,data[j]);
index++;
}
//data preparation.
char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128);
NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total);
char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; // thus we do not need to worry about alignment
int data_size=0;
char * data_d_total;
hipMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4);
int * result_h=(int *) malloc(sizeof(int)*size*4);
char * cigar_h=(char *) malloc(sizeof(char)*size*128);
int * cigar_int_h=(int *) malloc(sizeof(int)*size*128);
for(int i=0;i<size;i++)
{
char4 reference_tep[150];
int read_len=strlen(inputdata[i].read_base);
int ref_len=strlen(inputdata[i].reference_base);
int new_len=(ref_len+4-1)/4;
total_size+=ref_len*read_len;
for(int j=0;j<new_len;j++)
{
reference_tep[j].x=inputdata[i].reference_base[j*4];
if(j*4+1<ref_len)
reference_tep[j].y=inputdata[i].reference_base[j*4+1];
if(j*4+2<ref_len)
reference_tep[j].z=inputdata[i].reference_base[j*4+2];
if(j*4+3<ref_len)
reference_tep[j].w=inputdata[i].reference_base[j*4+3];
}
data_num_add[i].read_reference_number.x=read_len;
data_num_add[i].read_reference_number.y=ref_len;
data_num_add[i].address_array=data_size;
memcpy(data_h,inputdata[i].read_base,read_len);
data_h+=(read_len+128-1)/128*128;
data_size+=(read_len+128-1)/128*128;
memcpy(data_h,reference_tep,sizeof(char4)* new_len);
data_h+=(new_len*sizeof(char4)+127)/128*128;
data_size+=(new_len*sizeof(char4)+127)/128*128;
}
int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128;
hipMemcpy(data_d_total,data_h_total,data_size_to_copy,hipMemcpyHostToDevice);
NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total);
char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128;
int4 * result_d=(int4 *) (data_d_total+data_size_to_copy);
char * cigar;
hipMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int)));
int * cigar_int=(int *) (cigar+size*128*sizeof(char));
int * direction;
hipMalloc( (int **) & direction, size * (640*1100* sizeof (int)));
dim3 block(192);
dim3 grid(size);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
hipLaunchKernelGGL(( calculate_cigar), dim3(grid),dim3(block), 0, 0, size,data_d,num_add_d,result_d,direction); //result
// calculate_cigar_2<<<grid,block>>> (size,result_d,cigar,cigar_int,direction); //result
hipMemcpy(result_h,result_d,size*sizeof(int)*4,hipMemcpyDeviceToHost);
hipMemcpy(cigar_h,cigar,128*sizeof(char)*size, hipMemcpyDeviceToHost);
hipMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,hipMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
computation_time+=diff(start,finish);
/* for(int i=0;i<size;i++)
{
printf("%d\n",result_h[i*4+1]);
printf("[");
for(int j=0;j<result_h[i*4+3];j++)
{
if(j!=0) printf(", ");
printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]);
}
printf("]\n");
}
*/
hipFree(direction);
free(data_h_total);
hipFree(data_d_total);
free(inputdata);
hipFree(cigar);
free(cigar_int_h);
free(cigar_h);
// fscanf(file,"%d",&size);
}
// printf(" computation_time= %e total_time=%e \n",computation_time,0);
printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( (double)total_size)/computation_time/1000000000);
return 0;
}
|
8aa0b560f00ce2ad9abd38b2f7668dd5d282e193.cu
|
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <cuda.h>
#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include<limits>
double diff(timespec start, timespec end)
{
double a=0;
if((end.tv_nsec-start.tv_nsec)<0)
{
a=end.tv_sec-start.tv_sec-1;
a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0;
}
else
{
a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0;
}
return a;
}
struct NUM_ADD
{
short2 read_reference_number;
int address_array;
};
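// Pairwise read-vs-reference alignment with affine gap penalties, computed along anti-diagonals:
// one thread per read base; the shared arrays MM / gap_h / gap_size_h pass scores between
// neighboring threads, and per-cell backtrack codes are written to direction_index
// (x > 0: deletion run, x < 0: insertion run, x == 0: diagonal step with run length in y).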
__global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction) //, char * result
{
int offset=blockIdx.x;
__shared__ short2 read_reference_number;
__shared__ char * read_base_array;
__shared__ char4 * reference_base_array;
__shared__ int mismatch;
__shared__ int match;
__shared__ int open;
__shared__ int extend;
__shared__ short2 * direction_index;
while(offset<size)
{
if( threadIdx.x==0)
{
read_reference_number=num_add[offset].read_reference_number;
read_base_array=(char *) (data+num_add[offset].address_array);
reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128);
direction_index=(short2 *) (direction+offset*640*1100);
}
__syncthreads();
__shared__ char reference_base_in_char[600];
int hh=(read_reference_number.y+4-1)/4;
int tt=(hh+blockDim.x-1)/blockDim.x;
for(int ii=0;ii<tt;ii++)
{
int aa=threadIdx.x+ii*blockDim.x;
if(aa< hh)
{
char4 reference_base_in_thread;
reference_base_in_thread=reference_base_array[aa]; // load four packed reference bases from global memory
reference_base_in_char[aa*4]=reference_base_in_thread.x;
reference_base_in_char[aa*4+1]=reference_base_in_thread.y;
reference_base_in_char[aa*4+2]=reference_base_in_thread.z;
reference_base_in_char[aa*4+3]=reference_base_in_thread.w;
}
}
__shared__ int MM[193];
__shared__ int gap_h[193]; //insertion
__shared__ short2 gap_size_h[193]; //insertion
__shared__ int result_col;
__shared__ int result_row;
__shared__ int result_col_index;
__shared__ int result_row_index;
//__shared__ char cigar_m[128];
//__shared__ int cigar_int_m[128];
//int final_result;
//int final_i;
//int final_j;
if(threadIdx.x==0)
{
MM[0]=0;
gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2;
gap_size_h[0].x=0;
gap_size_h[0].y=0;
match=200;
mismatch=-150;
open=-260;
extend=-11;
result_col=-1000000000;//std::numeric_limits<int>::min()/2;
result_row=-1000000000;//std::numeric_limits<int>::min()/2;
// for(int i=0;i<read_reference_number.y;i++)
// printf("%c",reference_base_in_char[i]);
// printf("\n");
// for(int i=0;i<read_reference_number.x;i++)
// printf("%c",read_base_array[i]);
}
__syncthreads();
int read_number=read_reference_number.x;
{
char read_base;
read_base=read_base_array[threadIdx.x];
int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;;
int gap_size_v=0; //Deletion
int M=0; //now
int step_right; //now
int ki=0;//insertion (horizontal gap), encoded as negative in the backtrack
//deletion (vertical gap) is encoded as positive
int MMM=0;
short mt=0;
short2 curmt;
curmt.x=0;
curmt.y=0;
int current_reference_id=0;
for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++)
{
int aa=j-threadIdx.x;
if( aa>=0 && (current_reference_id<read_reference_number.y))
{
int prev_gap=M+open; //M was calculated in the previous step by the same thread
gap_v+=extend;
if(prev_gap>gap_v)
{
gap_v=prev_gap;
gap_size_v=1;
}
else
gap_size_v++;
char reference_base_each=reference_base_in_char[current_reference_id];
M=MMM+(read_base==reference_base_each? match:mismatch);
prev_gap=MM[threadIdx.x]+open;
step_right=gap_h[threadIdx.x]+extend;
if(prev_gap>step_right)
{
step_right=prev_gap;
ki=1;
}
else
ki=gap_size_h[threadIdx.x].x+1;
bool diag=(M>=gap_v)&&(M>=step_right);
curmt.y=0;
if(diag)
{
curmt.x=0;
//if(threadIdx.x==0||current_reference_id==0)
// curmt.y=0;
// else
curmt.y=mt+1;
// curBtrack=0;
}
else
if(step_right>=gap_v)
{
M=step_right;
curmt.x=0-ki;
// curBtrack=0-ki;
}
else
{
M=gap_v;
curmt.x=gap_size_v;
//curBtrack=gap_size_v;
}
MMM=MM[threadIdx.x];
mt=gap_size_h[threadIdx.x].y;
direction_index[640*j+threadIdx.x]=curmt;
//if(threadIdx.x==read_reference_number.x-3)
//printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack);
if(current_reference_id==read_reference_number.y-1)
{
if(M>=result_row)
{
result_row=M;
result_row_index=threadIdx.x; //
}
//printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x);
}
if(threadIdx.x==read_reference_number.x-1)
{
if(M>=result_col)
{
result_col=M;
result_col_index=current_reference_id; // +1
}
}
current_reference_id++;
}
__syncthreads(); //make sure the old values of MM[] have been read by every thread before they are overwritten below.
MM[threadIdx.x+1]=M;
gap_h[threadIdx.x+1]=step_right;
gap_size_h[threadIdx.x+1].x=ki;
gap_size_h[threadIdx.x+1].y=curmt.y;
__syncthreads(); // second barrier: make sure every MM[threadIdx.x+1] holds its new value before the next iteration reads it.
}
}
// char state;//0 match; 1 mismatch; 2 insertion; 3 deletion
// __shared__ int cigar_index;
// int segment_length;
// short2 btr;
// char new_state;
// int step_length;
int4 result4;
if(threadIdx.x==read_reference_number.x-1)
{
//printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index);
if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1))
{
// final_result=result_row;
result4.x=read_reference_number.y-1;
result4.y=result_row_index;
result4.z=read_reference_number.x-1-result_row_index;
}
else
{
// final_result=result_col;
result4.x=result_col_index;
result4.y=read_reference_number.x-1;
result4.z=0;
}
//result[offset*3]=final_result;
//printf("%d\n",final_result);
//result4.x=fina_i;
//result4.y=fina_j;
//result4.z=segment_length;
result[offset]=result4;
}
__syncthreads();
offset+=gridDim.x;
}
}
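// Traceback kernel: thread 0 walks direction_index back from the endpoint chosen above and
// emits the CIGAR runs (S/M/I/D) in reverse; the whole block then copies them out in order.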
__global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result
{
int offset=blockIdx.x;
int4 result4;
short2 * direction_index;
__shared__ char * cigar_store;
__shared__ int *cigar_int_store;
__shared__ char cigar_m[128];
__shared__ int cigar_int_m[128];
while(offset<size)
{
char state;//0 match; 1 mismatch; 2 insertion; 3 deletion
__shared__ int cigar_index;
int segment_length;
short2 btr;
char new_state;
int step_length;
if( threadIdx.x==0)
{
result4=result[offset];
direction_index=(short2 *) (direction+offset*640*1100);
cigar_store=(char *) (cigar+offset*sizeof(char)*128);
cigar_int_store=(int *) (cigar_int+offset*128);
//printf("\n %d %d\n", final_i,final_j);
cigar_index=0;
if(result4.z>0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=result4.z;
cigar_index++;
}
segment_length=0;
state='N';
do
{
btr=direction_index[(result4.x+result4.y)*640+result4.y];
if(btr.x>0)
{
new_state='D';
step_length=btr.x;
result4.x-=step_length;
}
else
if(btr.x<0)
{
new_state='I';
step_length=0-btr.x;
result4.y-=step_length;
}
else
{
new_state='M';
step_length=btr.y;
result4.x-=step_length;
result4.y-=step_length;
}
if(state=='N') state=new_state;
if(state==new_state)
{
segment_length+=step_length;
}
else
{
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
segment_length=step_length;
cigar_index++;
state=new_state;
}
}while(result4.x>=0&&result4.y>=0);
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
cigar_index++;
if(result4.y>=0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=result4.y+1;
cigar_index++;
}
result4.z=result4.x+1;
result4.w=cigar_index;
result[offset]=result4;
/* for(int i=cigar_index-1;i>=0;i--)
{
printf("%d%c",cigar_int_m[i],cigar_m[i]);
}
*/
}
__syncthreads();
if(threadIdx.x<cigar_index && cigar_index<=blockDim.x)
{
// if(threadIdx.x==0)
// printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]);
cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x];
cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x];
// if(threadIdx.x==0)
// printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]);
}
offset+=gridDim.x;
}
}
struct InputData
{
char read_base[600];
char reference_base[600];
};
int main(int artc, char* args[])
{
int total_size=0;
FILE * file;
file=fopen(args[1],"r");
int size;
double computation_time=0;//total_time=0;
timespec start,finish;
/* char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[1]);
strcpy(inputdata[index].read_base,data[1]);
index++;
}
*/
/* fscanf(file,"%d",&size);
while(!feof(file))
{
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<size;i++)
{
fscanf(file,"%s ",inputdata[i].reference_base);
fscanf(file,"%s ",inputdata[i].read_base);
}
*/
char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[i]);
strcpy(inputdata[index].read_base,data[j]);
index++;
}
//data preparation.
char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128);
NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total);
char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; // thus we do not need to worry about alignment
int data_size=0;
char * data_d_total;
cudaMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4);
int * result_h=(int *) malloc(sizeof(int)*size*4);
char * cigar_h=(char *) malloc(sizeof(char)*size*128);
int * cigar_int_h=(int *) malloc(sizeof(int)*size*128);
for(int i=0;i<size;i++)
{
char4 reference_tep[150];
int read_len=strlen(inputdata[i].read_base);
int ref_len=strlen(inputdata[i].reference_base);
int new_len=(ref_len+4-1)/4;
total_size+=ref_len*read_len;
for(int j=0;j<new_len;j++)
{
reference_tep[j].x=inputdata[i].reference_base[j*4];
if(j*4+1<ref_len)
reference_tep[j].y=inputdata[i].reference_base[j*4+1];
if(j*4+2<ref_len)
reference_tep[j].z=inputdata[i].reference_base[j*4+2];
if(j*4+3<ref_len)
reference_tep[j].w=inputdata[i].reference_base[j*4+3];
}
data_num_add[i].read_reference_number.x=read_len;
data_num_add[i].read_reference_number.y=ref_len;
data_num_add[i].address_array=data_size;
memcpy(data_h,inputdata[i].read_base,read_len);
data_h+=(read_len+128-1)/128*128;
data_size+=(read_len+128-1)/128*128;
memcpy(data_h,reference_tep,sizeof(char4)* new_len);
data_h+=(new_len*sizeof(char4)+127)/128*128;
data_size+=(new_len*sizeof(char4)+127)/128*128;
}
int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128;
cudaMemcpy(data_d_total,data_h_total,data_size_to_copy,cudaMemcpyHostToDevice);
NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total);
char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128;
int4 * result_d=(int4 *) (data_d_total+data_size_to_copy);
char * cigar;
cudaMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int)));
int * cigar_int=(int *) (cigar+size*128*sizeof(char));
int * direction;
cudaMalloc( (int **) & direction, size * (640*1100* sizeof (int)));
dim3 block(192);
dim3 grid(size);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
calculate_cigar<<<grid,block>>> (size,data_d,num_add_d,result_d,direction); //result
// calculate_cigar_2<<<grid,block>>> (size,result_d,cigar,cigar_int,direction); //result
cudaMemcpy(result_h,result_d,size*sizeof(int)*4,cudaMemcpyDeviceToHost);
cudaMemcpy(cigar_h,cigar,128*sizeof(char)*size, cudaMemcpyDeviceToHost);
cudaMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
computation_time+=diff(start,finish);
/* for(int i=0;i<size;i++)
{
printf("%d\n",result_h[i*4+1]);
printf("[");
for(int j=0;j<result_h[i*4+3];j++)
{
if(j!=0) printf(", ");
printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]);
}
printf("]\n");
}
*/
cudaFree(direction);
free(data_h_total);
cudaFree(data_d_total);
free(inputdata);
cudaFree(cigar);
free(cigar_int_h);
free(cigar_h);
// fscanf(file,"%d",&size);
}
// printf(" computation_time= %e total_time=%e \n",computation_time,0);
printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( (double)total_size)/computation_time/1000000000);
return 0;
}
|
c6b71a60b9237d4fb995e1c538d99282d786544c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 19660800
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
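// Builds a pointer-chasing chain: within each warp's partition of the array, element i stores
// the address of element (i + 48) mod elements_per_warp, so later dereferences form a dependent chain.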
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
int elements_per_warp = elements_per_block / num_warps_per_block;
// int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
// index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
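// Latency kernel: lanes with id < divergence execute long unrolled sequences of loads through the
// chain (interleaving re-loads of the current node with pointer chases); the result is folded into
// duration[] so the compiler cannot eliminate the memory traffic.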
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
int elements_per_warp = elements_per_block / num_warps_per_block;
// int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
// int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/dram/fadd_dram_80_20_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
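// ---------------------------------------------------------------------------
// Hedged post-processing sketch (added for illustration; not part of the
// original benchmark). The program above prints only the elapsed kernel time
// in milliseconds. To turn that into a rough latency per dependent pointer
// chase, the number of "tmp_ptr = (void**)(*tmp_ptr)" statements in one
// unrolled loop iteration has to be counted by hand and passed in as
// chases_per_iteration below; that count is an assumption, not a value taken
// from this source. The estimate also treats the runtime as bounded by a
// single dependent chain, which is only approximate when many warps overlap.
// The commented-out printf in parametric_measure_shared suggests the authors
// used a comparable per-access normalisation.
double approx_ns_per_chase(float elapsed_ms, int iterations,
                           int chases_per_iteration /* assumed count */) {
    double total_chases = (double)iterations * (double)chases_per_iteration;
    return total_chases > 0.0 ? ((double)elapsed_ms * 1.0e6) / total_chases : 0.0;
}
// ---------------------------------------------------------------------------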
|
c6b71a60b9237d4fb995e1c538d99282d786544c.cu
|
#include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 19660800
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
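// Added descriptive note: init_memory below is executed by a single thread and
// builds a pointer-chasing chain in global memory. Each block owns a
// contiguous slice of GLOBAL_MEM_ELEMENTS and each warp owns a sub-slice of
// that slice; within a warp's sub-slice, element i is made to point 48
// elements ahead, wrapping around modulo elements_per_warp, so every dependent
// load in shared_latency stays inside the issuing warp's own region.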
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
int elements_per_warp = elements_per_block / num_warps_per_block;
// int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
// index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
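// Added descriptive note: shared_latency lets only the first `divergence`
// lanes of each warp walk the chain built above. The long unrolled loop body
// mixes dependent pointer chases with floating-point adds that accumulate into
// f1, and the final store into duration[tid] consumes both tmp_ptr and f1 so
// the compiler cannot eliminate the loads or the adds as dead code.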
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
int elements_per_warp = elements_per_block / num_warps_per_block;
// int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
// int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/dram/fadd_dram_80_20_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
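// Example invocation (illustrative only; the numbers below are assumptions,
// not values taken from the original launch scripts):
//   ./binary 80 64 20 32 1
//   -> 80 blocks, 64 threads per block, 20 loop iterations,
//      32 active lanes per warp, stride 1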
|
892435ba8130995650b7df9f90f05010ae81f046.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// --------------------------------------------------------
// R-FCN
// Written by Yi Li, 2016.
// --------------------------------------------------------
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/triangle_pooling_layer.hpp"
#include "caffe/util/gpu_util.cuh"
using std::max;
using std::min;
namespace caffe {
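// Added descriptive note: PSROIPoolingForward below splits every pooling bin
// along its diagonal. For each pixel it compares the slope
// (h - hstart) / (w - wstart) with the bin's diagonal slope
// (hend - hstart) / (wend - wstart): pixels on one side are summed from
// channel c1, pixels on the other side from the paired channel c2 (offset by
// output_dim * group_size * group_size), and the two triangle averages are
// combined with weight 0.5 each when both triangles contain pixels. Because
// the slopes are computed in floating point, the first column of a bin
// (w == wstart) yields an infinite ratio and falls into the c1 triangle, while
// the bin corner (h == hstart && w == wstart) yields NaN and is skipped by
// both comparisons.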
template <typename Dtype>
__global__ void PSROIPoolingForward(
const int nthreads,
const Dtype* bottom_data,
const Dtype spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois,
const int output_dim,
const int group_size,
Dtype* top_data,
int* mapping_channel, Dtype* tmp1,Dtype* tmp2) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w =
static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
Dtype roi_start_h =
static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
Dtype roi_end_w =
static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
Dtype roi_end_h =
static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph) * bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = pw;
int gh = ph;
int c1 = (ctop*group_size + gh)*group_size + gw;
int c2 = c1 + output_dim * group_size * group_size;
const Dtype* bottom_data1 = bottom_data + (roi_batch_ind * channels + c1) * height * width;
const Dtype* bottom_data2 = bottom_data + (roi_batch_ind * channels + c2) * height * width;
Dtype out_sum1 = 0.0;
Dtype out_sum2 = 0.0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
Dtype ratio1 = (Dtype)( h - hstart) / (Dtype)( w - wstart) ;
Dtype ratio2 = (Dtype)( hend - hstart) / (Dtype)( wend - wstart) ;
if( ratio1 >= ratio2){
out_sum1 += bottom_data1[bottom_index];
tmp1[index] += 1.0;
}
if ( ratio1 <= ratio2){
out_sum2 += bottom_data2[bottom_index];
tmp2[index] += 1.0;
}
}
}
//Dtype bin_area = (hend - hstart)*(wend - wstart);
if (tmp1[index] == 0 && tmp2[index] == 0){
top_data[index] = 0.0;
}
else if(tmp1[index] == 0 &&tmp2[index] > 0 ){
top_data[index] = out_sum2/tmp2[index];
}
else if(tmp1[index] > 0 &&tmp2[index] == 0 ){
top_data[index] = out_sum1/tmp1[index];
}
else{
top_data[index] = 0.5 * (out_sum1/tmp1[index] + out_sum2/tmp2[index]);
}
//printf("index:%d tmp1 %.2f: tmp2:%.2f top_data: %.6f\n",index,tmp1[index],tmp2[index],top_data[index] );
mapping_channel[index] = c1;
}
}
template <typename Dtype>
void TrianglePoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data();
Dtype* tmp1 = tmp1_.mutable_gpu_data();
Dtype* tmp2 = tmp2_.mutable_gpu_data();
int count = top[0]->count();
//printf("%d\n",count);
caffe_gpu_set(count, Dtype(0), top_data);
caffe_gpu_set(count, -1, mapping_channel_ptr);
caffe_gpu_set(count, Dtype(0), tmp1);
caffe_gpu_set(count, Dtype(0), tmp2);
// NOLINT_NEXT_LINE(whitespace/operators)
PSROIPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, bottom_data, spatial_scale_,
channels_, height_, width_, pooled_height_,
pooled_width_, bottom_rois, output_dim_, group_size_,
top_data, mapping_channel_ptr,tmp1,tmp2);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void PSROIPoolingBackwardAtomic(
const int nthreads,
const Dtype* top_diff,
const int* mapping_channel,
const int num_rois,
const Dtype spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim,
Dtype* bottom_diff,
const Dtype* bottom_rois,
const Dtype* tmp1,
const Dtype* tmp2) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w =
static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
Dtype roi_start_h =
static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
Dtype roi_end_w =
static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
Dtype roi_end_h =
static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c1 = mapping_channel[index];
int c2 = c1 + output_dim * pooled_height * pooled_height;
Dtype* offset_bottom_diff1 = bottom_diff + (roi_batch_ind * channels + c1) * height * width;
Dtype* offset_bottom_diff2 = bottom_diff + (roi_batch_ind * channels + c2) * height * width;
//Dtype bin_area = (hend - hstart)*(wend - wstart);
Dtype diff_val1 = 0.0;
Dtype diff_val2 = 0.0;
if (!is_empty){
if (tmp1[index] == 0 && tmp2[index] == 0){
diff_val1 = 0.0;
diff_val2 = 0.0;
}
else if(tmp1[index] == 0 &&tmp2[index] > 0 ){
diff_val2 = 0.5 * top_diff[index] / tmp2[index];
}
else if(tmp1[index] > 0 &&tmp2[index] == 0 ){
diff_val1 = 0.5 * top_diff[index] / tmp1[index];
}
else{
diff_val2 = 0.5 * top_diff[index] / tmp2[index];
diff_val1 = 0.5 * top_diff[index] / tmp1[index];
// printf("%.4f\t%.4f\n",tmp1[index],tmp2[index]);
}
}
//Dtype diff_val1 = is_empty ? 0. : top_diff[index] * 0.5 / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
Dtype ratio1 = (Dtype)( h - hstart) / (Dtype)( w - wstart) ;
Dtype ratio2 = (Dtype)( hend - hstart) / (Dtype)( wend - wstart) ;
if( ratio1 >= ratio2){
//offset_bottom_diff1[bottom_index] += top_diff[index];
caffe_gpu_atomic_add(diff_val1, offset_bottom_diff1 + bottom_index);
}
if ( ratio1 <= ratio2){
caffe_gpu_atomic_add(diff_val2, offset_bottom_diff2 + bottom_index);
}
//caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
//printf("%f\n",*(offset_bottom_diff + bottom_index));
}
}
}
}
template <typename Dtype>
void TrianglePoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
const int* mapping_channel_ptr = mapping_channel_.gpu_data();
caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff());
caffe_gpu_set(bottom_count, Dtype(0), bottom_diff);
const int count = top[0]->count();
const Dtype* tmp1 = tmp1_.gpu_data();
const Dtype* tmp2 = tmp2_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
PSROIPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, top_diff, mapping_channel_ptr,
top[0]->num(), spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, output_dim_, bottom_diff,
bottom_rois,tmp1,tmp2);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(TrianglePoolingLayer);
} // namespace caffe
|
892435ba8130995650b7df9f90f05010ae81f046.cu
|
// --------------------------------------------------------
// R-FCN
// Written by Yi Li, 2016.
// --------------------------------------------------------
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/triangle_pooling_layer.hpp"
#include "caffe/util/gpu_util.cuh"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void PSROIPoolingForward(
const int nthreads,
const Dtype* bottom_data,
const Dtype spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois,
const int output_dim,
const int group_size,
Dtype* top_data,
int* mapping_channel, Dtype* tmp1,Dtype* tmp2) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w =
static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
Dtype roi_start_h =
static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
Dtype roi_end_w =
static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
Dtype roi_end_h =
static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph) * bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = pw;
int gh = ph;
int c1 = (ctop*group_size + gh)*group_size + gw;
int c2 = c1 + output_dim * group_size * group_size;
const Dtype* bottom_data1 = bottom_data + (roi_batch_ind * channels + c1) * height * width;
const Dtype* bottom_data2 = bottom_data + (roi_batch_ind * channels + c2) * height * width;
Dtype out_sum1 = 0.0;
Dtype out_sum2 = 0.0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
Dtype ratio1 = (Dtype)( h - hstart) / (Dtype)( w - wstart) ;
Dtype ratio2 = (Dtype)( hend - hstart) / (Dtype)( wend - wstart) ;
if( ratio1 >= ratio2){
out_sum1 += bottom_data1[bottom_index];
tmp1[index] += 1.0;
}
if ( ratio1 <= ratio2){
out_sum2 += bottom_data2[bottom_index];
tmp2[index] += 1.0;
}
}
}
//Dtype bin_area = (hend - hstart)*(wend - wstart);
if (tmp1[index] == 0 && tmp2[index] == 0){
top_data[index] = 0.0;
}
else if(tmp1[index] == 0 &&tmp2[index] > 0 ){
top_data[index] = out_sum2/tmp2[index];
}
else if(tmp1[index] > 0 &&tmp2[index] == 0 ){
top_data[index] = out_sum1/tmp1[index];
}
else{
top_data[index] = 0.5 * (out_sum1/tmp1[index] + out_sum2/tmp2[index]);
}
//printf("index:%d tmp1 %.2f: tmp2:%.2f top_data: %.6f\n",index,tmp1[index],tmp2[index],top_data[index] );
mapping_channel[index] = c1;
}
}
template <typename Dtype>
void TrianglePoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data();
Dtype* tmp1 = tmp1_.mutable_gpu_data();
Dtype* tmp2 = tmp2_.mutable_gpu_data();
int count = top[0]->count();
//printf("%d\n",count);
caffe_gpu_set(count, Dtype(0), top_data);
caffe_gpu_set(count, -1, mapping_channel_ptr);
caffe_gpu_set(count, Dtype(0), tmp1);
caffe_gpu_set(count, Dtype(0), tmp2);
// NOLINT_NEXT_LINE(whitespace/operators)
PSROIPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, bottom_data, spatial_scale_,
channels_, height_, width_, pooled_height_,
pooled_width_, bottom_rois, output_dim_, group_size_,
top_data, mapping_channel_ptr,tmp1,tmp2);
CUDA_POST_KERNEL_CHECK;
}
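// Added descriptive note: the backward kernel below re-derives the same bin
// and triangle geometry as the forward pass, reads the per-bin triangle pixel
// counts from tmp1/tmp2, and scatters 0.5 * top_diff / count into whichever of
// the two input channels each pixel belonged to. caffe_gpu_atomic_add is used
// because adjacent bins sharing boundary pixels and overlapping ROIs can write
// the same bottom_diff locations.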
template <typename Dtype>
__global__ void PSROIPoolingBackwardAtomic(
const int nthreads,
const Dtype* top_diff,
const int* mapping_channel,
const int num_rois,
const Dtype spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim,
Dtype* bottom_diff,
const Dtype* bottom_rois,
const Dtype* tmp1,
const Dtype* tmp2) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w =
static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
Dtype roi_start_h =
static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
Dtype roi_end_w =
static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
Dtype roi_end_h =
static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c1 = mapping_channel[index];
int c2 = c1 + output_dim * pooled_height * pooled_height;
Dtype* offset_bottom_diff1 = bottom_diff + (roi_batch_ind * channels + c1) * height * width;
Dtype* offset_bottom_diff2 = bottom_diff + (roi_batch_ind * channels + c2) * height * width;
//Dtype bin_area = (hend - hstart)*(wend - wstart);
Dtype diff_val1 = 0.0;
Dtype diff_val2 = 0.0;
if (!is_empty){
if (tmp1[index] == 0 && tmp2[index] == 0){
diff_val1 = 0.0;
diff_val2 = 0.0;
}
else if(tmp1[index] == 0 &&tmp2[index] > 0 ){
diff_val2 = 0.5 * top_diff[index] / tmp2[index];
}
else if(tmp1[index] > 0 &&tmp2[index] == 0 ){
diff_val1 = 0.5 * top_diff[index] / tmp1[index];
}
else{
diff_val2 = 0.5 * top_diff[index] / tmp2[index];
diff_val1 = 0.5 * top_diff[index] / tmp1[index];
// printf("%.4f\t%.4f\n",tmp1[index],tmp2[index]);
}
}
//Dtype diff_val1 = is_empty ? 0. : top_diff[index] * 0.5 / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
Dtype ratio1 = (Dtype)( h - hstart) / (Dtype)( w - wstart) ;
Dtype ratio2 = (Dtype)( hend - hstart) / (Dtype)( wend - wstart) ;
if( ratio1 >= ratio2){
//offset_bottom_diff1[bottom_index] += top_diff[index];
caffe_gpu_atomic_add(diff_val1, offset_bottom_diff1 + bottom_index);
}
if ( ratio1 <= ratio2){
caffe_gpu_atomic_add(diff_val2, offset_bottom_diff2 + bottom_index);
}
//caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
//printf("%f\n",*(offset_bottom_diff + bottom_index));
}
}
}
}
template <typename Dtype>
void TrianglePoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
const int* mapping_channel_ptr = mapping_channel_.gpu_data();
caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff());
caffe_gpu_set(bottom_count, Dtype(0), bottom_diff);
const int count = top[0]->count();
const Dtype* tmp1 = tmp1_.gpu_data();
const Dtype* tmp2 = tmp2_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
PSROIPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, top_diff, mapping_channel_ptr,
top[0]->num(), spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, output_dim_, bottom_diff,
bottom_rois,tmp1,tmp2);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(TrianglePoolingLayer);
} // namespace caffe
|
e7423c5e7a910ed80fcbcc5c6435e3a52614bcb2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "reference_calc.h"
#include "utils.h"
#include <stdio.h>
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
int r = blockIdx.y * blockDim.y + threadIdx.y;
int c = blockIdx.x * blockDim.x + threadIdx.x;
float result = 0.f;
int image_r, image_c;
if (r >= numRows || c >= numCols) {
return;
}
//For every value in the filter around the pixel (c, r)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
image_r = min(max(r + filter_r, 0), static_cast<int>(numRows - 1));
image_c = min(max(c + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
//if (r==250 && c==250) {
//printf("%d, %d, %d, %d, %f, %f, %f\n", filter_r, filter_c, image_r, image_c, image_value, filter_value, result);
//}
}
}
outputChannel[r * numCols + c] = result;
}
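// ---------------------------------------------------------------------------
// Hedged sketch (added; not part of the assignment starter code). The header
// comment suggests a shared-memory version as a follow-up optimisation. The
// kernel below is one possible tiled variant: each block stages its tile plus
// a halo in shared memory, so every input pixel is read from global memory
// once per block instead of filterWidth*filterWidth times. TILE_W/TILE_H match
// the 16x16 block used in your_gaussian_blur, and MAX_HALO assumes
// filterWidth <= 2*MAX_HALO + 1; both are illustrative assumptions. It could
// be launched with the same gridSize/blockSize as the gaussian_blur calls.
#define TILE_W 16
#define TILE_H 16
#define MAX_HALO 4
__global__
void gaussian_blur_tiled(const unsigned char* const inputChannel,
                         unsigned char* const outputChannel,
                         int numRows, int numCols,
                         const float* const filter, const int filterWidth)
{
  __shared__ float tile[TILE_H + 2 * MAX_HALO][TILE_W + 2 * MAX_HALO];
  const int halo = filterWidth / 2;
  const int c = (int)blockIdx.x * TILE_W + threadIdx.x;
  const int r = (int)blockIdx.y * TILE_H + threadIdx.y;
  // Cooperatively load the tile plus its halo, clamping reads to the image.
  for (int ty = threadIdx.y; ty < TILE_H + 2 * halo; ty += blockDim.y) {
    for (int tx = threadIdx.x; tx < TILE_W + 2 * halo; tx += blockDim.x) {
      int ir = min(max((int)blockIdx.y * TILE_H + ty - halo, 0), numRows - 1);
      int ic = min(max((int)blockIdx.x * TILE_W + tx - halo, 0), numCols - 1);
      tile[ty][tx] = static_cast<float>(inputChannel[ir * numCols + ic]);
    }
  }
  __syncthreads();
  if (r >= numRows || c >= numCols)
    return;
  float result = 0.f;
  for (int filter_r = 0; filter_r < filterWidth; ++filter_r)
    for (int filter_c = 0; filter_c < filterWidth; ++filter_c)
      result += tile[threadIdx.y + filter_r][threadIdx.x + filter_c] *
                filter[filter_r * filterWidth + filter_c];
  outputChannel[r * numCols + c] = static_cast<unsigned char>(result);
}
// ---------------------------------------------------------------------------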
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
uchar4 inputPixel = inputImageRGBA[thread_1D_pos];
unsigned char red = inputPixel.x;
unsigned char green = inputPixel.y;
unsigned char blue = inputPixel.z;
//printf("Separate Channels: %d - %d %d %d\n", thread_1D_pos, red, green, blue);
redChannel[thread_1D_pos] = red;
greenChannel[thread_1D_pos] = green;
blueChannel[thread_1D_pos] = blue;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth,
hipMemcpyHostToDevice));
//printf ("Filter: %f\n", d_filter[0]);
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
int bx = 16;
int by = 16;
const dim3 blockSize (bx, by, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize ((numCols/bx)+1, (numRows/by)+1, 1);
//printf("block size = %d, %d, grid size = %d, %d\n", bx, by, numCols/bx+1, numRows/by+1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue
);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// printf ("Filter: %f, %f\n", d_filter[0], d_filter[1]);
//for (int i=0; i<filterWidth; i++) {
// for (int j=0; j<filterWidth; j++) {
// printf ("%f, ", d_filter[j]);
// }
// printf ("\n");
//}
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter)); // d_filter is allocated in allocateMemoryAndCopyToGPU and must be freed as well
}
|
e7423c5e7a910ed80fcbcc5c6435e3a52614bcb2.cu
|
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "reference_calc.h"
#include "utils.h"
#include <stdio.h>
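// ---------------------------------------------------------------------------
// Illustrative sketch only -- not part of the assignment and not the reference
// solution. The header note above suggests experimenting with shared memory
// once the basic kernel works; the hypothetical kernel below shows one simple
// variant that stages the (small) filter in shared memory, assuming filterWidth
// never exceeds MAX_FILTER_WIDTH_SKETCH. The kernel name and that bound are
// invented for this sketch; a fuller optimization would typically tile the
// image itself as well.
// ---------------------------------------------------------------------------
#define MAX_FILTER_WIDTH_SKETCH 31
__global__
void gaussian_blur_shared_filter_sketch(const unsigned char* const inputChannel,
                                        unsigned char* const outputChannel,
                                        int numRows, int numCols,
                                        const float* const filter, const int filterWidth)
{
  __shared__ float s_filter[MAX_FILTER_WIDTH_SKETCH * MAX_FILTER_WIDTH_SKETCH];
  // All threads of the block cooperate to copy the filter into shared memory.
  for (int i = threadIdx.y * blockDim.x + threadIdx.x;
       i < filterWidth * filterWidth;
       i += blockDim.x * blockDim.y) {
    s_filter[i] = filter[i];
  }
  __syncthreads();
  const int c = blockIdx.x * blockDim.x + threadIdx.x;
  const int r = blockIdx.y * blockDim.y + threadIdx.y;
  if (r >= numRows || c >= numCols)
    return;
  float result = 0.f;
  for (int fr = -filterWidth / 2; fr <= filterWidth / 2; ++fr) {
    for (int fc = -filterWidth / 2; fc <= filterWidth / 2; ++fc) {
      // Clamp neighbour coordinates to the image border, exactly as in the
      // gaussian_blur kernel below.
      const int image_r = min(max(r + fr, 0), numRows - 1);
      const int image_c = min(max(c + fc, 0), numCols - 1);
      result += static_cast<float>(inputChannel[image_r * numCols + image_c]) *
                s_filter[(fr + filterWidth / 2) * filterWidth + (fc + filterWidth / 2)];
    }
  }
  outputChannel[r * numCols + c] = static_cast<unsigned char>(result);
}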
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
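  // Map this thread to one output pixel: r is the row (y) index and c is the
  // column (x) index of the pixel this thread writes.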
int r = blockIdx.y * blockDim.y + threadIdx.y;
int c = blockIdx.x * blockDim.x + threadIdx.x;
float result = 0.f;
int image_r, image_c;
if (r >= numRows || c >= numCols) {
return;
}
//For every value in the filter around the pixel (c, r)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
image_r = min(max(r + filter_r, 0), static_cast<int>(numRows - 1));
image_c = min(max(c + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
//if (r==250 && c==250) {
//printf("%d, %d, %d, %d, %f, %f, %f\n", filter_r, filter_c, image_r, image_c, image_value, filter_value, result);
//}
}
}
outputChannel[r * numCols + c] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
uchar4 inputPixel = inputImageRGBA[thread_1D_pos];
unsigned char red = inputPixel.x;
unsigned char green = inputPixel.y;
unsigned char blue = inputPixel.z;
//printf("Separate Channels: %d - %d %d %d\n", thread_1D_pos, red, green, blue);
redChannel[thread_1D_pos] = red;
greenChannel[thread_1D_pos] = green;
blueChannel[thread_1D_pos] = blue;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
  checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth,
                             cudaMemcpyHostToDevice));
//printf ("Filter: %f\n", d_filter[0]);
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
int bx = 16;
int by = 16;
const dim3 blockSize (bx, by, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize ((numCols/bx)+1, (numRows/by)+1, 1);
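  // The extra +1 block per dimension covers images whose sizes are not exact
  // multiples of the block size; the kernels' bounds checks discard the
  // surplus threads.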
//printf("block size = %d, %d, grid size = %d, %d\n", bx, by, numCols/bx+1, numRows/by+1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue
);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// printf ("Filter: %f, %f\n", d_filter[0], d_filter[1]);
//for (int i=0; i<filterWidth; i++) {
// for (int j=0; j<filterWidth; j++) {
// printf ("%f, ", d_filter[j]);
// }
// printf ("\n");
//}
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
|
e92ca06a985a045477e22e417907e1fca09b94e2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it achieves this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o Crack_with_CUDA Crack_with_CUDA.cu
./Crack_with_CUDA
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise,it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
char plain_password[] = "KB7867";
char *a = attempt;
char *p = plain_password;
while(*a == *p) {
if(*a == '\0') {
return 1;
}
a++;
p++;
}
return 0;
}
/****************************************************************************
The kernel function gives each thread a distinct two-letter prefix, taken
from its block and thread indices, and uses nested loops to generate the
remaining four digits, testing each candidate against the hidden password.
*****************************************************************************/
__global__ void kernel() {
int i, j, k, l;
char c;
char password[7];
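  // Each thread starts from its own two-letter prefix: the first letter comes
  // from the thread index and the second from the block index, so a 26x26
  // launch covers every letter pair.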
password[0] = 'A' + threadIdx.x;
password[1] = 'A' + blockIdx.x;
for(i=0; i<10; i++) {
c = i+'0';
password[2] = c;
for(j=0; j<10; j++) {
c = j+'0';
password[3] = c;
for(k=0; k<10; k++) {
c = k+'0';
password[4] = c;
for(l=0; l<10; l++) {
c = l+'0';
password[5] = c;
password[6] = '\0';
if(is_a_match(password)) {
printf("password found: %s, Thread id is %d\n", password);
}
else {
//printf("tried: %s\n", password);
}
}
}
}
}
}
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
//Launch the kernel
hipLaunchKernelGGL(( kernel), dim3(26), dim3(26), 0, 0, );
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
|
e92ca06a985a045477e22e417907e1fca09b94e2.cu
|
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it achieves this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o Crack_with_CUDA Crack_with_CUDA.cu
./Crack_with_CUDA
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise,it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
char plain_password[] = "KB7867";
char *a = attempt;
char *p = plain_password;
while(*a == *p) {
if(*a == '\0') {
return 1;
}
a++;
p++;
}
return 0;
}
/****************************************************************************
The kernel function gives each thread a distinct two-letter prefix, taken
from its block and thread indices, and uses nested loops to generate the
remaining four digits, testing each candidate against the hidden password.
*****************************************************************************/
__global__ void kernel() {
int i, j, k, l;
char c;
char password[7];
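  // Each thread starts from its own two-letter prefix: the first letter comes
  // from the thread index and the second from the block index, so a 26x26
  // launch covers every letter pair.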
password[0] = 'A' + threadIdx.x;
password[1] = 'A' + blockIdx.x;
for(i=0; i<10; i++) {
c = i+'0';
password[2] = c;
for(j=0; j<10; j++) {
c = j+'0';
password[3] = c;
for(k=0; k<10; k++) {
c = k+'0';
password[4] = c;
for(l=0; l<10; l++) {
c = l+'0';
password[5] = c;
password[6] = '\0';
if(is_a_match(password)) {
printf("password found: %s, Thread id is %d\n", password);
}
else {
//printf("tried: %s\n", password);
}
}
}
}
}
}
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
//Launch the kernel
kernel<<<26, 26>>>();
cudaThreadSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
|
08095944ee953d4ba936e60b9b319bdfeb10cc2a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/linalg/distance_type.h>
#include <iostream>
#include <raft/spatial/knn/knn.hpp>
#include <rmm/device_buffer.hpp>
#include <vector>
#include "../test_utils.h"
namespace raft {
namespace spatial {
namespace knn {
struct KNNInputs {
std::vector<std::vector<float>> input;
int k;
std::vector<int> labels;
};
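// Gathers, for every returned neighbour index, the label of that reference
// point so the result can be compared element-wise with the expected labels.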
__global__ void build_actual_output(int *output, int n_rows, int k,
const int *idx_labels,
const int64_t *indices) {
int element = threadIdx.x + blockDim.x * blockIdx.x;
if (element >= n_rows * k) return;
int ind = (int)indices[element];
output[element] = idx_labels[ind];
}
__global__ void build_expected_output(int *output, int n_rows, int k,
const int *labels) {
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= n_rows) return;
int cur_label = labels[row];
for (int i = 0; i < k; i++) {
output[row * k + i] = cur_label;
}
}
template <typename T>
class KNNTest : public ::testing::TestWithParam<KNNInputs> {
protected:
void testBruteForce() {
raft::print_device_vector("Input array: ", input_, rows_ * cols_,
std::cout);
std::cout << "K: " << k_ << "\n";
raft::print_device_vector("Labels array: ", search_labels_, rows_,
std::cout);
auto stream = handle_.get_stream();
raft::allocate(actual_labels_, rows_ * k_, true);
raft::allocate(expected_labels_, rows_ * k_, true);
std::vector<float *> input_vec;
std::vector<int> sizes_vec;
input_vec.push_back(input_);
sizes_vec.push_back(rows_);
brute_force_knn(handle_, input_vec, sizes_vec, cols_, search_data_, rows_,
indices_, distances_, k_, true, true);
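    // The two clusters in the input data are well separated, so every
    // neighbour returned for a point is expected to carry that point's own label.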
hipLaunchKernelGGL(( build_actual_output), dim3(raft::ceildiv(rows_ * k_, 32)), dim3(32), 0, stream,
actual_labels_, rows_, k_, search_labels_, indices_);
hipLaunchKernelGGL(( build_expected_output), dim3(raft::ceildiv(rows_ * k_, 32)), dim3(32), 0, stream,
expected_labels_, rows_, k_, search_labels_);
raft::print_device_vector("Output indices: ", indices_, rows_ * k_,
std::cout);
raft::print_device_vector("Output distances: ", distances_, rows_ * k_,
std::cout);
raft::print_device_vector("Output labels: ", actual_labels_, rows_ * k_,
std::cout);
raft::print_device_vector("Expected labels: ", expected_labels_, rows_ * k_,
std::cout);
ASSERT_TRUE(devArrMatch(expected_labels_, actual_labels_, rows_ * k_,
raft::Compare<int>()));
}
void SetUp() override {
params_ = ::testing::TestWithParam<KNNInputs>::GetParam();
rows_ = params_.input.size();
cols_ = params_.input[0].size();
k_ = params_.k;
std::vector<float> row_major_input;
for (int i = 0; i < params_.input.size(); ++i) {
for (int j = 0; j < params_.input[i].size(); ++j) {
row_major_input.push_back(params_.input[i][j]);
}
}
rmm::device_buffer input_d = rmm::device_buffer(
row_major_input.data(), row_major_input.size() * sizeof(float));
float *input_ptr = static_cast<float *>(input_d.data());
rmm::device_buffer labels_d = rmm::device_buffer(
params_.labels.data(), params_.labels.size() * sizeof(int));
int *labels_ptr = static_cast<int *>(labels_d.data());
raft::allocate(input_, rows_ * cols_, true);
raft::allocate(search_data_, rows_ * cols_, true);
raft::allocate(indices_, rows_ * k_, true);
raft::allocate(distances_, rows_ * k_, true);
raft::allocate(search_labels_, rows_, true);
raft::copy(input_, input_ptr, rows_ * cols_, handle_.get_stream());
raft::copy(search_data_, input_ptr, rows_ * cols_, handle_.get_stream());
raft::copy(search_labels_, labels_ptr, rows_, handle_.get_stream());
}
void TearDown() override {
CUDA_CHECK(hipFree(search_data_));
CUDA_CHECK(hipFree(indices_));
CUDA_CHECK(hipFree(distances_));
CUDA_CHECK(hipFree(actual_labels_));
}
private:
raft::handle_t handle_;
KNNInputs params_;
int rows_;
int cols_;
float *input_;
float *search_data_;
int64_t *indices_;
float *distances_;
int k_;
int *search_labels_;
int *actual_labels_;
int *expected_labels_;
};
const std::vector<KNNInputs> inputs = {
// 2D
{{
{2.7810836, 2.550537003},
{1.465489372, 2.362125076},
{3.396561688, 4.400293529},
{1.38807019, 1.850220317},
{3.06407232, 3.005305973},
{7.627531214, 2.759262235},
{5.332441248, 2.088626775},
{6.922596716, 1.77106367},
{8.675418651, -0.242068655},
{7.673756466, 3.508563011},
},
2,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1}}};
typedef KNNTest<float> KNNTestF;
TEST_P(KNNTestF, BruteForce) { this->testBruteForce(); }
INSTANTIATE_TEST_CASE_P(KNNTest, KNNTestF, ::testing::ValuesIn(inputs));
} // namespace knn
} // namespace spatial
} // namespace raft
|
08095944ee953d4ba936e60b9b319bdfeb10cc2a.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/linalg/distance_type.h>
#include <iostream>
#include <raft/spatial/knn/knn.hpp>
#include <rmm/device_buffer.hpp>
#include <vector>
#include "../test_utils.h"
namespace raft {
namespace spatial {
namespace knn {
struct KNNInputs {
std::vector<std::vector<float>> input;
int k;
std::vector<int> labels;
};
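// Gathers, for every returned neighbour index, the label of that reference
// point so the result can be compared element-wise with the expected labels.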
__global__ void build_actual_output(int *output, int n_rows, int k,
const int *idx_labels,
const int64_t *indices) {
int element = threadIdx.x + blockDim.x * blockIdx.x;
if (element >= n_rows * k) return;
int ind = (int)indices[element];
output[element] = idx_labels[ind];
}
__global__ void build_expected_output(int *output, int n_rows, int k,
const int *labels) {
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= n_rows) return;
int cur_label = labels[row];
for (int i = 0; i < k; i++) {
output[row * k + i] = cur_label;
}
}
template <typename T>
class KNNTest : public ::testing::TestWithParam<KNNInputs> {
protected:
void testBruteForce() {
raft::print_device_vector("Input array: ", input_, rows_ * cols_,
std::cout);
std::cout << "K: " << k_ << "\n";
raft::print_device_vector("Labels array: ", search_labels_, rows_,
std::cout);
auto stream = handle_.get_stream();
raft::allocate(actual_labels_, rows_ * k_, true);
raft::allocate(expected_labels_, rows_ * k_, true);
std::vector<float *> input_vec;
std::vector<int> sizes_vec;
input_vec.push_back(input_);
sizes_vec.push_back(rows_);
brute_force_knn(handle_, input_vec, sizes_vec, cols_, search_data_, rows_,
indices_, distances_, k_, true, true);
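    // The two clusters in the input data are well separated, so every
    // neighbour returned for a point is expected to carry that point's own label.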
build_actual_output<<<raft::ceildiv(rows_ * k_, 32), 32, 0, stream>>>(
actual_labels_, rows_, k_, search_labels_, indices_);
build_expected_output<<<raft::ceildiv(rows_ * k_, 32), 32, 0, stream>>>(
expected_labels_, rows_, k_, search_labels_);
raft::print_device_vector("Output indices: ", indices_, rows_ * k_,
std::cout);
raft::print_device_vector("Output distances: ", distances_, rows_ * k_,
std::cout);
raft::print_device_vector("Output labels: ", actual_labels_, rows_ * k_,
std::cout);
raft::print_device_vector("Expected labels: ", expected_labels_, rows_ * k_,
std::cout);
ASSERT_TRUE(devArrMatch(expected_labels_, actual_labels_, rows_ * k_,
raft::Compare<int>()));
}
void SetUp() override {
params_ = ::testing::TestWithParam<KNNInputs>::GetParam();
rows_ = params_.input.size();
cols_ = params_.input[0].size();
k_ = params_.k;
std::vector<float> row_major_input;
for (int i = 0; i < params_.input.size(); ++i) {
for (int j = 0; j < params_.input[i].size(); ++j) {
row_major_input.push_back(params_.input[i][j]);
}
}
rmm::device_buffer input_d = rmm::device_buffer(
row_major_input.data(), row_major_input.size() * sizeof(float));
float *input_ptr = static_cast<float *>(input_d.data());
rmm::device_buffer labels_d = rmm::device_buffer(
params_.labels.data(), params_.labels.size() * sizeof(int));
int *labels_ptr = static_cast<int *>(labels_d.data());
raft::allocate(input_, rows_ * cols_, true);
raft::allocate(search_data_, rows_ * cols_, true);
raft::allocate(indices_, rows_ * k_, true);
raft::allocate(distances_, rows_ * k_, true);
raft::allocate(search_labels_, rows_, true);
raft::copy(input_, input_ptr, rows_ * cols_, handle_.get_stream());
raft::copy(search_data_, input_ptr, rows_ * cols_, handle_.get_stream());
raft::copy(search_labels_, labels_ptr, rows_, handle_.get_stream());
}
void TearDown() override {
CUDA_CHECK(cudaFree(search_data_));
CUDA_CHECK(cudaFree(indices_));
CUDA_CHECK(cudaFree(distances_));
CUDA_CHECK(cudaFree(actual_labels_));
}
private:
raft::handle_t handle_;
KNNInputs params_;
int rows_;
int cols_;
float *input_;
float *search_data_;
int64_t *indices_;
float *distances_;
int k_;
int *search_labels_;
int *actual_labels_;
int *expected_labels_;
};
const std::vector<KNNInputs> inputs = {
// 2D
{{
{2.7810836, 2.550537003},
{1.465489372, 2.362125076},
{3.396561688, 4.400293529},
{1.38807019, 1.850220317},
{3.06407232, 3.005305973},
{7.627531214, 2.759262235},
{5.332441248, 2.088626775},
{6.922596716, 1.77106367},
{8.675418651, -0.242068655},
{7.673756466, 3.508563011},
},
2,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1}}};
typedef KNNTest<float> KNNTestF;
TEST_P(KNNTestF, BruteForce) { this->testBruteForce(); }
INSTANTIATE_TEST_CASE_P(KNNTest, KNNTestF, ::testing::ValuesIn(inputs));
} // namespace knn
} // namespace spatial
} // namespace raft
|
3bec218ad2f4472e9ec89c6e8f3ccfd1cf212a28.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "network_analyzer_cuda.h"
#include "neural_network_cuda_exception.h"
#include "cuda_linear_buffer_device.h"
#include "cuda_linear_buffer_host.h"
#include "util_cuda.h"
#include "cuda_event.h"
#include "layer_updater_schema_factory.h"
#include <hip/hip_runtime.h>
#include <boost/format.hpp>
namespace nnforge
{
namespace cuda
{
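		// Unpacks four byte-packed input neurons per element and rescales them
		// from [0, 255] to floats in [0.0, 1.0].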
__global__ void convert_compacted_to_raw_analazer_kernel(
const uchar4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
uchar4 inp = input[elem_id];
float4 val;
val.x = inp.x * (1.0F / 255.0F);
val.y = inp.y * (1.0F / 255.0F);
val.z = inp.z * (1.0F / 255.0F);
val.w = inp.w * (1.0F / 255.0F);
output[elem_id] = val;
}
}
network_analyzer_cuda::network_analyzer_cuda(
network_schema_smart_ptr schema,
cuda_running_configuration_const_smart_ptr cuda_config)
: network_analyzer(schema)
, cuda_config(cuda_config)
{
const const_layer_list& layer_list = *schema;
for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it)
updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config));
setup_network_cuda();
for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it)
updater_schema_data.push_back((*it)->get_schema_buffers());
}
network_analyzer_cuda::~network_analyzer_cuda()
{
}
void network_analyzer_cuda::setup_network_cuda()
{
command_stream = cuda_stream_smart_ptr(new cuda_stream());
}
void network_analyzer_cuda::layer_config_list_modified()
{
updater_list.clear();
updater_input_and_all_buffers_pack.clear();
output_errors_buffers.clear();
layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin();
for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf)
{
updater_list.push_back(
(*it)->create_updater(
*it_conf,
*(it_conf + 1),
true,
true));
}
unsigned int input_neuron_count = layer_config_list.front().get_neuron_count();
unsigned int output_neuron_count = layer_config_list.back().get_neuron_count();
input_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * sizeof(float)));
input_converted_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf;
for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it)
{
layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(1);
updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers));
output_buffer = all_buffers.output_neurons_buffer;
}
cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf;
for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it)
{
output_errors_buffers.push_back(output_errors);
layer_updater_cuda::buffer_set& all_buffers = it->second;
if (all_buffers.input_errors_buffer != 0)
output_errors = all_buffers.input_errors_buffer;
}
}
void network_analyzer_cuda::actual_set_data(network_data_smart_ptr data)
{
net_data.clear();
for(layer_data_list::const_iterator it2 = data->begin(); it2 != data->end(); ++it2)
{
std::vector<cuda_linear_buffer_device_smart_ptr> res;
for(std::vector<std::vector<float> >::iterator it = (*it2)->begin(); it != (*it2)->end(); ++it)
{
size_t buffer_size = it->size() * sizeof(float);
cuda_linear_buffer_device_smart_ptr new_buf(new cuda_linear_buffer_device(buffer_size));
cuda_safe_call(hipMemcpy(*new_buf, &(*it->begin()), buffer_size, hipMemcpyHostToDevice));
res.push_back(new_buf);
}
net_data.push_back(res);
}
}
void network_analyzer_cuda::actual_set_input_data(
const void * input,
neuron_data_type::input_type type_code)
{
unsigned int input_neuron_count = layer_config_list.front().get_neuron_count();
unsigned int output_neuron_count = layer_config_list.back().get_neuron_count();
size_t input_neuron_elem_size = neuron_data_type::get_input_size(type_code);
// Convert input
if (type_code == neuron_data_type::type_byte)
{
cuda_safe_call(hipMemcpyAsync(
*input_buf,
input,
input_neuron_count * input_neuron_elem_size,
hipMemcpyHostToDevice,
*command_stream));
int elem_count = (input_neuron_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( convert_compacted_to_raw_analazer_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, *command_stream,
*input_buf,
*input_converted_buf,
elem_count);
}
else if (type_code == neuron_data_type::type_float)
{
cuda_safe_call(hipMemcpyAsync(
*input_converted_buf,
input,
input_neuron_count * input_neuron_elem_size,
hipMemcpyHostToDevice,
*command_stream));
}
else throw neural_network_exception((boost::format("actual_set_input_data cannot handle input neurons of type %1%") % type_code).str());
// Forward updater
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin();
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin();
layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin();
for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++layer_config_it)
{
(*it)->enqueue_test(
0,
*command_stream,
*schema_data_it,
*net_data_it,
input_and_all_buffers_pack_it->first,
input_and_all_buffers_pack_it->second.output_neurons_buffer,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
1);
}
}
cuda_safe_call(hipStreamSynchronize(*command_stream));
}
std::pair<layer_configuration_specific_snapshot_smart_ptr, layer_configuration_specific_snapshot_smart_ptr> network_analyzer_cuda::actual_run_backprop(
const layer_configuration_specific_snapshot& output_data,
const std::vector<unsigned int>& output_offset_list,
unsigned int output_layer_id,
const std::vector<std::pair<unsigned int, unsigned int> >& input_rectangle_borders)
{
std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin() + (output_errors_buffers.size() - output_layer_id - 1);
// Initialize output errors
{
float * dst = **output_errors_it;
cuda_util::set_with_value(*cuda_config, dst, 0.0F, (*output_errors_it)->get_size() / sizeof(float), *command_stream);
const layer_configuration_specific& output_config = layer_config_list[output_layer_id + 1];
int sequential_chunk_dimension_count = -1;
unsigned int sequential_copy_elem_count = 1;
while (sequential_chunk_dimension_count < (int)output_offset_list.size() - 1)
{
++sequential_chunk_dimension_count;
sequential_copy_elem_count *= output_data.config.dimension_sizes[sequential_chunk_dimension_count];
if (output_data.config.dimension_sizes[sequential_chunk_dimension_count] != output_config.dimension_sizes[sequential_chunk_dimension_count])
break;
}
++sequential_chunk_dimension_count;
std::vector<float>::const_iterator src_it = output_data.data.begin();
for(unsigned int feature_map_id = 0; feature_map_id < output_data.config.feature_map_count; ++feature_map_id)
{
unsigned int dst_fm_offset = feature_map_id * output_config.get_neuron_count_per_feature_map();
std::vector<unsigned int> src_list(output_offset_list.size(), 0);
bool cont = true;
while (cont)
{
bool should_copy = false;
for(std::vector<float>::const_iterator it = src_it; it != src_it + sequential_copy_elem_count; ++it)
{
if (*it != 0.0F)
{
should_copy = true;
break;
}
}
if (should_copy)
{
std::vector<unsigned int> dst_offset_list(output_offset_list);
for(unsigned int i = sequential_chunk_dimension_count; i < dst_offset_list.size(); ++i)
dst_offset_list[i] += src_list[i];
cuda_safe_call(hipMemcpyAsync(dst + dst_fm_offset + output_config.get_pos(dst_offset_list), &(*src_it), sequential_copy_elem_count * sizeof(float), hipMemcpyHostToDevice, *command_stream));
};
cont = false;
for(int i = sequential_chunk_dimension_count; i < src_list.size(); ++i)
{
src_list[i]++;
if (src_list[i] < output_data.config.dimension_sizes[i])
{
cont = true;
break;
}
else
src_list[i] = 0;
}
src_it += sequential_copy_elem_count;
}
}
}
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin() + (updater_input_and_all_buffers_pack.size() - output_layer_id - 1);
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin() + (updater_schema_data.size() - output_layer_id - 1);
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin() + (net_data.size() - output_layer_id - 1);
for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin() + (updater_list.size() - output_layer_id - 1); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++output_errors_it, ++net_data_it)
{
(*it)->enqueue_backprop(
*command_stream,
*schema_data_it,
*net_data_it,
input_and_all_buffers_pack_it->second.output_neurons_buffer,
input_and_all_buffers_pack_it->first,
*output_errors_it,
input_and_all_buffers_pack_it->second.input_errors_buffer,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
1);
}
layer_configuration_specific_snapshot_smart_ptr res(new layer_configuration_specific_snapshot());
layer_configuration_specific_snapshot_smart_ptr input_data(new layer_configuration_specific_snapshot());
// Copy input errors
{
res->config.feature_map_count = layer_config_list.front().feature_map_count;
input_data->config.feature_map_count = layer_config_list.front().feature_map_count;
unsigned int elem_count = res->config.feature_map_count;
for(int i = 0; i < input_rectangle_borders.size(); ++i)
{
unsigned int val = input_rectangle_borders[i].second - input_rectangle_borders[i].first;
elem_count *= val;
res->config.dimension_sizes.push_back(val);
input_data->config.dimension_sizes.push_back(val);
}
res->data.resize(elem_count);
input_data->data.resize(elem_count);
cuda_linear_buffer_device_smart_ptr input_errors_buf = updater_input_and_all_buffers_pack.front().second.input_errors_buffer;
if (input_errors_buf == 0)
input_errors_buf = output_errors_buffers.back();
float * src = *input_errors_buf;
float * src_input_data = *input_converted_buf;
const layer_configuration_specific& input_config = layer_config_list.front();
int sequential_chunk_dimension_count = -1;
unsigned int sequential_copy_elem_count = 1;
while (sequential_chunk_dimension_count < (int)input_config.dimension_sizes.size() - 1)
{
++sequential_chunk_dimension_count;
sequential_copy_elem_count *= res->config.dimension_sizes[sequential_chunk_dimension_count];
if (res->config.dimension_sizes[sequential_chunk_dimension_count] != input_config.dimension_sizes[sequential_chunk_dimension_count])
break;
}
++sequential_chunk_dimension_count;
std::vector<float>::iterator dst_it = res->data.begin();
std::vector<float>::iterator dst_input_data_it = input_data->data.begin();
for(unsigned int feature_map_id = 0; feature_map_id < input_config.feature_map_count; ++feature_map_id)
{
unsigned int src_fm_offset = feature_map_id * input_config.get_neuron_count_per_feature_map();
std::vector<unsigned int> dst_list(input_rectangle_borders.size(), 0);
bool cont = true;
while (cont)
{
std::vector<unsigned int> src_offset_list(input_rectangle_borders.size());
for(int i = 0; i < src_offset_list.size(); ++i)
src_offset_list[i] = input_rectangle_borders[i].first;
for(unsigned int i = sequential_chunk_dimension_count; i < src_offset_list.size(); ++i)
src_offset_list[i] += dst_list[i];
cuda_safe_call(hipMemcpyAsync(&(*dst_it), src + src_fm_offset + input_config.get_pos(src_offset_list), sequential_copy_elem_count * sizeof(float), hipMemcpyDeviceToHost, *command_stream));
cuda_safe_call(hipMemcpyAsync(&(*dst_input_data_it), src_input_data + src_fm_offset + input_config.get_pos(src_offset_list), sequential_copy_elem_count * sizeof(float), hipMemcpyDeviceToHost, *command_stream));
cont = false;
for(int i = sequential_chunk_dimension_count; i < dst_list.size(); ++i)
{
dst_list[i]++;
if (dst_list[i] < res->config.dimension_sizes[i])
{
cont = true;
break;
}
else
dst_list[i] = 0;
}
dst_it += sequential_copy_elem_count;
dst_input_data_it += sequential_copy_elem_count;
}
}
}
cuda_safe_call(hipStreamSynchronize(*command_stream));
return std::make_pair(res, input_data);
}
}
}
|
3bec218ad2f4472e9ec89c6e8f3ccfd1cf212a28.cu
|
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "network_analyzer_cuda.h"
#include "neural_network_cuda_exception.h"
#include "cuda_linear_buffer_device.h"
#include "cuda_linear_buffer_host.h"
#include "util_cuda.h"
#include "cuda_event.h"
#include "layer_updater_schema_factory.h"
#include <cuda_runtime.h>
#include <boost/format.hpp>
namespace nnforge
{
namespace cuda
{
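		// Unpacks four byte-packed input neurons per element and rescales them
		// from [0, 255] to floats in [0.0, 1.0].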
__global__ void convert_compacted_to_raw_analazer_kernel(
const uchar4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
uchar4 inp = input[elem_id];
float4 val;
val.x = inp.x * (1.0F / 255.0F);
val.y = inp.y * (1.0F / 255.0F);
val.z = inp.z * (1.0F / 255.0F);
val.w = inp.w * (1.0F / 255.0F);
output[elem_id] = val;
}
}
network_analyzer_cuda::network_analyzer_cuda(
network_schema_smart_ptr schema,
cuda_running_configuration_const_smart_ptr cuda_config)
: network_analyzer(schema)
, cuda_config(cuda_config)
{
const const_layer_list& layer_list = *schema;
for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it)
updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config));
setup_network_cuda();
for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it)
updater_schema_data.push_back((*it)->get_schema_buffers());
}
network_analyzer_cuda::~network_analyzer_cuda()
{
}
void network_analyzer_cuda::setup_network_cuda()
{
command_stream = cuda_stream_smart_ptr(new cuda_stream());
}
void network_analyzer_cuda::layer_config_list_modified()
{
updater_list.clear();
updater_input_and_all_buffers_pack.clear();
output_errors_buffers.clear();
layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin();
for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf)
{
updater_list.push_back(
(*it)->create_updater(
*it_conf,
*(it_conf + 1),
true,
true));
}
unsigned int input_neuron_count = layer_config_list.front().get_neuron_count();
unsigned int output_neuron_count = layer_config_list.back().get_neuron_count();
input_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * sizeof(float)));
input_converted_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf;
for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it)
{
layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(1);
updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers));
output_buffer = all_buffers.output_neurons_buffer;
}
cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf;
for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it)
{
output_errors_buffers.push_back(output_errors);
layer_updater_cuda::buffer_set& all_buffers = it->second;
if (all_buffers.input_errors_buffer != 0)
output_errors = all_buffers.input_errors_buffer;
}
}
void network_analyzer_cuda::actual_set_data(network_data_smart_ptr data)
{
net_data.clear();
for(layer_data_list::const_iterator it2 = data->begin(); it2 != data->end(); ++it2)
{
std::vector<cuda_linear_buffer_device_smart_ptr> res;
for(std::vector<std::vector<float> >::iterator it = (*it2)->begin(); it != (*it2)->end(); ++it)
{
size_t buffer_size = it->size() * sizeof(float);
cuda_linear_buffer_device_smart_ptr new_buf(new cuda_linear_buffer_device(buffer_size));
cuda_safe_call(cudaMemcpy(*new_buf, &(*it->begin()), buffer_size, cudaMemcpyHostToDevice));
res.push_back(new_buf);
}
net_data.push_back(res);
}
}
void network_analyzer_cuda::actual_set_input_data(
const void * input,
neuron_data_type::input_type type_code)
{
unsigned int input_neuron_count = layer_config_list.front().get_neuron_count();
unsigned int output_neuron_count = layer_config_list.back().get_neuron_count();
size_t input_neuron_elem_size = neuron_data_type::get_input_size(type_code);
// Convert input
if (type_code == neuron_data_type::type_byte)
{
cuda_safe_call(cudaMemcpyAsync(
*input_buf,
input,
input_neuron_count * input_neuron_elem_size,
cudaMemcpyHostToDevice,
*command_stream));
int elem_count = (input_neuron_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
convert_compacted_to_raw_analazer_kernel<<<kernel_dims.first, kernel_dims.second, 0, *command_stream>>>(
*input_buf,
*input_converted_buf,
elem_count);
}
else if (type_code == neuron_data_type::type_float)
{
cuda_safe_call(cudaMemcpyAsync(
*input_converted_buf,
input,
input_neuron_count * input_neuron_elem_size,
cudaMemcpyHostToDevice,
*command_stream));
}
else throw neural_network_exception((boost::format("actual_set_input_data cannot handle input neurons of type %1%") % type_code).str());
// Forward updater
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin();
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin();
layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin();
for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++layer_config_it)
{
(*it)->enqueue_test(
0,
*command_stream,
*schema_data_it,
*net_data_it,
input_and_all_buffers_pack_it->first,
input_and_all_buffers_pack_it->second.output_neurons_buffer,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
1);
}
}
cuda_safe_call(cudaStreamSynchronize(*command_stream));
}
std::pair<layer_configuration_specific_snapshot_smart_ptr, layer_configuration_specific_snapshot_smart_ptr> network_analyzer_cuda::actual_run_backprop(
const layer_configuration_specific_snapshot& output_data,
const std::vector<unsigned int>& output_offset_list,
unsigned int output_layer_id,
const std::vector<std::pair<unsigned int, unsigned int> >& input_rectangle_borders)
{
std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin() + (output_errors_buffers.size() - output_layer_id - 1);
// Initialize output errors
{
float * dst = **output_errors_it;
cuda_util::set_with_value(*cuda_config, dst, 0.0F, (*output_errors_it)->get_size() / sizeof(float), *command_stream);
const layer_configuration_specific& output_config = layer_config_list[output_layer_id + 1];
int sequential_chunk_dimension_count = -1;
unsigned int sequential_copy_elem_count = 1;
while (sequential_chunk_dimension_count < (int)output_offset_list.size() - 1)
{
++sequential_chunk_dimension_count;
sequential_copy_elem_count *= output_data.config.dimension_sizes[sequential_chunk_dimension_count];
if (output_data.config.dimension_sizes[sequential_chunk_dimension_count] != output_config.dimension_sizes[sequential_chunk_dimension_count])
break;
}
++sequential_chunk_dimension_count;
std::vector<float>::const_iterator src_it = output_data.data.begin();
for(unsigned int feature_map_id = 0; feature_map_id < output_data.config.feature_map_count; ++feature_map_id)
{
unsigned int dst_fm_offset = feature_map_id * output_config.get_neuron_count_per_feature_map();
std::vector<unsigned int> src_list(output_offset_list.size(), 0);
bool cont = true;
while (cont)
{
bool should_copy = false;
for(std::vector<float>::const_iterator it = src_it; it != src_it + sequential_copy_elem_count; ++it)
{
if (*it != 0.0F)
{
should_copy = true;
break;
}
}
if (should_copy)
{
std::vector<unsigned int> dst_offset_list(output_offset_list);
for(unsigned int i = sequential_chunk_dimension_count; i < dst_offset_list.size(); ++i)
dst_offset_list[i] += src_list[i];
cuda_safe_call(cudaMemcpyAsync(dst + dst_fm_offset + output_config.get_pos(dst_offset_list), &(*src_it), sequential_copy_elem_count * sizeof(float), cudaMemcpyHostToDevice, *command_stream));
};
cont = false;
for(int i = sequential_chunk_dimension_count; i < src_list.size(); ++i)
{
src_list[i]++;
if (src_list[i] < output_data.config.dimension_sizes[i])
{
cont = true;
break;
}
else
src_list[i] = 0;
}
src_it += sequential_copy_elem_count;
}
}
}
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin() + (updater_input_and_all_buffers_pack.size() - output_layer_id - 1);
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin() + (updater_schema_data.size() - output_layer_id - 1);
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin() + (net_data.size() - output_layer_id - 1);
for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin() + (updater_list.size() - output_layer_id - 1); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++output_errors_it, ++net_data_it)
{
(*it)->enqueue_backprop(
*command_stream,
*schema_data_it,
*net_data_it,
input_and_all_buffers_pack_it->second.output_neurons_buffer,
input_and_all_buffers_pack_it->first,
*output_errors_it,
input_and_all_buffers_pack_it->second.input_errors_buffer,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
1);
}
layer_configuration_specific_snapshot_smart_ptr res(new layer_configuration_specific_snapshot());
layer_configuration_specific_snapshot_smart_ptr input_data(new layer_configuration_specific_snapshot());
// Copy input errors
{
res->config.feature_map_count = layer_config_list.front().feature_map_count;
input_data->config.feature_map_count = layer_config_list.front().feature_map_count;
unsigned int elem_count = res->config.feature_map_count;
for(int i = 0; i < input_rectangle_borders.size(); ++i)
{
unsigned int val = input_rectangle_borders[i].second - input_rectangle_borders[i].first;
elem_count *= val;
res->config.dimension_sizes.push_back(val);
input_data->config.dimension_sizes.push_back(val);
}
res->data.resize(elem_count);
input_data->data.resize(elem_count);
cuda_linear_buffer_device_smart_ptr input_errors_buf = updater_input_and_all_buffers_pack.front().second.input_errors_buffer;
if (input_errors_buf == 0)
input_errors_buf = output_errors_buffers.back();
float * src = *input_errors_buf;
float * src_input_data = *input_converted_buf;
const layer_configuration_specific& input_config = layer_config_list.front();
int sequential_chunk_dimension_count = -1;
unsigned int sequential_copy_elem_count = 1;
while (sequential_chunk_dimension_count < (int)input_config.dimension_sizes.size() - 1)
{
++sequential_chunk_dimension_count;
sequential_copy_elem_count *= res->config.dimension_sizes[sequential_chunk_dimension_count];
if (res->config.dimension_sizes[sequential_chunk_dimension_count] != input_config.dimension_sizes[sequential_chunk_dimension_count])
break;
}
++sequential_chunk_dimension_count;
std::vector<float>::iterator dst_it = res->data.begin();
std::vector<float>::iterator dst_input_data_it = input_data->data.begin();
for(unsigned int feature_map_id = 0; feature_map_id < input_config.feature_map_count; ++feature_map_id)
{
unsigned int src_fm_offset = feature_map_id * input_config.get_neuron_count_per_feature_map();
std::vector<unsigned int> dst_list(input_rectangle_borders.size(), 0);
bool cont = true;
while (cont)
{
std::vector<unsigned int> src_offset_list(input_rectangle_borders.size());
for(int i = 0; i < src_offset_list.size(); ++i)
src_offset_list[i] = input_rectangle_borders[i].first;
for(unsigned int i = sequential_chunk_dimension_count; i < src_offset_list.size(); ++i)
src_offset_list[i] += dst_list[i];
cuda_safe_call(cudaMemcpyAsync(&(*dst_it), src + src_fm_offset + input_config.get_pos(src_offset_list), sequential_copy_elem_count * sizeof(float), cudaMemcpyDeviceToHost, *command_stream));
cuda_safe_call(cudaMemcpyAsync(&(*dst_input_data_it), src_input_data + src_fm_offset + input_config.get_pos(src_offset_list), sequential_copy_elem_count * sizeof(float), cudaMemcpyDeviceToHost, *command_stream));
cont = false;
for(int i = sequential_chunk_dimension_count; i < dst_list.size(); ++i)
{
dst_list[i]++;
if (dst_list[i] < res->config.dimension_sizes[i])
{
cont = true;
break;
}
else
dst_list[i] = 0;
}
dst_it += sequential_copy_elem_count;
dst_input_data_it += sequential_copy_elem_count;
}
}
}
cuda_safe_call(cudaStreamSynchronize(*command_stream));
return std::make_pair(res, input_data);
}
}
}
|
c0483b4801844f7940717dc21aa180888811aaa9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define MATRIX_SIZE 64
__global__ void Square(int *A)
{
// Block index
/************Add your code***********/
// Thread index
/************Add your code***********/
//Calculation
/************Add your code***********/
}
int main()
{
int size = MATRIX_SIZE*MATRIX_SIZE*sizeof(int);
int *h_A = (int *)malloc(size);
int *d_A;
int i;
//Initialize A
for(i=0;i<MATRIX_SIZE*MATRIX_SIZE;i++)
{
h_A[i] = 2;
}
//Allocate the memory in GPU to store the content of A
/************Add your code***********/
//Copy h_A to d_A
/************Add your code***********/
//Allocate blocks and 32*32 threads per block.
/************Add your code***********/
//Run the kernel
/************Add your code***********/
//Copy the result back to CPU
/************Add your code***********/
//free GPU memory for d_A
/************Add your code***********/
//free Host Memory
free(h_A);
return 0;
}
|
c0483b4801844f7940717dc21aa180888811aaa9.cu
|
#include <stdio.h>
#include <cuda_runtime.h>
#define MATRIX_SIZE 64
__global__ void Square(int *A)
{
// Block index
/************Add your code***********/
// Thread index
/************Add your code***********/
//Calculation
/************Add your code***********/
}
int main()
{
int size = MATRIX_SIZE*MATRIX_SIZE*sizeof(int);
int *h_A = (int *)malloc(size);
int *d_A;
int i;
//Initialize A
for(i=0;i<MATRIX_SIZE*MATRIX_SIZE;i++)
{
h_A[i] = 2;
}
//Allocate the memory in GPU to store the content of A
/************Add your code***********/
//Copy h_A to d_A
/************Add your code***********/
//Allocate blocks and 32*32 threads per block.
/************Add your code***********/
//Run the kernel
/************Add your code***********/
//Copy the result back to CPU
/************Add your code***********/
//free GPU memory for d_A
/************Add your code***********/
//free Host Memory
free(h_A);
return 0;
}
|
8f05da8cd37ab6e765e1fbace700237bef8c65c4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n){
bools[index] = idata[index] != 0;
}
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= n) return;
if (bools[index]){
odata[indices[index]] = idata[index];
}
}
}
}
|
8f05da8cd37ab6e765e1fbace700237bef8c65c4.cu
|
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n){
bools[index] = idata[index] != 0;
}
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= n) return;
if (bools[index]){
odata[indices[index]] = idata[index];
}
}
}
}
|
4e18ebf9b2908296e227568c8a162368d0c1f3e7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "common_hip.cuh"
#define N 128*1024
#define MAX_OFFSET 128
__global__ void assignValue (float *a,float *b)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
a[idx] = 1.0f * idx;
b[idx] = 2.0f * idx;
}
__global__ void memcpyOffset(float *a,float *b,int offset)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x + offset;
a[idx] = b[idx];
}
int main()
{
hipError_t err;
hipEvent_t start,end;
size_t bytes = sizeof(float) * (N + MAX_OFFSET) ;
float * a,*b;
float * c;
c = (float *) malloc(bytes);
CHECK_CUDA_ERR( hipMalloc(&a,bytes) );
CHECK_CUDA_ERR( hipMalloc(&b,bytes) );
CHECK_CUDA_ERR( hipEventCreate(&start) );
CHECK_CUDA_ERR( hipEventCreate(&end) );
hipLaunchKernelGGL(( assignValue), dim3(128),dim3(1024), 0, 0, a,b);
CHECK_LAST_CUDA_ERR
int i = 0;
float ms = 0;
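// Time one offset-shifted copy kernel per offset value; as the offset grows the accesses become misaligned, so the printed times show how misalignment affects effective copy bandwidth.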
for (i = 0;i<MAX_OFFSET;i++ ) {
CHECK_CUDA_ERR( hipEventRecord(start,0));
hipLaunchKernelGGL(( memcpyOffset), dim3(128),dim3(1024), 0, 0, a,b,i);
CHECK_LAST_CUDA_ERR
CHECK_CUDA_ERR( hipEventRecord(end,0));
CHECK_CUDA_ERR( hipDeviceSynchronize());
CHECK_CUDA_ERR( hipEventElapsedTime(&ms,start,end));
printf("offset :%d time :%f milli second\n",i,ms);
}
CHECK_CUDA_ERR( hipEventDestroy(start) );
CHECK_CUDA_ERR( hipEventDestroy(end) );
CHECK_CUDA_ERR( hipFree(a) );
CHECK_CUDA_ERR( hipFree(b) );
free(c);
return 0;
}
|
4e18ebf9b2908296e227568c8a162368d0c1f3e7.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include "common.cuh"
#define N 128*1024
#define MAX_OFFSET 128
__global__ void assignValue (float *a,float *b)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
a[idx] = 1.0f * idx;
b[idx] = 2.0f * idx;
}
__global__ void memcpyOffset(float *a,float *b,int offset)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x + offset;
a[idx] = b[idx];
}
int main()
{
cudaError_t err;
cudaEvent_t start,end;
size_t bytes = sizeof(float) * (N + MAX_OFFSET) ;
float * a,*b;
float * c;
c = (float *) malloc(bytes);
CHECK_CUDA_ERR( cudaMalloc(&a,bytes) );
CHECK_CUDA_ERR( cudaMalloc(&b,bytes) );
CHECK_CUDA_ERR( cudaEventCreate(&start) );
CHECK_CUDA_ERR( cudaEventCreate(&end) );
assignValue<<<128,1024>>> (a,b);
CHECK_LAST_CUDA_ERR
int i = 0;
float ms = 0;
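// Time one offset-shifted copy kernel per offset value; as the offset grows the accesses become misaligned, so the printed times show how misalignment affects effective copy bandwidth.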
for (i = 0;i<MAX_OFFSET;i++ ) {
CHECK_CUDA_ERR( cudaEventRecord(start,0));
memcpyOffset<<<128,1024>>>(a,b,i);
CHECK_LAST_CUDA_ERR
CHECK_CUDA_ERR( cudaEventRecord(end,0));
CHECK_CUDA_ERR( cudaDeviceSynchronize());
CHECK_CUDA_ERR( cudaEventElapsedTime(&ms,start,end));
printf("offset :%d time :%f milli second\n",i,ms);
}
CHECK_CUDA_ERR( cudaEventDestroy(start) );
CHECK_CUDA_ERR( cudaEventDestroy(end) );
CHECK_CUDA_ERR( cudaFree(a) );
CHECK_CUDA_ERR( cudaFree(b) );
free(c);
return 0;
}
|
eb87f9630bb7f97a1280ed0b45a4cb42b7c0c96f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <chrono>
#include "../../../mem_alloc/mem_alloc.h"
#define ALL __noinline__ __host__ __device__
#include "coal.h"
#include "../configuration.h"
#define gpuErrchk(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file,
line);
if (abort) exit(code);
}
}
static const int kCudaBlockSize = 256;
class BodyType {
public:
float pos_x;
float pos_y;
float vel_x;
float vel_y;
float mass;
float force_x;
float force_y;
public:
__noinline__ __device__ virtual void initBody(int idx) = 0;
ALL BodyType() {}
ALL BodyType(int idx) {}
ALL virtual float computeDistance(BodyType *other) = 0;
ALL virtual float computeForce(BodyType *other, float dist) = 0;
ALL virtual void updateVelX() = 0;
ALL virtual void updateVelY() = 0;
ALL virtual void updatePosX() = 0;
ALL virtual void updatePosY() = 0;
ALL virtual void initForce() = 0;
ALL virtual void updateForceX(BodyType *other, float F) = 0;
ALL virtual void updateForceY(BodyType *other, float F) = 0;
void add_checksum() {}
// Only for rendering.
ALL float pos_x_() const { return pos_x; }
ALL float pos_y_() const { return pos_y; }
ALL float mass_() const { return mass; }
};
class Body : public BodyType {
public:
__noinline__ __device__ void initBody(int idx) {
hiprandState_t rand_state;
hiprand_init(kSeed, idx, 0, &rand_state);
pos_x = 2 * hiprand_uniform(&rand_state) - 1;
pos_y = 2 * hiprand_uniform(&rand_state) - 1;
vel_x = (hiprand_uniform(&rand_state) - 0.5) / 1000;
vel_y = (hiprand_uniform(&rand_state) - 0.5) / 1000;
mass = (hiprand_uniform(&rand_state) / 2 + 0.5) * kMaxMass;
}
ALL Body() {}
ALL float computeDistance(BodyType *other) {
float dx;
float dy;
float dist;
dx = this->pos_x - other->pos_x;
dy = this->pos_y - other->pos_y;
dist = sqrt(dx * dx + dy * dy);
return dist;
}
ALL float computeForce(BodyType *other, float dist) {
float F = kGravityConstant * this->mass * other->mass /
(dist * dist + kDampeningFactor);
return F;
}
ALL void updateVelX() { this->vel_x += this->force_x * kDt / this->mass; }
ALL void updateVelY() { this->vel_y += this->force_y * kDt / this->mass; }
ALL void updatePosX() { this->pos_x += this->vel_x * kDt; }
ALL void updatePosY() { this->pos_y += this->vel_y * kDt; }
ALL void initForce() {
this->force_x = 0;
this->force_y = 0;
}
ALL void updateForceX(BodyType *other, float F) {
float dx;
float dy;
float dist;
dx = -1 * (this->pos_x - other->pos_x);
dy = -1 * (this->pos_y - other->pos_y);
dist = sqrt(dx * dx + dy * dy);
this->force_x += F * dx / dist;
}
ALL void updateForceY(BodyType *other, float F) {
float dx;
float dy;
float dist;
dx = -1 * (this->pos_x - other->pos_x);
dy = -1 * (this->pos_y - other->pos_y);
dist = sqrt(dx * dx + dy * dy);
this->force_y += F * dy / dist;
}
void add_checksum();
// Only for rendering.
ALL float pos_x_() const { return pos_x; }
ALL float pos_y_() const { return pos_y; }
ALL float mass_() const { return mass; }
};
__device__ float device_checksum;
__managed__ range_tree_node *range_tree;
__managed__ unsigned tree_size;
__managed__ void *temp_coal;
__global__ void Body_compute_force(BodyType **dev_bodies) {
int id = threadIdx.x + blockDim.x * blockIdx.x;
float dist;
float F;
void **vtable;
if (id < kNumBodies) {
BodyType *ptr = dev_bodies[id];
COAL_BodyType_initForce(ptr);
ptr->initForce();
// printf("%d ddd\n", id);
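// All-pairs O(N^2) interaction: accumulate on this body the force contributed by every other body.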
for (int i = 0; i < kNumBodies; ++i) {
// Do not compute force with the body itself.
if (id != i) {
COAL_BodyType_computeDistance(ptr);
dist = ptr->computeDistance(dev_bodies[i]);
COAL_BodyType_computeForce(ptr);
F = ptr->computeForce(dev_bodies[i], dist);
COAL_BodyType_updateForceX(ptr);
ptr->updateForceX(dev_bodies[i], F);
COAL_BodyType_updateForceY(ptr);
ptr->updateForceY(dev_bodies[i], F);
}
}
}
}
__global__ void Body_update(BodyType **dev_bodies) {
int id = threadIdx.x + blockDim.x * blockIdx.x;
void **vtable;
if (id < kNumBodies) {
BodyType *ptr = dev_bodies[id];
COAL_BodyType_updateVelX(ptr);
ptr->updateVelX();
COAL_BodyType_updateVelY(ptr);
ptr->updateVelY();
COAL_BodyType_updatePosX(ptr);
ptr->updatePosX();
COAL_BodyType_updatePosY(ptr);
ptr->updatePosY();
if (ptr->pos_x < -1 || ptr->pos_x > 1) {
ptr->vel_x = -ptr->vel_x;
}
if (ptr->pos_y < -1 || ptr->pos_y > 1) {
ptr->vel_y = -ptr->vel_y;
}
}
}
__device__ void Body_add_checksum(BodyType **dev_bodies, int id) {
atomicAdd(&device_checksum,
dev_bodies[id]->pos_x + dev_bodies[id]->pos_y * 2 +
dev_bodies[id]->vel_x * 3 + dev_bodies[id]->vel_y * 4);
}
void instantiate_bodies(BodyType **bodies, obj_alloc *alloc) {
for (int i = 0; i < kNumBodies; i++)
bodies[i] = (Body *)alloc->my_new<Body>();
}
__global__ void kernel_initialize_bodies(BodyType **bodies) {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kNumBodies;
i += blockDim.x * gridDim.x) {
bodies[i]->initBody(i);
}
}
__global__ void kernel_compute_force() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kNumBodies;
i += blockDim.x * gridDim.x) {
// Body_compute_force(i);
}
}
__global__ void kernel_update() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kNumBodies;
i += blockDim.x * gridDim.x) {
// Body_update(i);
}
}
__global__ void kernel_compute_checksum(BodyType **bodies) {
device_checksum = 0.0f;
for (int i = 0; i < kNumBodies; ++i) {
Body_add_checksum(bodies, i);
}
}
int main(int argc, char ** argv) {
BodyType **dev_bodies;
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
obj_alloc my_obj_alloc(&shared_mem, atoll(argv[1]));
// Allocate and create Body objects.
dev_bodies = (BodyType **)my_obj_alloc.calloc<BodyType *>(kNumBodies);
printf("init bodies...\n");
instantiate_bodies(dev_bodies, &my_obj_alloc);
my_obj_alloc.toDevice();
hipLaunchKernelGGL(( kernel_initialize_bodies), dim3(128), dim3(128), 0, 0, dev_bodies);
gpuErrchk(hipDeviceSynchronize());
my_obj_alloc.create_tree();
range_tree = my_obj_alloc.get_range_tree();
tree_size = my_obj_alloc.get_tree_size();
printf("init done...\n");
auto time_start = std::chrono::system_clock::now();
printf("Kernel exec...\n");
for (int i = 0; i < kNumIterations; ++i) {
if (i % 300 == 0) printf("Start: BodyComputeForce(%d)\n", i);
hipLaunchKernelGGL(( Body_compute_force), dim3((kNumBodies + kCudaBlockSize - 1) / kCudaBlockSize),
dim3(kCudaBlockSize), 0, 0, dev_bodies);
gpuErrchk(hipDeviceSynchronize());
// printf("Body_compute_force(%d)\n",i);
hipLaunchKernelGGL(( Body_update), dim3((kNumBodies + kCudaBlockSize - 1) / kCudaBlockSize),
dim3(kCudaBlockSize), 0, 0, dev_bodies);
gpuErrchk(hipDeviceSynchronize());
if (i % 300 == 0) printf("Finish: BodyComputeForce(%d)\n", i);
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto micros =
std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
printf("%lu\n", micros);
#ifndef NDEBUG
hipLaunchKernelGGL(( kernel_compute_checksum), dim3(1), dim3(1), 0, 0, dev_bodies);
gpuErrchk(hipDeviceSynchronize());
float checksum;
hipMemcpyFromSymbol(&checksum, device_checksum, sizeof(device_checksum), 0,
hipMemcpyDeviceToHost);
printf("Checksum: %f\n", checksum);
#endif // NDEBUG
hipFree(dev_bodies);
return 0;
}
|
eb87f9630bb7f97a1280ed0b45a4cb42b7c0c96f.cu
|
#include <curand_kernel.h>
#include <stdio.h>
#include <chrono>
#include "../../../mem_alloc/mem_alloc.h"
#define ALL __noinline__ __host__ __device__
#include "coal.h"
#include "../configuration.h"
#define gpuErrchk(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file,
line);
if (abort) exit(code);
}
}
static const int kCudaBlockSize = 256;
class BodyType {
public:
float pos_x;
float pos_y;
float vel_x;
float vel_y;
float mass;
float force_x;
float force_y;
public:
__noinline__ __device__ virtual void initBody(int idx) = 0;
ALL BodyType() {}
ALL BodyType(int idx) {}
ALL virtual float computeDistance(BodyType *other) = 0;
ALL virtual float computeForce(BodyType *other, float dist) = 0;
ALL virtual void updateVelX() = 0;
ALL virtual void updateVelY() = 0;
ALL virtual void updatePosX() = 0;
ALL virtual void updatePosY() = 0;
ALL virtual void initForce() = 0;
ALL virtual void updateForceX(BodyType *other, float F) = 0;
ALL virtual void updateForceY(BodyType *other, float F) = 0;
void add_checksum() {}
// Only for rendering.
ALL float pos_x_() const { return pos_x; }
ALL float pos_y_() const { return pos_y; }
ALL float mass_() const { return mass; }
};
class Body : public BodyType {
public:
__noinline__ __device__ void initBody(int idx) {
curandState rand_state;
curand_init(kSeed, idx, 0, &rand_state);
pos_x = 2 * curand_uniform(&rand_state) - 1;
pos_y = 2 * curand_uniform(&rand_state) - 1;
vel_x = (curand_uniform(&rand_state) - 0.5) / 1000;
vel_y = (curand_uniform(&rand_state) - 0.5) / 1000;
mass = (curand_uniform(&rand_state) / 2 + 0.5) * kMaxMass;
}
ALL Body() {}
ALL float computeDistance(BodyType *other) {
float dx;
float dy;
float dist;
dx = this->pos_x - other->pos_x;
dy = this->pos_y - other->pos_y;
dist = sqrt(dx * dx + dy * dy);
return dist;
}
ALL float computeForce(BodyType *other, float dist) {
float F = kGravityConstant * this->mass * other->mass /
(dist * dist + kDampeningFactor);
return F;
}
ALL void updateVelX() { this->vel_x += this->force_x * kDt / this->mass; }
ALL void updateVelY() { this->vel_y += this->force_y * kDt / this->mass; }
ALL void updatePosX() { this->pos_x += this->vel_x * kDt; }
ALL void updatePosY() { this->pos_y += this->vel_y * kDt; }
ALL void initForce() {
this->force_x = 0;
this->force_y = 0;
}
ALL void updateForceX(BodyType *other, float F) {
float dx;
float dy;
float dist;
dx = -1 * (this->pos_x - other->pos_x);
dy = -1 * (this->pos_y - other->pos_y);
dist = sqrt(dx * dx + dy * dy);
this->force_x += F * dx / dist;
}
ALL void updateForceY(BodyType *other, float F) {
float dx;
float dy;
float dist;
dx = -1 * (this->pos_x - other->pos_x);
dy = -1 * (this->pos_y - other->pos_y);
dist = sqrt(dx * dx + dy * dy);
this->force_y += F * dy / dist;
}
void add_checksum();
// Only for rendering.
ALL float pos_x_() const { return pos_x; }
ALL float pos_y_() const { return pos_y; }
ALL float mass_() const { return mass; }
};
__device__ float device_checksum;
__managed__ range_tree_node *range_tree;
__managed__ unsigned tree_size;
__managed__ void *temp_coal;
__global__ void Body_compute_force(BodyType **dev_bodies) {
int id = threadIdx.x + blockDim.x * blockIdx.x;
float dist;
float F;
void **vtable;
if (id < kNumBodies) {
BodyType *ptr = dev_bodies[id];
COAL_BodyType_initForce(ptr);
ptr->initForce();
// printf("%d ddd\n", id);
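// All-pairs O(N^2) interaction: accumulate on this body the force contributed by every other body.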
for (int i = 0; i < kNumBodies; ++i) {
// Do not compute force with the body itself.
if (id != i) {
COAL_BodyType_computeDistance(ptr);
dist = ptr->computeDistance(dev_bodies[i]);
COAL_BodyType_computeForce(ptr);
F = ptr->computeForce(dev_bodies[i], dist);
COAL_BodyType_updateForceX(ptr);
ptr->updateForceX(dev_bodies[i], F);
COAL_BodyType_updateForceY(ptr);
ptr->updateForceY(dev_bodies[i], F);
}
}
}
}
__global__ void Body_update(BodyType **dev_bodies) {
int id = threadIdx.x + blockDim.x * blockIdx.x;
void **vtable;
if (id < kNumBodies) {
BodyType *ptr = dev_bodies[id];
COAL_BodyType_updateVelX(ptr);
ptr->updateVelX();
COAL_BodyType_updateVelY(ptr);
ptr->updateVelY();
COAL_BodyType_updatePosX(ptr);
ptr->updatePosX();
COAL_BodyType_updatePosY(ptr);
ptr->updatePosY();
if (ptr->pos_x < -1 || ptr->pos_x > 1) {
ptr->vel_x = -ptr->vel_x;
}
if (ptr->pos_y < -1 || ptr->pos_y > 1) {
ptr->vel_y = -ptr->vel_y;
}
}
}
__device__ void Body_add_checksum(BodyType **dev_bodies, int id) {
atomicAdd(&device_checksum,
dev_bodies[id]->pos_x + dev_bodies[id]->pos_y * 2 +
dev_bodies[id]->vel_x * 3 + dev_bodies[id]->vel_y * 4);
}
void instantiate_bodies(BodyType **bodies, obj_alloc *alloc) {
for (int i = 0; i < kNumBodies; i++)
bodies[i] = (Body *)alloc->my_new<Body>();
}
__global__ void kernel_initialize_bodies(BodyType **bodies) {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kNumBodies;
i += blockDim.x * gridDim.x) {
bodies[i]->initBody(i);
}
}
__global__ void kernel_compute_force() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kNumBodies;
i += blockDim.x * gridDim.x) {
// Body_compute_force(i);
}
}
__global__ void kernel_update() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < kNumBodies;
i += blockDim.x * gridDim.x) {
// Body_update(i);
}
}
__global__ void kernel_compute_checksum(BodyType **bodies) {
device_checksum = 0.0f;
for (int i = 0; i < kNumBodies; ++i) {
Body_add_checksum(bodies, i);
}
}
int main(int argc, char ** argv) {
BodyType **dev_bodies;
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
obj_alloc my_obj_alloc(&shared_mem, atoll(argv[1]));
// Allocate and create Body objects.
dev_bodies = (BodyType **)my_obj_alloc.calloc<BodyType *>(kNumBodies);
printf("init bodies...\n");
instantiate_bodies(dev_bodies, &my_obj_alloc);
my_obj_alloc.toDevice();
kernel_initialize_bodies<<<128, 128>>>(dev_bodies);
gpuErrchk(cudaDeviceSynchronize());
my_obj_alloc.create_tree();
range_tree = my_obj_alloc.get_range_tree();
tree_size = my_obj_alloc.get_tree_size();
printf("init done...\n");
auto time_start = std::chrono::system_clock::now();
printf("Kernel exec...\n");
for (int i = 0; i < kNumIterations; ++i) {
if (i % 300 == 0) printf("Start: BodyComputeForce(%d)\n", i);
Body_compute_force<<<(kNumBodies + kCudaBlockSize - 1) / kCudaBlockSize,
kCudaBlockSize>>>(dev_bodies);
gpuErrchk(cudaDeviceSynchronize());
// printf("Body_compute_force(%d)\n",i);
Body_update<<<(kNumBodies + kCudaBlockSize - 1) / kCudaBlockSize,
kCudaBlockSize>>>(dev_bodies);
gpuErrchk(cudaDeviceSynchronize());
if (i % 300 == 0) printf("Finish: BodyComputeForce(%d)\n", i);
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto micros =
std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
printf("%lu\n", micros);
#ifndef NDEBUG
kernel_compute_checksum<<<1, 1>>>(dev_bodies);
gpuErrchk(cudaDeviceSynchronize());
float checksum;
cudaMemcpyFromSymbol(&checksum, device_checksum, sizeof(device_checksum), 0,
cudaMemcpyDeviceToHost);
printf("Checksum: %f\n", checksum);
#endif // NDEBUG
cudaFree(dev_bodies);
return 0;
}
|
4df45851f711cfe14648ffc5e4e07ea78d106490.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <THHUNN/THHUNN.h>
#include <TH/THHalf.h>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <THH/THHApply.cuh>
template <typename T>
struct sqrtupdateOutput_functor
{
const T bias;
sqrtupdateOutput_functor(T bias_)
: bias(bias_)
{}
__device__ void operator()(T *output, const T *input) const
{
*output = sqrt(*input + bias);
}
};
template <typename T>
struct sqrtupdateGradInput_functor
{
sqrtupdateGradInput_functor() {}
__device__ void operator()(T *gradInput, const T *output, const T *gradOutput) const
{
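// Chain rule: d/dx sqrt(x + bias) = 0.5 / sqrt(x + bias) = 0.5 / output, so gradInput = 0.5 * gradOutput / output; an output of zero maps to a zero gradient to avoid division by zero.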
*gradInput = (THCNumerics<T>::eq(*output,ScalarConvert<float, T>::to(0.0f))) ? ScalarConvert<float, T>::to(0.0f) : ((ScalarConvert<float, T>::to(0.5f) * *gradOutput) / *output);
}
};
#include <THHUNN/generic/Sqrt.hip>
#include <THH/THHGenerateFloatTypes.h>
|
4df45851f711cfe14648ffc5e4e07ea78d106490.cu
|
#include <THCUNN/THCUNN.h>
#include <TH/THHalf.h>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <THC/THCApply.cuh>
template <typename T>
struct sqrtupdateOutput_functor
{
const T bias;
sqrtupdateOutput_functor(T bias_)
: bias(bias_)
{}
__device__ void operator()(T *output, const T *input) const
{
*output = sqrt(*input + bias);
}
};
template <typename T>
struct sqrtupdateGradInput_functor
{
sqrtupdateGradInput_functor() {}
__device__ void operator()(T *gradInput, const T *output, const T *gradOutput) const
{
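// Chain rule: d/dx sqrt(x + bias) = 0.5 / sqrt(x + bias) = 0.5 / output, so gradInput = 0.5 * gradOutput / output; an output of zero maps to a zero gradient to avoid division by zero.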
*gradInput = (THCNumerics<T>::eq(*output,ScalarConvert<float, T>::to(0.0f))) ? ScalarConvert<float, T>::to(0.0f) : ((ScalarConvert<float, T>::to(0.5f) * *gradOutput) / *output);
}
};
#include <THCUNN/generic/Sqrt.cu>
#include <THC/THCGenerateFloatTypes.h>
|
d06bfb0f51c4917a6e9ed9ce60db81d19d160c0b.hip
|
// !!! This is a file automatically generated by hipify!!!
// incrementArray.cu
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
void incrementArrayOnHost(float *a, int N)
{
int i;
for (i=0; i < N; i++) a[i] = a[i]+1.f;
}
__global__ void incrementArrayOnDevice(float *a, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<N) a[idx] = a[idx]+1.f;
}
int main(void)
{
float *a_h, *b_h; // pointers to host memory
float *a_d; // pointer to device memory
int i, N = 10;
size_t size = N*sizeof(float);
// allocate arrays on host
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
// allocate array on device
hipMalloc((void **) &a_d, size);
// initialization of host data
for (i=0; i<N; i++) a_h[i] = (float)i;
// copy data from host to device
hipMemcpy(a_d, a_h, sizeof(float)*N, hipMemcpyHostToDevice);
// do calculation on host
incrementArrayOnHost(a_h, N);
// do calculation on device:
// Part 1 of 2. Compute execution configuration
int blockSize = 4;
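// Ceiling division: add one extra block when N is not a multiple of blockSize so every element is covered.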
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
// Part 2 of 2. Call incrementArrayOnDevice kernel
hipLaunchKernelGGL(( incrementArrayOnDevice) , dim3(nBlocks), dim3(blockSize) , 0, 0, a_d, N);
// Retrieve result from device and store in b_h
hipMemcpy(b_h, a_d, sizeof(float)*N, hipMemcpyDeviceToHost);
// check results
for (i=0; i<N; i++) assert(a_h[i] == b_h[i]);
// cleanup
free(a_h); free(b_h); hipFree(a_d);
}
|
d06bfb0f51c4917a6e9ed9ce60db81d19d160c0b.cu
|
// incrementArray.cu
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
void incrementArrayOnHost(float *a, int N)
{
int i;
for (i=0; i < N; i++) a[i] = a[i]+1.f;
}
__global__ void incrementArrayOnDevice(float *a, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<N) a[idx] = a[idx]+1.f;
}
int main(void)
{
float *a_h, *b_h; // pointers to host memory
float *a_d; // pointer to device memory
int i, N = 10;
size_t size = N*sizeof(float);
// allocate arrays on host
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
// allocate array on device
cudaMalloc((void **) &a_d, size);
// initialization of host data
for (i=0; i<N; i++) a_h[i] = (float)i;
// copy data from host to device
cudaMemcpy(a_d, a_h, sizeof(float)*N, cudaMemcpyHostToDevice);
// do calculation on host
incrementArrayOnHost(a_h, N);
// do calculation on device:
// Part 1 of 2. Compute execution configuration
int blockSize = 4;
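// Ceiling division: add one extra block when N is not a multiple of blockSize so every element is covered.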
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
// Part 2 of 2. Call incrementArrayOnDevice kernel
incrementArrayOnDevice <<< nBlocks, blockSize >>> (a_d, N);
// Retrieve result from device and store in b_h
cudaMemcpy(b_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
// check results
for (i=0; i<N; i++) assert(a_h[i] == b_h[i]);
// cleanup
free(a_h); free(b_h); cudaFree(a_d);
}
|
31d5cfdbcd4625f58ef070f8702e309841af2dc1.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0],prhs[1],prhs[2] := mxGPUArray or CPU Array}[double or complex double]
* Create sparse matrix
* Z=CuMatlab_sparse(X)
* Z=CuMatlab_sparse(X,Y)
* Z=CuMatlab_sparse(X,Y,Z)
* Z=CuMatlab_sparse(X,Y,Z,row,column)
* Z=CuMatlab_sparse(X,Y,Z,row,column,nz)
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <algorithm>
#include "SPARSEHELPER.h"
#include "ERRORCHK.h"
#include <omp.h>
// Input Arguments
#define ROW prhs[0]
#define COLUMN prhs[1]
#define VALUE prhs[2]
#define NROWS prhs[3]
#define NCOLS prhs[4]
// Output Arguments
#define OUTPUTMATRIX plhs[0]
extern "C" static void mexCuMatlab_sparseZ(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode =hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be five.";
char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one.";
if ((nrhs!=5)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg);
}
if ((nlhs!=1)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg);
}
char *input_buf0;
input_buf0 = mxArrayToString(ROW);
if ((mxIsChar(ROW))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0);
}
char *input_buf1;
input_buf1 = mxArrayToString(COLUMN);
if ((mxIsChar(COLUMN))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1);
}
char *input_buf2;
input_buf2 = mxArrayToString(VALUE);
if ((mxIsChar(VALUE))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(THIRD ARGUMENT) must be array, or gpuArray object not %s\n",input_buf2);
}
if (mxIsGPUArray(ROW) && mxIsGPUArray(COLUMN) && mxIsGPUArray(VALUE) ) {
mxInitGPU();
mxGPUArray const *ROWGPU;
ROWGPU= mxGPUCreateFromMxArray(ROW);
mxGPUArray const *COLUMNGPU;
COLUMNGPU= mxGPUCreateFromMxArray(COLUMN);
mxGPUArray const *VALUEGPU;
VALUEGPU= mxGPUCreateFromMxArray(VALUE);
if((mxGPUIsSparse(ROWGPU)==1)|| (mxGPUIsSparse(COLUMNGPU)==1) || (mxGPUIsSparse(VALUEGPU)==1)){
//plhs[0] = mxGPUCreateMxArrayOnGPU(INPUTMATRIXGPUx);
printf("Warning! Input(FIRST, SECOND and THIRD ARGUMENTS) must be non sparse! \n");
// mxGPUDestroyGPUArray(INPUTMATRIXGPUx);
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
return;
}
if ((mxGPUGetClassID(VALUEGPU) != mxDOUBLE_CLASS)) {
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input(THIRD ARGUMENT) must be double precision.");
}
if ( (mxGPUGetComplexity(ROWGPU) != mxREAL) || (mxGPUGetComplexity(COLUMNGPU) != mxREAL) || (mxGPUGetComplexity(VALUEGPU) != mxCOMPLEX)) {
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input(FIRST AND SECOND ARGUMENTS) must be real with no imaginary components and THIRD ARGUMENT must be complex.");
}
if ( !(mxIsScalar(NROWS)) || !(mxIsScalar(NCOLS))) {
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input (FOURTH and FIFTH ARGUMENTS) must be scalar.");
}
int NrowsA= (int)mxGetScalar(NROWS);
int NcolsA= (int)mxGetScalar(NCOLS);
int nnzR= static_cast<int> (mxGPUGetNumberOfElements(ROWGPU));
int nnzC= static_cast<int> (mxGPUGetNumberOfElements(COLUMNGPU));
//int nnzV= static_cast<int> (mxGPUGetNumberOfElements(VALUEGPU));
int nnzV = nnzR;
int NNZMAXA=nnzR;
if ( (nnzR!= nnzC) || (nnzC != nnzV)|| (nnzR != nnzV) ) {
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input vectors (FIRST, SECOND and THIRD ARGUMENTS) must be the same lengths.");
}
if ( nnzR>(NrowsA*NcolsA) ) {
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, index exceeds array bounds [number of non zero greater than matrix dimensions (row*column)].");
}
mxArray * RW = mxGPUCreateMxArrayOnCPU(ROWGPU);
mxArray * CL = mxGPUCreateMxArrayOnCPU(COLUMNGPU);
mxArray * VL = mxGPUCreateMxArrayOnCPU(VALUEGPU);
// mxComplexDouble *pc;
// pc = mxGetComplexDoubles(VL);
std::vector<MATRIXC> vect;
int j;
#pragma omp for schedule(static) nowait
for ( j = 0; j < nnzR; ++j) {
vect.push_back( MATRIXC((static_cast<int> ((mxGetDoubles(RW))[j])), (static_cast<int> ((mxGetDoubles(CL))[j])), (static_cast<double> ((mxGetComplexDoubles(VL))[j].real)), (static_cast<double> ((mxGetComplexDoubles(VL))[j].imag))));
}
std::sort(vect.begin(), vect.end());
std::vector<MATRIXC> vect_temp;
vect_temp= vect;
int i = 0;
std::vector<MATRIXC>::iterator ity = vect.begin();
for (std::vector<MATRIXC>::iterator itx = vect.begin(); itx != vect.end(); itx++){
ity = itx + 1;
while (ity != vect.end())
{
//for (ity ; ity != vectx.end(); ity++){
if (itx->row_C == ity->row_C && itx->column_C == ity->column_C){
vect_temp[i].value_C_real = vect_temp[i].value_C_real + ity->value_C_real;
vect_temp[i].value_C_img = vect_temp[i].value_C_img + ity->value_C_img;
vect_temp[std::distance(vect.begin(), ity)].checked = true;
}
ity++;
//}
}
i++;
}
for (auto it = vect_temp.begin(); it != vect_temp.end();) {
if ((it->checked==true) ||(it->value_C_real==0 && it->value_C_img==0 )) {
it = vect_temp.erase(it);
}
else {
++it;
}
}
nnzR=nnzC=nnzV= (int)vect_temp.size();
mxArray * ROWx =mxCreateNumericMatrix(nnzR, 1, mxINT32_CLASS, mxREAL);
int *h_A_RowIndices_coo = (int *)mxGetInt32s(ROWx);
#pragma omp parallel for shared(nnzR) private(i)
for (i = 0; i < nnzR; ++i) {
// h_A_RowIndices_coo[i] = static_cast<int> ((mxGetPr(RW))[i]);
h_A_RowIndices_coo[i] =vect_temp[i].row_C;
}
mxArray * COLUMNx =mxCreateNumericMatrix(nnzC, 1, mxINT32_CLASS, mxREAL);
int *h_A_ColIndices_coo = (int *)mxGetInt32s(COLUMNx);
#pragma omp parallel for shared(nnzC) private(i)
for (i = 0; i < nnzC; ++i) {
// h_A_ColIndices_coo[i] = static_cast<int> ((mxGetPr(CL))[i]);
h_A_ColIndices_coo[i] = vect_temp[i].column_C;
}
mxArray * VALUEx = mxCreateNumericMatrix(nnzV, 1, mxDOUBLE_CLASS, mxCOMPLEX);
hipDoubleComplex *h_A1_coo = (hipDoubleComplex *)mxGetComplexDoubles(VALUEx);
#pragma omp parallel for shared(nnzV) private(i)
for (i = 0; i < nnzV; ++i) {
h_A1_coo[i].x = vect_temp[i].value_C_real;
h_A1_coo[i].y = vect_temp[i].value_C_img;
}
int Nr= *std::max_element(h_A_RowIndices_coo, h_A_RowIndices_coo + nnzR, max_elem);
int Nc= *std::max_element(h_A_ColIndices_coo, h_A_ColIndices_coo + nnzC, max_elem);
if ( (Nr>NrowsA) || (Nc>NcolsA) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, index exceeds array bounds: max(first vector)> fourth argument's value or max(second vector)> fifth argument's value .");
}
// NrowsA= h_A_RowIndices_coo[nnzR-1];
// NcolsA= *std::max_element(h_A_ColIndices_coo, h_A_ColIndices_coo + nnzC, max_elem);
hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle));
//hipDoubleComplex *d_A; gpuErrchk(hipMalloc(&d_A, nnzV * sizeof(*d_A)));
//int *d_A_RowIndices; gpuErrchk(hipMalloc(&d_A_RowIndices, (NrowsA + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; gpuErrchk(hipMalloc(&d_A_ColIndices, nnzV * sizeof(*d_A_ColIndices)));
//int *d_cooRowIndA; gpuErrchk(hipMalloc(&d_cooRowIndA, nnzV * sizeof(*d_cooRowIndA)));
size_t pivot_dimensA[1] = {nnzV};
size_t pivot_dimensROW_A[1] = {NrowsA + 1};
size_t pivot_dimensCOL_A[1] = {nnzV};
size_t pivot_dimensCOO_A[1] = {nnzV};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *d_A = (hipDoubleComplex *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
mxGPUArray * COO_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOO_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_cooRowIndA = (int *)mxGPUGetData(COO_A);
gpuErrchk(hipMemcpy(d_A, h_A1_coo, nnzV * sizeof(*d_A), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_A_ColIndices, h_A_ColIndices_coo, nnzV * sizeof(*d_A_ColIndices), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_cooRowIndA, h_A_RowIndices_coo, nnzV * sizeof(*d_cooRowIndA), hipMemcpyHostToDevice));
hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA));
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE);
// A: sort the COO triplets by row, gather the values through the sort permutation, then compress the sorted row indices into CSR row offsets.
int *Pa = NULL;
void *pBuffera = NULL;
size_t pBufferSizeInBytesa = 0;
hipsparseXcoosort_bufferSizeExt(handle, NrowsA, NcolsA,
nnzV,
d_cooRowIndA,
d_A_ColIndices, &pBufferSizeInBytesa);
gpuErrchk(hipMalloc(&pBuffera, sizeof(char)*pBufferSizeInBytesa));
gpuErrchk(hipMalloc(&Pa, sizeof(int)*nnzV));
hipsparseCreateIdentityPermutation(handle, nnzV, Pa);
cusparseSafeCall(hipsparseXcoosortByRow(handle, NrowsA, NcolsA,
nnzV,
d_cooRowIndA,
d_A_ColIndices,
Pa,
pBuffera));
cusparseSafeCall(hipsparseZgthr(handle, nnzV, d_A, d_A, Pa, HIPSPARSE_INDEX_BASE_ZERO));
cusparseSafeCall(hipsparseXcoo2csr(handle,
d_cooRowIndA,
nnzV,
NrowsA,
d_A_RowIndices,
HIPSPARSE_INDEX_BASE_ONE));
size_t pivot_dimensionsrow[1] = {nnzR};
size_t pivot_dimensionscolumn[1] = {NcolsA+1};
size_t pivot_dimensionsvalue[1] = {nnzV};
mxGPUArray * ROW_SORT1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *ROWSORT = (int *)mxGPUGetData(ROW_SORT1);
mxGPUArray * COL_SORT1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionscolumn, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *COLSORT = (int *)mxGPUGetData(COL_SORT1);
mxGPUArray *VAL_SORT1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *VALSORT = (hipDoubleComplex *)mxGPUGetData(VAL_SORT1);
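// Convert the 1-based CSR matrix to CSC so the values, row indices and column pointers match MATLAB's compressed-column sparse layout (indices are shifted to 0-based before mxSetIr/mxSetJc below).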
cusparseSafeCall(hipsparseZcsr2csc(handle, NrowsA, NcolsA, nnzV, d_A, d_A_RowIndices, d_A_ColIndices, VALSORT, ROWSORT, COLSORT, HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ONE));
//gpuErrchk(hipFree(d_A));
//gpuErrchk(hipFree(d_A_RowIndices));
//gpuErrchk(hipFree(d_A_ColIndices));
//gpuErrchk(hipFree(d_cooRowIndA));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(COO_A);
gpuErrchk(hipFree(pBuffera));
gpuErrchk(hipFree(Pa));
mwSize nnzm=(mwSize)nnzV;
OUTPUTMATRIX = mxCreateSparse(NrowsA,NcolsA,nnzm,mxCOMPLEX);
mxArray *RS= mxGPUCreateMxArrayOnCPU(ROW_SORT1);
int * rs= (int *)mxGetInt32s(RS);
mxArray *CS= mxGPUCreateMxArrayOnCPU(COL_SORT1);
int * cs= (int *)mxGetInt32s(CS);
mwIndex *irs,*jcs;
irs = static_cast<mwIndex *> (mxMalloc (nnzR * sizeof(mwIndex)));
#pragma omp parallel for shared(nnzR) private(i)
for (i = 0; i < nnzR; ++i) {
irs[i] = static_cast<mwIndex> (rs[i])-1;
}
jcs = static_cast<mwIndex *> (mxMalloc ((NcolsA+1) * sizeof(mwIndex)));
int nc1= NcolsA+1;
#pragma omp parallel for shared(nc1) private(i)
for (i = 0; i < nc1; ++i) {
jcs[i] = static_cast<mwIndex> (cs[i])-1;
}
mxComplexDouble* PRS = (mxComplexDouble*) mxMalloc (nnzV * sizeof(mxComplexDouble));
gpuErrchk(hipMemcpy(PRS, VALSORT, nnzV * sizeof(mxComplexDouble), hipMemcpyDeviceToHost));
mxFree (mxGetJc (OUTPUTMATRIX)) ;
mxFree (mxGetIr (OUTPUTMATRIX)) ;
mxFree (mxGetComplexDoubles (OUTPUTMATRIX)) ;
mxSetNzmax(OUTPUTMATRIX, (static_cast<mwSize>(NNZMAXA)));
mxSetIr(OUTPUTMATRIX, (mwIndex *)irs);
mxSetJc(OUTPUTMATRIX, (mwIndex *)jcs);
int m= mxSetComplexDoubles(OUTPUTMATRIX, (mxComplexDouble*)PRS);
if ( m==0) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"the function is unsuccessful, either mxArray is not an unshared mxDOUBLE_CLASS array, or the data is not allocated with mxCalloc.");
}
//mxSetPr(OUTPUTMATRIX, (double *)PRS);
mxDestroyArray(RS);
mxDestroyArray(CS);
mxDestroyArray(RW);
mxDestroyArray(CL);
mxDestroyArray(VL);
mxDestroyArray(ROWx);
mxDestroyArray(COLUMNx);
mxDestroyArray(VALUEx);
mxGPUDestroyGPUArray(ROW_SORT1);
mxGPUDestroyGPUArray(COL_SORT1);
mxGPUDestroyGPUArray(VAL_SORT1);
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
hipsparseDestroyMatDescr(descrA);
hipsparseDestroy(handle);
}
////////////////////////////////////////////////////////////////////////////////////
else if (!(mxIsGPUArray(ROW)) && !(mxIsGPUArray(COLUMN)) && !(mxIsGPUArray(VALUE))){
if((mxIsSparse(ROW)) || (mxIsSparse(COLUMN)) || (mxIsSparse(VALUE))) {
printf("Warning! Input(FIRST ARGUMENT) must be non sparse!\n");
return;
}
if ( !(mxIsScalar(NROWS)) || !(mxIsScalar(NCOLS))) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input (FOURTH and FIFTH ARGUMENTS) must be scalar.");
}
int NrowsA= (int)mxGetScalar(NROWS);
int NcolsA= (int)mxGetScalar(NCOLS);
int nnzR= (int)mxGetNumberOfElements(ROW);
int nnzC= (int)mxGetNumberOfElements(COLUMN);
//int nnzV= (int)mxGetNumberOfElements(VALUE);
int nnzV = nnzR;
int NNZMAXA=nnzR;
if ( (nnzR!= nnzC) || (nnzC != nnzV)|| (nnzR != nnzV) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input vectors (FIRST, SECOND, THIRD ARGUMENTS) must be the same lengths.");
}
if ( nnzR>(NrowsA*NcolsA) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, index exceeds array bounds [number of non zero greater than matrix dimensions (row*column)].");
}
if ( mxGetClassID(VALUE) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input(THIRD ARGUMENT) must be double precision.");
}
if ( (mxIsComplex(ROW)) || (mxIsComplex(COLUMN)) || (!mxIsComplex(VALUE))) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input(FIRST AND SECOND ARGUMENTS) must be real with no imaginary components and THIRD ARGUMENT must be complex.");
}
mxInitGPU();
// mxComplexDouble *pc;
// pc = mxGetComplexDoubles(VALUE);
std::vector<MATRIXC> vect;
int j;
#pragma omp for schedule(static) nowait
for ( j = 0; j < nnzR; ++j) {
vect.push_back( MATRIXC((static_cast<int> ((mxGetDoubles(ROW))[j])), (static_cast<int> ((mxGetDoubles(COLUMN))[j])), (static_cast<double> ((mxGetComplexDoubles(VALUE))[j].real)), (static_cast<double> ((mxGetComplexDoubles(VALUE))[j].imag))));
}
std::sort(vect.begin(), vect.end());
std::vector<MATRIXC> vect_temp;
vect_temp= vect;
int i = 0;
std::vector<MATRIXC>::iterator ity = vect.begin();
for (std::vector<MATRIXC>::iterator itx = vect.begin(); itx != vect.end(); itx++){
ity = itx + 1;
while (ity != vect.end())
{
//for (ity ; ity != vectx.end(); ity++){
if (itx->row_C == ity->row_C && itx->column_C == ity->column_C){
vect_temp[i].value_C_real = vect_temp[i].value_C_real + ity->value_C_real;
vect_temp[i].value_C_img = vect_temp[i].value_C_img + ity->value_C_img;
vect_temp[std::distance(vect.begin(), ity)].checked = true;
}
ity++;
//}
}
i++;
}
for (auto it = vect_temp.begin(); it != vect_temp.end();) {
if ((it->checked==true) ||(it->value_C_real==0 && it->value_C_img==0 )) {
it = vect_temp.erase(it);
}
else {
++it;
}
}
nnzR=nnzC=nnzV=(int)vect_temp.size();
mxArray * ROWx =mxCreateNumericMatrix(nnzR, 1, mxINT32_CLASS, mxREAL);
int *h_A_RowIndices_coo = (int *)mxGetInt32s(ROWx);
#pragma omp parallel for shared(nnzR) private(i)
for (i = 0; i < nnzR; ++i) {
//h_A_RowIndices_coo[i] = static_cast<int> ((mxGetPr(ROW))[i]);
h_A_RowIndices_coo[i] =vect_temp[i].row_C;
}
mxArray * COLUMNx =mxCreateNumericMatrix(nnzC, 1, mxINT32_CLASS, mxREAL);
int *h_A_ColIndices_coo = (int *)mxGetInt32s(COLUMNx);
#pragma omp parallel for shared(nnzC) private(i)
for (i = 0; i < nnzC; ++i) {
//h_A_ColIndices_coo[i] = static_cast<int> ((mxGetPr(COLUMN))[i]);
h_A_ColIndices_coo[i] = vect_temp[i].column_C;
}
mxArray * VALUEx =mxCreateNumericMatrix(nnzV, 1, mxDOUBLE_CLASS, mxCOMPLEX);
hipDoubleComplex *h_A1_coo = (hipDoubleComplex *)mxGetComplexDoubles(VALUEx);
#pragma omp parallel for shared(nnzV) private(i)
for (i = 0; i < nnzV; ++i) {
h_A1_coo[i].x = vect_temp[i].value_C_real;
h_A1_coo[i].y = vect_temp[i].value_C_img;
}
int Nr= *std::max_element(h_A_RowIndices_coo, h_A_RowIndices_coo + nnzR, max_elem);
int Nc= *std::max_element(h_A_ColIndices_coo, h_A_ColIndices_coo + nnzC, max_elem);
if ( (Nr>NrowsA) || (Nc>NcolsA) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, index exceeds array bounds: max(first vector)> fourth argument's value or max(second vector)> fifth argument's value .");
}
// NrowsA= h_A_RowIndices_coo[nnzR-1];
// NcolsA= *std::max_element(h_A_ColIndices_coo, h_A_ColIndices_coo + nnzC, max_elem);
hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle));
//hipDoubleComplex *d_A; gpuErrchk(hipMalloc(&d_A, nnzV * sizeof(*d_A)));
//int *d_A_RowIndices; gpuErrchk(hipMalloc(&d_A_RowIndices, (NrowsA + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; gpuErrchk(hipMalloc(&d_A_ColIndices, nnzV * sizeof(*d_A_ColIndices)));
//int *d_cooRowIndA; gpuErrchk(hipMalloc(&d_cooRowIndA, nnzV * sizeof(*d_cooRowIndA)));
size_t pivot_dimensA[1] = {nnzV};
size_t pivot_dimensROW_A[1] = {NrowsA + 1};
size_t pivot_dimensCOL_A[1] = {nnzV};
size_t pivot_dimensCOO_A[1] = {nnzV};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *d_A = (hipDoubleComplex *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
mxGPUArray * COO_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOO_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_cooRowIndA = (int *)mxGPUGetData(COO_A);
gpuErrchk(hipMemcpy(d_A, h_A1_coo, nnzV * sizeof(*d_A), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_A_ColIndices, h_A_ColIndices_coo, nnzV * sizeof(*d_A_ColIndices), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_cooRowIndA, h_A_RowIndices_coo, nnzV * sizeof(*d_cooRowIndA), hipMemcpyHostToDevice));
hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA));
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE);
// A: sort the COO triplets by row, gather the values through the sort permutation, then compress the sorted row indices into CSR row offsets.
int *Pa = NULL;
void *pBuffera = NULL;
size_t pBufferSizeInBytesa = 0;
hipsparseXcoosort_bufferSizeExt(handle, NrowsA, NcolsA,
nnzV,
d_cooRowIndA,
d_A_ColIndices, &pBufferSizeInBytesa);
gpuErrchk(hipMalloc(&pBuffera, sizeof(char)*pBufferSizeInBytesa));
gpuErrchk(hipMalloc(&Pa, sizeof(int)*nnzV));
hipsparseCreateIdentityPermutation(handle, nnzV, Pa);
cusparseSafeCall(hipsparseXcoosortByRow(handle, NrowsA, NcolsA,
nnzV,
d_cooRowIndA,
d_A_ColIndices,
Pa,
pBuffera));
cusparseSafeCall(hipsparseZgthr(handle, nnzV, d_A, d_A, Pa, HIPSPARSE_INDEX_BASE_ZERO));
cusparseSafeCall(hipsparseXcoo2csr(handle,
d_cooRowIndA,
nnzV,
NrowsA,
d_A_RowIndices,
HIPSPARSE_INDEX_BASE_ONE));
size_t pivot_dimensionsrow[1] = {nnzR};
size_t pivot_dimensionscolumn[1] = {NcolsA+1};
size_t pivot_dimensionsvalue[1] = {nnzV};
mxGPUArray * ROW_SORT1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *ROWSORT = (int *)mxGPUGetData(ROW_SORT1);
mxGPUArray * COL_SORT1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionscolumn, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *COLSORT = (int *)mxGPUGetData(COL_SORT1);
mxGPUArray *VAL_SORT1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
hipDoubleComplex *VALSORT = (hipDoubleComplex *)mxGPUGetData(VAL_SORT1);
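// Convert the 1-based CSR matrix to CSC so the values, row indices and column pointers match MATLAB's compressed-column sparse layout (indices are shifted to 0-based before mxSetIr/mxSetJc below).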
cusparseSafeCall(hipsparseZcsr2csc(handle, NrowsA, NcolsA, nnzV, d_A, d_A_RowIndices, d_A_ColIndices, VALSORT, ROWSORT, COLSORT, HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ONE));
//gpuErrchk(hipFree(d_A));
//gpuErrchk(hipFree(d_A_RowIndices));
//gpuErrchk(hipFree(d_A_ColIndices));
//gpuErrchk(hipFree(d_cooRowIndA));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(COO_A);
gpuErrchk(hipFree(pBuffera));
gpuErrchk(hipFree(Pa));
mwSize nnzm=(mwSize)nnzV;
OUTPUTMATRIX = mxCreateSparse(NrowsA,NcolsA,nnzm,mxCOMPLEX);
mxArray *RS= mxGPUCreateMxArrayOnCPU(ROW_SORT1);
int * rs= (int *)mxGetInt32s(RS);
mxArray *CS= mxGPUCreateMxArrayOnCPU(COL_SORT1);
int * cs= (int *)mxGetInt32s(CS);
mwIndex *irs,*jcs;
irs = static_cast<mwIndex *> (mxMalloc (nnzR * sizeof(mwIndex)));
#pragma omp parallel for shared(nnzR) private(i)
for (i = 0; i < nnzR; ++i) {
irs[i] = static_cast<mwIndex> (rs[i])-1;
}
jcs = static_cast<mwIndex *> (mxMalloc ((NcolsA+1) * sizeof(mwIndex)));
int nc1= NcolsA+1;
#pragma omp parallel for shared(nc1) private(i)
for (i = 0; i < nc1; ++i) {
jcs[i] = static_cast<mwIndex> (cs[i])-1;
}
mxComplexDouble* PRS = (mxComplexDouble*) mxMalloc (nnzV * sizeof(mxComplexDouble));
gpuErrchk(hipMemcpy(PRS, VALSORT, nnzV * sizeof(mxComplexDouble), hipMemcpyDeviceToHost));
mxFree (mxGetJc (OUTPUTMATRIX)) ;
mxFree (mxGetIr (OUTPUTMATRIX)) ;
mxFree (mxGetComplexDoubles (OUTPUTMATRIX)) ;
mxSetNzmax(OUTPUTMATRIX, (static_cast<mwSize>(NNZMAXA)));
mxSetIr(OUTPUTMATRIX, (mwIndex *)irs);
mxSetJc(OUTPUTMATRIX, (mwIndex *)jcs);
int m= mxSetComplexDoubles(OUTPUTMATRIX, (mxComplexDouble*)PRS);
if ( m==0) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"the function is unsuccessful, either mxArray is not an unshared mxDOUBLE_CLASS array, or the data is not allocated with mxCalloc.");
}
//mxSetPr(OUTPUTMATRIX, (double *)PRS);
mxDestroyArray(RS);
mxDestroyArray(CS);
mxDestroyArray(ROWx);
mxDestroyArray(COLUMNx);
mxDestroyArray(VALUEx);
mxGPUDestroyGPUArray(ROW_SORT1);
mxGPUDestroyGPUArray(COL_SORT1);
mxGPUDestroyGPUArray(VAL_SORT1);
hipsparseDestroyMatDescr(descrA);
hipsparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
|
31d5cfdbcd4625f58ef070f8702e309841af2dc1.cu
|
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0],prhs[1],prhs[2] := mxGPUArray or CPU Array}[double or complex double]
* Create sparse matrix
* Z=CuMatlab_sparse(X)
* Z=CuMatlab_sparse(X,Y)
* Z=CuMatlab_sparse(X,Y,Z)
* Z=CuMatlab_sparse(X,Y,Z,row,column)
* Z=CuMatlab_sparse(X,Y,Z,row,column,nz)
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <algorithm>
#include "SPARSEHELPER.h"
#include "ERRORCHK.h"
#include <omp.h>
// Input Arguments
#define ROW prhs[0]
#define COLUMN prhs[1]
#define VALUE prhs[2]
#define NROWS prhs[3]
#define NCOLS prhs[4]
// Output Arguments
#define OUTPUTMATRIX plhs[0]
extern "C" static void mexCuMatlab_sparseZ(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
cudaError_t errCode =cudaGetDeviceCount(&nDevices);
//int nDevices;
//cudaGetDeviceCount(&nDevices);
if (errCode != cudaSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be five.";
char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one.";
if ((nrhs!=5)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg);
}
if ((nlhs!=1)) {
mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg);
}
char *input_buf0;
input_buf0 = mxArrayToString(ROW);
if ((mxIsChar(ROW))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0);
}
char *input_buf1;
input_buf1 = mxArrayToString(COLUMN);
if ((mxIsChar(COLUMN))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1);
}
char *input_buf2;
input_buf2 = mxArrayToString(VALUE);
if ((mxIsChar(VALUE))){
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Input(THIRD ARGUMENT) must be array, or gpuArray object not %s\n",input_buf2);
}
if (mxIsGPUArray(ROW) && mxIsGPUArray(COLUMN) && mxIsGPUArray(VALUE) ) {
mxInitGPU();
mxGPUArray const *ROWGPU;
ROWGPU= mxGPUCreateFromMxArray(ROW);
mxGPUArray const *COLUMNGPU;
COLUMNGPU= mxGPUCreateFromMxArray(COLUMN);
mxGPUArray const *VALUEGPU;
VALUEGPU= mxGPUCreateFromMxArray(VALUE);
if((mxGPUIsSparse(ROWGPU)==1)|| (mxGPUIsSparse(COLUMNGPU)==1) || (mxGPUIsSparse(VALUEGPU)==1)){
//plhs[0] = mxGPUCreateMxArrayOnGPU(INPUTMATRIXGPUx);
printf("Warning! Input(FIRST, SECOND and THIRD ARGUMENTS) must be non sparse! \n");
// mxGPUDestroyGPUArray(INPUTMATRIXGPUx);
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
return;
}
if ((mxGPUGetClassID(VALUEGPU) != mxDOUBLE_CLASS)) {
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input(THIRD ARGUMENT) must be double precision.");
}
if ( (mxGPUGetComplexity(ROWGPU) != mxREAL) || (mxGPUGetComplexity(COLUMNGPU) != mxREAL) || (mxGPUGetComplexity(VALUEGPU) != mxCOMPLEX)) {
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input(FIRST AND SECOND ARGUMENTS) must be real with no imaginary components and THIRD ARGUMENT must be complex.");
}
if ( !(mxIsScalar(NROWS)) || !(mxIsScalar(NCOLS))) {
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input (FOURTH and FIFTH ARGUMENTS) must be scalar.");
}
int NrowsA= (int)mxGetScalar(NROWS);
int NcolsA= (int)mxGetScalar(NCOLS);
int nnzR= static_cast<int> (mxGPUGetNumberOfElements(ROWGPU));
int nnzC= static_cast<int> (mxGPUGetNumberOfElements(COLUMNGPU));
//int nnzV= static_cast<int> (mxGPUGetNumberOfElements(VALUEGPU));
int nnzV = nnzR;
int NNZMAXA=nnzR;
if ( (nnzR!= nnzC) || (nnzC != nnzV)|| (nnzR != nnzV) ) {
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input vectors (FIRST, SECOND and THIRD ARGUMENTS) must be the same lengths.");
}
if ( nnzR>(NrowsA*NcolsA) ) {
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, index exceeds array bounds [number of non zero greater than matrix dimensions (row*column)].");
}
mxArray * RW = mxGPUCreateMxArrayOnCPU(ROWGPU);
mxArray * CL = mxGPUCreateMxArrayOnCPU(COLUMNGPU);
mxArray * VL = mxGPUCreateMxArrayOnCPU(VALUEGPU);
// mxComplexDouble *pc;
// pc = mxGetComplexDoubles(VL);
std::vector<MATRIXC> vect;
int j;
#pragma omp for schedule(static) nowait
for ( j = 0; j < nnzR; ++j) {
vect.push_back( MATRIXC((static_cast<int> ((mxGetDoubles(RW))[j])), (static_cast<int> ((mxGetDoubles(CL))[j])), (static_cast<double> ((mxGetComplexDoubles(VL))[j].real)), (static_cast<double> ((mxGetComplexDoubles(VL))[j].imag))));
}
std::sort(vect.begin(), vect.end());
std::vector<MATRIXC> vect_temp;
vect_temp= vect;
int i = 0;
std::vector<MATRIXC>::iterator ity = vect.begin();
for (std::vector<MATRIXC>::iterator itx = vect.begin(); itx != vect.end(); itx++){
ity = itx + 1;
while (ity != vect.end())
{
//for (ity ; ity != vectx.end(); ity++){
if (itx->row_C == ity->row_C && itx->column_C == ity->column_C){
vect_temp[i].value_C_real = vect_temp[i].value_C_real + ity->value_C_real;
vect_temp[i].value_C_img = vect_temp[i].value_C_img + ity->value_C_img;
vect_temp[std::distance(vect.begin(), ity)].checked = true;
}
ity++;
//}
}
i++;
}
for (auto it = vect_temp.begin(); it != vect_temp.end();) {
if ((it->checked==true) ||(it->value_C_real==0 && it->value_C_img==0 )) {
it = vect_temp.erase(it);
}
else {
++it;
}
}
nnzR=nnzC=nnzV= (int)vect_temp.size();
mxArray * ROWx =mxCreateNumericMatrix(nnzR, 1, mxINT32_CLASS, mxREAL);
int *h_A_RowIndices_coo = (int *)mxGetInt32s(ROWx);
#pragma omp parallel for shared(nnzR) private(i)
for (i = 0; i < nnzR; ++i) {
// h_A_RowIndices_coo[i] = static_cast<int> ((mxGetPr(RW))[i]);
h_A_RowIndices_coo[i] =vect_temp[i].row_C;
}
mxArray * COLUMNx =mxCreateNumericMatrix(nnzC, 1, mxINT32_CLASS, mxREAL);
int *h_A_ColIndices_coo = (int *)mxGetInt32s(COLUMNx);
#pragma omp parallel for shared(nnzC) private(i)
for (i = 0; i < nnzC; ++i) {
// h_A_ColIndices_coo[i] = static_cast<int> ((mxGetPr(CL))[i]);
h_A_ColIndices_coo[i] = vect_temp[i].column_C;
}
mxArray * VALUEx = mxCreateNumericMatrix(nnzV, 1, mxDOUBLE_CLASS, mxCOMPLEX);
cuDoubleComplex *h_A1_coo = (cuDoubleComplex *)mxGetComplexDoubles(VALUEx);
#pragma omp parallel for shared(nnzV) private(i)
for (i = 0; i < nnzV; ++i) {
h_A1_coo[i].x = vect_temp[i].value_C_real;
h_A1_coo[i].y = vect_temp[i].value_C_img;
}
int Nr= *std::max_element(h_A_RowIndices_coo, h_A_RowIndices_coo + nnzR, max_elem);
int Nc= *std::max_element(h_A_ColIndices_coo, h_A_ColIndices_coo + nnzC, max_elem);
if ( (Nr>NrowsA) || (Nc>NcolsA) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, index exceeds array bounds: max(first vector)> fourth argument's value or max(second vector)> fifth argument's value .");
}
// NrowsA= h_A_RowIndices_coo[nnzR-1];
// NcolsA= *std::max_element(h_A_ColIndices_coo, h_A_ColIndices_coo + nnzC, max_elem);
cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
//cuDoubleComplex *d_A; gpuErrchk(cudaMalloc(&d_A, nnzV * sizeof(*d_A)));
//int *d_A_RowIndices; gpuErrchk(cudaMalloc(&d_A_RowIndices, (NrowsA + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzV * sizeof(*d_A_ColIndices)));
//int *d_cooRowIndA; gpuErrchk(cudaMalloc(&d_cooRowIndA, nnzV * sizeof(*d_cooRowIndA)));
size_t pivot_dimensA[1] = {nnzV};
size_t pivot_dimensROW_A[1] = {NrowsA + 1};
size_t pivot_dimensCOL_A[1] = {nnzV};
size_t pivot_dimensCOO_A[1] = {nnzV};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *d_A = (cuDoubleComplex *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
mxGPUArray * COO_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOO_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_cooRowIndA = (int *)mxGPUGetData(COO_A);
gpuErrchk(cudaMemcpy(d_A, h_A1_coo, nnzV * sizeof(*d_A), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_A_ColIndices, h_A_ColIndices_coo, nnzV * sizeof(*d_A_ColIndices), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_cooRowIndA, h_A_RowIndices_coo, nnzV * sizeof(*d_cooRowIndA), cudaMemcpyHostToDevice));
cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA));
cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE);
// A: sort the COO triplets by row, gather the values through the sort permutation, then compress the sorted row indices into CSR row offsets.
int *Pa = NULL;
void *pBuffera = NULL;
size_t pBufferSizeInBytesa = 0;
cusparseXcoosort_bufferSizeExt(handle, NrowsA, NcolsA,
nnzV,
d_cooRowIndA,
d_A_ColIndices, &pBufferSizeInBytesa);
gpuErrchk(cudaMalloc(&pBuffera, sizeof(char)*pBufferSizeInBytesa));
gpuErrchk(cudaMalloc(&Pa, sizeof(int)*nnzV));
cusparseCreateIdentityPermutation(handle, nnzV, Pa);
cusparseSafeCall(cusparseXcoosortByRow(handle, NrowsA, NcolsA,
nnzV,
d_cooRowIndA,
d_A_ColIndices,
Pa,
pBuffera));
cusparseSafeCall(cusparseZgthr(handle, nnzV, d_A, d_A, Pa, CUSPARSE_INDEX_BASE_ZERO));
cusparseSafeCall(cusparseXcoo2csr(handle,
d_cooRowIndA,
nnzV,
NrowsA,
d_A_RowIndices,
CUSPARSE_INDEX_BASE_ONE));
size_t pivot_dimensionsrow[1] = {nnzR};
size_t pivot_dimensionscolumn[1] = {NcolsA+1};
size_t pivot_dimensionsvalue[1] = {nnzV};
mxGPUArray * ROW_SORT1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *ROWSORT = (int *)mxGPUGetData(ROW_SORT1);
mxGPUArray * COL_SORT1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionscolumn, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *COLSORT = (int *)mxGPUGetData(COL_SORT1);
mxGPUArray *VAL_SORT1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *VALSORT = (cuDoubleComplex *)mxGPUGetData(VAL_SORT1);
cusparseSafeCall(cusparseZcsr2csc(handle, NrowsA, NcolsA, nnzV, d_A, d_A_RowIndices, d_A_ColIndices, VALSORT, ROWSORT, COLSORT, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ONE));
//gpuErrchk(cudaFree(d_A));
//gpuErrchk(cudaFree(d_A_RowIndices));
//gpuErrchk(cudaFree(d_A_ColIndices));
//gpuErrchk(cudaFree(d_cooRowIndA));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(COO_A);
gpuErrchk(cudaFree(pBuffera));
gpuErrchk(cudaFree(Pa));
mwSize nnzm=(mwSize)nnzV;
OUTPUTMATRIX = mxCreateSparse(NrowsA,NcolsA,nnzm,mxCOMPLEX);
mxArray *RS= mxGPUCreateMxArrayOnCPU(ROW_SORT1);
int * rs= (int *)mxGetInt32s(RS);
mxArray *CS= mxGPUCreateMxArrayOnCPU(COL_SORT1);
int * cs= (int *)mxGetInt32s(CS);
mwIndex *irs,*jcs;
irs = static_cast<mwIndex *> (mxMalloc (nnzR * sizeof(mwIndex)));
#pragma omp parallel for shared(nnzR) private(i)
for (i = 0; i < nnzR; ++i) {
irs[i] = static_cast<mwIndex> (rs[i])-1;
}
jcs = static_cast<mwIndex *> (mxMalloc ((NcolsA+1) * sizeof(mwIndex)));
int nc1= NcolsA+1;
#pragma omp parallel for shared(nc1) private(i)
for (i = 0; i < nc1; ++i) {
jcs[i] = static_cast<mwIndex> (cs[i])-1;
}
mxComplexDouble* PRS = (mxComplexDouble*) mxMalloc (nnzV * sizeof(mxComplexDouble));
gpuErrchk(cudaMemcpy(PRS, VALSORT, nnzV * sizeof(mxComplexDouble), cudaMemcpyDeviceToHost));
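// Swap the placeholder Ir/Jc/Pr buffers allocated by mxCreateSparse for the
// converted CSC arrays; MATLAB takes ownership of the mxMalloc'd memory passed
// to mxSetIr/mxSetJc/mxSetComplexDoubles and frees it with the output array.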
mxFree (mxGetJc (OUTPUTMATRIX)) ;
mxFree (mxGetIr (OUTPUTMATRIX)) ;
mxFree (mxGetComplexDoubles (OUTPUTMATRIX)) ;
mxSetNzmax(OUTPUTMATRIX, (static_cast<mwSize>(NNZMAXA)));
mxSetIr(OUTPUTMATRIX, (mwIndex *)irs);
mxSetJc(OUTPUTMATRIX, (mwIndex *)jcs);
int m= mxSetComplexDoubles(OUTPUTMATRIX, (mxComplexDouble*)PRS);
if ( m==0) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"the function is unsuccessful, either mxArray is not an unshared mxDOUBLE_CLASS array, or the data is not allocated with mxCalloc.");
}
//mxSetPr(OUTPUTMATRIX, (double *)PRS);
mxDestroyArray(RS);
mxDestroyArray(CS);
mxDestroyArray(RW);
mxDestroyArray(CL);
mxDestroyArray(VL);
mxDestroyArray(ROWx);
mxDestroyArray(COLUMNx);
mxDestroyArray(VALUEx);
mxGPUDestroyGPUArray(ROW_SORT1);
mxGPUDestroyGPUArray(COL_SORT1);
mxGPUDestroyGPUArray(VAL_SORT1);
mxGPUDestroyGPUArray(ROWGPU);
mxGPUDestroyGPUArray(COLUMNGPU);
mxGPUDestroyGPUArray(VALUEGPU);
cusparseDestroyMatDescr(descrA);
cusparseDestroy(handle);
}
////////////////////////////////////////////////////////////////////////////////////
else if (!(mxIsGPUArray(ROW)) && !(mxIsGPUArray(COLUMN)) && !(mxIsGPUArray(VALUE))){
if((mxIsSparse(ROW)) || (mxIsSparse(COLUMN)) || (mxIsSparse(VALUE))) {
printf("Warning! Input(FIRST ARGUMENT) must be non sparse!\n");
return;
}
if ( !(mxIsScalar(NROWS)) || !(mxIsScalar(NCOLS))) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input (FOURTH and FIFTH ARGUMENTS) must be scalar.");
}
int NrowsA= (int)mxGetScalar(NROWS);
int NcolsA= (int)mxGetScalar(NCOLS);
int nnzR= (int)mxGetNumberOfElements(ROW);
int nnzC= (int)mxGetNumberOfElements(COLUMN);
//int nnzV= (int)mxGetNumberOfElements(VALUE);
int nnzV = nnzR;
int NNZMAXA=nnzR;
if ( (nnzR!= nnzC) || (nnzC != nnzV)|| (nnzR != nnzV) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input vectors (FIRST, SECOND, THIRD ARGUMENTS) must be the same lengths.");
}
if ( nnzR>(NrowsA*NcolsA) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, index exceeds array bounds [number of non zero greater than matrix dimensions (row*column)].");
}
if ( mxGetClassID(VALUE) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input(THIRD ARGUMENT) must be double precision.");
}
if ( (mxIsComplex(ROW)) || (mxIsComplex(COLUMN)) || (!mxIsComplex(VALUE))) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, input(FIRST AND SECOND ARGUMENTS) must be real with no imaginary components and THIRD ARGUMENT must be complex.");
}
mxInitGPU();
// mxComplexDouble *pc;
// pc = mxGetComplexDoubles(VALUE);
std::vector<MATRIXC> vect;
int j;
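// Note: the 'omp for' below has no enclosing parallel region, so the loop
// executes serially; std::vector::push_back would not be thread-safe otherwise.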
#pragma omp for schedule(static) nowait
for ( j = 0; j < nnzR; ++j) {
vect.push_back( MATRIXC((static_cast<int> ((mxGetDoubles(ROW))[j])), (static_cast<int> ((mxGetDoubles(COLUMN))[j])), (static_cast<double> ((mxGetComplexDoubles(VALUE))[j].real)), (static_cast<double> ((mxGetComplexDoubles(VALUE))[j].imag))));
}
std::sort(vect.begin(), vect.end());
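// Accumulate duplicate (row, column) entries into their first occurrence and
// mark the rest 'checked' so they can be erased afterwards, matching sparse()'s
// behaviour of summing repeated indices; explicit zero entries are dropped too.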
std::vector<MATRIXC> vect_temp;
vect_temp= vect;
int i = 0;
std::vector<MATRIXC>::iterator ity = vect.begin();
for (std::vector<MATRIXC>::iterator itx = vect.begin(); itx != vect.end(); itx++){
ity = itx + 1;
while (ity != vect.end())
{
//for (ity ; ity != vectx.end(); ity++){
if (itx->row_C == ity->row_C && itx->column_C == ity->column_C){
vect_temp[i].value_C_real = vect_temp[i].value_C_real + ity->value_C_real;
vect_temp[i].value_C_img = vect_temp[i].value_C_img + ity->value_C_img;
vect_temp[std::distance(vect.begin(), ity)].checked = true;
}
ity++;
//}
}
i++;
}
for (auto it = vect_temp.begin(); it != vect_temp.end();) {
if ((it->checked==true) ||(it->value_C_real==0 && it->value_C_img==0 )) {
it = vect_temp.erase(it);
}
else {
++it;
}
}
nnzR=nnzC=nnzV=(int)vect_temp.size();
mxArray * ROWx =mxCreateNumericMatrix(nnzR, 1, mxINT32_CLASS, mxREAL);
int *h_A_RowIndices_coo = (int *)mxGetInt32s(ROWx);
#pragma omp parallel for shared(nnzR) private(i)
for (i = 0; i < nnzR; ++i) {
//h_A_RowIndices_coo[i] = static_cast<int> ((mxGetPr(ROW))[i]);
h_A_RowIndices_coo[i] =vect_temp[i].row_C;
}
mxArray * COLUMNx =mxCreateNumericMatrix(nnzC, 1, mxINT32_CLASS, mxREAL);
int *h_A_ColIndices_coo = (int *)mxGetInt32s(COLUMNx);
#pragma omp parallel for shared(nnzC) private(i)
for (i = 0; i < nnzC; ++i) {
//h_A_ColIndices_coo[i] = static_cast<int> ((mxGetPr(COLUMN))[i]);
h_A_ColIndices_coo[i] = vect_temp[i].column_C;
}
mxArray * VALUEx =mxCreateNumericMatrix(nnzV, 1, mxDOUBLE_CLASS, mxCOMPLEX);
cuDoubleComplex *h_A1_coo = (cuDoubleComplex *)mxGetComplexDoubles(VALUEx);
#pragma omp parallel for shared(nnzV) private(i)
for (i = 0; i < nnzV; ++i) {
h_A1_coo[i].x = vect_temp[i].value_C_real;
h_A1_coo[i].y = vect_temp[i].value_C_img;
}
int Nr= *std::max_element(h_A_RowIndices_coo, h_A_RowIndices_coo + nnzR, max_elem);
int Nc= *std::max_element(h_A_ColIndices_coo, h_A_ColIndices_coo + nnzC, max_elem);
if ( (Nr>NrowsA) || (Nc>NcolsA) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Invalid input to MEX file, index exceeds array bounds: max(first vector)> fourth argument's value or max(second vector)> fifth argument's value .");
}
// NrowsA= h_A_RowIndices_coo[nnzR-1];
// NcolsA= *std::max_element(h_A_ColIndices_coo, h_A_ColIndices_coo + nnzC, max_elem);
cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
//cuDoubleComplex *d_A; gpuErrchk(cudaMalloc(&d_A, nnzV * sizeof(*d_A)));
//int *d_A_RowIndices; gpuErrchk(cudaMalloc(&d_A_RowIndices, (NrowsA + 1) * sizeof(*d_A_RowIndices)));
//int *d_A_ColIndices; gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzV * sizeof(*d_A_ColIndices)));
//int *d_cooRowIndA; gpuErrchk(cudaMalloc(&d_cooRowIndA, nnzV * sizeof(*d_cooRowIndA)));
size_t pivot_dimensA[1] = {nnzV};
size_t pivot_dimensROW_A[1] = {NrowsA + 1};
size_t pivot_dimensCOL_A[1] = {nnzV};
size_t pivot_dimensCOO_A[1] = {nnzV};
mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *d_A = (cuDoubleComplex *)mxGPUGetData(A);
mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A);
mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_A_ColIndices = (int *)mxGPUGetData(COL_A);
mxGPUArray * COO_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOO_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *d_cooRowIndA = (int *)mxGPUGetData(COO_A);
gpuErrchk(cudaMemcpy(d_A, h_A1_coo, nnzV * sizeof(*d_A), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_A_ColIndices, h_A_ColIndices_coo, nnzV * sizeof(*d_A_ColIndices), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_cooRowIndA, h_A_RowIndices_coo, nnzV * sizeof(*d_cooRowIndA), cudaMemcpyHostToDevice));
cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA));
cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE);
// A
int *Pa = NULL;
void *pBuffera = NULL;
size_t pBufferSizeInBytesa = 0;
cusparseXcoosort_bufferSizeExt(handle, NrowsA, NcolsA,
nnzV,
d_cooRowIndA,
d_A_ColIndices, &pBufferSizeInBytesa);
gpuErrchk(cudaMalloc(&pBuffera, sizeof(char)*pBufferSizeInBytesa));
gpuErrchk(cudaMalloc(&Pa, sizeof(int)*nnzV));
cusparseCreateIdentityPermutation(handle, nnzV, Pa);
cusparseSafeCall(cusparseXcoosortByRow(handle, NrowsA, NcolsA,
nnzV,
d_cooRowIndA,
d_A_ColIndices,
Pa,
pBuffera));
cusparseSafeCall(cusparseZgthr(handle, nnzV, d_A, d_A, Pa, CUSPARSE_INDEX_BASE_ZERO));
cusparseSafeCall(cusparseXcoo2csr(handle,
d_cooRowIndA,
nnzV,
NrowsA,
d_A_RowIndices,
CUSPARSE_INDEX_BASE_ONE));
size_t pivot_dimensionsrow[1] = {nnzR};
size_t pivot_dimensionscolumn[1] = {NcolsA+1};
size_t pivot_dimensionsvalue[1] = {nnzV};
mxGPUArray * ROW_SORT1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsrow, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *ROWSORT = (int *)mxGPUGetData(ROW_SORT1);
mxGPUArray * COL_SORT1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionscolumn, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
int *COLSORT = (int *)mxGPUGetData(COL_SORT1);
mxGPUArray *VAL_SORT1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalue, mxDOUBLE_CLASS, mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
cuDoubleComplex *VALSORT = (cuDoubleComplex *)mxGPUGetData(VAL_SORT1);
cusparseSafeCall(cusparseZcsr2csc(handle, NrowsA, NcolsA, nnzV, d_A, d_A_RowIndices, d_A_ColIndices, VALSORT, ROWSORT, COLSORT, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ONE));
//gpuErrchk(cudaFree(d_A));
//gpuErrchk(cudaFree(d_A_RowIndices));
//gpuErrchk(cudaFree(d_A_ColIndices));
//gpuErrchk(cudaFree(d_cooRowIndA));
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(ROW_A);
mxGPUDestroyGPUArray(COL_A);
mxGPUDestroyGPUArray(COO_A);
gpuErrchk(cudaFree(pBuffera));
gpuErrchk(cudaFree(Pa));
mwSize nnzm=(mwSize)nnzV;
OUTPUTMATRIX = mxCreateSparse(NrowsA,NcolsA,nnzm,mxCOMPLEX);
mxArray *RS= mxGPUCreateMxArrayOnCPU(ROW_SORT1);
int * rs= (int *)mxGetInt32s(RS);
mxArray *CS= mxGPUCreateMxArrayOnCPU(COL_SORT1);
int * cs= (int *)mxGetInt32s(CS);
mwIndex *irs,*jcs;
irs = static_cast<mwIndex *> (mxMalloc (nnzR * sizeof(mwIndex)));
#pragma omp parallel for shared(nnzR) private(i)
for (i = 0; i < nnzR; ++i) {
irs[i] = static_cast<mwIndex> (rs[i])-1;
}
jcs = static_cast<mwIndex *> (mxMalloc ((NcolsA+1) * sizeof(mwIndex)));
int nc1= NcolsA+1;
#pragma omp parallel for shared(nc1) private(i)
for (i = 0; i < nc1; ++i) {
jcs[i] = static_cast<mwIndex> (cs[i])-1;
}
mxComplexDouble* PRS = (mxComplexDouble*) mxMalloc (nnzV * sizeof(mxComplexDouble));
gpuErrchk(cudaMemcpy(PRS, VALSORT, nnzV * sizeof(mxComplexDouble), cudaMemcpyDeviceToHost));
mxFree (mxGetJc (OUTPUTMATRIX)) ;
mxFree (mxGetIr (OUTPUTMATRIX)) ;
mxFree (mxGetComplexDoubles (OUTPUTMATRIX)) ;
mxSetNzmax(OUTPUTMATRIX, (static_cast<mwSize>(NNZMAXA)));
mxSetIr(OUTPUTMATRIX, (mwIndex *)irs);
mxSetJc(OUTPUTMATRIX, (mwIndex *)jcs);
int m= mxSetComplexDoubles(OUTPUTMATRIX, (mxComplexDouble*)PRS);
if ( m==0) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"the function is unsuccessful, either mxArray is not an unshared mxDOUBLE_CLASS array, or the data is not allocated with mxCalloc.");
}
//mxSetPr(OUTPUTMATRIX, (double *)PRS);
mxDestroyArray(RS);
mxDestroyArray(CS);
mxDestroyArray(ROWx);
mxDestroyArray(COLUMNx);
mxDestroyArray(VALUEx);
mxGPUDestroyGPUArray(ROW_SORT1);
mxGPUDestroyGPUArray(COL_SORT1);
mxGPUDestroyGPUArray(VAL_SORT1);
cusparseDestroyMatDescr(descrA);
cusparseDestroy(handle);
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
|
e159c933cd27f0cc3ce2c8d46c4f38ee5b7c204e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <THH/THH.h>
#include "THH/THHDeviceUtils.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <assert.h>
#include <cmath>
namespace gpu {
template <typename scalar_t>
__device__ float clamp(scalar_t x, scalar_t a, scalar_t b)
{
return max(a, min(b, x));
}
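// Half-precision atomic adds: when the #else branch is taken, the value is
// packed into a __half2 (zero in the unused lane) whenever the target slot is
// 4-byte aligned and not the last element, so a single native half2 atomicAdd
// suffices; otherwise a plain scalar half atomicAdd is used.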
template <
typename scalar_t,
typename std::enable_if<std::is_same<c10::Half, scalar_t>::value>::type* =
nullptr>
__device__ __forceinline__ void fastSpecializedAtomicAdd(
scalar_t* tensor,
size_t index,
const size_t numel,
scalar_t value) {
#if ( \
(TORCH_HIP_VERSION < 10000) || \
(defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)))
atomicAdd(
reinterpret_cast<at::Half*>(tensor) + index,
static_cast<at::Half>(value));
#else
bool low_bit = (index % 2 == 0) &&
(reinterpret_cast<std::uintptr_t>(tensor) % sizeof(__half2) == 0);
if (low_bit && index < (numel - 1)) {
__half2 value2;
value2.x = value;
value2.y = __int2half_rz(0);
atomicAdd(reinterpret_cast<__half2*>(tensor) + index / 2, value2);
} else if (!low_bit && index > 0) {
__half2 value2;
value2.x = __int2half_rz(0);
value2.y = value;
atomicAdd(reinterpret_cast<__half2*>(tensor) + index / 2, value2);
} else {
atomicAdd(
reinterpret_cast<__half*>(tensor) + index, static_cast<__half>(value));
}
#endif
}
template <
typename scalar_t,
typename std::enable_if<!std::is_same<c10::Half, scalar_t>::value>::type* =
nullptr>
__device__ __forceinline__ void fastSpecializedAtomicAdd(
scalar_t* tensor,
size_t index,
const size_t numel,
scalar_t value) {
atomicAdd(tensor + index, value);
}
template <class scalar_t>
__device__ __forceinline__ void fastAtomicAdd(
scalar_t* tensor,
size_t index,
const size_t numel,
scalar_t value,
bool fast_atomics) {
if (fast_atomics) {
fastSpecializedAtomicAdd(tensor, index, numel, value);
} else {
atomicAdd(tensor + index, value);
}
}
/**
input: [T, B, H, R]
offset_left: [T, B, H]
offset_right: [T, B, H]
output: [T, B, H, R]
**/
template <typename scalar_t>
__global__ void TaLKConvEncoderKernel(const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> input,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_left,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_right,
const scalar_t __restrict__ max_left,
const scalar_t __restrict__ max_right,
at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> output){
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const int index = __umul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int rIdx = index % r_dim;
const int batchIdx = (index / r_dim) % batchSize;
const int tokenIdx = (index / r_dim) / batchSize;
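// One thread per (token, batch, feature) element: the flat thread index is
// decomposed with the feature dimension fastest, then batch, then token.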
if (batchIdx < batchSize and tokenIdx < length and rIdx < r_dim) {
const scalar_t left_off = static_cast<scalar_t>(offset_left[tokenIdx][batchIdx]);
const scalar_t right_off = static_cast<scalar_t>(offset_right[tokenIdx][batchIdx]);
const scalar_t true_left_off = clamp(tokenIdx - left_off * max_left, static_cast<scalar_t>(0.0), static_cast<scalar_t>(length-1));
const scalar_t true_right_off = clamp(tokenIdx + right_off * max_right, static_cast<scalar_t>(0.0), static_cast<scalar_t>(length-1));
const int32_t ind_floor_left = clamp(static_cast<int32_t>(floor(true_left_off)), 0, length-1);
const int32_t ind_ceil_left = clamp(static_cast<int32_t>(ceil(true_left_off)), 0, length-1);
const int32_t ind_floor_right = clamp(static_cast<int32_t>(floor(true_right_off)), 0, length-1);
const int32_t ind_ceil_right = clamp(static_cast<int32_t>(ceil(true_right_off)), 0, length-1);
const scalar_t alpha_left = ind_ceil_left - true_left_off;
const scalar_t alpha_right = true_right_off - ind_floor_right;
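// Difference of the linearly interpolated input at the right and left window
// boundaries; the input is presumably a cumulative sum over time, so this
// evaluates the windowed sum between the two (fractional) offsets.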
const scalar_t S_output = ((1.0 - alpha_right)*input[ind_floor_right][batchIdx][rIdx] +
alpha_right*input[ind_ceil_right][batchIdx][rIdx]) -
(alpha_left*((ind_floor_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_floor_left-1][batchIdx][rIdx]) +
(1.0 - alpha_left)*((ind_ceil_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_ceil_left-1][batchIdx][rIdx]));
output[tokenIdx][batchIdx][rIdx] = S_output;
}
}
void TaLKConvEncoder(at::Tensor & input,
at::Tensor & offset_left, at::Tensor & offset_right,
int max_left, int max_right,
at::Tensor & output) {
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const dim3 blockSize(128);
const dim3 gridSize((length*batchSize*r_dim + blockSize.x - 1) / blockSize.x);
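// One thread per output element, 128-thread blocks covering length*batchSize*r_dim.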
AT_DISPATCH_FLOATING_TYPES_AND_HALF(output.scalar_type(), "gpu::TaLKConvEncoder", ([&] {
auto inputAcsr = input.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
auto offsetLeftAcsr = offset_left.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto offsetRightAcsr = offset_right.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto outputAcsr = output.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
scalar_t max_left_f = static_cast<scalar_t>(max_left);
scalar_t max_right_f = static_cast<scalar_t>(max_right);
hipLaunchKernelGGL(( TaLKConvEncoderKernel<scalar_t>), dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
inputAcsr, offsetLeftAcsr, offsetRightAcsr, max_left_f, max_right_f, outputAcsr);
}));
AT_CUDA_CHECK(hipGetLastError());
}
/**
input: [T, B, H, R]
offset_left: [T, B, H]
offset_right: [T, B, H]
output: [T, B, H, R]
**/
template <typename scalar_t>
__global__ void TaLKConvEncoderGradKernel(const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> input,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_left,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_right,
const scalar_t __restrict__ max_left,
const scalar_t __restrict__ max_right,
const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> output_grad,
scalar_t* __restrict__ input_grad,
scalar_t* __restrict__ offset_left_grad,
scalar_t* __restrict__ offset_right_grad){
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const int index = __umul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int rIdx = index % r_dim;
const int batchIdx = (index / r_dim) % batchSize;
const int tokenIdx = (index / r_dim) / batchSize;
if (batchIdx < batchSize and tokenIdx < length and rIdx < r_dim) {
const scalar_t left_off = static_cast<scalar_t>(offset_left[tokenIdx][batchIdx]);
const scalar_t right_off = static_cast<scalar_t>(offset_right[tokenIdx][batchIdx]);
const scalar_t true_left_off = clamp(tokenIdx - left_off * max_left, static_cast<scalar_t>(0.0), static_cast<scalar_t>(length-1));
const scalar_t true_right_off = clamp(tokenIdx + right_off * max_right, static_cast<scalar_t>(0.0), static_cast<scalar_t>(length-1));
const int32_t ind_floor_left = clamp(static_cast<int32_t>(floor(true_left_off)), 0, length-1);
const int32_t ind_ceil_left = clamp(static_cast<int32_t>(ceil(true_left_off)), 0, length-1);
const int32_t ind_floor_right = clamp(static_cast<int32_t>(floor(true_right_off)), 0, length-1);
const int32_t ind_ceil_right = clamp(static_cast<int32_t>(ceil(true_right_off)), 0, length-1);
const scalar_t alpha_left = ind_ceil_left - true_left_off;
const scalar_t alpha_right = true_right_off - ind_floor_right;
const scalar_t gradOutValue = output_grad[tokenIdx][batchIdx][rIdx];
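// Gradients are scattered with atomic adds because several output positions can
// read (and therefore back-propagate into) the same input element and offset.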
if (ind_floor_left-1 >= 0) {
fastAtomicAdd(input_grad, (((ind_floor_left-1) * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, static_cast<scalar_t>(-alpha_left * gradOutValue), true);
}
if (ind_ceil_left-1 >= 0){
fastAtomicAdd(input_grad, (((ind_ceil_left-1) * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, static_cast<scalar_t>(-(1.0 - alpha_left) * gradOutValue), true);
}
fastAtomicAdd(input_grad, ((ind_floor_right * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, static_cast<scalar_t>((1.0 - alpha_right) * gradOutValue), true);
fastAtomicAdd(input_grad, ((ind_ceil_right * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, static_cast<scalar_t>(alpha_right * gradOutValue), true);
const scalar_t gradOffset_left_floor = ((ind_floor_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_floor_left-1][batchIdx][rIdx]) * max_left;
const scalar_t gradOffset_left_ceil = ((ind_ceil_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_ceil_left-1][batchIdx][rIdx]) * (-max_left);
const scalar_t gradOffset_right_floor = input[ind_floor_right][batchIdx][rIdx] * (-max_right);
const scalar_t gradOffset_right_ceil = input[ind_ceil_right][batchIdx][rIdx] * max_right;
const scalar_t grad_Offset_left = gradOffset_left_floor + gradOffset_left_ceil;
const scalar_t grad_Offset_right = gradOffset_right_floor + gradOffset_right_ceil;
fastAtomicAdd(offset_left_grad, (tokenIdx * batchSize + batchIdx), batchSize*length, static_cast<scalar_t>(-grad_Offset_left * gradOutValue), true);
fastAtomicAdd(offset_right_grad, (tokenIdx * batchSize + batchIdx), batchSize*length, static_cast<scalar_t>(grad_Offset_right * gradOutValue), true);
}
}
void TaLKConvEncoderGrad(at::Tensor & input,
at::Tensor & offset_left, at::Tensor & offset_right,
int max_left, int max_right,
at::Tensor & output_grad, at::Tensor & input_grad,
at::Tensor & offset_left_grad, at::Tensor & offset_right_grad) {
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const dim3 blockSize(128);
const dim3 gridSize((length*batchSize*r_dim + blockSize.x - 1) / blockSize.x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(output_grad.scalar_type(), "gpu::TaLKConvEncoderGrad", ([&] {
auto inputAcsr = input.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
auto offsetLeftAcsr = offset_left.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto offsetRightAcsr = offset_right.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto outputGradAcsr = output_grad.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
scalar_t max_left_f = static_cast<scalar_t>(max_left);
scalar_t max_right_f = static_cast<scalar_t>(max_right);
hipLaunchKernelGGL(( TaLKConvEncoderGradKernel<scalar_t>), dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
inputAcsr, offsetLeftAcsr, offsetRightAcsr, max_left_f, max_right_f, outputGradAcsr,
input_grad.data<scalar_t>(), offset_left_grad.data<scalar_t>(), offset_right_grad.data<scalar_t>());
}));
AT_CUDA_CHECK(hipGetLastError());
}
/**
input: [T, B, H, R]
offset_left: [T, B, H]
output: [T, B, H, R]
**/
template <typename scalar_t>
__global__ void TaLKConvDecoderKernel(const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> input,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_left,
const scalar_t __restrict__ max_left,
at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> output){
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const int index = __umul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int rIdx = index % r_dim;
const int batchIdx = (index / r_dim) % batchSize;
const int tokenIdx = (index / r_dim) / batchSize;
if (batchIdx < batchSize and tokenIdx < length and rIdx < r_dim) {
const scalar_t left_off = static_cast<scalar_t>(offset_left[tokenIdx][batchIdx]);
const scalar_t true_left_off = clamp(tokenIdx - left_off * max_left, static_cast<scalar_t>(0.0), static_cast<scalar_t>(length-1));
const int32_t ind_floor_left = clamp(static_cast<int32_t>(floor(true_left_off)), 0, length-1);
const int32_t ind_ceil_left = clamp(static_cast<int32_t>(ceil(true_left_off)), 0, length-1);
const scalar_t alpha_left = ind_ceil_left - true_left_off;
const scalar_t S_output = input[tokenIdx][batchIdx][rIdx] -
(alpha_left*((ind_floor_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_floor_left-1][batchIdx][rIdx]) +
(1.0 - alpha_left)*((ind_ceil_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_ceil_left-1][batchIdx][rIdx]));
output[tokenIdx][batchIdx][rIdx] = S_output;
}
}
void TaLKConvDecoder(at::Tensor & input,
at::Tensor & offset_left,
int max_left,
at::Tensor & output) {
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const dim3 blockSize(128);
const dim3 gridSize((length*batchSize*r_dim + blockSize.x - 1) / blockSize.x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(output.scalar_type(), "gpu::TaLKConvDecoder", ([&] {
auto inputAcsr = input.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
auto offsetLeftAcsr = offset_left.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto outputAcsr = output.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
scalar_t max_left_f = static_cast<scalar_t>(max_left);
hipLaunchKernelGGL(( TaLKConvDecoderKernel<scalar_t>), dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
inputAcsr, offsetLeftAcsr, max_left_f, outputAcsr);
}));
AT_CUDA_CHECK(hipGetLastError());
}
/**
input: [T, B, H, R]
offset_left: [T, B, H]
output: [T, B, H, R]
**/
template <typename scalar_t>
__global__ void TaLKConvDecoderGradKernel(const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> input,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_left,
const scalar_t __restrict__ max_left,
const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> output_grad,
scalar_t* __restrict__ input_grad,
scalar_t* __restrict__ offset_left_grad){
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const int index = __umul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int rIdx = index % r_dim;
const int batchIdx = (index / r_dim) % batchSize;
const int tokenIdx = (index / r_dim) / batchSize;
if (batchIdx < batchSize and tokenIdx < length and rIdx < r_dim) {
const scalar_t left_off = static_cast<scalar_t>(offset_left[tokenIdx][batchIdx]);
const scalar_t true_left_off = clamp(tokenIdx - left_off * max_left, static_cast<scalar_t>(0.0), static_cast<scalar_t>(length-1));
const int32_t ind_floor_left = clamp(static_cast<int32_t>(floor(true_left_off)), 0, length-1);
const int32_t ind_ceil_left = clamp(static_cast<int32_t>(ceil(true_left_off)), 0, length-1);
const scalar_t alpha_left = ind_ceil_left - true_left_off;
const scalar_t gradOutValue = output_grad[tokenIdx][batchIdx][rIdx];
if (ind_floor_left-1 >= 0) {
fastAtomicAdd(input_grad, (((ind_floor_left-1) * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, static_cast<scalar_t>(-alpha_left * gradOutValue), true);
}
if (ind_ceil_left-1 >= 0){
fastAtomicAdd(input_grad, (((ind_ceil_left-1) * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, static_cast<scalar_t>(-(1.0 - alpha_left) * gradOutValue), true);
}
fastAtomicAdd(input_grad, ((tokenIdx * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, gradOutValue, true);
const scalar_t gradOffset_left_floor = ((ind_floor_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_floor_left-1][batchIdx][rIdx]) * max_left;
const scalar_t gradOffset_left_ceil = ((ind_ceil_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_ceil_left-1][batchIdx][rIdx]) * (-max_left);
const scalar_t grad_Offset_left = gradOffset_left_floor + gradOffset_left_ceil;
fastAtomicAdd(offset_left_grad, (tokenIdx * batchSize + batchIdx), batchSize*length, static_cast<scalar_t>(-grad_Offset_left * gradOutValue), true);
}
}
void TaLKConvDecoderGrad(at::Tensor & input,
at::Tensor & offset_left,
int max_left,
at::Tensor & output_grad, at::Tensor & input_grad,
at::Tensor & offset_left_grad) {
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const dim3 blockSize(128);
const dim3 gridSize((length*batchSize*r_dim + blockSize.x - 1) / blockSize.x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(output_grad.scalar_type(), "gpu::TaLKConvDecoderGrad", ([&] {
auto inputAcsr = input.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
auto offsetLeftAcsr = offset_left.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto outputGradAcsr = output_grad.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
scalar_t max_left_f = static_cast<scalar_t>(max_left);
hipLaunchKernelGGL(( TaLKConvDecoderGradKernel<scalar_t>), dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
inputAcsr, offsetLeftAcsr, max_left_f, outputGradAcsr,
input_grad.data<scalar_t>(), offset_left_grad.data<scalar_t>());
}));
AT_CUDA_CHECK(hipGetLastError());
}
/**
input: [T, B, H, R]
offset_left: [T, B, H]
output: [T, B, H, R]
**/
template <typename scalar_t>
__global__ void TaLKConvDecoderInferenceKernel(const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> input,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_left,
const scalar_t __restrict__ max_left,
at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> output){
const int length = offset_left.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const int index = __umul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int rIdx = index % r_dim;
const int batchIdx = (index / r_dim) % batchSize;
const int tokenIdx = (index / r_dim) / batchSize;
if (batchIdx < batchSize and tokenIdx < length and rIdx < r_dim) {
const int last_id = input.size(0)-1;
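// Incremental decoding: the cached input is always read at its newest time step
// (last_id), while tokenIdx indexes the offsets/output of the step(s) currently
// being generated (length comes from offset_left, not from the cached input).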
const scalar_t left_off = static_cast<scalar_t>(offset_left[tokenIdx][batchIdx]);
const scalar_t true_left_off = clamp(last_id - left_off * max_left, static_cast<scalar_t>(0.0), static_cast<scalar_t>(last_id)); // - max_left - static_cast<scalar_t>(1.0))
const int32_t ind_floor_left = clamp(static_cast<int32_t>(floor(true_left_off)), 0, last_id);
const int32_t ind_ceil_left = clamp(static_cast<int32_t>(ceil(true_left_off)), 0, last_id);
const scalar_t alpha_left = ind_ceil_left - true_left_off;
const scalar_t S_output = input[last_id][batchIdx][rIdx] -
(alpha_left*((ind_floor_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_floor_left-1][batchIdx][rIdx]) +
(1.0 - alpha_left)*((ind_ceil_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_ceil_left-1][batchIdx][rIdx]));
output[tokenIdx][batchIdx][rIdx] = S_output;
}
}
void TaLKConvDecoderInference(at::Tensor & input,
at::Tensor & offset_left,
int max_left,
at::Tensor & output) {
const int length = offset_left.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const dim3 blockSize(128);
const dim3 gridSize((length*batchSize*r_dim + blockSize.x - 1) / blockSize.x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(output.scalar_type(), "gpu::TaLKConvDecoderInference", ([&] {
auto inputAcsr = input.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
auto offsetLeftAcsr = offset_left.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto outputAcsr = output.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
scalar_t max_left_f = static_cast<scalar_t>(max_left);
hipLaunchKernelGGL(( TaLKConvDecoderInferenceKernel<scalar_t>), dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
inputAcsr, offsetLeftAcsr, max_left_f, outputAcsr);
}));
AT_CUDA_CHECK(hipGetLastError());
}
}
|
e159c933cd27f0cc3ce2c8d46c4f38ee5b7c204e.cu
|
#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <THC/THC.h>
#include "THC/THCDeviceUtils.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <assert.h>
#include <cmath>
namespace gpu {
template <typename scalar_t>
__device__ float clamp(scalar_t x, scalar_t a, scalar_t b)
{
return max(a, min(b, x));
}
template <
typename scalar_t,
typename std::enable_if<std::is_same<c10::Half, scalar_t>::value>::type* =
nullptr>
__device__ __forceinline__ void fastSpecializedAtomicAdd(
scalar_t* tensor,
size_t index,
const size_t numel,
scalar_t value) {
#if ( \
(CUDA_VERSION < 10000) || \
(defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)))
atomicAdd(
reinterpret_cast<at::Half*>(tensor) + index,
static_cast<at::Half>(value));
#else
bool low_bit = (index % 2 == 0) &&
(reinterpret_cast<std::uintptr_t>(tensor) % sizeof(__half2) == 0);
if (low_bit && index < (numel - 1)) {
__half2 value2;
value2.x = value;
value2.y = __int2half_rz(0);
atomicAdd(reinterpret_cast<__half2*>(tensor) + index / 2, value2);
} else if (!low_bit && index > 0) {
__half2 value2;
value2.x = __int2half_rz(0);
value2.y = value;
atomicAdd(reinterpret_cast<__half2*>(tensor) + index / 2, value2);
} else {
atomicAdd(
reinterpret_cast<__half*>(tensor) + index, static_cast<__half>(value));
}
#endif
}
template <
typename scalar_t,
typename std::enable_if<!std::is_same<c10::Half, scalar_t>::value>::type* =
nullptr>
__device__ __forceinline__ void fastSpecializedAtomicAdd(
scalar_t* tensor,
size_t index,
const size_t numel,
scalar_t value) {
atomicAdd(tensor + index, value);
}
template <class scalar_t>
__device__ __forceinline__ void fastAtomicAdd(
scalar_t* tensor,
size_t index,
const size_t numel,
scalar_t value,
bool fast_atomics) {
if (fast_atomics) {
fastSpecializedAtomicAdd(tensor, index, numel, value);
} else {
atomicAdd(tensor + index, value);
}
}
/**
input: [T, B, H, R]
offset_left: [T, B, H]
offset_right: [T, B, H]
output: [T, B, H, R]
**/
template <typename scalar_t>
__global__ void TaLKConvEncoderKernel(const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> input,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_left,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_right,
const scalar_t __restrict__ max_left,
const scalar_t __restrict__ max_right,
at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> output){
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const int index = __umul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int rIdx = index % r_dim;
const int batchIdx = (index / r_dim) % batchSize;
const int tokenIdx = (index / r_dim) / batchSize;
if (batchIdx < batchSize and tokenIdx < length and rIdx < r_dim) {
const scalar_t left_off = static_cast<scalar_t>(offset_left[tokenIdx][batchIdx]);
const scalar_t right_off = static_cast<scalar_t>(offset_right[tokenIdx][batchIdx]);
const scalar_t true_left_off = clamp(tokenIdx - left_off * max_left, static_cast<scalar_t>(0.0), static_cast<scalar_t>(length-1));
const scalar_t true_right_off = clamp(tokenIdx + right_off * max_right, static_cast<scalar_t>(0.0), static_cast<scalar_t>(length-1));
const int32_t ind_floor_left = clamp(static_cast<int32_t>(floor(true_left_off)), 0, length-1);
const int32_t ind_ceil_left = clamp(static_cast<int32_t>(ceil(true_left_off)), 0, length-1);
const int32_t ind_floor_right = clamp(static_cast<int32_t>(floor(true_right_off)), 0, length-1);
const int32_t ind_ceil_right = clamp(static_cast<int32_t>(ceil(true_right_off)), 0, length-1);
const scalar_t alpha_left = ind_ceil_left - true_left_off;
const scalar_t alpha_right = true_right_off - ind_floor_right;
const scalar_t S_output = ((1.0 - alpha_right)*input[ind_floor_right][batchIdx][rIdx] +
alpha_right*input[ind_ceil_right][batchIdx][rIdx]) -
(alpha_left*((ind_floor_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_floor_left-1][batchIdx][rIdx]) +
(1.0 - alpha_left)*((ind_ceil_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_ceil_left-1][batchIdx][rIdx]));
output[tokenIdx][batchIdx][rIdx] = S_output;
}
}
void TaLKConvEncoder(at::Tensor & input,
at::Tensor & offset_left, at::Tensor & offset_right,
int max_left, int max_right,
at::Tensor & output) {
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const dim3 blockSize(128);
const dim3 gridSize((length*batchSize*r_dim + blockSize.x - 1) / blockSize.x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(output.scalar_type(), "gpu::TaLKConvEncoder", ([&] {
auto inputAcsr = input.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
auto offsetLeftAcsr = offset_left.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto offsetRightAcsr = offset_right.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto outputAcsr = output.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
scalar_t max_left_f = static_cast<scalar_t>(max_left);
scalar_t max_right_f = static_cast<scalar_t>(max_right);
TaLKConvEncoderKernel<scalar_t><<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>> (
inputAcsr, offsetLeftAcsr, offsetRightAcsr, max_left_f, max_right_f, outputAcsr);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
/**
input: [T, B, H, R]
offset_left: [T, B, H]
offset_right: [T, B, H]
output: [T, B, H, R]
**/
template <typename scalar_t>
__global__ void TaLKConvEncoderGradKernel(const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> input,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_left,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_right,
const scalar_t __restrict__ max_left,
const scalar_t __restrict__ max_right,
const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> output_grad,
scalar_t* __restrict__ input_grad,
scalar_t* __restrict__ offset_left_grad,
scalar_t* __restrict__ offset_right_grad){
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const int index = __umul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int rIdx = index % r_dim;
const int batchIdx = (index / r_dim) % batchSize;
const int tokenIdx = (index / r_dim) / batchSize;
if (batchIdx < batchSize and tokenIdx < length and rIdx < r_dim) {
const scalar_t left_off = static_cast<scalar_t>(offset_left[tokenIdx][batchIdx]);
const scalar_t right_off = static_cast<scalar_t>(offset_right[tokenIdx][batchIdx]);
const scalar_t true_left_off = clamp(tokenIdx - left_off * max_left, static_cast<scalar_t>(0.0), static_cast<scalar_t>(length-1));
const scalar_t true_right_off = clamp(tokenIdx + right_off * max_right, static_cast<scalar_t>(0.0), static_cast<scalar_t>(length-1));
const int32_t ind_floor_left = clamp(static_cast<int32_t>(floor(true_left_off)), 0, length-1);
const int32_t ind_ceil_left = clamp(static_cast<int32_t>(ceil(true_left_off)), 0, length-1);
const int32_t ind_floor_right = clamp(static_cast<int32_t>(floor(true_right_off)), 0, length-1);
const int32_t ind_ceil_right = clamp(static_cast<int32_t>(ceil(true_right_off)), 0, length-1);
const scalar_t alpha_left = ind_ceil_left - true_left_off;
const scalar_t alpha_right = true_right_off - ind_floor_right;
const scalar_t gradOutValue = output_grad[tokenIdx][batchIdx][rIdx];
if (ind_floor_left-1 >= 0) {
fastAtomicAdd(input_grad, (((ind_floor_left-1) * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, static_cast<scalar_t>(-alpha_left * gradOutValue), true);
}
if (ind_ceil_left-1 >= 0){
fastAtomicAdd(input_grad, (((ind_ceil_left-1) * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, static_cast<scalar_t>(-(1.0 - alpha_left) * gradOutValue), true);
}
fastAtomicAdd(input_grad, ((ind_floor_right * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, static_cast<scalar_t>((1.0 - alpha_right) * gradOutValue), true);
fastAtomicAdd(input_grad, ((ind_ceil_right * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, static_cast<scalar_t>(alpha_right * gradOutValue), true);
const scalar_t gradOffset_left_floor = ((ind_floor_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_floor_left-1][batchIdx][rIdx]) * max_left;
const scalar_t gradOffset_left_ceil = ((ind_ceil_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_ceil_left-1][batchIdx][rIdx]) * (-max_left);
const scalar_t gradOffset_right_floor = input[ind_floor_right][batchIdx][rIdx] * (-max_right);
const scalar_t gradOffset_right_ceil = input[ind_ceil_right][batchIdx][rIdx] * max_right;
const scalar_t grad_Offset_left = gradOffset_left_floor + gradOffset_left_ceil;
const scalar_t grad_Offset_right = gradOffset_right_floor + gradOffset_right_ceil;
fastAtomicAdd(offset_left_grad, (tokenIdx * batchSize + batchIdx), batchSize*length, static_cast<scalar_t>(-grad_Offset_left * gradOutValue), true);
fastAtomicAdd(offset_right_grad, (tokenIdx * batchSize + batchIdx), batchSize*length, static_cast<scalar_t>(grad_Offset_right * gradOutValue), true);
}
}
void TaLKConvEncoderGrad(at::Tensor & input,
at::Tensor & offset_left, at::Tensor & offset_right,
int max_left, int max_right,
at::Tensor & output_grad, at::Tensor & input_grad,
at::Tensor & offset_left_grad, at::Tensor & offset_right_grad) {
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const dim3 blockSize(128);
const dim3 gridSize((length*batchSize*r_dim + blockSize.x - 1) / blockSize.x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(output_grad.scalar_type(), "gpu::TaLKConvEncoderGrad", ([&] {
auto inputAcsr = input.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
auto offsetLeftAcsr = offset_left.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto offsetRightAcsr = offset_right.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto outputGradAcsr = output_grad.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
scalar_t max_left_f = static_cast<scalar_t>(max_left);
scalar_t max_right_f = static_cast<scalar_t>(max_right);
TaLKConvEncoderGradKernel<scalar_t><<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>> (
inputAcsr, offsetLeftAcsr, offsetRightAcsr, max_left_f, max_right_f, outputGradAcsr,
input_grad.data<scalar_t>(), offset_left_grad.data<scalar_t>(), offset_right_grad.data<scalar_t>());
}));
AT_CUDA_CHECK(cudaGetLastError());
}
/**
input: [T, B, H, R]
offset_left: [T, B, H]
output: [T, B, H, R]
**/
template <typename scalar_t>
__global__ void TaLKConvDecoderKernel(const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> input,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_left,
const scalar_t __restrict__ max_left,
at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> output){
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const int index = __umul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int rIdx = index % r_dim;
const int batchIdx = (index / r_dim) % batchSize;
const int tokenIdx = (index / r_dim) / batchSize;
if (batchIdx < batchSize and tokenIdx < length and rIdx < r_dim) {
const scalar_t left_off = static_cast<scalar_t>(offset_left[tokenIdx][batchIdx]);
const scalar_t true_left_off = clamp(tokenIdx - left_off * max_left, static_cast<scalar_t>(0.0), static_cast<scalar_t>(length-1));
const int32_t ind_floor_left = clamp(static_cast<int32_t>(floor(true_left_off)), 0, length-1);
const int32_t ind_ceil_left = clamp(static_cast<int32_t>(ceil(true_left_off)), 0, length-1);
const scalar_t alpha_left = ind_ceil_left - true_left_off;
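// Causal (decoder) variant: there is no right offset, the window always ends at
// the current token, so the result is input[tokenIdx] minus the interpolated
// value just before the left boundary.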
const scalar_t S_output = input[tokenIdx][batchIdx][rIdx] -
(alpha_left*((ind_floor_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_floor_left-1][batchIdx][rIdx]) +
(1.0 - alpha_left)*((ind_ceil_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_ceil_left-1][batchIdx][rIdx]));
output[tokenIdx][batchIdx][rIdx] = S_output;
}
}
void TaLKConvDecoder(at::Tensor & input,
at::Tensor & offset_left,
int max_left,
at::Tensor & output) {
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const dim3 blockSize(128);
const dim3 gridSize((length*batchSize*r_dim + blockSize.x - 1) / blockSize.x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(output.scalar_type(), "gpu::TaLKConvDecoder", ([&] {
auto inputAcsr = input.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
auto offsetLeftAcsr = offset_left.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto outputAcsr = output.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
scalar_t max_left_f = static_cast<scalar_t>(max_left);
TaLKConvDecoderKernel<scalar_t><<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>> (
inputAcsr, offsetLeftAcsr, max_left_f, outputAcsr);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
/**
input: [T, B, H, R]
offset_left: [T, B, H]
output: [T, B, H, R]
**/
template <typename scalar_t>
__global__ void TaLKConvDecoderGradKernel(const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> input,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_left,
const scalar_t __restrict__ max_left,
const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> output_grad,
scalar_t* __restrict__ input_grad,
scalar_t* __restrict__ offset_left_grad){
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const int index = __umul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int rIdx = index % r_dim;
const int batchIdx = (index / r_dim) % batchSize;
const int tokenIdx = (index / r_dim) / batchSize;
if (batchIdx < batchSize and tokenIdx < length and rIdx < r_dim) {
const scalar_t left_off = static_cast<scalar_t>(offset_left[tokenIdx][batchIdx]);
const scalar_t true_left_off = clamp(tokenIdx - left_off * max_left, static_cast<scalar_t>(0.0), static_cast<scalar_t>(length-1));
const int32_t ind_floor_left = clamp(static_cast<int32_t>(floor(true_left_off)), 0, length-1);
const int32_t ind_ceil_left = clamp(static_cast<int32_t>(ceil(true_left_off)), 0, length-1);
const scalar_t alpha_left = ind_ceil_left - true_left_off;
const scalar_t gradOutValue = output_grad[tokenIdx][batchIdx][rIdx];
if (ind_floor_left-1 >= 0) {
fastAtomicAdd(input_grad, (((ind_floor_left-1) * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, static_cast<scalar_t>(-alpha_left * gradOutValue), true);
}
if (ind_ceil_left-1 >= 0){
fastAtomicAdd(input_grad, (((ind_ceil_left-1) * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, static_cast<scalar_t>(-(1.0 - alpha_left) * gradOutValue), true);
}
fastAtomicAdd(input_grad, ((tokenIdx * batchSize + batchIdx) * r_dim + rIdx), batchSize*length*r_dim, gradOutValue, true);
const scalar_t gradOffset_left_floor = ((ind_floor_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_floor_left-1][batchIdx][rIdx]) * max_left;
const scalar_t gradOffset_left_ceil = ((ind_ceil_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_ceil_left-1][batchIdx][rIdx]) * (-max_left);
const scalar_t grad_Offset_left = gradOffset_left_floor + gradOffset_left_ceil;
fastAtomicAdd(offset_left_grad, (tokenIdx * batchSize + batchIdx), batchSize*length, static_cast<scalar_t>(-grad_Offset_left * gradOutValue), true);
}
}
void TaLKConvDecoderGrad(at::Tensor & input,
at::Tensor & offset_left,
int max_left,
at::Tensor & output_grad, at::Tensor & input_grad,
at::Tensor & offset_left_grad) {
const int length = input.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const dim3 blockSize(128);
const dim3 gridSize((length*batchSize*r_dim + blockSize.x - 1) / blockSize.x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(output_grad.scalar_type(), "gpu::TaLKConvDecoderGrad", ([&] {
auto inputAcsr = input.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
auto offsetLeftAcsr = offset_left.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto outputGradAcsr = output_grad.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
scalar_t max_left_f = static_cast<scalar_t>(max_left);
TaLKConvDecoderGradKernel<scalar_t><<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>> (
inputAcsr, offsetLeftAcsr, max_left_f, outputGradAcsr,
input_grad.data<scalar_t>(), offset_left_grad.data<scalar_t>());
}));
AT_CUDA_CHECK(cudaGetLastError());
}
/**
input: [T, B, H, R]
offset_left: [T, B, H]
output: [T, B, H, R]
**/
template <typename scalar_t>
__global__ void TaLKConvDecoderInferenceKernel(const at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> input,
const at::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> offset_left,
const scalar_t __restrict__ max_left,
at::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits> output){
const int length = offset_left.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const int index = __umul24(blockDim.x, blockIdx.x) + threadIdx.x;
const int rIdx = index % r_dim;
const int batchIdx = (index / r_dim) % batchSize;
const int tokenIdx = (index / r_dim) / batchSize;
if (batchIdx < batchSize and tokenIdx < length and rIdx < r_dim) {
const int last_id = input.size(0)-1;
const scalar_t left_off = static_cast<scalar_t>(offset_left[tokenIdx][batchIdx]);
const scalar_t true_left_off = clamp(last_id - left_off * max_left, static_cast<scalar_t>(0.0), static_cast<scalar_t>(last_id)); // - max_left - static_cast<scalar_t>(1.0))
const int32_t ind_floor_left = clamp(static_cast<int32_t>(floor(true_left_off)), 0, last_id);
const int32_t ind_ceil_left = clamp(static_cast<int32_t>(ceil(true_left_off)), 0, last_id);
const scalar_t alpha_left = ind_ceil_left - true_left_off;
const scalar_t S_output = input[last_id][batchIdx][rIdx] -
(alpha_left*((ind_floor_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_floor_left-1][batchIdx][rIdx]) +
(1.0 - alpha_left)*((ind_ceil_left-1 < 0)?static_cast<scalar_t>(0.0):input[ind_ceil_left-1][batchIdx][rIdx]));
output[tokenIdx][batchIdx][rIdx] = S_output;
}
}
void TaLKConvDecoderInference(at::Tensor & input,
at::Tensor & offset_left,
int max_left,
at::Tensor & output) {
const int length = offset_left.size(0);
const int batchSize = input.size(1);
const int r_dim = input.size(2);
const dim3 blockSize(128);
const dim3 gridSize((length*batchSize*r_dim + blockSize.x - 1) / blockSize.x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(output.scalar_type(), "gpu::TaLKConvDecoderInference", ([&] {
auto inputAcsr = input.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
auto offsetLeftAcsr = offset_left.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>();
auto outputAcsr = output.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
scalar_t max_left_f = static_cast<scalar_t>(max_left);
TaLKConvDecoderInferenceKernel<scalar_t><<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>> (
inputAcsr, offsetLeftAcsr, max_left_f, outputAcsr);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
}
|
e643d3315f606c10de8b72c69fdc6196dc6b8804.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "PDEsolve.h"
#define WINDOWBC windowBC(i,j,k)
//#define WINDOWBC fiveWindowsBC(i,j,k) // need to change domain dimensions and geometry for window spacing
// Comment out if not using PDMS layer
#define PDMS
#define INTERFACE 5 // make sure this corresponds to number of nodes in PDMS layer
// Solves PDE du/ds = lapl(u) + alpha*(ub-u) - beta*u/(km+u)
// GPU Index Evaluation
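// Out-of-range indices are mirrored back into the domain (e.g. i == -1 -> 1,
// i == Nx -> Nx-2), which realises the zero-flux boundaries used by the
// central differences below.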
__device__ int at(int i,int j,int k)
{
if (i == -1) i+=2;
else if (i == Nx) i-=2;
if (j == -1) j+=2;
else if (j == Ny) j-=2;
if (k == -1) k+=2;
else if (k == Nz) k-=2;
return i+Nx*(j+Ny*k);
}
// GPU Central Difference
__device__ float CDM(float* u,int i,int j,int k)
{
return gam*(u[at(i+1,j,k)]-2*u[at(i,j,k)]+u[at(i-1,j,k)])/dx/dx
+ (u[at(i,j+1,k)]-2*u[at(i,j,k)]+u[at(i,j-1,k)])/dy/dy
+ (u[at(i,j,k+1)]-2*u[at(i,j,k)]+u[at(i,j,k-1)])/dz/dz;
}
// Central Difference at Interface (isotropic only: gamma==1.0)
__device__ float CDMi(float* u,int i,int j,int k)
{
return (1+sigma)*((u[at(i+1,j,k)]-2*u[at(i,j,k)]+u[at(i-1,j,k)])/dx/dx/2
+ (u[at(i,j+1,k)]-2*u[at(i,j,k)]+u[at(i,j-1,k)])/dy/dy/2)
+ (u[at(i,j,k+1)]-u[at(i,j,k)])/dz/dz
+ sigma*(u[at(i,j,k-1)]-u[at(i,j,k)])/dz/dz;
}
// GPU Implicit-Explicit
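// IMEX update: diffusion and the Michaelis-Menten sink are treated explicitly,
// while the linear exchange term alpha*u is treated implicitly (hence the
// division by 1 + alpha*dt).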
__device__ void imex(float* u_old,float* u_new,float BC,int i,int j,int k)
{
// Apply Boundary Conditions
int n = at(i,j,k);
// Fixed Value BC at Window
if (WINDOWBC)
{
u_new[n] = BC;
}
#ifdef PDMS
// Interface B.C.
else if (k==INTERFACE)
{
u_new[n] = (u_old[n] + dt*lambda/(lambda+sigma)*(2*CDMi(u_old,i,j,k)
+ alpha*ub - beta*u_old[at(i,j,k)]/(km+u_old[at(i,j,k)])))/(1+alpha*dt*lambda/(lambda+sigma));
}
// PDMS and Zero Flux Boundaries
else if (k<INTERFACE&&k>0)
{
u_new[n] = (u_old[n] + dt*(lambda*CDM(u_old,i,j,k)))/(1+alpha*dt);
}
#endif
else // Tissue and Zero Flux Boundaries
{
u_new[n] = (u_old[n] + dt*(CDM(u_old,i,j,k) + alpha*ub - beta*u_old[at(i,j,k)]/(km+u_old[at(i,j,k)])))/(1+alpha*dt);
}
}
// GPU Time Step Kernel
__global__ void step(float* u_old,float* u_new,float BC,model mdl,grid grd,geometry geo)
{
// Set GPU Variables
alpha = mdl.alpha; ub = mdl.ub;
beta = mdl.gamma; km = mdl.km;
gam = mdl.gamma; lambda = mdl.lambda;
sigma = mdl.sigma;
dt = grd.dt;
Nx = grd.Nx; dx = grd.dx;
Ny = grd.Ny; dy = grd.dy;
Nz = grd.Nz; dz = grd.dz;
L = geo.L; l = geo.l;
H = geo.H; h = geo.h;
xs = geo.xs; ys = geo.ys;
// Determine Position within array
int ATOM_SIZE_X = Nx/(blockDim.x*gridDim.x);
int ATOM_SIZE_Y = Ny/(blockDim.y*gridDim.y);
int ATOM_SIZE_Z = Nz/(blockDim.z*gridDim.z);
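// Each thread owns a contiguous ATOM_SIZE_X x ATOM_SIZE_Y x ATOM_SIZE_Z block of
// grid nodes; the integer divisions assume Nx, Ny, Nz are exact multiples of the
// corresponding launch dimensions.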
int i0 = (threadIdx.x+blockIdx.x*blockDim.x)*ATOM_SIZE_X;
int j0 = (threadIdx.y+blockIdx.y*blockDim.y)*ATOM_SIZE_Y;
int k0 = (threadIdx.z+blockIdx.z*blockDim.z)*ATOM_SIZE_Z;
int i,j,k,n;
for (i = i0; i < i0+ATOM_SIZE_X; i++)
{
for (j = j0; j < j0+ATOM_SIZE_Y; j++)
{
for (k = k0; k < k0+ATOM_SIZE_Z; k++)
{
// Call Time Scheme
imex(u_old,u_new,BC,i,j,k);
}
}
}
__syncthreads();
for (i = i0; i < i0+ATOM_SIZE_X; i++)
{
for (j = j0; j < j0+ATOM_SIZE_Y; j++)
{
for (k = k0; k < k0+ATOM_SIZE_Z; k++)
{
n = i+Nx*(j+Ny*k);
u_old[n] = u_new[n];
}
}
}
__syncthreads();
}
// Boundary Conditions
// Window Boundary Condition
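// True on the k == 0 face inside a window of relative width l/L and height h/L
// centred in the middle of the domain cross-section.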
__device__ bool windowBC(int i,int j,int k)
{
// Relative Window Dimensions
float l_ = l/L,h_ = h/L;
float a = H/L;
return abs(2*i*dx-1)<=l_ & abs(2*j*dy-a)<= h_ & k==0;
}
// Five Windows Boundary Condition
__device__ bool fiveWindowsBC(int i,int j,int k)
{
// Domain Dimensions (cm)
// float W = 0.52f,H = 0.44f;
// Window Dimensions (cm)
// float w = 0.04f,h = 0.02f;
// Window Spacing (cm)
// float xs = 0.1f,ys = 0.2f;
// Relative Window Dimensions
float l_ = l/L,h_ = h/L;
float xs_ = xs/L,ys_ = ys/L;
float a = H/L;
// Windows
float x0[] = {(1+l_+xs_)/2,(1-l_-xs_)/2,0.5f-l_-xs_,0.5f,0.5f+l_+xs_};
float y0[] = {(a+h_+ys_)/2,(a+h_+ys_)/2,(a-h_-ys_)/2,(a-h_-ys_)/2,(a-h_-ys_)/2};
bool window = false;
for (int win = 0; win < 5; win++)
{
window = window | abs(2*(i*dx-x0[win]))<=l_ & abs(2*(j*dy-y0[win]))<=h_ & k==0;
}
return window;
}
|
e643d3315f606c10de8b72c69fdc6196dc6b8804.cu
|
#include "PDEsolve.h"
#define WINDOWBC windowBC(i,j,k)
//#define WINDOWBC fiveWindowsBC(i,j,k) // need to change domain dimensions and geometry for window spacing
// Comment out if not using PDMS layer
#define PDMS
#define INTERFACE 5 // make sure this corresponds to number of nodes in PDMS layer
// Solves PDE du/ds = lapl(u) + alpha*(ub-u) - beta*u/(km+u)
// GPU Index Evaluation
__device__ int at(int i,int j,int k)
{
if (i == -1) i+=2;
else if (i == Nx) i-=2;
if (j == -1) j+=2;
else if (j == Ny) j-=2;
if (k == -1) k+=2;
else if (k == Nz) k-=2;
return i+Nx*(j+Ny*k);
}
// GPU Central Difference
__device__ float CDM(float* u,int i,int j,int k)
{
return gam*(u[at(i+1,j,k)]-2*u[at(i,j,k)]+u[at(i-1,j,k)])/dx/dx
+ (u[at(i,j+1,k)]-2*u[at(i,j,k)]+u[at(i,j-1,k)])/dy/dy
+ (u[at(i,j,k+1)]-2*u[at(i,j,k)]+u[at(i,j,k-1)])/dz/dz;
}
// Central Difference at Interface (isotropic only: gamma==1.0)
__device__ float CDMi(float* u,int i,int j,int k)
{
return (1+sigma)*((u[at(i+1,j,k)]-2*u[at(i,j,k)]+u[at(i-1,j,k)])/dx/dx/2
+ (u[at(i,j+1,k)]-2*u[at(i,j,k)]+u[at(i,j-1,k)])/dy/dy/2)
+ (u[at(i,j,k+1)]-u[at(i,j,k)])/dz/dz
+ sigma*(u[at(i,j,k-1)]-u[at(i,j,k)])/dz/dz;
}
// GPU Implicit-Explicit
__device__ void imex(float* u_old,float* u_new,float BC,int i,int j,int k)
{
// Apply Boundary Conditions
int n = at(i,j,k);
// Fixed Value BC at Window
if (WINDOWBC)
{
u_new[n] = BC;
}
#ifdef PDMS
// Interface B.C.
else if (k==INTERFACE)
{
u_new[n] = (u_old[n] + dt*lambda/(lambda+sigma)*(2*CDMi(u_old,i,j,k)
+ alpha*ub - beta*u_old[at(i,j,k)]/(km+u_old[at(i,j,k)])))/(1+alpha*dt*lambda/(lambda+sigma));
}
// PDMS and Zero Flux Boundaries
else if (k<INTERFACE&&k>0)
{
u_new[n] = (u_old[n] + dt*(lambda*CDM(u_old,i,j,k)))/(1+alpha*dt);
}
#endif
else // Tissue and Zero Flux Boundaries
{
u_new[n] = (u_old[n] + dt*(CDM(u_old,i,j,k) + alpha*ub - beta*u_old[at(i,j,k)]/(km+u_old[at(i,j,k)])))/(1+alpha*dt);
}
}
// GPU Time Step Kernel
__global__ void step(float* u_old,float* u_new,float BC,model mdl,grid grd,geometry geo)
{
// Set GPU Variables
alpha = mdl.alpha; ub = mdl.ub;
beta = mdl.gamma; km = mdl.km;
gam = mdl.gamma; lambda = mdl.lambda;
sigma = mdl.sigma;
dt = grd.dt;
Nx = grd.Nx; dx = grd.dx;
Ny = grd.Ny; dy = grd.dy;
Nz = grd.Nz; dz = grd.dz;
L = geo.L; l = geo.l;
H = geo.H; h = geo.h;
xs = geo.xs; ys = geo.ys;
// Determine Position within array
int ATOM_SIZE_X = Nx/(blockDim.x*gridDim.x);
int ATOM_SIZE_Y = Ny/(blockDim.y*gridDim.y);
int ATOM_SIZE_Z = Nz/(blockDim.z*gridDim.z);
int i0 = (threadIdx.x+blockIdx.x*blockDim.x)*ATOM_SIZE_X;
int j0 = (threadIdx.y+blockIdx.y*blockDim.y)*ATOM_SIZE_Y;
int k0 = (threadIdx.z+blockIdx.z*blockDim.z)*ATOM_SIZE_Z;
int i,j,k,n;
for (i = i0; i < i0+ATOM_SIZE_X; i++)
{
for (j = j0; j < j0+ATOM_SIZE_Y; j++)
{
for (k = k0; k < k0+ATOM_SIZE_Z; k++)
{
// Call Time Scheme
imex(u_old,u_new,BC,i,j,k);
}
}
}
__syncthreads();
for (i = i0; i < i0+ATOM_SIZE_X; i++)
{
for (j = j0; j < j0+ATOM_SIZE_Y; j++)
{
for (k = k0; k < k0+ATOM_SIZE_Z; k++)
{
n = i+Nx*(j+Ny*k);
u_old[n] = u_new[n];
}
}
}
__syncthreads();
}
// Boundary Conditions
// Window Boundary Condition
__device__ bool windowBC(int i,int j,int k)
{
// Relative Window Dimensions
float l_ = l/L,h_ = h/L;
float a = H/L;
return abs(2*i*dx-1)<=l_ & abs(2*j*dy-a)<= h_ & k==0;
}
// Five Windows Boundary Condition
__device__ bool fiveWindowsBC(int i,int j,int k)
{
// Domain Dimensions (cm)
// float W = 0.52f,H = 0.44f;
// Window Dimensions (cm)
// float w = 0.04f,h = 0.02f;
// Window Spacing (cm)
// float xs = 0.1f,ys = 0.2f;
// Relative Window Dimensions
float l_ = l/L,h_ = h/L;
float xs_ = xs/L,ys_ = ys/L;
float a = H/L;
// Windows
float x0[] = {(1+l_+xs_)/2,(1-l_-xs_)/2,0.5f-l_-xs_,0.5f,0.5f+l_+xs_};
float y0[] = {(a+h_+ys_)/2,(a+h_+ys_)/2,(a-h_-ys_)/2,(a-h_-ys_)/2,(a-h_-ys_)/2};
bool window = false;
for (int win = 0; win < 5; win++)
{
window = window | abs(2*(i*dx-x0[win]))<=l_ & abs(2*(j*dy-y0[win]))<=h_ & k==0;
}
return window;
}
|
498c1bd918f778565d35ff18b86799102c93485f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include "testlayers.h"
#include <helpers/PointersManager.h>
#include <array/ExtraArguments.h>
#include <ops/declarable/CustomOperations.h>
#include <array>
#include <hip/hip_runtime.h>
using namespace nd4j;
using namespace nd4j::ops;
class JavaInteropCudaTests : public testing::Test {
public:
};
TEST_F(JavaInteropCudaTests, test_DeclarableOp_execution_1) {
auto x = NDArrayFactory::create<float>('c', {3, 5});
auto y = NDArrayFactory::create<float>('c', {5}, {1.f, 1.f, 1.f, 1.f, 1.f});
auto e = NDArrayFactory::create<float>('c', {3, 5});
x.assign(1.f);
e.assign(2.f);
nd4j::ops::add op;
Context context(1);
context.setCudaContext(LaunchContext::defaultContext()->getCudaStream(), LaunchContext::defaultContext()->getReductionPointer(), LaunchContext::defaultContext()->getAllocationPointer());
context.setInputArray(0, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo());
context.setInputArray(1, y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo());
context.setOutputArray(0, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo());
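// The output slot reuses x's buffers, so the op writes its result in place over x; this is why
// the assertion below compares the expected array e against x rather than a separate output.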
nd4j_printf("Starting execution...\n","");
PointersManager pm(LaunchContext::defaultContext(), "test_DeclarableOp_execution_1");
execCustomOp2(nullptr, op.getOpHash(), &context);
pm.synchronize();
ASSERT_EQ(e, x);
}
TEST_F(JavaInteropCudaTests, test_DeclarableOp_execution_2) {
NDArray x('c', {3, 1, 2}, nd4j::DataType::FLOAT32);
NDArray y('c', {2, 2}, nd4j::DataType::FLOAT32);
NDArray z('c', {3, 2, 2}, nd4j::DataType::BOOL);
NDArray e('c', {3, 2, 2}, nd4j::DataType::BOOL);
x.assign(1.f);
y.assign(2.f);
e.assign(false);
nd4j::ops::equals op;
Context context(1);
context.setCudaContext(LaunchContext::defaultContext()->getCudaStream(), LaunchContext::defaultContext()->getReductionPointer(), LaunchContext::defaultContext()->getAllocationPointer());
context.setInputArray(0, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo());
context.setInputArray(1, y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo());
context.setOutputArray(0, z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo());
nd4j_printf("Starting execution...\n","");
PointersManager pm(LaunchContext::defaultContext(), "test_DeclarableOp_execution_2");
execCustomOp2(nullptr, op.getOpHash(), &context);
pm.synchronize();
ASSERT_EQ(e, z);
}
|
498c1bd918f778565d35ff18b86799102c93485f.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include "testlayers.h"
#include <helpers/PointersManager.h>
#include <array/ExtraArguments.h>
#include <ops/declarable/CustomOperations.h>
#include <array>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace nd4j;
using namespace nd4j::ops;
class JavaInteropCudaTests : public testing::Test {
public:
};
TEST_F(JavaInteropCudaTests, test_DeclarableOp_execution_1) {
auto x = NDArrayFactory::create<float>('c', {3, 5});
auto y = NDArrayFactory::create<float>('c', {5}, {1.f, 1.f, 1.f, 1.f, 1.f});
auto e = NDArrayFactory::create<float>('c', {3, 5});
x.assign(1.f);
e.assign(2.f);
nd4j::ops::add op;
Context context(1);
context.setCudaContext(LaunchContext::defaultContext()->getCudaStream(), LaunchContext::defaultContext()->getReductionPointer(), LaunchContext::defaultContext()->getAllocationPointer());
context.setInputArray(0, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo());
context.setInputArray(1, y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo());
context.setOutputArray(0, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo());
nd4j_printf("Starting execution...\n","");
PointersManager pm(LaunchContext::defaultContext(), "test_DeclarableOp_execution_1");
execCustomOp2(nullptr, op.getOpHash(), &context);
pm.synchronize();
ASSERT_EQ(e, x);
}
TEST_F(JavaInteropCudaTests, test_DeclarableOp_execution_2) {
NDArray x('c', {3, 1, 2}, nd4j::DataType::FLOAT32);
NDArray y('c', {2, 2}, nd4j::DataType::FLOAT32);
NDArray z('c', {3, 2, 2}, nd4j::DataType::BOOL);
NDArray e('c', {3, 2, 2}, nd4j::DataType::BOOL);
x.assign(1.f);
y.assign(2.f);
e.assign(false);
nd4j::ops::equals op;
Context context(1);
context.setCudaContext(LaunchContext::defaultContext()->getCudaStream(), LaunchContext::defaultContext()->getReductionPointer(), LaunchContext::defaultContext()->getAllocationPointer());
context.setInputArray(0, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo());
context.setInputArray(1, y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo());
context.setOutputArray(0, z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo());
nd4j_printf("Starting execution...\n","");
PointersManager pm(LaunchContext::defaultContext(), "test_DeclarableOp_execution_2");
execCustomOp2(nullptr, op.getOpHash(), &context);
pm.synchronize();
ASSERT_EQ(e, z);
}
|
6a4412d6acc4d9eac056138e4a76517fda5cc31f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
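// Grid-stride loop over the n elements: each thread copies its RNG state into a local variable,
// draws one random source index per element it owns, and writes the state back exactly once at
// the end to avoid repeated global-memory round trips on the state array.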
__global__ void generate_destinations(hiprandState_t *state, int n, const uint32_t *sources, uint32_t *destinations) {
int first = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
hiprandState_t local_state = state[first];
for (int id = first ; id < n ; id += stride) {
destinations[id] = sources[hiprand(&local_state) % n];
}
state[first] = local_state;
}
|
6a4412d6acc4d9eac056138e4a76517fda5cc31f.cu
|
#include "includes.h"
__global__ void generate_destinations(curandState *state, int n, const uint32_t *sources, uint32_t *destinations) {
int first = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
curandState local_state = state[first];
for (int id = first ; id < n ; id += stride) {
destinations[id] = sources[curand(&local_state) % n];
}
state[first] = local_state;
}
|
05a8c236245800f9cdceb782937f68907ceaa1d0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include "utils.h"
#include <hipfft.h>
#include "cuda_utils.h"
#include "fftconvolve.h"
#include <vector>
#include <random>
#include <chrono>
#include <iostream>
#include <algorithm>
#include <numeric> // for std::accumulate
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
using namespace std;
template <typename T>
void print_vector(vector<T> &v) {
for (auto &i : v) cout << i << "\n";
}
template <typename T>
void print_vector(T *v, int n) {
for (int i = 0; i < n; i++) cout << v[i] << "\n";
}
int main(int argc, char const *argv[])
{
int n_turns = 50000;
int n_signal = 1000;
int n_kernel = 1000;
// int blocks = 512;
// int threads = 1024;
if (argc > 1) n_turns = atoi(argv[1]);
if (argc > 2) n_signal = atoi(argv[2]);
if (argc > 3) n_kernel = atoi(argv[3]);
// if (argc > 4) blocks = atoi(argv[4]);
// if (argc > 5) threads = atoi(argv[5]);
// setup random engine
default_random_engine gen;
uniform_real_distribution<double> d(0.0, 1.0);
// initialize variables
vector<double> signal, kernel;
vector<double> result;
signal.resize(n_signal);
kernel.resize(n_kernel);
result.resize(n_signal + n_kernel - 1);
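// A full linear convolution has length n_signal + n_kernel - 1. Both device inputs below are
// allocated at this padded length and zero-initialized, so the circular convolution implied by
// the FFTs coincides with the desired linear convolution.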
for (int i = 0; i < n_signal; ++i) {
signal[i] = d(gen);
}
for (int i = 0; i < n_kernel; ++i) {
kernel[i] = d(gen);
}
thrust::device_vector<double> d_signal(result.size(), 0.);
thrust::device_vector<double> d_kernel(result.size(), 0.);
thrust::copy(signal.begin(), signal.end(), d_signal.begin());
thrust::copy(kernel.begin(), kernel.end(), d_kernel.begin());
thrust::device_vector<double> d_result(result.size());
double *d_signal_ptr = thrust::raw_pointer_cast(d_signal.data());
double *d_kernel_ptr = thrust::raw_pointer_cast(d_kernel.data());
double *d_result_ptr = thrust::raw_pointer_cast(d_result.data());
hipfftDoubleComplex * d_out_ptr;
// thrust::device_vector<hipfftDoubleComplex> d_out(result.size() / 2 + 1);
// hipfftDoubleComplex *d_out_ptr = thrust::raw_pointer_cast(d_out.data());
hipMalloc((void**)&d_out_ptr, 2 * (result.size() / 2 + 1)
* sizeof(hipfftDoubleComplex));
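// Scratch buffer sized for two half-spectra of result.size()/2 + 1 complex bins each --
// presumably one for the transformed signal and one for the transformed kernel inside
// convolve_real_no_memcpy_v2. That routine lives in fftconvolve.h and is not shown here,
// so this sizing rationale is an assumption.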
hipfftHandle fwplan, bwplan;
if (hipfftPlan1d(&fwplan, result.size(), HIPFFT_D2Z, 1) != HIPFFT_SUCCESS) {
fprintf(stderr, "CUFFT error: Plan creation failed");
}
if (hipfftPlan1d(&bwplan, result.size(), HIPFFT_Z2D, 1) != HIPFFT_SUCCESS) {
fprintf(stderr, "CUFFT error: Plan creation failed");
}
convolve_real_no_memcpy_v2(d_signal_ptr, n_signal,
d_kernel_ptr, n_kernel,
d_result_ptr,
d_out_ptr,
fwplan, bwplan);
hipDeviceSynchronize();
// // main loop
auto start = chrono::high_resolution_clock::now();
for (int i = 0; i < n_turns; ++i) {
convolve_real_no_memcpy_v2(d_signal_ptr, n_signal,
d_kernel_ptr, n_kernel,
d_result_ptr,
d_out_ptr,
fwplan, bwplan);
hipDeviceSynchronize();
}
hipfftDestroy(fwplan); hipfftDestroy(bwplan);
auto end = chrono::high_resolution_clock::now();
thrust::copy(d_result.begin(), d_result.end(), result.begin());
auto duration = chrono::duration_cast<chrono::milliseconds>(end - start).count();
printf("function\tcounter\taverage_value\tstd(%%)\tcalls\n");
printf("fft_convolution_gpu_v9\ttime(ms)\t%d\t0\t1\n", duration);
printf("result: %lf\n", accumulate(result.begin(), result.end(), 0.0) / (n_signal + n_kernel - 1));
return 0;
}
|
05a8c236245800f9cdceb782937f68907ceaa1d0.cu
|
#include <stdlib.h>
#include <stdio.h>
#include "utils.h"
#include <cufft.h>
#include "cuda_utils.h"
#include "fftconvolve.h"
#include <vector>
#include <random>
#include <chrono>
#include <iostream>
#include <algorithm>
#include <numeric> // for std::accumulate
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
using namespace std;
template <typename T>
void print_vector(vector<T> &v) {
for (auto &i : v) cout << i << "\n";
}
template <typename T>
void print_vector(T *v, int n) {
for (int i = 0; i < n; i++) cout << v[i] << "\n";
}
int main(int argc, char const *argv[])
{
int n_turns = 50000;
int n_signal = 1000;
int n_kernel = 1000;
// int blocks = 512;
// int threads = 1024;
if (argc > 1) n_turns = atoi(argv[1]);
if (argc > 2) n_signal = atoi(argv[2]);
if (argc > 3) n_kernel = atoi(argv[3]);
// if (argc > 4) blocks = atoi(argv[4]);
// if (argc > 5) threads = atoi(argv[5]);
// setup random engine
default_random_engine gen;
uniform_real_distribution<double> d(0.0, 1.0);
// initialize variables
vector<double> signal, kernel;
vector<double> result;
signal.resize(n_signal);
kernel.resize(n_kernel);
result.resize(n_signal + n_kernel - 1);
for (int i = 0; i < n_signal; ++i) {
signal[i] = d(gen);
}
for (int i = 0; i < n_kernel; ++i) {
kernel[i] = d(gen);
}
thrust::device_vector<double> d_signal(result.size(), 0.);
thrust::device_vector<double> d_kernel(result.size(), 0.);
thrust::copy(signal.begin(), signal.end(), d_signal.begin());
thrust::copy(kernel.begin(), kernel.end(), d_kernel.begin());
thrust::device_vector<double> d_result(result.size());
double *d_signal_ptr = thrust::raw_pointer_cast(d_signal.data());
double *d_kernel_ptr = thrust::raw_pointer_cast(d_kernel.data());
double *d_result_ptr = thrust::raw_pointer_cast(d_result.data());
cufftDoubleComplex * d_out_ptr;
// thrust::device_vector<cufftDoubleComplex> d_out(result.size() / 2 + 1);
// cufftDoubleComplex *d_out_ptr = thrust::raw_pointer_cast(d_out.data());
cudaMalloc((void**)&d_out_ptr, 2 * (result.size() / 2 + 1)
* sizeof(cufftDoubleComplex));
cufftHandle fwplan, bwplan;
if (cufftPlan1d(&fwplan, result.size(), CUFFT_D2Z, 1) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT error: Plan creation failed");
}
if (cufftPlan1d(&bwplan, result.size(), CUFFT_Z2D, 1) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT error: Plan creation failed");
}
convolve_real_no_memcpy_v2(d_signal_ptr, n_signal,
d_kernel_ptr, n_kernel,
d_result_ptr,
d_out_ptr,
fwplan, bwplan);
cudaThreadSynchronize();
// // main loop
auto start = chrono::high_resolution_clock::now();
for (int i = 0; i < n_turns; ++i) {
convolve_real_no_memcpy_v2(d_signal_ptr, n_signal,
d_kernel_ptr, n_kernel,
d_result_ptr,
d_out_ptr,
fwplan, bwplan);
cudaThreadSynchronize();
}
cufftDestroy(fwplan); cufftDestroy(bwplan);
auto end = chrono::high_resolution_clock::now();
thrust::copy(d_result.begin(), d_result.end(), result.begin());
auto duration = chrono::duration_cast<chrono::milliseconds>(end - start).count();
printf("function\tcounter\taverage_value\tstd(%%)\tcalls\n");
printf("fft_convolution_gpu_v9\ttime(ms)\t%d\t0\t1\n", duration);
printf("result: %lf\n", accumulate(result.begin(), result.end(), 0.0) / (n_signal + n_kernel - 1));
return 0;
}
|
fed9c0b5ddd6a83f903a8bb631ba95da9538279d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
__global__
void elementwise_kernel(int volume, OpType type,
const DATATYPE* x,
const DATATYPE* y,
DATATYPE* z)
{
switch (type) {
case OP_EW_SUB:
{
CUDA_KERNEL_LOOP(i, volume)
{
z[i] = x[i] - y[i];
}
break;
}
case OP_EW_DIV:
{
CUDA_KERNEL_LOOP(i, volume)
{
z[i] = x[i] / y[i];
}
break;
}
case OP_EW_EQUAL:
{
CUDA_KERNEL_LOOP(i, volume)
{
z[i] = (x[i] == y[i]);
}
break;
}
case OP_EW_GREATER:
{
CUDA_KERNEL_LOOP(i, volume)
{
z[i] = (x[i] > y[i]);
}
break;
}
case OP_EW_LESS:
{
CUDA_KERNEL_LOOP(i, volume)
{
z[i] = (x[i] < y[i]);
}
break;
}
default:
assert(false);
}
}
bool Element::has_cudnn_kernel(void) const
{
switch (type) {
case OP_EW_ADD:
case OP_EW_MUL:
case OP_EW_MAX:
case OP_EW_MIN:
return true;
default:
return false;
}
}
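// OP_EW_ADD/MUL/MAX/MIN are dispatched to cudnnOpTensor in map()/forward(); every other
// elementwise op (SUB, DIV, EQUAL, GREATER, LESS) falls back to the hand-written
// elementwise_kernel above.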
void Element::map(void)
{
if (has_cudnn_kernel()) {
// create descriptors
checkCUDNN(cudnnCreateTensorDescriptor(&in1Tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&in2Tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&outTensor));
checkCUDNN(cudnnCreateOpTensorDescriptor(&opDesc));
// set descriptors
helperSetBroadcastableTensorDescriptor(inputs[0], outputs[0], in1Tensor);
helperSetBroadcastableTensorDescriptor(inputs[1], outputs[0], in2Tensor);
helperSetTensorDescriptor(outputs[0], outTensor);
cudnnOpTensorOp_t opType;
switch (type) {
case OP_EW_ADD:
opType = CUDNN_OP_TENSOR_ADD;
break;
case OP_EW_MUL:
opType = CUDNN_OP_TENSOR_MUL;
break;
case OP_EW_MAX:
opType = CUDNN_OP_TENSOR_MAX;
break;
case OP_EW_MIN:
opType = CUDNN_OP_TENSOR_MIN;
break;
default:
fprintf(stderr, "Unsupported Elementwise Operator by cuDNN: %d\n", type);
assert(false);
}
checkCUDNN(cudnnSetOpTensorDescriptor(opDesc, opType, CUDNN_DATA_FLOAT,
CUDNN_NOT_PROPAGATE_NAN));
} else {
// No preprocessing for our customized kernel
}
// allocate tensors
size_t outputSize = sizeof(DATATYPE);
for (int i = 0; i < outputs[0].numDim; i++)
outputSize *= outputs[0].dim[i];
checkCUDA(hipMalloc(&outputs[0].data_ptr, outputSize));
}
void Element::unmap(void)
{
if (has_cudnn_kernel()) {
checkCUDNN(cudnnDestroyTensorDescriptor(in1Tensor));
checkCUDNN(cudnnDestroyTensorDescriptor(in2Tensor));
checkCUDNN(cudnnDestroyTensorDescriptor(outTensor));
checkCUDNN(cudnnDestroyOpTensorDescriptor(opDesc));
}
checkCUDA(hipFree(outputs[0].data_ptr));
}
void Element::forward(bool block)
{
if (has_cudnn_kernel()) {
const float alpha = 1.0f;
const float beta = 0.0f;
checkCUDNN(cudnnOpTensor(model->dnn, opDesc, &alpha, in1Tensor, inputs[0].data_ptr,
&alpha, in2Tensor, inputs[1].data_ptr, &beta, outTensor, outputs[0].data_ptr));
} else {
hipLaunchKernelGGL(( elementwise_kernel), dim3(GET_BLOCKS(inputs[0].volume())), dim3(CUDA_NUM_THREADS), 0, 0,
inputs[0].volume(), type, (DATATYPE*)inputs[0].data_ptr, (DATATYPE*)inputs[1].data_ptr,
(DATATYPE*)outputs[0].data_ptr);
}
if (block)
checkCUDA(hipDeviceSynchronize());
}
void Model::measure_element_cost(Element* ele)
{
// cudnnOpTensor only supports OP_EW_ADD, OP_EW_MUL, OP_EW_MAX, OP_EW_MIN
if (ele->has_cudnn_kernel()) {
const float alpha = 1.0f;
const float beta = 0.0f;
helperSetBroadcastableTensorDescriptor(ele->inputs[0],
ele->outputs[0], inputTensor);
helperSetBroadcastableTensorDescriptor(ele->inputs[1],
ele->outputs[0], biasTensor);
helperSetTensorDescriptor(ele->outputs[0], outputTensor);
cudnnOpTensorOp_t opType;
switch (ele->type) {
case OP_EW_ADD:
opType = CUDNN_OP_TENSOR_ADD;
break;
case OP_EW_MUL:
opType = CUDNN_OP_TENSOR_MUL;
break;
case OP_EW_MAX:
opType = CUDNN_OP_TENSOR_MAX;
break;
case OP_EW_MIN:
opType = CUDNN_OP_TENSOR_MIN;
break;
default:
{
fprintf(stderr, "Unsupported Elementwise Operator by cuDNN: %d\n", ele->type);
assert(false);
}
}
checkCUDNN(cudnnSetOpTensorDescriptor(opDesc, opType, CUDNN_DATA_FLOAT,
CUDNN_NOT_PROPAGATE_NAN));
checkCUDA(hipDeviceSynchronize());
checkCUDA(hipEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
checkCUDNN(cudnnOpTensor(dnn, opDesc, &alpha, inputTensor, inputPtr,
&alpha, biasTensor, filterPtr, &beta, outputTensor, outputPtr));
}
checkCUDA(hipEventRecord(endEvent));
checkCUDA(hipEventSynchronize(endEvent));
float milliseconds;
hipEventElapsedTime(&milliseconds, startEvent, endEvent);
ele->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Element]: i(%d %d %d %d) type(%d) cost(%.4lf)\n",
ele->inputs[0].dim[0], ele->inputs[0].dim[1], ele->inputs[0].dim[2],
ele->inputs[0].dim[3], ele->type, ele->runtime);
} else {
// Use our implementation to measure other elementwise operators
checkCUDA(hipDeviceSynchronize());
checkCUDA(hipEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
hipLaunchKernelGGL(( elementwise_kernel), dim3(GET_BLOCKS(ele->inputs[0].volume())), dim3(CUDA_NUM_THREADS), 0, 0,
ele->inputs[0].volume(), ele->type, inputPtr, filterPtr, outputPtr);
}
checkCUDA(hipEventRecord(endEvent));
checkCUDA(hipEventSynchronize(endEvent));
float milliseconds;
hipEventElapsedTime(&milliseconds, startEvent, endEvent);
ele->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Element]: i(%d %d %d %d) type(%d) cost(%.4lf)\n",
ele->inputs[0].dim[0], ele->inputs[0].dim[1], ele->inputs[0].dim[2],
ele->inputs[0].dim[3], ele->type, ele->runtime);
}
}
|
fed9c0b5ddd6a83f903a8bb631ba95da9538279d.cu
|
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
__global__
void elementwise_kernel(int volume, OpType type,
const DATATYPE* x,
const DATATYPE* y,
DATATYPE* z)
{
switch (type) {
case OP_EW_SUB:
{
CUDA_KERNEL_LOOP(i, volume)
{
z[i] = x[i] - y[i];
}
break;
}
case OP_EW_DIV:
{
CUDA_KERNEL_LOOP(i, volume)
{
z[i] = x[i] / y[i];
}
break;
}
case OP_EW_EQUAL:
{
CUDA_KERNEL_LOOP(i, volume)
{
z[i] = (x[i] == y[i]);
}
break;
}
case OP_EW_GREATER:
{
CUDA_KERNEL_LOOP(i, volume)
{
z[i] = (x[i] > y[i]);
}
break;
}
case OP_EW_LESS:
{
CUDA_KERNEL_LOOP(i, volume)
{
z[i] = (x[i] < y[i]);
}
break;
}
default:
assert(false);
}
}
bool Element::has_cudnn_kernel(void) const
{
switch (type) {
case OP_EW_ADD:
case OP_EW_MUL:
case OP_EW_MAX:
case OP_EW_MIN:
return true;
default:
return false;
}
}
void Element::map(void)
{
if (has_cudnn_kernel()) {
// create descriptors
checkCUDNN(cudnnCreateTensorDescriptor(&in1Tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&in2Tensor));
checkCUDNN(cudnnCreateTensorDescriptor(&outTensor));
checkCUDNN(cudnnCreateOpTensorDescriptor(&opDesc));
// set descriptors
helperSetBroadcastableTensorDescriptor(inputs[0], outputs[0], in1Tensor);
helperSetBroadcastableTensorDescriptor(inputs[1], outputs[0], in2Tensor);
helperSetTensorDescriptor(outputs[0], outTensor);
cudnnOpTensorOp_t opType;
switch (type) {
case OP_EW_ADD:
opType = CUDNN_OP_TENSOR_ADD;
break;
case OP_EW_MUL:
opType = CUDNN_OP_TENSOR_MUL;
break;
case OP_EW_MAX:
opType = CUDNN_OP_TENSOR_MAX;
break;
case OP_EW_MIN:
opType = CUDNN_OP_TENSOR_MIN;
break;
default:
fprintf(stderr, "Unsupported Elementwise Operator by cuDNN: %d\n", type);
assert(false);
}
checkCUDNN(cudnnSetOpTensorDescriptor(opDesc, opType, CUDNN_DATA_FLOAT,
CUDNN_NOT_PROPAGATE_NAN));
} else {
// No preprocessing for our customized kernel
}
// allocate tensors
size_t outputSize = sizeof(DATATYPE);
for (int i = 0; i < outputs[0].numDim; i++)
outputSize *= outputs[0].dim[i];
checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputSize));
}
void Element::unmap(void)
{
if (has_cudnn_kernel()) {
checkCUDNN(cudnnDestroyTensorDescriptor(in1Tensor));
checkCUDNN(cudnnDestroyTensorDescriptor(in2Tensor));
checkCUDNN(cudnnDestroyTensorDescriptor(outTensor));
checkCUDNN(cudnnDestroyOpTensorDescriptor(opDesc));
}
checkCUDA(cudaFree(outputs[0].data_ptr));
}
void Element::forward(bool block)
{
if (has_cudnn_kernel()) {
const float alpha = 1.0f;
const float beta = 0.0f;
checkCUDNN(cudnnOpTensor(model->dnn, opDesc, &alpha, in1Tensor, inputs[0].data_ptr,
&alpha, in2Tensor, inputs[1].data_ptr, &beta, outTensor, outputs[0].data_ptr));
} else {
elementwise_kernel<<<GET_BLOCKS(inputs[0].volume()), CUDA_NUM_THREADS>>>(
inputs[0].volume(), type, (DATATYPE*)inputs[0].data_ptr, (DATATYPE*)inputs[1].data_ptr,
(DATATYPE*)outputs[0].data_ptr);
}
if (block)
checkCUDA(cudaDeviceSynchronize());
}
void Model::measure_element_cost(Element* ele)
{
// cudnnOpTensor only supports OP_EW_ADD, OP_EW_MUL, OP_EW_MAX, OP_EW_MIN
if (ele->has_cudnn_kernel()) {
const float alpha = 1.0f;
const float beta = 0.0f;
helperSetBroadcastableTensorDescriptor(ele->inputs[0],
ele->outputs[0], inputTensor);
helperSetBroadcastableTensorDescriptor(ele->inputs[1],
ele->outputs[0], biasTensor);
helperSetTensorDescriptor(ele->outputs[0], outputTensor);
cudnnOpTensorOp_t opType;
switch (ele->type) {
case OP_EW_ADD:
opType = CUDNN_OP_TENSOR_ADD;
break;
case OP_EW_MUL:
opType = CUDNN_OP_TENSOR_MUL;
break;
case OP_EW_MAX:
opType = CUDNN_OP_TENSOR_MAX;
break;
case OP_EW_MIN:
opType = CUDNN_OP_TENSOR_MIN;
break;
default:
{
fprintf(stderr, "Unsupported Elementwise Operator by cuDNN: %d\n", ele->type);
assert(false);
}
}
checkCUDNN(cudnnSetOpTensorDescriptor(opDesc, opType, CUDNN_DATA_FLOAT,
CUDNN_NOT_PROPAGATE_NAN));
checkCUDA(cudaDeviceSynchronize());
checkCUDA(cudaEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
checkCUDNN(cudnnOpTensor(dnn, opDesc, &alpha, inputTensor, inputPtr,
&alpha, biasTensor, filterPtr, &beta, outputTensor, outputPtr));
}
checkCUDA(cudaEventRecord(endEvent));
checkCUDA(cudaEventSynchronize(endEvent));
float milliseconds;
cudaEventElapsedTime(&milliseconds, startEvent, endEvent);
ele->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Element]: i(%d %d %d %d) type(%d) cost(%.4lf)\n",
ele->inputs[0].dim[0], ele->inputs[0].dim[1], ele->inputs[0].dim[2],
ele->inputs[0].dim[3], ele->type, ele->runtime);
} else {
// Use our implementation to measure other elementwise operators
checkCUDA(cudaDeviceSynchronize());
checkCUDA(cudaEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
elementwise_kernel<<<GET_BLOCKS(ele->inputs[0].volume()), CUDA_NUM_THREADS>>>(
ele->inputs[0].volume(), ele->type, inputPtr, filterPtr, outputPtr);
}
checkCUDA(cudaEventRecord(endEvent));
checkCUDA(cudaEventSynchronize(endEvent));
float milliseconds;
cudaEventElapsedTime(&milliseconds, startEvent, endEvent);
ele->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Element]: i(%d %d %d %d) type(%d) cost(%.4lf)\n",
ele->inputs[0].dim[0], ele->inputs[0].dim[1], ele->inputs[0].dim[2],
ele->inputs[0].dim[3], ele->type, ele->runtime);
}
}
|
c20b7adc6226d746346726ac03e748830c87c97f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void loop0(int* C, int* A, int* B) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
C[id] = A[id] + B[id];
}
extern "C" __global__ void loop1(int* D, int* C) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
D[id] = C[id] * 10;
}
extern "C" __global__ void loop2(int* E, int* D) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
E[id] = D[id] * 2;
}
|
c20b7adc6226d746346726ac03e748830c87c97f.cu
|
extern "C" __global__ void loop0(int* C, int* A, int* B) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
C[id] = A[id] + B[id];
}
extern "C" __global__ void loop1(int* D, int* C) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
D[id] = C[id] * 10;
}
extern "C" __global__ void loop2(int* E, int* D) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
E[id] = D[id] * 2;
}
|
855d506b7f4c51c0147acf171db6700b46d6324d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "windowKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *idata = NULL;
hipMalloc(&idata, XSIZE*YSIZE*sizeof(float)); // allocate XSIZE*YSIZE floats, not bytes
float *window = NULL;
hipMalloc(&window, XSIZE*YSIZE*sizeof(float));
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
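// Benchmark protocol: one launch to warm up the context, ten further warm-up launches, then
// 1000 timed launches; the line printed at the end is the total microseconds for those 1000
// launches together with the (block, matrix) configuration being measured. Note that there is
// no device synchronization inside the timed region, so the measurement reflects launch/enqueue
// time unless the stream backs up.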
hipFree(0);
hipLaunchKernelGGL(windowKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, idata, window, width, height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(windowKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, idata, window, width, height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(windowKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, idata, window, width, height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
855d506b7f4c51c0147acf171db6700b46d6324d.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "windowKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *idata = NULL;
cudaMalloc(&idata, XSIZE*YSIZE*sizeof(float)); // allocate XSIZE*YSIZE floats, not bytes
float *window = NULL;
cudaMalloc(&window, XSIZE*YSIZE*sizeof(float));
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
windowKernel<<<gridBlock,threadBlock>>>(idata,window,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
windowKernel<<<gridBlock,threadBlock>>>(idata,window,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
windowKernel<<<gridBlock,threadBlock>>>(idata,window,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
f8b18edad1510e3948e003bb33bf08c38545a5c4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <simulation_methods.h>
__device__ RungeKuttaStepResults compute_rk_step(PendulumState pendulumState,
RungeKuttaStepResults previousRungeKuttaStepResults,
FloatType u,
FloatType length1, FloatType length2,
FloatType g,
FloatType timeStep) {
// Compute the new pendulum state using Forward Euler.
PendulumState newPendulumState;
newPendulumState.angle1 = pendulumState.angle1 + timeStep*previousRungeKuttaStepResults.velocity1;
newPendulumState.angle2 = pendulumState.angle2 + timeStep*previousRungeKuttaStepResults.velocity2;
newPendulumState.angularVelocity1 = pendulumState.angularVelocity1 + timeStep*previousRungeKuttaStepResults.acceleration1;
newPendulumState.angularVelocity2 = pendulumState.angularVelocity2 + timeStep*previousRungeKuttaStepResults.acceleration2;
// Compute the accelerations at the new pendulum state.
AccelerationResults accelerationResults = compute_accelerations(newPendulumState, u, length1, length2, g);
// Return the computed derivatives of position and velocity.
RungeKuttaStepResults rungeKuttaStepResults;
rungeKuttaStepResults.velocity1 = newPendulumState.angularVelocity1;
rungeKuttaStepResults.velocity2 = newPendulumState.angularVelocity2;
rungeKuttaStepResults.acceleration1 = accelerationResults.acceleration1;
rungeKuttaStepResults.acceleration2 = accelerationResults.acceleration2;
return rungeKuttaStepResults;
}
__device__ PendulumState compute_double_pendulum_step_rk4(PendulumState pendulumState,
FloatType u,
FloatType length1, FloatType length2,
FloatType g,
FloatType timeStep) {
// Compute the four steps of the classical Runge-Kutta 4th order algorithm.
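// k1 is evaluated with zero increments, so it samples the derivatives at the current state
// (the timeStep/2 argument is irrelevant for it); k2 and k3 use half-step predictions and k4 a
// full step, and the (k1 + 2*k2 + 2*k3 + k4)/6 weights below are the classical RK4 combination.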
RungeKuttaStepResults k1 = compute_rk_step(pendulumState, {0, 0, 0, 0}, u, length1, length2, g, timeStep/2);
RungeKuttaStepResults k2 = compute_rk_step(pendulumState, k1, u, length1, length2, g, timeStep/2);
RungeKuttaStepResults k3 = compute_rk_step(pendulumState, k2, u, length1, length2, g, timeStep/2);
RungeKuttaStepResults k4 = compute_rk_step(pendulumState, k3, u, length1, length2, g, timeStep);
// Combine the results of the Runge-Kutta steps.
FloatType velocity1 = (k1.velocity1 + 2*k2.velocity1 + 2*k3.velocity1 + k4.velocity1)/6;
FloatType velocity2 = (k1.velocity2 + 2*k2.velocity2 + 2*k3.velocity2 + k4.velocity2)/6;
FloatType acceleration1 = (k1.acceleration1 + 2*k2.acceleration1 + 2*k3.acceleration1 + k4.acceleration1)/6;
FloatType acceleration2 = (k1.acceleration2 + 2*k2.acceleration2 + 2*k3.acceleration2 + k4.acceleration2)/6;
// Compute the new state of the pendulum.
PendulumState newPendulumState;
newPendulumState.angle1 = velocity1*timeStep + pendulumState.angle1;
newPendulumState.angle2 = velocity2*timeStep + pendulumState.angle2;
newPendulumState.angularVelocity1 = acceleration1*timeStep + pendulumState.angularVelocity1;
newPendulumState.angularVelocity2 = acceleration2*timeStep + pendulumState.angularVelocity2;
return newPendulumState;
}
__global__ void compute_double_pendulum_fractal_steps_till_flip_from_initial_states(FloatType m1, FloatType m2,
FloatType length1, FloatType length2,
FloatType g,
FloatType angle1Min, FloatType angle1Max,
FloatType angle2Min, FloatType angle2Max,
PendulumState *pendulumStates,
bool startFromDefaultState,
int numberOfTimeStepsAlreadyExecuted,
int totalNumberOfAnglesToTestX, int totalNumberOfAnglesToTestY,
FloatType timeStep,
int maxNumberOfTimeStepsToSeeIfPendulumFlips,
int *numTimeStepsTillFlip) {
int stepX = gridDim.x*blockDim.x;
int stepY = gridDim.y*blockDim.y;
int startX = threadIdx.x + blockDim.x*blockIdx.x;
int startY = threadIdx.y + blockDim.y*blockIdx.y;
// Pre-compute a commonly used value.
FloatType u = 1 + m1/m2;
// Simulate the double pendulums.
for (int x = startX; x < totalNumberOfAnglesToTestX; x += stepX) {
for (int y = startY; y < totalNumberOfAnglesToTestY; y += stepY) {
int pixelIndex = (totalNumberOfAnglesToTestY - y - 1)*totalNumberOfAnglesToTestX + x;
// Set the initial state of the pendulum for the current pixel.
PendulumState initialPendulumState;
if (startFromDefaultState) {
initialPendulumState.angle1 = angle1Min + FloatType(x)*(angle1Max - angle1Min)/FloatType(totalNumberOfAnglesToTestX - 1);
initialPendulumState.angle2 = angle2Min + FloatType(y)*(angle2Max - angle2Min)/FloatType(totalNumberOfAnglesToTestY - 1);
initialPendulumState.angularVelocity1 = 0;
initialPendulumState.angularVelocity2 = 0;
}
else {
initialPendulumState.angle1 = pendulumStates[pixelIndex].angle1;
initialPendulumState.angle2 = pendulumStates[pixelIndex].angle2;
initialPendulumState.angularVelocity1 = pendulumStates[pixelIndex].angularVelocity1;
initialPendulumState.angularVelocity2 = pendulumStates[pixelIndex].angularVelocity2;
}
// If starting from the default states, skip the current pendulum if it doesn't have enough
// initial energy to flip the first mass.
if (startFromDefaultState) {
Point point1Position = get_point_position({0,0}, initialPendulumState.angle1, length1);
Point point2Position = get_point_position(point1Position, initialPendulumState.angle2, length2);
FloatType potentialEnergy1 = point1Position.y*m1*g;
FloatType potentialEnergy2 = point2Position.y*m2*g;
FloatType totalPotentialEnergy = potentialEnergy1 + potentialEnergy2;
FloatType minimumEnergyNeededForFlip = m1*length1*g + m2*(length1 - length2)*g;
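// The threshold above is the potential energy of the lowest-energy "flipped" configuration:
// m1 raised to height length1 above the pivot with m2 hanging straight down from it at height
// length1 - length2 (assuming get_point_position measures y upward from the pivot, which is
// consistent with how the energies are computed here).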
if (totalPotentialEnergy < minimumEnergyNeededForFlip) {
numTimeStepsTillFlip[pixelIndex] = NotEnoughEnergyToFlip;
continue;
}
}
// Otherwise skip the pendulum if the number of current time steps at the current pendulum is -1, indicating
// it originally didn't have enough energy to flip, or -2, indicating that the pendulum already flipped.
else if (numTimeStepsTillFlip[pixelIndex] == NotEnoughEnergyToFlip ||
numTimeStepsTillFlip[pixelIndex] != DidNotFlip) {
continue;
}
// Simulate the pendulum until it flips or time runs out.
PendulumState pendulumState = initialPendulumState;
FloatType originalAngle1 = pendulumState.angle1;
int numberOfTimeStepsExecuted = numberOfTimeStepsAlreadyExecuted;
bool pendulumFlipped = false;
while (numberOfTimeStepsExecuted < maxNumberOfTimeStepsToSeeIfPendulumFlips) {
// Compute one time step of the pendulum simulation.
pendulumState = compute_double_pendulum_step_rk4(pendulumState, u, length1, length2, g, timeStep);
numberOfTimeStepsExecuted++;
// Check to see if the first mass flipped.
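// floor((angle - PI)/TAU) changes value exactly when the angle crosses an odd multiple of PI,
// so comparing it against the previous step detects the first arm passing through the upright
// position in either direction.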
if (floor((pendulumState.angle1 - PI) / TAU) != floor((originalAngle1 - PI) / TAU)) {
pendulumFlipped = true;
break;
}
originalAngle1 = pendulumState.angle1;
}
// Set the new number of time steps for the pendulum to flip, and the new pendulum state.
// Set the number of time steps to -2 if it didn't flip.
numTimeStepsTillFlip[pixelIndex] = pendulumFlipped ? numberOfTimeStepsExecuted : DidNotFlip;
pendulumStates[pixelIndex] = pendulumState;
}
}
}
__global__ void compute_colors_from_steps_till_flip(int *numTimeStepsTillFlip,
char *colors,
int totalNumberOfAnglesToTestX,
int totalNumberOfAnglesToTestY,
FloatType timeStep,
FloatType redScale,
FloatType greenScale,
FloatType blueScale,
FloatType shift) {
int stepX = gridDim.x*blockDim.x;
int stepY = gridDim.y*blockDim.y;
int startX = threadIdx.x + blockDim.x*blockIdx.x;
int startY = threadIdx.y + blockDim.y*blockIdx.y;
int area = totalNumberOfAnglesToTestX*totalNumberOfAnglesToTestY;
FloatType colorScales[] = {redScale, greenScale, blueScale};
// Compute the color of each pixel.
for (int x = startX; x < totalNumberOfAnglesToTestX; x += stepX) {
for (int y = startY; y < totalNumberOfAnglesToTestY; y += stepY) {
int pixelIndex = (totalNumberOfAnglesToTestY - y - 1)*totalNumberOfAnglesToTestX + x;
int timeStepsTillFlip = numTimeStepsTillFlip[pixelIndex];
// Compute the color of the sample. Color it black if the pendulum did not flip.
FloatType timeTillFlipMs = FloatType(timeStepsTillFlip)*timeStep*1000.0;
if (timeStepsTillFlip == NotEnoughEnergyToFlip || timeStepsTillFlip == DidNotFlip) {
timeTillFlipMs = 0;
}
for (int i = 0; i < 3; i++) {
colors[pixelIndex + i*area] = lroundf(abs(sin(1.0/255 * PI * timeTillFlipMs * colorScales[i] * shift)) * 255);
}
}
}
}
|
f8b18edad1510e3948e003bb33bf08c38545a5c4.cu
|
#include <simulation_methods.h>
__device__ RungeKuttaStepResults compute_rk_step(PendulumState pendulumState,
RungeKuttaStepResults previousRungeKuttaStepResults,
FloatType u,
FloatType length1, FloatType length2,
FloatType g,
FloatType timeStep) {
// Compute the new pendulum state using Forward Euler.
PendulumState newPendulumState;
newPendulumState.angle1 = pendulumState.angle1 + timeStep*previousRungeKuttaStepResults.velocity1;
newPendulumState.angle2 = pendulumState.angle2 + timeStep*previousRungeKuttaStepResults.velocity2;
newPendulumState.angularVelocity1 = pendulumState.angularVelocity1 + timeStep*previousRungeKuttaStepResults.acceleration1;
newPendulumState.angularVelocity2 = pendulumState.angularVelocity2 + timeStep*previousRungeKuttaStepResults.acceleration2;
// Compute the accelerations at the new pendulum state.
AccelerationResults accelerationResults = compute_accelerations(newPendulumState, u, length1, length2, g);
// Return the computed derivatives of position and velocity.
RungeKuttaStepResults rungeKuttaStepResults;
rungeKuttaStepResults.velocity1 = newPendulumState.angularVelocity1;
rungeKuttaStepResults.velocity2 = newPendulumState.angularVelocity2;
rungeKuttaStepResults.acceleration1 = accelerationResults.acceleration1;
rungeKuttaStepResults.acceleration2 = accelerationResults.acceleration2;
return rungeKuttaStepResults;
}
__device__ PendulumState compute_double_pendulum_step_rk4(PendulumState pendulumState,
FloatType u,
FloatType length1, FloatType length2,
FloatType g,
FloatType timeStep) {
// Compute the four steps of the classical Runge-Kutta 4th order algorithm.
RungeKuttaStepResults k1 = compute_rk_step(pendulumState, {0, 0, 0, 0}, u, length1, length2, g, timeStep/2);
RungeKuttaStepResults k2 = compute_rk_step(pendulumState, k1, u, length1, length2, g, timeStep/2);
RungeKuttaStepResults k3 = compute_rk_step(pendulumState, k2, u, length1, length2, g, timeStep/2);
RungeKuttaStepResults k4 = compute_rk_step(pendulumState, k3, u, length1, length2, g, timeStep);
// Combine the results of the Runge-Kutta steps.
FloatType velocity1 = (k1.velocity1 + 2*k2.velocity1 + 2*k3.velocity1 + k4.velocity1)/6;
FloatType velocity2 = (k1.velocity2 + 2*k2.velocity2 + 2*k3.velocity2 + k4.velocity2)/6;
FloatType acceleration1 = (k1.acceleration1 + 2*k2.acceleration1 + 2*k3.acceleration1 + k4.acceleration1)/6;
FloatType acceleration2 = (k1.acceleration2 + 2*k2.acceleration2 + 2*k3.acceleration2 + k4.acceleration2)/6;
// Compute the new state of the pendulum.
PendulumState newPendulumState;
newPendulumState.angle1 = velocity1*timeStep + pendulumState.angle1;
newPendulumState.angle2 = velocity2*timeStep + pendulumState.angle2;
newPendulumState.angularVelocity1 = acceleration1*timeStep + pendulumState.angularVelocity1;
newPendulumState.angularVelocity2 = acceleration2*timeStep + pendulumState.angularVelocity2;
return newPendulumState;
}
__global__ void compute_double_pendulum_fractal_steps_till_flip_from_initial_states(FloatType m1, FloatType m2,
FloatType length1, FloatType length2,
FloatType g,
FloatType angle1Min, FloatType angle1Max,
FloatType angle2Min, FloatType angle2Max,
PendulumState *pendulumStates,
bool startFromDefaultState,
int numberOfTimeStepsAlreadyExecuted,
int totalNumberOfAnglesToTestX, int totalNumberOfAnglesToTestY,
FloatType timeStep,
int maxNumberOfTimeStepsToSeeIfPendulumFlips,
int *numTimeStepsTillFlip) {
int stepX = gridDim.x*blockDim.x;
int stepY = gridDim.y*blockDim.y;
int startX = threadIdx.x + blockDim.x*blockIdx.x;
int startY = threadIdx.y + blockDim.y*blockIdx.y;
// Pre-compute a commonly used value.
FloatType u = 1 + m1/m2;
// Simulate the double pendulums.
for (int x = startX; x < totalNumberOfAnglesToTestX; x += stepX) {
for (int y = startY; y < totalNumberOfAnglesToTestY; y += stepY) {
int pixelIndex = (totalNumberOfAnglesToTestY - y - 1)*totalNumberOfAnglesToTestX + x;
// Set the initial state of the pendulum for the current pixel.
PendulumState initialPendulumState;
if (startFromDefaultState) {
initialPendulumState.angle1 = angle1Min + FloatType(x)*(angle1Max - angle1Min)/FloatType(totalNumberOfAnglesToTestX - 1);
initialPendulumState.angle2 = angle2Min + FloatType(y)*(angle2Max - angle2Min)/FloatType(totalNumberOfAnglesToTestY - 1);
initialPendulumState.angularVelocity1 = 0;
initialPendulumState.angularVelocity2 = 0;
}
else {
initialPendulumState.angle1 = pendulumStates[pixelIndex].angle1;
initialPendulumState.angle2 = pendulumStates[pixelIndex].angle2;
initialPendulumState.angularVelocity1 = pendulumStates[pixelIndex].angularVelocity1;
initialPendulumState.angularVelocity2 = pendulumStates[pixelIndex].angularVelocity2;
}
// If starting from the default states, skip the current pendulum if it doesn't have enough
// initial energy to flip the first mass.
if (startFromDefaultState) {
Point point1Position = get_point_position({0,0}, initialPendulumState.angle1, length1);
Point point2Position = get_point_position(point1Position, initialPendulumState.angle2, length2);
FloatType potentialEnergy1 = point1Position.y*m1*g;
FloatType potentialEnergy2 = point2Position.y*m2*g;
FloatType totalPotentialEnergy = potentialEnergy1 + potentialEnergy2;
FloatType minimumEnergyNeededForFlip = m1*length1*g + m2*(length1 - length2)*g;
if (totalPotentialEnergy < minimumEnergyNeededForFlip) {
numTimeStepsTillFlip[pixelIndex] = NotEnoughEnergyToFlip;
continue;
}
}
// Otherwise skip the pendulum if the number of current time steps at the current pendulum is -1, indicating
// it originally didn't have enough energy to flip, or -2, indicating that the pendulum already flipped.
else if (numTimeStepsTillFlip[pixelIndex] == NotEnoughEnergyToFlip ||
numTimeStepsTillFlip[pixelIndex] != DidNotFlip) {
continue;
}
// Simulate the pendulum until it flips or time runs out.
PendulumState pendulumState = initialPendulumState;
FloatType originalAngle1 = pendulumState.angle1;
int numberOfTimeStepsExecuted = numberOfTimeStepsAlreadyExecuted;
bool pendulumFlipped = false;
while (numberOfTimeStepsExecuted < maxNumberOfTimeStepsToSeeIfPendulumFlips) {
// Compute one time step of the pendulum simulation.
pendulumState = compute_double_pendulum_step_rk4(pendulumState, u, length1, length2, g, timeStep);
numberOfTimeStepsExecuted++;
// Check to see if the first mass flipped.
if (floor((pendulumState.angle1 - PI) / TAU) != floor((originalAngle1 - PI) / TAU)) {
pendulumFlipped = true;
break;
}
originalAngle1 = pendulumState.angle1;
}
// Set the new number of time steps for the pendulum to flip, and the new pendulum state.
// Set the number of time steps to -2 if it didn't flip.
numTimeStepsTillFlip[pixelIndex] = pendulumFlipped ? numberOfTimeStepsExecuted : DidNotFlip;
pendulumStates[pixelIndex] = pendulumState;
}
}
}
__global__ void compute_colors_from_steps_till_flip(int *numTimeStepsTillFlip,
char *colors,
int totalNumberOfAnglesToTestX,
int totalNumberOfAnglesToTestY,
FloatType timeStep,
FloatType redScale,
FloatType greenScale,
FloatType blueScale,
FloatType shift) {
int stepX = gridDim.x*blockDim.x;
int stepY = gridDim.y*blockDim.y;
int startX = threadIdx.x + blockDim.x*blockIdx.x;
int startY = threadIdx.y + blockDim.y*blockIdx.y;
int area = totalNumberOfAnglesToTestX*totalNumberOfAnglesToTestY;
FloatType colorScales[] = {redScale, greenScale, blueScale};
// Compute the color of each pixel.
for (int x = startX; x < totalNumberOfAnglesToTestX; x += stepX) {
for (int y = startY; y < totalNumberOfAnglesToTestY; y += stepY) {
int pixelIndex = (totalNumberOfAnglesToTestY - y - 1)*totalNumberOfAnglesToTestX + x;
int timeStepsTillFlip = numTimeStepsTillFlip[pixelIndex];
// Compute the color of the sample. Color it black if the pendulum did not flip.
FloatType timeTillFlipMs = FloatType(timeStepsTillFlip)*timeStep*1000.0;
if (timeStepsTillFlip == NotEnoughEnergyToFlip || timeStepsTillFlip == DidNotFlip) {
timeTillFlipMs = 0;
}
for (int i = 0; i < 3; i++) {
colors[pixelIndex + i*area] = lroundf(abs(sin(1.0/255 * PI * timeTillFlipMs * colorScales[i] * shift)) * 255);
}
}
}
}
|
e1497e0d03b808f8fcdddf142381446ec002648a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "definitions.cuh"
// Performs the CFD calculation on global memory. This code does not use any advanced GPU
// optimization technique, but still achieves a many-fold performance gain.
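// Each thread updates one interior point with a single Jacobi sweep of the 5-point stencil,
// output = 0.25*(uN + uS + uE + uW - 4*h*h), i.e. one relaxation step of a Poisson-type problem
// with a constant source term.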
__global__ void calculateCFD_V1( float* input, float* output, unsigned int Ni, unsigned int Nj,
float h)
{
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; // Y - ID
unsigned int j = blockDim.y * blockIdx.y + threadIdx.y; // X - ID
unsigned int iPrev = i-1; // Previous Y element
unsigned int iNext = i+1; // Next Y element
unsigned int jPrev = j-1; //Previous X element
unsigned int jNext = j+1; // Next X element
unsigned int index = i * Nj + j;
if( i > 0 && j > 0 && i < (Ni-1) && j <(Nj-1))
output[index] = 0.25f * (input[iPrev * Nj + j] + input[iNext* Nj + j] + input[i * Nj+ jPrev]
+ input[i* Nj + jNext] - 4*h*h);
}
//This version of Kernel uses optimization by copying the data into shared memory and hence results in better performance
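// Each block stages a (THREADS_PER_BLOCK_Y + 2*RADIUS) x (THREADS_PER_BLOCK_X + 2*RADIUS) tile:
// every thread loads its own cell, and threads within RADIUS of the block edge additionally
// fetch the top/bottom or left/right halo rows. The corner halo cells are never loaded, which
// is harmless here because the 5-point stencil below does not read them (RADIUS is presumably 1;
// definitions.cuh is not shown).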
__global__ void calculateCFD_V2( float* input, float* output, unsigned int Ni, unsigned int Nj,
float h)
{
//Current Global ID
int i = blockDim.y * blockIdx.y + threadIdx.y; // Y - ID
int j = blockDim.x * blockIdx.x + threadIdx.x; // X - ID
//Current Local ID (lXX --> refers to local ID i.e. inside a block)
int li = threadIdx.y;
int lj = threadIdx.x;
// e_XX --> variables refer to the expanded shared memory location, in order to accommodate halo elements
//Current Local ID with radius offset.
int e_li = li + RADIUS;
int e_lj = lj + RADIUS;
// Variable pointing at top and bottom neighbouring location
int e_li_prev = e_li - 1;
int e_li_next = e_li + 1;
// Variable pointing at left and right neighbouring location
int e_lj_prev = e_lj - 1;
int e_lj_next = e_lj + 1;
__shared__ float sData [THREADS_PER_BLOCK_Y + 2 * RADIUS][THREADS_PER_BLOCK_X + 2 * RADIUS];
unsigned int index = (i)* Nj + (j) ;
if( li<RADIUS ) // copy top and bottom halo
{
//Copy Top Halo Element
if(blockIdx.y > 0) // Boundary check
sData[li][e_lj] = input[index - RADIUS * Nj];
//Copy Bottom Halo Element
if(blockIdx.y < (gridDim.y-1)) // Boundary check
sData[e_li+THREADS_PER_BLOCK_Y][e_lj] = input[index + THREADS_PER_BLOCK_Y * Nj];
}
if( lj<RADIUS ) // copy left and right halo
{
if( blockIdx.x > 0) // Boundary check
sData[e_li][lj] = input[index - RADIUS];
if(blockIdx.x < (gridDim.x-1)) // Boundary check
sData[e_li][e_lj+THREADS_PER_BLOCK_X] = input[index + THREADS_PER_BLOCK_X];
}
// copy current location
sData[e_li][e_lj] = input[index];
__syncthreads( );
if( i > 0 && j > 0 && i < (Ni-1) && j <(Nj-1))
output[index] = 0.25f * (sData[e_li_prev][e_lj] + sData[e_li_next][e_lj] + sData[e_li][e_lj_prev]
+ sData[e_li][e_lj_next] - 4*h*h);
}
|
e1497e0d03b808f8fcdddf142381446ec002648a.cu
|
#include "definitions.cuh"
// Performs the CFD calculation on global memory. This code does not use any advanced GPU
// optimization technique, but still achieves a many-fold performance gain.
__global__ void calculateCFD_V1( float* input, float* output, unsigned int Ni, unsigned int Nj,
float h)
{
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; // Y - ID
unsigned int j = blockDim.y * blockIdx.y + threadIdx.y; // X - ID
unsigned int iPrev = i-1; // Previous Y element
unsigned int iNext = i+1; // Next Y element
unsigned int jPrev = j-1; //Previous X element
unsigned int jNext = j+1; // Next X element
unsigned int index = i * Nj + j;
if( i > 0 && j > 0 && i < (Ni-1) && j <(Nj-1))
output[index] = 0.25f * (input[iPrev * Nj + j] + input[iNext* Nj + j] + input[i * Nj+ jPrev]
+ input[i* Nj + jNext] - 4*h*h);
}
//This version of Kernel uses optimization by copying the data into shared memory and hence results in better performance
__global__ void calculateCFD_V2( float* input, float* output, unsigned int Ni, unsigned int Nj,
float h)
{
//Current Global ID
int i = blockDim.y * blockIdx.y + threadIdx.y; // Y - ID
int j = blockDim.x * blockIdx.x + threadIdx.x; // X - ID
//Current Local ID (lXX --> refers to local ID i.e. inside a block)
int li = threadIdx.y;
int lj = threadIdx.x;
// e_XX --> variables refer to the expanded shared memory location, in order to accommodate halo elements
//Current Local ID with radius offset.
int e_li = li + RADIUS;
int e_lj = lj + RADIUS;
// Variable pointing at top and bottom neighbouring location
int e_li_prev = e_li - 1;
int e_li_next = e_li + 1;
// Variable pointing at left and right neighbouring location
int e_lj_prev = e_lj - 1;
int e_lj_next = e_lj + 1;
__shared__ float sData [THREADS_PER_BLOCK_Y + 2 * RADIUS][THREADS_PER_BLOCK_X + 2 * RADIUS];
unsigned int index = (i)* Nj + (j) ;
if( li<RADIUS ) // copy top and bottom halo
{
//Copy Top Halo Element
if(blockIdx.y > 0) // Boundary check
sData[li][e_lj] = input[index - RADIUS * Nj];
//Copy Bottom Halo Element
if(blockIdx.y < (gridDim.y-1)) // Boundary check
sData[e_li+THREADS_PER_BLOCK_Y][e_lj] = input[index + THREADS_PER_BLOCK_Y * Nj];
}
if( lj<RADIUS ) // copy left and right halo
{
if( blockIdx.x > 0) // Boundary check
sData[e_li][lj] = input[index - RADIUS];
if(blockIdx.x < (gridDim.x-1)) // Boundary check
sData[e_li][e_lj+THREADS_PER_BLOCK_X] = input[index + THREADS_PER_BLOCK_X];
}
// copy current location
sData[e_li][e_lj] = input[index];
__syncthreads( );
if( i > 0 && j > 0 && i < (Ni-1) && j <(Nj-1))
output[index] = 0.25f * (sData[e_li_prev][e_lj] + sData[e_li_next][e_lj] + sData[e_li][e_lj_prev]
+ sData[e_li][e_lj_next] - 4*h*h);
}
|
24258f3d8967dbc63306c8dda3c26c650f7e746d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* original_jacobi5.cu
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
__global__ void
jacobikernel( float* a, float* newa, float* lchange, int n, int m, float w0, float w1, float w2 )
{
int ti = threadIdx.x;
int tj = threadIdx.y;
int i = blockIdx.x * blockDim.x + ti + 1;
int j = blockIdx.y * blockDim.y + tj + 1;
newa[j*m+i] = w0*a[j*m+i] +
w1 * (a[j*m+i-1] + a[(j-1)*m+i] +
a[j*m+i+1] + a[(j+1)*m+i]) +
w2 * (a[(j-1)*m+i-1] + a[(j+1)*m+i-1] +
a[(j-1)*m+i+1] + a[(j+1)*m+i+1]);
__shared__ float mychange[256];
int ii = ti+blockDim.x*tj;
mychange[ii] = fabsf( newa[j*m+i] - a[j*m+i] );
__syncthreads();
int nn = blockDim.x * blockDim.y;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] );
__syncthreads();
}
if( ii == 0 )
lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0];
}
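// Second stage of the convergence check: jacobikernel leaves one per-block maximum |change| in
// lchange, and this kernel (launched with a single block from JacobiGPU) folds those gx*gy
// partial maxima down into lchange[0].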
__global__ void
reductionkernel( float* lchange, int n )
{
__shared__ float mychange[256];
float mych = 0.0f;
int ii = threadIdx.x;
if( ii < n ) mych = lchange[ii];
int m = blockDim.x;
while( m < n ){
if(ii+m < n)
mych = fmaxf( mych, lchange[ii+m] );
m += blockDim.x;
}
mychange[ii] = mych;
__syncthreads();
int nn = blockDim.x;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf(mychange[ii],mychange[ii+nn]);
__syncthreads();
}
if( ii == 0 )
lchange[0] = mychange[0];
}
static float sumtime;
void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol )
{
float change;
int iters;
size_t memsize;
int bx, by, gx, gy;
float *da, *dnewa, *lchange;
hipEvent_t e1, e2;
bx = 16;
by = 16;
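// Integer division truncates, so the grid below covers the interior only when n-2 and m-2 are
// multiples of 16; the commented-out terms on the next lines would round the block counts up
// for other sizes.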
gx = (n-2)/bx; // + ((n-2)%bx == 0?0:1);
gy = (m-2)/by; // + ((m-2)%by == 0?0:1);
sumtime = 0.0f;
memsize = sizeof(float) * n * m;
hipMalloc( &da, memsize );
hipMalloc( &dnewa, memsize );
hipMalloc( &lchange, gx * gy * sizeof(float) );
hipEventCreate( &e1 );
hipEventCreate( &e2 );
dim3 block( bx, by );
dim3 grid( gx, gy );
iters = 0;
hipMemcpy( da, a, memsize, hipMemcpyHostToDevice );
hipMemcpy( dnewa, a, memsize, hipMemcpyHostToDevice );
do{
float msec;
++iters;
hipEventRecord( e1 );
hipLaunchKernelGGL(( jacobikernel), dim3(grid), dim3(block) , 0, 0, da, dnewa, lchange, n, m, w0, w1, w2 );
hipLaunchKernelGGL(( reductionkernel), dim3(1), dim3(bx*by) , 0, 0, lchange, gx*gy );
hipEventRecord( e2 );
hipMemcpy( &change, lchange, sizeof(float), hipMemcpyDeviceToHost );
hipEventElapsedTime( &msec, e1, e2 );
sumtime += msec;
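// Swap the buffers so the grid just computed becomes the input of the next sweep.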
float *ta;
ta = da;
da = dnewa;
dnewa = ta;
}while( change > tol );
double time = sumtime/1000.0f;
double dNumOps = 14.0 * iters * n *m;
double gflops = dNumOps/time/1e9;
printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change );
printf( "JacobiGPU used %.5f seconds total\n", sumtime/1000.0f );
printf( "Size(Number of Operations) = %.0f Ops/sec \n", dNumOps );
printf( "Throughtput = %.4f GFlops/sec \n",gflops );
hipMemcpy( a, dnewa, memsize, hipMemcpyDeviceToHost );
hipFree( da );
hipFree( dnewa );
hipFree( lchange );
hipEventDestroy( e1 );
hipEventDestroy( e2 );
}
static void init( float* a, int n, int m )
{
int i, j;
memset( a, 0, sizeof(float) * n * m );
/* boundary conditions */
for( j = 0; j < n; ++j ){
a[j*m+n-1] = j;
}
for( i = 0; i < m; ++i ){
a[(n-1)*m+i] = i;
}
a[(n-1)*m+m-1] = m+n;
}
int
main( int argc, char* argv[] )
{
int n, m;
float *a;
struct timeval tt1, tt2;
int ms;
float fms;
if( argc <= 1 ){
fprintf( stderr, "%s sizen [sizem]\n", argv[0] );
return 1;
}
n = atoi( argv[1] );
if( n <= 0 ) n = 100;
m = n;
if( argc > 2 ){
m = atoi( argv[2] );
if( m <= 0 ) m = 100;
}
printf( "Jacobi %d x %d\n", n, m );
a = (float*)malloc( sizeof(float) * n * m );
init( a, n, m );
gettimeofday( &tt1, NULL );
JacobiGPU( a, n, m, .2, .1, .1, .1 );
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = (float)ms / 1000000.0f;
printf( "time(gpu ) = %f seconds\n", fms );
}
|
24258f3d8967dbc63306c8dda3c26c650f7e746d.cu
|
/*
* original_jacobi5.cu
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
__global__ void
jacobikernel( float* a, float* newa, float* lchange, int n, int m, float w0, float w1, float w2 )
{
int ti = threadIdx.x;
int tj = threadIdx.y;
int i = blockIdx.x * blockDim.x + ti + 1;
int j = blockIdx.y * blockDim.y + tj + 1;
newa[j*m+i] = w0*a[j*m+i] +
w1 * (a[j*m+i-1] + a[(j-1)*m+i] +
a[j*m+i+1] + a[(j+1)*m+i]) +
w2 * (a[(j-1)*m+i-1] + a[(j+1)*m+i-1] +
a[(j-1)*m+i+1] + a[(j+1)*m+i+1]);
__shared__ float mychange[256];
int ii = ti+blockDim.x*tj;
mychange[ii] = fabsf( newa[j*m+i] - a[j*m+i] );
__syncthreads();
int nn = blockDim.x * blockDim.y;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] );
__syncthreads();
}
if( ii == 0 )
lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0];
}
__global__ void
reductionkernel( float* lchange, int n )
{
__shared__ float mychange[256];
float mych = 0.0f;
int ii = threadIdx.x;
if( ii < n ) mych = lchange[ii];
int m = blockDim.x;
while( m < n ){
if(ii+m < n)
mych = fmaxf( mych, lchange[ii+m] );
m += blockDim.x;
}
mychange[ii] = mych;
__syncthreads();
int nn = blockDim.x;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf(mychange[ii],mychange[ii+nn]);
__syncthreads();
}
if( ii == 0 )
lchange[0] = mychange[0];
}
static float sumtime;
void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol )
{
float change;
int iters;
size_t memsize;
int bx, by, gx, gy;
float *da, *dnewa, *lchange;
cudaEvent_t e1, e2;
bx = 16;
by = 16;
gx = (n-2)/bx ;
/////////////////////// + ((n-2)%bx == 0?0:1);
gy = (m-2)/by ;
// + ((m-2)%by == 0?0:1);
sumtime = 0.0f;
memsize = sizeof(float) * n * m;
cudaMalloc( &da, memsize );
cudaMalloc( &dnewa, memsize );
cudaMalloc( &lchange, gx * gy * sizeof(float) );
cudaEventCreate( &e1 );
cudaEventCreate( &e2 );
dim3 block( bx, by );
dim3 grid( gx, gy );
iters = 0;
cudaMemcpy( da, a, memsize, cudaMemcpyHostToDevice );
cudaMemcpy( dnewa, a, memsize, cudaMemcpyHostToDevice );
do{
float msec;
++iters;
cudaEventRecord( e1 );
jacobikernel<<< grid, block >>>( da, dnewa, lchange, n, m, w0, w1, w2 );
reductionkernel<<< 1, bx*by >>>( lchange, gx*gy );
cudaEventRecord( e2 );
cudaMemcpy( &change, lchange, sizeof(float), cudaMemcpyDeviceToHost );
cudaEventElapsedTime( &msec, e1, e2 );
sumtime += msec;
float *ta;
ta = da;
da = dnewa;
dnewa = ta;
}while( change > tol );
double time = sumtime/1000.0f;
double dNumOps = 14.0 * iters * n *m;
double gflops = dNumOps/time/1e9;
printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change );
printf( "JacobiGPU used %.5f seconds total\n", sumtime/1000.0f );
printf( "Size(Number of Operations) = %.0f Ops/sec \n", dNumOps );
printf( "Throughtput = %.4f GFlops/sec \n",gflops );
cudaMemcpy( a, dnewa, memsize, cudaMemcpyDeviceToHost );
cudaFree( da );
cudaFree( dnewa );
cudaFree( lchange );
cudaEventDestroy( e1 );
cudaEventDestroy( e2 );
}
static void init( float* a, int n, int m )
{
int i, j;
memset( a, 0, sizeof(float) * n * m );
/* boundary conditions */
for( j = 0; j < n; ++j ){
a[j*m+n-1] = j;
}
for( i = 0; i < m; ++i ){
a[(n-1)*m+i] = i;
}
a[(n-1)*m+m-1] = m+n;
}
int
main( int argc, char* argv[] )
{
int n, m;
float *a;
struct timeval tt1, tt2;
int ms;
float fms;
if( argc <= 1 ){
fprintf( stderr, "%s sizen [sizem]\n", argv[0] );
return 1;
}
n = atoi( argv[1] );
if( n <= 0 ) n = 100;
m = n;
if( argc > 2 ){
m = atoi( argv[2] );
if( m <= 0 ) m = 100;
}
printf( "Jacobi %d x %d\n", n, m );
a = (float*)malloc( sizeof(float) * n * m );
init( a, n, m );
gettimeofday( &tt1, NULL );
JacobiGPU( a, n, m, .2, .1, .1, .1 );
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = (float)ms / 1000000.0f;
printf( "time(gpu ) = %f seconds\n", fms );
}
|
bccff8b5e43be49fe58ddb18551ac5e6ee908445.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_4_back;
int xdim0_update_halo_kernel4_plus_4_back_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_4_back;
int ydim0_update_halo_kernel4_plus_4_back_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_4_back;
int xdim1_update_halo_kernel4_plus_4_back_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_4_back;
int ydim1_update_halo_kernel4_plus_4_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_4_back*(y)+xdim0_update_halo_kernel4_plus_4_back*ydim0_update_halo_kernel4_plus_4_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_4_back*(y)+xdim1_update_halo_kernel4_plus_4_back*ydim1_update_halo_kernel4_plus_4_back*(z))
//user function
__device__
inline void update_halo_kernel4_plus_4_back_gpu(double *vol_flux_y, double *mass_flux_y, const int* fields) {
if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(0,0,4)];
if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(0,0,4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_4_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel4_plus_4_back + idx_z * 1*1 * xdim0_update_halo_kernel4_plus_4_back * ydim0_update_halo_kernel4_plus_4_back;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel4_plus_4_back + idx_z * 1*1 * xdim1_update_halo_kernel4_plus_4_back * ydim1_update_halo_kernel4_plus_4_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_4_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_4_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel4_plus_4_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,80)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(80,"update_halo_kernel4_plus_4_back");
OPS_kernels[80].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_4_back_h || ydim0 != ydim0_update_halo_kernel4_plus_4_back_h || xdim1 != xdim1_update_halo_kernel4_plus_4_back_h || ydim1 != ydim1_update_halo_kernel4_plus_4_back_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel4_plus_4_back, &xdim0, sizeof(int) );
xdim0_update_halo_kernel4_plus_4_back_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel4_plus_4_back, &ydim0, sizeof(int) );
ydim0_update_halo_kernel4_plus_4_back_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel4_plus_4_back, &xdim1, sizeof(int) );
xdim1_update_halo_kernel4_plus_4_back_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel4_plus_4_back, &ydim1, sizeof(int) );
ydim1_update_halo_kernel4_plus_4_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[80].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_4_back), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[80].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[80].mpi_time += t2-t1;
OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_4_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 80;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 80;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel4_plus_4_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(80,"update_halo_kernel4_plus_4_back");
}
ops_enqueue_kernel(desc);
}
#endif
|
bccff8b5e43be49fe58ddb18551ac5e6ee908445.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_4_back;
int xdim0_update_halo_kernel4_plus_4_back_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_4_back;
int ydim0_update_halo_kernel4_plus_4_back_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_4_back;
int xdim1_update_halo_kernel4_plus_4_back_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_4_back;
int ydim1_update_halo_kernel4_plus_4_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_4_back*(y)+xdim0_update_halo_kernel4_plus_4_back*ydim0_update_halo_kernel4_plus_4_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_4_back*(y)+xdim1_update_halo_kernel4_plus_4_back*ydim1_update_halo_kernel4_plus_4_back*(z))
//user function
__device__
inline void update_halo_kernel4_plus_4_back_gpu(double *vol_flux_y, double *mass_flux_y, const int* fields) {
if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(0,0,4)];
if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(0,0,4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_4_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel4_plus_4_back + idx_z * 1*1 * xdim0_update_halo_kernel4_plus_4_back * ydim0_update_halo_kernel4_plus_4_back;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel4_plus_4_back + idx_z * 1*1 * xdim1_update_halo_kernel4_plus_4_back * ydim1_update_halo_kernel4_plus_4_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_4_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_4_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel4_plus_4_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,80)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(80,"update_halo_kernel4_plus_4_back");
OPS_kernels[80].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_4_back_h || ydim0 != ydim0_update_halo_kernel4_plus_4_back_h || xdim1 != xdim1_update_halo_kernel4_plus_4_back_h || ydim1 != ydim1_update_halo_kernel4_plus_4_back_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel4_plus_4_back, &xdim0, sizeof(int) );
xdim0_update_halo_kernel4_plus_4_back_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel4_plus_4_back, &ydim0, sizeof(int) );
ydim0_update_halo_kernel4_plus_4_back_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel4_plus_4_back, &xdim1, sizeof(int) );
xdim1_update_halo_kernel4_plus_4_back_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel4_plus_4_back, &ydim1, sizeof(int) );
ydim1_update_halo_kernel4_plus_4_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[80].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel4_plus_4_back<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[80].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[80].mpi_time += t2-t1;
OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel4_plus_4_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 80;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 80;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel4_plus_4_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(80,"update_halo_kernel4_plus_4_back");
}
ops_enqueue_kernel(desc);
}
#endif
|
ea1c27582da2feb31456826f26e2aa526cc487b3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/********************************************************
*
* This experiment optimizes packet classification
* in the following aspects:
* 1. Thread assignment
* 2. Memory coalescing
*
* Experiment Assumptions:
* 1. 510 Non-overlapping intervals
* 2. 1024 Rules (510 * 1024 element BVs)
* 3. Number of packets varies, 1 kernel
* 4. All packets are already on CPU memory
* 5. All fields needs prefix/range match
*
********************************************************/
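/*
 * A minimal sketch of the thread assignment described above (illustration only,
 * reusing the names of the kernel further down): one thread handles one
 * (packet, field) pair, so for a global thread id
 *
 *     int index  = blockDim.x * blockIdx.x + threadIdx.x;
 *     int packet = index / FIELD;   // which packet this thread classifies
 *     int field  = index % FIELD;   // which header field it matches
 *
 * each thread walks one per-field interval tree and writes one entry of
 * gpu_match_result; combining the per-field bit vectors is a separate step.
 */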
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <rocblas.h>
#define FIELD 15
#define RULE 511
#define ALLRULE 2048
#define WSIZE 32
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
using namespace std;
void header_gen(int**, int**, int, int);
void tree_gen(int**, int, int);
void bv_gen(long int**, int*, int);
void data_test(int**, int**, bool**, int*, int, int);
__global__ void packet_classify(int* gpu_tree, int* gpu_headers, long int* gpu_bv, int* gpu_bv_final, int* gpu_match_result, int packet_num, int block_dim){
__shared__ int gpu_tree_shared[FIELD*RULE];
//int* match_result = new int[packet_num * FIELD];
int level = 0;
while(level * block_dim + threadIdx.x < FIELD * RULE){
gpu_tree_shared[level * block_dim + threadIdx.x] = gpu_tree[level * block_dim + threadIdx.x];
level++;
}
__syncthreads();
// if (blockDim.x * blockIdx.x + threadIdx.x < packet_num * FIELD){
int index = blockDim.x * blockIdx.x + threadIdx.x;
int tree_idx = (index % FIELD) * RULE; // each field's search tree occupies RULE consecutive entries of gpu_tree_shared
int i = 0;
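// Branchless descent of the implicit binary search tree stored in level order:
// from node i the thread moves to child 2*i+1 when the header value is <= the
// node's key and to child 2*i+2 otherwise. The loop exits once i points at a
// virtual leaf (i >= RULE), so i - RULE is the matched interval for this field.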
while (i < RULE){
i = 2 * i + (gpu_headers[index] <= gpu_tree_shared[tree_idx+i]) * 1 + (gpu_headers[index] > gpu_tree_shared[tree_idx+i]) * 2;
//tree_idx += i;
}
gpu_match_result[index] = i - RULE;
// }
/* __syncthreads();
//if ((blockDim.x * blockIdx.x + threadIdx.x)% 15 == 0){
if (blockDim.x * blockIdx.x + threadIdx.x < packet_num * FIELD){
int index = blockDim.x * blockIdx.x + threadIdx.x;
int M = ALLRULE / FIELD;
bool result[ALLRULE/FIELD];
//int ruleIdx[FIELD];
for (int i = 0; i < M; i++){
result[i] = &;
}
for(int i = 0; i < M; i++){
for (int j = 0; j < FIELD; j++){
//printf("Packet %d, field %d, result_prev: %d, gpu_bv: %d\n", index/15, i, result[i], gpu_bv[gpu_match_result[index]*ALLRULE+j]);
result[i] = result[i] & gpu_bv[gpu_match_result[index - index % FIELD + j] * ALLRULE + index % FIELD * M + i];
}
}
for(int i = 0; i < M; i++){
if (result[i]){
//printf("threadidx: %d, M: %d, packet: %d, rule: %d\n", index, M, index/FIELD, index % FIELD * M + i);
gpu_bv_final[index/FIELD]= index % FIELD * M + i;
break;
}
}
}
*/
};
int main(int argc, char** argv){
if(argc!=4){
cout<<"usage ./openflow *Packet_num *Grid_dim *Block_dim"<<endl;
return 0;
}
int packet_num = atoi(argv[1]);
int grid_dim = atoi(argv[2]);
int block_dim = atoi(argv[3]);
cout<<"grid_dim: "<<grid_dim<<", block_dim: "<<block_dim<<", packet_num: "<<packet_num<<endl;
/********************************************************
* Preparing Data:
* 1. Generate random headers
* 2. Generate BVs
* 3. Generate random packets
* 4. Deliberately make some rule-matching packets
********************************************************/
srand(time(NULL));
int** tree = new int*[FIELD];
for(int i = 0; i < FIELD; i++){
tree[i] = new int[RULE];
}
int** headers = new int*[packet_num];
for (int i = 0; i < packet_num; i++){
headers[i] = new int[FIELD];
}
long int** bv = new long int*[FIELD*(RULE+1)];
for(int i = 0; i < FIELD*(RULE+1); i++){
bv[i] = new long int[ALLRULE / sizeof(long int)];
}
int* bv_final = new int[packet_num];
int* match_result = new int[packet_num * FIELD];
tree_gen(tree, FIELD, RULE);
header_gen(headers, tree, FIELD, packet_num);
bv_gen(bv, bv_final, packet_num);
//data_test(tree, headers, bv, bv_final, packet_num, 3);
/********************************************************
* Flatten All the 2D Arrays
********************************************************/
int* tree_flatten = new int[RULE*FIELD];
int* headers_flatten = new int[packet_num*FIELD];
long int* bv_flatten = new long int[FIELD*(RULE+1) * ALLRULE / sizeof(long int)];
for (int i = 0; i < FIELD; i++){
for (int j = 0; j < RULE; j++){
tree_flatten[i*RULE+j] = tree[i][j];
}
}
for (int i = 0; i < packet_num; i++){
for (int j = 0; j < FIELD; j++){
headers_flatten[i*FIELD + j] = headers[i][j];
}
}
for (int i = 0; i < FIELD*(RULE+1); i++){
for (int j = 0; j < ALLRULE / sizeof(long int); j++){
bv_flatten[i*ALLRULE / sizeof(long int) + j] = bv[i][j];
}
}
/********************************************************
* Declare cuda events for statistical purposes:
* 1. time_memcpyH2D
* 2. time_memcpyD2H
* 3. time_pc
********************************************************/
float time1, time2, time3;
hipEvent_t time_memcpyH2D_start, time_memcpyH2D_stop, time_memcpyD2H_start, time_memcpyD2H_stop, time_comp_start, time_comp_stop;
hipEventCreate(&time_memcpyH2D_start);
hipEventCreate(&time_memcpyH2D_stop);
hipEventCreate(&time_memcpyD2H_start);
hipEventCreate(&time_memcpyD2H_stop);
hipEventCreate(&time_comp_start);
hipEventCreate(&time_comp_stop);
/********************************************************
* Allocate Space in Device:
* 1. gpu_tree
* 2. gpu_bv
* 3. gpu_bv_final
* 4. gpu_headers
********************************************************/
dim3 dimGrid(grid_dim,1);
dim3 dimBlock(block_dim,1);
int* gpu_tree;
int* gpu_headers;
int* gpu_bv_final;
int* gpu_match_result;
long int* gpu_bv;
hipMalloc((void**)&gpu_tree, sizeof(int*)*size_t(FIELD*RULE));
cudaCheckErrors("hipMalloc gpu_tree");
hipMalloc((void**)&gpu_headers, sizeof(int)*FIELD*packet_num);
cudaCheckErrors("hipMalloc gpu_headers");
hipMalloc((void**)&gpu_bv, (RULE+1)*ALLRULE);
cudaCheckErrors("hipMalloc gpu_bv");
hipMalloc((void**)&gpu_match_result, sizeof(int)*packet_num*FIELD);
cudaCheckErrors("hipMalloc gpu_match_result");
hipMalloc((void**)&gpu_bv_final, sizeof(int)*packet_num);
cudaCheckErrors("hipMalloc gpu_bv_final");
hipEventRecord(time_memcpyH2D_start, 0);
hipMemcpy(gpu_tree, tree_flatten, sizeof(int)*RULE*FIELD, hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy gpu_tree");
hipMemcpy(gpu_headers, headers_flatten, sizeof(int)*FIELD*packet_num, hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy gpu_headers");
hipMemcpy(gpu_bv, bv_flatten, (RULE+1)*ALLRULE, hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy gpu_bv");
hipMemcpy(gpu_match_result, match_result, sizeof(int)*FIELD*packet_num, hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy gpu_match_result");
hipMemcpy(gpu_bv_final, bv_final, sizeof(int)*packet_num, hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy gpu_bv_final");
hipEventRecord(time_memcpyH2D_stop, 0);
hipEventSynchronize(time_memcpyH2D_stop);
hipEventElapsedTime(&time1, time_memcpyH2D_start, time_memcpyH2D_stop);
hipEventDestroy(time_memcpyH2D_stop);
hipEventDestroy(time_memcpyH2D_start);
cout<<endl<<"* 1. Time for memcpy H2D: "<<time1<<"ms, Total bytes copied: "<<endl;
cout<<" -> Tree: "<< sizeof(int)*RULE*FIELD<<endl;
cout<<" -> Headers: "<< sizeof(long int)*FIELD*packet_num<<endl;
cout<<" -> Bv: "<<(RULE+1)*ALLRULE<<endl;
cout<<" -> Bv_final: "<< sizeof(int)*packet_num<<endl;
cout<<" -> Total Memory Copy: "<< sizeof(int)*RULE*FIELD + sizeof(long int)*FIELD*packet_num + (RULE+1)*ALLRULE + sizeof(int)*packet_num<<endl;
/********************************************************
* Main Packet Classification Process:
* 1. Function Call
* 2. Timing
* 3. Memory copy back (gpu_bv_final)
********************************************************/
hipEventRecord(time_comp_start, 0);
hipLaunchKernelGGL(( packet_classify), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_tree, gpu_headers, gpu_bv, gpu_bv_final, gpu_match_result, packet_num, block_dim);
cudaCheckErrors("Kernel fail");
hipEventRecord(time_comp_stop, 0);
hipEventSynchronize(time_comp_stop);
hipEventElapsedTime(&time2, time_comp_start, time_comp_stop);
hipEventDestroy(time_comp_stop);
hipEventDestroy(time_comp_start);
cout<<endl<<"* 2. Time for GPU computation: "<<time2<<"ms, GPU throughput: "<<packet_num/time2/1000<<" MPPS"<<endl;
hipEventRecord(time_memcpyD2H_start, 0);
hipMemcpy(bv_final, gpu_bv_final, sizeof(int)*packet_num, hipMemcpyDeviceToHost);
hipEventRecord(time_memcpyD2H_stop, 0);
hipEventSynchronize(time_memcpyD2H_stop);
hipEventElapsedTime(&time3, time_memcpyD2H_start, time_memcpyD2H_stop);
hipEventDestroy(time_memcpyD2H_stop);
hipEventDestroy(time_memcpyD2H_start);
cout<<endl<<"* 3. Time for memcpy H2D: "<<time3<<"ms, Total bytes copied: "<<sizeof(int)*packet_num<<endl<<endl;
//data_test(tree, headers, bv, bv_final, packet_num, 8);
/********************************************************
* Clear Memory:
* 1. Dynamic allocations on host
* 2. cudaFrees
********************************************************/
hipFree(gpu_tree);
hipFree(gpu_bv);
hipFree(gpu_headers);
hipFree(gpu_bv_final);
hipFree(gpu_match_result);
for (int i = 0; i < FIELD; i++){
delete tree[i];
}
for(int i = 0; i < packet_num; i++){
delete headers[i];
}
for(int i = 0; i < FIELD*(RULE+1); i++){
delete bv[i];
}
delete tree;
delete bv;
delete headers;
delete bv_final;
delete match_result;
delete tree_flatten;
delete headers_flatten;
delete bv_flatten;
return 0;
}
void tree_gen(int** tree, int field, int rule){
for(int i = 0; i < field; i++){
tree[i][0] = rand() % 100;
int temp[rule];
temp[0] = tree[i][0];
for (int j = 1; j < rule; j++){
temp[j] = temp[j-1] + rand() % 20 + 1;
}
int temp_index = rule-1, tree_index = rule -1, level = log(rule+1) / log(2);
int step_index = level;
while (step_index >= 1){
int step = pow(2, (level - step_index + 1));
while (temp_index >= 0){
tree[i][tree_index] = temp[temp_index];
temp_index -= step;
tree_index--;
}
step_index--;
temp_index = rule - 1 - (pow(2, level - step_index) - 1);
}
}
}
void header_gen(int** headers, int** tree, int field, int packet_num){
for (int i = 0; i < packet_num; i++){
for(int j = 0; j < field; j++){
headers[i][j] = rand() % 6000;
}
}
}
void bv_gen(long int ** bv, int* bv_final, int packet_num){
for (int i = 0; i < ALLRULE / sizeof(long int); i++){
for (int j = 0; j < FIELD*(RULE+1); j++){
bv[j][i] = rand() % 1000000;
}
}
for(int i = 0; i < packet_num; i++){
bv_final[i] = -1;
}
}
void data_test(int** tree, int** headers, bool** bv, int* bv_final, int packet_num, int type){
if (type > 15 || type == 0){
return;
}
if (type % 2 == 1){
cout<<"Tree: "<<endl;
for(int i = 0; i < RULE; i++){
cout<<"Line: "<<i<<": ";
for(int j = 0; j < FIELD; j++){
cout<<tree[j][i]<<" ";
}
cout<<endl;
}
}
if (type % 4 == 2 || type % 4 == 3){
cout<<endl<<"Headers: "<<endl;
for(int i = 0; i < packet_num; i++){
cout<<"Header "<<i<<": ";
for(int j = 0; j < FIELD; j++){
cout<<headers[i][j]<<" ";
}
cout<<endl;
}
}
if (type % 8 == 4 || type % 8 == 5 || type % 8 == 6 || type % 8 == 7){
cout<<endl<<"bv: "<<endl;
for(int i = 0; i < ALLRULE; i++){
cout<<"Line "<<i<<": ";
for (int j = 0; j < FIELD*(RULE+1); j++){
cout<<bv[j][i]<<" ";
}
cout<<endl;
}
}
if (type > 7){
cout<<endl<<"bv_final: "<<endl;
for(int i = 0; i < packet_num; i++){
cout<<bv_final[i]<<" ";
}
cout<<endl;
}
cout<<"============== End of Print =============="<<endl;
}
|
ea1c27582da2feb31456826f26e2aa526cc487b3.cu
|
/********************************************************
*
* This experiment optimizes packet classification
* in the following aspects:
* 1. Thread assignment
* 2. Memory coalescing
*
* Experiment Assumptions:
* 1. 510 Non-overlapping intervals
* 2. 1024 Rules (510 * 1024 element BVs)
* 3. Number of packets varies, 1 kernel
* 4. All packets are already on CPU memory
* 5. All fields needs prefix/range match
*
********************************************************/
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <cublas.h>
#define FIELD 15
#define RULE 511
#define ALLRULE 2048
#define WSIZE 32
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
using namespace std;
void header_gen(int**, int**, int, int);
void tree_gen(int**, int, int);
void bv_gen(long int**, int*, int);
void data_test(int**, int**, bool**, int*, int, int);
__global__ void packet_classify(int* gpu_tree, int* gpu_headers, long int* gpu_bv, int* gpu_bv_final, int* gpu_match_result, int packet_num, int block_dim){
__shared__ int gpu_tree_shared[FIELD*RULE];
//int* match_result = new int[packet_num * FIELD];
int level = 0;
while(level * block_dim + threadIdx.x < FIELD * RULE){
gpu_tree_shared[level * block_dim + threadIdx.x] = gpu_tree[level * block_dim + threadIdx.x];
level++;
}
__syncthreads();
// if (blockDim.x * blockIdx.x + threadIdx.x < packet_num * FIELD){
int index = blockDim.x * blockIdx.x + threadIdx.x;
int tree_idx = (index % FIELD) * RULE; // each field's search tree occupies RULE consecutive entries of gpu_tree_shared
int i = 0;
while (i < RULE){
i = 2 * i + (gpu_headers[index] <= gpu_tree_shared[tree_idx+i]) * 1 + (gpu_headers[index] > gpu_tree_shared[tree_idx+i]) * 2;
//tree_idx += i;
}
gpu_match_result[index] = i - RULE;
// }
/* __syncthreads();
//if ((blockDim.x * blockIdx.x + threadIdx.x)% 15 == 0){
if (blockDim.x * blockIdx.x + threadIdx.x < packet_num * FIELD){
int index = blockDim.x * blockIdx.x + threadIdx.x;
int M = ALLRULE / FIELD;
bool result[ALLRULE/FIELD];
//int ruleIdx[FIELD];
for (int i = 0; i < M; i++){
result[i] = &;
}
for(int i = 0; i < M; i++){
for (int j = 0; j < FIELD; j++){
//printf("Packet %d, field %d, result_prev: %d, gpu_bv: %d\n", index/15, i, result[i], gpu_bv[gpu_match_result[index]*ALLRULE+j]);
result[i] = result[i] & gpu_bv[gpu_match_result[index - index % FIELD + j] * ALLRULE + index % FIELD * M + i];
}
}
for(int i = 0; i < M; i++){
if (result[i]){
//printf("threadidx: %d, M: %d, packet: %d, rule: %d\n", index, M, index/FIELD, index % FIELD * M + i);
gpu_bv_final[index/FIELD]= index % FIELD * M + i;
break;
}
}
}
*/
};
int main(int argc, char** argv){
if(argc!=4){
cout<<"usage ./openflow *Packet_num *Grid_dim *Block_dim"<<endl;
return 0;
}
int packet_num = atoi(argv[1]);
int grid_dim = atoi(argv[2]);
int block_dim = atoi(argv[3]);
cout<<"grid_dim: "<<grid_dim<<", block_dim: "<<block_dim<<", packet_num: "<<packet_num<<endl;
/********************************************************
* Preparing Data:
* 1. Generate random headers
* 2. Generate BVs
* 3. Generate random packets
* 4. Deliberately make some rule-matching packets
********************************************************/
srand(time(NULL));
int** tree = new int*[FIELD];
for(int i = 0; i < FIELD; i++){
tree[i] = new int[RULE];
}
int** headers = new int*[packet_num];
for (int i = 0; i < packet_num; i++){
headers[i] = new int[FIELD];
}
long int** bv = new long int*[FIELD*(RULE+1)];
for(int i = 0; i < FIELD*(RULE+1); i++){
bv[i] = new long int[ALLRULE / sizeof(long int)];
}
int* bv_final = new int[packet_num];
int* match_result = new int[packet_num * FIELD];
tree_gen(tree, FIELD, RULE);
header_gen(headers, tree, FIELD, packet_num);
bv_gen(bv, bv_final, packet_num);
//data_test(tree, headers, bv, bv_final, packet_num, 3);
/********************************************************
* Flatten All the 2D Arrays
********************************************************/
int* tree_flatten = new int[RULE*FIELD];
int* headers_flatten = new int[packet_num*FIELD];
long int* bv_flatten = new long int[FIELD*(RULE+1) * ALLRULE / sizeof(long int)];
for (int i = 0; i < FIELD; i++){
for (int j = 0; j < RULE; j++){
tree_flatten[i*RULE+j] = tree[i][j];
}
}
for (int i = 0; i < packet_num; i++){
for (int j = 0; j < FIELD; j++){
headers_flatten[i*FIELD + j] = headers[i][j];
}
}
for (int i = 0; i < FIELD*(RULE+1); i++){
for (int j = 0; j < ALLRULE / sizeof(long int); j++){
bv_flatten[i*ALLRULE / sizeof(long int) + j] = bv[i][j];
}
}
/********************************************************
* Declare cuda events for statistical purposes:
* 1. time_memcpyH2D
* 2. time_memcpyD2H
* 3. time_pc
********************************************************/
float time1, time2, time3;
cudaEvent_t time_memcpyH2D_start, time_memcpyH2D_stop, time_memcpyD2H_start, time_memcpyD2H_stop, time_comp_start, time_comp_stop;
cudaEventCreate(&time_memcpyH2D_start);
cudaEventCreate(&time_memcpyH2D_stop);
cudaEventCreate(&time_memcpyD2H_start);
cudaEventCreate(&time_memcpyD2H_stop);
cudaEventCreate(&time_comp_start);
cudaEventCreate(&time_comp_stop);
/********************************************************
* Allocate Space in Device:
* 1. gpu_tree
* 2. gpu_bv
* 3. gpu_bv_final
* 4. gpu_headers
********************************************************/
dim3 dimGrid(grid_dim,1);
dim3 dimBlock(block_dim,1);
int* gpu_tree;
int* gpu_headers;
int* gpu_bv_final;
int* gpu_match_result;
long int* gpu_bv;
cudaMalloc((void**)&gpu_tree, sizeof(int*)*size_t(FIELD*RULE));
cudaCheckErrors("cudaMalloc gpu_tree");
cudaMalloc((void**)&gpu_headers, sizeof(int)*FIELD*packet_num);
cudaCheckErrors("cudaMalloc gpu_headers");
cudaMalloc((void**)&gpu_bv, (RULE+1)*ALLRULE);
cudaCheckErrors("cudaMalloc gpu_bv");
cudaMalloc((void**)&gpu_match_result, sizeof(int)*packet_num*FIELD);
cudaCheckErrors("cudaMalloc gpu_match_result");
cudaMalloc((void**)&gpu_bv_final, sizeof(int)*packet_num);
cudaCheckErrors("cudaMalloc gpu_bv_final");
cudaEventRecord(time_memcpyH2D_start, 0);
cudaMemcpy(gpu_tree, tree_flatten, sizeof(int)*RULE*FIELD, cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy gpu_tree");
cudaMemcpy(gpu_headers, headers_flatten, sizeof(int)*FIELD*packet_num, cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy gpu_headers");
cudaMemcpy(gpu_bv, bv_flatten, (RULE+1)*ALLRULE, cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy gpu_bv");
cudaMemcpy(gpu_match_result, match_result, sizeof(int)*FIELD*packet_num, cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy gpu_match_result");
cudaMemcpy(gpu_bv_final, bv_final, sizeof(int)*packet_num, cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy gpu_bv_final");
cudaEventRecord(time_memcpyH2D_stop, 0);
cudaEventSynchronize(time_memcpyH2D_stop);
cudaEventElapsedTime(&time1, time_memcpyH2D_start, time_memcpyH2D_stop);
cudaEventDestroy(time_memcpyH2D_stop);
cudaEventDestroy(time_memcpyH2D_start);
cout<<endl<<"* 1. Time for memcpy H2D: "<<time1<<"ms, Total bytes copied: "<<endl;
cout<<" -> Tree: "<< sizeof(int)*RULE*FIELD<<endl;
cout<<" -> Headers: "<< sizeof(long int)*FIELD*packet_num<<endl;
cout<<" -> Bv: "<<(RULE+1)*ALLRULE<<endl;
cout<<" -> Bv_final: "<< sizeof(int)*packet_num<<endl;
cout<<" -> Total Memory Copy: "<< sizeof(int)*RULE*FIELD + sizeof(long int)*FIELD*packet_num + (RULE+1)*ALLRULE + sizeof(int)*packet_num<<endl;
/********************************************************
* Main Packet Classification Process:
* 1. Function Call
* 2. Timing
* 3. Memory copy back (gpu_bv_final)
********************************************************/
cudaEventRecord(time_comp_start, 0);
packet_classify<<<dimGrid, dimBlock>>>(gpu_tree, gpu_headers, gpu_bv, gpu_bv_final, gpu_match_result, packet_num, block_dim);
cudaCheckErrors("Kernel fail");
cudaEventRecord(time_comp_stop, 0);
cudaEventSynchronize(time_comp_stop);
cudaEventElapsedTime(&time2, time_comp_start, time_comp_stop);
cudaEventDestroy(time_comp_stop);
cudaEventDestroy(time_comp_start);
cout<<endl<<"* 2. Time for GPU computation: "<<time2<<"ms, GPU throughput: "<<packet_num/time2/1000<<" MPPS"<<endl;
cudaEventRecord(time_memcpyD2H_start, 0);
cudaMemcpy(bv_final, gpu_bv_final, sizeof(int)*packet_num, cudaMemcpyDeviceToHost);
cudaEventRecord(time_memcpyD2H_stop, 0);
cudaEventSynchronize(time_memcpyD2H_stop);
cudaEventElapsedTime(&time3, time_memcpyD2H_start, time_memcpyD2H_stop);
cudaEventDestroy(time_memcpyD2H_stop);
cudaEventDestroy(time_memcpyD2H_start);
cout<<endl<<"* 3. Time for memcpy H2D: "<<time3<<"ms, Total bytes copied: "<<sizeof(int)*packet_num<<endl<<endl;
//data_test(tree, headers, bv, bv_final, packet_num, 8);
/********************************************************
* Clear Memory:
* 1. Dynamic allocations on host
* 2. cudaFrees
********************************************************/
cudaFree(gpu_tree);
cudaFree(gpu_bv);
cudaFree(gpu_headers);
cudaFree(gpu_bv_final);
cudaFree(gpu_match_result);
for (int i = 0; i < FIELD; i++){
delete tree[i];
}
for(int i = 0; i < packet_num; i++){
delete headers[i];
}
for(int i = 0; i < FIELD*(RULE+1); i++){
delete bv[i];
}
delete tree;
delete bv;
delete headers;
delete bv_final;
delete match_result;
delete tree_flatten;
delete headers_flatten;
delete bv_flatten;
return 0;
}
void tree_gen(int** tree, int field, int rule){
for(int i = 0; i < field; i++){
tree[i][0] = rand() % 100;
int temp[rule];
temp[0] = tree[i][0];
for (int j = 1; j < rule; j++){
temp[j] = temp[j-1] + rand() % 20 + 1;
}
int temp_index = rule-1, tree_index = rule -1, level = log(rule+1) / log(2);
int step_index = level;
while (step_index >= 1){
int step = pow(2, (level - step_index + 1));
while (temp_index >= 0){
tree[i][tree_index] = temp[temp_index];
temp_index -= step;
tree_index--;
}
step_index--;
temp_index = rule - 1 - (pow(2, level - step_index) - 1);
}
}
}
void header_gen(int** headers, int** tree, int field, int packet_num){
for (int i = 0; i < packet_num; i++){
for(int j = 0; j < field; j++){
headers[i][j] = rand() % 6000;
}
}
}
void bv_gen(long int ** bv, int* bv_final, int packet_num){
for (int i = 0; i < ALLRULE / sizeof(long int); i++){
for (int j = 0; j < FIELD*(RULE+1); j++){
bv[j][i] = rand() % 1000000;
}
}
for(int i = 0; i < packet_num; i++){
bv_final[i] = -1;
}
}
void data_test(int** tree, int** headers, bool** bv, int* bv_final, int packet_num, int type){
if (type > 15 || type == 0){
return;
}
if (type % 2 == 1){
cout<<"Tree: "<<endl;
for(int i = 0; i < RULE; i++){
cout<<"Line: "<<i<<": ";
for(int j = 0; j < FIELD; j++){
cout<<tree[j][i]<<" ";
}
cout<<endl;
}
}
if (type % 4 == 2 || type % 4 == 3){
cout<<endl<<"Headers: "<<endl;
for(int i = 0; i < packet_num; i++){
cout<<"Header "<<i<<": ";
for(int j = 0; j < FIELD; j++){
cout<<headers[i][j]<<" ";
}
cout<<endl;
}
}
if (type % 8 == 4 || type % 8 == 5 || type % 8 == 6 || type % 8 == 7){
cout<<endl<<"bv: "<<endl;
for(int i = 0; i < ALLRULE; i++){
cout<<"Line "<<i<<": ";
for (int j = 0; j < FIELD*(RULE+1); j++){
cout<<bv[j][i]<<" ";
}
cout<<endl;
}
}
if (type > 7){
cout<<endl<<"bv_final: "<<endl;
for(int i = 0; i < packet_num; i++){
cout<<bv_final[i]<<" ";
}
cout<<endl;
}
cout<<"============== End of Print =============="<<endl;
}
|
9d660dc1f88db0c2c0f394afbd104ca803d7ae9c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////
// File: ProgramCU.cu
// Author: Changchang Wu
// Description : implementation of ProgramCU and all CUDA kernels
//
// Copyright (c) 2007 University of North Carolina at Chapel Hill
// All Rights Reserved
//
// Permission to use, copy, modify and distribute this software and its
// documentation for educational, research and non-profit purposes, without
// fee, and without a written agreement is hereby granted, provided that the
// above copyright notice and the following paragraph appear in all copies.
//
// The University of North Carolina at Chapel Hill make no representations
// about the suitability of this software for any purpose. It is provided
// 'as is' without express or implied warranty.
//
// Please send BUG REPORTS to [email protected]
//
////////////////////////////////////////////////////////////////////////////
#if defined(CUDA_SIFTGPU_ENABLED)
#include "GL/glew.h"
#include "stdio.h"
#include "CuTexImage.h"
#include "ProgramCU.h"
#include "GlobalUtil.h"
//----------------------------------------------------------------
//Begin SiftGPU setting section.
//////////////////////////////////////////////////////////
#define IMUL(X,Y) __mul24(X,Y)
//#define FDIV(X,Y) ((X)/(Y))
#define FDIV(X,Y) __fdividef(X,Y)
/////////////////////////////////////////////////////////
//filter kernel width range (don't change this)
#define KERNEL_MAX_WIDTH 33
#define KERNEL_MIN_WIDTH 5
//////////////////////////////////////////////////////////
//horizontal filter block size (32, 64, 128, 256, 512)
#define FILTERH_TILE_WIDTH 128
//thread block for vertical filter. FILTERV_BLOCK_WIDTH can be (4, 8 or 16)
#define FILTERV_BLOCK_WIDTH 16
#define FILTERV_BLOCK_HEIGHT 32
//The corresponding image patch for a thread block
#define FILTERV_PIXEL_PER_THREAD 4
#define FILTERV_TILE_WIDTH FILTERV_BLOCK_WIDTH
#define FILTERV_TILE_HEIGHT (FILTERV_PIXEL_PER_THREAD * FILTERV_BLOCK_HEIGHT)
//////////////////////////////////////////////////////////
//thread block size for computing Difference of Gaussian
#define DOG_BLOCK_LOG_DIMX 7
#define DOG_BLOCK_LOG_DIMY 0
#define DOG_BLOCK_DIMX (1 << DOG_BLOCK_LOG_DIMX)
#define DOG_BLOCK_DIMY (1 << DOG_BLOCK_LOG_DIMY)
//////////////////////////////////////////////////////////
//thread block size for keypoint detection
#define KEY_BLOCK_LOG_DIMX 3
#define KEY_BLOCK_LOG_DIMY 3
#define KEY_BLOCK_DIMX (1<<KEY_BLOCK_LOG_DIMX)
#define KEY_BLOCK_DIMY (1<<KEY_BLOCK_LOG_DIMY)
//#define KEY_OFFSET_ONE
//make KEY_BLOCK_LOG_DIMX 4 will make the write coalesced..
//but it seems uncoalesced writes don't affect the speed
//////////////////////////////////////////////////////////
//thread block size for initializing list generation (64, 128, 256, 512 ...)
#define HIST_INIT_WIDTH 128
//thread block size for generating feature list (32, 64, 128, 256, 512, ...)
#define LISTGEN_BLOCK_DIM 128
/////////////////////////////////////////////////////////
//how many keypoint orientations to compute in a block
#define ORIENTATION_COMPUTE_PER_BLOCK 64
//how many keypoint descriptor to compute in a block (2, 4, 8, 16, 32)
#define DESCRIPTOR_COMPUTE_PER_BLOCK 4
#define DESCRIPTOR_COMPUTE_BLOCK_SIZE (16 * DESCRIPTOR_COMPUTE_PER_BLOCK)
//how many keypoint descriptor to normalized in a block (32, ...)
#define DESCRIPTOR_NORMALIZ_PER_BLOCK 32
///////////////////////////////////////////
//Thread block size for visualization
//(This doesn't affect the speed of computation)
#define BLOCK_LOG_DIM 4
#define BLOCK_DIM (1 << BLOCK_LOG_DIM)
//End SiftGPU setting section.
//----------------------------------------------------------------
__device__ __constant__ float d_kernel[KERNEL_MAX_WIDTH];
texture<float, 1, hipReadModeElementType> texData;
texture<unsigned char, 1, hipReadModeNormalizedFloat> texDataB;
texture<float2, 2, hipReadModeElementType> texDataF2;
texture<float4, 1, hipReadModeElementType> texDataF4;
texture<int4, 1, hipReadModeElementType> texDataI4;
texture<int4, 1, hipReadModeElementType> texDataList;
//template<int i> __device__ float Conv(float *data) { return Conv<i-1>(data) + data[i]*d_kernel[i];}
//template<> __device__ float Conv<0>(float *data) { return data[0] * d_kernel[0]; }
//////////////////////////////////////////////////////////////
template<int FW> __global__ void FilterH( float* d_result, int width)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FILTERH_TILE_WIDTH + FW -1;
const int CACHE_COUNT = 2 + (CACHE_WIDTH - 2)/ FILTERH_TILE_WIDTH;
__shared__ float data[CACHE_WIDTH];
const int bcol = IMUL(blockIdx.x, FILTERH_TILE_WIDTH);
const int col = bcol + threadIdx.x;
const int index_min = IMUL(blockIdx.y, width);
const int index_max = index_min + width - 1;
int src_index = index_min + bcol - HALF_WIDTH + threadIdx.x;
int cache_index = threadIdx.x;
float value = 0;
#pragma unroll
for(int j = 0; j < CACHE_COUNT; ++j)
{
if(cache_index < CACHE_WIDTH)
{
int fetch_index = src_index < index_min? index_min : (src_index > index_max ? index_max : src_index);
data[cache_index] = tex1Dfetch(texData,fetch_index);
src_index += FILTERH_TILE_WIDTH;
cache_index += FILTERH_TILE_WIDTH;
}
}
__syncthreads();
if(col >= width) return;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[threadIdx.x + i]* d_kernel[i]);
}
// value = Conv<FW-1>(data + threadIdx.x);
d_result[index_min + col] = value;
}
////////////////////////////////////////////////////////////////////
template<int FW> __global__ void FilterV(float* d_result, int width, int height)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FW + FILTERV_TILE_HEIGHT - 1;
const int TEMP = CACHE_WIDTH & 0xf;
//add some extra space to avoid bank conflict
#if FILTERV_TILE_WIDTH == 16
//make the stride 16 * n +/- 1
const int EXTRA = (TEMP == 1 || TEMP == 0) ? 1 - TEMP : 15 - TEMP;
#elif FILTERV_TILE_WIDTH == 8
//make the stride 16 * n +/- 2
const int EXTRA = (TEMP == 2 || TEMP == 1 || TEMP == 0) ? 2 - TEMP : (TEMP == 15? 3 : 14 - TEMP);
#elif FILTERV_TILE_WIDTH == 4
//make the stride 16 * n +/- 4
const int EXTRA = (TEMP >=0 && TEMP <=4) ? 4 - TEMP : (TEMP > 12? 20 - TEMP : 12 - TEMP);
#else
#error
#endif
const int CACHE_TRUE_WIDTH = CACHE_WIDTH + EXTRA;
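// Worked example of the padding above (illustration): with FW = 9 and
// FILTERV_TILE_HEIGHT = 128, CACHE_WIDTH = 136, TEMP = 8 and EXTRA = 7, so
// CACHE_TRUE_WIDTH = 143 = 16*9 - 1. Because the per-thread row stride is then
// odd, the 16 threads of a half-warp touch 16 distinct banks of 16-bank shared
// memory instead of colliding on a stride that is a multiple of 16.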
const int CACHE_COUNT = (CACHE_WIDTH + FILTERV_BLOCK_HEIGHT - 1) / FILTERV_BLOCK_HEIGHT;
const int WRITE_COUNT = (FILTERV_TILE_HEIGHT + FILTERV_BLOCK_HEIGHT -1) / FILTERV_BLOCK_HEIGHT;
__shared__ float data[CACHE_TRUE_WIDTH * FILTERV_TILE_WIDTH];
const int row_block_first = IMUL(blockIdx.y, FILTERV_TILE_HEIGHT);
const int col = IMUL(blockIdx.x, FILTERV_TILE_WIDTH) + threadIdx.x;
const int row_first = row_block_first - HALF_WIDTH;
const int data_index_max = IMUL(height - 1, width) + col;
const int cache_col_start = threadIdx.y;
const int cache_row_start = IMUL(threadIdx.x, CACHE_TRUE_WIDTH);
int cache_index = cache_col_start + cache_row_start;
int data_index = IMUL(row_first + cache_col_start, width) + col;
if(col < width)
{
#pragma unroll
for(int i = 0; i < CACHE_COUNT; ++i)
{
if(cache_col_start < CACHE_WIDTH - i * FILTERV_BLOCK_HEIGHT)
{
int fetch_index = data_index < col ? col : (data_index > data_index_max? data_index_max : data_index);
data[cache_index + i * FILTERV_BLOCK_HEIGHT] = tex1Dfetch(texData,fetch_index);
data_index += IMUL(FILTERV_BLOCK_HEIGHT, width);
}
}
}
__syncthreads();
if(col >= width) return;
int row = row_block_first + threadIdx.y;
int index_start = cache_row_start + threadIdx.y;
#pragma unroll
for(int i = 0; i < WRITE_COUNT; ++i,
row += FILTERV_BLOCK_HEIGHT, index_start += FILTERV_BLOCK_HEIGHT)
{
if(row < height)
{
int index_dest = IMUL(row, width) + col;
float value = 0;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[index_start + i] * d_kernel[i]);
}
d_result[index_dest] = value;
}
}
}
template<int LOG_SCALE> __global__ void UpsampleKernel(float* d_result, int width)
{
const int SCALE = (1 << LOG_SCALE), SCALE_MASK = (SCALE - 1);
const float INV_SCALE = 1.0f / (float(SCALE));
int col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(col >= width) return;
int row = blockIdx.y >> LOG_SCALE;
int index = row * width + col;
int dst_row = blockIdx.y;
int dst_idx= (width * dst_row + col) * SCALE;
int helper = blockIdx.y & SCALE_MASK;
if (helper)
{
float v11 = tex1Dfetch(texData, index);
float v12 = tex1Dfetch(texData, index + 1);
index += width;
float v21 = tex1Dfetch(texData, index);
float v22 = tex1Dfetch(texData, index + 1);
float w1 = INV_SCALE * helper, w2 = 1.0 - w1;
float v1 = (v21 * w1 + w2 * v11);
float v2 = (v22 * w1 + w2 * v12);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}else
{
float v1 = tex1Dfetch(texData, index);
float v2 = tex1Dfetch(texData, index + 1);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
void ProgramCU::SampleImageU(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
src->BindTexture(texData);
dim3 grid((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height << log_scale);
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1 : hipLaunchKernelGGL(( UpsampleKernel<1>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width); break;
case 2 : hipLaunchKernelGGL(( UpsampleKernel<2>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width); break;
case 3 : hipLaunchKernelGGL(( UpsampleKernel<3>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width); break;
default: break;
}
}
template<int LOG_SCALE> __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width) return;
const int src_col = min((dst_col << LOG_SCALE), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << LOG_SCALE;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
__global__ void DownsampleKernel(float* d_result, int src_width, int dst_width, const int log_scale)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width) return;
const int src_col = min((dst_col << log_scale), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << log_scale;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
void ProgramCU::SampleImageD(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int src_width = src->GetImgWidth(), dst_width = dst->GetImgWidth() ;
src->BindTexture(texData);
dim3 grid((dst_width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, dst->GetImgHeight());
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1 : hipLaunchKernelGGL(( DownsampleKernel<1>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width); break;
case 2 :hipLaunchKernelGGL(( DownsampleKernel<2>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width); break;
case 3 : hipLaunchKernelGGL(( DownsampleKernel<3>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width); break;
default:hipLaunchKernelGGL(( DownsampleKernel) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width, log_scale);
}
}
__global__ void ChannelReduce_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texData, index*4);
}
__global__ void ChannelReduce_Convert_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
float4 rgba = tex1Dfetch(texDataF4, index);
d_result[index] = 0.299f * rgba.x + 0.587f* rgba.y + 0.114f * rgba.z;
}
void ProgramCU::ReduceToSingleChannel(CuTexImage* dst, CuTexImage* src, int convert_rgb)
{
int width = src->GetImgWidth(), height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
if(convert_rgb)
{
src->BindTexture(texDataF4);
hipLaunchKernelGGL(( ChannelReduce_Convert_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData);
}else
{
src->BindTexture(texData);
hipLaunchKernelGGL(( ChannelReduce_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData);
}
}
__global__ void ConvertByteToFloat_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texDataB, index);
}
void ProgramCU::ConvertByteToFloat(CuTexImage*src, CuTexImage* dst)
{
int width = src->GetImgWidth(), height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
src->BindTexture(texDataB);
hipLaunchKernelGGL(( ConvertByteToFloat_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData);
}
void ProgramCU::CreateFilterKernel(float sigma, float* kernel, int& width)
{
int i, sz = int( ceil( GlobalUtil::_FilterWidthFactor * sigma -0.5) ) ;//
width = 2*sz + 1;
if(width > KERNEL_MAX_WIDTH)
{
//filter size truncation
sz = KERNEL_MAX_WIDTH >> 1;
width =KERNEL_MAX_WIDTH;
}else if(width < KERNEL_MIN_WIDTH)
{
sz = KERNEL_MIN_WIDTH >> 1;
width =KERNEL_MIN_WIDTH;
}
float rv = 1.0f/(sigma*sigma), v, ksum =0;
// pre-compute filter
for( i = -sz ; i <= sz ; ++i)
{
kernel[i+sz] = v = exp(-0.5f * i * i *rv) ;
ksum += v;
}
//normalize the kernel
rv = 1.0f/ksum;
for(i = 0; i< width ;i++) kernel[i]*=rv;
}
template<int FW> void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
//horizontal filtering
src->BindTexture(texData);
dim3 gridh((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height);
dim3 blockh(FILTERH_TILE_WIDTH);
hipLaunchKernelGGL(( FilterH<FW>), dim3(gridh), dim3(blockh), 0, 0, (float*)buf->_cuData, width);
CheckErrorCUDA("FilterH");
///vertical filtering
buf->BindTexture(texData);
dim3 gridv((width + FILTERV_TILE_WIDTH - 1)/ FILTERV_TILE_WIDTH, (height + FILTERV_TILE_HEIGHT - 1)/FILTERV_TILE_HEIGHT);
dim3 blockv(FILTERV_TILE_WIDTH, FILTERV_BLOCK_HEIGHT);
hipLaunchKernelGGL(( FilterV<FW>), dim3(gridv), dim3(blockv), 0, 0, (float*)dst->_cuData, width, height);
CheckErrorCUDA("FilterV");
}
//////////////////////////////////////////////////////////////////////
// tested on 2048x1500 image, the time on pyramid construction is
// OpenGL version : 18ms
// CUDA version: 28 ms
void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf, float sigma)
{
float filter_kernel[KERNEL_MAX_WIDTH]; int width;
CreateFilterKernel(sigma, filter_kernel, width);
hipMemcpyToSymbol(d_kernel, filter_kernel, width * sizeof(float), 0, hipMemcpyHostToDevice);
switch(width)
{
case 5: FilterImage< 5>(dst, src, buf); break;
case 7: FilterImage< 7>(dst, src, buf); break;
case 9: FilterImage< 9>(dst, src, buf); break;
case 11: FilterImage<11>(dst, src, buf); break;
case 13: FilterImage<13>(dst, src, buf); break;
case 15: FilterImage<15>(dst, src, buf); break;
case 17: FilterImage<17>(dst, src, buf); break;
case 19: FilterImage<19>(dst, src, buf); break;
case 21: FilterImage<21>(dst, src, buf); break;
case 23: FilterImage<23>(dst, src, buf); break;
case 25: FilterImage<25>(dst, src, buf); break;
case 27: FilterImage<27>(dst, src, buf); break;
case 29: FilterImage<29>(dst, src, buf); break;
case 31: FilterImage<31>(dst, src, buf); break;
case 33: FilterImage<33>(dst, src, buf); break;
default: break;
}
}
texture<float, 1, hipReadModeElementType> texC;
texture<float, 1, hipReadModeElementType> texP;
texture<float, 1, hipReadModeElementType> texN;
void __global__ ComputeDOG_Kernel(float* d_dog, float2* d_got, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
float vxn = tex1Dfetch(texC, index + 1);
float vxp = tex1Dfetch(texC, index - 1);
float vyp = tex1Dfetch(texC, index - width);
float vyn = tex1Dfetch(texC, index + width);
float dx = vxn - vxp, dy = vyn - vyp;
float grd = 0.5f * sqrt(dx * dx + dy * dy);
float rot = (grd == 0.0f? 0.0f : atan2(dy, dx));
d_got[index] = make_float2(grd, rot);
}
}
void __global__ ComputeDOG_Kernel(float* d_dog, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
}
}
void ProgramCU::ComputeDOG(CuTexImage* gus, CuTexImage* dog, CuTexImage* got)
{
int width = gus->GetImgWidth(), height = gus->GetImgHeight();
dim3 grid((width + DOG_BLOCK_DIMX - 1)/ DOG_BLOCK_DIMX, (height + DOG_BLOCK_DIMY - 1)/DOG_BLOCK_DIMY);
dim3 block(DOG_BLOCK_DIMX, DOG_BLOCK_DIMY);
gus->BindTexture(texC);
(gus -1)->BindTexture(texP);
if(got->_cuData)
hipLaunchKernelGGL(( ComputeDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) dog->_cuData, (float2*) got->_cuData, width, height);
else
hipLaunchKernelGGL(( ComputeDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) dog->_cuData, width, height);
}
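// READ_CMP_DOG_DATA loads three horizontally adjacent DoG samples from 'tex'
// into datai[0..2] and updates the running neighborhood maximum/minimum:
// when the center value v is a candidate maximum it raises nmax, otherwise it
// lowers nmin, and it jumps to key_finish as soon as v is no longer a strict
// 3x3x3 extremum. ComputeKEY_Kernel below applies it to the current, previous
// and next DoG levels before the edge test and subpixel refinement.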
#define READ_CMP_DOG_DATA(datai, tex, idx) \
datai[0] = tex1Dfetch(tex, idx - 1);\
datai[1] = tex1Dfetch(tex, idx);\
datai[2] = tex1Dfetch(tex, idx + 1);\
if(v > nmax)\
{\
nmax = max(nmax, datai[0]);\
nmax = max(nmax, datai[1]);\
nmax = max(nmax, datai[2]);\
if(v < nmax) goto key_finish;\
}else\
{\
nmin = min(nmin, datai[0]);\
nmin = min(nmin, datai[1]);\
nmin = min(nmin, datai[2]);\
if(v > nmin) goto key_finish;\
}
void __global__ ComputeKEY_Kernel(float4* d_key, int width, int colmax, int rowmax,
float dog_threshold0, float dog_threshold, float edge_threshold, int subpixel_localization)
{
float data[3][3], v;
float datap[3][3], datan[3][3];
#ifdef KEY_OFFSET_ONE
int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y + 1;
int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x + 1;
#else
int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x;
#endif
int index = IMUL(row, width) + col;
int idx[3] ={index - width, index, index + width};
int in_image =0;
float nmax, nmin, result = 0.0f;
float dx = 0, dy = 0, ds = 0;
bool offset_test_passed = true;
#ifdef KEY_OFFSET_ONE
if(row < rowmax && col < colmax)
#else
if(row > 0 && col > 0 && row < rowmax && col < colmax)
#endif
{
in_image = 1;
data[1][1] = v = tex1Dfetch(texC, idx[1]);
if(fabs(v) <= dog_threshold0) goto key_finish;
data[1][0] = tex1Dfetch(texC, idx[1] - 1);
data[1][2] = tex1Dfetch(texC, idx[1] + 1);
nmax = max(data[1][0], data[1][2]);
nmin = min(data[1][0], data[1][2]);
if(v <=nmax && v >= nmin) goto key_finish;
//if((v > nmax && v < 0 )|| (v < nmin && v > 0)) goto key_finish;
READ_CMP_DOG_DATA(data[0], texC, idx[0]);
READ_CMP_DOG_DATA(data[2], texC, idx[2]);
//edge suppression
float vx2 = v * 2.0f;
float fxx = data[1][0] + data[1][2] - vx2;
float fyy = data[0][1] + data[2][1] - vx2;
float fxy = 0.25f * (data[2][2] + data[0][0] - data[2][0] - data[0][2]);
float temp1 = fxx * fyy - fxy * fxy;
float temp2 = (fxx + fyy) * (fxx + fyy);
if(temp1 <=0 || temp2 > edge_threshold * temp1) goto key_finish;
//read the previous level
READ_CMP_DOG_DATA(datap[0], texP, idx[0]);
READ_CMP_DOG_DATA(datap[1], texP, idx[1]);
READ_CMP_DOG_DATA(datap[2], texP, idx[2]);
//read the next level
READ_CMP_DOG_DATA(datan[0], texN, idx[0]);
READ_CMP_DOG_DATA(datan[1], texN, idx[1]);
READ_CMP_DOG_DATA(datan[2], texN, idx[2]);
if(subpixel_localization)
{
//subpixel localization
float fx = 0.5f * (data[1][2] - data[1][0]);
float fy = 0.5f * (data[2][1] - data[0][1]);
float fs = 0.5f * (datan[1][1] - datap[1][1]);
float fss = (datan[1][1] + datap[1][1] - vx2);
float fxs = 0.25f* (datan[1][2] + datap[1][0] - datan[1][0] - datap[1][2]);
float fys = 0.25f* (datan[2][1] + datap[0][1] - datan[0][1] - datap[2][1]);
//need to solve dx, dy, ds;
// |-fx| | fxx fxy fxs | |dx|
// |-fy| = | fxy fyy fys | * |dy|
// |-fs| | fxs fys fss | |ds|
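// Note: the code below solves this 3x3 system by Gaussian elimination with
// partial pivoting (rows are pre-negated so each leading coefficient is
// non-negative before the pivot is chosen). The offset (dx, dy, ds) is kept
// only if it stays within one sample in every dimension and the interpolated
// DoG value still passes the contrast threshold dog_threshold.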
float4 A0 = fxx > 0? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx);
float4 A1 = fxy > 0? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy);
float4 A2 = fxs > 0? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs);
float maxa = max(max(A0.x, A1.x), A2.x);
if(maxa >= 1e-10)
{
if(maxa == A1.x)
{
float4 TEMP = A1; A1 = A0; A0 = TEMP;
}else if(maxa == A2.x)
{
float4 TEMP = A2; A2 = A0; A0 = TEMP;
}
A0.y /= A0.x; A0.z /= A0.x; A0.w/= A0.x;
A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w;
A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w;
if(abs(A2.y) > abs(A1.y))
{
float4 TEMP = A2; A2 = A1; A1 = TEMP;
}
if(abs(A1.y) >= 1e-10)
{
A1.z /= A1.y; A1.w /= A1.y;
A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w;
if(abs(A2.z) >= 1e-10)
{
ds = A2.w / A2.z;
dy = A1.w - ds * A1.z;
dx = A0.w - ds * A0.z - dy * A0.y;
offset_test_passed =
fabs(data[1][1] + 0.5f * (dx * fx + dy * fy + ds * fs)) > dog_threshold
&&fabs(ds) < 1.0f && fabs(dx) < 1.0f && fabs(dy) < 1.0f;
}
}
}
}
if(offset_test_passed) result = v > nmax ? 1.0 : -1.0;
}
key_finish:
if(in_image) d_key[index] = make_float4(result, dx, dy, ds);
}
void ProgramCU::ComputeKEY(CuTexImage* dog, CuTexImage* key, float Tdog, float Tedge)
{
int width = dog->GetImgWidth(), height = dog->GetImgHeight();
float Tdog1 = (GlobalUtil::_SubpixelLocalization? 0.8f : 1.0f) * Tdog;
CuTexImage* dogp = dog - 1;
CuTexImage* dogn = dog + 1;
#ifdef KEY_OFFSET_ONE
dim3 grid((width - 1 + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height - 1 + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY);
#else
dim3 grid((width + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY);
#endif
dim3 block(KEY_BLOCK_DIMX, KEY_BLOCK_DIMY);
dogp->BindTexture(texP);
dog ->BindTexture(texC);
dogn->BindTexture(texN);
Tedge = (Tedge+1)*(Tedge+1)/Tedge;
hipLaunchKernelGGL(( ComputeKEY_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) key->_cuData, width,
width -1, height -1, Tdog1, Tdog, Tedge, GlobalUtil::_SubpixelLocalization);
}
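// InitHist_Kernel builds the finest level of the keypoint-count histogram:
// each int4 cell covers four consecutive columns of the key map and holds a
// 0/1 flag per column (keyv.x != 0 marks a detected extremum). Border rows
// and columns are always counted as zero.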
void __global__ InitHist_Kernel(int4* hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(row < height && col < wd)
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
if(row > 0 && row < height -1)
{
#pragma unroll
for(int i = 0; i < 4 ; ++i, ++scol)
{
float4 temp = tex1Dfetch(texDataF4, sidx +i);
v[i] = (scol < ws -1 && scol > 0 && temp.x!=0) ? 1 : 0;
}
}
hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::InitHistogram(CuTexImage* key, CuTexImage* hist)
{
int ws = key->GetImgWidth(), hs = key->GetImgHeight();
int wd = hist->GetImgWidth(), hd = hist->GetImgHeight();
dim3 grid((wd + HIST_INIT_WIDTH - 1)/ HIST_INIT_WIDTH, hd);
dim3 block(HIST_INIT_WIDTH, 1);
key->BindTexture(texDataF4);
hipLaunchKernelGGL(( InitHist_Kernel), dim3(grid), dim3(block), 0, 0, (int4*) hist->_cuData, ws, wd, hd);
}
void __global__ ReduceHist_Kernel(int4* d_hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(row < height && col < wd)
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
#pragma unroll
for(int i = 0; i < 4 && scol < ws; ++i, ++scol)
{
int4 temp = tex1Dfetch(texDataI4, sidx + i);
v[i] = temp.x + temp.y + temp.z + temp.w;
}
d_hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::ReduceHistogram(CuTexImage*hist1, CuTexImage* hist2)
{
int ws = hist1->GetImgWidth(), hs = hist1->GetImgHeight();
int wd = hist2->GetImgWidth(), hd = hist2->GetImgHeight();
int temp = (int)floor(logf(float(wd * 2/ 3)) / logf(2.0f));
const int wi = min(7, max(temp , 0));
hist1->BindTexture(texDataI4);
const int BW = 1 << wi, BH = 1 << (7 - wi);
dim3 grid((wd + BW - 1)/ BW, (hd + BH -1) / BH);
dim3 block(BW, BH);
hipLaunchKernelGGL(( ReduceHist_Kernel), dim3(grid), dim3(block), 0, 0, (int4*)hist2->_cuData, ws, wd, hd);
}
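// ListGen_Kernel performs one step of descent through the histogram pyramid:
// pos.xy addresses a cell of the coarser level and pos.z is the remaining
// rank of the wanted keypoint inside that cell; the kernel refines pos.x
// (scaled by 4) and pos.z from the four partial counts of the finer int4
// cell. The caller is expected to repeat GenerateList once per reduction
// level, from coarse to fine.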
void __global__ ListGen_Kernel(int4* d_list, int width)
{
int idx1 = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int4 pos = tex1Dfetch(texDataList, idx1);
int idx2 = IMUL(pos.y, width) + pos.x;
int4 temp = tex1Dfetch(texDataI4, idx2);
int sum1 = temp.x + temp.y;
int sum2 = sum1 + temp.z;
pos.x <<= 2;
if(pos.z >= sum2)
{
pos.x += 3;
pos.z -= sum2;
}else if(pos.z >= sum1)
{
pos.x += 2;
pos.z -= sum1;
}else if(pos.z >= temp.x)
{
pos.x += 1;
pos.z -= temp.x;
}
d_list[idx1] = pos;
}
//input list (x, y) (x, y) ....
void ProgramCU::GenerateList(CuTexImage* list, CuTexImage* hist)
{
int len = list->GetImgWidth();
list->BindTexture(texDataList);
hist->BindTexture(texDataI4);
dim3 grid((len + LISTGEN_BLOCK_DIM -1) /LISTGEN_BLOCK_DIM);
dim3 block(LISTGEN_BLOCK_DIM);
hipLaunchKernelGGL(( ListGen_Kernel), dim3(grid), dim3(block), 0, 0, (int4*) list->_cuData, hist->GetImgWidth());
}
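// ComputeOrientation_Kernel uses one thread per keypoint. It accumulates a
// 36-bin gradient-orientation histogram over a Gaussian-weighted circular
// window, smooths it with several 1/3-box passes, and then either picks the
// single strongest bin with parabolic interpolation (orientation stored in
// key.w in radians), or keeps up to two peaks above 80% of the maximum and
// packs both orientations as 16-bit fractions of a full turn into key.w.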
void __global__ ComputeOrientation_Kernel(float4* d_list,
int list_len,
int width, int height,
float sigma, float sigma_step,
float gaussian_factor, float sample_factor,
int num_orientation,
int existing_keypoint,
int subpixel,
int keepsign)
{
const float ten_degree_per_radius = 5.7295779513082320876798154814105;
const float radius_per_ten_degrees = 1.0 / 5.7295779513082320876798154814105;
int idx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(idx >= list_len) return;
float4 key;
if(existing_keypoint)
{
key = tex1Dfetch(texDataF4, idx);
}else
{
int4 ikey = tex1Dfetch(texDataList, idx);
key.x = ikey.x + 0.5f;
key.y = ikey.y + 0.5f;
key.z = sigma;
if(subpixel || keepsign)
{
float4 offset = tex1Dfetch(texDataF4, IMUL(width, ikey.y) + ikey.x);
if(subpixel)
{
key.x += offset.y;
key.y += offset.z;
key.z *= pow(sigma_step, offset.w);
}
if(keepsign) key.z *= offset.x;
}
}
if(num_orientation == 0)
{
key.w = 0;
d_list[idx] = key;
return;
}
float vote[37];
float gsigma = key.z * gaussian_factor;
float win = fabs(key.z) * sample_factor;
float dist_threshold = win * win + 0.5;
float factor = -0.5f / (gsigma * gsigma);
float xmin = max(1.5f, floor(key.x - win) + 0.5f);
float ymin = max(1.5f, floor(key.y - win) + 0.5f);
float xmax = min(width - 1.5f, floor(key.x + win) + 0.5f);
float ymax = min(height -1.5f, floor(key.y + win) + 0.5f);
#pragma unroll
for(int i = 0; i < 36; ++i) vote[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - key.x;
float dy = y - key.y;
float sq_dist = dx * dx + dy * dy;
if(sq_dist >= dist_threshold) continue;
float2 got = tex2D(texDataF2, x, y);
float weight = got.x * exp(sq_dist * factor);
float fidx = floor(got.y * ten_degree_per_radius);
int oidx = fidx;
if(oidx < 0) oidx += 36;
vote[oidx] += weight;
}
}
//filter the vote
const float one_third = 1.0 /3.0;
#pragma unroll
for(int i = 0; i < 6; ++i)
{
vote[36] = vote[0];
float pre = vote[35];
#pragma unroll
for(int j = 0; j < 36; ++j)
{
float temp = one_third * (pre + vote[j] + vote[j + 1]);
pre = vote[j]; vote[j] = temp;
}
}
vote[36] = vote[0];
if(num_orientation == 1 || existing_keypoint)
{
int index_max = 0;
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i)
{
index_max = vote[i] > max_vote? i : index_max;
max_vote = max(max_vote, vote[i]);
}
float pre = vote[index_max == 0? 35 : index_max -1];
float next = vote[index_max + 1];
float weight = max_vote;
float off = 0.5f * FDIV(next - pre, weight + weight - next - pre);
key.w = radius_per_ten_degrees * (index_max + 0.5f + off);
d_list[idx] = key;
}else
{
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i) max_vote = max(max_vote, vote[i]);
float vote_threshold = max_vote * 0.8f;
float pre = vote[35];
float max_rot[2] = {0, 0}, max_vot[2] = {0, 0}; //initialize max_rot so it is never read uninitialized when no peak is found
int ocount = 0;
#pragma unroll
for(int i =0; i < 36; ++i)
{
float next = vote[i + 1];
if(vote[i] > vote_threshold && vote[i] > pre && vote[i] > next)
{
float di = 0.5f * FDIV(next - pre, vote[i] + vote[i] - next - pre);
float rot = i + di + 0.5f;
float weight = vote[i];
///
if(weight > max_vot[1])
{
if(weight > max_vot[0])
{
max_vot[1] = max_vot[0];
max_rot[1] = max_rot[0];
max_vot[0] = weight;
max_rot[0] = rot;
}
else
{
max_vot[1] = weight;
max_rot[1] = rot;
}
ocount ++;
}
}
pre = vote[i];
}
float fr1 = max_rot[0] / 36.0f;
if(fr1 < 0) fr1 += 1.0f;
unsigned short us1 = ocount == 0? 65535 : ((unsigned short )floor(fr1 * 65535.0f));
unsigned short us2 = 65535;
if(ocount > 1)
{
float fr2 = max_rot[1] / 36.0f;
if(fr2 < 0) fr2 += 1.0f;
us2 = (unsigned short ) floor(fr2 * 65535.0f);
}
unsigned int uspack = (us2 << 16) | us1;
key.w = __int_as_float(uspack);
d_list[idx] = key;
}
}
void ProgramCU::ComputeOrientation(CuTexImage* list, CuTexImage* got, CuTexImage*key,
float sigma, float sigma_step, int existing_keypoint)
{
int len = list->GetImgWidth();
if(len <= 0) return;
int width = got->GetImgWidth(), height = got->GetImgHeight();
if(existing_keypoint)
{
list->BindTexture(texDataF4);
}else
{
list->BindTexture(texDataList);
if(GlobalUtil::_SubpixelLocalization) key->BindTexture(texDataF4);
}
got->BindTexture2D(texDataF2);
const int block_width = len < ORIENTATION_COMPUTE_PER_BLOCK ? 16 : ORIENTATION_COMPUTE_PER_BLOCK;
dim3 grid((len + block_width -1) / block_width);
dim3 block(block_width);
hipLaunchKernelGGL(( ComputeOrientation_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) list->_cuData,
len, width, height, sigma, sigma_step,
GlobalUtil::_OrientationGaussianFactor,
GlobalUtil::_OrientationGaussianFactor * GlobalUtil::_OrientationWindowFactor,
GlobalUtil::_FixedOrientation? 0 : GlobalUtil::_MaxOrientation,
existing_keypoint, GlobalUtil::_SubpixelLocalization, GlobalUtil::_KeepExtremumSign);
ProgramCU::CheckErrorCUDA("ComputeOrientation");
}
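// ComputeDescriptor_Kernel assigns 16 consecutive threads to one keypoint,
// one thread per 4x4 spatial cell of the SIFT grid. Each thread gathers the
// gradients of its rotated, scale-normalized cell, weights them by a Gaussian
// over the grid and bilinearly within the cell, splits each sample linearly
// between two of the 8 orientation bins, and writes its 8-float slice of the
// 128-dimensional descriptor as two float4 values.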
template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptor_Kernel(float4* d_des, int num,
int width, int height, float window_factor)
{
const float rpi = 4.0/ 3.14159265358979323846;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num) return;
float4 key = tex1Dfetch(texDataF4, fidx);
int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2;
float spt = fabs(key.z * window_factor);
float s, c; __sincosf(key.w, &s, &c);
float anglef = key.w > 3.14159265358979323846? key.w - (2.0 * 3.14159265358979323846) : key.w ;
float cspt = c * spt, sspt = s * spt;
float crspt = c / spt, srspt = s / spt;
float2 offsetpt, pt;
float xmin, ymin, xmax, ymax, bsz;
offsetpt.x = ix - 1.5f;
offsetpt.y = iy - 1.5f;
pt.x = cspt * offsetpt.x - sspt * offsetpt.y + key.x;
pt.y = cspt * offsetpt.y + sspt * offsetpt.x + key.y;
bsz = fabs(cspt) + fabs(sspt);
xmin = max(1.5f, floor(pt.x - bsz) + 0.5f);
ymin = max(1.5f, floor(pt.y - bsz) + 0.5f);
xmax = min(width - 1.5f, floor(pt.x + bsz) + 0.5f);
ymax = min(height - 1.5f, floor(pt.y + bsz) + 0.5f);
float des[9];
#pragma unroll
for(int i =0; i < 9; ++i) des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - pt.x;
float dy = y - pt.y;
float nx = crspt * dx + srspt * dy;
float ny = crspt * dy - srspt * dx;
float nxn = fabs(nx);
float nyn = fabs(ny);
if(nxn < 1.0f && nyn < 1.0f)
{
float2 cc = tex2D(texDataF2, x, y);
float dnx = nx + offsetpt.x;
float dny = ny + offsetpt.y;
float ww = exp(-0.125f * (dnx * dnx + dny * dny));
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = ww * wx * wy * cc.x;
float theta = (anglef - cc.y) * rpi;
if(theta < 0) theta += 8.0f;
float fo = floor(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
//this dynamic indexing part might be slow
}else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
}
template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptorRECT_Kernel(float4* d_des, int num,
int width, int height, float window_factor)
{
const float rpi = 4.0/ 3.14159265358979323846;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num) return;
float4 key = tex1Dfetch(texDataF4, fidx);
int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2;
//float aspect_ratio = key.w / key.z;
//float aspect_sq = aspect_ratio * aspect_ratio;
float sptx = key.z * 0.25, spty = key.w * 0.25;
float xmin, ymin, xmax, ymax; float2 pt;
pt.x = sptx * (ix + 0.5f) + key.x;
pt.y = spty * (iy + 0.5f) + key.y;
xmin = max(1.5f, floor(pt.x - sptx) + 0.5f);
ymin = max(1.5f, floor(pt.y - spty) + 0.5f);
xmax = min(width - 1.5f, floor(pt.x + sptx) + 0.5f);
ymax = min(height - 1.5f, floor(pt.y + spty) + 0.5f);
float des[9];
#pragma unroll
for(int i =0; i < 9; ++i) des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float nx = (x - pt.x) / sptx;
float ny = (y - pt.y) / spty;
float nxn = fabs(nx);
float nyn = fabs(ny);
if(nxn < 1.0f && nyn < 1.0f)
{
float2 cc = tex2D(texDataF2, x, y);
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = wx * wy * cc.x;
float theta = (- cc.y) * rpi;
if(theta < 0) theta += 8.0f;
float fo = floor(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
//this dynamic indexing part might be slow
}else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
}
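// NormalizeDescriptor_Kernel implements the standard SIFT normalization with
// one thread per descriptor: normalize the 128-vector to unit length, clamp
// every component at 0.2 to limit the influence of large gradients, then
// renormalize.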
void __global__ NormalizeDescriptor_Kernel(float4* d_des, int num)
{
float4 temp[32];
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
int sidx = idx << 5;
float norm1 = 0, norm2 = 0;
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i] = tex1Dfetch(texDataF4, sidx +i);
norm1 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y +
temp[i].z * temp[i].z + temp[i].w * temp[i].w);
}
norm1 = rsqrt(norm1);
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i].x = min(0.2f, temp[i].x * norm1);
temp[i].y = min(0.2f, temp[i].y * norm1);
temp[i].z = min(0.2f, temp[i].z * norm1);
temp[i].w = min(0.2f, temp[i].w * norm1);
norm2 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y +
temp[i].z * temp[i].z + temp[i].w * temp[i].w);
}
norm2 = rsqrt(norm2);
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i].x *= norm2; temp[i].y *= norm2;
temp[i].z *= norm2; temp[i].w *= norm2;
d_des[sidx + i] = temp[i];
}
}
void ProgramCU::ComputeDescriptor(CuTexImage*list, CuTexImage* got, CuTexImage* dtex, int rect, int stream)
{
int num = list->GetImgWidth();
int width = got->GetImgWidth();
int height = got->GetImgHeight();
dtex->InitTexture(num * 128, 1, 1);
got->BindTexture2D(texDataF2);
list->BindTexture(texDataF4);
int block_width = DESCRIPTOR_COMPUTE_BLOCK_SIZE;
dim3 grid((num * 16 + block_width -1) / block_width);
dim3 block(block_width);
if(rect)
{
if(GlobalUtil::_UseDynamicIndexing)
hipLaunchKernelGGL(( ComputeDescriptorRECT_Kernel<true>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
hipLaunchKernelGGL(( ComputeDescriptorRECT_Kernel<false>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}else
{
if(GlobalUtil::_UseDynamicIndexing)
hipLaunchKernelGGL(( ComputeDescriptor_Kernel<true>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
hipLaunchKernelGGL(( ComputeDescriptor_Kernel<false>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}
if(GlobalUtil::_NormalizedSIFT)
{
dtex->BindTexture(texDataF4);
const int block_width = DESCRIPTOR_NORMALIZ_PER_BLOCK;
dim3 grid((num + block_width -1) / block_width);
dim3 block(block_width);
hipLaunchKernelGGL(( NormalizeDescriptor_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num);
}
CheckErrorCUDA("ComputeDescriptor");
}
//////////////////////////////////////////////////////
void ProgramCU::FinishCUDA()
{
hipDeviceSynchronize();
}
int ProgramCU::CheckErrorCUDA(const char* location)
{
hipError_t e = hipGetLastError();
if(e)
{
if(location) fprintf(stderr, "%s:\t", location);
fprintf(stderr, "%s\n", hipGetErrorString(e));
//assert(0);
return 1;
}else
{
return 0;
}
}
void __global__ ConvertDOG_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float v = tex1Dfetch(texData, index);
d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)?
0.5 : saturate(0.5+20.0*v);
}
}
///
void ProgramCU::DisplayConvertDOG(CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = dog->GetImgWidth(), height = dog ->GetImgHeight();
dog->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
hipLaunchKernelGGL(( ConvertDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertDOG");
}
void __global__ ConvertGRD_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float v = tex1Dfetch(texData, index << 1);
d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)?
0 : saturate(5 * v);
}
}
void ProgramCU::DisplayConvertGRD(CuTexImage* got, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = got->GetImgWidth(), height = got ->GetImgHeight();
got->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
hipLaunchKernelGGL(( ConvertGRD_Kernel), dim3(grid), dim3(block), 0, 0, (float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertGRD");
}
void __global__ ConvertKEY_Kernel(float4* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float4 keyv = tex1Dfetch(texDataF4, index);
int is_key = (keyv.x == 1.0f || keyv.x == -1.0f);
int inside = col > 0 && row > 0 && row < height -1 && col < width - 1;
float v = inside? saturate(0.5 + 20 * tex1Dfetch(texData, index)) : 0.5;
d_result[index] = is_key && inside ?
(keyv.x > 0? make_float4(1.0f, 0, 0, 1.0f) : make_float4(0.0f, 1.0f, 0.0f, 1.0f)):
make_float4(v, v, v, 1.0f) ;
}
}
void ProgramCU::DisplayConvertKEY(CuTexImage* key, CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = key->GetImgWidth(), height = key ->GetImgHeight();
dog->BindTexture(texData);
key->BindTexture(texDataF4);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
hipLaunchKernelGGL(( ConvertKEY_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, width, height);
}
void __global__ DisplayKeyPoint_Kernel(float4 * d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
float4 v = tex1Dfetch(texDataF4, idx);
d_result[idx] = make_float4(v.x, v.y, 0, 1.0f);
}
void ProgramCU::DisplayKeyPoint(CuTexImage* ftex, CuTexImage* out)
{
int num = ftex->GetImgWidth();
int block_width = 64;
dim3 grid((num + block_width -1) /block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
hipLaunchKernelGGL(( DisplayKeyPoint_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, num);
ProgramCU::CheckErrorCUDA("DisplayKeyPoint");
}
void __global__ DisplayKeyBox_Kernel(float4* d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
int kidx = idx / 10, vidx = idx - IMUL(kidx , 10);
float4 v = tex1Dfetch(texDataF4, kidx);
float sz = fabs(v.z * 3.0f);
///////////////////////
float s, c; __sincosf(v.w, &s, &c);
///////////////////////
float dx = vidx == 0? 0 : ((vidx <= 4 || vidx >= 9)? sz : -sz);
float dy = vidx <= 1? 0 : ((vidx <= 2 || vidx >= 7)? -sz : sz);
float4 pos;
pos.x = v.x + c * dx - s * dy;
pos.y = v.y + c * dy + s * dx;
pos.z = 0; pos.w = 1.0f;
d_result[idx] = pos;
}
void ProgramCU::DisplayKeyBox(CuTexImage* ftex, CuTexImage* out)
{
int len = ftex->GetImgWidth();
int block_width = 32;
dim3 grid((len * 10 + block_width -1) / block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
hipLaunchKernelGGL(( DisplayKeyBox_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, len * 10);
}
///////////////////////////////////////////////////////////////////
inline void CuTexImage:: BindTexture(textureReference& texRef)
{
hipBindTexture(NULL, &texRef, _cuData, &texRef.channelDesc, _numBytes);
}
inline void CuTexImage::BindTexture2D(textureReference& texRef)
{
#if defined(SIFTGPU_ENABLE_LINEAR_TEX2D)
hipBindTexture2D(0, &texRef, _cuData, &texRef.channelDesc, _imgWidth, _imgHeight, _imgWidth* _numChannel* sizeof(float));
#else
hipChannelFormatDesc desc;
hipGetChannelDesc(&desc, _cuData2D);
hipBindTextureToArray(&texRef, _cuData2D, &desc);
#endif
}
int ProgramCU::CheckCudaDevice(int device)
{
int count = 0, device_used;
if(hipGetDeviceCount(&count) != hipSuccess || count <= 0)
{
ProgramCU::CheckErrorCUDA("CheckCudaDevice");
return 0;
}else if(count == 1)
{
hipDeviceProp_t deviceProp;
if ( hipGetDeviceProperties(&deviceProp, 0) != hipSuccess ||
(deviceProp.major == 9999 && deviceProp.minor == 9999))
{
fprintf(stderr, "CheckCudaDevice: no device supporting CUDA.\n");
return 0;
}else
{
GlobalUtil::_MemCapGPU = deviceProp.totalGlobalMem / 1024;
GlobalUtil::_texMaxDimGL = 32768;
if(GlobalUtil::_verbose)
fprintf(stdout, "NOTE: changing maximum texture dimension to %d\n", GlobalUtil::_texMaxDimGL);
}
}
if(device >0 && device < count)
{
hipSetDevice(device);
CheckErrorCUDA("hipSetDevice\n");
}
hipGetDevice(&device_used);
if(device != device_used)
fprintf(stderr, "\nERROR: Cannot set device to %d\n"
"\nWARNING: Using device #%d instead (out of %d)\n", device, device_used, count);
return 1;
}
////////////////////////////////////////////////////////////////////////////////////////
// siftmatch functions
//////////////////////////////////////////////////////////////////////////////////////////
#define MULT_TBLOCK_DIMX 128
#define MULT_TBLOCK_DIMY 1
#define MULT_BLOCK_DIMX (MULT_TBLOCK_DIMX)
#define MULT_BLOCK_DIMY (8 * MULT_TBLOCK_DIMY)
texture<uint4, 1, hipReadModeElementType> texDes1;
texture<uint4, 1, hipReadModeElementType> texDes2;
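// MultiplyDescriptor_Kernel computes raw dot products between two descriptor
// sets stored as packed unsigned bytes (8 uint4 = 128 bytes per feature).
// A block caches MULT_BLOCK_DIMY features of set 1 in shared memory using a
// 17-int stride (one padding int per 16) to sidestep bank conflicts, and each
// thread streams one feature of set 2 against all of them with __mul24 byte
// products. When d_temp is provided, the block also records the best score,
// its row index and the second-best score per column for the reduction
// kernels further below.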
void __global__ MultiplyDescriptor_Kernel(int* d_result, int num1, int num2, int3* d_temp)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY), idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y, idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
int read_idx1 = idx01 * 8 + threadIdx.x, read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
///////////////////////////////////////////////////////////////
//Load feature descriptors
///////////////////////////////////////////////////////////////
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
}
#else
#error "MULT_BLOCK_DIMY must be 8 or 16"
#endif
__syncthreads();
///
if(idx2 >= num2) return;
///////////////////////////////////////////////////////////////////////////
//compare descriptors
int results[MULT_BLOCK_DIMY];
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i) results[i] = 0;
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
}
void ProgramCU::MultiplyDescriptor(CuTexImage* des1, CuTexImage* des2, CuTexImage* texDot, CuTexImage* texCRT)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture(num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 32);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
hipLaunchKernelGGL(( MultiplyDescriptor_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL));
ProgramCU::CheckErrorCUDA("MultiplyDescriptor");
}
texture<float, 1, hipReadModeElementType> texLoc1;
texture<float2, 1, hipReadModeElementType> texLoc2;
struct Matrix33{float mat[3][3];};
void __global__ MultiplyDescriptorG_Kernel(int* d_result, int num1, int num2, int3* d_temp,
Matrix33 H, float hdistmax, Matrix33 F, float fdistmax)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY);
int idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y;
int idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
__shared__ float loc1[MULT_BLOCK_DIMY * 2];
int read_idx1 = idx01 * 8 + threadIdx.x ;
int read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
}
#else
#error "MULT_BLOCK_DIMY must be 8 or 16"
#endif
__syncthreads();
if(threadIdx.x < MULT_BLOCK_DIMY * 2)
{
loc1[threadIdx.x] = tex1Dfetch(texLoc1, 2 * idx01 + threadIdx.x);
}
__syncthreads();
if(idx2 >= num2) return;
int results[MULT_BLOCK_DIMY];
/////////////////////////////////////////////////////////////////////////////////////////////
//geometric verification
/////////////////////////////////////////////////////////////////////////////////////////////
int good_count = 0;
float2 loc2 = tex1Dfetch(texLoc2, idx2);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
float* loci = loc1 + i * 2;
float locx = loci[0], locy = loci[1];
//homography
float x[3], diff[2];
x[0] = H.mat[0][0] * locx + H.mat[0][1] * locy + H.mat[0][2];
x[1] = H.mat[1][0] * locx + H.mat[1][1] * locy + H.mat[1][2];
x[2] = H.mat[2][0] * locx + H.mat[2][1] * locy + H.mat[2][2];
diff[0] = fabs(FDIV(x[0], x[2]) - loc2.x);
diff[1] = fabs(FDIV(x[1], x[2]) - loc2.y);
if(diff[0] < hdistmax && diff[1] < hdistmax)
{
//check fundamental matrix
float fx1[3], ftx2[3], x2fx1, se;
fx1[0] = F.mat[0][0] * locx + F.mat[0][1] * locy + F.mat[0][2];
fx1[1] = F.mat[1][0] * locx + F.mat[1][1] * locy + F.mat[1][2];
fx1[2] = F.mat[2][0] * locx + F.mat[2][1] * locy + F.mat[2][2];
ftx2[0] = F.mat[0][0] * loc2.x + F.mat[1][0] * loc2.y + F.mat[2][0];
ftx2[1] = F.mat[0][1] * loc2.x + F.mat[1][1] * loc2.y + F.mat[2][1];
//ftx2[2] = F.mat[0][2] * loc2.x + F.mat[1][2] * loc2.y + F.mat[2][2];
x2fx1 = loc2.x * fx1[0] + loc2.y * fx1[1] + fx1[2];
se = FDIV(x2fx1 * x2fx1, fx1[0] * fx1[0] + fx1[1] * fx1[1] + ftx2[0] * ftx2[0] + ftx2[1] * ftx2[1]);
results[i] = se < fdistmax? 0: -262144;
}else
{
results[i] = -262144;
}
}else
{
results[i] = -262144;
}
good_count += (results[i] >=0);
}
/////////////////////////////////////////////////////////////////////////////////////////////
///compare feature descriptors anyway
/////////////////////////////////////////////////////////////////////////////////////////////
if(good_count > 0)
{
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i= 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
}else
{
break;
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
else break;
}
}
}
void ProgramCU::MultiplyDescriptorG(CuTexImage* des1, CuTexImage* des2,
CuTexImage* loc1, CuTexImage* loc2, CuTexImage* texDot, CuTexImage* texCRT,
float H[3][3], float hdistmax, float F[3][3], float fdistmax)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
Matrix33 MatF, MatH;
//copy the matrix
memcpy(MatF.mat, F, 9 * sizeof(float));
memcpy(MatH.mat, H, 9 * sizeof(float));
//thread blocks
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
//intermediate results
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture( num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 3);
loc1->BindTexture(texLoc1);
loc2->BindTexture(texLoc2);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
hipLaunchKernelGGL(( MultiplyDescriptorG_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL),
MatH, hdistmax, MatF, fdistmax);
}
texture<int, 1, hipReadModeElementType> texDOT;
#define ROWMATCH_BLOCK_WIDTH 32
#define ROWMATCH_BLOCK_HEIGHT 1
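// RowMatch_Kernel reduces one row of the dot-product matrix to its best and
// second-best scores (plus the best column index) and applies the distance /
// distance-ratio test. The factor 0.000003814697265625 equals 1/(512*512);
// this assumes the byte descriptors were quantized with a scale of 512, so
// dot/512^2 approximates the cosine between unit descriptors and acos of it
// gives an angular distance comparable against distmax and ratiomax.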
void __global__ RowMatch_Kernel(int*d_dot, int* d_result, int num2, float distmax, float ratiomax)
{
#if ROWMATCH_BLOCK_HEIGHT == 1
__shared__ int dotmax[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotnxt[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotidx[ROWMATCH_BLOCK_WIDTH];
int row = blockIdx.y;
#else
__shared__ int x_dotmax[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotnxt[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotidx[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
int* dotmax = x_dotmax[threadIdx.y];
int* dotnxt = x_dotnxt[threadIdx.y];
int* dotidx = x_dotidx[threadIdx.y];
int row = IMUL(blockIdx.y, ROWMATCH_BLOCK_HEIGHT) + threadIdx.y;
#endif
int base_address = IMUL(row , num2);
int t_dotmax = 0, t_dotnxt = 0, t_dotidx = -1;
for(int i = 0; i < num2; i += ROWMATCH_BLOCK_WIDTH)
{
if(threadIdx.x + i < num2)
{
int v = tex1Dfetch(texDOT, base_address + threadIdx.x + i);//d_dot[base_address + threadIdx.x + i];//
bool test = v > t_dotmax;
t_dotnxt = test? t_dotmax : max(t_dotnxt, v);
t_dotidx = test? (threadIdx.x + i) : t_dotidx;
t_dotmax = test? v: t_dotmax;
}
__syncthreads();
}
dotmax[threadIdx.x] = t_dotmax;
dotnxt[threadIdx.x] = t_dotnxt;
dotidx[threadIdx.x] = t_dotidx;
__syncthreads();
#pragma unroll
for(int step = ROWMATCH_BLOCK_WIDTH/2; step >0; step /= 2)
{
if(threadIdx.x < step)
{
int v1 = dotmax[threadIdx.x], v2 = dotmax[threadIdx.x + step];
bool test = v2 > v1;
dotnxt[threadIdx.x] = test? max(v1, dotnxt[threadIdx.x + step]) :max(dotnxt[threadIdx.x], v2);
dotidx[threadIdx.x] = test? dotidx[threadIdx.x + step] : dotidx[threadIdx.x];
dotmax[threadIdx.x] = test? v2 : v1;
}
__syncthreads();
}
if(threadIdx.x == 0)
{
float dist = acos(min(dotmax[0] * 0.000003814697265625f, 1.0));
float distn = acos(min(dotnxt[0] * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[row] = (dist < distmax) && (dist < distn * ratiomax) ? dotidx[0] : -1;//? : -1;
}
}
void ProgramCU::GetRowMatch(CuTexImage* texDot, CuTexImage* texMatch, float distmax, float ratiomax)
{
int num1 = texDot->GetImgHeight();
int num2 = texDot->GetImgWidth();
dim3 grid(1, num1/ROWMATCH_BLOCK_HEIGHT);
dim3 block(ROWMATCH_BLOCK_WIDTH, ROWMATCH_BLOCK_HEIGHT);
texDot->BindTexture(texDOT);
hipLaunchKernelGGL(( RowMatch_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData,
(int*)texMatch->_cuData, num2, distmax, ratiomax);
}
#define COLMATCH_BLOCK_WIDTH 32
//texture<int3, 1, hipReadModeElementType> texCT;
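// ColMatch_Kernel finishes the reverse (column-wise) matching: it merges the
// per-block (best, index, second-best) triples produced above for one column
// of set 2 and applies the same acos distance and ratio test as RowMatch.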
void __global__ ColMatch_Kernel(int3*d_crt, int* d_result, int height, int num2, float distmax, float ratiomax)
{
int col = COLMATCH_BLOCK_WIDTH * blockIdx.x + threadIdx.x;
if(col >= num2) return;
int3 result = d_crt[col];//tex1Dfetch(texCT, col);
int read_idx = col + num2;
for(int i = 1; i < height; ++i, read_idx += num2)
{
int3 temp = d_crt[read_idx];//tex1Dfetch(texCT, read_idx);
result = result.x < temp.x?
make_int3(temp.x, temp.y, max(result.x, temp.z)) :
make_int3(result.x, result.y, max(result.z, temp.x));
}
float dist = acos(min(result.x * 0.000003814697265625f, 1.0));
float distn = acos(min(result.z * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[col] = (dist < distmax) && (dist < distn * ratiomax) ? result.y : -1;//? : -1;
}
void ProgramCU::GetColMatch(CuTexImage* texCRT, CuTexImage* texMatch, float distmax, float ratiomax)
{
int height = texCRT->GetImgHeight();
int num2 = texCRT->GetImgWidth();
//texCRT->BindTexture(texCT);
dim3 grid((num2 + COLMATCH_BLOCK_WIDTH -1) / COLMATCH_BLOCK_WIDTH);
dim3 block(COLMATCH_BLOCK_WIDTH);
hipLaunchKernelGGL(( ColMatch_Kernel), dim3(grid), dim3(block), 0, 0, (int3*)texCRT->_cuData, (int*) texMatch->_cuData, height, num2, distmax, ratiomax);
}
#endif
|
9d660dc1f88db0c2c0f394afbd104ca803d7ae9c.cu
|
////////////////////////////////////////////////////////////////////////////
// File: ProgramCU.cu
// Author: Changchang Wu
// Description : implementation of ProgramCU and all CUDA kernels
//
// Copyright (c) 2007 University of North Carolina at Chapel Hill
// All Rights Reserved
//
// Permission to use, copy, modify and distribute this software and its
// documentation for educational, research and non-profit purposes, without
// fee, and without a written agreement is hereby granted, provided that the
// above copyright notice and the following paragraph appear in all copies.
//
// The University of North Carolina at Chapel Hill make no representations
// about the suitability of this software for any purpose. It is provided
// 'as is' without express or implied warranty.
//
// Please send BUG REPORTS to [email protected]
//
////////////////////////////////////////////////////////////////////////////
#if defined(CUDA_SIFTGPU_ENABLED)
#include "GL/glew.h"
#include "stdio.h"
#include "CuTexImage.h"
#include "ProgramCU.h"
#include "GlobalUtil.h"
//----------------------------------------------------------------
//Begin SiftGPU setting section.
//////////////////////////////////////////////////////////
#define IMUL(X,Y) __mul24(X,Y)
//#define FDIV(X,Y) ((X)/(Y))
#define FDIV(X,Y) __fdividef(X,Y)
/////////////////////////////////////////////////////////
//filter kernel width range (don't change this)
#define KERNEL_MAX_WIDTH 33
#define KERNEL_MIN_WIDTH 5
//////////////////////////////////////////////////////////
//horizontal filter block size (32, 64, 128, 256, 512)
#define FILTERH_TILE_WIDTH 128
//thread block for vertical filter. FILTERV_BLOCK_WIDTH can be (4, 8 or 16)
#define FILTERV_BLOCK_WIDTH 16
#define FILTERV_BLOCK_HEIGHT 32
//The corresponding image patch for a thread block
#define FILTERV_PIXEL_PER_THREAD 4
#define FILTERV_TILE_WIDTH FILTERV_BLOCK_WIDTH
#define FILTERV_TILE_HEIGHT (FILTERV_PIXEL_PER_THREAD * FILTERV_BLOCK_HEIGHT)
//////////////////////////////////////////////////////////
//thread block size for computing Difference of Gaussian
#define DOG_BLOCK_LOG_DIMX 7
#define DOG_BLOCK_LOG_DIMY 0
#define DOG_BLOCK_DIMX (1 << DOG_BLOCK_LOG_DIMX)
#define DOG_BLOCK_DIMY (1 << DOG_BLOCK_LOG_DIMY)
//////////////////////////////////////////////////////////
//thread block size for keypoint detection
#define KEY_BLOCK_LOG_DIMX 3
#define KEY_BLOCK_LOG_DIMY 3
#define KEY_BLOCK_DIMX (1<<KEY_BLOCK_LOG_DIMX)
#define KEY_BLOCK_DIMY (1<<KEY_BLOCK_LOG_DIMY)
//#define KEY_OFFSET_ONE
//making KEY_BLOCK_LOG_DIMX 4 would make the writes coalesced,
//but it seems uncoalesced writes don't affect the speed
//////////////////////////////////////////////////////////
//thread block size for initializing list generation (64, 128, 256, 512 ...)
#define HIST_INIT_WIDTH 128
//thread block size for generating feature list (32, 64, 128, 256, 512, ...)
#define LISTGEN_BLOCK_DIM 128
/////////////////////////////////////////////////////////
//how many keypoint orientations to compute in a block
#define ORIENTATION_COMPUTE_PER_BLOCK 64
//how many keypoint descriptor to compute in a block (2, 4, 8, 16, 32)
#define DESCRIPTOR_COMPUTE_PER_BLOCK 4
#define DESCRIPTOR_COMPUTE_BLOCK_SIZE (16 * DESCRIPTOR_COMPUTE_PER_BLOCK)
//how many keypoint descriptor to normalized in a block (32, ...)
#define DESCRIPTOR_NORMALIZ_PER_BLOCK 32
///////////////////////////////////////////
//Thread block size for visualization
//(This doesn't affect the speed of computation)
#define BLOCK_LOG_DIM 4
#define BLOCK_DIM (1 << BLOCK_LOG_DIM)
//End SiftGPU setting section.
//----------------------------------------------------------------
__device__ __constant__ float d_kernel[KERNEL_MAX_WIDTH];
texture<float, 1, cudaReadModeElementType> texData;
texture<unsigned char, 1, cudaReadModeNormalizedFloat> texDataB;
texture<float2, 2, cudaReadModeElementType> texDataF2;
texture<float4, 1, cudaReadModeElementType> texDataF4;
texture<int4, 1, cudaReadModeElementType> texDataI4;
texture<int4, 1, cudaReadModeElementType> texDataList;
//template<int i> __device__ float Conv(float *data) { return Conv<i-1>(data) + data[i]*d_kernel[i];}
//template<> __device__ float Conv<0>(float *data) { return data[0] * d_kernel[0]; }
//////////////////////////////////////////////////////////////
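// FilterH: horizontal pass of the separable Gaussian. Each block filters one
// FILTERH_TILE_WIDTH-wide strip of a row; the strip plus FW-1 border samples
// is staged in shared memory with clamp-to-edge addressing, and every thread
// then accumulates the FW-tap convolution with the constant-memory kernel
// d_kernel. FilterImage<FW> below launches this pass followed by FilterV.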
template<int FW> __global__ void FilterH( float* d_result, int width)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FILTERH_TILE_WIDTH + FW -1;
const int CACHE_COUNT = 2 + (CACHE_WIDTH - 2)/ FILTERH_TILE_WIDTH;
__shared__ float data[CACHE_WIDTH];
const int bcol = IMUL(blockIdx.x, FILTERH_TILE_WIDTH);
const int col = bcol + threadIdx.x;
const int index_min = IMUL(blockIdx.y, width);
const int index_max = index_min + width - 1;
int src_index = index_min + bcol - HALF_WIDTH + threadIdx.x;
int cache_index = threadIdx.x;
float value = 0;
#pragma unroll
for(int j = 0; j < CACHE_COUNT; ++j)
{
if(cache_index < CACHE_WIDTH)
{
int fetch_index = src_index < index_min? index_min : (src_index > index_max ? index_max : src_index);
data[cache_index] = tex1Dfetch(texData,fetch_index);
src_index += FILTERH_TILE_WIDTH;
cache_index += FILTERH_TILE_WIDTH;
}
}
__syncthreads();
if(col >= width) return;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[threadIdx.x + i]* d_kernel[i]);
}
// value = Conv<FW-1>(data + threadIdx.x);
d_result[index_min + col] = value;
}
////////////////////////////////////////////////////////////////////
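// FilterV: vertical pass of the same separable filter. A block covers a
// FILTERV_TILE_WIDTH x FILTERV_TILE_HEIGHT patch; the per-column cache is
// padded by EXTRA elements so its stride is offset from a multiple of 16,
// which is meant to avoid shared-memory bank conflicts, and each thread
// writes up to FILTERV_PIXEL_PER_THREAD output rows.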
template<int FW> __global__ void FilterV(float* d_result, int width, int height)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FW + FILTERV_TILE_HEIGHT - 1;
const int TEMP = CACHE_WIDTH & 0xf;
//add some extra space to avoid bank conflict
#if FILTERV_TILE_WIDTH == 16
//make the stride 16 * n +/- 1
const int EXTRA = (TEMP == 1 || TEMP == 0) ? 1 - TEMP : 15 - TEMP;
#elif FILTERV_TILE_WIDTH == 8
//make the stride 16 * n +/- 2
const int EXTRA = (TEMP == 2 || TEMP == 1 || TEMP == 0) ? 2 - TEMP : (TEMP == 15? 3 : 14 - TEMP);
#elif FILTERV_TILE_WIDTH == 4
//make the stride 16 * n +/- 4
const int EXTRA = (TEMP >=0 && TEMP <=4) ? 4 - TEMP : (TEMP > 12? 20 - TEMP : 12 - TEMP);
#else
#error "FILTERV_TILE_WIDTH must be 4, 8 or 16"
#endif
const int CACHE_TRUE_WIDTH = CACHE_WIDTH + EXTRA;
const int CACHE_COUNT = (CACHE_WIDTH + FILTERV_BLOCK_HEIGHT - 1) / FILTERV_BLOCK_HEIGHT;
const int WRITE_COUNT = (FILTERV_TILE_HEIGHT + FILTERV_BLOCK_HEIGHT -1) / FILTERV_BLOCK_HEIGHT;
__shared__ float data[CACHE_TRUE_WIDTH * FILTERV_TILE_WIDTH];
const int row_block_first = IMUL(blockIdx.y, FILTERV_TILE_HEIGHT);
const int col = IMUL(blockIdx.x, FILTERV_TILE_WIDTH) + threadIdx.x;
const int row_first = row_block_first - HALF_WIDTH;
const int data_index_max = IMUL(height - 1, width) + col;
const int cache_col_start = threadIdx.y;
const int cache_row_start = IMUL(threadIdx.x, CACHE_TRUE_WIDTH);
int cache_index = cache_col_start + cache_row_start;
int data_index = IMUL(row_first + cache_col_start, width) + col;
if(col < width)
{
#pragma unroll
for(int i = 0; i < CACHE_COUNT; ++i)
{
if(cache_col_start < CACHE_WIDTH - i * FILTERV_BLOCK_HEIGHT)
{
int fetch_index = data_index < col ? col : (data_index > data_index_max? data_index_max : data_index);
data[cache_index + i * FILTERV_BLOCK_HEIGHT] = tex1Dfetch(texData,fetch_index);
data_index += IMUL(FILTERV_BLOCK_HEIGHT, width);
}
}
}
__syncthreads();
if(col >= width) return;
int row = row_block_first + threadIdx.y;
int index_start = cache_row_start + threadIdx.y;
#pragma unroll
for(int i = 0; i < WRITE_COUNT; ++i,
row += FILTERV_BLOCK_HEIGHT, index_start += FILTERV_BLOCK_HEIGHT)
{
if(row < height)
{
int index_dest = IMUL(row, width) + col;
float value = 0;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[index_start + i] * d_kernel[i]);
}
d_result[index_dest] = value;
}
}
}
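// UpsampleKernel<LOG_SCALE> enlarges the image by 2^LOG_SCALE with bilinear
// interpolation: 'helper' (the sub-row index within a scale block) selects
// between copying a source row and blending two source rows, and the inner
// loop interpolates horizontally between v1 and v2 for the SCALE output
// columns.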
template<int LOG_SCALE> __global__ void UpsampleKernel(float* d_result, int width)
{
const int SCALE = (1 << LOG_SCALE), SCALE_MASK = (SCALE - 1);
const float INV_SCALE = 1.0f / (float(SCALE));
int col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(col >= width) return;
int row = blockIdx.y >> LOG_SCALE;
int index = row * width + col;
int dst_row = blockIdx.y;
int dst_idx= (width * dst_row + col) * SCALE;
int helper = blockIdx.y & SCALE_MASK;
if (helper)
{
float v11 = tex1Dfetch(texData, index);
float v12 = tex1Dfetch(texData, index + 1);
index += width;
float v21 = tex1Dfetch(texData, index);
float v22 = tex1Dfetch(texData, index + 1);
float w1 = INV_SCALE * helper, w2 = 1.0 - w1;
float v1 = (v21 * w1 + w2 * v11);
float v2 = (v22 * w1 + w2 * v12);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}else
{
float v1 = tex1Dfetch(texData, index);
float v2 = tex1Dfetch(texData, index + 1);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
void ProgramCU::SampleImageU(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
src->BindTexture(texData);
dim3 grid((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height << log_scale);
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1 : UpsampleKernel<1> <<< grid, block>>> ((float*) dst->_cuData, width); break;
case 2 : UpsampleKernel<2> <<< grid, block>>> ((float*) dst->_cuData, width); break;
case 3 : UpsampleKernel<3> <<< grid, block>>> ((float*) dst->_cuData, width); break;
default: break;
}
}
template<int LOG_SCALE> __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width) return;
const int src_col = min((dst_col << LOG_SCALE), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << LOG_SCALE;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
__global__ void DownsampleKernel(float* d_result, int src_width, int dst_width, const int log_scale)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width) return;
const int src_col = min((dst_col << log_scale), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << log_scale;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
void ProgramCU::SampleImageD(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int src_width = src->GetImgWidth(), dst_width = dst->GetImgWidth() ;
src->BindTexture(texData);
dim3 grid((dst_width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, dst->GetImgHeight());
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1 : DownsampleKernel<1> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break;
case 2 : DownsampleKernel<2> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break;
case 3 : DownsampleKernel<3> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break;
default: DownsampleKernel <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width, log_scale);
}
}
__global__ void ChannelReduce_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texData, index*4);
}
__global__ void ChannelReduce_Convert_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
float4 rgba = tex1Dfetch(texDataF4, index);
d_result[index] = 0.299f * rgba.x + 0.587f* rgba.y + 0.114f * rgba.z;
}
void ProgramCU::ReduceToSingleChannel(CuTexImage* dst, CuTexImage* src, int convert_rgb)
{
int width = src->GetImgWidth(), height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
if(convert_rgb)
{
src->BindTexture(texDataF4);
ChannelReduce_Convert_Kernel<<<grid, block>>>((float*)dst->_cuData);
}else
{
src->BindTexture(texData);
ChannelReduce_Kernel<<<grid, block>>>((float*)dst->_cuData);
}
}
__global__ void ConvertByteToFloat_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texDataB, index);
}
void ProgramCU::ConvertByteToFloat(CuTexImage*src, CuTexImage* dst)
{
int width = src->GetImgWidth(), height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
src->BindTexture(texDataB);
ConvertByteToFloat_Kernel<<<grid, block>>>((float*)dst->_cuData);
}
void ProgramCU::CreateFilterKernel(float sigma, float* kernel, int& width)
{
int i, sz = int( ceil( GlobalUtil::_FilterWidthFactor * sigma -0.5) ) ;//
width = 2*sz + 1;
if(width > KERNEL_MAX_WIDTH)
{
//filter size truncation
sz = KERNEL_MAX_WIDTH >> 1;
width =KERNEL_MAX_WIDTH;
}else if(width < KERNEL_MIN_WIDTH)
{
sz = KERNEL_MIN_WIDTH >> 1;
width =KERNEL_MIN_WIDTH;
}
float rv = 1.0f/(sigma*sigma), v, ksum =0;
// pre-compute filter
for( i = -sz ; i <= sz ; ++i)
{
kernel[i+sz] = v = exp(-0.5f * i * i *rv) ;
ksum += v;
}
//normalize the kernel
rv = 1.0f/ksum;
for(i = 0; i< width ;i++) kernel[i]*=rv;
}
template<int FW> void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
//horizontal filtering
src->BindTexture(texData);
dim3 gridh((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height);
dim3 blockh(FILTERH_TILE_WIDTH);
FilterH<FW><<<gridh, blockh>>>((float*)buf->_cuData, width);
CheckErrorCUDA("FilterH");
///vertical filtering
buf->BindTexture(texData);
dim3 gridv((width + FILTERV_TILE_WIDTH - 1)/ FILTERV_TILE_WIDTH, (height + FILTERV_TILE_HEIGHT - 1)/FILTERV_TILE_HEIGHT);
dim3 blockv(FILTERV_TILE_WIDTH, FILTERV_BLOCK_HEIGHT);
FilterV<FW><<<gridv, blockv>>>((float*)dst->_cuData, width, height);
CheckErrorCUDA("FilterV");
}
//////////////////////////////////////////////////////////////////////
// tested on 2048x1500 image, the time on pyramid construction is
// OpenGL version : 18ms
// CUDA version: 28 ms
void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf, float sigma)
{
float filter_kernel[KERNEL_MAX_WIDTH]; int width;
CreateFilterKernel(sigma, filter_kernel, width);
cudaMemcpyToSymbol(d_kernel, filter_kernel, width * sizeof(float), 0, cudaMemcpyHostToDevice);
switch(width)
{
case 5: FilterImage< 5>(dst, src, buf); break;
case 7: FilterImage< 7>(dst, src, buf); break;
case 9: FilterImage< 9>(dst, src, buf); break;
case 11: FilterImage<11>(dst, src, buf); break;
case 13: FilterImage<13>(dst, src, buf); break;
case 15: FilterImage<15>(dst, src, buf); break;
case 17: FilterImage<17>(dst, src, buf); break;
case 19: FilterImage<19>(dst, src, buf); break;
case 21: FilterImage<21>(dst, src, buf); break;
case 23: FilterImage<23>(dst, src, buf); break;
case 25: FilterImage<25>(dst, src, buf); break;
case 27: FilterImage<27>(dst, src, buf); break;
case 29: FilterImage<29>(dst, src, buf); break;
case 31: FilterImage<31>(dst, src, buf); break;
case 33: FilterImage<33>(dst, src, buf); break;
default: break;
}
}
texture<float, 1, cudaReadModeElementType> texC;
texture<float, 1, cudaReadModeElementType> texP;
texture<float, 1, cudaReadModeElementType> texN;
void __global__ ComputeDOG_Kernel(float* d_dog, float2* d_got, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
float vxn = tex1Dfetch(texC, index + 1);
float vxp = tex1Dfetch(texC, index - 1);
float vyp = tex1Dfetch(texC, index - width);
float vyn = tex1Dfetch(texC, index + width);
float dx = vxn - vxp, dy = vyn - vyp;
float grd = 0.5f * sqrt(dx * dx + dy * dy);
float rot = (grd == 0.0f? 0.0f : atan2(dy, dx));
d_got[index] = make_float2(grd, rot);
}
}
void __global__ ComputeDOG_Kernel(float* d_dog, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
}
}
void ProgramCU::ComputeDOG(CuTexImage* gus, CuTexImage* dog, CuTexImage* got)
{
int width = gus->GetImgWidth(), height = gus->GetImgHeight();
dim3 grid((width + DOG_BLOCK_DIMX - 1)/ DOG_BLOCK_DIMX, (height + DOG_BLOCK_DIMY - 1)/DOG_BLOCK_DIMY);
dim3 block(DOG_BLOCK_DIMX, DOG_BLOCK_DIMY);
gus->BindTexture(texC);
(gus -1)->BindTexture(texP);
if(got->_cuData)
ComputeDOG_Kernel<<<grid, block>>>((float*) dog->_cuData, (float2*) got->_cuData, width, height);
else
ComputeDOG_Kernel<<<grid, block>>>((float*) dog->_cuData, width, height);
}
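// READ_CMP_DOG_DATA reads one 3-sample row of a neighboring DOG level and
// rejects the pixel early: if the center value v is currently a candidate
// maximum it must stay above every neighbor read so far (and symmetrically
// for a minimum), otherwise control jumps to key_finish.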
#define READ_CMP_DOG_DATA(datai, tex, idx) \
datai[0] = tex1Dfetch(tex, idx - 1);\
datai[1] = tex1Dfetch(tex, idx);\
datai[2] = tex1Dfetch(tex, idx + 1);\
if(v > nmax)\
{\
nmax = max(nmax, datai[0]);\
nmax = max(nmax, datai[1]);\
nmax = max(nmax, datai[2]);\
if(v < nmax) goto key_finish;\
}else\
{\
nmin = min(nmin, datai[0]);\
nmin = min(nmin, datai[1]);\
nmin = min(nmin, datai[2]);\
if(v > nmin) goto key_finish;\
}
void __global__ ComputeKEY_Kernel(float4* d_key, int width, int colmax, int rowmax,
float dog_threshold0, float dog_threshold, float edge_threshold, int subpixel_localization)
{
float data[3][3], v;
float datap[3][3], datan[3][3];
#ifdef KEY_OFFSET_ONE
int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y + 1;
int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x + 1;
#else
int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x;
#endif
int index = IMUL(row, width) + col;
int idx[3] ={index - width, index, index + width};
int in_image =0;
float nmax, nmin, result = 0.0f;
float dx = 0, dy = 0, ds = 0;
bool offset_test_passed = true;
#ifdef KEY_OFFSET_ONE
if(row < rowmax && col < colmax)
#else
if(row > 0 && col > 0 && row < rowmax && col < colmax)
#endif
{
in_image = 1;
data[1][1] = v = tex1Dfetch(texC, idx[1]);
if(fabs(v) <= dog_threshold0) goto key_finish;
data[1][0] = tex1Dfetch(texC, idx[1] - 1);
data[1][2] = tex1Dfetch(texC, idx[1] + 1);
nmax = max(data[1][0], data[1][2]);
nmin = min(data[1][0], data[1][2]);
if(v <=nmax && v >= nmin) goto key_finish;
//if((v > nmax && v < 0 )|| (v < nmin && v > 0)) goto key_finish;
READ_CMP_DOG_DATA(data[0], texC, idx[0]);
READ_CMP_DOG_DATA(data[2], texC, idx[2]);
//edge suppression
float vx2 = v * 2.0f;
float fxx = data[1][0] + data[1][2] - vx2;
float fyy = data[0][1] + data[2][1] - vx2;
float fxy = 0.25f * (data[2][2] + data[0][0] - data[2][0] - data[0][2]);
float temp1 = fxx * fyy - fxy * fxy;
float temp2 = (fxx + fyy) * (fxx + fyy);
if(temp1 <=0 || temp2 > edge_threshold * temp1) goto key_finish;
//read the previous level
READ_CMP_DOG_DATA(datap[0], texP, idx[0]);
READ_CMP_DOG_DATA(datap[1], texP, idx[1]);
READ_CMP_DOG_DATA(datap[2], texP, idx[2]);
//read the next level
READ_CMP_DOG_DATA(datan[0], texN, idx[0]);
READ_CMP_DOG_DATA(datan[1], texN, idx[1]);
READ_CMP_DOG_DATA(datan[2], texN, idx[2]);
if(subpixel_localization)
{
//subpixel localization
float fx = 0.5f * (data[1][2] - data[1][0]);
float fy = 0.5f * (data[2][1] - data[0][1]);
float fs = 0.5f * (datan[1][1] - datap[1][1]);
float fss = (datan[1][1] + datap[1][1] - vx2);
float fxs = 0.25f* (datan[1][2] + datap[1][0] - datan[1][0] - datap[1][2]);
float fys = 0.25f* (datan[2][1] + datap[0][1] - datan[0][1] - datap[2][1]);
//need to solve dx, dy, ds;
// |-fx| | fxx fxy fxs | |dx|
// |-fy| = | fxy fyy fys | * |dy|
// |-fs| | fxs fys fss | |ds|
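// The 3x3 system is solved below by Gaussian elimination with partial
// pivoting: each float4 A0..A2 packs one matrix row plus the right-hand side
// in .w, maxa selects the pivot row, and back-substitution yields ds, dy, dx.
// The offset is accepted only if |dx|, |dy|, |ds| < 1 and the interpolated
// response still exceeds dog_threshold.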
float4 A0 = fxx > 0? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx);
float4 A1 = fxy > 0? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy);
float4 A2 = fxs > 0? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs);
float maxa = max(max(A0.x, A1.x), A2.x);
if(maxa >= 1e-10)
{
if(maxa == A1.x)
{
float4 TEMP = A1; A1 = A0; A0 = TEMP;
}else if(maxa == A2.x)
{
float4 TEMP = A2; A2 = A0; A0 = TEMP;
}
A0.y /= A0.x; A0.z /= A0.x; A0.w/= A0.x;
A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w;
A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w;
if(abs(A2.y) > abs(A1.y))
{
float4 TEMP = A2; A2 = A1; A1 = TEMP;
}
if(abs(A1.y) >= 1e-10)
{
A1.z /= A1.y; A1.w /= A1.y;
A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w;
if(abs(A2.z) >= 1e-10)
{
ds = A2.w / A2.z;
dy = A1.w - ds * A1.z;
dx = A0.w - ds * A0.z - dy * A0.y;
offset_test_passed =
fabs(data[1][1] + 0.5f * (dx * fx + dy * fy + ds * fs)) > dog_threshold
&&fabs(ds) < 1.0f && fabs(dx) < 1.0f && fabs(dy) < 1.0f;
}
}
}
}
if(offset_test_passed) result = v > nmax ? 1.0 : -1.0;
}
key_finish:
if(in_image) d_key[index] = make_float4(result, dx, dy, ds);
}
void ProgramCU::ComputeKEY(CuTexImage* dog, CuTexImage* key, float Tdog, float Tedge)
{
int width = dog->GetImgWidth(), height = dog->GetImgHeight();
float Tdog1 = (GlobalUtil::_SubpixelLocalization? 0.8f : 1.0f) * Tdog;
CuTexImage* dogp = dog - 1;
CuTexImage* dogn = dog + 1;
#ifdef KEY_OFFSET_ONE
dim3 grid((width - 1 + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height - 1 + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY);
#else
dim3 grid((width + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY);
#endif
dim3 block(KEY_BLOCK_DIMX, KEY_BLOCK_DIMY);
dogp->BindTexture(texP);
dog ->BindTexture(texC);
dogn->BindTexture(texN);
Tedge = (Tedge+1)*(Tedge+1)/Tedge;
ComputeKEY_Kernel<<<grid, block>>>((float4*) key->_cuData, width,
width -1, height -1, Tdog1, Tdog, Tedge, GlobalUtil::_SubpixelLocalization);
}
void __global__ InitHist_Kernel(int4* hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(row < height && col < wd)
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
if(row > 0 && row < height -1)
{
#pragma unroll
for(int i = 0; i < 4 ; ++i, ++scol)
{
float4 temp = tex1Dfetch(texDataF4, sidx +i);
v[i] = (scol < ws -1 && scol > 0 && temp.x!=0) ? 1 : 0;
}
}
hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::InitHistogram(CuTexImage* key, CuTexImage* hist)
{
int ws = key->GetImgWidth(), hs = key->GetImgHeight();
int wd = hist->GetImgWidth(), hd = hist->GetImgHeight();
dim3 grid((wd + HIST_INIT_WIDTH - 1)/ HIST_INIT_WIDTH, hd);
dim3 block(HIST_INIT_WIDTH, 1);
key->BindTexture(texDataF4);
InitHist_Kernel<<<grid, block>>>((int4*) hist->_cuData, ws, wd, hd);
}
void __global__ ReduceHist_Kernel(int4* d_hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(row < height && col < wd)
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
#pragma unroll
for(int i = 0; i < 4 && scol < ws; ++i, ++scol)
{
int4 temp = tex1Dfetch(texDataI4, sidx + i);
v[i] = temp.x + temp.y + temp.z + temp.w;
}
d_hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::ReduceHistogram(CuTexImage*hist1, CuTexImage* hist2)
{
int ws = hist1->GetImgWidth(), hs = hist1->GetImgHeight();
int wd = hist2->GetImgWidth(), hd = hist2->GetImgHeight();
int temp = (int)floor(logf(float(wd * 2/ 3)) / logf(2.0f));
const int wi = min(7, max(temp , 0));
hist1->BindTexture(texDataI4);
const int BW = 1 << wi, BH = 1 << (7 - wi);
dim3 grid((wd + BW - 1)/ BW, (hd + BH -1) / BH);
dim3 block(BW, BH);
ReduceHist_Kernel<<<grid, block>>>((int4*)hist2->_cuData, ws, wd, hd);
}
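// ListGen_Kernel refines a keypoint's histogram coordinate by one level: the
// int4 cell holds four sub-column counts, pos.z is the keypoint's remaining
// rank inside that cell, and the kernel shifts pos.x left by 2 and steps into
// the sub-column whose running sum first covers the rank, subtracting the
// counts that were skipped. Applied level by level, this turns a rank into a
// column position in the finest key map.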
void __global__ ListGen_Kernel(int4* d_list, int width)
{
int idx1 = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int4 pos = tex1Dfetch(texDataList, idx1);
int idx2 = IMUL(pos.y, width) + pos.x;
int4 temp = tex1Dfetch(texDataI4, idx2);
int sum1 = temp.x + temp.y;
int sum2 = sum1 + temp.z;
pos.x <<= 2;
if(pos.z >= sum2)
{
pos.x += 3;
pos.z -= sum2;
}else if(pos.z >= sum1)
{
pos.x += 2;
pos.z -= sum1;
}else if(pos.z >= temp.x)
{
pos.x += 1;
pos.z -= temp.x;
}
d_list[idx1] = pos;
}
//input list (x, y) (x, y) ....
void ProgramCU::GenerateList(CuTexImage* list, CuTexImage* hist)
{
int len = list->GetImgWidth();
list->BindTexture(texDataList);
hist->BindTexture(texDataI4);
dim3 grid((len + LISTGEN_BLOCK_DIM -1) /LISTGEN_BLOCK_DIM);
dim3 block(LISTGEN_BLOCK_DIM);
ListGen_Kernel<<<grid, block>>>((int4*) list->_cuData, hist->GetImgWidth());
}
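// ComputeOrientation_Kernel builds a 36-bin gradient-orientation histogram in
// a Gaussian-weighted window around each keypoint, smooths it, and either
// keeps the single dominant peak or, when multiple orientations are allowed,
// packs up to two peaks above 0.8 * max (each refined by parabolic
// interpolation and quantized to 16 bits as a fraction of a full turn) into
// key.w via __int_as_float; 65535 marks an unused slot.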
void __global__ ComputeOrientation_Kernel(float4* d_list,
int list_len,
int width, int height,
float sigma, float sigma_step,
float gaussian_factor, float sample_factor,
int num_orientation,
int existing_keypoint,
int subpixel,
int keepsign)
{
const float ten_degree_per_radius = 5.7295779513082320876798154814105;
const float radius_per_ten_degrees = 1.0 / 5.7295779513082320876798154814105;
int idx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(idx >= list_len) return;
float4 key;
if(existing_keypoint)
{
key = tex1Dfetch(texDataF4, idx);
}else
{
int4 ikey = tex1Dfetch(texDataList, idx);
key.x = ikey.x + 0.5f;
key.y = ikey.y + 0.5f;
key.z = sigma;
if(subpixel || keepsign)
{
float4 offset = tex1Dfetch(texDataF4, IMUL(width, ikey.y) + ikey.x);
if(subpixel)
{
key.x += offset.y;
key.y += offset.z;
key.z *= pow(sigma_step, offset.w);
}
if(keepsign) key.z *= offset.x;
}
}
if(num_orientation == 0)
{
key.w = 0;
d_list[idx] = key;
return;
}
float vote[37];
float gsigma = key.z * gaussian_factor;
float win = fabs(key.z) * sample_factor;
float dist_threshold = win * win + 0.5;
float factor = -0.5f / (gsigma * gsigma);
float xmin = max(1.5f, floor(key.x - win) + 0.5f);
float ymin = max(1.5f, floor(key.y - win) + 0.5f);
float xmax = min(width - 1.5f, floor(key.x + win) + 0.5f);
float ymax = min(height -1.5f, floor(key.y + win) + 0.5f);
#pragma unroll
for(int i = 0; i < 36; ++i) vote[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - key.x;
float dy = y - key.y;
float sq_dist = dx * dx + dy * dy;
if(sq_dist >= dist_threshold) continue;
float2 got = tex2D(texDataF2, x, y);
float weight = got.x * exp(sq_dist * factor);
float fidx = floor(got.y * ten_degree_per_radius);
int oidx = fidx;
if(oidx < 0) oidx += 36;
vote[oidx] += weight;
}
}
//filter the vote
const float one_third = 1.0 /3.0;
#pragma unroll
for(int i = 0; i < 6; ++i)
{
vote[36] = vote[0];
float pre = vote[35];
#pragma unroll
for(int j = 0; j < 36; ++j)
{
float temp = one_third * (pre + vote[j] + vote[j + 1]);
pre = vote[j]; vote[j] = temp;
}
}
vote[36] = vote[0];
if(num_orientation == 1 || existing_keypoint)
{
int index_max = 0;
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i)
{
index_max = vote[i] > max_vote? i : index_max;
max_vote = max(max_vote, vote[i]);
}
float pre = vote[index_max == 0? 35 : index_max -1];
float next = vote[index_max + 1];
float weight = max_vote;
float off = 0.5f * FDIV(next - pre, weight + weight - next - pre);
key.w = radius_per_ten_degrees * (index_max + 0.5f + off);
d_list[idx] = key;
}else
{
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i) max_vote = max(max_vote, vote[i]);
float vote_threshold = max_vote * 0.8f;
float pre = vote[35];
float max_rot[2], max_vot[2] = {0, 0};
int ocount = 0;
#pragma unroll
for(int i =0; i < 36; ++i)
{
float next = vote[i + 1];
if(vote[i] > vote_threshold && vote[i] > pre && vote[i] > next)
{
float di = 0.5f * FDIV(next - pre, vote[i] + vote[i] - next - pre);
float rot = i + di + 0.5f;
float weight = vote[i];
///
if(weight > max_vot[1])
{
if(weight > max_vot[0])
{
max_vot[1] = max_vot[0];
max_rot[1] = max_rot[0];
max_vot[0] = weight;
max_rot[0] = rot;
}
else
{
max_vot[1] = weight;
max_rot[1] = rot;
}
ocount ++;
}
}
pre = vote[i];
}
float fr1 = max_rot[0] / 36.0f;
if(fr1 < 0) fr1 += 1.0f;
unsigned short us1 = ocount == 0? 65535 : ((unsigned short )floor(fr1 * 65535.0f));
unsigned short us2 = 65535;
if(ocount > 1)
{
float fr2 = max_rot[1] / 36.0f;
if(fr2 < 0) fr2 += 1.0f;
us2 = (unsigned short ) floor(fr2 * 65535.0f);
}
unsigned int uspack = (us2 << 16) | us1;
key.w = __int_as_float(uspack);
d_list[idx] = key;
}
}
void ProgramCU::ComputeOrientation(CuTexImage* list, CuTexImage* got, CuTexImage*key,
float sigma, float sigma_step, int existing_keypoint)
{
int len = list->GetImgWidth();
if(len <= 0) return;
int width = got->GetImgWidth(), height = got->GetImgHeight();
if(existing_keypoint)
{
list->BindTexture(texDataF4);
}else
{
list->BindTexture(texDataList);
if(GlobalUtil::_SubpixelLocalization) key->BindTexture(texDataF4);
}
got->BindTexture2D(texDataF2);
const int block_width = len < ORIENTATION_COMPUTE_PER_BLOCK ? 16 : ORIENTATION_COMPUTE_PER_BLOCK;
dim3 grid((len + block_width -1) / block_width);
dim3 block(block_width);
ComputeOrientation_Kernel<<<grid, block>>>((float4*) list->_cuData,
len, width, height, sigma, sigma_step,
GlobalUtil::_OrientationGaussianFactor,
GlobalUtil::_OrientationGaussianFactor * GlobalUtil::_OrientationWindowFactor,
GlobalUtil::_FixedOrientation? 0 : GlobalUtil::_MaxOrientation,
existing_keypoint, GlobalUtil::_SubpixelLocalization, GlobalUtil::_KeepExtremumSign);
ProgramCU::CheckErrorCUDA("ComputeOrientation");
}
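// In ComputeDescriptor_Kernel, 16 consecutive threads serve one keypoint
// (fidx = idx >> 4): bidx selects one of the 4x4 spatial cells of the SIFT
// grid, and each thread accumulates that cell's 8-bin orientation histogram
// with bilinear spatial weights and a Gaussian window, writing 8 floats (two
// float4) of the 128-dimensional descriptor.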
template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptor_Kernel(float4* d_des, int num,
int width, int height, float window_factor)
{
const float rpi = 4.0/ 3.14159265358979323846;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num) return;
float4 key = tex1Dfetch(texDataF4, fidx);
int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2;
float spt = fabs(key.z * window_factor);
float s, c; __sincosf(key.w, &s, &c);
float anglef = key.w > 3.14159265358979323846? key.w - (2.0 * 3.14159265358979323846) : key.w ;
float cspt = c * spt, sspt = s * spt;
float crspt = c / spt, srspt = s / spt;
float2 offsetpt, pt;
float xmin, ymin, xmax, ymax, bsz;
offsetpt.x = ix - 1.5f;
offsetpt.y = iy - 1.5f;
pt.x = cspt * offsetpt.x - sspt * offsetpt.y + key.x;
pt.y = cspt * offsetpt.y + sspt * offsetpt.x + key.y;
bsz = fabs(cspt) + fabs(sspt);
xmin = max(1.5f, floor(pt.x - bsz) + 0.5f);
ymin = max(1.5f, floor(pt.y - bsz) + 0.5f);
xmax = min(width - 1.5f, floor(pt.x + bsz) + 0.5f);
ymax = min(height - 1.5f, floor(pt.y + bsz) + 0.5f);
float des[9];
#pragma unroll
for(int i =0; i < 9; ++i) des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - pt.x;
float dy = y - pt.y;
float nx = crspt * dx + srspt * dy;
float ny = crspt * dy - srspt * dx;
float nxn = fabs(nx);
float nyn = fabs(ny);
if(nxn < 1.0f && nyn < 1.0f)
{
float2 cc = tex2D(texDataF2, x, y);
float dnx = nx + offsetpt.x;
float dny = ny + offsetpt.y;
float ww = exp(-0.125f * (dnx * dnx + dny * dny));
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = ww * wx * wy * cc.x;
float theta = (anglef - cc.y) * rpi;
if(theta < 0) theta += 8.0f;
float fo = floor(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
//this dynamic indexing part might be slow
}else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
}
template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptorRECT_Kernel(float4* d_des, int num,
int width, int height, float window_factor)
{
const float rpi = 4.0/ 3.14159265358979323846;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num) return;
float4 key = tex1Dfetch(texDataF4, fidx);
int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2;
//float aspect_ratio = key.w / key.z;
//float aspect_sq = aspect_ratio * aspect_ratio;
float sptx = key.z * 0.25, spty = key.w * 0.25;
float xmin, ymin, xmax, ymax; float2 pt;
pt.x = sptx * (ix + 0.5f) + key.x;
pt.y = spty * (iy + 0.5f) + key.y;
xmin = max(1.5f, floor(pt.x - sptx) + 0.5f);
ymin = max(1.5f, floor(pt.y - spty) + 0.5f);
xmax = min(width - 1.5f, floor(pt.x + sptx) + 0.5f);
ymax = min(height - 1.5f, floor(pt.y + spty) + 0.5f);
float des[9];
#pragma unroll
for(int i =0; i < 9; ++i) des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float nx = (x - pt.x) / sptx;
float ny = (y - pt.y) / spty;
float nxn = fabs(nx);
float nyn = fabs(ny);
if(nxn < 1.0f && nyn < 1.0f)
{
float2 cc = tex2D(texDataF2, x, y);
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = wx * wy * cc.x;
float theta = (- cc.y) * rpi;
if(theta < 0) theta += 8.0f;
float fo = floor(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
//this dynamic indexing part might be slow
}else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
}
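// NormalizeDescriptor_Kernel applies the standard SIFT normalization: the
// 128-vector is L2-normalized, each component is clamped to 0.2 to limit the
// influence of large gradients, and the result is renormalized to unit length.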
void __global__ NormalizeDescriptor_Kernel(float4* d_des, int num)
{
float4 temp[32];
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
int sidx = idx << 5;
float norm1 = 0, norm2 = 0;
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i] = tex1Dfetch(texDataF4, sidx +i);
norm1 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y +
temp[i].z * temp[i].z + temp[i].w * temp[i].w);
}
norm1 = rsqrt(norm1);
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i].x = min(0.2f, temp[i].x * norm1);
temp[i].y = min(0.2f, temp[i].y * norm1);
temp[i].z = min(0.2f, temp[i].z * norm1);
temp[i].w = min(0.2f, temp[i].w * norm1);
norm2 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y +
temp[i].z * temp[i].z + temp[i].w * temp[i].w);
}
norm2 = rsqrt(norm2);
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i].x *= norm2; temp[i].y *= norm2;
temp[i].z *= norm2; temp[i].w *= norm2;
d_des[sidx + i] = temp[i];
}
}
void ProgramCU::ComputeDescriptor(CuTexImage*list, CuTexImage* got, CuTexImage* dtex, int rect, int stream)
{
int num = list->GetImgWidth();
int width = got->GetImgWidth();
int height = got->GetImgHeight();
dtex->InitTexture(num * 128, 1, 1);
got->BindTexture2D(texDataF2);
list->BindTexture(texDataF4);
int block_width = DESCRIPTOR_COMPUTE_BLOCK_SIZE;
dim3 grid((num * 16 + block_width -1) / block_width);
dim3 block(block_width);
if(rect)
{
if(GlobalUtil::_UseDynamicIndexing)
ComputeDescriptorRECT_Kernel<true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
ComputeDescriptorRECT_Kernel<false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}else
{
if(GlobalUtil::_UseDynamicIndexing)
ComputeDescriptor_Kernel<true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
ComputeDescriptor_Kernel<false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}
if(GlobalUtil::_NormalizedSIFT)
{
dtex->BindTexture(texDataF4);
const int block_width = DESCRIPTOR_NORMALIZ_PER_BLOCK;
dim3 grid((num + block_width -1) / block_width);
dim3 block(block_width);
NormalizeDescriptor_Kernel<<<grid, block>>>((float4*) dtex->_cuData, num);
}
CheckErrorCUDA("ComputeDescriptor");
}
//////////////////////////////////////////////////////
void ProgramCU::FinishCUDA()
{
cudaThreadSynchronize();
}
int ProgramCU::CheckErrorCUDA(const char* location)
{
cudaError_t e = cudaGetLastError();
if(e)
{
if(location) fprintf(stderr, "%s:\t", location);
fprintf(stderr, "%s\n", cudaGetErrorString(e));
//assert(0);
return 1;
}else
{
return 0;
}
}
void __global__ ConvertDOG_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float v = tex1Dfetch(texData, index);
d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)?
0.5 : saturate(0.5+20.0*v);
}
}
///
void ProgramCU::DisplayConvertDOG(CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = dog->GetImgWidth(), height = dog ->GetImgHeight();
dog->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
ConvertDOG_Kernel<<<grid, block>>>((float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertDOG");
}
void __global__ ConvertGRD_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float v = tex1Dfetch(texData, index << 1);
d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)?
0 : saturate(5 * v);
}
}
void ProgramCU::DisplayConvertGRD(CuTexImage* got, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = got->GetImgWidth(), height = got ->GetImgHeight();
got->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
ConvertGRD_Kernel<<<grid, block>>>((float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertGRD");
}
void __global__ ConvertKEY_Kernel(float4* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float4 keyv = tex1Dfetch(texDataF4, index);
int is_key = (keyv.x == 1.0f || keyv.x == -1.0f);
int inside = col > 0 && row > 0 && row < height -1 && col < width - 1;
float v = inside? saturate(0.5 + 20 * tex1Dfetch(texData, index)) : 0.5;
d_result[index] = is_key && inside ?
(keyv.x > 0? make_float4(1.0f, 0, 0, 1.0f) : make_float4(0.0f, 1.0f, 0.0f, 1.0f)):
make_float4(v, v, v, 1.0f) ;
}
}
void ProgramCU::DisplayConvertKEY(CuTexImage* key, CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = key->GetImgWidth(), height = key ->GetImgHeight();
dog->BindTexture(texData);
key->BindTexture(texDataF4);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
ConvertKEY_Kernel<<<grid, block>>>((float4*) out->_cuData, width, height);
}
void __global__ DisplayKeyPoint_Kernel(float4 * d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
float4 v = tex1Dfetch(texDataF4, idx);
d_result[idx] = make_float4(v.x, v.y, 0, 1.0f);
}
void ProgramCU::DisplayKeyPoint(CuTexImage* ftex, CuTexImage* out)
{
int num = ftex->GetImgWidth();
int block_width = 64;
dim3 grid((num + block_width -1) /block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
DisplayKeyPoint_Kernel<<<grid, block>>>((float4*) out->_cuData, num);
ProgramCU::CheckErrorCUDA("DisplayKeyPoint");
}
void __global__ DisplayKeyBox_Kernel(float4* d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
int kidx = idx / 10, vidx = idx - IMUL(kidx , 10);
float4 v = tex1Dfetch(texDataF4, kidx);
float sz = fabs(v.z * 3.0f);
///////////////////////
float s, c; __sincosf(v.w, &s, &c);
///////////////////////
float dx = vidx == 0? 0 : ((vidx <= 4 || vidx >= 9)? sz : -sz);
float dy = vidx <= 1? 0 : ((vidx <= 2 || vidx >= 7)? -sz : sz);
float4 pos;
pos.x = v.x + c * dx - s * dy;
pos.y = v.y + c * dy + s * dx;
pos.z = 0; pos.w = 1.0f;
d_result[idx] = pos;
}
void ProgramCU::DisplayKeyBox(CuTexImage* ftex, CuTexImage* out)
{
int len = ftex->GetImgWidth();
int block_width = 32;
dim3 grid((len * 10 + block_width -1) / block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
DisplayKeyBox_Kernel<<<grid, block>>>((float4*) out->_cuData, len * 10);
}
///////////////////////////////////////////////////////////////////
inline void CuTexImage:: BindTexture(textureReference& texRef)
{
cudaBindTexture(NULL, &texRef, _cuData, &texRef.channelDesc, _numBytes);
}
inline void CuTexImage::BindTexture2D(textureReference& texRef)
{
#if defined(SIFTGPU_ENABLE_LINEAR_TEX2D)
cudaBindTexture2D(0, &texRef, _cuData, &texRef.channelDesc, _imgWidth, _imgHeight, _imgWidth* _numChannel* sizeof(float));
#else
cudaChannelFormatDesc desc;
cudaGetChannelDesc(&desc, _cuData2D);
cudaBindTextureToArray(&texRef, _cuData2D, &desc);
#endif
}
int ProgramCU::CheckCudaDevice(int device)
{
int count = 0, device_used;
if(cudaGetDeviceCount(&count) != cudaSuccess || count <= 0)
{
ProgramCU::CheckErrorCUDA("CheckCudaDevice");
return 0;
}else if(count == 1)
{
cudaDeviceProp deviceProp;
if ( cudaGetDeviceProperties(&deviceProp, 0) != cudaSuccess ||
(deviceProp.major == 9999 && deviceProp.minor == 9999))
{
fprintf(stderr, "CheckCudaDevice: no device supporting CUDA.\n");
return 0;
}else
{
GlobalUtil::_MemCapGPU = deviceProp.totalGlobalMem / 1024;
GlobalUtil::_texMaxDimGL = 32768;
if(GlobalUtil::_verbose)
fprintf(stdout, "NOTE: changing maximum texture dimension to %d\n", GlobalUtil::_texMaxDimGL);
}
}
if(device >0 && device < count)
{
cudaSetDevice(device);
CheckErrorCUDA("cudaSetDevice\n");
}
cudaGetDevice(&device_used);
if(device != device_used)
fprintf(stderr, "\nERROR: Cannot set device to %d\n"
"\nWARNING: Use # %d device instead (out of %d)\n", device, device_used, count);
return 1;
}
////////////////////////////////////////////////////////////////////////////////////////
// siftmatch functions
//////////////////////////////////////////////////////////////////////////////////////////
#define MULT_TBLOCK_DIMX 128
#define MULT_TBLOCK_DIMY 1
#define MULT_BLOCK_DIMX (MULT_TBLOCK_DIMX)
#define MULT_BLOCK_DIMY (8 * MULT_TBLOCK_DIMY)
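// Tile shape for descriptor matching: a block of MULT_TBLOCK_DIMX = 128
// threads stages MULT_BLOCK_DIMY = 8 descriptors of image 1 in shared memory
// and compares them against 128 descriptors of image 2, one per thread; each
// descriptor is read as 8 uint4 (128 quantized 8-bit values).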
texture<uint4, 1, cudaReadModeElementType> texDes1;
texture<uint4, 1, cudaReadModeElementType> texDes2;
void __global__ MultiplyDescriptor_Kernel(int* d_result, int num1, int num2, int3* d_temp)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY), idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y, idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
int read_idx1 = idx01 * 8 + threadIdx.x, read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
///////////////////////////////////////////////////////////////
//Load feature descriptors
///////////////////////////////////////////////////////////////
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
}
#else
#error
#endif
__syncthreads();
///
if(idx2 >= num2) return;
///////////////////////////////////////////////////////////////////////////
//compare descriptors
int results[MULT_BLOCK_DIMY];
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i) results[i] = 0;
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
}
void ProgramCU::MultiplyDescriptor(CuTexImage* des1, CuTexImage* des2, CuTexImage* texDot, CuTexImage* texCRT)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture(num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 32);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
MultiplyDescriptor_Kernel<<<grid, block>>>((int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL));
ProgramCU::CheckErrorCUDA("MultiplyDescriptor");
}
texture<float, 1, cudaReadModeElementType> texLoc1;
texture<float2, 1, cudaReadModeElementType> texLoc2;
struct Matrix33{float mat[3][3];};
void __global__ MultiplyDescriptorG_Kernel(int* d_result, int num1, int num2, int3* d_temp,
Matrix33 H, float hdistmax, Matrix33 F, float fdistmax)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY);
int idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y;
int idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
__shared__ float loc1[MULT_BLOCK_DIMY * 2];
int read_idx1 = idx01 * 8 + threadIdx.x ;
int read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
}
#else
#error
#endif
__syncthreads();
if(threadIdx.x < MULT_BLOCK_DIMY * 2)
{
loc1[threadIdx.x] = tex1Dfetch(texLoc1, 2 * idx01 + threadIdx.x);
}
__syncthreads();
if(idx2 >= num2) return;
int results[MULT_BLOCK_DIMY];
/////////////////////////////////////////////////////////////////////////////////////////////
//geometric verification
/////////////////////////////////////////////////////////////////////////////////////////////
int good_count = 0;
float2 loc2 = tex1Dfetch(texLoc2, idx2);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
float* loci = loc1 + i * 2;
float locx = loci[0], locy = loci[1];
//homography
float x[3], diff[2];
x[0] = H.mat[0][0] * locx + H.mat[0][1] * locy + H.mat[0][2];
x[1] = H.mat[1][0] * locx + H.mat[1][1] * locy + H.mat[1][2];
x[2] = H.mat[2][0] * locx + H.mat[2][1] * locy + H.mat[2][2];
diff[0] = fabs(FDIV(x[0], x[2]) - loc2.x);
diff[1] = fabs(FDIV(x[1], x[2]) - loc2.y);
if(diff[0] < hdistmax && diff[1] < hdistmax)
{
//check fundamental matrix
float fx1[3], ftx2[3], x2fx1, se;
fx1[0] = F.mat[0][0] * locx + F.mat[0][1] * locy + F.mat[0][2];
fx1[1] = F.mat[1][0] * locx + F.mat[1][1] * locy + F.mat[1][2];
fx1[2] = F.mat[2][0] * locx + F.mat[2][1] * locy + F.mat[2][2];
ftx2[0] = F.mat[0][0] * loc2.x + F.mat[1][0] * loc2.y + F.mat[2][0];
ftx2[1] = F.mat[0][1] * loc2.x + F.mat[1][1] * loc2.y + F.mat[2][1];
//ftx2[2] = F.mat[0][2] * loc2.x + F.mat[1][2] * loc2.y + F.mat[2][2];
x2fx1 = loc2.x * fx1[0] + loc2.y * fx1[1] + fx1[2];
se = FDIV(x2fx1 * x2fx1, fx1[0] * fx1[0] + fx1[1] * fx1[1] + ftx2[0] * ftx2[0] + ftx2[1] * ftx2[1]);
results[i] = se < fdistmax? 0: -262144;
}else
{
results[i] = -262144;
}
}else
{
results[i] = -262144;
}
good_count += (results[i] >=0);
}
/////////////////////////////////////////////////////////////////////////////////////////////
///compare feature descriptors anyway
/////////////////////////////////////////////////////////////////////////////////////////////
if(good_count > 0)
{
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i= 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
}else
{
break;
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
else break;
}
}
}
void ProgramCU::MultiplyDescriptorG(CuTexImage* des1, CuTexImage* des2,
CuTexImage* loc1, CuTexImage* loc2, CuTexImage* texDot, CuTexImage* texCRT,
float H[3][3], float hdistmax, float F[3][3], float fdistmax)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
Matrix33 MatF, MatH;
//copy the matrix
memcpy(MatF.mat, F, 9 * sizeof(float));
memcpy(MatH.mat, H, 9 * sizeof(float));
//thread blocks
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
//intermediate results
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture( num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 3);
loc1->BindTexture(texLoc1);
loc2->BindTexture(texLoc2);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
MultiplyDescriptorG_Kernel<<<grid, block>>>((int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL),
MatH, hdistmax, MatF, fdistmax);
}
texture<int, 1, cudaReadModeElementType> texDOT;
#define ROWMATCH_BLOCK_WIDTH 32
#define ROWMATCH_BLOCK_HEIGHT 1
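// RowMatch_Kernel reduces one row of the dot-product matrix to its best and
// second-best entries. The constant 0.000003814697265625 = 1 / (512 * 512)
// rescales the integer dot products to cosines (the quantized descriptors
// presumably use a scale of 512); acos turns them into angular distances, and
// a match is kept only if it passes both the absolute distance threshold and
// the ratio test against the second-best candidate.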
void __global__ RowMatch_Kernel(int*d_dot, int* d_result, int num2, float distmax, float ratiomax)
{
#if ROWMATCH_BLOCK_HEIGHT == 1
__shared__ int dotmax[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotnxt[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotidx[ROWMATCH_BLOCK_WIDTH];
int row = blockIdx.y;
#else
__shared__ int x_dotmax[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotnxt[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotidx[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
int* dotmax = x_dotmax[threadIdx.y];
int* dotnxt = x_dotnxt[threadIdx.y];
int* dotidx = x_dotidx[threadIdx.y];
int row = IMUL(blockIdx.y, ROWMATCH_BLOCK_HEIGHT) + threadIdx.y;
#endif
int base_address = IMUL(row , num2);
int t_dotmax = 0, t_dotnxt = 0, t_dotidx = -1;
for(int i = 0; i < num2; i += ROWMATCH_BLOCK_WIDTH)
{
if(threadIdx.x + i < num2)
{
int v = tex1Dfetch(texDOT, base_address + threadIdx.x + i);//d_dot[base_address + threadIdx.x + i];//
bool test = v > t_dotmax;
t_dotnxt = test? t_dotmax : max(t_dotnxt, v);
t_dotidx = test? (threadIdx.x + i) : t_dotidx;
t_dotmax = test? v: t_dotmax;
}
__syncthreads();
}
dotmax[threadIdx.x] = t_dotmax;
dotnxt[threadIdx.x] = t_dotnxt;
dotidx[threadIdx.x] = t_dotidx;
__syncthreads();
#pragma unroll
for(int step = ROWMATCH_BLOCK_WIDTH/2; step >0; step /= 2)
{
if(threadIdx.x < step)
{
int v1 = dotmax[threadIdx.x], v2 = dotmax[threadIdx.x + step];
bool test = v2 > v1;
dotnxt[threadIdx.x] = test? max(v1, dotnxt[threadIdx.x + step]) :max(dotnxt[threadIdx.x], v2);
dotidx[threadIdx.x] = test? dotidx[threadIdx.x + step] : dotidx[threadIdx.x];
dotmax[threadIdx.x] = test? v2 : v1;
}
__syncthreads();
}
if(threadIdx.x == 0)
{
float dist = acos(min(dotmax[0] * 0.000003814697265625f, 1.0));
float distn = acos(min(dotnxt[0] * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[row] = (dist < distmax) && (dist < distn * ratiomax) ? dotidx[0] : -1;//? : -1;
}
}
void ProgramCU::GetRowMatch(CuTexImage* texDot, CuTexImage* texMatch, float distmax, float ratiomax)
{
int num1 = texDot->GetImgHeight();
int num2 = texDot->GetImgWidth();
dim3 grid(1, num1/ROWMATCH_BLOCK_HEIGHT);
dim3 block(ROWMATCH_BLOCK_WIDTH, ROWMATCH_BLOCK_HEIGHT);
texDot->BindTexture(texDOT);
RowMatch_Kernel<<<grid, block>>>((int*)texDot->_cuData,
(int*)texMatch->_cuData, num2, distmax, ratiomax);
}
#define COLMATCH_BLOCK_WIDTH 32
//texture<int3, 1, cudaReadModeElementType> texCT;
void __global__ ColMatch_Kernel(int3*d_crt, int* d_result, int height, int num2, float distmax, float ratiomax)
{
int col = COLMATCH_BLOCK_WIDTH * blockIdx.x + threadIdx.x;
if(col >= num2) return;
int3 result = d_crt[col];//tex1Dfetch(texCT, col);
int read_idx = col + num2;
for(int i = 1; i < height; ++i, read_idx += num2)
{
int3 temp = d_crt[read_idx];//tex1Dfetch(texCT, read_idx);
result = result.x < temp.x?
make_int3(temp.x, temp.y, max(result.x, temp.z)) :
make_int3(result.x, result.y, max(result.z, temp.x));
}
float dist = acos(min(result.x * 0.000003814697265625f, 1.0));
float distn = acos(min(result.z * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[col] = (dist < distmax) && (dist < distn * ratiomax) ? result.y : -1;//? : -1;
}
void ProgramCU::GetColMatch(CuTexImage* texCRT, CuTexImage* texMatch, float distmax, float ratiomax)
{
int height = texCRT->GetImgHeight();
int num2 = texCRT->GetImgWidth();
//texCRT->BindTexture(texCT);
dim3 grid((num2 + COLMATCH_BLOCK_WIDTH -1) / COLMATCH_BLOCK_WIDTH);
dim3 block(COLMATCH_BLOCK_WIDTH);
ColMatch_Kernel<<<grid, block>>>((int3*)texCRT->_cuData, (int*) texMatch->_cuData, height, num2, distmax, ratiomax);
}
#endif
|
2d94601690b1f588858b5bf64538f1b68aa915b7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "yoloPlugin.h"
#include <iostream>
#define NUM_HEAD 3
#define NUM_ANCH 3
#define CONF_THRES 0.6f
using namespace nvinfer1;
using nvinfer1::plugin::YoloDetectLayer;
using nvinfer1::plugin::YoloPluginCreator;
namespace
{
const char* Yolo_PLUGIN_VERSION{"1"};
const char* Yolo_PLUGIN_NAME{"Yolo_TRT"};
} // namespace
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
#define CHECK_CUDA(call) \
do \
{ \
hipError_t status = call; \
if (status != hipSuccess) \
{ \
return status; \
} \
} while (0)
const int CUDA_NUM_THREADS = 512;
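// GET_BLOCKS_ sizes the launch grid for n threads at CUDA_NUM_THREADS per
// block; when the 1-D block count would exceed the 65535 grid-dimension limit
// it spreads the blocks over a roughly square x-by-y grid instead.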
dim3 GET_BLOCKS_(uint n)
{
uint k = (n - 1) /CUDA_NUM_THREADS + 1;
uint x = k ;
uint y = 1 ;
if (x > 65535 )
{
x = ceil(sqrt(x));
y = (n - 1 )/(x*CUDA_NUM_THREADS) + 1;
}
dim3 d = {x,y,1} ;
return d;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
// Parameterized constructor
YoloDetectLayer::YoloDetectLayer(
int num_cls,
int max_det,
std::vector<int> h,
std::vector<int> w,
std::vector<int> strides, //TODO
const Weights* anchors):
mNumcls(num_cls), mMaxdet(max_det){
for (int i=0; i<NUM_HEAD; i++){
mHeights[i] = h[i];
mWidths[i] = w[i];
mStrides[i] = strides[i];
}
mAnchors = copyToDevice(anchors[0].values, anchors[0].count);
}
YoloDetectLayer::YoloDetectLayer(const void* buffer, size_t length)
{
const char* d = static_cast<const char*>(buffer);
const char* a = d;
mNumcls = read<int>(d);
mMaxdet = read<int>(d);
CUASSERT(hipMemcpy(mHeights, d, NUM_HEAD * sizeof(int), hipMemcpyHostToHost));
d += NUM_HEAD * sizeof(int);
CUASSERT(hipMemcpy(mWidths, d, NUM_HEAD * sizeof(int), hipMemcpyHostToHost));
d += NUM_HEAD * sizeof(int);
CUASSERT(hipMemcpy(mStrides, d, NUM_HEAD * sizeof(int), hipMemcpyHostToHost));
d += NUM_HEAD * sizeof(int);
int count = read<int>(d);
mAnchors = deserializeToDevice(d, count);
ASSERT(d == a + length);
}
int YoloDetectLayer::getNbOutputs() const
{
// Plugin layer has 2 outputs
return 2;
}
int YoloDetectLayer::initialize()
{
return STATUS_SUCCESS;
}
Dims YoloDetectLayer::getOutputDimensions(int index, const Dims* inputs, int nbInputs)
{
ASSERT(index == 0 || index == 1 || index == 2);
ASSERT(nbInputs == 3);
if (index==0) return Dims3(mMaxdet, 1, 4);
else return Dims2(mMaxdet, mNumcls);
}
size_t YoloDetectLayer::getWorkspaceSize(int maxBatchSize) const
{
return 0;
}
__global__ void Reshape(const float *input, float *loc, float *cof, int w, int h,
int numClass, int mMaxdet, int stride, const float *anchors, int* countAddress)
{
CUDA_KERNEL_LOOP(idx, h*w*NUM_ANCH)
{
int mapSize = w * h;
int anchorPos = idx / mapSize;
int mapPos = idx % mapSize;
int infoLen = 5 + numClass;
if (input[(anchorPos*infoLen+4)*mapSize+mapPos]<CONF_THRES) continue;
int count = (int)atomicAdd(countAddress, 1);
if (count >= mMaxdet-1) return;
for (int i = 0; i < numClass; ++i)
{
cof[numClass * count + i] = input[(anchorPos*infoLen+i+5)*mapSize+mapPos];
}
int row = mapPos / w;
int col = mapPos % w;
float ax, ay, aw, ah;
ax = (col - 0.5f + 2.0f * input[(anchorPos*infoLen)*mapSize+mapPos]) * stride;
ay = (row - 0.5f + 2.0f * input[(anchorPos*infoLen+1)*mapSize+mapPos]) * stride;
aw = 2.0f * input[(anchorPos*infoLen+2)*mapSize+mapPos];
ah = 2.0f * input[(anchorPos*infoLen+3)*mapSize+mapPos];
aw = aw * aw * anchors[anchorPos*2];
ah = ah * ah * anchors[anchorPos*2+1];
loc[4 * count] = (ax - aw/2)/w/stride;
loc[4 * count+1] = (ay - ah/2)/h/stride;
loc[4 * count+2] = (ax + aw/2)/w/stride;
loc[4 * count+3] = (ay + ah/2)/h/stride;
}
}
int YoloDetectLayer::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t stream)
{
float* loc = static_cast<float *>(outputs[0]);
float* cof = static_cast<float *>(outputs[1]);
CHECK_CUDA(hipMalloc((void**)&count, sizeof(int)));
CUASSERT(hipMemset(count, 0, sizeof(int)));
CUASSERT(hipMemset(outputs[0], 0, mMaxdet*4*sizeof(float)));
CUASSERT(hipMemset(outputs[1], 0, mMaxdet*mNumcls*sizeof(float)));
for (int i=0; i<NUM_HEAD; i++)
{
const float* anchors = static_cast<const float *>(mAnchors.values) + 2 * NUM_ANCH * i;
hipLaunchKernelGGL(( Reshape) , dim3(GET_BLOCKS_(mHeights[i]*mWidths[i]*NUM_ANCH)), dim3(CUDA_NUM_THREADS), 0, stream ,
static_cast<const float *>(inputs[i]), loc, cof, mWidths[i], mHeights[i], mNumcls, mMaxdet, mStrides[i], anchors, count);
}
hipFree(count);
return 0;
}
size_t YoloDetectLayer::getSerializationSize() const
{
return sizeof(int) * 12 + mAnchors.count * sizeof(float);
}
void YoloDetectLayer::serialize(void* buffer) const
{
char *d = reinterpret_cast<char*>(buffer), *a = d;
write(d, mNumcls);
write(d, mMaxdet);
CUASSERT(hipMemcpy(d, mHeights, NUM_HEAD * sizeof(int), hipMemcpyHostToHost));
d += NUM_HEAD * sizeof(int);
CUASSERT(hipMemcpy(d, mWidths, NUM_HEAD * sizeof(int), hipMemcpyHostToHost));
d += NUM_HEAD * sizeof(int);
CUASSERT(hipMemcpy(d, mStrides, NUM_HEAD * sizeof(int), hipMemcpyHostToHost));
d += NUM_HEAD * sizeof(int);
write(d, (int) mAnchors.count);
serializeFromDevice(d, mAnchors);
ASSERT(d == a + getSerializationSize());
}
bool YoloDetectLayer::supportsFormat(DataType type, PluginFormat format) const
{
return (type == DataType::kFLOAT && format == PluginFormat::kNCHW);
}
Weights YoloDetectLayer::copyToDevice(const void* hostData, size_t count)
{
void* deviceData;
CUASSERT(hipMalloc(&deviceData, count * sizeof(float)));
CUASSERT(hipMemcpy(deviceData, hostData, count * sizeof(float), hipMemcpyHostToDevice));
return Weights{DataType::kFLOAT, deviceData, int64_t(count)};
}
void YoloDetectLayer::serializeFromDevice(char*& hostBuffer, Weights deviceWeights) const
{
CUASSERT(hipMemcpy(hostBuffer, deviceWeights.values, deviceWeights.count * sizeof(float), hipMemcpyDeviceToHost));
hostBuffer += deviceWeights.count * sizeof(float);
}
Weights YoloDetectLayer::deserializeToDevice(const char*& hostBuffer, size_t count)
{
Weights w = copyToDevice(hostBuffer, count);
hostBuffer += count * sizeof(float);
return w;
}
const char* YoloDetectLayer::getPluginType() const
{
return Yolo_PLUGIN_NAME;
}
const char* YoloDetectLayer::getPluginVersion() const
{
return Yolo_PLUGIN_VERSION;
}
void YoloDetectLayer::terminate() {
if (count)
{
hipFree(count);
count = nullptr;
}
}
void YoloDetectLayer::destroy()
{
delete this;
}
IPluginV2Ext* YoloDetectLayer::clone() const
{
IPluginV2Ext* plugin = new YoloDetectLayer(*this);
plugin->setPluginNamespace(mPluginNamespace.c_str());
return plugin;
}
// Set plugin namespace
void YoloDetectLayer::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloDetectLayer::getPluginNamespace() const
{
return mPluginNamespace.c_str();
}
// Return the DataType of the plugin output at the requested index.
DataType YoloDetectLayer::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
// Only DataType::kFLOAT is supported by the plugin layer
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloDetectLayer::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloDetectLayer::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
// Configure the layer with input and output data types.
// inputDims: input Dimensions for the plugin layer
// nInputs : Number of inputs to the plugin layer
// outputDims: output Dimensions from the plugin layer
// nOutputs: number of outputs from the plugin layer
// type: DataType configuration for the plugin layer
// format: format NCHW, NHWC etc
// maxbatchSize: maximum batch size for the plugin layer
void YoloDetectLayer::configurePlugin(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs,
const DataType* inputTypes, const DataType* outputTypes, const bool* inputIsBroadcast,
const bool* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize)
{
ASSERT(*inputTypes == DataType::kFLOAT && floatFormat == PluginFormat::kNCHW);
}
// Attach the plugin object to an execution context and grant the plugin access to some context resources.
void YoloDetectLayer::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloDetectLayer::detachFromContext() {}
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.emplace_back(PluginField("num_cls", nullptr, PluginFieldType::kINT32, 1));
mPluginAttributes.emplace_back(PluginField("max_det", nullptr, PluginFieldType::kINT32, 1));
mPluginAttributes.emplace_back(PluginField("heights", nullptr, PluginFieldType::kINT32, 3));
mPluginAttributes.emplace_back(PluginField("widths", nullptr, PluginFieldType::kINT32, 3));
mPluginAttributes.emplace_back(PluginField("strides", nullptr, PluginFieldType::kINT32, 3));
mPluginAttributes.emplace_back(PluginField("anchors", nullptr, PluginFieldType::kFLOAT32, 18));
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return Yolo_PLUGIN_NAME;
}
const char* YoloPluginCreator::getPluginVersion() const
{
return Yolo_PLUGIN_VERSION;
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2Ext* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
int num_cls, max_det;
std::vector<int> heights, widths;
std::vector<int> strides;
std::vector<float> anchors;
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; ++i)
{
const char* attrName = fields[i].name;
if (!strcmp(attrName, "num_cls"))
{
ASSERT(fields[i].type == PluginFieldType::kINT32);
num_cls = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "max_det"))
{
ASSERT(fields[i].type == PluginFieldType::kINT32);
max_det = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "heights"))
{
ASSERT(fields[i].type == PluginFieldType::kINT32);
int size = fields[i].length;
heights.reserve(size);
const auto* w = static_cast<const int*>(fields[i].data);
for (int j = 0; j < size; j++)
{
heights.push_back(*w);
w++;
}
}
else if (!strcmp(attrName, "widths"))
{
ASSERT(fields[i].type == PluginFieldType::kINT32);
int size = fields[i].length;
widths.reserve(size);
const auto* w = static_cast<const int*>(fields[i].data);
for (int j = 0; j < size; j++)
{
widths.push_back(*w);
w++;
}
}
else if (!strcmp(attrName, "strides"))
{
ASSERT(fields[i].type == PluginFieldType::kINT32);
int size = fields[i].length;
strides.reserve(size);
const auto* w = static_cast<const int*>(fields[i].data);
for (int j = 0; j < size; j++)
{
strides.push_back(*w);
w++;
}
}
else if (!strcmp(attrName, "anchors"))
{
ASSERT(fields[i].type == PluginFieldType::kFLOAT32);
int size = fields[i].length;
anchors.reserve(size);
const auto* w = static_cast<const float*>(fields[i].data);
for (int j = 0; j < size; j++)
{
anchors.push_back(*w);
w++;
}
}
}
Weights mAnchors{DataType::kFLOAT, anchors.data(), (int64_t) anchors.size()};
YoloDetectLayer* obj = new YoloDetectLayer(num_cls, max_det,
heights, widths,
strides, &mAnchors);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2Ext* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call YoloDetectLayer::destroy()
YoloDetectLayer* obj = new YoloDetectLayer(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
|
2d94601690b1f588858b5bf64538f1b68aa915b7.cu
|
#include "yoloPlugin.h"
#include <iostream>
#define NUM_HEAD 3
#define NUM_ANCH 3
#define CONF_THRES 0.6f
using namespace nvinfer1;
using nvinfer1::plugin::YoloDetectLayer;
using nvinfer1::plugin::YoloPluginCreator;
namespace
{
const char* Yolo_PLUGIN_VERSION{"1"};
const char* Yolo_PLUGIN_NAME{"Yolo_TRT"};
} // namespace
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
#define CHECK_CUDA(call) \
do \
{ \
cudaError_t status = call; \
if (status != cudaSuccess) \
{ \
return status; \
} \
} while (0)
const int CUDA_NUM_THREADS = 512;
dim3 GET_BLOCKS_(uint n)
{
uint k = (n - 1) /CUDA_NUM_THREADS + 1;
uint x = k ;
uint y = 1 ;
if (x > 65535 )
{
x = ceil(sqrt(x));
y = (n - 1 )/(x*CUDA_NUM_THREADS) + 1;
}
dim3 d = {x,y,1} ;
return d;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
// Parameterized constructor
YoloDetectLayer::YoloDetectLayer(
int num_cls,
int max_det,
std::vector<int> h,
std::vector<int> w,
std::vector<int> strides, //TODO
const Weights* anchors):
mNumcls(num_cls), mMaxdet(max_det){
for (int i=0; i<NUM_HEAD; i++){
mHeights[i] = h[i];
mWidths[i] = w[i];
mStrides[i] = strides[i];
}
mAnchors = copyToDevice(anchors[0].values, anchors[0].count);
}
YoloDetectLayer::YoloDetectLayer(const void* buffer, size_t length)
{
const char* d = static_cast<const char*>(buffer);
const char* a = d;
mNumcls = read<int>(d);
mMaxdet = read<int>(d);
CUASSERT(cudaMemcpy(mHeights, d, NUM_HEAD * sizeof(int), cudaMemcpyHostToHost));
d += NUM_HEAD * sizeof(int);
CUASSERT(cudaMemcpy(mWidths, d, NUM_HEAD * sizeof(int), cudaMemcpyHostToHost));
d += NUM_HEAD * sizeof(int);
CUASSERT(cudaMemcpy(mStrides, d, NUM_HEAD * sizeof(int), cudaMemcpyHostToHost));
d += NUM_HEAD * sizeof(int);
int count = read<int>(d);
mAnchors = deserializeToDevice(d, count);
ASSERT(d == a + length);
}
int YoloDetectLayer::getNbOutputs() const
{
// Plugin layer has 2 outputs
return 2;
}
int YoloDetectLayer::initialize()
{
return STATUS_SUCCESS;
}
Dims YoloDetectLayer::getOutputDimensions(int index, const Dims* inputs, int nbInputs)
{
ASSERT(index == 0 || index == 1 || index == 2);
ASSERT(nbInputs == 3);
if (index==0) return Dims3(mMaxdet, 1, 4);
else return Dims2(mMaxdet, mNumcls);
}
size_t YoloDetectLayer::getWorkspaceSize(int maxBatchSize) const
{
return 0;
}
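// Reshape implements a YOLOv5-style decode, assuming the head outputs are
// already sigmoid activations: box centers are (2*p - 0.5 + cell) * stride,
// box sizes are (2*p)^2 * anchor, detections above CONF_THRES are compacted
// into loc/cof with an atomic counter, and the corner coordinates are
// normalized by the input resolution (w * stride, h * stride).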
__global__ void Reshape(const float *input, float *loc, float *cof, int w, int h,
int numClass, int mMaxdet, int stride, const float *anchors, int* countAddress)
{
CUDA_KERNEL_LOOP(idx, h*w*NUM_ANCH)
{
int mapSize = w * h;
int anchorPos = idx / mapSize;
int mapPos = idx % mapSize;
int infoLen = 5 + numClass;
if (input[(anchorPos*infoLen+4)*mapSize+mapPos]<CONF_THRES) continue;
int count = (int)atomicAdd(countAddress, 1);
if (count >= mMaxdet-1) return;
for (int i = 0; i < numClass; ++i)
{
cof[numClass * count + i] = input[(anchorPos*infoLen+i+5)*mapSize+mapPos];
}
int row = mapPos / w;
int col = mapPos % w;
float ax, ay, aw, ah;
ax = (col - 0.5f + 2.0f * input[(anchorPos*infoLen)*mapSize+mapPos]) * stride;
ay = (row - 0.5f + 2.0f * input[(anchorPos*infoLen+1)*mapSize+mapPos]) * stride;
aw = 2.0f * input[(anchorPos*infoLen+2)*mapSize+mapPos];
ah = 2.0f * input[(anchorPos*infoLen+3)*mapSize+mapPos];
aw = aw * aw * anchors[anchorPos*2];
ah = ah * ah * anchors[anchorPos*2+1];
loc[4 * count] = (ax - aw/2)/w/stride;
loc[4 * count+1] = (ay - ah/2)/h/stride;
loc[4 * count+2] = (ax + aw/2)/w/stride;
loc[4 * count+3] = (ay + ah/2)/h/stride;
}
}
int YoloDetectLayer::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream)
{
float* loc = static_cast<float *>(outputs[0]);
float* cof = static_cast<float *>(outputs[1]);
CHECK_CUDA(cudaMalloc((void**)&count, sizeof(int)));
CUASSERT(cudaMemset(count, 0, sizeof(int)));
CUASSERT(cudaMemset(outputs[0], 0, mMaxdet*4*sizeof(float)));
CUASSERT(cudaMemset(outputs[1], 0, mMaxdet*mNumcls*sizeof(float)));
for (int i=0; i<NUM_HEAD; i++)
{
const float* anchors = static_cast<const float *>(mAnchors.values) + 2 * NUM_ANCH * i;
Reshape <<< GET_BLOCKS_(mHeights[i]*mWidths[i]*NUM_ANCH), CUDA_NUM_THREADS, 0, stream >>>
(static_cast<const float *>(inputs[i]), loc, cof, mWidths[i], mHeights[i], mNumcls, mMaxdet, mStrides[i], anchors, count);
}
cudaFree(count);
count = nullptr; // avoid terminate() freeing a dangling pointer later
return 0;
}
size_t YoloDetectLayer::getSerializationSize() const
{
return sizeof(int) * 12 + mAnchors.count * sizeof(float);
}
void YoloDetectLayer::serialize(void* buffer) const
{
char *d = reinterpret_cast<char*>(buffer), *a = d;
write(d, mNumcls);
write(d, mMaxdet);
CUASSERT(cudaMemcpy(d, mHeights, NUM_HEAD * sizeof(int), cudaMemcpyHostToHost));
d += NUM_HEAD * sizeof(int);
CUASSERT(cudaMemcpy(d, mWidths, NUM_HEAD * sizeof(int), cudaMemcpyHostToHost));
d += NUM_HEAD * sizeof(int);
CUASSERT(cudaMemcpy(d, mStrides, NUM_HEAD * sizeof(int), cudaMemcpyHostToHost));
d += NUM_HEAD * sizeof(int);
write(d, (int) mAnchors.count);
serializeFromDevice(d, mAnchors);
ASSERT(d == a + getSerializationSize());
}
bool YoloDetectLayer::supportsFormat(DataType type, PluginFormat format) const
{
return (type == DataType::kFLOAT && format == PluginFormat::kNCHW);
}
Weights YoloDetectLayer::copyToDevice(const void* hostData, size_t count)
{
void* deviceData;
CUASSERT(cudaMalloc(&deviceData, count * sizeof(float)));
CUASSERT(cudaMemcpy(deviceData, hostData, count * sizeof(float), cudaMemcpyHostToDevice));
return Weights{DataType::kFLOAT, deviceData, int64_t(count)};
}
void YoloDetectLayer::serializeFromDevice(char*& hostBuffer, Weights deviceWeights) const
{
CUASSERT(cudaMemcpy(hostBuffer, deviceWeights.values, deviceWeights.count * sizeof(float), cudaMemcpyDeviceToHost));
hostBuffer += deviceWeights.count * sizeof(float);
}
Weights YoloDetectLayer::deserializeToDevice(const char*& hostBuffer, size_t count)
{
Weights w = copyToDevice(hostBuffer, count);
hostBuffer += count * sizeof(float);
return w;
}
const char* YoloDetectLayer::getPluginType() const
{
return Yolo_PLUGIN_NAME;
}
const char* YoloDetectLayer::getPluginVersion() const
{
return Yolo_PLUGIN_VERSION;
}
void YoloDetectLayer::terminate() {
if (count)
{
cudaFree(count);
count = nullptr;
}
}
void YoloDetectLayer::destroy()
{
delete this;
}
IPluginV2Ext* YoloDetectLayer::clone() const
{
IPluginV2Ext* plugin = new YoloDetectLayer(*this);
plugin->setPluginNamespace(mPluginNamespace.c_str());
return plugin;
}
// Set plugin namespace
void YoloDetectLayer::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloDetectLayer::getPluginNamespace() const
{
return mPluginNamespace.c_str();
}
// Return the DataType of the plugin output at the requested index.
DataType YoloDetectLayer::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
// Only DataType::kFLOAT is acceptable by the plugin layer
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloDetectLayer::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloDetectLayer::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
// Configure the layer with input and output data types.
// inputDims: input Dimensions for the plugin layer
// nInputs: number of inputs to the plugin layer
// outputDims: output Dimensions from the plugin layer
// nOutputs: number of outputs from the plugin layer
// type: DataType configuration for the plugin layer
// format: format NCHW, NHWC etc
// maxBatchSize: maximum batch size for the plugin layer
void YoloDetectLayer::configurePlugin(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs,
const DataType* inputTypes, const DataType* outputTypes, const bool* inputIsBroadcast,
const bool* outputIsBroadcast, PluginFormat floatFormat, int maxBatchSize)
{
ASSERT(*inputTypes == DataType::kFLOAT && floatFormat == PluginFormat::kNCHW);
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloDetectLayer::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloDetectLayer::detachFromContext() {}
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.emplace_back(PluginField("num_cls", nullptr, PluginFieldType::kINT32, 1));
mPluginAttributes.emplace_back(PluginField("max_det", nullptr, PluginFieldType::kINT32, 1));
mPluginAttributes.emplace_back(PluginField("heights", nullptr, PluginFieldType::kINT32, 3));
mPluginAttributes.emplace_back(PluginField("widths", nullptr, PluginFieldType::kINT32, 3));
mPluginAttributes.emplace_back(PluginField("strides", nullptr, PluginFieldType::kINT32, 3));
mPluginAttributes.emplace_back(PluginField("anchors", nullptr, PluginFieldType::kFLOAT32, 18));
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return Yolo_PLUGIN_NAME;
}
const char* YoloPluginCreator::getPluginVersion() const
{
return Yolo_PLUGIN_VERSION;
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2Ext* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
int num_cls, max_det;
std::vector<int> heights, widths;
std::vector<int> strides;
std::vector<float> anchors;
const PluginField* fields = fc->fields;
for (int i = 0; i < fc->nbFields; ++i)
{
const char* attrName = fields[i].name;
if (!strcmp(attrName, "num_cls"))
{
ASSERT(fields[i].type == PluginFieldType::kINT32);
num_cls = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "max_det"))
{
ASSERT(fields[i].type == PluginFieldType::kINT32);
max_det = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "heights"))
{
ASSERT(fields[i].type == PluginFieldType::kINT32);
int size = fields[i].length;
heights.reserve(size);
const auto* w = static_cast<const int*>(fields[i].data);
for (int j = 0; j < size; j++)
{
heights.push_back(*w);
w++;
}
}
else if (!strcmp(attrName, "widths"))
{
ASSERT(fields[i].type == PluginFieldType::kINT32);
int size = fields[i].length;
widths.reserve(size);
const auto* w = static_cast<const int*>(fields[i].data);
for (int j = 0; j < size; j++)
{
widths.push_back(*w);
w++;
}
}
else if (!strcmp(attrName, "strides"))
{
ASSERT(fields[i].type == PluginFieldType::kINT32);
int size = fields[i].length;
strides.reserve(size);
const auto* w = static_cast<const int*>(fields[i].data);
for (int j = 0; j < size; j++)
{
strides.push_back(*w);
w++;
}
}
else if (!strcmp(attrName, "anchors"))
{
ASSERT(fields[i].type == PluginFieldType::kFLOAT32);
int size = fields[i].length;
anchors.reserve(size);
const auto* w = static_cast<const float*>(fields[i].data);
for (int j = 0; j < size; j++)
{
anchors.push_back(*w);
w++;
}
}
}
Weights mAnchors{DataType::kFLOAT, anchors.data(), (int64_t) anchors.size()};
YoloDetectLayer* obj = new YoloDetectLayer(num_cls, max_det,
heights, widths,
strides, &mAnchors);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2Ext* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call Normalize::destroy()
YoloDetectLayer* obj = new YoloDetectLayer(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
|
2f014d9c879e73ebd5c0c3b98e167cbbf5972332.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, MEMORY
// CORRUPTION
// srad kernel
__global__ void srad2(fp d_lambda, int d_Nr, int d_Nc, long d_Ne, int *d_iN,
int *d_iS, int *d_jE, int *d_jW, fp *d_dN, fp *d_dS,
fp *d_dE, fp *d_dW,
bool *d_c,
fp *d_I) {
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx * NUMBER_THREADS + tx; // more threads than actual elements !!!
int row; // row, y position
int col; // column, x position
// variables
bool d_cN, d_cS, d_cW, d_cE;
fp d_D;
// figure out row/col location in new matrix
row = (ei + 1) % d_Nr - 1; // (0-n) row
col = (ei + 1) / d_Nr + 1 - 1; // (0-n) column
if ((ei + 1) % d_Nr == 0) {
row = d_Nr - 1;
col = col - 1;
}
if (ei < d_Ne) { // make sure that only threads matching jobs run
// diffusion coefficients
d_cN = d_c[ei]; // north diffusion coefficient
d_cS = d_c[d_iS[row] + d_Nr * col]; // south diffusion coefficient
d_cW = d_c[ei]; // west diffusion coefficient
d_cE = d_c[row + d_Nr * d_jE[col]]; // east diffusion coefficient
// divergence (equ 58)
d_D = d_cN * d_dN[ei] + d_cS * d_dS[ei] + d_cW * d_dW[ei] +
d_cE * d_dE[ei]; // divergence
// image update (equ 61) (every element of IMAGE)
d_I[ei] =
d_I[ei] +
0.25 * d_lambda *
d_D; // updates image (based on input time step and divergence)
}
}
|
2f014d9c879e73ebd5c0c3b98e167cbbf5972332.cu
|
// BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, MEMORY
// CORRUPTION
// srad kernel
__global__ void srad2(fp d_lambda, int d_Nr, int d_Nc, long d_Ne, int *d_iN,
int *d_iS, int *d_jE, int *d_jW, fp *d_dN, fp *d_dS,
fp *d_dE, fp *d_dW,
bool *d_c,
fp *d_I) {
// indexes
int bx = blockIdx.x; // get current horizontal block index
int tx = threadIdx.x; // get current horizontal thread index
int ei = bx * NUMBER_THREADS + tx; // more threads than actual elements !!!
int row; // row, y position
int col; // column, x position
// variables
bool d_cN, d_cS, d_cW, d_cE;
fp d_D;
// figure out row/col location in new matrix
row = (ei + 1) % d_Nr - 1; // (0-n) row
col = (ei + 1) / d_Nr + 1 - 1; // (0-n) column
if ((ei + 1) % d_Nr == 0) {
row = d_Nr - 1;
col = col - 1;
}
if (ei < d_Ne) { // make sure that only threads matching jobs run
// diffusion coefficients
d_cN = d_c[ei]; // north diffusion coefficient
d_cS = d_c[d_iS[row] + d_Nr * col]; // south diffusion coefficient
d_cW = d_c[ei]; // west diffusion coefficient
d_cE = d_c[row + d_Nr * d_jE[col]]; // east diffusion coefficient
// divergence (equ 58)
d_D = d_cN * d_dN[ei] + d_cS * d_dS[ei] + d_cW * d_dW[ei] +
d_cE * d_dE[ei]; // divergence
// image update (equ 61) (every element of IMAGE)
d_I[ei] =
d_I[ei] +
0.25 * d_lambda *
d_D; // updates image (based on input time step and divergence)
}
}
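// Illustrative launch sketch (an assumption for exposition; the real launch
// lives in the host code that uses this kernel): srad2 indexes the Nr x Nc
// image with a flat 1D thread id, so a grid of ceil(Ne / NUMBER_THREADS)
// blocks of NUMBER_THREADS threads covers the image, and threads with
// ei >= d_Ne simply fall through the guard above.
static void launch_srad2_sketch(fp lambda, int Nr, int Nc, long Ne, int *d_iN,
                                int *d_iS, int *d_jE, int *d_jW, fp *d_dN,
                                fp *d_dS, fp *d_dE, fp *d_dW, bool *d_c,
                                fp *d_I) {
  int blocks = (int)((Ne + NUMBER_THREADS - 1) / NUMBER_THREADS);
  srad2<<<blocks, NUMBER_THREADS>>>(lambda, Nr, Nc, Ne, d_iN, d_iS, d_jE, d_jW,
                                    d_dN, d_dS, d_dE, d_dW, d_c, d_I);
}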
|
70921e4621fd33c2f40a8ae78ee056967638c216.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include <ops/declarable/helpers/compare_elem.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static _CUDA_G void comparator(void *vx, const Nd4jLong *xShapeInfo, Nd4jLong length, const bool isStrict, void *reductionBuffer, bool *z) {
auto x = reinterpret_cast<T*>(vx);
auto reduction = reinterpret_cast<uint32_t*>(reductionBuffer);
extern __shared__ uint32_t shared[];
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
shared[threadIdx.x] = 0;
// each thread will compare 2 elements: E and E+1
for (int e = tid; e < length - 1; e += blockDim.x * gridDim.x) {
auto val0 = x[shape::getIndexOffset(e, xShapeInfo, length)];
auto val1 = x[shape::getIndexOffset(e+1, xShapeInfo, length)];
bool v = false;
if (isStrict)
v = val1 > val0;
else
v = val1 >= val0;
// store comparison result in shared memory
shared[threadIdx.x] += v ? 0 : 1;
}
__syncthreads();
// aggregate sums in shared memory
for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads)
shared[threadIdx.x] += shared[threadIdx.x + activeThreads];
__syncthreads();
}
// combine the per-block sums across the grid if we have more than 1 block
if (gridDim.x > 1) {
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
__shared__ bool amLast;
tid = threadIdx.x;
if (threadIdx.x == 0)
reduction[blockIdx.x] = shared[0];
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
shared[threadIdx.x] = 0;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x)
shared[threadIdx.x] += reduction[i];
__syncthreads();
for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads)
shared[threadIdx.x] += shared[threadIdx.x + activeThreads];
__syncthreads();
}
__syncthreads();
if (threadIdx.x == 0) {
z[0] = shared[0] == 0;
}
}
}
else {
// if we have only 1 block, we just store results right away
if (threadIdx.x == 0) {
auto tc = reinterpret_cast<unsigned int*>(reductionBuffer);
tc[16384] = 0;
z[0] = shared[0] == 0;
}
}
}
template<typename T>
static void _compare_elem(nd4j::LaunchContext * context, NDArray *input, bool isStrictlyIncreasing, bool& output) {
auto z = NDArrayFactory::create<bool>(false, context);
const int numThreads = 256;
const int numBlocks = nd4j::math::nd4j_min<int>(128, nd4j::math::nd4j_max<int>(1, input->lengthOf() / numThreads));
hipLaunchKernelGGL(( comparator<T>), dim3(numBlocks), dim3(numThreads), numThreads * 4 + 1024, *context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), input->lengthOf(), isStrictlyIncreasing, context->getReductionPointer(), reinterpret_cast<bool *>(z.specialBuffer()));
z.tickWriteDevice();
nd4j::DebugHelper::checkErrorCode(context->getCudaStream(), "is_strictly_increasing");
output = z.e<bool>(0);
}
void compare_elem(nd4j::LaunchContext * context, NDArray *input, bool isStrictlyIncreasing, bool& output) {
auto xType = input->dataType();
input->syncToDevice();
BUILD_SINGLE_SELECTOR(xType, _compare_elem, (context, input, isStrictlyIncreasing, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void _compare_elem, (nd4j::LaunchContext * context, NDArray *A, bool isStrictlyIncreasing, bool& output);, LIBND4J_TYPES);
}
}
}
|
70921e4621fd33c2f40a8ae78ee056967638c216.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include <ops/declarable/helpers/compare_elem.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static _CUDA_G void comparator(void *vx, const Nd4jLong *xShapeInfo, Nd4jLong length, const bool isStrict, void *reductionBuffer, bool *z) {
auto x = reinterpret_cast<T*>(vx);
auto reduction = reinterpret_cast<uint32_t*>(reductionBuffer);
extern __shared__ uint32_t shared[];
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
shared[threadIdx.x] = 0;
// each thread will compare 2 elements: E and E+1
for (int e = tid; e < length - 1; e += blockDim.x * gridDim.x) {
auto val0 = x[shape::getIndexOffset(e, xShapeInfo, length)];
auto val1 = x[shape::getIndexOffset(e+1, xShapeInfo, length)];
bool v = false;
if (isStrict)
v = val1 > val0;
else
v = val1 >= val0;
// store comparison result in shared memory
shared[threadIdx.x] += v ? 0 : 1;
}
__syncthreads();
// aggregate sums in shared memory
for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads)
shared[threadIdx.x] += shared[threadIdx.x + activeThreads];
__syncthreads();
}
// combine the per-block sums across the grid if we have more than 1 block
if (gridDim.x > 1) {
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
__shared__ bool amLast;
tid = threadIdx.x;
if (threadIdx.x == 0)
reduction[blockIdx.x] = shared[0];
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
shared[threadIdx.x] = 0;
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x)
shared[threadIdx.x] += reduction[i];
__syncthreads();
for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads)
shared[threadIdx.x] += shared[threadIdx.x + activeThreads];
__syncthreads();
}
__syncthreads();
if (threadIdx.x == 0) {
z[0] = shared[0] == 0;
}
}
}
else {
// if we have only 1 block, we just store results right away
if (threadIdx.x == 0) {
auto tc = reinterpret_cast<unsigned int*>(reductionBuffer);
tc[16384] = 0;
z[0] = shared[0] == 0;
}
}
}
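// Minimal standalone sketch (an assumption, not part of libnd4j): the same
// shared-memory tree reduction used inside comparator above, isolated. Each
// block sums a stripe of the input into partials[blockIdx.x]; blockDim.x must
// be a power of two and the launch must pass blockDim.x * sizeof(unsigned int)
// bytes of dynamic shared memory. The kernel above additionally folds the
// per-block partials with an atomicInc ticket so the last block writes the
// final flag.
static _CUDA_G void blockSumSketch(const unsigned int* in, unsigned int* partials, Nd4jLong n) {
    extern __shared__ unsigned int blockSumShared[];
    unsigned int acc = 0;
    for (Nd4jLong i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x)
        acc += in[i];
    blockSumShared[threadIdx.x] = acc;
    __syncthreads();
    for (unsigned int active = blockDim.x / 2; active > 0; active /= 2) {
        if (threadIdx.x < active)
            blockSumShared[threadIdx.x] += blockSumShared[threadIdx.x + active];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        partials[blockIdx.x] = blockSumShared[0];
}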
template<typename T>
static void _compare_elem(nd4j::LaunchContext * context, NDArray *input, bool isStrictlyIncreasing, bool& output) {
auto z = NDArrayFactory::create<bool>(false, context);
const int numThreads = 256;
const int numBlocks = nd4j::math::nd4j_min<int>(128, nd4j::math::nd4j_max<int>(1, input->lengthOf() / numThreads));
comparator<T><<<numBlocks, numThreads, numThreads * 4 + 1024, *context->getCudaStream()>>>(input->specialBuffer(), input->specialShapeInfo(), input->lengthOf(), isStrictlyIncreasing, context->getReductionPointer(), reinterpret_cast<bool *>(z.specialBuffer()));
z.tickWriteDevice();
nd4j::DebugHelper::checkErrorCode(context->getCudaStream(), "is_strictly_increasing");
output = z.e<bool>(0);
}
void compare_elem(nd4j::LaunchContext * context, NDArray *input, bool isStrictlyIncreasing, bool& output) {
auto xType = input->dataType();
input->syncToDevice();
BUILD_SINGLE_SELECTOR(xType, _compare_elem, (context, input, isStrictlyIncreasing, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void _compare_elem, (nd4j::LaunchContext * context, NDArray *A, bool isStrictlyIncreasing, bool& output);, LIBND4J_TYPES);
}
}
}
|
0303d15d610cd50d7abde6c74fb5d6b05f6dcf1b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <hiprand/hiprand.h>
#include <assert.h>
#include <unistd.h>
#include <rocblas.h>
#include <iostream>
#include <complex.h>
#include <math.h>
#include <hip/hip_complex.h>
#include <hip/hip_runtime.h>
#include "cublas_beamformer.h"
using namespace std;
// Fill the array A(nr_rows_A, nr_cols_A) with random numbers on GPU
void GPU_fill(hipComplex *A, int nr_rows_A, int nr_cols_A) {
hipComplex *G;
G = new hipComplex[nr_rows_A*nr_cols_A];
for(int i = 0; i < nr_rows_A*nr_cols_A; ++i){
G[i].x = (i + 1)%(nr_rows_A*nr_cols_A/(N_BIN));
G[i].y = (i + 1)%(nr_rows_A*nr_cols_A/(N_BIN));
}
hipMemcpy(A,G,nr_rows_A * nr_cols_A * sizeof(hipComplex),hipMemcpyHostToDevice);
delete[] G;
}
void GPU_fill2(hipComplex *A, int nr_rows_A, int nr_cols_A) {
hipComplex *G;
G = new hipComplex[nr_rows_A*nr_cols_A];
for(int i = 0; i < nr_rows_A*nr_cols_A; ++i){
G[i].x = i%(nr_rows_A*nr_cols_A/(N_BIN));
G[i].y = i%(nr_rows_A*nr_cols_A/(N_BIN));
}
hipMemcpy(A,G,nr_rows_A * nr_cols_A * sizeof(hipComplex),hipMemcpyHostToDevice);
delete[] G;
}
void print_matrix(const hipComplex *A, int nr_rows_A, int nr_cols_A, int nr_sheets_A) {
for(int i = 0; i < nr_rows_A; ++i){
for(int j = 0; j < nr_cols_A; ++j){
for(int k = 0; k < nr_sheets_A; ++k){
// cout << A[j * nr_rows_A + i].x << "+" << A[j * nr_rows_A + i].y << "i" <<" ";
printf("%i,%i,%i: %e + %e i\n",i,j,k,A[k*nr_rows_A*nr_cols_A + j * nr_rows_A + i].x, A[k*nr_rows_A*nr_cols_A + j * nr_rows_A + i].y);
}
}
// cout << endl;
}
// cout << endl;
// for(int i = 0; i < nr_rows_A*nr_cols_A; ++i){
// printf("%i,: %e + %e i\n",i,A[i].x, A[i].y);
// }
}
void print_matrix2(const float *A, int nr_rows_A, int nr_cols_A) {
// for(int j = 0; j < nr_cols_A; ++j){
// for(int i = 0; i < nr_rows_A; ++i){
// //cout << A[j * nr_rows_A + i].x << "+" << A[j * nr_rows_A + i].y << "i" <<" ";
// printf("%i,%i: %e\n",i,j,A[j * nr_rows_A + i]);
// }
// cout << endl;
// }
// cout << endl;
for(int i = 0; i < nr_rows_A*nr_cols_A; ++i){
printf("%i,: %e\n",i,A[i]);
}
}
static hipComplex * d_weights = NULL;
void update_weights(char * filename){
char weight_filename[128];
strcpy(weight_filename, filename);
FILE * weights;
float * bf_weights;
float complex * weights_dc;
float complex * weights_dc_n;
// Allocate heap memory for file data
bf_weights = (float *)malloc(2*N_WEIGHTS*sizeof(float));
weights_dc = (float complex *)malloc(N_WEIGHTS*sizeof(float complex));
weights_dc_n = (float complex *)malloc(N_WEIGHTS*sizeof(float complex));
weights = fopen(weight_filename, "r");
int j;
if (weights != NULL) {
fread(bf_weights, sizeof(float), 2*N_WEIGHTS, weights);
// Convert to complex numbers (do a conjugate at the same time)
for(j = 0; j < N_WEIGHTS; j++){
weights_dc_n[j] = bf_weights[2*j] - bf_weights[(2*j)+1]*I;
}
// Transpose the weights
int m,n;
float complex transpose[N_BEAM][N_ELE*N_BIN];
for(m=0;m<N_BEAM;m++){
for(n=0;n<N_ELE*N_BIN;n++){
transpose[m][n] = weights_dc_n[m*N_ELE*N_BIN + n];
}
}
for(n=0;n<N_ELE*N_BIN;n++){
for(m=0;m<N_BEAM;m++){
weights_dc[n*N_BEAM+ m] = transpose[m][n];
}
}
fclose(weights);
}
free(bf_weights);
// Copy weights to device
hipMemcpy(d_weights, weights_dc, N_WEIGHTS*sizeof(hipComplex), hipMemcpyHostToDevice); //r_weights instead of weights_dc //*N_TIME
free(weights_dc);
free(weights_dc_n);
}
static hipComplex **d_arr_A = NULL; static hipComplex **d_arr_B = NULL; static hipComplex **d_arr_C = NULL;
static hipComplex * d_beamformed = NULL;
static hipComplex * d_data = NULL;
static hipComplex * d_data1 = NULL;
static float * d_outputs;
void init_beamformer(){
// Allocate memory for the weights, data, beamformer output, and sti output.
hipMalloc((void **)&d_weights, N_WEIGHTS*sizeof(hipComplex)); //*N_TIME
hipMalloc((void **)&d_data1, N_SAMP*sizeof(hipComplex));
hipMalloc((void **)&d_data, N_SAMP*sizeof(hipComplex));
hipError_t err_malloc = hipMalloc((void **)&d_beamformed, N_TBF*sizeof(hipComplex));
if (err_malloc != hipSuccess) {
printf("CUDA Error (cudaMalloc2): %s\n", hipGetErrorString(err_malloc));
}
hipMalloc((void **)&d_outputs, N_POL*(N_OUTPUTS*sizeof(float)/2));
// This is all memory allocated to arrays that are used by gemmBatched.
// Allocate 3 arrays on CPU
hipError_t cudaStat;
int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C;
nr_rows_A = N_BEAM;
nr_cols_A = N_ELE;
nr_rows_B = N_ELE;
nr_cols_B = N_TIME;
nr_rows_C = N_BEAM;
nr_cols_C = N_TIME;
// Allocate memory to host arrays.
const hipComplex **h_arr_A = 0; const hipComplex **h_arr_B = 0; hipComplex **h_arr_C = 0;
h_arr_A = (const hipComplex **)malloc(nr_rows_A * nr_cols_A *N_BIN*sizeof(const hipComplex*));
h_arr_B = (const hipComplex **)malloc(nr_rows_B * nr_cols_B *N_BIN*sizeof(const hipComplex*));
h_arr_C = (hipComplex **)malloc(nr_rows_C * nr_cols_C *N_BIN*sizeof(hipComplex*));
// Allocate memory for each batch in an array.
for(int i = 0; i < N_BIN; i++){
h_arr_A[i] = d_weights + i*nr_rows_A*nr_cols_A;
h_arr_B[i] = d_data + i*nr_rows_B*nr_cols_B;
h_arr_C[i] = d_beamformed + i*nr_rows_C*nr_cols_C;
}
// delete[] d_A;
// delete[] d_B;
// Allocate memory to arrays on device.
cudaStat = hipMalloc((void **)&d_arr_A,nr_rows_A * nr_cols_A * N_BIN * sizeof(hipComplex*));
assert(!cudaStat);
cudaStat = hipMalloc((void **)&d_arr_B,nr_rows_B * nr_cols_B * N_BIN * sizeof(hipComplex*));
assert(!cudaStat);
cudaStat = hipMalloc((void **)&d_arr_C,nr_rows_C * nr_cols_C * N_BIN * sizeof(hipComplex*));
assert(!cudaStat);
// Copy memory from host to device.
cudaStat = hipMemcpy(d_arr_A,h_arr_A,nr_rows_A * nr_cols_A * N_BIN * sizeof(hipComplex*),hipMemcpyHostToDevice);
assert(!cudaStat);
cudaStat = hipMemcpy(d_arr_B,h_arr_B,nr_rows_B * nr_cols_B * N_BIN * sizeof(hipComplex*),hipMemcpyHostToDevice);
assert(!cudaStat);
cudaStat = hipMemcpy(d_arr_C,h_arr_C,nr_rows_C * nr_cols_C * N_BIN * sizeof(hipComplex*),hipMemcpyHostToDevice);
assert(!cudaStat);
}
__global__
void data_restructure(hipComplex * data, hipComplex * data_restruc){
int e = threadIdx.x;
int t = blockIdx.x;
int f = blockIdx.y;
//Restructure data so that the frequency bin is the slowest moving index
data_restruc[f*N_TIME*N_ELE + t*N_ELE + e] = data[t*N_BIN*N_ELE + f*N_ELE + e];
}
void data_in(char * input_filename){
FILE * data;
// File data pointers
float * bf_data;
// Complex data pointers
float complex * data_dc;
// Allocate heap memory for file data
bf_data = (float *)malloc(2*N_SAMP*sizeof(float));
data_dc = (float complex *)malloc(N_SAMP*sizeof(float complex));
// Open files
data = fopen(input_filename, "r");
/*********************************************************
* Read in Data
*********************************************************/
if (data != NULL) {
fread(bf_data, sizeof(float), 2*N_SAMP, data);
int j;
// Make 'em complex!
for (j = 0; j < N_SAMP; j++) {
data_dc[j] = bf_data[2*j] + bf_data[(2*j)+1]*I;
}
// Specify grid and block dimensions
dim3 dimBlock_d(N_ELE, 1, 1);
dim3 dimGrid_d(N_TIME, N_BIN, 1);
hipComplex * d_data_in = d_data1;
hipComplex * d_data_out = d_data;
hipMemcpy(d_data_in, data_dc, N_SAMP*sizeof(hipComplex), hipMemcpyHostToDevice);
// Restructure data for hipblasCgemmBatched function.
hipLaunchKernelGGL(( data_restructure), dim3(dimGrid_d), dim3(dimBlock_d), 0, 0, d_data_in, d_data_out);
fclose(data);
}
free(bf_data);
free(data_dc);
}
void beamform(hipblasHandle_t handle) {
int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C;
nr_rows_A = N_BEAM;
nr_cols_A = N_ELE;
nr_rows_B = N_ELE;
nr_cols_B = N_TIME;
nr_rows_C = N_BEAM;
// Leading dimensions are always the rows of each matrix since the data is stored in a column-wise order.
int lda=nr_rows_A,ldb=nr_rows_B,ldc=nr_rows_C;
hipComplex alf;
hipComplex bet;
alf.x = 1;
alf.y = 0;
bet.x = 0;
bet.y = 0;
int batchCount = N_BIN; // There must be the same number of batches in each array.
hipblasStatus_t stat;
/*
This function performs a matrix multiplication of the data and the weights.
Weights - d_arr_A, Data - d_arr_B, and the output - d_arr_C.
*/
stat = hipblasCgemmBatched(
handle, // handle to the cuBLAS library context.
HIPBLAS_OP_N, // Operation on matrices within array A.
HIPBLAS_OP_N, // Operation on matrices within array B.
nr_rows_A, // Number of rows in matrix A and C.
nr_cols_B, // Number of columns in matrix B and C.
nr_cols_A, // Number of columns and rows in matrix A and B respectively.
&alf, // Scalar used for multiplication.
(const hipComplex **)d_arr_A, // Weight array of pointers.
lda, // Leading dimension of each batch or matrix in array A.
(const hipComplex **)d_arr_B, // Data array of pointers.
ldb, // Leading dimension of each batch or matrix in array B.
&bet, // Scalar used for multiplication.
(hipComplex **)d_arr_C, // Output array of pointers.
ldc, // Leading dimension of each batch or matrix in array C.
batchCount); // Number of batches in each array.
if(stat != HIPBLAS_STATUS_SUCCESS){
cerr << "hipblasCgemmBatched failed" << endl;
exit(1);
}
assert(!hipGetLastError());
//Free GPU memory
// hipFree(d_A);
// hipFree(d_B);
// hipFree(d_C);
// Destroy the handle
//hipblasDestroy(handle);
}
__global__
void sti_reduction(hipComplex * data_in, float * data_out) {
int f = blockIdx.x;
int b = blockIdx.y;
int t = threadIdx.x;
int s = blockIdx.z;
int h = sample_idx(s*N_TIME_STI + t,b,f); // Preprocessor macro used for the output of the beamformer. More detail can be seen in the header file. (First set of beams)
int h1 = sample_idx(s*N_TIME_STI + t,b+N_BEAM1,f); // Preprocessor macro used for the output of the beamformer. More detail can be seen in the header file. (Last set of beams)
// Temporary variables used for updating.
float beam_power1;
float beam_power2;
float cross_power1;
float cross_power2;
cuFloatComplex samp1;
cuFloatComplex samp2;
float scale = 1.0/N_TIME_STI; // Scale power by number of samples per STI window.
__shared__ cuFloatComplex reduced_array1[N_STI_BLOC];
__shared__ cuFloatComplex reduced_array[N_STI_BLOC];
if (t < N_TIME_STI) {
// X polarization (XX*).
samp1.x = data_in[h].x;
samp1.y = data_in[h].y;
beam_power1 = (samp1.x * samp1.x) + (samp1.y * samp1.y); // Beamformer output multiplied by its conjugate (absolute value squared).
reduced_array[t].x = beam_power1;
// Y polarization (YY*).
samp2.x = data_in[h1].x;
samp2.y = data_in[h1].y;
beam_power2 = (samp2.x * samp2.x) + (samp2.y * samp2.y); // Beamformer output multiplied by its conjugate (absolute value squared).
reduced_array[t].y = beam_power2;
// Cross polarization (XY*).
cross_power1 = (samp1.x * samp2.x) + (samp1.y * samp2.y); // Real part of cross polarization.
cross_power2 = (samp1.y * samp2.x) - (samp1.x * samp2.y); // Imaginary part of cross polarization.
reduced_array1[t].x = cross_power1;
reduced_array1[t].y = cross_power2;
}
else{
reduced_array[t].x = 0.0;
reduced_array[t].y = 0.0;
reduced_array1[t].x = 0.0;
reduced_array1[t].y = 0.0;
}
__syncthreads();
// Reduction is performed by splitting up the threads in each block and summing them all up.
// The number of threads in each block needs to be a power of two in order for the reduction to work. (No left over threads).
for(int k = blockDim.x/2; k>0; k>>=1){
if(t<k){
reduced_array[t].x += reduced_array[t+k].x;
reduced_array[t].y += reduced_array[t+k].y;
reduced_array1[t].x += reduced_array1[t+k].x;
reduced_array1[t].y += reduced_array1[t+k].y;
}
__syncthreads();
}
// After reduction is complete, assign each reduced value to its appropriate position in the output array.
if(t == 0){
data_out[output_idx(0,b,s,f)] = reduced_array[0].x*scale; // XX*.
data_out[output_idx(1,b,s,f)] = reduced_array[0].y*scale; // YY*.
data_out[output_idx(2,b,s,f)] = reduced_array1[0].x*scale; // XY* real.
data_out[output_idx(3,b,s,f)] = reduced_array1[0].y*scale; // XY* imaginary.
}
}
void run_beamformer(hipblasHandle_t handle, float * data_out){
// Specify grid and block dimensions
dim3 dimBlock(N_STI_BLOC, 1, 1);
dim3 dimGrid(N_BIN, N_BEAM1, N_STI);
printf("Starting beamformer\n");
// Call beamformer function containing hipblasCgemmBatched()
beamform(handle);
hipError_t err_code = hipGetLastError();
if (err_code != hipSuccess) {
printf("CUDA Error (beamform): %s\n", hipGetErrorString(err_code));
}
hipComplex * d_data_in = d_beamformed;
float * d_data_out = d_outputs;
printf("Starting sti_reduction\n");
// Call STI reduction kernel.
hipLaunchKernelGGL(( sti_reduction), dim3(dimGrid), dim3(dimBlock), 0, 0, d_data_in, d_data_out);
printf("Finishing sti_reduction\n");
err_code = hipGetLastError();
if (err_code != hipSuccess) {
printf("CUDA Error (sti_reduction): %s\n", hipGetErrorString(err_code));
}
// Copy output data from device to host.
hipMemcpy(data_out, d_data_out, N_POL*(N_OUTPUTS*sizeof(float)/2),hipMemcpyDeviceToHost);
hipFree(d_data);
hipFree(d_outputs);
}
|
0303d15d610cd50d7abde6c74fb5d6b05f6dcf1b.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <curand.h>
#include <assert.h>
#include <unistd.h>
#include <cublas_v2.h>
#include <iostream>
#include <complex.h>
#include <math.h>
#include <cuComplex.h>
#include <cuda_runtime.h>
#include "cublas_beamformer.h"
using namespace std;
// Fill the array A(nr_rows_A, nr_cols_A) with random numbers on GPU
void GPU_fill(cuComplex *A, int nr_rows_A, int nr_cols_A) {
cuComplex *G;
G = new cuComplex[nr_rows_A*nr_cols_A];
for(int i = 0; i < nr_rows_A*nr_cols_A; ++i){
G[i].x = (i + 1)%(nr_rows_A*nr_cols_A/(N_BIN));
G[i].y = (i + 1)%(nr_rows_A*nr_cols_A/(N_BIN));
}
cudaMemcpy(A,G,nr_rows_A * nr_cols_A * sizeof(cuComplex),cudaMemcpyHostToDevice);
delete[] G;
}
void GPU_fill2(cuComplex *A, int nr_rows_A, int nr_cols_A) {
cuComplex *G;
G = new cuComplex[nr_rows_A*nr_cols_A];
for(int i = 0; i < nr_rows_A*nr_cols_A; ++i){
G[i].x = i%(nr_rows_A*nr_cols_A/(N_BIN));
G[i].y = i%(nr_rows_A*nr_cols_A/(N_BIN));
}
cudaMemcpy(A,G,nr_rows_A * nr_cols_A * sizeof(cuComplex),cudaMemcpyHostToDevice);
delete[] G;
}
void print_matrix(const cuComplex *A, int nr_rows_A, int nr_cols_A, int nr_sheets_A) {
for(int i = 0; i < nr_rows_A; ++i){
for(int j = 0; j < nr_cols_A; ++j){
for(int k = 0; k < nr_sheets_A; ++k){
// cout << A[j * nr_rows_A + i].x << "+" << A[j * nr_rows_A + i].y << "i" <<" ";
printf("%i,%i,%i: %e + %e i\n",i,j,k,A[k*nr_rows_A*nr_cols_A + j * nr_rows_A + i].x, A[k*nr_rows_A*nr_cols_A + j * nr_rows_A + i].y);
}
}
// cout << endl;
}
// cout << endl;
// for(int i = 0; i < nr_rows_A*nr_cols_A; ++i){
// printf("%i,: %e + %e i\n",i,A[i].x, A[i].y);
// }
}
void print_matrix2(const float *A, int nr_rows_A, int nr_cols_A) {
// for(int j = 0; j < nr_cols_A; ++j){
// for(int i = 0; i < nr_rows_A; ++i){
// //cout << A[j * nr_rows_A + i].x << "+" << A[j * nr_rows_A + i].y << "i" <<" ";
// printf("%i,%i: %e\n",i,j,A[j * nr_rows_A + i]);
// }
// cout << endl;
// }
// cout << endl;
for(int i = 0; i < nr_rows_A*nr_cols_A; ++i){
printf("%i,: %e\n",i,A[i]);
}
}
static cuComplex * d_weights = NULL;
void update_weights(char * filename){
char weight_filename[128];
strcpy(weight_filename, filename);
FILE * weights;
float * bf_weights;
float complex * weights_dc;
float complex * weights_dc_n;
// Allocate heap memory for file data
bf_weights = (float *)malloc(2*N_WEIGHTS*sizeof(float));
weights_dc = (float complex *)malloc(N_WEIGHTS*sizeof(float complex));
weights_dc_n = (float complex *)malloc(N_WEIGHTS*sizeof(float complex));
weights = fopen(weight_filename, "r");
int j;
if (weights != NULL) {
fread(bf_weights, sizeof(float), 2*N_WEIGHTS, weights);
// Convert to complex numbers (do a conjugate at the same time)
for(j = 0; j < N_WEIGHTS; j++){
weights_dc_n[j] = bf_weights[2*j] - bf_weights[(2*j)+1]*I;
}
// Transpose the weights
int m,n;
float complex transpose[N_BEAM][N_ELE*N_BIN];
for(m=0;m<N_BEAM;m++){
for(n=0;n<N_ELE*N_BIN;n++){
transpose[m][n] = weights_dc_n[m*N_ELE*N_BIN + n];
}
}
for(n=0;n<N_ELE*N_BIN;n++){
for(m=0;m<N_BEAM;m++){
weights_dc[n*N_BEAM+ m] = transpose[m][n];
}
}
fclose(weights);
}
free(bf_weights);
// Copy weights to device
cudaMemcpy(d_weights, weights_dc, N_WEIGHTS*sizeof(cuComplex), cudaMemcpyHostToDevice); //r_weights instead of weights_dc //*N_TIME
free(weights_dc);
free(weights_dc_n);
}
static cuComplex **d_arr_A = NULL; static cuComplex **d_arr_B = NULL; static cuComplex **d_arr_C = NULL;
static cuComplex * d_beamformed = NULL;
static cuComplex * d_data = NULL;
static cuComplex * d_data1 = NULL;
static float * d_outputs;
void init_beamformer(){
// Allocate memory for the weights, data, beamformer output, and sti output.
cudaMalloc((void **)&d_weights, N_WEIGHTS*sizeof(cuComplex)); //*N_TIME
cudaMalloc((void **)&d_data1, N_SAMP*sizeof(cuComplex));
cudaMalloc((void **)&d_data, N_SAMP*sizeof(cuComplex));
cudaError_t err_malloc = cudaMalloc((void **)&d_beamformed, N_TBF*sizeof(cuComplex));
if (err_malloc != cudaSuccess) {
printf("CUDA Error (cudaMalloc2): %s\n", cudaGetErrorString(err_malloc));
}
cudaMalloc((void **)&d_outputs, N_POL*(N_OUTPUTS*sizeof(float)/2));
// This is all memory allocated to arrays that are used by gemmBatched.
// Allocate 3 arrays on CPU
cudaError_t cudaStat;
int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C;
nr_rows_A = N_BEAM;
nr_cols_A = N_ELE;
nr_rows_B = N_ELE;
nr_cols_B = N_TIME;
nr_rows_C = N_BEAM;
nr_cols_C = N_TIME;
// Allocate memory to host arrays.
const cuComplex **h_arr_A = 0; const cuComplex **h_arr_B = 0; cuComplex **h_arr_C = 0;
h_arr_A = (const cuComplex **)malloc(nr_rows_A * nr_cols_A *N_BIN*sizeof(const cuComplex*));
h_arr_B = (const cuComplex **)malloc(nr_rows_B * nr_cols_B *N_BIN*sizeof(const cuComplex*));
h_arr_C = (cuComplex **)malloc(nr_rows_C * nr_cols_C *N_BIN*sizeof(cuComplex*));
// Allocate memory for each batch in an array.
for(int i = 0; i < N_BIN; i++){
h_arr_A[i] = d_weights + i*nr_rows_A*nr_cols_A;
h_arr_B[i] = d_data + i*nr_rows_B*nr_cols_B;
h_arr_C[i] = d_beamformed + i*nr_rows_C*nr_cols_C;
}
// delete[] d_A;
// delete[] d_B;
// Allocate memory to arrays on device.
cudaStat = cudaMalloc((void **)&d_arr_A,nr_rows_A * nr_cols_A * N_BIN * sizeof(cuComplex*));
assert(!cudaStat);
cudaStat = cudaMalloc((void **)&d_arr_B,nr_rows_B * nr_cols_B * N_BIN * sizeof(cuComplex*));
assert(!cudaStat);
cudaStat = cudaMalloc((void **)&d_arr_C,nr_rows_C * nr_cols_C * N_BIN * sizeof(cuComplex*));
assert(!cudaStat);
// Copy memory from host to device.
cudaStat = cudaMemcpy(d_arr_A,h_arr_A,nr_rows_A * nr_cols_A * N_BIN * sizeof(cuComplex*),cudaMemcpyHostToDevice);
assert(!cudaStat);
cudaStat = cudaMemcpy(d_arr_B,h_arr_B,nr_rows_B * nr_cols_B * N_BIN * sizeof(cuComplex*),cudaMemcpyHostToDevice);
assert(!cudaStat);
cudaStat = cudaMemcpy(d_arr_C,h_arr_C,nr_rows_C * nr_cols_C * N_BIN * sizeof(cuComplex*),cudaMemcpyHostToDevice);
assert(!cudaStat);
}
__global__
void data_restructure(cuComplex * data, cuComplex * data_restruc){
int e = threadIdx.x;
int t = blockIdx.x;
int f = blockIdx.y;
//Restructure data so that the frequency bin is the slowest moving index
data_restruc[f*N_TIME*N_ELE + t*N_ELE + e] = data[t*N_BIN*N_ELE + f*N_ELE + e];
}
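// Host-side reference for the index mapping above (exposition only, an
// assumption; nothing in the pipeline calls it): the kernel permutes the
// layout from [time][bin][element] to [bin][time][element] so that each
// frequency bin becomes one contiguous N_ELE x N_TIME column-major batch for
// cublasCgemmBatched.
static void data_restructure_reference(const cuComplex * in, cuComplex * out){
	for (int f = 0; f < N_BIN; f++)
		for (int t = 0; t < N_TIME; t++)
			for (int e = 0; e < N_ELE; e++)
				out[f*N_TIME*N_ELE + t*N_ELE + e] = in[t*N_BIN*N_ELE + f*N_ELE + e];
}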
void data_in(char * input_filename){
FILE * data;
// File data pointers
float * bf_data;
// Complex data pointers
float complex * data_dc;
// Allocate heap memory for file data
bf_data = (float *)malloc(2*N_SAMP*sizeof(float));
data_dc = (float complex *)malloc(N_SAMP*sizeof(float complex));
// Open files
data = fopen(input_filename, "r");
/*********************************************************
* Read in Data
*********************************************************/
if (data != NULL) {
fread(bf_data, sizeof(float), 2*N_SAMP, data);
int j;
// Make 'em complex!
for (j = 0; j < N_SAMP; j++) {
data_dc[j] = bf_data[2*j] + bf_data[(2*j)+1]*I;
}
// Specify grid and block dimensions
dim3 dimBlock_d(N_ELE, 1, 1);
dim3 dimGrid_d(N_TIME, N_BIN, 1);
cuComplex * d_data_in = d_data1;
cuComplex * d_data_out = d_data;
cudaMemcpy(d_data_in, data_dc, N_SAMP*sizeof(cuComplex), cudaMemcpyHostToDevice);
// Restructure data for cublasCgemmBatched function.
data_restructure<<<dimGrid_d, dimBlock_d>>>(d_data_in, d_data_out);
fclose(data);
}
free(bf_data);
free(data_dc);
}
void beamform(cublasHandle_t handle) {
int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C;
nr_rows_A = N_BEAM;
nr_cols_A = N_ELE;
nr_rows_B = N_ELE;
nr_cols_B = N_TIME;
nr_rows_C = N_BEAM;
// Leading dimensions are always the rows of each matrix since the data is stored in a column-wise order.
int lda=nr_rows_A,ldb=nr_rows_B,ldc=nr_rows_C;
cuComplex alf;
cuComplex bet;
alf.x = 1;
alf.y = 0;
bet.x = 0;
bet.y = 0;
int batchCount = N_BIN; // There must be the same number of batches in each array.
cublasStatus_t stat;
/*
This function performs a matrix multiplication of the data and the weights.
Weights - d_arr_A, Data - d_arr_B, and the output - d_arr_C.
*/
stat = cublasCgemmBatched(
handle, // handle to the cuBLAS library context.
CUBLAS_OP_N, // Operation on matrices within array A.
CUBLAS_OP_N, // Operation on matrices within array B.
nr_rows_A, // Number of rows in matrix A and C.
nr_cols_B, // Number of columns in matrix B and C.
nr_cols_A, // Number of columns and rows in matrix A and B respectively.
&alf, // Scalar used for multiplication.
(const cuComplex **)d_arr_A, // Weight array of pointers.
lda, // Leading dimension of each batch or matrix in array A.
(const cuComplex **)d_arr_B, // Data array of pointers.
ldb, // Leading dimension of each batch or matrix in array B.
&bet, // Scalar used for multiplication.
(cuComplex **)d_arr_C, // Output array of pointers.
ldc, // Leading dimension of each batch or matrix in array C.
batchCount); // Number of batches in each array.
if(stat != CUBLAS_STATUS_SUCCESS){
cerr << "cublasCgemmBatched failed" << endl;
exit(1);
}
assert(!cudaGetLastError());
//Free GPU memory
// cudaFree(d_A);
// cudaFree(d_B);
// cudaFree(d_C);
// Destroy the handle
//cublasDestroy(handle);
}
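// For exposition only (an assumption; the pipeline never calls this): per
// frequency bin f the batched GEMM above computes, in column-major storage,
// C_f(beam, t) = sum over e of A_f(beam, e) * B_f(e, t), i.e. every beam
// sample is the weighted sum of the N_ELE element streams. A host-side
// reference for a single bin:
static void beamform_reference_bin(const cuComplex * A, // N_BEAM x N_ELE, column-major weights
                                   const cuComplex * B, // N_ELE x N_TIME, column-major data
                                   cuComplex * C)       // N_BEAM x N_TIME, column-major output
{
	for (int t = 0; t < N_TIME; t++) {
		for (int beam = 0; beam < N_BEAM; beam++) {
			cuComplex acc = make_cuComplex(0.0f, 0.0f);
			for (int e = 0; e < N_ELE; e++)
				acc = cuCaddf(acc, cuCmulf(A[beam + e*N_BEAM], B[e + t*N_ELE]));
			C[beam + t*N_BEAM] = acc;
		}
	}
}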
__global__
void sti_reduction(cuComplex * data_in, float * data_out) {
int f = blockIdx.x;
int b = blockIdx.y;
int t = threadIdx.x;
int s = blockIdx.z;
int h = sample_idx(s*N_TIME_STI + t,b,f); // Preprocessor macro used for the output of the beamformer. More detail can be seen in the header file. (First set of beams)
int h1 = sample_idx(s*N_TIME_STI + t,b+N_BEAM1,f); // Preprocessor macro used for the output of the beamformer. More detail can be seen in the header file. (Last set of beams)
// Temporary variables used for updating.
float beam_power1;
float beam_power2;
float cross_power1;
float cross_power2;
cuFloatComplex samp1;
cuFloatComplex samp2;
float scale = 1.0/N_TIME_STI; // Scale power by number of samples per STI window.
__shared__ cuFloatComplex reduced_array1[N_STI_BLOC];
__shared__ cuFloatComplex reduced_array[N_STI_BLOC];
if (t < N_TIME_STI) {
// X polarization (XX*).
samp1.x = data_in[h].x;
samp1.y = data_in[h].y;
beam_power1 = (samp1.x * samp1.x) + (samp1.y * samp1.y); // Beamformer output multiplied by its conjugate (absolute value squared).
reduced_array[t].x = beam_power1;
// Y polarization (YY*).
samp2.x = data_in[h1].x;
samp2.y = data_in[h1].y;
beam_power2 = (samp2.x * samp2.x) + (samp2.y * samp2.y); // Beamformer output multiplied by its conjugate (absolute value squared).
reduced_array[t].y = beam_power2;
// Cross polarization (XY*).
cross_power1 = (samp1.x * samp2.x) + (samp1.y * samp2.y); // Real part of cross polarization.
cross_power2 = (samp1.y * samp2.x) - (samp1.x * samp2.y); // Imaginary part of cross polarization.
reduced_array1[t].x = cross_power1;
reduced_array1[t].y = cross_power2;
}
else{
reduced_array[t].x = 0.0;
reduced_array[t].y = 0.0;
reduced_array1[t].x = 0.0;
reduced_array1[t].y = 0.0;
}
__syncthreads();
// Reduction is performed by splitting up the threads in each block and summing them all up.
// The number of threads in each block needs to be a power of two in order for the reduction to work. (No left over threads).
for(int k = blockDim.x/2; k>0; k>>=1){
if(t<k){
reduced_array[t].x += reduced_array[t+k].x;
reduced_array[t].y += reduced_array[t+k].y;
reduced_array1[t].x += reduced_array1[t+k].x;
reduced_array1[t].y += reduced_array1[t+k].y;
}
__syncthreads();
}
// After reduction is complete, assign each reduced value to its appropriate position in the output array.
if(t == 0){
data_out[output_idx(0,b,s,f)] = reduced_array[0].x*scale; // XX*.
data_out[output_idx(1,b,s,f)] = reduced_array[0].y*scale; // YY*.
data_out[output_idx(2,b,s,f)] = reduced_array1[0].x*scale; // XY* real.
data_out[output_idx(3,b,s,f)] = reduced_array1[0].y*scale; // XY* imaginary.
}
}
void run_beamformer(cublasHandle_t handle, float * data_out){
// Specify grid and block dimensions
dim3 dimBlock(N_STI_BLOC, 1, 1);
dim3 dimGrid(N_BIN, N_BEAM1, N_STI);
printf("Starting beamformer\n");
// Call beamformer function containing cublasCgemmBatched()
beamform(handle);
cudaError_t err_code = cudaGetLastError();
if (err_code != cudaSuccess) {
printf("CUDA Error (beamform): %s\n", cudaGetErrorString(err_code));
}
cuComplex * d_data_in = d_beamformed;
float * d_data_out = d_outputs;
printf("Starting sti_reduction\n");
// Call STI reduction kernel.
sti_reduction<<<dimGrid, dimBlock>>>(d_data_in, d_data_out);
printf("Finishing sti_reduction\n");
err_code = cudaGetLastError();
if (err_code != cudaSuccess) {
printf("CUDA Error (sti_reduction): %s\n", cudaGetErrorString(err_code));
}
// Copy output data from device to host.
cudaMemcpy(data_out, d_data_out, N_POL*(N_OUTPUTS*sizeof(float)/2),cudaMemcpyDeviceToHost);
cudaFree(d_data);
cudaFree(d_outputs);
}
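// Hypothetical end-to-end usage sketch (an assumption; the real driver lives
// outside this file, and the file names below are placeholders):
//   cublasHandle_t handle;
//   cublasCreate(&handle);
//   init_beamformer();                        // device buffers + batched pointer arrays
//   update_weights((char *)"weights.bin");    // placeholder file name
//   data_in((char *)"input.bin");             // placeholder file name
//   float *out = (float *)malloc(N_POL * (N_OUTPUTS * sizeof(float) / 2));
//   run_beamformer(handle, out);              // batched GEMM + STI reduction + copy back
//   cublasDestroy(handle);
//   free(out);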
|
7fb0024e71d17ab61af85b2462bc1918967c6a7c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaKernel.cuh"
#include <helper_cuda.h>
#include <iostream>
#include <math_functions.hpp>
namespace CudaSpace
{
__device__ bool use_color_map = false;
__device__ float max_height = 0;
__device__ float *point_buffer;
__device__ int *LOD_indexes, *LOD_resolutions;
__device__ CudaSpace::Color *color_map;
__device__ int LOD_levels, stride_x = 1;
__device__ glm::vec3 *frame_dimension;
__device__ glm::vec3 *camera_forward;
__device__ glm::vec3 *grid_camera_position;
__device__ glm::ivec2 *texture_resolution;
__device__ glm::ivec2 *point_buffer_resolution;
__device__ glm::ivec2 *boundary;
__device__ glm::mat3x3 *pixel_to_grid_matrix;
/*
* Get a colormap value from a height map index
*/
__device__ void getColorMapValue(int posX, int posZ, bool mirrorX, bool mirrorZ, Color& result)
{
if (mirrorX)
posX = LOD_resolutions[0] - 1 - posX;
if (mirrorZ)
posZ = LOD_resolutions[0] - 1 - posZ;
result = color_map[posX + posZ * LOD_resolutions[0]];
}
/*
* Get a value based on max height
*/
__device__ void getHeightColorValue(float height, Color& result)
{
unsigned char r, g, b;
height = height * 2 / max_height ;
if(height > 1)
{
height -= 1;
r = 255;
g = 255 - height * 255;
b = 0;
}
else
{
r = 255 * height;
g = r;
b = 255 - height * 255;
}
result = Color(r, g, b);
}
/*
* Retrieve the height value from point buffer based on LOD and position
*/
__device__ float getPointBufferValue(int posX, int posZ, bool mirrorX, bool mirrorZ, int LOD)
{
if (mirrorX)
posX = LOD_resolutions[LOD] - 1 - posX;
if (mirrorZ)
posZ = LOD_resolutions[LOD] - 1 - posZ;
return point_buffer[LOD_indexes[LOD] + posX + posZ * LOD_resolutions[LOD]];
}
/*
*Calculate exit point based on current ray position
*/
__device__ void calculateExitPointAndEdge(glm::vec3& entry, glm::vec3& direction, glm::vec3& exit, int &edge, int LOD)
{
float tX, tZ;
tX = ((floor(entry.x / pow(2.f, LOD)) + 1) * pow(2.f, LOD) - entry.x) / direction.x;
tZ = ((floor(entry.z / pow(2.f, LOD)) + 1) * pow(2.f, LOD) - entry.z) / direction.z;
if(tX <= tZ)
{
exit = entry + tX * direction;
exit.x = (floor(entry.x / pow(2.f, LOD)) + 1) * pow(2.f, LOD);
edge = floor(exit.x / pow(2.f, LOD));
}
else
{
exit = entry + tZ * direction;
exit.z = (floor(entry.z / pow(2.f, LOD)) + 1) * pow(2.f, LOD);
edge = floor(exit.z / pow(2.f, LOD));
}
}
/*
* Test if the ray intersects with the height field
*/
__device__ bool testIntersection(glm::vec3 &entry, glm::vec3 &exit, glm::vec3 &direction, bool mirrorX, bool mirrorZ, int &LOD)
{
bool result;
float height;
height = getPointBufferValue(floor(entry.x / pow(2.f, LOD)), floor(entry.z / pow(2.f, LOD)), mirrorX, mirrorZ, LOD);
if(direction.y >= 0)
{
result = entry.y <= height;
}
else
{
result = exit.y <= height;
if (result)
entry += glm::max(0.f, (height - entry.y) / direction.y) * direction;
}
return result;
}
/*
* Dick, C., et al. (2009). GPU ray-casting for scalable terrain rendering. Proceedings of EUROGRAPHICS, Citeseer.
* ray_direction MUST be normalized
*/
__device__ void castRay(glm::vec3& ray_position, glm::vec3& ray_direction, Color& result)
{
bool mirrorX, mirrorZ;
glm::vec3 ray_exit;
int edge;
int LOD = LOD_levels - 1;
bool intersection;
/*Mirror direction to simplify algorithm*/
if(ray_direction.x < 0)
{
mirrorX = true;
ray_direction.x = -ray_direction.x;
ray_position.x = point_buffer_resolution->x * pow(2.f, LOD) - ray_position.x;
}
else
{
mirrorX = false;
}
if(ray_direction.z < 0)
{
mirrorZ = true;
ray_direction.z = -ray_direction.z;
ray_position.z = point_buffer_resolution->y * pow(2.f, LOD) - ray_position.z;
}
else
{
mirrorZ = false;
}
/*Advance ray until it is outside of the buffer*/
while(ray_position.x < boundary->x && ray_position.z < boundary->y && !(ray_direction.y > 0 && ray_position.y > max_height))
{
calculateExitPointAndEdge(ray_position, ray_direction, ray_exit, edge, LOD);
intersection = testIntersection(ray_position, ray_exit, ray_direction, mirrorX, mirrorZ, LOD);
if(intersection)
{
if (LOD > 0)
LOD--;
else
{
if (use_color_map)
getColorMapValue(floor(ray_position.x), floor(ray_position.z), mirrorX, mirrorZ, result);
else
getHeightColorValue(ray_position.y, result);
return;
}
}
else
{
LOD = glm::min(LOD + 1 - (edge % 2), LOD_levels - 1);
ray_position = ray_exit;
}
}
}
/*
* Converts a pixel position to the grid space
* Pinhole camera model - From: Realistic Ray Tracing by Peter Shirley, pages 37-42
*/
__device__ glm::vec3 viewToGridSpace(glm::ivec2 &pixel_position)
{
glm::vec3 result = glm::vec3(
frame_dimension->x / 2.0f - (frame_dimension->x) * pixel_position.x / (texture_resolution->x - 1),
-frame_dimension->y / 2.0f + (frame_dimension->y) * pixel_position.y / (texture_resolution->y - 1),
-frame_dimension->z);
return result;
}
/*
* Start the ray tracing algorithm for each pixel
*/
__global__ void cuda_rayTrace(unsigned char* color_buffer)
{
/*2D Grid and Block*/
int pixel_x, pixel_y, threadId;
pixel_x = blockIdx.x * blockDim.x + threadIdx.x;
pixel_y = blockIdx.y * blockDim.y + threadIdx.y;
threadId = pixel_x + pixel_y * texture_resolution->x;
Color color_value(static_cast<unsigned char>(200), static_cast<unsigned char>(200), static_cast<unsigned char>(200));
glm::vec3 ray_direction, ray_position;
glm::ivec2 pixel_position;
/*Get the pixel position of this thread*/
pixel_position = glm::ivec2(pixel_x, pixel_y);
/*Calculate ray direction and cast ray*/
ray_direction = *pixel_to_grid_matrix * viewToGridSpace(pixel_position);
ray_position = ray_direction + *grid_camera_position;
ray_direction = normalize(ray_direction);
castRay(ray_position, ray_direction, color_value);
//GL_RGB
color_buffer[threadId * 3] = color_value.r;
color_buffer[threadId * 3 + 1] = color_value.g;
color_buffer[threadId * 3 + 2] = color_value.b;
}
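/*
 * Launch-geometry note (exposition only): rayTrace below launches this kernel
 * with blockSize = (1, texture_resolution.y / 2) and gridSize =
 * (texture_resolution.x, 2), i.e. exactly one thread per pixel (assuming an
 * even vertical resolution), so color_buffer must hold
 * texture_resolution.x * texture_resolution.y * 3 bytes of GL_RGB data.
 */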
/*
* Set device parameters
*/
__global__ void cuda_setParameters(glm::vec3 frame_dim, glm::vec3 camera_for, glm::vec3 grid_camera_pos, bool use_color, float max_height)
{
*frame_dimension = frame_dim;
*grid_camera_position = grid_camera_pos;
use_color_map = use_color;
CudaSpace::max_height = max_height;
/*Basis change matrix from view to grid space*/
glm::vec3 u, v, w;
w = -camera_for;
u = glm::normalize(glm::cross(glm::vec3(0, 100, 0), w));
v = glm::cross(w, u);
*pixel_to_grid_matrix = glm::mat3x3(u,v,w);
}
/*
* Initialize device
*/
__global__ void cuda_initializeDeviceVariables(glm::ivec2 point_buffer_resolution, glm::ivec2 texture_resolution, float* point_buffer, CudaSpace::Color *color_map, int LOD_levels, int stride_x, float max_height)
{
CudaSpace::texture_resolution = new glm::ivec2();
CudaSpace::point_buffer_resolution = new glm::ivec2();
LOD_indexes = new int[LOD_levels]();
LOD_resolutions = new int[LOD_levels]();
LOD_resolutions[LOD_levels - 1] = point_buffer_resolution.x;
LOD_indexes[LOD_levels - 1] = 0;
for(auto i = LOD_levels - 2; i >= 0; i--)
{
LOD_indexes[i] = LOD_indexes[i + 1] + LOD_resolutions[i + 1] * LOD_resolutions[i + 1];
LOD_resolutions[i] = LOD_resolutions[i + 1] * 2;
}
boundary = new glm::ivec2(LOD_resolutions[0], LOD_resolutions[0]);
frame_dimension = new glm::vec3();
pixel_to_grid_matrix = new glm::mat3x3();
grid_camera_position = new glm::vec3();
*CudaSpace::point_buffer_resolution = point_buffer_resolution;
*CudaSpace::texture_resolution = texture_resolution;
CudaSpace::point_buffer = point_buffer;
CudaSpace::color_map = color_map;
CudaSpace::LOD_levels = LOD_levels;
CudaSpace::stride_x = stride_x;
CudaSpace::max_height = max_height;
}
/*
* Free device's variables
*/
__global__ void cuda_freeDeviceVariables()
{
delete(grid_camera_position);
delete(texture_resolution);
delete(frame_dimension);
delete(pixel_to_grid_matrix);
delete(point_buffer_resolution);
delete[](LOD_indexes);
delete[](LOD_resolutions);
}
/*
* Set grid and block dimensions, create LOD, pass parameters to device and call kernels
*/
__host__ void rayTrace(glm::ivec2& texture_resolution, glm::vec3& frame_dimensions, glm::vec3& camera_forward, glm::vec3& grid_camera_pos, unsigned char* color_buffer, bool use_color_map, float max_height)
{
/*
* Things to consider:
* Branch divergence inside a warp
* Maximum number threads and blocks inside a SM
* Maximum number of threads per block
*/
dim3 gridSize, blockSize;
cuda_setParameters << <1, 1 >> > (frame_dimensions, camera_forward, grid_camera_pos, use_color_map, max_height);
checkCudaErrors(hipDeviceSynchronize());
blockSize = dim3(1, texture_resolution.y/2);
// ReSharper disable CppAssignedValueIsNeverUsed
gridSize = dim3(texture_resolution.x / blockSize.x, texture_resolution.y / blockSize.y);
cuda_rayTrace << <gridSize, blockSize >> > (color_buffer);
checkCudaErrors(hipDeviceSynchronize());
}
/*
* Initialize variables in the device
*/
__host__ void initializeDeviceVariables(glm::ivec2& point_buffer_res, glm::ivec2& texture_res, float* d_gpu_pointBuffer, CudaSpace::Color* d_color_map, int LOD_levels, int stride_x, float max_height)
{
cuda_initializeDeviceVariables << <1, 1 >> > (point_buffer_res, texture_res, d_gpu_pointBuffer, d_color_map, LOD_levels, stride_x, max_height);
checkCudaErrors(hipDeviceSynchronize());
}
/*
* Free memory addresses in device
*/
__host__ void freeDeviceVariables()
{
cuda_freeDeviceVariables << <1, 1 >> > ();
checkCudaErrors(hipDeviceSynchronize());
}
}
|
7fb0024e71d17ab61af85b2462bc1918967c6a7c.cu
|
#include "CudaKernel.cuh"
#include <helper_cuda.h>
#include <iostream>
#include <math_functions.hpp>
namespace CudaSpace
{
__device__ bool use_color_map = false;
__device__ float max_height = 0;
__device__ float *point_buffer;
__device__ int *LOD_indexes, *LOD_resolutions;
__device__ CudaSpace::Color *color_map;
__device__ int LOD_levels, stride_x = 1;
__device__ glm::vec3 *frame_dimension;
__device__ glm::vec3 *camera_forward;
__device__ glm::vec3 *grid_camera_position;
__device__ glm::ivec2 *texture_resolution;
__device__ glm::ivec2 *point_buffer_resolution;
__device__ glm::ivec2 *boundary;
__device__ glm::mat3x3 *pixel_to_grid_matrix;
/*
* Get a colormap value from a height map index
*/
__device__ void getColorMapValue(int posX, int posZ, bool mirrorX, bool mirrorZ, Color& result)
{
if (mirrorX)
posX = LOD_resolutions[0] - 1 - posX;
if (mirrorZ)
posZ = LOD_resolutions[0] - 1 - posZ;
result = color_map[posX + posZ * LOD_resolutions[0]];
}
/*
* Get a value based on max height
*/
__device__ void getHeightColorValue(float height, Color& result)
{
unsigned char r, g, b;
height = height * 2 / max_height ;
if(height > 1)
{
height -= 1;
r = 255;
g = 255 - height * 255;
b = 0;
}
else
{
r = 255 * height;
g = r;
b = 255 - height * 255;
}
result = Color(r, g, b);
}
/*
* Retrieve the height value from point buffer based on LOD and position
*/
__device__ float getPointBufferValue(int posX, int posZ, bool mirrorX, bool mirrorZ, int LOD)
{
if (mirrorX)
posX = LOD_resolutions[LOD] - 1 - posX;
if (mirrorZ)
posZ = LOD_resolutions[LOD] - 1 - posZ;
return point_buffer[LOD_indexes[LOD] + posX + posZ * LOD_resolutions[LOD]];
}
/*
*Calculate exit point based on current ray position
*/
__device__ void calculateExitPointAndEdge(glm::vec3& entry, glm::vec3& direction, glm::vec3& exit, int &edge, int LOD)
{
float tX, tZ;
tX = ((floor(entry.x / pow(2.f, LOD)) + 1) * pow(2.f, LOD) - entry.x) / direction.x;
tZ = ((floor(entry.z / pow(2.f, LOD)) + 1) * pow(2.f, LOD) - entry.z) / direction.z;
if(tX <= tZ)
{
exit = entry + tX * direction;
exit.x = (floor(entry.x / pow(2.f, LOD)) + 1) * pow(2.f, LOD);
edge = floor(exit.x / pow(2.f, LOD));
}
else
{
exit = entry + tZ * direction;
exit.z = (floor(entry.z / pow(2.f, LOD)) + 1) * pow(2.f, LOD);
edge = floor(exit.z / pow(2.f, LOD));
}
}
/*
* Test if the ray intersects with the height field
*/
__device__ bool testIntersection(glm::vec3 &entry, glm::vec3 &exit, glm::vec3 &direction, bool mirrorX, bool mirrorZ, int &LOD)
{
bool result;
float height;
height = getPointBufferValue(floor(entry.x / pow(2.f, LOD)), floor(entry.z / pow(2.f, LOD)), mirrorX, mirrorZ, LOD);
if(direction.y >= 0)
{
result = entry.y <= height;
}
else
{
result = exit.y <= height;
if (result)
entry += glm::max(0.f, (height - entry.y) / direction.y) * direction;
}
return result;
}
/*
* Dick, C., et al. (2009). GPU ray-casting for scalable terrain rendering. Proceedings of EUROGRAPHICS, Citeseer.
* ray_direction MUST be normalized
*/
__device__ void castRay(glm::vec3& ray_position, glm::vec3& ray_direction, Color& result)
{
bool mirrorX, mirrorZ;
glm::vec3 ray_exit;
int edge;
int LOD = LOD_levels - 1;
bool intersection;
/*Mirror direction to simplify algorithm*/
if(ray_direction.x < 0)
{
mirrorX = true;
ray_direction.x = -ray_direction.x;
ray_position.x = point_buffer_resolution->x * pow(2.f, LOD) - ray_position.x;
}
else
{
mirrorX = false;
}
if(ray_direction.z < 0)
{
mirrorZ = true;
ray_direction.z = -ray_direction.z;
ray_position.z = point_buffer_resolution->y * pow(2.f, LOD) - ray_position.z;
}
else
{
mirrorZ = false;
}
/*Advance ray until it is outside of the buffer*/
while(ray_position.x < boundary->x && ray_position.z < boundary->y && !(ray_direction.y > 0 && ray_position.y > max_height))
{
calculateExitPointAndEdge(ray_position, ray_direction, ray_exit, edge, LOD);
intersection = testIntersection(ray_position, ray_exit, ray_direction, mirrorX, mirrorZ, LOD);
if(intersection)
{
if (LOD > 0)
LOD--;
else
{
if (use_color_map)
getColorMapValue(floor(ray_position.x), floor(ray_position.z), mirrorX, mirrorZ, result);
else
getHeightColorValue(ray_position.y, result);
return;
}
}
else
{
LOD = glm::min(LOD + 1 - (edge % 2), LOD_levels - 1);
ray_position = ray_exit;
}
}
}
/*
* Converts a pixel position to the grid space
* Pinhole camera model - From: Realistic Ray Tracing by Peter Shirley, pages 37-42
*/
__device__ glm::vec3 viewToGridSpace(glm::ivec2 &pixel_position)
{
glm::vec3 result = glm::vec3(
frame_dimension->x / 2.0f - (frame_dimension->x) * pixel_position.x / (texture_resolution->x - 1),
-frame_dimension->y / 2.0f + (frame_dimension->y) * pixel_position.y / (texture_resolution->y - 1),
-frame_dimension->z);
return result;
}
/*
* Start the ray tracing algorithm for each pixel
*/
__global__ void cuda_rayTrace(unsigned char* color_buffer)
{
/*2D Grid and Block*/
int pixel_x, pixel_y, threadId;
pixel_x = blockIdx.x * blockDim.x + threadIdx.x;
pixel_y = blockIdx.y * blockDim.y + threadIdx.y;
threadId = pixel_x + pixel_y * texture_resolution->x;
Color color_value(static_cast<unsigned char>(200), static_cast<unsigned char>(200), static_cast<unsigned char>(200));
glm::vec3 ray_direction, ray_position;
glm::ivec2 pixel_position;
/*Get the pixel position of this thread*/
pixel_position = glm::ivec2(pixel_x, pixel_y);
/*Calculate ray direction and cast ray*/
ray_direction = *pixel_to_grid_matrix * viewToGridSpace(pixel_position);
ray_position = ray_direction + *grid_camera_position;
ray_direction = normalize(ray_direction);
castRay(ray_position, ray_direction, color_value);
//GL_RGB
color_buffer[threadId * 3] = color_value.r;
color_buffer[threadId * 3 + 1] = color_value.g;
color_buffer[threadId * 3 + 2] = color_value.b;
}
/*
* Set device parameters
*/
__global__ void cuda_setParameters(glm::vec3 frame_dim, glm::vec3 camera_for, glm::vec3 grid_camera_pos, bool use_color, float max_height)
{
*frame_dimension = frame_dim;
*grid_camera_position = grid_camera_pos;
use_color_map = use_color;
CudaSpace::max_height = max_height;
/*Basis change matrix from view to grid space*/
glm::vec3 u, v, w;
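/*Camera basis for the view-to-grid change: w points opposite the view direction, u is the right vector and v the up vector*/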
w = -camera_for;
u = glm::normalize(glm::cross(glm::vec3(0, 100, 0), w));
v = glm::cross(w, u);
*pixel_to_grid_matrix = glm::mat3x3(u,v,w);
}
/*
* Initialize device
*/
__global__ void cuda_initializeDeviceVariables(glm::ivec2 point_buffer_resolution, glm::ivec2 texture_resolution, float* point_buffer, CudaSpace::Color *color_map, int LOD_levels, int stride_x, float max_height)
{
CudaSpace::texture_resolution = new glm::ivec2();
CudaSpace::point_buffer_resolution = new glm::ivec2();
LOD_indexes = new int[LOD_levels]();
LOD_resolutions = new int[LOD_levels]();
LOD_resolutions[LOD_levels - 1] = point_buffer_resolution.x;
LOD_indexes[LOD_levels - 1] = 0;
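/*Level LOD_levels - 1 is the coarsest and is stored first in point_buffer; every finer level doubles the resolution and follows the coarser ones*/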
for(auto i = LOD_levels - 2; i >= 0; i--)
{
LOD_indexes[i] = LOD_indexes[i + 1] + LOD_resolutions[i + 1] * LOD_resolutions[i + 1];
LOD_resolutions[i] = LOD_resolutions[i + 1] * 2;
}
boundary = new glm::ivec2(LOD_resolutions[0], LOD_resolutions[0]);
frame_dimension = new glm::vec3();
pixel_to_grid_matrix = new glm::mat3x3();
grid_camera_position = new glm::vec3();
*CudaSpace::point_buffer_resolution = point_buffer_resolution;
*CudaSpace::texture_resolution = texture_resolution;
CudaSpace::point_buffer = point_buffer;
CudaSpace::color_map = color_map;
CudaSpace::LOD_levels = LOD_levels;
CudaSpace::stride_x = stride_x;
CudaSpace::max_height = max_height;
}
/*
* Free device's variables
*/
__global__ void cuda_freeDeviceVariables()
{
delete(grid_camera_position);
delete(texture_resolution);
delete(frame_dimension);
delete(pixel_to_grid_matrix);
delete(point_buffer_resolution);
delete[](LOD_indexes);
delete[](LOD_resolutions);
}
/*
* Set grid and block dimensions, create LOD, pass parameters to device and call kernels
*/
__host__ void rayTrace(glm::ivec2& texture_resolution, glm::vec3& frame_dimensions, glm::vec3& camera_forward, glm::vec3& grid_camera_pos, unsigned char* color_buffer, bool use_color_map, float max_height)
{
/*
* Things to consider:
* Branch divergence inside a warp
* Maximum number threads and blocks inside a SM
* Maximum number of threads per block
*/
dim3 gridSize, blockSize;
cuda_setParameters << <1, 1 >> > (frame_dimensions, camera_forward, grid_camera_pos, use_color_map, max_height);
checkCudaErrors(cudaDeviceSynchronize());
blockSize = dim3(1, texture_resolution.y/2);
// ReSharper disable CppAssignedValueIsNeverUsed
gridSize = dim3(texture_resolution.x / blockSize.x, texture_resolution.y / blockSize.y);
cuda_rayTrace << <gridSize, blockSize >> > (color_buffer);
checkCudaErrors(cudaDeviceSynchronize());
}
/*
* Initialize variables in the device
*/
__host__ void initializeDeviceVariables(glm::ivec2& point_buffer_res, glm::ivec2& texture_res, float* d_gpu_pointBuffer, CudaSpace::Color* d_color_map, int LOD_levels, int stride_x, float max_height)
{
cuda_initializeDeviceVariables << <1, 1 >> > (point_buffer_res, texture_res, d_gpu_pointBuffer, d_color_map, LOD_levels, stride_x, max_height);
checkCudaErrors(cudaDeviceSynchronize());
}
/*
* Free memory addresses in device
*/
__host__ void freeDeviceVariables()
{
cuda_freeDeviceVariables << <1, 1 >> > ();
checkCudaErrors(cudaDeviceSynchronize());
}
}
|
fc7c154e9a1c20e036546568907192029b76e175.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************
* Dwarf Mine - The 13-11 Benchmark
*
 * Copyright (c) 2013 Bünger, Thomas; Kieschnick, Christian; Kusber,
* Michael; Lohse, Henning; Wuttke, Nikolai; Xylander, Oliver; Yao, Gary;
* Zimmermann, Florian
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*****************************************************************************/
#include "CudaProxy.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "Field.h"
#include "State.cuh"
#include "Move.h"
#include "Simulator.cuh"
#include "Random.cuh"
#include "Debug.cuh"
#include <assert.h>
__global__ void setupStateForRandom(hiprandState_t* states, float* randomValues, size_t numberOfRandomValues)
{
hiprand_init(threadIdx.x, 0, 0, &states[threadIdx.x]);
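// Each thread fills every 128th slot of randomValues; the stride assumes the kernel is launched with 128 threads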
for (size_t i = 0; i + threadIdx.x < numberOfRandomValues; i += 128)
{
hiprandState_t deviceState = states[threadIdx.x];
randomValues[i + threadIdx.x] = 1.0f - hiprand_uniform(&deviceState); // delivers (0, 1] - we need [0, 1)
states[threadIdx.x] = deviceState;
}
}
__device__ bool doStep(CudaGameState& state, CudaSimulator& simulator, size_t limit, float fakedRandom = -1)
{
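// One playout step: compute the legal moves, pick one at random if any exist, flip the captured counters, then pass the turn to the enemy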
cassert(state.size == FIELD_DIMENSION * FIELD_DIMENSION, "Block %d, Thread %d detected invalid field size of %li\n", blockIdx.x, threadIdx.x, state.size);
__syncthreads();
simulator.calculatePossibleMoves();
size_t moveCount = simulator.countPossibleMoves();
__syncthreads();
if (moveCount > 0)
{
size_t index = simulator.getRandomMoveIndex(moveCount, fakedRandom);
cassert(index < state.size, "Block %d, Thread %d: Round %lu detected unexpected move index %lu for maximal playfield size %lu\n", blockIdx.x, threadIdx.x, limit, index, state.size);
simulator.flipEnemyCounter(index, limit);
cassert(!state.isUnchanged(), "Block %d: %lu detected unchanged state\n", blockIdx.x, limit);
}
state.currentPlayer = state.getEnemyPlayer();
return moveCount > 0;
}
__device__ void expandLeaf(CudaSimulator& simulator, CudaGameState& state)
{
size_t passCounter = 0;
size_t rounds = 0;
while (passCounter < 2)
{
bool passedMove = !doStep(state, simulator, rounds);
passCounter = (passedMove ? passCounter + 1 : 0);
cassert (rounds < MAXIMAL_MOVE_COUNT, "Detected rounds overflowing maximal count %d in %d\n", MAXIMAL_MOVE_COUNT, threadIdx.x);
rounds++;
}
__syncthreads();
}
__global__ void simulateGamePreRandom(size_t reiterations, size_t numberOfBlocks, float* randomValues, size_t numberOfPlayfields, const Field* playfields, Player currentPlayer, Result* results)
{
int playfieldIndex = threadIdx.x;
size_t blockIterations = size_t(ceil(reiterations * 1.0 / numberOfBlocks));
for (size_t i = 0; i < blockIterations; ++i)
{
size_t randomSeed = i * numberOfBlocks + blockIdx.x;
cassert(randomSeed < reiterations + 121, "SeedIndex %lu exceeded reiterations\n", randomSeed);
size_t node = randomNumber(randomValues, &randomSeed, numberOfPlayfields);
__shared__ Field sharedPlayfield[FIELD_SIZE];
__shared__ Field oldPlayfield[FIELD_SIZE];
__shared__ bool possibleMoves[FIELD_SIZE];
size_t playfieldOffset = FIELD_SIZE * node;
sharedPlayfield[playfieldIndex] = playfields[playfieldOffset + playfieldIndex];
CudaGameState state(
sharedPlayfield,
oldPlayfield,
possibleMoves,
FIELD_DIMENSION,
currentPlayer
);
CudaSimulator simulator(&state, randomValues, randomSeed);
__syncthreads();
expandLeaf(simulator, state);
__syncthreads();
if (state.isWinner(currentPlayer))
{
if (threadIdx.x == 0)
results[node].wins++;
}
if (threadIdx.x == 0)
{
results[node].visits ++;
}
}
}
__global__ void testRandomNumber(float fakedRandom, size_t maximum, size_t* randomNumberResult)
{
*randomNumberResult = randomNumber(NULL, maximum, fakedRandom);
}
__global__ void testNumberOfMarkedFields(size_t* resultSum, const bool* playfield)
{
*resultSum = numberOfMarkedFields(playfield);
}
__global__ void testDoStep(Field* playfield, Player currentPlayer, float fakedRandom)
{
int playfieldIndex = threadIdx.x;
__shared__ Field sharedPlayfield[FIELD_SIZE];
__shared__ Field oldPlayfield[FIELD_SIZE];
__shared__ bool possibleMoves[FIELD_SIZE];
sharedPlayfield[playfieldIndex] = playfield[playfieldIndex];
// this part may be a shared variable?
CudaGameState state(
sharedPlayfield,
oldPlayfield,
possibleMoves,
FIELD_DIMENSION,
currentPlayer
);
CudaSimulator simulator(&state, &fakedRandom, 0);
doStep(state, simulator, 0, fakedRandom);
playfield[playfieldIndex] = sharedPlayfield[playfieldIndex];
}
__global__ void testExpandLeaf(Field* playfield, Player currentPlayer, size_t* wins, size_t* visits)
{
int playfieldIndex = threadIdx.x;
__shared__ Field sharedPlayfield[FIELD_SIZE];
__shared__ Field oldPlayfield[FIELD_SIZE];
__shared__ bool possibleMoves[FIELD_SIZE];
sharedPlayfield[playfieldIndex] = playfield[playfieldIndex];
CudaGameState state (
sharedPlayfield,
oldPlayfield,
possibleMoves,
FIELD_DIMENSION,
currentPlayer
);
float fakedRandom = 0;
CudaSimulator simulator(&state, &fakedRandom, 0);
__syncthreads();
expandLeaf(simulator, state);
if (state.isWinner(currentPlayer))
{
if (threadIdx.x == 0) ++(*wins);
}
if (threadIdx.x == 0)
{
++(*visits);
}
playfield[playfieldIndex] = sharedPlayfield[playfieldIndex];
}
|
fc7c154e9a1c20e036546568907192029b76e175.cu
|
/*****************************************************************************
* Dwarf Mine - The 13-11 Benchmark
*
* Copyright (c) 2013 Bünger, Thomas; Kieschnick, Christian; Kusber,
* Michael; Lohse, Henning; Wuttke, Nikolai; Xylander, Oliver; Yao, Gary;
* Zimmermann, Florian
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*****************************************************************************/
#include "CudaProxy.h"
#include <curand.h>
#include <curand_kernel.h>
#include "Field.h"
#include "State.cuh"
#include "Move.h"
#include "Simulator.cuh"
#include "Random.cuh"
#include "Debug.cuh"
#include <assert.h>
__global__ void setupStateForRandom(curandState* states, float* randomValues, size_t numberOfRandomValues)
{
curand_init(threadIdx.x, 0, 0, &states[threadIdx.x]);
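// Each thread fills every 128th slot of randomValues; the stride assumes the kernel is launched with 128 threads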
for (size_t i = 0; i + threadIdx.x < numberOfRandomValues; i += 128)
{
curandState deviceState = states[threadIdx.x];
randomValues[i + threadIdx.x] = 1.0f - curand_uniform(&deviceState); // delivers (0, 1] - we need [0, 1)
states[threadIdx.x] = deviceState;
}
}
__device__ bool doStep(CudaGameState& state, CudaSimulator& simulator, size_t limit, float fakedRandom = -1)
{
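// One playout step: compute the legal moves, pick one at random if any exist, flip the captured counters, then pass the turn to the enemy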
cassert(state.size == FIELD_DIMENSION * FIELD_DIMENSION, "Block %d, Thread %d detected invalid field size of %li\n", blockIdx.x, threadIdx.x, state.size);
__syncthreads();
simulator.calculatePossibleMoves();
size_t moveCount = simulator.countPossibleMoves();
__syncthreads();
if (moveCount > 0)
{
size_t index = simulator.getRandomMoveIndex(moveCount, fakedRandom);
cassert(index < state.size, "Block %d, Thread %d: Round %lu detected unexpected move index %lu for maximal playfield size %lu\n", blockIdx.x, threadIdx.x, limit, index, state.size);
simulator.flipEnemyCounter(index, limit);
cassert(!state.isUnchanged(), "Block %d: %lu detected unchanged state\n", blockIdx.x, limit);
}
state.currentPlayer = state.getEnemyPlayer();
return moveCount > 0;
}
__device__ void expandLeaf(CudaSimulator& simulator, CudaGameState& state)
{
size_t passCounter = 0;
size_t rounds = 0;
while (passCounter < 2)
{
bool passedMove = !doStep(state, simulator, rounds);
passCounter = (passedMove ? passCounter + 1 : 0);
cassert (rounds < MAXIMAL_MOVE_COUNT, "Detected rounds overflowing maximal count %d in %d\n", MAXIMAL_MOVE_COUNT, threadIdx.x);
rounds++;
}
__syncthreads();
}
__global__ void simulateGamePreRandom(size_t reiterations, size_t numberOfBlocks, float* randomValues, size_t numberOfPlayfields, const Field* playfields, Player currentPlayer, Result* results)
{
int playfieldIndex = threadIdx.x;
size_t blockIterations = size_t(ceil(reiterations * 1.0 / numberOfBlocks));
for (size_t i = 0; i < blockIterations; ++i)
{
size_t randomSeed = i * numberOfBlocks + blockIdx.x;
cassert(randomSeed < reiterations + 121, "SeedIndex %lu exceeded reiterations\n", randomSeed);
size_t node = randomNumber(randomValues, &randomSeed, numberOfPlayfields);
__shared__ Field sharedPlayfield[FIELD_SIZE];
__shared__ Field oldPlayfield[FIELD_SIZE];
__shared__ bool possibleMoves[FIELD_SIZE];
size_t playfieldOffset = FIELD_SIZE * node;
sharedPlayfield[playfieldIndex] = playfields[playfieldOffset + playfieldIndex];
CudaGameState state(
sharedPlayfield,
oldPlayfield,
possibleMoves,
FIELD_DIMENSION,
currentPlayer
);
CudaSimulator simulator(&state, randomValues, randomSeed);
__syncthreads();
expandLeaf(simulator, state);
__syncthreads();
if (state.isWinner(currentPlayer))
{
if (threadIdx.x == 0)
results[node].wins++;
}
if (threadIdx.x == 0)
{
results[node].visits ++;
}
}
}
__global__ void testRandomNumber(float fakedRandom, size_t maximum, size_t* randomNumberResult)
{
*randomNumberResult = randomNumber(NULL, maximum, fakedRandom);
}
__global__ void testNumberOfMarkedFields(size_t* resultSum, const bool* playfield)
{
*resultSum = numberOfMarkedFields(playfield);
}
__global__ void testDoStep(Field* playfield, Player currentPlayer, float fakedRandom)
{
int playfieldIndex = threadIdx.x;
__shared__ Field sharedPlayfield[FIELD_SIZE];
__shared__ Field oldPlayfield[FIELD_SIZE];
__shared__ bool possibleMoves[FIELD_SIZE];
sharedPlayfield[playfieldIndex] = playfield[playfieldIndex];
// this part may be a shared variable?
CudaGameState state(
sharedPlayfield,
oldPlayfield,
possibleMoves,
FIELD_DIMENSION,
currentPlayer
);
CudaSimulator simulator(&state, &fakedRandom, 0);
doStep(state, simulator, 0, fakedRandom);
playfield[playfieldIndex] = sharedPlayfield[playfieldIndex];
}
__global__ void testExpandLeaf(Field* playfield, Player currentPlayer, size_t* wins, size_t* visits)
{
int playfieldIndex = threadIdx.x;
__shared__ Field sharedPlayfield[FIELD_SIZE];
__shared__ Field oldPlayfield[FIELD_SIZE];
__shared__ bool possibleMoves[FIELD_SIZE];
sharedPlayfield[playfieldIndex] = playfield[playfieldIndex];
CudaGameState state (
sharedPlayfield,
oldPlayfield,
possibleMoves,
FIELD_DIMENSION,
currentPlayer
);
float fakedRandom = 0;
CudaSimulator simulator(&state, &fakedRandom, 0);
__syncthreads();
expandLeaf(simulator, state);
if (state.isWinner(currentPlayer))
{
if (threadIdx.x == 0) ++(*wins);
}
if (threadIdx.x == 0)
{
++(*visits);
}
playfield[playfieldIndex] = sharedPlayfield[playfieldIndex];
}
|
4afcba9d136804b65578c6d14ec37ab7873c54d8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--blockDim=2048 --gridDim=64
struct s {
int x;
};
struct t : s {
};
__global__ void foo(t p, t q) {
p.x = q.x;
}
|
4afcba9d136804b65578c6d14ec37ab7873c54d8.cu
|
//pass
//--blockDim=2048 --gridDim=64
struct s {
int x;
};
struct t : s {
};
__global__ void foo(t p, t q) {
p.x = q.x;
}
|
223f594e5f40e6d9c8c383d91ff08f524a8bc8a5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void prefixsum_combine(float* in, int in_length, float* out, int out_length){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
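// Combine step of a block-wise scan: in is assumed to hold the scanned per-block totals, so each element of block b adds in[b - 1]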
if(idx < out_length && blockIdx.x > 0){
out[idx] += in[blockIdx.x - 1];
}
}
|
223f594e5f40e6d9c8c383d91ff08f524a8bc8a5.cu
|
#include "includes.h"
__global__ void prefixsum_combine(float* in, int in_length, float* out, int out_length){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
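// Combine step of a block-wise scan: in is assumed to hold the scanned per-block totals, so each element of block b adds in[b - 1]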
if(idx < out_length && blockIdx.x > 0){
out[idx] += in[blockIdx.x - 1];
}
}
|
b0d4e135ff4967a92a7bb5301eacb4efd216d549.hip
|
// !!! This is a file automatically generated by hipify!!!
//! Ising model evolution
/*!
\param G Spins on the square lattice [n-by-n]
\param w Weight matrix [5-by-5]
\param k Number of iterations [scalar]
\param n Number of lattice points per dim [scalar]
NOTE: Both matrices G and w are stored in row-major format.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "ising.h"
#define BLOCKDIM 32
__global__
void apply_w(int * data,int * result, double * filter, int n){
int my_x=blockIdx.x*blockDim.x+threadIdx.x;
int my_y=blockIdx.y*blockDim.y+threadIdx.y;
int my_id=my_x*n+my_y;
//If thread is outside of compute id threshold it doesn't need to do anything
if(my_x>=n||my_y>=n){
return;
}
double sum=0;
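//Weighted 5x5 neighborhood sum with periodic (wrap-around) boundary conditions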
for(int i=0;i<5;i++){
for(int j=0;j<5;j++){
sum+=filter[i*5+j]*data[n*((n+(my_x+i-2))%n)+((n+(my_y+j-2))%n)];
}
}
if((sum<1e-5)&&(sum>-(1e-5))){
result[my_id]=data[my_id];
}
else if(sum<0){
result[my_id]=-1;
}
else{
result[my_id]=1;
}
}
void ising( int *G, double *w, int k, int n){
int * dev_temp;
int * dev_G;
int * dev_res;
double * dev_w;
if(hipMalloc(&dev_G,n*n*sizeof(int))!=hipSuccess||hipMalloc(&dev_res,n*n*sizeof(int))!=hipSuccess||hipMalloc(&dev_w,25*sizeof(double))!=hipSuccess){
printf("Error: could not allocate memory on device!");
return;
}
//copy data to GPU Device
hipMemcpy(dev_G,G,n*n*sizeof(int),hipMemcpyDefault);
hipMemcpy(dev_w,w,25*sizeof(double),hipMemcpyDefault);
//execute kernel
for(int rep=0;rep<k;rep++){
dim3 dimBlock(BLOCKDIM,BLOCKDIM);
dim3 dimGrid(n/BLOCKDIM+1,n/BLOCKDIM+1);
hipLaunchKernelGGL(( apply_w), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_G,dev_res,dev_w,n);
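//Swap buffers so this iteration's output becomes the next iteration's input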
dev_temp=dev_res;
dev_res=dev_G;
dev_G=dev_temp;
}
//Bring results back to CPU Host
hipMemcpy(G,dev_G,n*n*sizeof(int),hipMemcpyDefault);
//Free device buffers
hipFree(dev_G);
hipFree(dev_res);
hipFree(dev_w);
}
|
b0d4e135ff4967a92a7bb5301eacb4efd216d549.cu
|
//! Ising model evolution
/*!
\param G Spins on the square lattice [n-by-n]
\param w Weight matrix [5-by-5]
\param k Number of iterations [scalar]
\param n Number of lattice points per dim [scalar]
NOTE: Both matrices G and w are stored in row-major format.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <cuda.h>
#include "ising.h"
#define BLOCKDIM 32
__global__
void apply_w(int * data,int * result, double * filter, int n){
int my_x=blockIdx.x*blockDim.x+threadIdx.x;
int my_y=blockIdx.y*blockDim.y+threadIdx.y;
int my_id=my_x*n+my_y;
//If thread is outside of compute id threshold it doesn't need to do anything
if(my_x>=n||my_y>=n){
return;
}
double sum=0;
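//Weighted 5x5 neighborhood sum with periodic (wrap-around) boundary conditions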
for(int i=0;i<5;i++){
for(int j=0;j<5;j++){
sum+=filter[i*5+j]*data[n*((n+(my_x+i-2))%n)+((n+(my_y+j-2))%n)];
}
}
if((sum<1e-5)&&(sum>-(1e-5))){
result[my_id]=data[my_id];
}
else if(sum<0){
result[my_id]=-1;
}
else{
result[my_id]=1;
}
}
void ising( int *G, double *w, int k, int n){
int * dev_temp;
int * dev_G;
int * dev_res;
double * dev_w;
if(cudaMalloc(&dev_G,n*n*sizeof(int))!=cudaSuccess||cudaMalloc(&dev_res,n*n*sizeof(int))!=cudaSuccess||cudaMalloc(&dev_w,25*sizeof(double))!=cudaSuccess){
printf("Error: could not allocate memory on device!");
return;
}
//copy data to GPU Device
cudaMemcpy(dev_G,G,n*n*sizeof(int),cudaMemcpyDefault);
cudaMemcpy(dev_w,w,25*sizeof(double),cudaMemcpyDefault);
//execute kernel
for(int rep=0;rep<k;rep++){
dim3 dimBlock(BLOCKDIM,BLOCKDIM);
dim3 dimGrid(n/BLOCKDIM+1,n/BLOCKDIM+1);
apply_w<<<dimGrid,dimBlock>>>(dev_G,dev_res,dev_w,n);
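//Swap buffers so this iteration's output becomes the next iteration's input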
dev_temp=dev_res;
dev_res=dev_G;
dev_G=dev_temp;
}
//Bring results back to CPU Host
cudaMemcpy(G,dev_G,n*n*sizeof(int),cudaMemcpyDefault);
//Free device buffers
cudaFree(dev_G);
cudaFree(dev_res);
cudaFree(dev_w);
}
|
1dc2a0203caf1d2069a3c6928518388d0232da83.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <random/rng.cuh>
#include <stats/mean.cuh>
#include <stats/mean_center.cuh>
#include "matrix_vector_op.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
template <typename T, typename IdxType>
struct MeanCenterInputs {
T tolerance, mean;
IdxType rows, cols;
bool sample, rowMajor, bcastAlongRows;
unsigned long long int seed;
};
template <typename T, typename IdxType>
::std::ostream &operator<<(::std::ostream &os,
const MeanCenterInputs<T, IdxType> &dims) {
return os;
}
template <typename T, typename IdxType>
class MeanCenterTest
: public ::testing::TestWithParam<MeanCenterInputs<T, IdxType>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MeanCenterInputs<T, IdxType>>::GetParam();
raft::random::Rng r(params.seed);
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
auto rows = params.rows, cols = params.cols;
auto len = rows * cols;
IdxType vecLen = params.bcastAlongRows ? cols : rows;
allocate(out, len);
allocate(out_ref, len);
allocate(data, len);
allocate(meanVec, vecLen);
r.normal(data, len, params.mean, (T)1.0, stream);
raft::stats::mean(meanVec, data, cols, rows, params.sample, params.rowMajor,
stream);
meanCenter(out, data, meanVec, cols, rows, params.rowMajor,
params.bcastAlongRows, stream);
raft::linalg::naiveMatVec(out_ref, data, meanVec, cols, rows,
params.rowMajor, params.bcastAlongRows, (T)-1.0);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(meanVec));
}
protected:
MeanCenterInputs<T, IdxType> params;
T *data, *meanVec, *out, *out_ref;
};
const std::vector<MeanCenterInputs<float, int>> inputsf_i32 = {
{0.05f, 1.f, 1024, 32, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, false, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<float, int> MeanCenterTestF_i32;
TEST_P(MeanCenterTestF_i32, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestF_i32,
::testing::ValuesIn(inputsf_i32));
const std::vector<MeanCenterInputs<float, size_t>> inputsf_i64 = {
{0.05f, 1.f, 1024, 32, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, false, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<float, size_t> MeanCenterTestF_i64;
TEST_P(MeanCenterTestF_i64, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestF_i64,
::testing::ValuesIn(inputsf_i64));
const std::vector<MeanCenterInputs<double, int>> inputsd_i32 = {
{0.05, 1.0, 1024, 32, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, false, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<double, int> MeanCenterTestD_i32;
TEST_P(MeanCenterTestD_i32, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestD_i32,
::testing::ValuesIn(inputsd_i32));
const std::vector<MeanCenterInputs<double, size_t>> inputsd_i64 = {
{0.05, 1.0, 1024, 32, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, false, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<double, size_t> MeanCenterTestD_i64;
TEST_P(MeanCenterTestD_i64, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestD_i64,
::testing::ValuesIn(inputsd_i64));
} // end namespace Stats
} // end namespace MLCommon
|
1dc2a0203caf1d2069a3c6928518388d0232da83.cu
|
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <random/rng.cuh>
#include <stats/mean.cuh>
#include <stats/mean_center.cuh>
#include "matrix_vector_op.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
template <typename T, typename IdxType>
struct MeanCenterInputs {
T tolerance, mean;
IdxType rows, cols;
bool sample, rowMajor, bcastAlongRows;
unsigned long long int seed;
};
template <typename T, typename IdxType>
::std::ostream &operator<<(::std::ostream &os,
const MeanCenterInputs<T, IdxType> &dims) {
return os;
}
template <typename T, typename IdxType>
class MeanCenterTest
: public ::testing::TestWithParam<MeanCenterInputs<T, IdxType>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MeanCenterInputs<T, IdxType>>::GetParam();
raft::random::Rng r(params.seed);
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
auto rows = params.rows, cols = params.cols;
auto len = rows * cols;
IdxType vecLen = params.bcastAlongRows ? cols : rows;
allocate(out, len);
allocate(out_ref, len);
allocate(data, len);
allocate(meanVec, vecLen);
r.normal(data, len, params.mean, (T)1.0, stream);
raft::stats::mean(meanVec, data, cols, rows, params.sample, params.rowMajor,
stream);
meanCenter(out, data, meanVec, cols, rows, params.rowMajor,
params.bcastAlongRows, stream);
raft::linalg::naiveMatVec(out_ref, data, meanVec, cols, rows,
params.rowMajor, params.bcastAlongRows, (T)-1.0);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(meanVec));
}
protected:
MeanCenterInputs<T, IdxType> params;
T *data, *meanVec, *out, *out_ref;
};
const std::vector<MeanCenterInputs<float, int>> inputsf_i32 = {
{0.05f, 1.f, 1024, 32, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, false, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<float, int> MeanCenterTestF_i32;
TEST_P(MeanCenterTestF_i32, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestF_i32,
::testing::ValuesIn(inputsf_i32));
const std::vector<MeanCenterInputs<float, size_t>> inputsf_i64 = {
{0.05f, 1.f, 1024, 32, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, false, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<float, size_t> MeanCenterTestF_i64;
TEST_P(MeanCenterTestF_i64, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestF_i64,
::testing::ValuesIn(inputsf_i64));
const std::vector<MeanCenterInputs<double, int>> inputsd_i32 = {
{0.05, 1.0, 1024, 32, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, false, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<double, int> MeanCenterTestD_i32;
TEST_P(MeanCenterTestD_i32, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestD_i32,
::testing::ValuesIn(inputsd_i32));
const std::vector<MeanCenterInputs<double, size_t>> inputsd_i64 = {
{0.05, 1.0, 1024, 32, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, false, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<double, size_t> MeanCenterTestD_i64;
TEST_P(MeanCenterTestD_i64, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestD_i64,
::testing::ValuesIn(inputsd_i64));
} // end namespace Stats
} // end namespace MLCommon
|
18474eb042893776271e8e9d6063728da56e5c99.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cusparse_v2.h>
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include "../../test_utils.cuh"
#include <raft/distance/distance_types.hpp>
#include <raft/sparse/neighbors/knn.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace sparse {
namespace selection {
using namespace raft;
using namespace raft::sparse;
template <typename value_idx, typename value_t>
struct SparseKNNInputs {
value_idx n_cols;
std::vector<value_idx> indptr_h;
std::vector<value_idx> indices_h;
std::vector<value_t> data_h;
std::vector<value_t> out_dists_ref_h;
std::vector<value_idx> out_indices_ref_h;
int k;
int batch_size_index = 2;
int batch_size_query = 2;
raft::distance::DistanceType metric = raft::distance::DistanceType::L2SqrtExpanded;
};
template <typename value_idx, typename value_t>
::std::ostream& operator<<(::std::ostream& os, const SparseKNNInputs<value_idx, value_t>& dims)
{
return os;
}
template <typename value_idx, typename value_t>
class SparseKNNTest : public ::testing::TestWithParam<SparseKNNInputs<value_idx, value_t>> {
public:
SparseKNNTest()
: params(::testing::TestWithParam<SparseKNNInputs<value_idx, value_t>>::GetParam()),
indptr(0, resource::get_cuda_stream(handle)),
indices(0, resource::get_cuda_stream(handle)),
data(0, resource::get_cuda_stream(handle)),
out_indices(0, resource::get_cuda_stream(handle)),
out_dists(0, resource::get_cuda_stream(handle)),
out_indices_ref(0, resource::get_cuda_stream(handle)),
out_dists_ref(0, resource::get_cuda_stream(handle))
{
}
protected:
void SetUp() override
{
n_rows = params.indptr_h.size() - 1;
nnz = params.indices_h.size();
k = params.k;
make_data();
raft::sparse::neighbors::brute_force_knn<value_idx, value_t>(indptr.data(),
indices.data(),
data.data(),
nnz,
n_rows,
params.n_cols,
indptr.data(),
indices.data(),
data.data(),
nnz,
n_rows,
params.n_cols,
out_indices.data(),
out_dists.data(),
k,
handle,
params.batch_size_index,
params.batch_size_query,
params.metric);
RAFT_CUDA_TRY(hipStreamSynchronize(resource::get_cuda_stream(handle)));
}
void compare()
{
ASSERT_TRUE(devArrMatch(
out_dists_ref.data(), out_dists.data(), n_rows * k, CompareApprox<value_t>(1e-4)));
ASSERT_TRUE(
devArrMatch(out_indices_ref.data(), out_indices.data(), n_rows * k, Compare<value_idx>()));
}
protected:
void make_data()
{
std::vector<value_idx> indptr_h = params.indptr_h;
std::vector<value_idx> indices_h = params.indices_h;
std::vector<value_t> data_h = params.data_h;
auto stream = resource::get_cuda_stream(handle);
indptr.resize(indptr_h.size(), stream);
indices.resize(indices_h.size(), stream);
data.resize(data_h.size(), stream);
update_device(indptr.data(), indptr_h.data(), indptr_h.size(), stream);
update_device(indices.data(), indices_h.data(), indices_h.size(), stream);
update_device(data.data(), data_h.data(), data_h.size(), stream);
std::vector<value_t> out_dists_ref_h = params.out_dists_ref_h;
std::vector<value_idx> out_indices_ref_h = params.out_indices_ref_h;
out_indices_ref.resize(out_indices_ref_h.size(), stream);
out_dists_ref.resize(out_dists_ref_h.size(), stream);
update_device(
out_indices_ref.data(), out_indices_ref_h.data(), out_indices_ref_h.size(), stream);
update_device(out_dists_ref.data(), out_dists_ref_h.data(), out_dists_ref_h.size(), stream);
out_dists.resize(n_rows * k, stream);
out_indices.resize(n_rows * k, stream);
}
raft::resources handle;
int n_rows, nnz, k;
// input data
rmm::device_uvector<value_idx> indptr, indices;
rmm::device_uvector<value_t> data;
// output data
rmm::device_uvector<value_idx> out_indices;
rmm::device_uvector<value_t> out_dists;
rmm::device_uvector<value_idx> out_indices_ref;
rmm::device_uvector<value_t> out_dists_ref;
SparseKNNInputs<value_idx, value_t> params;
};
const std::vector<SparseKNNInputs<int, float>> inputs_i32_f = {
{9, // ncols
{0, 2, 4, 6, 8}, // indptr
{0, 4, 0, 3, 0, 2, 0, 8}, // indices
{0.0f, 1.0f, 5.0f, 6.0f, 5.0f, 6.0f, 0.0f, 1.0f}, // data
{0, 1.41421, 0, 7.87401, 0, 7.87401, 0, 1.41421}, // dists
{0, 3, 1, 0, 2, 0, 3, 0}, // inds
2,
2,
2,
raft::distance::DistanceType::L2SqrtExpanded}};
typedef SparseKNNTest<int, float> SparseKNNTestF;
TEST_P(SparseKNNTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(SparseKNNTest, SparseKNNTestF, ::testing::ValuesIn(inputs_i32_f));
}; // end namespace selection
}; // end namespace sparse
}; // end namespace raft
|
18474eb042893776271e8e9d6063728da56e5c99.cu
|
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cusparse_v2.h>
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include "../../test_utils.cuh"
#include <raft/distance/distance_types.hpp>
#include <raft/sparse/neighbors/knn.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace sparse {
namespace selection {
using namespace raft;
using namespace raft::sparse;
template <typename value_idx, typename value_t>
struct SparseKNNInputs {
value_idx n_cols;
std::vector<value_idx> indptr_h;
std::vector<value_idx> indices_h;
std::vector<value_t> data_h;
std::vector<value_t> out_dists_ref_h;
std::vector<value_idx> out_indices_ref_h;
int k;
int batch_size_index = 2;
int batch_size_query = 2;
raft::distance::DistanceType metric = raft::distance::DistanceType::L2SqrtExpanded;
};
template <typename value_idx, typename value_t>
::std::ostream& operator<<(::std::ostream& os, const SparseKNNInputs<value_idx, value_t>& dims)
{
return os;
}
template <typename value_idx, typename value_t>
class SparseKNNTest : public ::testing::TestWithParam<SparseKNNInputs<value_idx, value_t>> {
public:
SparseKNNTest()
: params(::testing::TestWithParam<SparseKNNInputs<value_idx, value_t>>::GetParam()),
indptr(0, resource::get_cuda_stream(handle)),
indices(0, resource::get_cuda_stream(handle)),
data(0, resource::get_cuda_stream(handle)),
out_indices(0, resource::get_cuda_stream(handle)),
out_dists(0, resource::get_cuda_stream(handle)),
out_indices_ref(0, resource::get_cuda_stream(handle)),
out_dists_ref(0, resource::get_cuda_stream(handle))
{
}
protected:
void SetUp() override
{
n_rows = params.indptr_h.size() - 1;
nnz = params.indices_h.size();
k = params.k;
make_data();
raft::sparse::neighbors::brute_force_knn<value_idx, value_t>(indptr.data(),
indices.data(),
data.data(),
nnz,
n_rows,
params.n_cols,
indptr.data(),
indices.data(),
data.data(),
nnz,
n_rows,
params.n_cols,
out_indices.data(),
out_dists.data(),
k,
handle,
params.batch_size_index,
params.batch_size_query,
params.metric);
RAFT_CUDA_TRY(cudaStreamSynchronize(resource::get_cuda_stream(handle)));
}
void compare()
{
ASSERT_TRUE(devArrMatch(
out_dists_ref.data(), out_dists.data(), n_rows * k, CompareApprox<value_t>(1e-4)));
ASSERT_TRUE(
devArrMatch(out_indices_ref.data(), out_indices.data(), n_rows * k, Compare<value_idx>()));
}
protected:
void make_data()
{
std::vector<value_idx> indptr_h = params.indptr_h;
std::vector<value_idx> indices_h = params.indices_h;
std::vector<value_t> data_h = params.data_h;
auto stream = resource::get_cuda_stream(handle);
indptr.resize(indptr_h.size(), stream);
indices.resize(indices_h.size(), stream);
data.resize(data_h.size(), stream);
update_device(indptr.data(), indptr_h.data(), indptr_h.size(), stream);
update_device(indices.data(), indices_h.data(), indices_h.size(), stream);
update_device(data.data(), data_h.data(), data_h.size(), stream);
std::vector<value_t> out_dists_ref_h = params.out_dists_ref_h;
std::vector<value_idx> out_indices_ref_h = params.out_indices_ref_h;
out_indices_ref.resize(out_indices_ref_h.size(), stream);
out_dists_ref.resize(out_dists_ref_h.size(), stream);
update_device(
out_indices_ref.data(), out_indices_ref_h.data(), out_indices_ref_h.size(), stream);
update_device(out_dists_ref.data(), out_dists_ref_h.data(), out_dists_ref_h.size(), stream);
out_dists.resize(n_rows * k, stream);
out_indices.resize(n_rows * k, stream);
}
raft::resources handle;
int n_rows, nnz, k;
// input data
rmm::device_uvector<value_idx> indptr, indices;
rmm::device_uvector<value_t> data;
// output data
rmm::device_uvector<value_idx> out_indices;
rmm::device_uvector<value_t> out_dists;
rmm::device_uvector<value_idx> out_indices_ref;
rmm::device_uvector<value_t> out_dists_ref;
SparseKNNInputs<value_idx, value_t> params;
};
const std::vector<SparseKNNInputs<int, float>> inputs_i32_f = {
{9, // ncols
{0, 2, 4, 6, 8}, // indptr
{0, 4, 0, 3, 0, 2, 0, 8}, // indices
{0.0f, 1.0f, 5.0f, 6.0f, 5.0f, 6.0f, 0.0f, 1.0f}, // data
{0, 1.41421, 0, 7.87401, 0, 7.87401, 0, 1.41421}, // dists
{0, 3, 1, 0, 2, 0, 3, 0}, // inds
2,
2,
2,
raft::distance::DistanceType::L2SqrtExpanded}};
typedef SparseKNNTest<int, float> SparseKNNTestF;
TEST_P(SparseKNNTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(SparseKNNTest, SparseKNNTestF, ::testing::ValuesIn(inputs_i32_f));
}; // end namespace selection
}; // end namespace sparse
}; // end namespace raft
|
c407a384d1fb4b24b0f89b425d335a547adee7bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "group_spatial_softmax_op.h"
namespace caffe2 {
namespace {
__global__ void GroupSpatialSoftmaxKernel(const int num, const int A, const int W,
const int H, const float* Xdata, float* Pdata, const int num_classes) {
// Loop through labels (N x A x H x W)
CUDA_1D_KERNEL_LOOP(index, num * A * H * W) {
int D = num_classes * A;
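// Softmax is taken over the num_classes channels of anchor a, independently at each spatial location (x, y)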
int x = index % W;
int y = (index / W) % H;
int a = (index / (W * H)) % A;
int i = index / W / H / A;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = max(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = exp(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
__global__ void SumProbsKernel(const int N, const int A, const int W,
const int H, const float* Ydata, const float* dYdata,
float* sum_probs_data, const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * A * W * H) {
int D = num_classes * A;
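// Per-anchor reduction sum_j Y_j * dY_j, the term required by the softmax gradient (see the derivation in the gradient op)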
int x = i % W;
int y = (i / W) % H;
int a = (i / (W * H)) % A;
int n = i / (W * H * A);
sum_probs_data[i] = 0.0;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = n * (H * W * D) + c * (H * W) + y * W + x;
sum_probs_data[i] += (Ydata[idx] * dYdata[idx]);
}
}
}
__global__ void SubSumKernel(
const int N, const int A, const int W, const int H,
const float* sum_probs_data, float* dXdata, const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * (A * num_classes) * W * H) {
int D = num_classes * A;
int x = i % W;
int y = (i / W) % H;
int a = ((i / (W * H)) % D) / num_classes;
int n = i / W / H / D;
int idx = n * (H * W * A) + a * (H * W) + y * W + x;
dXdata[i] = (dXdata[i] - sum_probs_data[idx]);
}
}
} // namespace
template <>
bool GroupSpatialSoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto* P = Output(0); // Probabilities from softmax
int N = X.dim32(0);
int D = X.dim32(1);
int H = X.dim32(2);
int W = X.dim32(3);
int A = D / num_classes_;
P->ResizeLike(X);
DCHECK_EQ(X.ndim(), 4);
const float* Xdata = X.data<float>();
float* Pdata = P->mutable_data<float>();
// Softmax for each x,y location
hipLaunchKernelGGL(( GroupSpatialSoftmaxKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, A, W, H, Xdata, Pdata, num_classes_);
return true;
}
template<>
bool GroupSpatialSoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0); // Probabilities from softmax
auto& dY = Input(1);
auto* dX = Output(0);
DCHECK_EQ(Y.ndim(), 4);
int N = Y.dim32(0);
int D = Y.dim32(1);
int H = Y.dim32(2);
int W = Y.dim32(3);
int A = D / num_classes_;
dX->ResizeLike(Y);
if (sum_probs_.size() != N * A * H * W) {
ReinitializeTensor(&sum_probs_, {N * A * H * W}, at::dtype<float>().device(CUDA));
}
const float* Ydata = Y.data<float>();
const float* dYdata = dY.data<float>();
float* dXdata = dX->mutable_data<float>();
float* sum_probs_data = sum_probs_.mutable_data<float>();
math::Set<float, CUDAContext>(
sum_probs_.size(), 0.0f, sum_probs_data, &context_);
// Complete math:
// J_ij = h_i (delta_ij - h_j)
// d x_i = sum_j d h_ij = sum_j J_ij * dy_j
// = sum_j h_i (delta_ij - h_j) * dy_j
// = h_i dy_i - (sum_j h_i h_j dy_j)
// = h_i dy_i - h_i sum_j h_j dy_j
// Step 0: dx = dy
context_.Copy<float, CUDAContext, CUDAContext>(Y.size(), dYdata, dXdata);
// Step 1: s = Sum(dY[j] * Y[j])
hipLaunchKernelGGL(( SumProbsKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
N, A, W, H, Ydata, dYdata, sum_probs_data, num_classes_);
// Step 2: dX[i] = dX[i] - s
hipLaunchKernelGGL(( SubSumKernel), dim3(CAFFE_GET_BLOCKS(Y.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
N, A, W, H, sum_probs_.data<float>(), dXdata, num_classes_);
// Step 3: dX[i] = Y[i] * dX[i]
math::Mul<float, CUDAContext>(Y.size(), dXdata, Ydata, dXdata, &context_);
return true;
}
REGISTER_CUDA_OPERATOR(GroupSpatialSoftmax,
GroupSpatialSoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(GroupSpatialSoftmaxGradient,
GroupSpatialSoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
|
c407a384d1fb4b24b0f89b425d335a547adee7bd.cu
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "group_spatial_softmax_op.h"
namespace caffe2 {
namespace {
__global__ void GroupSpatialSoftmaxKernel(const int num, const int A, const int W,
const int H, const float* Xdata, float* Pdata, const int num_classes) {
// Loop through labels (N x A x H x W)
CUDA_1D_KERNEL_LOOP(index, num * A * H * W) {
int D = num_classes * A;
int x = index % W;
int y = (index / W) % H;
int a = (index / (W * H)) % A;
int i = index / W / H / A;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = max(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = exp(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
__global__ void SumProbsKernel(const int N, const int A, const int W,
const int H, const float* Ydata, const float* dYdata,
float* sum_probs_data, const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * A * W * H) {
int D = num_classes * A;
int x = i % W;
int y = (i / W) % H;
int a = (i / (W * H)) % A;
int n = i / (W * H * A);
sum_probs_data[i] = 0.0;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = n * (H * W * D) + c * (H * W) + y * W + x;
sum_probs_data[i] += (Ydata[idx] * dYdata[idx]);
}
}
}
__global__ void SubSumKernel(
const int N, const int A, const int W, const int H,
const float* sum_probs_data, float* dXdata, const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * (A * num_classes) * W * H) {
int D = num_classes * A;
int x = i % W;
int y = (i / W) % H;
int a = ((i / (W * H)) % D) / num_classes;
int n = i / W / H / D;
int idx = n * (H * W * A) + a * (H * W) + y * W + x;
dXdata[i] = (dXdata[i] - sum_probs_data[idx]);
}
}
} // namespace
template <>
bool GroupSpatialSoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto* P = Output(0); // Probabilities from softmax
int N = X.dim32(0);
int D = X.dim32(1);
int H = X.dim32(2);
int W = X.dim32(3);
int A = D / num_classes_;
P->ResizeLike(X);
DCHECK_EQ(X.ndim(), 4);
const float* Xdata = X.data<float>();
float* Pdata = P->mutable_data<float>();
// Softmax for each x,y location
GroupSpatialSoftmaxKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, A, W, H, Xdata, Pdata, num_classes_);
return true;
}
template<>
bool GroupSpatialSoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0); // Probabilities from softmax
auto& dY = Input(1);
auto* dX = Output(0);
DCHECK_EQ(Y.ndim(), 4);
int N = Y.dim32(0);
int D = Y.dim32(1);
int H = Y.dim32(2);
int W = Y.dim32(3);
int A = D / num_classes_;
dX->ResizeLike(Y);
if (sum_probs_.size() != N * A * H * W) {
ReinitializeTensor(&sum_probs_, {N * A * H * W}, at::dtype<float>().device(CUDA));
}
const float* Ydata = Y.data<float>();
const float* dYdata = dY.data<float>();
float* dXdata = dX->mutable_data<float>();
float* sum_probs_data = sum_probs_.mutable_data<float>();
math::Set<float, CUDAContext>(
sum_probs_.size(), 0.0f, sum_probs_data, &context_);
// Complete math:
// J_ij = h_i (delta_ij - h_j)
// d x_i = sum_j d h_ij = sum_j J_ij * dy_j
// = sum_j h_i (delta_ij - h_j) * dy_j
// = h_i dy_i - (sum_j h_i h_j dy_j)
// = h_i dy_i - h_i sum_j h_j dy_j
// Step 0: dx = dy
context_.Copy<float, CUDAContext, CUDAContext>(Y.size(), dYdata, dXdata);
// Step 1: s = Sum(dY[j] * Y[j])
SumProbsKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
N, A, W, H, Ydata, dYdata, sum_probs_data, num_classes_);
// Step 2: dX[i] = dX[i] - s
SubSumKernel<<<CAFFE_GET_BLOCKS(Y.size()), CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
N, A, W, H, sum_probs_.data<float>(), dXdata, num_classes_);
// Step 3: dX[i] = Y[i] * dX[i]
math::Mul<float, CUDAContext>(Y.size(), dXdata, Ydata, dXdata, &context_);
return true;
}
REGISTER_CUDA_OPERATOR(GroupSpatialSoftmax,
GroupSpatialSoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(GroupSpatialSoftmaxGradient,
GroupSpatialSoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
|
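Editor's note: the two files above implement the spatial softmax and its gradient dX = Y * (dY - sum_j Y_j dY_j) in three kernel steps. The standalone host-side sketch below (plain C++, not part of either file; the values are illustrative only) works that identity through for a single spatial location, which may help when checking the kernels.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
    const int C = 4;                          // num_classes for one anchor at one (x, y)
    double x[C]  = {0.5, -1.0, 2.0, 0.0};     // logits
    double dY[C] = {0.1, 0.2, -0.3, 0.4};     // upstream gradient
    double y[C], dX[C];

    // Softmax with max subtraction, mirroring GroupSpatialSoftmaxKernel.
    double mx = *std::max_element(x, x + C);
    double expsum = 0.0;
    for (int c = 0; c < C; ++c) { y[c] = std::exp(x[c] - mx); expsum += y[c]; }
    for (int c = 0; c < C; ++c) y[c] /= expsum;

    // Gradient in the same three steps as the kernels:
    // Step 1 (SumProbsKernel): s = sum_j Y_j * dY_j
    // Step 2 (SubSumKernel):   dX_i = dY_i - s
    // Step 3 (math::Mul):      dX_i = Y_i * dX_i
    double s = 0.0;
    for (int c = 0; c < C; ++c) s += y[c] * dY[c];
    for (int c = 0; c < C; ++c) dX[c] = y[c] * (dY[c] - s);

    for (int c = 0; c < C; ++c) printf("y[%d]=%f dX[%d]=%f\n", c, y[c], c, dX[c]);
    return 0;
}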
95258f8bac62e7c39f10326a83cbaba2c69d2ebd.hip
|
// !!! This is a file automatically generated by hipify!!!
// Includes CUDA
#include <hip/hip_runtime.h>
// Utilities and timing functions
#include <hip/hip_runtime.h> // includes cuda.h and hip/hip_runtime_api.h
#include "device_launch_parameters.h"
#include <hip/hip_runtime_api.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <iostream>
#include "opencv2/opencv.hpp"
using namespace cv;
const char *photoFilename = "data/photo.jpg";
const char *frameFilename = "data/frame.jpg";
const char *resultFilename = "data/result.jpg";
__global__ void ZoomKernel(double scale_x, double scale_y, uchar3 *origin, uchar3 *result, int width, int height, int size){
unsigned int col = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int row = threadIdx.y + blockDim.y*blockIdx.y;
int sy = (int)row * scale_y;
sy = (sy < (height - 1)) ? sy : (height - 1);
int sx = (int)col * scale_x;
sx = (sx < (width - 1)) ? sx : (width - 1);
result[row*size + col].x = origin[sy*width + sx].x;
result[row*size + col].y = origin[sy*width + sx].y;
result[row*size + col].z = origin[sy*width + sx].z;
}
void Zoom(Mat photo, Mat smallphoto){
size_t d_smallphoto_size = smallphoto.cols*smallphoto.rows * sizeof(uchar3);
uchar3 *d_smallphoto = NULL;
hipMalloc(&d_smallphoto, d_smallphoto_size);
size_t d_photo_size = photo.cols*photo.rows*sizeof(uchar3);
uchar3 *d_photo = NULL;
hipMalloc(&d_photo, d_photo_size);
hipMemcpy(d_photo, photo.data, d_photo_size, hipMemcpyHostToDevice);
double scale_x = (double)photo.cols / smallphoto.cols;
double scale_y = (double)photo.rows / smallphoto.rows;
dim3 dimBlock(32, 32);
dim3 dimGrid((smallphoto.rows + dimBlock.x - 1) / dimBlock.x, (smallphoto.cols + dimBlock.y - 1) / dimBlock.y);
ZoomKernel << <dimBlock, dimGrid >> >(scale_x, scale_y, d_photo, d_smallphoto, photo.cols, photo.rows, smallphoto.cols);
hipMemcpy(smallphoto.data, d_smallphoto, d_smallphoto_size, hipMemcpyDeviceToHost);
hipFree(d_smallphoto);
hipFree(d_photo);
}
__global__ void GrayKernel(uchar1 *result, uchar3 *origin, int width){
unsigned int col = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int row = threadIdx.y + blockDim.y*blockIdx.y;
result[row*width + col].x = origin[row*width + col].x*0.11 + origin[row*width + col].y*0.59 + origin[row*width + col].z*0.3;
}
void ConvertGray(Mat framegray, Mat frame){
size_t d_framegray_size = framegray.cols*framegray.rows * sizeof(uchar1);
uchar1 *d_framegray = NULL;
hipMalloc(&d_framegray, d_framegray_size);
size_t d_frame_size = frame.cols*frame.rows*sizeof(uchar3);
uchar3 *d_frame = NULL;
hipMalloc(&d_frame, d_frame_size);
hipMemcpy(d_frame, frame.data, d_frame_size, hipMemcpyHostToDevice);
dim3 dimBlock(32, 32);
dim3 dimGrid((frame.cols + dimBlock.x - 1) / dimBlock.x, (frame.rows + dimBlock.y - 1) / dimBlock.y);
GrayKernel << <dimBlock, dimGrid >> >(d_framegray, d_frame, frame.cols);
hipMemcpy(framegray.data, d_framegray, d_framegray_size, hipMemcpyDeviceToHost);
hipError_t err = hipSuccess;
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed! (error code %s)!\n", hipGetErrorString(err));
}
hipFree(d_frame);
hipFree(d_framegray);
}
__global__ void CoverKernel(uchar3 *result, uchar3 *smallphoto, uchar1 *gray, int photowidth, int framewidth, int startX, int startY){
unsigned int col = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int row = threadIdx.y + blockDim.y*blockIdx.y;
if (col < 350 && row < 350){
if (gray[startY*framewidth + startX + row*framewidth + col].x < 20){
result[startY*framewidth + startX + row*framewidth + col].x = smallphoto[row*photowidth + col].x;
result[startY*framewidth + startX + row*framewidth + col].y = smallphoto[row*photowidth + col].y;
result[startY*framewidth + startX + row*framewidth + col].z = smallphoto[row*photowidth + col].z;
}
}
}
void cover(Mat frame, Mat smallphoto, Mat framegray,int startX,int startY,int width){
size_t d_frame_size = frame.cols*frame.rows*sizeof(uchar3);
uchar3 *d_frame = NULL;
hipMalloc(&d_frame, d_frame_size);
hipMemcpy(d_frame, frame.data, d_frame_size, hipMemcpyHostToDevice);
size_t d_framegray_size = framegray.cols*framegray.rows*sizeof(uchar1);
uchar1 *d_framegray = NULL;
hipMalloc(&d_framegray, d_framegray_size);
hipMemcpy(d_framegray, framegray.data, d_framegray_size, hipMemcpyHostToDevice);
size_t d_smallphoto_size = smallphoto.cols*smallphoto.rows*sizeof(uchar3);
uchar3 *d_smallphoto = NULL;
hipMalloc(&d_smallphoto, d_smallphoto_size);
hipMemcpy(d_smallphoto, smallphoto.data, d_smallphoto_size, hipMemcpyHostToDevice);
dim3 dimBlock(32, 32);
dim3 dimGrid((smallphoto.rows + dimBlock.x - 1) / dimBlock.x, (smallphoto.cols + dimBlock.y - 1) / dimBlock.y);
CoverKernel << <dimBlock, dimGrid >> >(d_frame, d_smallphoto, d_framegray, smallphoto.cols, frame.cols, startX, startY);
hipMemcpy(frame.data, d_frame, d_frame_size, hipMemcpyDeviceToHost);
hipFree(d_frame);
hipFree(d_framegray);
hipFree(d_smallphoto);
}
int main(){
cv::Mat photo,smallphoto, frame, framegray, result;
photo = cv::imread(photoFilename);
frame = cv::imread(frameFilename,1);
framegray = cv::Mat(frame.size(), CV_8UC1, cv::Scalar::all(0));
smallphoto = cv::Mat(cv::Size(350, 350), photo.type(), cv::Scalar::all(0));
Zoom(photo, smallphoto);
ConvertGray(framegray, frame);
cover(frame, smallphoto, framegray, 180, 125, 350);
cvNamedWindow("gray");
imshow("gray", framegray);
cvNamedWindow("frame");
imshow("frame", frame);
cvNamedWindow("small");
cv::imshow("small", smallphoto);
cv::waitKey();
return 0;
}
|
95258f8bac62e7c39f10326a83cbaba2c69d2ebd.cu
|
// Includes CUDA
#include <cuda_runtime.h>
// Utilities and timing functions
#include <cuda.h> // includes cuda.h and cuda_runtime_api.h
#include "device_launch_parameters.h"
#include <cuda_runtime_api.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <iostream>
#include "opencv2/opencv.hpp"
using namespace cv;
const char *photoFilename = "data/photo.jpg";
const char *frameFilename = "data/frame.jpg";
const char *resultFilename = "data/result.jpg";
__global__ void ZoomKernel(double scale_x, double scale_y, uchar3 *origin, uchar3 *result, int width, int height, int size){
unsigned int col = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int row = threadIdx.y + blockDim.y*blockIdx.y;
int sy = (int)row * scale_y;
sy = (sy < (height - 1)) ? sy : (height - 1);
int sx = (int)col * scale_x;
sx = (sx < (width - 1)) ? sx : (width - 1);
result[row*size + col].x = origin[sy*width + sx].x;
result[row*size + col].y = origin[sy*width + sx].y;
result[row*size + col].z = origin[sy*width + sx].z;
}
void Zoom(Mat photo, Mat smallphoto){
size_t d_smallphoto_size = smallphoto.cols*smallphoto.rows * sizeof(uchar3);
uchar3 *d_smallphoto = NULL;
cudaMalloc(&d_smallphoto, d_smallphoto_size);
size_t d_photo_size = photo.cols*photo.rows*sizeof(uchar3);
uchar3 *d_photo = NULL;
cudaMalloc(&d_photo, d_photo_size);
cudaMemcpy(d_photo, photo.data, d_photo_size, cudaMemcpyHostToDevice);
double scale_x = (double)photo.cols / smallphoto.cols;
double scale_y = (double)photo.rows / smallphoto.rows;
dim3 dimBlock(32, 32);
dim3 dimGrid((smallphoto.rows + dimBlock.x - 1) / dimBlock.x, (smallphoto.cols + dimBlock.y - 1) / dimBlock.y);
ZoomKernel << <dimBlock, dimGrid >> >(scale_x, scale_y, d_photo, d_smallphoto, photo.cols, photo.rows, smallphoto.cols);
cudaMemcpy(smallphoto.data, d_smallphoto, d_smallphoto_size, cudaMemcpyDeviceToHost);
cudaFree(d_smallphoto);
cudaFree(d_photo);
}
__global__ void GrayKernel(uchar1 *result, uchar3 *origin, int width){
unsigned int col = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int row = threadIdx.y + blockDim.y*blockIdx.y;
result[row*width + col].x = origin[row*width + col].x*0.11 + origin[row*width + col].y*0.59 + origin[row*width + col].z*0.3;
}
void ConvertGray(Mat framegray, Mat frame){
size_t d_framegray_size = framegray.cols*framegray.rows * sizeof(uchar1);
uchar1 *d_framegray = NULL;
cudaMalloc(&d_framegray, d_framegray_size);
size_t d_frame_size = frame.cols*frame.rows*sizeof(uchar3);
uchar3 *d_frame = NULL;
cudaMalloc(&d_frame, d_frame_size);
cudaMemcpy(d_frame, frame.data, d_frame_size, cudaMemcpyHostToDevice);
dim3 dimBlock(32, 32);
dim3 dimGrid((frame.cols + dimBlock.x - 1) / dimBlock.x, (frame.rows + dimBlock.y - 1) / dimBlock.y);
GrayKernel << <dimBlock, dimGrid >> >(d_framegray, d_frame, frame.cols);
cudaMemcpy(framegray.data, d_framegray, d_framegray_size, cudaMemcpyDeviceToHost);
cudaError_t err = cudaSuccess;
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed! (error code %s)!\n", cudaGetErrorString(err));
}
cudaFree(d_frame);
cudaFree(d_framegray);
}
__global__ void CoverKernel(uchar3 *result, uchar3 *smallphoto, uchar1 *gray, int photowidth, int framewidth, int startX, int startY){
unsigned int col = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int row = threadIdx.y + blockDim.y*blockIdx.y;
if (col < 350 && row < 350){
if (gray[startY*framewidth + startX + row*framewidth + col].x < 20){
result[startY*framewidth + startX + row*framewidth + col].x = smallphoto[row*photowidth + col].x;
result[startY*framewidth + startX + row*framewidth + col].y = smallphoto[row*photowidth + col].y;
result[startY*framewidth + startX + row*framewidth + col].z = smallphoto[row*photowidth + col].z;
}
}
}
void cover(Mat frame, Mat smallphoto, Mat framegray,int startX,int startY,int width){
size_t d_frame_size = frame.cols*frame.rows*sizeof(uchar3);
uchar3 *d_frame = NULL;
cudaMalloc(&d_frame, d_frame_size);
cudaMemcpy(d_frame, frame.data, d_frame_size, cudaMemcpyHostToDevice);
size_t d_framegray_size = framegray.cols*framegray.rows*sizeof(uchar1);
uchar1 *d_framegray = NULL;
cudaMalloc(&d_framegray, d_framegray_size);
cudaMemcpy(d_framegray, framegray.data, d_framegray_size, cudaMemcpyHostToDevice);
size_t d_smallphoto_size = smallphoto.cols*smallphoto.rows*sizeof(uchar3);
uchar3 *d_smallphoto = NULL;
cudaMalloc(&d_smallphoto, d_smallphoto_size);
cudaMemcpy(d_smallphoto, smallphoto.data, d_smallphoto_size, cudaMemcpyHostToDevice);
dim3 dimBlock(32, 32);
dim3 dimGrid((smallphoto.rows + dimBlock.x - 1) / dimBlock.x, (smallphoto.cols + dimBlock.y - 1) / dimBlock.y);
CoverKernel << <dimBlock, dimGrid >> >(d_frame, d_smallphoto, d_framegray, smallphoto.cols, frame.cols, startX, startY);
cudaMemcpy(frame.data, d_frame, d_frame_size, cudaMemcpyDeviceToHost);
cudaFree(d_frame);
cudaFree(d_framegray);
cudaFree(d_smallphoto);
}
int main(){
cv::Mat photo,smallphoto, frame, framegray, result;
photo = cv::imread(photoFilename);
frame = cv::imread(frameFilename,1);
framegray = cv::Mat(frame.size(), CV_8UC1, cv::Scalar::all(0));
smallphoto = cv::Mat(cv::Size(350, 350), photo.type(), cv::Scalar::all(0));
Zoom(photo, smallphoto);
ConvertGray(framegray, frame);
cover(frame, smallphoto, framegray, 180, 125, 350);
cvNamedWindow("gray");
imshow("gray", framegray);
cvNamedWindow("frame");
imshow("frame", frame);
cvNamedWindow("small");
cv::imshow("small", smallphoto);
cv::waitKey();
return 0;
}
|
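Editor's note: in ZoomKernel above the grid is rounded up to whole 32x32 blocks, so for a 350x350 destination a handful of threads fall outside the image and can write past the end of result. The kernel below is an illustrative sketch (plain CUDA, not part of either file; the parameter names are mine) of the same nearest-neighbour mapping with an explicit bounds guard.

#include <cuda_runtime.h>

__global__ void ZoomKernelGuarded(double scale_x, double scale_y,
                                  const uchar3 *origin, uchar3 *result,
                                  int src_w, int src_h, int dst_w, int dst_h) {
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (col >= dst_w || row >= dst_h) return;       // skip threads in the ragged last blocks
    int sy = min((int)(row * scale_y), src_h - 1);  // nearest source row
    int sx = min((int)(col * scale_x), src_w - 1);  // nearest source column
    result[row * dst_w + col] = origin[sy * src_w + sx];
}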
test_main.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include "aes.h"
/*
void nouveau128( int argc, char *argv[] ) {
uchar4 *h_round_keys;
uchar4 *d_cipher_key, *d_round_keys;
uint8_t *d_sbox, *d_inv_sbox;
char *h_file;
char *d_file;
size_t file_size;
// Generates a 128-bits cipher-key from two uint64_t
d_cipher_key = d_generateCipherKey128( 0x95A8EE8E89979B9E,
0xFDCBC6EB9797528D );
// Loads file from disk
file_size = loadFileIntoMemory(&h_file, argv[1]);
// Allocates memory for various resources
h_round_keys = (uchar4 *) malloc(11 * 4 * sizeof(uchar4));
hipMalloc((void **) &d_round_keys, 11 * 4 * sizeof(uchar4));
hipMalloc((void **) &d_sbox, 256 * sizeof(uint8_t));
hipMalloc((void **) &d_inv_sbox, 256 * sizeof(uint8_t));
hipMalloc((void **) &d_file, file_size * sizeof(char));
// Copies memory to the device
hipMemcpy(d_sbox, s_box, 256 * sizeof(uint8_t), hipMemcpyHostToDevice);
hipMemcpy(d_inv_sbox, inv_s_box, 256 * sizeof(uint8_t), hipMemcpyHostToDevice);
hipMemcpy(d_file, h_file, file_size * sizeof(char), hipMemcpyHostToDevice);
// Generates the round keys, storing them on the global memory
d_round_keys = d_expandKey128( d_cipher_key, d_sbox );
// Encrypts the file
h_file = encryptHostToHost128(h_file, file_size, d_sbox, d_round_keys);
// Writes the encrypted file to disk
writeToFile(h_file, argv[2], file_size);
// Decrypts the file
h_file = decryptHostToHost128(h_file, file_size, d_inv_sbox, d_round_keys);
// Writes the decrypted file to disk
writeToFile(h_file, argv[3], file_size);
// Frees up memory that is not used anymore
free(h_round_keys);
free(h_file);
hipFree(d_cipher_key);
hipFree(d_round_keys);
hipFree(d_inv_sbox);
hipFree(d_sbox);
hipFree(d_file);
}
void nouveau192( int argc, char *argv[] ) {
uchar4 *h_round_keys;
uchar4 *d_cipher_key, *d_round_keys;
uint8_t *d_sbox, *d_inv_sbox;
char *h_file;
char *d_file;
size_t file_size;
// Generates a 192-bits cipher-key from three uint64_t
d_cipher_key = d_generateCipherKey192( 0x95A8EE8E89979B9E,
0xFDCBC6EB9797528D,
0x432DC26061553818 );
// Loads file from disk
file_size = loadFileIntoMemory(&h_file, argv[1]);
// Allocates memory for various resources
h_round_keys = (uchar4 *) malloc(13 * 6 * sizeof(uchar4));
hipMalloc((void **) &d_round_keys, 13 * 6 * sizeof(uchar4));
hipMalloc((void **) &d_sbox, 256 * sizeof(uint8_t));
hipMalloc((void **) &d_inv_sbox, 256 * sizeof(uint8_t));
hipMalloc((void **) &d_file, file_size * sizeof(char));
// Copies memory to the device
hipMemcpy(d_sbox, s_box, 256 * sizeof(uint8_t), hipMemcpyHostToDevice);
hipMemcpy(d_inv_sbox, inv_s_box, 256 * sizeof(uint8_t), hipMemcpyHostToDevice);
hipMemcpy(d_file, h_file, file_size * sizeof(char), hipMemcpyHostToDevice);
// Generates the round keys, storing them on the global memory
d_round_keys = d_expandKey192( d_cipher_key, d_sbox );
// Encrypts the file
h_file = encryptHostToHost192(h_file, file_size, d_sbox, d_round_keys);
// Writes the encrypted file to disk
writeToFile(h_file, argv[2], file_size);
// Decrypts the file
h_file = decryptHostToHost192(h_file, file_size, d_inv_sbox, d_round_keys);
// Writes the decrypted file to disk
writeToFile(h_file, argv[3], file_size);
// Frees up memory that is not used anymore
free(h_round_keys);
free(h_file);
hipFree(d_cipher_key);
hipFree(d_round_keys);
hipFree(d_inv_sbox);
hipFree(d_sbox);
hipFree(d_file);
}
*/
void nouveau256( int argc, char *argv[] ) {
uchar4 *h_round_keys;
uchar4 *d_cipher_key, *d_round_keys;
uint8_t *d_sbox, *d_inv_sbox;
char *h_file;
char *d_file;
size_t file_size;
// Generates 256-bits cipher-key from four uint64_t
d_cipher_key = d_generateCipherKey256( 0x95A8EE8E89979B9E,
0xFDCBC6EB9797528D,
0x432DC26061553818,
0xEA635EC5D5A7727E );
// Loads file from disk
file_size = loadFileIntoMemory(&h_file, argv[1]);
// Allocates memory for various resources
h_round_keys = (uchar4 *) malloc(15 * 8 * sizeof(uchar4));
hipMalloc((void **) &d_round_keys, 15 * 8 * sizeof(uchar4));
hipMalloc((void **) &d_sbox, 256 * sizeof(uint8_t));
hipMalloc((void **) &d_inv_sbox, 256 * sizeof(uint8_t));
hipMalloc((void **) &d_file, file_size * sizeof(char));
// Copies memory to the device
hipMemcpy(d_sbox, s_box, 256 * sizeof(uint8_t), hipMemcpyHostToDevice);
hipMemcpy(d_inv_sbox, inv_s_box, 256 * sizeof(uint8_t), hipMemcpyHostToDevice);
hipMemcpy(d_file, h_file, file_size * sizeof(char), hipMemcpyHostToDevice);
// Generates the round keys, storing them on the global memory
d_round_keys = d_expandKey256( d_cipher_key, d_sbox );
// Encrypts the file
h_file = encryptHostToHost256(h_file, file_size, d_sbox, d_round_keys);
// Writes the encrypted file to disk
writeToFile(h_file, argv[2], file_size);
// Decrypts the file
h_file = decryptHostToHost256(h_file, file_size, d_inv_sbox, d_round_keys);
// Writes the decrypted file to disk
writeToFile(h_file, argv[3], file_size);
// Frees up memory that is not used anymore
free(h_round_keys);
free(h_file);
hipFree(d_cipher_key);
hipFree(d_round_keys);
hipFree(d_inv_sbox);
hipFree(d_sbox);
hipFree(d_file);
}
int main( int argc, char *argv[] ) {
//nouveau128(argc, argv);
//nouveau192(argc, argv);
nouveau256(argc, argv);
return 0;
}
|
test_main.cu
|
#include <stdlib.h>
#include "aes.h"
/*
void nouveau128( int argc, char *argv[] ) {
uchar4 *h_round_keys;
uchar4 *d_cipher_key, *d_round_keys;
uint8_t *d_sbox, *d_inv_sbox;
char *h_file;
char *d_file;
size_t file_size;
// Generates a 128-bits cipher-key from two uint64_t
d_cipher_key = d_generateCipherKey128( 0x95A8EE8E89979B9E,
0xFDCBC6EB9797528D );
// Loads file from disk
file_size = loadFileIntoMemory(&h_file, argv[1]);
// Allocates memory for various resources
h_round_keys = (uchar4 *) malloc(11 * 4 * sizeof(uchar4));
cudaMalloc((void **) &d_round_keys, 11 * 4 * sizeof(uchar4));
cudaMalloc((void **) &d_sbox, 256 * sizeof(uint8_t));
cudaMalloc((void **) &d_inv_sbox, 256 * sizeof(uint8_t));
cudaMalloc((void **) &d_file, file_size * sizeof(char));
// Copies memory to the device
cudaMemcpy(d_sbox, s_box, 256 * sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_inv_sbox, inv_s_box, 256 * sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_file, h_file, file_size * sizeof(char), cudaMemcpyHostToDevice);
// Generates the round keys, storing them on the global memory
d_round_keys = d_expandKey128( d_cipher_key, d_sbox );
// Encrypts the file
h_file = encryptHostToHost128(h_file, file_size, d_sbox, d_round_keys);
// Writes the encrypted file to disk
writeToFile(h_file, argv[2], file_size);
// Decrypts the file
h_file = decryptHostToHost128(h_file, file_size, d_inv_sbox, d_round_keys);
// Writes the decrypted file to disk
writeToFile(h_file, argv[3], file_size);
// Frees up memory that is not used anymore
free(h_round_keys);
free(h_file);
cudaFree(d_cipher_key);
cudaFree(d_round_keys);
cudaFree(d_inv_sbox);
cudaFree(d_sbox);
cudaFree(d_file);
}
void nouveau192( int argc, char *argv[] ) {
uchar4 *h_round_keys;
uchar4 *d_cipher_key, *d_round_keys;
uint8_t *d_sbox, *d_inv_sbox;
char *h_file;
char *d_file;
size_t file_size;
// Generates a 192-bits cipher-key from three uint64_t
d_cipher_key = d_generateCipherKey192( 0x95A8EE8E89979B9E,
0xFDCBC6EB9797528D,
0x432DC26061553818 );
// Loads file from disk
file_size = loadFileIntoMemory(&h_file, argv[1]);
// Allocates memory for various resources
h_round_keys = (uchar4 *) malloc(13 * 6 * sizeof(uchar4));
cudaMalloc((void **) &d_round_keys, 13 * 6 * sizeof(uchar4));
cudaMalloc((void **) &d_sbox, 256 * sizeof(uint8_t));
cudaMalloc((void **) &d_inv_sbox, 256 * sizeof(uint8_t));
cudaMalloc((void **) &d_file, file_size * sizeof(char));
// Copies memory to the device
cudaMemcpy(d_sbox, s_box, 256 * sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_inv_sbox, inv_s_box, 256 * sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_file, h_file, file_size * sizeof(char), cudaMemcpyHostToDevice);
// Generates the round keys, storing them on the global memory
d_round_keys = d_expandKey192( d_cipher_key, d_sbox );
// Encrypts the file
h_file = encryptHostToHost192(h_file, file_size, d_sbox, d_round_keys);
// Writes the encrypted file to disk
writeToFile(h_file, argv[2], file_size);
// Decrypts the file
h_file = decryptHostToHost192(h_file, file_size, d_inv_sbox, d_round_keys);
// Writes the decrypted file to disk
writeToFile(h_file, argv[3], file_size);
// Frees up memory that is not used anymore
free(h_round_keys);
free(h_file);
cudaFree(d_cipher_key);
cudaFree(d_round_keys);
cudaFree(d_inv_sbox);
cudaFree(d_sbox);
cudaFree(d_file);
}
*/
void nouveau256( int argc, char *argv[] ) {
uchar4 *h_round_keys;
uchar4 *d_cipher_key, *d_round_keys;
uint8_t *d_sbox, *d_inv_sbox;
char *h_file;
char *d_file;
size_t file_size;
// Generates 256-bits cipher-key from four uint64_t
d_cipher_key = d_generateCipherKey256( 0x95A8EE8E89979B9E,
0xFDCBC6EB9797528D,
0x432DC26061553818,
0xEA635EC5D5A7727E );
// Loads file from disk
file_size = loadFileIntoMemory(&h_file, argv[1]);
// Allocates memory for various resources
h_round_keys = (uchar4 *) malloc(15 * 8 * sizeof(uchar4));
cudaMalloc((void **) &d_round_keys, 15 * 8 * sizeof(uchar4));
cudaMalloc((void **) &d_sbox, 256 * sizeof(uint8_t));
cudaMalloc((void **) &d_inv_sbox, 256 * sizeof(uint8_t));
cudaMalloc((void **) &d_file, file_size * sizeof(char));
// Copies memory to the device
cudaMemcpy(d_sbox, s_box, 256 * sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_inv_sbox, inv_s_box, 256 * sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_file, h_file, file_size * sizeof(char), cudaMemcpyHostToDevice);
// Generates the round keys, storing them on the global memory
d_round_keys = d_expandKey256( d_cipher_key, d_sbox );
// Encrypts the file
h_file = encryptHostToHost256(h_file, file_size, d_sbox, d_round_keys);
// Writes the encrypted file to disk
writeToFile(h_file, argv[2], file_size);
// Decrypts the file
h_file = decryptHostToHost256(h_file, file_size, d_inv_sbox, d_round_keys);
// Writes the decrypted file to disk
writeToFile(h_file, argv[3], file_size);
// Frees up memory that is not used anymore
free(h_round_keys);
free(h_file);
cudaFree(d_cipher_key);
cudaFree(d_round_keys);
cudaFree(d_inv_sbox);
cudaFree(d_sbox);
cudaFree(d_file);
}
int main( int argc, char *argv[] ) {
//nouveau128(argc, argv);
//nouveau192(argc, argv);
nouveau256(argc, argv);
return 0;
}
|
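Editor's note: a quick reference for the round-key buffer sizes used above. For AES, Nk = key_bits/32 words, Nr = Nk + 6 rounds, and the key schedule holds Nr + 1 round keys, which is where the factors 11, 13 and 15 in the mallocs come from (this codebase appears to size each round-key slot at Nk uchar4 words, hence 11*4, 13*6 and 15*8). The snippet below is an editor's sketch of that arithmetic, not part of either file.

#include <stdio.h>

int main(void) {
    const int key_bits[3] = {128, 192, 256};
    for (int i = 0; i < 3; ++i) {
        int Nk = key_bits[i] / 32;   // words in the cipher key: 4, 6, 8
        int Nr = Nk + 6;             // rounds: 10, 12, 14
        printf("AES-%d: Nk=%d, Nr=%d, round keys=%d\n",
               key_bits[i], Nk, Nr, Nr + 1);
    }
    return 0;
}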
b01d8138f9f846e4ee0ce077fbefcf8f137ba661.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N (1<<24)
#define THREADS_PER_BLOCK 512
#define BLOCK_NUM (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK // 1<<15 block
void random_floats(float *x, int Num);
__global__ void kernel1(float *a, float *b, float *out, int n);
__global__ void kernel2WithAtomicOp(float *a, float *b, float *out, int n);
int main(void){
printf("N: %d, block num: %d\n", N, BLOCK_NUM);
// initialization
float *a, *b, *reduce, *sum;
a = (float *)malloc(N * sizeof(float));
random_floats(a, N);
b = (float *)malloc(N * sizeof(float));
random_floats(b, N);
reduce = (float *)malloc(BLOCK_NUM * sizeof(float));
sum = (float *)malloc(sizeof(float));
*sum = 0;
// hipEvent initialization
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// create space on gpu side
float *da, *db, *dreduce, *dsum;
hipMalloc((void **)&da, N * sizeof(float));
hipMalloc((void **)&db, N * sizeof(float));
hipMalloc((void **)&dreduce, BLOCK_NUM * sizeof(float));
hipMalloc((void **)&dsum, sizeof(float));
// copy from cpu to gpu
hipMemcpy(da, a, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(db, b, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dsum, sum, sizeof(float), hipMemcpyHostToDevice);
// kernel1: shared memory + parallel reduction
hipEventRecord(start);
hipLaunchKernelGGL(( kernel1), dim3(BLOCK_NUM), dim3(THREADS_PER_BLOCK), 0, 0, da, db, dreduce, N);
hipEventRecord(stop);
//// copy back to cpu
hipMemcpy(reduce, dreduce, BLOCK_NUM*sizeof(float), hipMemcpyDeviceToHost);
//// add up all elements in reduce
*sum = 0;
for (int i = 0; i < BLOCK_NUM; i++)
*sum += reduce[i];
printf("result from Kernel1 with sum on CPU side: %f\n", *sum);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("kernel 1 execution time: %f milliseconds\n\n", milliseconds);
// kernel2: shared memory + parallel reduction + atomic operation
hipEventRecord(start);
hipLaunchKernelGGL(( kernel2WithAtomicOp), dim3(BLOCK_NUM), dim3(THREADS_PER_BLOCK), 0, 0, da, db, dsum, N);
hipEventRecord(stop);
//// copy back to cpu
hipMemcpy(sum, dsum, sizeof(float), hipMemcpyDeviceToHost);
printf("result from Kernel2 with sum on GPU side: %f\n", *sum);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("kernel 2 execution time: %f milliseconds\n\n", milliseconds);
free(a); free(b); free(reduce); free(sum);
hipFree(da); hipFree(db); hipFree(dreduce); hipFree(dsum);
hipEventDestroy(start); hipEventDestroy(stop);
return 0;
}
void random_floats(float *x, int Num)
{
for (int i = 0; i < Num; i++)
{
x[i] = (float)rand() / RAND_MAX;
}
}
__global__ void kernel1(float *a, float *b, float *out, int n){
__shared__ float sdata[THREADS_PER_BLOCK];
int tid = threadIdx.x;
int index = threadIdx.x + blockIdx.x * blockDim.x;
sdata[tid] = 0.0;
if (index < n)
sdata[tid] = a[index] * b[index];
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2){
int ix = 2 * s * tid;
if (ix < blockDim.x)
sdata[ix] += sdata[ix+s];
__syncthreads();
}
if (tid == 0) out[blockIdx.x] = sdata[0];
}
__global__ void kernel2WithAtomicOp(float *a, float *b, float *out, int n){
__shared__ float sdata[THREADS_PER_BLOCK];
int tid = threadIdx.x;
int index = threadIdx.x + blockIdx.x * blockDim.x;
sdata[tid] = 0.0;
if (index < n)
sdata[tid] = a[index] * b[index];
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2){
int ix = 2 * s * tid;
if (ix < blockDim.x)
sdata[ix] += sdata[ix+s];
__syncthreads();
}
if (tid == 0)
atomicAdd(out, sdata[0]);
}
|
b01d8138f9f846e4ee0ce077fbefcf8f137ba661.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N (1<<24)
#define THREADS_PER_BLOCK 512
#define BLOCK_NUM (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK // 1<<15 block
void random_floats(float *x, int Num);
__global__ void kernel1(float *a, float *b, float *out, int n);
__global__ void kernel2WithAtomicOp(float *a, float *b, float *out, int n);
int main(void){
printf("N: %d, block num: %d\n", N, BLOCK_NUM);
// initialization
float *a, *b, *reduce, *sum;
a = (float *)malloc(N * sizeof(float));
random_floats(a, N);
b = (float *)malloc(N * sizeof(float));
random_floats(b, N);
reduce = (float *)malloc(BLOCK_NUM * sizeof(float));
sum = (float *)malloc(sizeof(float));
*sum = 0;
// cudaEvent initialization
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// create space on gpu side
float *da, *db, *dreduce, *dsum;
cudaMalloc((void **)&da, N * sizeof(float));
cudaMalloc((void **)&db, N * sizeof(float));
cudaMalloc((void **)&dreduce, BLOCK_NUM * sizeof(float));
cudaMalloc((void **)&dsum, sizeof(float));
// copy from cpu to gpu
cudaMemcpy(da, a, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(db, b, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dsum, sum, sizeof(float), cudaMemcpyHostToDevice);
// kernel1: shared memory + parallel reduction
cudaEventRecord(start);
kernel1<<<BLOCK_NUM, THREADS_PER_BLOCK>>>(da, db, dreduce, N);
cudaEventRecord(stop);
//// copy back to cpu
cudaMemcpy(reduce, dreduce, BLOCK_NUM*sizeof(float), cudaMemcpyDeviceToHost);
//// add up all elements in reduce
*sum = 0;
for (int i = 0; i < BLOCK_NUM; i++)
*sum += reduce[i];
printf("result from Kernel1 with sum on CPU side: %f\n", *sum);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("kernel 1 execution time: %f milliseconds\n\n", milliseconds);
// kernel2: shared memory + parallel reduction + atomic operation
cudaEventRecord(start);
kernel2WithAtomicOp<<<BLOCK_NUM, THREADS_PER_BLOCK>>>(da, db, dsum, N);
cudaEventRecord(stop);
//// copy back to cpu
cudaMemcpy(sum, dsum, sizeof(float), cudaMemcpyDeviceToHost);
printf("result from Kernel2 with sum on GPU side: %f\n", *sum);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("kernel 2 execution time: %f milliseconds\n\n", milliseconds);
free(a); free(b); free(reduce); free(sum);
cudaFree(da); cudaFree(db); cudaFree(dreduce); cudaFree(dsum);
cudaEventDestroy(start); cudaEventDestroy(stop);
return 0;
}
void random_floats(float *x, int Num)
{
for (int i = 0; i < Num; i++)
{
x[i] = (float)rand() / RAND_MAX;
}
}
__global__ void kernel1(float *a, float *b, float *out, int n){
__shared__ float sdata[THREADS_PER_BLOCK];
int tid = threadIdx.x;
int index = threadIdx.x + blockIdx.x * blockDim.x;
sdata[tid] = 0.0;
if (index < n)
sdata[tid] = a[index] * b[index];
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2){
int ix = 2 * s * tid;
if (ix < blockDim.x)
sdata[ix] += sdata[ix+s];
__syncthreads();
}
if (tid == 0) out[blockIdx.x] = sdata[0];
}
__global__ void kernel2WithAtomicOp(float *a, float *b, float *out, int n){
__shared__ float sdata[THREADS_PER_BLOCK];
int tid = threadIdx.x;
int index = threadIdx.x + blockIdx.x * blockDim.x;
sdata[tid] = 0.0;
if (index < n)
sdata[tid] = a[index] * b[index];
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2){
int ix = 2 * s * tid;
if (ix < blockDim.x)
sdata[ix] += sdata[ix+s];
__syncthreads();
}
if (tid == 0)
atomicAdd(out, sdata[0]);
}
|
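Editor's note: a host-side reference for the dot product the two kernels above compute, useful for sanity-checking the printed sums. This is an editor's sketch, not part of either file; it reuses the default rand() stream in the same fill order (all of a, then all of b) and accumulates in double, so it agrees with the single-precision GPU result only to within rounding.

#include <cstdio>
#include <cstdlib>

int main() {
    const int n = 1 << 24;
    float *a = (float *)malloc(n * sizeof(float));
    float *b = (float *)malloc(n * sizeof(float));
    for (int i = 0; i < n; ++i) a[i] = (float)rand() / RAND_MAX;  // same order as random_floats(a, N)
    for (int i = 0; i < n; ++i) b[i] = (float)rand() / RAND_MAX;  // then random_floats(b, N)
    double sum = 0.0;                        // double accumulator to limit rounding error
    for (int i = 0; i < n; ++i) sum += (double)a[i] * (double)b[i];
    printf("reference dot product: %f\n", sum);
    free(a);
    free(b);
    return 0;
}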
84ae49037ba755f2fbc4699f4e7c50cebb78551f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Advanced quicksort (from nvidia sample code cuda 8.0)
1. A small-set insertion sort. We do this on any set with <=32 elements
2. A partitioning kernel, which - given a pivot - separates an input
array into elements <=pivot, and > pivot. Two quicksorts will then
be launched to resolve each of these.
3. A quicksort co-ordinator, which figures out what kernels to launch
and when.
*/
#include <thrust/random.h>
#include <thrust/device_vector.h>
#include <hip/hip_cooperative_groups.h>
namespace cg = cooperative_groups;
#include "helper_cuda.h"
#include "helper_string.h"
#include "cdpQuicksort.h"
/* -- Inline PTX call to return index of highest non-zero bit in a word -- */
static __device__ __forceinline__ unsigned int __qsflo(unsigned int word)
{
unsigned int ret;
asm volatile("bfind.u32 %0, %1;" : "=r"(ret) : "r"(word));
return ret;
}
/*-- ringbufAlloc
Allocates from a ringbuffer. Allows for not failing when we run out
of stack for tracking the offset counts for each sort subsection.
We use the atomicMax trick to allow out-of-order retirement. If we
hit the size limit on the ringbuffer, then we spin-wait for people
to complete.
*/
template< typename T >
static __device__ T *ringbufAlloc(qsortRingbuf *ringbuf)
{
// Wait for there to be space in the ring buffer. We'll retry only a fixed
// number of times and then fail, to avoid an out-of-memory deadlock.
unsigned int loop = 10000;
while (((ringbuf->head - ringbuf->tail) >= ringbuf->stacksize) && (loop-- > 0));
if (loop == 0)
return NULL;
// Note that element includes a little index book-keeping, for freeing later.
unsigned int index = atomicAdd((unsigned int *) &ringbuf->head, 1);
T *ret = (T *)(ringbuf->stackbase) + (index & (ringbuf->stacksize-1));
ret->index = index;
return ret;
}
/*-- ringbufFree
Releases an element from the ring buffer. If every element is released
up to and including this one, we can advance the tail to indicate that
space is now available.
*/
template< typename T >
static __device__ void ringbufFree(qsortRingbuf *ringbuf, T *data)
{
unsigned int index = data->index; // Non-wrapped index to free
unsigned int count = atomicAdd((unsigned int *)&(ringbuf->count), 1) + 1;
unsigned int max = atomicMax((unsigned int *)&(ringbuf->max), index + 1);
// Update the tail if need be. Note we update "max" to be the new value in ringbuf->max
if (max < (index+1)) max = index + 1;
if (max == count)
atomicMax((unsigned int *)&(ringbuf->tail), count);
}
/*-- qsort_warp
Simplest possible implementation, does a per-warp quicksort with no inter-warp
communication. This has a high atomic issue rate, but the rest should actually
be fairly quick because of low work per thread.
A warp finds its section of the data, then writes all data <pivot to one
buffer and all data >pivot to the other. Atomics are used to get a unique
section of the buffer.
Obvious optimisation: do multiple chunks per warp, to increase in-flight loads
and cover the instruction overhead.
*/
__global__ void qsort_warp(unsigned *indata,
unsigned *outdata,
unsigned int offset,
unsigned int len,
qsortAtomicData *atomicData,
qsortRingbuf *atomicDataStack,
unsigned int source_is_indata,
unsigned int depth)
{
// New in CUDA 9.0. Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
// Find my data offset, based on warp ID
unsigned int thread_id = threadIdx.x + (blockIdx.x << QSORT_BLOCKSIZE_SHIFT);
// unsigned int warp_id = threadIdx.x >> 5; // Used for debug only
unsigned int lane_id = threadIdx.x & (warpSize-1);
// Exit if I'm outside the range of sort to be done
if (thread_id >= len)
return;
//
// First part of the algorithm. Each warp counts the number of elements that are
// greater/less than the pivot.
//
// When a warp knows its count, it updates an atomic counter.
//
// Read in the data and the pivot. Arbitrary pivot selection for now.
unsigned pivot = indata[offset + len/2];
unsigned data = indata[offset + thread_id];
// Count how many are <= and how many are > pivot.
// If all are <= pivot then we adjust the comparison
// because otherwise the sort will move nothing and
// we'll iterate forever.
cg::coalesced_group active = cg::coalesced_threads();
unsigned int greater = (data > pivot);
unsigned int gt_mask = active.ballot(greater);
if (gt_mask == 0)
{
greater = (data >= pivot);
gt_mask = active.ballot(greater); //Must re-ballot for adjusted comparator
}
unsigned int lt_mask = active.ballot(!greater);
unsigned int gt_count = __popc(gt_mask);
unsigned int lt_count = __popc(lt_mask);
// Atomically adjust the lt_ and gt_offsets by this amount. Only one thread need do this.
// Share the result using shfl
unsigned int lt_offset, gt_offset;
if (lane_id == 0)
{
if (lt_count > 0)
lt_offset = atomicAdd((unsigned int *) &atomicData->lt_offset, lt_count);
if (gt_count > 0)
gt_offset = len - (atomicAdd((unsigned int *) &atomicData->gt_offset, gt_count) + gt_count);
}
lt_offset = active.shfl((int)lt_offset, 0); //Everyone pulls the offsets from lane 0
gt_offset = active.shfl((int)gt_offset, 0);
// Now compute my own personal offset within this. I need to know how many
// threads with a lane ID less than mine are going to write to the same buffer
// as me. We can use popc to implement a single-operation warp scan in this case.
unsigned lane_mask_lt;
asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lane_mask_lt));
unsigned int my_mask = greater ? gt_mask : lt_mask;
unsigned int my_offset = __popc(my_mask & lane_mask_lt);
// Move data.
my_offset += greater ? gt_offset : lt_offset;
outdata[offset + my_offset] = data;
// Count up if we're the last warp in. If so, then Kepler will launch the next
// set of sorts directly from here.
if (lane_id == 0)
{
// Count "elements written". If I wrote the last one, then trigger the next qsorts
unsigned int mycount = lt_count + gt_count;
if (atomicAdd((unsigned int *) &atomicData->sorted_count, mycount) + mycount == len)
{
// We're the last warp to do any sorting. Therefore it's up to us to launch the next stage.
unsigned int lt_len = atomicData->lt_offset;
unsigned int gt_len = atomicData->gt_offset;
hipStream_t lstream, rstream;
hipStreamCreateWithFlags(&lstream, hipStreamNonBlocking);
hipStreamCreateWithFlags(&rstream, hipStreamNonBlocking);
// Begin by freeing our atomicData storage. It's better for the ringbuffer algorithm
// if we free when we're done, rather than re-using (makes for less fragmentation).
ringbufFree<qsortAtomicData>(atomicDataStack, atomicData);
// Exceptional case: if "lt_len" is zero, then all values in the batch
// are equal. We are then done (may need to copy into correct buffer, though)
if (lt_len == 0)
{
if (source_is_indata)
hipMemcpyAsync(indata+offset, outdata+offset, gt_len*sizeof(unsigned), hipMemcpyDeviceToDevice, lstream);
return;
}
// Start with lower half first
if (lt_len > BITONICSORT_LEN)
{
// If we've exceeded maximum depth, fall through to backup big_bitonicsort
if (depth >= QSORT_MAXDEPTH)
{
// The final bitonic stage sorts in-place in "outdata". We therefore
// re-use "indata" as the out-of-range tracking buffer. For (2^n)+1
// elements we need (2^(n+1)) bytes of our buffer. The backup qsort
// buffer is at least this large when sizeof(QTYPE) >= 2.
hipLaunchKernelGGL(( big_bitonicsort), dim3(1), dim3(BITONICSORT_LEN), 0, lstream , outdata, source_is_indata ? indata : outdata, indata, offset, lt_len);
}
else
{
// Launch another quicksort. We need to allocate more storage for the atomic data.
if ((atomicData = ringbufAlloc<qsortAtomicData>(atomicDataStack)) == NULL)
printf("Stack-allocation error. Failing left child launch.\n");
else
{
atomicData->lt_offset = atomicData->gt_offset = atomicData->sorted_count = 0;
unsigned int numblocks = (unsigned int)(lt_len+(QSORT_BLOCKSIZE-1)) >> QSORT_BLOCKSIZE_SHIFT;
hipLaunchKernelGGL(( qsort_warp), dim3(numblocks), dim3(QSORT_BLOCKSIZE), 0, lstream , outdata, indata, offset, lt_len, atomicData, atomicDataStack, !source_is_indata, depth+1);
}
}
}
else if (lt_len > 1)
{
// Final stage uses a bitonic sort instead. It's important to
// make sure the final stage ends up in the correct (original) buffer.
// We launch the smallest power-of-2 number of threads that we can.
unsigned int bitonic_len = 1 << (__qsflo(lt_len-1)+1);
hipLaunchKernelGGL(( bitonicsort), dim3(1), dim3(bitonic_len), 0, lstream , outdata, source_is_indata ? indata : outdata, offset, lt_len);
}
// Finally, if we sorted just one single element, we must still make
// sure that it winds up in the correct place.
else if (source_is_indata && (lt_len == 1))
indata[offset] = outdata[offset];
if (hipPeekAtLastError() != hipSuccess)
printf("Left-side launch fail: %s\n", hipGetErrorString(hipGetLastError()));
// Now the upper half.
if (gt_len > BITONICSORT_LEN)
{
// If we've exceeded maximum depth, fall through to backup big_bitonicsort
if (depth >= QSORT_MAXDEPTH)
hipLaunchKernelGGL(( big_bitonicsort), dim3(1), dim3(BITONICSORT_LEN), 0, rstream , outdata, source_is_indata ? indata : outdata, indata, offset+lt_len, gt_len);
else
{
// Allocate new atomic storage for this launch
if ((atomicData = ringbufAlloc<qsortAtomicData>(atomicDataStack)) == NULL)
printf("Stack allocation error! Failing right-side launch.\n");
else
{
atomicData->lt_offset = atomicData->gt_offset = atomicData->sorted_count = 0;
unsigned int numblocks = (unsigned int)(gt_len+(QSORT_BLOCKSIZE-1)) >> QSORT_BLOCKSIZE_SHIFT;
hipLaunchKernelGGL(( qsort_warp), dim3(numblocks), dim3(QSORT_BLOCKSIZE), 0, rstream , outdata, indata, offset+lt_len, gt_len, atomicData, atomicDataStack, !source_is_indata, depth+1);
}
}
}
else if (gt_len > 1)
{
unsigned int bitonic_len = 1 << (__qsflo(gt_len-1U)+1);
hipLaunchKernelGGL(( bitonicsort), dim3(1), dim3(bitonic_len), 0, rstream , outdata, source_is_indata ? indata : outdata, offset+lt_len, gt_len);
}
else if (source_is_indata && (gt_len == 1))
indata[offset+lt_len] = outdata[offset+lt_len];
if (hipPeekAtLastError() != hipSuccess)
printf("Right-side launch fail: %s\n", hipGetErrorString(hipGetLastError()));
}
}
}
/*-- run_quicksort
Host-side code to run the Kepler version of quicksort. It's pretty
simple, because all launch control is handled on the device via CDP.
All parallel quicksorts require an equal-sized scratch buffer. This
must be passed in ahead of time.
Returns the time elapsed for the sort.
*/
float run_quicksort_cdp(unsigned *gpudata, unsigned *scratchdata, unsigned int count, hipStream_t stream)
{
unsigned int stacksize = QSORT_STACK_ELEMS;
// This is the stack, for atomic tracking of each sort's status
qsortAtomicData *gpustack;
checkCudaErrors(hipMalloc((void **)&gpustack, stacksize * sizeof(qsortAtomicData)));
checkCudaErrors(hipMemset(gpustack, 0, sizeof(qsortAtomicData))); // Only need to set first entry to 0
// Create the memory ringbuffer used for handling the stack.
// Initialise everything to where it needs to be.
qsortRingbuf buf;
qsortRingbuf *ringbuf;
checkCudaErrors(hipMalloc((void **)&ringbuf, sizeof(qsortRingbuf)));
buf.head = 1; // We start with one allocation
buf.tail = 0;
buf.count = 0;
buf.max = 0;
buf.stacksize = stacksize;
buf.stackbase = gpustack;
checkCudaErrors(hipMemcpy(ringbuf, &buf, sizeof(buf), hipMemcpyHostToDevice));
// Timing events ...
hipEvent_t ev1, ev2;
checkCudaErrors(hipEventCreate(&ev1));
checkCudaErrors(hipEventCreate(&ev2));
checkCudaErrors(hipEventRecord(ev1));
// Now we trivially launch the qsort kernel
if (count > BITONICSORT_LEN)
{
unsigned int numblocks = (unsigned int)(count+(QSORT_BLOCKSIZE-1)) >> QSORT_BLOCKSIZE_SHIFT;
hipLaunchKernelGGL(( qsort_warp), dim3(numblocks), dim3(QSORT_BLOCKSIZE), 0, stream , gpudata, scratchdata, 0U, count, gpustack, ringbuf, true, 0);
}
else
{
hipLaunchKernelGGL(( bitonicsort), dim3(1), dim3(BITONICSORT_LEN) , 0, 0, gpudata, gpudata, 0, count);
}
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipEventRecord(ev2));
checkCudaErrors(hipDeviceSynchronize());
float elapse=0.0f;
if (hipPeekAtLastError() != hipSuccess)
printf("Launch failure: %s\n", hipGetErrorString(hipGetLastError()));
else
checkCudaErrors(hipEventElapsedTime(&elapse, ev1, ev2));
// Sanity check that the stack allocator is doing the right thing
checkCudaErrors(hipMemcpy(&buf, ringbuf, sizeof(*ringbuf), hipMemcpyDeviceToHost));
if (count > BITONICSORT_LEN && buf.head != buf.tail)
{
printf("Stack allocation error!\nRingbuf:\n");
printf("\t head = %u\n", buf.head);
printf("\t tail = %u\n", buf.tail);
printf("\tcount = %u\n", buf.count);
printf("\t max = %u\b", buf.max);
}
// Release our stack data once we're done
checkCudaErrors(hipFree(ringbuf));
checkCudaErrors(hipFree(gpustack));
return elapse;
}
int run_qsort(unsigned int size, int seed, int debug, int loop, int verbose)
{
if (seed > 0)
srand(seed);
// Create and set up our test
unsigned *gpudata, *scratchdata;
checkCudaErrors(hipMalloc((void **)&gpudata, size*sizeof(unsigned)));
checkCudaErrors(hipMalloc((void **)&scratchdata, size*sizeof(unsigned)));
// Create CPU data.
unsigned *data = new unsigned[size];
unsigned int min = loop ? loop : size;
unsigned int max = size;
loop = (loop == 0) ? 1 : loop;
for (size=min; size<=max; size+=loop)
{
if (verbose)
printf(" Input: ");
for (unsigned int i=0; i<size; i++)
{
// Build data 8 bits at a time
data[i] = 0;
char *ptr = (char *)&(data[i]);
for (unsigned j=0; j<sizeof(unsigned); j++)
{
// Easy-to-read data in debug mode
if (debug)
{
*ptr++ = (char)(rand() % 10);
break;
}
*ptr++ = (char)(rand() & 255);
}
if (verbose)
{
if (i && !(i%32))
printf("\n ");
printf("%u ", data[i]);
}
}
if (verbose)
printf("\n");
checkCudaErrors(hipMemcpy(gpudata, data, size*sizeof(unsigned), hipMemcpyHostToDevice));
// So we're now populated and ready to go! We size our launch as
// blocks of up to BLOCKSIZE threads, and appropriate grid size.
float elapse;
elapse = run_quicksort_cdp(gpudata, scratchdata, size, NULL);
checkCudaErrors(hipDeviceSynchronize());
// Copy back the data and verify correct sort
checkCudaErrors(hipMemcpy(data, gpudata, size*sizeof(unsigned), hipMemcpyDeviceToHost));
if (verbose)
{
printf("Output: ");
for (unsigned int i=0; i<size; i++)
{
if (i && !(i%32)) printf("\n ");
printf("%u ", data[i]);
}
printf("\n");
}
unsigned int check;
for (check=1; check<size; check++)
{
if (data[check] < data[check-1])
{
printf("FAILED at element: %d\n", check);
break;
}
}
if (check != size)
{
printf(" cdpAdvancedQuicksort FAILED\n");
exit(EXIT_FAILURE);
}
else
printf(" cdpAdvancedQuciksort PASSSED\n");
// Display the time between event recordings
printf("Sorted %u elems in %.3f ms (%.3f Melems/sec)\n", size, elapse, (float)size/(elapse*1000.0f));
fflush(stdout);
}
// Release everything and we're done
checkCudaErrors(hipFree(scratchdata));
checkCudaErrors(hipFree(gpudata));
delete [] data;
return 0;
}
int main(int argc, char *argv[])
{
int size = 71780;
unsigned int seed = 100;
int debug = 0;
int loop = 0;
int verbose = 0;
// Get device properties
int cuda_device = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t properties;
checkCudaErrors(hipGetDeviceProperties(&properties, cuda_device));
int cdpCapable = (properties.major == 3 && properties.minor >= 5) || properties.major >= 4;
printf("GPU device %s has compute capabilities (SM %d.%d)\n", properties.name, properties.major, properties.minor);
if (!cdpCapable)
{
printf("cdpAdvancedQuicksort requires SM 3.5 or higher to use CUDA Dynamic Parallelism. Exiting...\n");
exit(EXIT_WAIVED);
}
printf("Running qsort on %d elements with seed %d, on %s\n", size, seed, properties.name);
run_qsort(size, seed, debug, loop, verbose);
exit(EXIT_SUCCESS);
}
|
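Editor's note: the ringbufAlloc/ringbufFree pair above tracks allocations with non-wrapped indices and only advances the tail once every earlier index has been retired. The single-threaded model below (an editor's sketch, not part of either file) walks that book-keeping through without atomics; head starts at 0 here for simplicity, whereas the real code starts it at 1 to account for the pre-allocated first entry.

#include <cstdio>

struct Ringbuf { unsigned head = 0, tail = 0, count = 0, max = 0; };

unsigned alloc(Ringbuf &r) {
    // the device code spin-waits while (head - tail) >= stacksize; omitted here
    return r.head++;                     // non-wrapped index, as in ringbufAlloc
}

void release(Ringbuf &r, unsigned index) {
    unsigned count = ++r.count;          // total number of frees so far
    if (r.max < index + 1) r.max = index + 1;
    // the tail may only advance once every index below 'max' has been freed, which is
    // exactly when the free count catches up with the highest index handed out
    if (r.max == count) r.tail = count;
}

int main() {
    Ringbuf r;
    unsigned a = alloc(r), b = alloc(r), c = alloc(r);             // indices 0, 1, 2
    release(r, c);  printf("tail after freeing c: %u\n", r.tail);  // still 0
    release(r, a);  printf("tail after freeing a: %u\n", r.tail);  // still 0
    release(r, b);  printf("tail after freeing b: %u\n", r.tail);  // now 3
    return 0;
}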
84ae49037ba755f2fbc4699f4e7c50cebb78551f.cu
|
/*
Advanced quicksort (from nvidia sample code cuda 8.0)
1. A small-set insertion sort. We do this on any set with <=32 elements
2. A partitioning kernel, which - given a pivot - separates an input
array into elements <=pivot, and > pivot. Two quicksorts will then
be launched to resolve each of these.
3. A quicksort co-ordinator, which figures out what kernels to launch
and when.
*/
#include <thrust/random.h>
#include <thrust/device_vector.h>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
#include "helper_cuda.h"
#include "helper_string.h"
#include "cdpQuicksort.h"
/* -- Inline PTX call to return index of highest non-zero bit in a word -- */
static __device__ __forceinline__ unsigned int __qsflo(unsigned int word)
{
unsigned int ret;
asm volatile("bfind.u32 %0, %1;" : "=r"(ret) : "r"(word));
return ret;
}
/*-- ringbufAlloc
Allocates from a ringbuffer. Allows for not failing when we run out
of stack for tracking the offset counts for each sort subsection.
We use the atomicMax trick to allow out-of-order retirement. If we
hit the size limit on the ringbuffer, then we spin-wait for people
to complete.
*/
template< typename T >
static __device__ T *ringbufAlloc(qsortRingbuf *ringbuf)
{
// Wait for there to be space in the ring buffer. We'll retry only a fixed
// number of times and then fail, to avoid an out-of-memory deadlock.
unsigned int loop = 10000;
while (((ringbuf->head - ringbuf->tail) >= ringbuf->stacksize) && (loop-- > 0));
if (loop == 0)
return NULL;
// Note that element includes a little index book-keeping, for freeing later.
unsigned int index = atomicAdd((unsigned int *) &ringbuf->head, 1);
T *ret = (T *)(ringbuf->stackbase) + (index & (ringbuf->stacksize-1));
ret->index = index;
return ret;
}
/*-- ringbufFree
Releases an element from the ring buffer. If every element is released
up to and including this one, we can advance the tail to indicate that
space is now available.
*/
template< typename T >
static __device__ void ringbufFree(qsortRingbuf *ringbuf, T *data)
{
unsigned int index = data->index; // Non-wrapped index to free
unsigned int count = atomicAdd((unsigned int *)&(ringbuf->count), 1) + 1;
unsigned int max = atomicMax((unsigned int *)&(ringbuf->max), index + 1);
// Update the tail if need be. Note we update "max" to be the new value in ringbuf->max
if (max < (index+1)) max = index + 1;
if (max == count)
atomicMax((unsigned int *)&(ringbuf->tail), count);
}
/*-- qsort_warp
Simplest possible implementation, does a per-warp quicksort with no inter-warp
communication. This has a high atomic issue rate, but the rest should actually
be fairly quick because of low work per thread.
A warp finds its section of the data, then writes all data <pivot to one
buffer and all data >pivot to the other. Atomics are used to get a unique
section of the buffer.
Obvious optimisation: do multiple chunks per warp, to increase in-flight loads
and cover the instruction overhead.
*/
__global__ void qsort_warp(unsigned *indata,
unsigned *outdata,
unsigned int offset,
unsigned int len,
qsortAtomicData *atomicData,
qsortRingbuf *atomicDataStack,
unsigned int source_is_indata,
unsigned int depth)
{
// New in CUDA 9.0. Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
// Find my data offset, based on warp ID
unsigned int thread_id = threadIdx.x + (blockIdx.x << QSORT_BLOCKSIZE_SHIFT);
// unsigned int warp_id = threadIdx.x >> 5; // Used for debug only
unsigned int lane_id = threadIdx.x & (warpSize-1);
// Exit if I'm outside the range of sort to be done
if (thread_id >= len)
return;
//
// First part of the algorithm. Each warp counts the number of elements that are
// greater/less than the pivot.
//
// When a warp knows its count, it updates an atomic counter.
//
// Read in the data and the pivot. Arbitrary pivot selection for now.
unsigned pivot = indata[offset + len/2];
unsigned data = indata[offset + thread_id];
// Count how many are <= and how many are > pivot.
// If all are <= pivot then we adjust the comparison
// because otherwise the sort will move nothing and
// we'll iterate forever.
cg::coalesced_group active = cg::coalesced_threads();
unsigned int greater = (data > pivot);
unsigned int gt_mask = active.ballot(greater);
if (gt_mask == 0)
{
greater = (data >= pivot);
gt_mask = active.ballot(greater); //Must re-ballot for adjusted comparator
}
unsigned int lt_mask = active.ballot(!greater);
unsigned int gt_count = __popc(gt_mask);
unsigned int lt_count = __popc(lt_mask);
// Atomically adjust the lt_ and gt_offsets by this amount. Only one thread need do this.
// Share the result using shfl
unsigned int lt_offset, gt_offset;
if (lane_id == 0)
{
if (lt_count > 0)
lt_offset = atomicAdd((unsigned int *) &atomicData->lt_offset, lt_count);
if (gt_count > 0)
gt_offset = len - (atomicAdd((unsigned int *) &atomicData->gt_offset, gt_count) + gt_count);
}
lt_offset = active.shfl((int)lt_offset, 0); //Everyone pulls the offsets from lane 0
gt_offset = active.shfl((int)gt_offset, 0);
// Now compute my own personal offset within this. I need to know how many
// threads with a lane ID less than mine are going to write to the same buffer
// as me. We can use popc to implement a single-operation warp scan in this case.
unsigned lane_mask_lt;
asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lane_mask_lt));
unsigned int my_mask = greater ? gt_mask : lt_mask;
unsigned int my_offset = __popc(my_mask & lane_mask_lt);
// Move data.
my_offset += greater ? gt_offset : lt_offset;
outdata[offset + my_offset] = data;
// Count up if we're the last warp in. If so, then Kepler will launch the next
// set of sorts directly from here.
if (lane_id == 0)
{
// Count "elements written". If I wrote the last one, then trigger the next qsorts
unsigned int mycount = lt_count + gt_count;
if (atomicAdd((unsigned int *) &atomicData->sorted_count, mycount) + mycount == len)
{
// We're the last warp to do any sorting. Therefore it's up to us to launch the next stage.
unsigned int lt_len = atomicData->lt_offset;
unsigned int gt_len = atomicData->gt_offset;
cudaStream_t lstream, rstream;
cudaStreamCreateWithFlags(&lstream, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&rstream, cudaStreamNonBlocking);
// Begin by freeing our atomicData storage. It's better for the ringbuffer algorithm
// if we free when we're done, rather than re-using (makes for less fragmentation).
ringbufFree<qsortAtomicData>(atomicDataStack, atomicData);
// Exceptional case: if "lt_len" is zero, then all values in the batch
// are equal. We are then done (may need to copy into correct buffer, though)
if (lt_len == 0)
{
if (source_is_indata)
cudaMemcpyAsync(indata+offset, outdata+offset, gt_len*sizeof(unsigned), cudaMemcpyDeviceToDevice, lstream);
return;
}
// Start with lower half first
if (lt_len > BITONICSORT_LEN)
{
// If we've exceeded maximum depth, fall through to backup big_bitonicsort
if (depth >= QSORT_MAXDEPTH)
{
// The final bitonic stage sorts in-place in "outdata". We therefore
// re-use "indata" as the out-of-range tracking buffer. For (2^n)+1
// elements we need (2^(n+1)) bytes of our buffer. The backup qsort
// buffer is at least this large when sizeof(QTYPE) >= 2.
big_bitonicsort<<< 1, BITONICSORT_LEN, 0, lstream >>>(outdata, source_is_indata ? indata : outdata, indata, offset, lt_len);
}
else
{
// Launch another quicksort. We need to allocate more storage for the atomic data.
if ((atomicData = ringbufAlloc<qsortAtomicData>(atomicDataStack)) == NULL)
printf("Stack-allocation error. Failing left child launch.\n");
else
{
atomicData->lt_offset = atomicData->gt_offset = atomicData->sorted_count = 0;
unsigned int numblocks = (unsigned int)(lt_len+(QSORT_BLOCKSIZE-1)) >> QSORT_BLOCKSIZE_SHIFT;
qsort_warp<<< numblocks, QSORT_BLOCKSIZE, 0, lstream >>>(outdata, indata, offset, lt_len, atomicData, atomicDataStack, !source_is_indata, depth+1);
}
}
}
else if (lt_len > 1)
{
// Final stage uses a bitonic sort instead. It's important to
// make sure the final stage ends up in the correct (original) buffer.
// We launch the smallest power-of-2 number of threads that we can.
unsigned int bitonic_len = 1 << (__qsflo(lt_len-1)+1);
bitonicsort<<< 1, bitonic_len, 0, lstream >>>(outdata, source_is_indata ? indata : outdata, offset, lt_len);
}
// Finally, if we sorted just one single element, we must still make
// sure that it winds up in the correct place.
else if (source_is_indata && (lt_len == 1))
indata[offset] = outdata[offset];
if (cudaPeekAtLastError() != cudaSuccess)
printf("Left-side launch fail: %s\n", cudaGetErrorString(cudaGetLastError()));
// Now the upper half.
if (gt_len > BITONICSORT_LEN)
{
// If we've exceeded maximum depth, fall through to backup big_bitonicsort
if (depth >= QSORT_MAXDEPTH)
big_bitonicsort<<< 1, BITONICSORT_LEN, 0, rstream >>>(outdata, source_is_indata ? indata : outdata, indata, offset+lt_len, gt_len);
else
{
// Allocate new atomic storage for this launch
if ((atomicData = ringbufAlloc<qsortAtomicData>(atomicDataStack)) == NULL)
printf("Stack allocation error! Failing right-side launch.\n");
else
{
atomicData->lt_offset = atomicData->gt_offset = atomicData->sorted_count = 0;
unsigned int numblocks = (unsigned int)(gt_len+(QSORT_BLOCKSIZE-1)) >> QSORT_BLOCKSIZE_SHIFT;
qsort_warp<<< numblocks, QSORT_BLOCKSIZE, 0, rstream >>>(outdata, indata, offset+lt_len, gt_len, atomicData, atomicDataStack, !source_is_indata, depth+1);
}
}
}
else if (gt_len > 1)
{
unsigned int bitonic_len = 1 << (__qsflo(gt_len-1U)+1);
bitonicsort<<< 1, bitonic_len, 0, rstream >>>(outdata, source_is_indata ? indata : outdata, offset+lt_len, gt_len);
}
else if (source_is_indata && (gt_len == 1))
indata[offset+lt_len] = outdata[offset+lt_len];
if (cudaPeekAtLastError() != cudaSuccess)
printf("Right-side launch fail: %s\n", cudaGetErrorString(cudaGetLastError()));
}
}
}
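// ----------------------------------------------------------------------------
// A minimal standalone sketch of the lane-mask + __popc trick used above:
// given the ballot mask of lanes that voted for the same output buffer,
// counting the voters with a lower lane ID yields this thread's slot in a
// single instruction. "warp_prefix_slot" and "vote_mask" are illustrative
// names only and are not used elsewhere in this sample.
// ----------------------------------------------------------------------------
static __device__ unsigned int warp_prefix_slot(unsigned int vote_mask)
{
    unsigned int lane_mask_lt;
    asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lane_mask_lt));  // bits of all lower lanes
    return __popc(vote_mask & lane_mask_lt);                 // lower lanes that also voted
}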
/*-- run_quicksort
Host-side code to run the Kepler version of quicksort. It's pretty
simple, because all launch control is handled on the device via CDP.
All parallel quicksorts require an equal-sized scratch buffer. This
must be passed in ahead of time.
Returns the time elapsed for the sort.
*/
float run_quicksort_cdp(unsigned *gpudata, unsigned *scratchdata, unsigned int count, cudaStream_t stream)
{
unsigned int stacksize = QSORT_STACK_ELEMS;
// This is the stack, for atomic tracking of each sort's status
qsortAtomicData *gpustack;
checkCudaErrors(cudaMalloc((void **)&gpustack, stacksize * sizeof(qsortAtomicData)));
checkCudaErrors(cudaMemset(gpustack, 0, sizeof(qsortAtomicData))); // Only need set first entry to 0
// Create the memory ringbuffer used for handling the stack.
// Initialise everything to where it needs to be.
qsortRingbuf buf;
qsortRingbuf *ringbuf;
checkCudaErrors(cudaMalloc((void **)&ringbuf, sizeof(qsortRingbuf)));
buf.head = 1; // We start with one allocation
buf.tail = 0;
buf.count = 0;
buf.max = 0;
buf.stacksize = stacksize;
buf.stackbase = gpustack;
checkCudaErrors(cudaMemcpy(ringbuf, &buf, sizeof(buf), cudaMemcpyHostToDevice));
// Timing events ...
cudaEvent_t ev1, ev2;
checkCudaErrors(cudaEventCreate(&ev1));
checkCudaErrors(cudaEventCreate(&ev2));
checkCudaErrors(cudaEventRecord(ev1));
// Now we trivially launch the qsort kernel
if (count > BITONICSORT_LEN)
{
unsigned int numblocks = (unsigned int)(count+(QSORT_BLOCKSIZE-1)) >> QSORT_BLOCKSIZE_SHIFT;
qsort_warp<<< numblocks, QSORT_BLOCKSIZE, 0, stream >>>(gpudata, scratchdata, 0U, count, gpustack, ringbuf, true, 0);
}
else
{
bitonicsort<<< 1, BITONICSORT_LEN >>>(gpudata, gpudata, 0, count);
}
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaEventRecord(ev2));
checkCudaErrors(cudaDeviceSynchronize());
float elapse=0.0f;
if (cudaPeekAtLastError() != cudaSuccess)
printf("Launch failure: %s\n", cudaGetErrorString(cudaGetLastError()));
else
checkCudaErrors(cudaEventElapsedTime(&elapse, ev1, ev2));
// Sanity check that the stack allocator is doing the right thing
checkCudaErrors(cudaMemcpy(&buf, ringbuf, sizeof(*ringbuf), cudaMemcpyDeviceToHost));
if (count > BITONICSORT_LEN && buf.head != buf.tail)
{
printf("Stack allocation error!\nRingbuf:\n");
printf("\t head = %u\n", buf.head);
printf("\t tail = %u\n", buf.tail);
printf("\tcount = %u\n", buf.count);
printf("\t max = %u\b", buf.max);
}
// Release our stack data once we're done
checkCudaErrors(cudaFree(ringbuf));
checkCudaErrors(cudaFree(gpustack));
return elapse;
}
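// ----------------------------------------------------------------------------
// A minimal, hedged usage sketch of run_quicksort_cdp(), showing the
// equal-sized scratch buffer the comment above requires. "host_keys" and "n"
// are illustrative names; run_qsort() below is the full test driver actually
// used by this sample.
// ----------------------------------------------------------------------------
float example_sort_once(unsigned *host_keys, unsigned int n)
{
    unsigned *d_keys = 0, *d_scratch = 0;
    checkCudaErrors(cudaMalloc((void **)&d_keys, n*sizeof(unsigned)));
    checkCudaErrors(cudaMalloc((void **)&d_scratch, n*sizeof(unsigned)));  // same size as the data
    checkCudaErrors(cudaMemcpy(d_keys, host_keys, n*sizeof(unsigned), cudaMemcpyHostToDevice));
    float ms = run_quicksort_cdp(d_keys, d_scratch, n, NULL);              // sorted result lands back in d_keys
    checkCudaErrors(cudaMemcpy(host_keys, d_keys, n*sizeof(unsigned), cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(d_scratch));
    checkCudaErrors(cudaFree(d_keys));
    return ms;
}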
int run_qsort(unsigned int size, int seed, int debug, int loop, int verbose)
{
if (seed > 0)
srand(seed);
// Create and set up our test
unsigned *gpudata, *scratchdata;
checkCudaErrors(cudaMalloc((void **)&gpudata, size*sizeof(unsigned)));
checkCudaErrors(cudaMalloc((void **)&scratchdata, size*sizeof(unsigned)));
// Create CPU data.
unsigned *data = new unsigned[size];
unsigned int min = loop ? loop : size;
unsigned int max = size;
loop = (loop == 0) ? 1 : loop;
for (size=min; size<=max; size+=loop)
{
if (verbose)
printf(" Input: ");
for (unsigned int i=0; i<size; i++)
{
// Build data 8 bits at a time
data[i] = 0;
char *ptr = (char *)&(data[i]);
for (unsigned j=0; j<sizeof(unsigned); j++)
{
// Easy-to-read data in debug mode
if (debug)
{
*ptr++ = (char)(rand() % 10);
break;
}
*ptr++ = (char)(rand() & 255);
}
if (verbose)
{
if (i && !(i%32))
printf("\n ");
printf("%u ", data[i]);
}
}
if (verbose)
printf("\n");
checkCudaErrors(cudaMemcpy(gpudata, data, size*sizeof(unsigned), cudaMemcpyHostToDevice));
// So we're now populated and ready to go! We size our launch as
// blocks of up to BLOCKSIZE threads, and an appropriate grid size.
float elapse;
elapse = run_quicksort_cdp(gpudata, scratchdata, size, NULL);
checkCudaErrors(cudaDeviceSynchronize());
// Copy back the data and verify correct sort
checkCudaErrors(cudaMemcpy(data, gpudata, size*sizeof(unsigned), cudaMemcpyDeviceToHost));
if (verbose)
{
printf("Output: ");
for (unsigned int i=0; i<size; i++)
{
if (i && !(i%32)) printf("\n ");
printf("%u ", data[i]);
}
printf("\n");
}
unsigned int check;
for (check=1; check<size; check++)
{
if (data[check] < data[check-1])
{
printf("FAILED at element: %d\n", check);
break;
}
}
if (check != size)
{
printf(" cdpAdvancedQuicksort FAILED\n");
exit(EXIT_FAILURE);
}
else
printf(" cdpAdvancedQuciksort PASSSED\n");
// Display the time between event recordings
printf("Sorted %u elems in %.3f ms (%.3f Melems/sec)\n", size, elapse, (float)size/(elapse*1000.0f));
fflush(stdout);
}
// Release everything and we're done
checkCudaErrors(cudaFree(scratchdata));
checkCudaErrors(cudaFree(gpudata));
delete [] data;
return 0;
}
int main(int argc, char *argv[])
{
int size = 71780;
unsigned int seed = 100;
int debug = 0;
int loop = 0;
int verbose = 0;
// Get device properties
int cuda_device = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp properties;
checkCudaErrors(cudaGetDeviceProperties(&properties, cuda_device));
int cdpCapable = (properties.major == 3 && properties.minor >= 5) || properties.major >= 4;
printf("GPU device %s has compute capabilities (SM %d.%d)\n", properties.name, properties.major, properties.minor);
if (!cdpCapable)
{
printf("cdpAdvancedQuicksort requires SM 3.5 or higher to use CUDA Dynamic Parallelism. Exiting...\n");
exit(EXIT_WAIVED);
}
printf("Running qsort on %d elements with seed %d, on %s\n", size, seed, properties.name);
run_qsort(size, seed, debug, loop, verbose);
exit(EXIT_SUCCESS);
}
|
e4f5e47e9487e7bc3b57d513f800ebf4bb24574c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2021 by Contributors
* \file array/cuda/rowwise_sampling.cu
* \brief uniform rowwise sampling
*/
#include <dgl/random.h>
#include <dgl/runtime/device_api.h>
#include <hiprand/hiprand_kernel.h>
#include <numeric>
#include "./dgl_cub.cuh"
#include "../../array/cuda/atomic.cuh"
#include "../../runtime/cuda/cuda_common.h"
using namespace dgl::aten::cuda;
namespace dgl {
namespace aten {
namespace impl {
namespace {
constexpr int BLOCK_SIZE = 128;
/**
* @brief Compute the size of each row in the sampled CSR, without replacement.
*
* @tparam IdType The type of node and edge indexes.
* @param num_picks The number of non-zero entries to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The index where each row's edges start.
* @param out_deg The size of each row in the sampled matrix, as indexed by
* `in_rows` (output).
*/
template<typename IdType>
__global__ void _CSRRowWiseSampleDegreeKernel(
const int64_t num_picks,
const int64_t num_rows,
const IdType * const in_rows,
const IdType * const in_ptr,
IdType * const out_deg) {
const int tIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (tIdx < num_rows) {
const int64_t in_row = in_rows[tIdx];
const int64_t out_row = tIdx;
out_deg[out_row] = min(static_cast<IdType>(num_picks), in_ptr[in_row + 1] - in_ptr[in_row]);
if (out_row == num_rows - 1) {
// make the prefixsum work
out_deg[num_rows] = 0;
}
}
}
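// ----------------------------------------------------------------------------
// A hedged host-side reference of the same min(num_picks, row degree) rule as
// the kernel above, on a toy CSR. With in_ptr = {0, 2, 5, 6},
// in_rows = {0, 1, 2} and num_picks = 2 it yields out_deg = {2, 2, 1, 0};
// the trailing 0 keeps the exclusive prefix sum sized at num_rows + 1.
// Illustrative only, not used elsewhere in this file.
// ----------------------------------------------------------------------------
template<typename IdType>
void _CSRRowWiseSampleDegreeHostRef(
    const int64_t num_picks,
    const int64_t num_rows,
    const IdType * const in_rows,
    const IdType * const in_ptr,
    IdType * const out_deg) {
  for (int64_t i = 0; i < num_rows; ++i) {
    const IdType deg = in_ptr[in_rows[i] + 1] - in_ptr[in_rows[i]];
    out_deg[i] = deg < static_cast<IdType>(num_picks) ? deg : static_cast<IdType>(num_picks);
  }
  out_deg[num_rows] = 0;  // sentinel entry consumed by the exclusive scan
}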
/**
* @brief Compute the size of each row in the sampled CSR, with replacement.
*
* @tparam IdType The type of node and edge indexes.
* @param num_picks The number of non-zero entries to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The index where each row's edges start.
* @param out_deg The size of each row in the sampled matrix, as indexed by
* `in_rows` (output).
*/
template<typename IdType>
__global__ void _CSRRowWiseSampleDegreeReplaceKernel(
const int64_t num_picks,
const int64_t num_rows,
const IdType * const in_rows,
const IdType * const in_ptr,
IdType * const out_deg) {
const int tIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (tIdx < num_rows) {
const int64_t in_row = in_rows[tIdx];
const int64_t out_row = tIdx;
if (in_ptr[in_row + 1] - in_ptr[in_row] == 0) {
out_deg[out_row] = 0;
} else {
out_deg[out_row] = static_cast<IdType>(num_picks);
}
if (out_row == num_rows - 1) {
// make the prefixsum work
out_deg[num_rows] = 0;
}
}
}
/**
* @brief Perform row-wise uniform sampling on a CSR matrix,
* and generate a COO matrix, without replacement.
*
* @tparam IdType The ID type used for matrices.
* @tparam TILE_SIZE The number of rows covered by each threadblock.
* @param rand_seed The random seed to use.
* @param num_picks The number of non-zeros to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The indptr array of the input CSR.
* @param in_index The indices array of the input CSR.
* @param data The data array of the input CSR.
* @param out_ptr The offset to write each row to in the output COO.
* @param out_rows The rows of the output COO (output).
* @param out_cols The columns of the output COO (output).
* @param out_idxs The data array of the output COO (output).
*/
template<typename IdType, int TILE_SIZE>
__global__ void _CSRRowWiseSampleUniformKernel(
const uint64_t rand_seed,
const int64_t num_picks,
const int64_t num_rows,
const IdType * const in_rows,
const IdType * const in_ptr,
const IdType * const in_index,
const IdType * const data,
const IdType * const out_ptr,
IdType * const out_rows,
IdType * const out_cols,
IdType * const out_idxs) {
// we assign one block of BLOCK_SIZE threads to every TILE_SIZE rows
assert(blockDim.x == BLOCK_SIZE);
int64_t out_row = blockIdx.x * TILE_SIZE;
const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
hiprandStatePhilox4_32_10_t rng;
hiprand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng);
while (out_row < last_row) {
const int64_t row = in_rows[out_row];
const int64_t in_row_start = in_ptr[row];
const int64_t deg = in_ptr[row + 1] - in_row_start;
const int64_t out_row_start = out_ptr[out_row];
if (deg <= num_picks) {
// just copy the row when there are not enough nodes to sample.
for (int idx = threadIdx.x; idx < deg; idx += BLOCK_SIZE) {
const IdType in_idx = in_row_start + idx;
out_rows[out_row_start + idx] = row;
out_cols[out_row_start + idx] = in_index[in_idx];
out_idxs[out_row_start + idx] = data ? data[in_idx] : in_idx;
}
} else {
// generate permutation list via reservoir algorithm
for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) {
out_idxs[out_row_start + idx] = idx;
}
__syncthreads();
for (int idx = num_picks + threadIdx.x; idx < deg; idx += BLOCK_SIZE) {
const int num = hiprand(&rng) % (idx + 1);
if (num < num_picks) {
// use max so as to achieve the replacement order the serial
// algorithm would have
AtomicMax(out_idxs + out_row_start + num, idx);
}
}
__syncthreads();
// copy permutation over
for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) {
const IdType perm_idx = out_idxs[out_row_start + idx] + in_row_start;
out_rows[out_row_start + idx] = row;
out_cols[out_row_start + idx] = in_index[perm_idx];
out_idxs[out_row_start + idx] = data ? data[perm_idx] : perm_idx;
}
}
out_row += 1;
}
}
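// ----------------------------------------------------------------------------
// A hedged serial sketch of the reservoir step that the kernel above
// parallelises: each idx >= num_picks draws a slot r in [0, idx] and, if
// r < num_picks, overwrites that slot. Because idx only grows, the kernel can
// reproduce this serial "last writer wins" order with AtomicMax. "rand_slot"
// stands in for any uniform integer source; the helper is illustrative only.
// ----------------------------------------------------------------------------
template<typename IdType, typename Rng>
void _ReservoirPermutationHostRef(
    const int64_t num_picks,
    const int64_t deg,
    IdType * const picked_local_idx,
    Rng&& rand_slot) {
  for (int64_t idx = 0; idx < num_picks; ++idx) {
    picked_local_idx[idx] = static_cast<IdType>(idx);  // seed with the first num_picks edges
  }
  for (int64_t idx = num_picks; idx < deg; ++idx) {
    const int64_t r = rand_slot(idx + 1);  // uniform in [0, idx]
    if (r < num_picks) {
      picked_local_idx[r] = static_cast<IdType>(idx);
    }
  }
}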
/**
* @brief Perform row-wise uniform sampling on a CSR matrix,
* and generate a COO matrix, with replacement.
*
* @tparam IdType The ID type used for matrices.
* @tparam TILE_SIZE The number of rows covered by each threadblock.
* @param rand_seed The random seed to use.
* @param num_picks The number of non-zeros to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The indptr array of the input CSR.
* @param in_index The indices array of the input CSR.
* @param data The data array of the input CSR.
* @param out_ptr The offset to write each row to in the output COO.
* @param out_rows The rows of the output COO (output).
* @param out_cols The columns of the output COO (output).
* @param out_idxs The data array of the output COO (output).
*/
template<typename IdType, int TILE_SIZE>
__global__ void _CSRRowWiseSampleUniformReplaceKernel(
const uint64_t rand_seed,
const int64_t num_picks,
const int64_t num_rows,
const IdType * const in_rows,
const IdType * const in_ptr,
const IdType * const in_index,
const IdType * const data,
const IdType * const out_ptr,
IdType * const out_rows,
IdType * const out_cols,
IdType * const out_idxs) {
// we assign one block of BLOCK_SIZE threads to every TILE_SIZE rows
assert(blockDim.x == BLOCK_SIZE);
int64_t out_row = blockIdx.x * TILE_SIZE;
const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
hiprandStatePhilox4_32_10_t rng;
hiprand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng);
while (out_row < last_row) {
const int64_t row = in_rows[out_row];
const int64_t in_row_start = in_ptr[row];
const int64_t out_row_start = out_ptr[out_row];
const int64_t deg = in_ptr[row + 1] - in_row_start;
if (deg > 0) {
// each thread independently draws some of the row's picks (with replacement), but only if deg > 0.
for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) {
const int64_t edge = hiprand(&rng) % deg;
const int64_t out_idx = out_row_start + idx;
out_rows[out_idx] = row;
out_cols[out_idx] = in_index[in_row_start + edge];
out_idxs[out_idx] = data ? data[in_row_start + edge] : in_row_start + edge;
}
}
out_row += 1;
}
}
} // namespace
///////////////////////////// CSR sampling //////////////////////////
template <DLDeviceType XPU, typename IdType>
COOMatrix CSRRowWiseSamplingUniform(CSRMatrix mat,
IdArray rows,
const int64_t num_picks,
const bool replace) {
const auto& ctx = rows->ctx;
auto device = runtime::DeviceAPI::Get(ctx);
// TODO(dlasalle): Once the device api supports getting the stream from the
// context, that should be used instead of the default stream here.
hipStream_t stream = 0;
const int64_t num_rows = rows->shape[0];
const IdType * const slice_rows = static_cast<const IdType*>(rows->data);
IdArray picked_row = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
IdArray picked_col = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
IdArray picked_idx = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
const IdType * const in_ptr = static_cast<const IdType*>(mat.indptr->data);
const IdType * const in_cols = static_cast<const IdType*>(mat.indices->data);
IdType* const out_rows = static_cast<IdType*>(picked_row->data);
IdType* const out_cols = static_cast<IdType*>(picked_col->data);
IdType* const out_idxs = static_cast<IdType*>(picked_idx->data);
const IdType* const data = CSRHasData(mat) ?
static_cast<IdType*>(mat.data->data) : nullptr;
// compute degree
IdType * out_deg = static_cast<IdType*>(
device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType)));
if (replace) {
const dim3 block(512);
const dim3 grid((num_rows + block.x - 1) / block.x);
CUDA_KERNEL_CALL(
_CSRRowWiseSampleDegreeReplaceKernel,
grid, block, 0, stream,
num_picks, num_rows, slice_rows, in_ptr, out_deg);
} else {
const dim3 block(512);
const dim3 grid((num_rows + block.x - 1) / block.x);
CUDA_KERNEL_CALL(
_CSRRowWiseSampleDegreeKernel,
grid, block, 0, stream,
num_picks, num_rows, slice_rows, in_ptr, out_deg);
}
// fill out_ptr
IdType * out_ptr = static_cast<IdType*>(
device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType)));
size_t prefix_temp_size = 0;
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(nullptr, prefix_temp_size,
out_deg,
out_ptr,
num_rows+1,
stream));
void * prefix_temp = device->AllocWorkspace(ctx, prefix_temp_size);
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(prefix_temp, prefix_temp_size,
out_deg,
out_ptr,
num_rows+1,
stream));
device->FreeWorkspace(ctx, prefix_temp);
device->FreeWorkspace(ctx, out_deg);
hipEvent_t copyEvent;
CUDA_CALL(hipEventCreate(©Event));
// TODO(dlasalle): use pinned memory to overlap with the actual sampling, and wait on
// a cudaevent
IdType new_len;
device->CopyDataFromTo(out_ptr, num_rows * sizeof(new_len), &new_len, 0,
sizeof(new_len),
ctx,
DGLContext{kDLCPU, 0},
mat.indptr->dtype,
stream);
CUDA_CALL(hipEventRecord(copyEvent, stream));
const uint64_t random_seed = RandomEngine::ThreadLocal()->RandInt(1000000000);
// select edges
// the number of rows each thread block will cover
constexpr int TILE_SIZE = 128 / BLOCK_SIZE;
if (replace) { // with replacement
const dim3 block(BLOCK_SIZE);
const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE);
CUDA_KERNEL_CALL(
(_CSRRowWiseSampleUniformReplaceKernel<IdType, TILE_SIZE>),
grid, block, 0, stream,
random_seed,
num_picks,
num_rows,
slice_rows,
in_ptr,
in_cols,
data,
out_ptr,
out_rows,
out_cols,
out_idxs);
} else { // without replacement
const dim3 block(BLOCK_SIZE);
const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE);
CUDA_KERNEL_CALL(
(_CSRRowWiseSampleUniformKernel<IdType, TILE_SIZE>),
grid, block, 0, stream,
random_seed,
num_picks,
num_rows,
slice_rows,
in_ptr,
in_cols,
data,
out_ptr,
out_rows,
out_cols,
out_idxs);
}
device->FreeWorkspace(ctx, out_ptr);
// wait for copying `new_len` to finish
CUDA_CALL(hipEventSynchronize(copyEvent));
CUDA_CALL(hipEventDestroy(copyEvent));
picked_row = picked_row.CreateView({new_len}, picked_row->dtype);
picked_col = picked_col.CreateView({new_len}, picked_col->dtype);
picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype);
return COOMatrix(mat.num_rows, mat.num_cols, picked_row,
picked_col, picked_idx);
}
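// ----------------------------------------------------------------------------
// A hedged, isolated sketch of the two-phase hipcub::DeviceScan::ExclusiveSum
// pattern used above: the first call with a null workspace pointer only
// reports the required bytes, the second call performs the scan. Plain
// hipMalloc/hipFree are used here for brevity, whereas the function above
// goes through the DGL device API and CUDA_CALL error checking.
// ----------------------------------------------------------------------------
template <typename IdType>
void _ExclusiveSumSketch(const IdType* d_in, IdType* d_out, int num_items,
                         hipStream_t stream) {
  size_t temp_bytes = 0;
  hipcub::DeviceScan::ExclusiveSum(nullptr, temp_bytes, d_in, d_out, num_items, stream);
  void* d_temp = nullptr;
  hipMalloc(&d_temp, temp_bytes);
  hipcub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_in, d_out, num_items, stream);
  hipFree(d_temp);
}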
template COOMatrix CSRRowWiseSamplingUniform<kDLGPU, int32_t>(
CSRMatrix, IdArray, int64_t, bool);
template COOMatrix CSRRowWiseSamplingUniform<kDLGPU, int64_t>(
CSRMatrix, IdArray, int64_t, bool);
} // namespace impl
} // namespace aten
} // namespace dgl
|
e4f5e47e9487e7bc3b57d513f800ebf4bb24574c.cu
|
/*!
* Copyright (c) 2021 by Contributors
* \file array/cuda/rowwise_sampling.cu
* \brief uniform rowwise sampling
*/
#include <dgl/random.h>
#include <dgl/runtime/device_api.h>
#include <curand_kernel.h>
#include <numeric>
#include "./dgl_cub.cuh"
#include "../../array/cuda/atomic.cuh"
#include "../../runtime/cuda/cuda_common.h"
using namespace dgl::aten::cuda;
namespace dgl {
namespace aten {
namespace impl {
namespace {
constexpr int BLOCK_SIZE = 128;
/**
* @brief Compute the size of each row in the sampled CSR, without replacement.
*
* @tparam IdType The type of node and edge indexes.
* @param num_picks The number of non-zero entries to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The index where each row's edges start.
* @param out_deg The size of each row in the sampled matrix, as indexed by
* `in_rows` (output).
*/
template<typename IdType>
__global__ void _CSRRowWiseSampleDegreeKernel(
const int64_t num_picks,
const int64_t num_rows,
const IdType * const in_rows,
const IdType * const in_ptr,
IdType * const out_deg) {
const int tIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (tIdx < num_rows) {
const int64_t in_row = in_rows[tIdx];
const int64_t out_row = tIdx;
out_deg[out_row] = min(static_cast<IdType>(num_picks), in_ptr[in_row + 1] - in_ptr[in_row]);
if (out_row == num_rows - 1) {
// make the prefixsum work
out_deg[num_rows] = 0;
}
}
}
/**
* @brief Compute the size of each row in the sampled CSR, with replacement.
*
* @tparam IdType The type of node and edge indexes.
* @param num_picks The number of non-zero entries to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The index where each row's edges start.
* @param out_deg The size of each row in the sampled matrix, as indexed by
* `in_rows` (output).
*/
template<typename IdType>
__global__ void _CSRRowWiseSampleDegreeReplaceKernel(
const int64_t num_picks,
const int64_t num_rows,
const IdType * const in_rows,
const IdType * const in_ptr,
IdType * const out_deg) {
const int tIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (tIdx < num_rows) {
const int64_t in_row = in_rows[tIdx];
const int64_t out_row = tIdx;
if (in_ptr[in_row + 1] - in_ptr[in_row] == 0) {
out_deg[out_row] = 0;
} else {
out_deg[out_row] = static_cast<IdType>(num_picks);
}
if (out_row == num_rows - 1) {
// make the prefixsum work
out_deg[num_rows] = 0;
}
}
}
/**
* @brief Perform row-wise uniform sampling on a CSR matrix,
* and generate a COO matrix, without replacement.
*
* @tparam IdType The ID type used for matrices.
* @tparam TILE_SIZE The number of rows covered by each threadblock.
* @param rand_seed The random seed to use.
* @param num_picks The number of non-zeros to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The indptr array of the input CSR.
* @param in_index The indices array of the input CSR.
* @param data The data array of the input CSR.
* @param out_ptr The offset to write each row to in the output COO.
* @param out_rows The rows of the output COO (output).
* @param out_cols The columns of the output COO (output).
* @param out_idxs The data array of the output COO (output).
*/
template<typename IdType, int TILE_SIZE>
__global__ void _CSRRowWiseSampleUniformKernel(
const uint64_t rand_seed,
const int64_t num_picks,
const int64_t num_rows,
const IdType * const in_rows,
const IdType * const in_ptr,
const IdType * const in_index,
const IdType * const data,
const IdType * const out_ptr,
IdType * const out_rows,
IdType * const out_cols,
IdType * const out_idxs) {
// we assign one block of BLOCK_SIZE threads to every TILE_SIZE rows
assert(blockDim.x == BLOCK_SIZE);
int64_t out_row = blockIdx.x * TILE_SIZE;
const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
curandStatePhilox4_32_10_t rng;
curand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng);
while (out_row < last_row) {
const int64_t row = in_rows[out_row];
const int64_t in_row_start = in_ptr[row];
const int64_t deg = in_ptr[row + 1] - in_row_start;
const int64_t out_row_start = out_ptr[out_row];
if (deg <= num_picks) {
// just copy the row when there are not enough nodes to sample.
for (int idx = threadIdx.x; idx < deg; idx += BLOCK_SIZE) {
const IdType in_idx = in_row_start + idx;
out_rows[out_row_start + idx] = row;
out_cols[out_row_start + idx] = in_index[in_idx];
out_idxs[out_row_start + idx] = data ? data[in_idx] : in_idx;
}
} else {
// generate permutation list via reservoir algorithm
for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) {
out_idxs[out_row_start + idx] = idx;
}
__syncthreads();
for (int idx = num_picks + threadIdx.x; idx < deg; idx += BLOCK_SIZE) {
const int num = curand(&rng) % (idx + 1);
if (num < num_picks) {
// use max so as to achieve the replacement order the serial
// algorithm would have
AtomicMax(out_idxs + out_row_start + num, idx);
}
}
__syncthreads();
// copy permutation over
for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) {
const IdType perm_idx = out_idxs[out_row_start + idx] + in_row_start;
out_rows[out_row_start + idx] = row;
out_cols[out_row_start + idx] = in_index[perm_idx];
out_idxs[out_row_start + idx] = data ? data[perm_idx] : perm_idx;
}
}
out_row += 1;
}
}
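// ----------------------------------------------------------------------------
// A small helper restating the launch shape used by the host code below
// (illustrative only): with TILE_SIZE rows per block, the grid is
// ceil(num_rows / TILE_SIZE) blocks of BLOCK_SIZE threads.
// ----------------------------------------------------------------------------
inline dim3 _SamplingGridForRows(const int64_t num_rows, const int tile_size) {
  return dim3(static_cast<unsigned int>((num_rows + tile_size - 1) / tile_size));
}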
/**
* @brief Perform row-wise uniform sampling on a CSR matrix,
* and generate a COO matrix, with replacement.
*
* @tparam IdType The ID type used for matrices.
* @tparam TILE_SIZE The number of rows covered by each threadblock.
* @param rand_seed The random seed to use.
* @param num_picks The number of non-zeros to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The indptr array of the input CSR.
* @param in_index The indices array of the input CSR.
* @param data The data array of the input CSR.
* @param out_ptr The offset to write each row to in the output COO.
* @param out_rows The rows of the output COO (output).
* @param out_cols The columns of the output COO (output).
* @param out_idxs The data array of the output COO (output).
*/
template<typename IdType, int TILE_SIZE>
__global__ void _CSRRowWiseSampleUniformReplaceKernel(
const uint64_t rand_seed,
const int64_t num_picks,
const int64_t num_rows,
const IdType * const in_rows,
const IdType * const in_ptr,
const IdType * const in_index,
const IdType * const data,
const IdType * const out_ptr,
IdType * const out_rows,
IdType * const out_cols,
IdType * const out_idxs) {
// we assign one block of BLOCK_SIZE threads to every TILE_SIZE rows
assert(blockDim.x == BLOCK_SIZE);
int64_t out_row = blockIdx.x * TILE_SIZE;
const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
curandStatePhilox4_32_10_t rng;
curand_init(rand_seed * gridDim.x + blockIdx.x, threadIdx.x, 0, &rng);
while (out_row < last_row) {
const int64_t row = in_rows[out_row];
const int64_t in_row_start = in_ptr[row];
const int64_t out_row_start = out_ptr[out_row];
const int64_t deg = in_ptr[row + 1] - in_row_start;
if (deg > 0) {
// each thread independently draws some of the row's picks (with replacement), but only if deg > 0.
for (int idx = threadIdx.x; idx < num_picks; idx += BLOCK_SIZE) {
const int64_t edge = curand(&rng) % deg;
const int64_t out_idx = out_row_start + idx;
out_rows[out_idx] = row;
out_cols[out_idx] = in_index[in_row_start + edge];
out_idxs[out_idx] = data ? data[in_row_start + edge] : in_row_start + edge;
}
}
out_row += 1;
}
}
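// ----------------------------------------------------------------------------
// A hedged serial reference of sampling with replacement as done by the
// kernel above: each of the num_picks slots independently draws one of the
// row's deg edges, and rows with deg == 0 contribute nothing. "rand_slot"
// stands in for any uniform integer source; the helper is illustrative only.
// ----------------------------------------------------------------------------
template<typename IdType, typename Rng>
void _SampleWithReplacementHostRef(
    const int64_t num_picks,
    const int64_t deg,
    const int64_t in_row_start,
    IdType * const picked_edge_idx,
    Rng&& rand_slot) {
  if (deg == 0) return;
  for (int64_t i = 0; i < num_picks; ++i) {
    picked_edge_idx[i] = static_cast<IdType>(in_row_start + rand_slot(deg));
  }
}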
} // namespace
///////////////////////////// CSR sampling //////////////////////////
template <DLDeviceType XPU, typename IdType>
COOMatrix CSRRowWiseSamplingUniform(CSRMatrix mat,
IdArray rows,
const int64_t num_picks,
const bool replace) {
const auto& ctx = rows->ctx;
auto device = runtime::DeviceAPI::Get(ctx);
// TODO(dlasalle): Once the device api supports getting the stream from the
// context, that should be used instead of the default stream here.
cudaStream_t stream = 0;
const int64_t num_rows = rows->shape[0];
const IdType * const slice_rows = static_cast<const IdType*>(rows->data);
IdArray picked_row = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
IdArray picked_col = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
IdArray picked_idx = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
const IdType * const in_ptr = static_cast<const IdType*>(mat.indptr->data);
const IdType * const in_cols = static_cast<const IdType*>(mat.indices->data);
IdType* const out_rows = static_cast<IdType*>(picked_row->data);
IdType* const out_cols = static_cast<IdType*>(picked_col->data);
IdType* const out_idxs = static_cast<IdType*>(picked_idx->data);
const IdType* const data = CSRHasData(mat) ?
static_cast<IdType*>(mat.data->data) : nullptr;
// compute degree
IdType * out_deg = static_cast<IdType*>(
device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType)));
if (replace) {
const dim3 block(512);
const dim3 grid((num_rows + block.x - 1) / block.x);
CUDA_KERNEL_CALL(
_CSRRowWiseSampleDegreeReplaceKernel,
grid, block, 0, stream,
num_picks, num_rows, slice_rows, in_ptr, out_deg);
} else {
const dim3 block(512);
const dim3 grid((num_rows + block.x - 1) / block.x);
CUDA_KERNEL_CALL(
_CSRRowWiseSampleDegreeKernel,
grid, block, 0, stream,
num_picks, num_rows, slice_rows, in_ptr, out_deg);
}
// fill out_ptr
IdType * out_ptr = static_cast<IdType*>(
device->AllocWorkspace(ctx, (num_rows + 1) * sizeof(IdType)));
size_t prefix_temp_size = 0;
CUDA_CALL(cub::DeviceScan::ExclusiveSum(nullptr, prefix_temp_size,
out_deg,
out_ptr,
num_rows+1,
stream));
void * prefix_temp = device->AllocWorkspace(ctx, prefix_temp_size);
CUDA_CALL(cub::DeviceScan::ExclusiveSum(prefix_temp, prefix_temp_size,
out_deg,
out_ptr,
num_rows+1,
stream));
device->FreeWorkspace(ctx, prefix_temp);
device->FreeWorkspace(ctx, out_deg);
cudaEvent_t copyEvent;
CUDA_CALL(cudaEventCreate(©Event));
// TODO(dlasalle): use pinned memory to overlap with the actual sampling, and wait on
// a cudaevent
IdType new_len;
device->CopyDataFromTo(out_ptr, num_rows * sizeof(new_len), &new_len, 0,
sizeof(new_len),
ctx,
DGLContext{kDLCPU, 0},
mat.indptr->dtype,
stream);
CUDA_CALL(cudaEventRecord(copyEvent, stream));
const uint64_t random_seed = RandomEngine::ThreadLocal()->RandInt(1000000000);
// select edges
// the number of rows each thread block will cover
constexpr int TILE_SIZE = 128 / BLOCK_SIZE;
if (replace) { // with replacement
const dim3 block(BLOCK_SIZE);
const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE);
CUDA_KERNEL_CALL(
(_CSRRowWiseSampleUniformReplaceKernel<IdType, TILE_SIZE>),
grid, block, 0, stream,
random_seed,
num_picks,
num_rows,
slice_rows,
in_ptr,
in_cols,
data,
out_ptr,
out_rows,
out_cols,
out_idxs);
} else { // without replacement
const dim3 block(BLOCK_SIZE);
const dim3 grid((num_rows + TILE_SIZE - 1) / TILE_SIZE);
CUDA_KERNEL_CALL(
(_CSRRowWiseSampleUniformKernel<IdType, TILE_SIZE>),
grid, block, 0, stream,
random_seed,
num_picks,
num_rows,
slice_rows,
in_ptr,
in_cols,
data,
out_ptr,
out_rows,
out_cols,
out_idxs);
}
device->FreeWorkspace(ctx, out_ptr);
// wait for copying `new_len` to finish
CUDA_CALL(cudaEventSynchronize(copyEvent));
CUDA_CALL(cudaEventDestroy(copyEvent));
picked_row = picked_row.CreateView({new_len}, picked_row->dtype);
picked_col = picked_col.CreateView({new_len}, picked_col->dtype);
picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype);
return COOMatrix(mat.num_rows, mat.num_cols, picked_row,
picked_col, picked_idx);
}
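// ----------------------------------------------------------------------------
// A hedged, isolated sketch of the event pattern used above for reading back
// the final prefix-sum entry: record an event on the stream right after the
// async D2H copy, queue any independent work, then synchronize on the event
// (not the whole device) before the host touches the value. Names here are
// illustrative only.
// ----------------------------------------------------------------------------
template <typename T>
void _CopyScalarD2HWithEvent(const T* device_src, T* host_dst, cudaStream_t stream) {
  cudaEvent_t done;
  CUDA_CALL(cudaEventCreate(&done));
  CUDA_CALL(cudaMemcpyAsync(host_dst, device_src, sizeof(T),
                            cudaMemcpyDeviceToHost, stream));
  CUDA_CALL(cudaEventRecord(done, stream));
  // ... other device work that does not depend on *host_dst can be queued here ...
  CUDA_CALL(cudaEventSynchronize(done));  // *host_dst is valid from here on
  CUDA_CALL(cudaEventDestroy(done));
}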
template COOMatrix CSRRowWiseSamplingUniform<kDLGPU, int32_t>(
CSRMatrix, IdArray, int64_t, bool);
template COOMatrix CSRRowWiseSamplingUniform<kDLGPU, int64_t>(
CSRMatrix, IdArray, int64_t, bool);
} // namespace impl
} // namespace aten
} // namespace dgl
|
5742cff45302c5b60cae0e91f75a1470a8258ecc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CascadedCommon.h"
#include "CascadedMetadataOnGPU.h"
#include "CudaUtils.h"
#include "common.h"
#include <cassert>
#include <stdexcept>
#include <string>
#include <vector>
namespace nvcomp
{
namespace highlevel
{
/******************************************************************************
* TYPES **********************************************************************
*****************************************************************************/
namespace
{
using VERSION_TYPE = uint16_t;
using NUM_RLES_TYPE = uint8_t;
using NUM_DELTAS_TYPE = uint8_t;
using USE_BITPACKING_TYPE = uint8_t;
using COMP_BYTES_TYPE = uint64_t;
using DECOMP_BYTES_TYPE = uint64_t;
using IN_TYPE_TYPE = int32_t;
using NUM_INPUTS_TYPE = int32_t;
using OFFSET_TYPE = uint64_t;
using HEADER_TYPE = CascadedMetadata::Header;
} // namespace
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const size_t NULL_NUM_INPUTS = static_cast<size_t>(-1);
constexpr const size_t MAX_NUM_RLES = CascadedMetadata::MAX_NUM_RLES;
enum OffsetType : unsigned int
{
OFFSET_VERSION = 0,
OFFSET_NUM_RLES
= roundUpTo(OFFSET_VERSION + sizeof(VERSION_TYPE), sizeof(NUM_RLES_TYPE)),
OFFSET_NUM_DELTAS
= roundUpTo(OFFSET_NUM_RLES + sizeof(NUM_RLES_TYPE), sizeof(NUM_DELTAS_TYPE)),
OFFSET_USE_BITPACKING = roundUpTo(
OFFSET_NUM_DELTAS + sizeof(NUM_DELTAS_TYPE), sizeof(USE_BITPACKING_TYPE)),
OFFSET_COMP_BYTES = roundUpTo(
OFFSET_USE_BITPACKING + sizeof(USE_BITPACKING_TYPE),
sizeof(COMP_BYTES_TYPE)),
OFFSET_DECOMP_BYTES = roundUpTo(
OFFSET_COMP_BYTES + sizeof(COMP_BYTES_TYPE), sizeof(DECOMP_BYTES_TYPE)),
OFFSET_IN_TYPE = roundUpTo(
OFFSET_DECOMP_BYTES + sizeof(DECOMP_BYTES_TYPE), sizeof(IN_TYPE_TYPE)),
OFFSET_NUM_INPUTS
= roundUpTo(OFFSET_IN_TYPE + sizeof(IN_TYPE_TYPE), sizeof(NUM_INPUTS_TYPE)),
OFFSET_HEADERS
= roundUpTo(OFFSET_NUM_INPUTS + sizeof(NUM_INPUTS_TYPE), sizeof(OFFSET_TYPE))
};
} // namespace
/******************************************************************************
* DEVICE FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
inline constexpr __device__ __host__ unsigned int
serializedMetadataSize(const int numInputs)
{
return OFFSET_HEADERS + sizeof(OFFSET_TYPE) * numInputs
+ sizeof(HEADER_TYPE) * numInputs;
}
template <typename T, OffsetType offset>
__device__ __host__ void setField(uint8_t* const data, T const v)
{
*reinterpret_cast<T*>(data + offset) = v;
}
template <typename T, OffsetType offset>
__device__ __host__ void
setField(uint8_t* const data, T const v, const size_t dynamicOffset)
{
reinterpret_cast<T*>(data + offset)[dynamicOffset] = v;
}
template <typename T, OffsetType offset>
__device__ __host__ T getField(const uint8_t* const data)
{
return *reinterpret_cast<const T*>(data + offset);
}
template <typename T, OffsetType offset>
__device__ __host__ T
getField(const uint8_t* const data, const size_t dynamicOffset)
{
return reinterpret_cast<const T*>(data + offset)[dynamicOffset];
}
__device__ __host__ size_t getOffsetsOffset(size_t numInputs)
{
return roundUpTo(
OFFSET_HEADERS + sizeof(HEADER_TYPE) * numInputs, sizeof(OFFSET_TYPE));
}
} // namespace
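// ----------------------------------------------------------------------------
// A hedged illustration of how setField/getField pair up with the
// compile-time offsets declared in OffsetType: round-tripping the numRLEs
// field through a host-side scratch buffer. Purely illustrative and unused
// elsewhere in this file.
// ----------------------------------------------------------------------------
inline bool roundTripNumRLEsExample()
{
  uint8_t scratch[serializedMetadataSize(1)] = {0};
  setField<NUM_RLES_TYPE, OFFSET_NUM_RLES>(scratch, static_cast<NUM_RLES_TYPE>(2));
  return getField<NUM_RLES_TYPE, OFFSET_NUM_RLES>(scratch) == 2;
}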
/******************************************************************************
* KERNELS ********************************************************************
*****************************************************************************/
namespace
{
__global__ void serializeV1(
void* const dest,
const size_t /* destSize */,
const int numRLEs,
const int numDeltas,
const bool useBitPacking,
const size_t comp_bytes,
const size_t decomp_bytes,
const nvcompType_t in_type,
const int numInputs,
size_t* const serializedSizeDevice)
{
using Chunk = uint32_t;
__shared__ uint8_t localBuffer[serializedMetadataSize(MAX_NUM_RLES + 1)];
assert(blockIdx.x == 0);
// master thread assigns local buffer
if (threadIdx.x == 0) {
setField<VERSION_TYPE, OFFSET_VERSION>(localBuffer, 1);
setField<NUM_RLES_TYPE, OFFSET_NUM_RLES>(localBuffer, numRLEs);
setField<NUM_DELTAS_TYPE, OFFSET_NUM_DELTAS>(localBuffer, numDeltas);
setField<USE_BITPACKING_TYPE, OFFSET_USE_BITPACKING>(
localBuffer, useBitPacking);
setField<COMP_BYTES_TYPE, OFFSET_COMP_BYTES>(localBuffer, comp_bytes);
setField<DECOMP_BYTES_TYPE, OFFSET_DECOMP_BYTES>(localBuffer, decomp_bytes);
setField<IN_TYPE_TYPE, OFFSET_IN_TYPE>(localBuffer, in_type);
setField<NUM_INPUTS_TYPE, OFFSET_NUM_INPUTS>(localBuffer, numInputs);
if (serializedSizeDevice) {
*serializedSizeDevice = serializedMetadataSize(numInputs);
}
}
__syncthreads();
// all threads copy to global memory
for (int idx = threadIdx.x; idx * sizeof(Chunk) < OFFSET_HEADERS;
idx += blockDim.x) {
static_cast<Chunk*>(dest)[idx] = reinterpret_cast<Chunk*>(localBuffer)[idx];
}
}
__global__ void setOffset(
void* const serializedMetadata,
const size_t index,
const size_t* const offsetDevice)
{
assert(blockIdx.x == 0);
assert(threadIdx.x == 0);
uint8_t* const serializedBytes = static_cast<uint8_t*>(serializedMetadata);
const int numInputs
= getField<NUM_INPUTS_TYPE, OFFSET_NUM_INPUTS>(serializedBytes);
// dataOffsets
setField<OFFSET_TYPE, static_cast<OffsetType>(0)>(
serializedBytes + getOffsetsOffset(numInputs),
static_cast<OFFSET_TYPE>(*offsetDevice),
index);
}
} // namespace
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
template <typename T>
constexpr bool isFixedWidth()
{
return std::is_same<T, char>::value || std::is_same<T, int8_t>::value
|| std::is_same<T, uint8_t>::value || std::is_same<T, int16_t>::value
|| std::is_same<T, uint16_t>::value || std::is_same<T, int32_t>::value
|| std::is_same<T, uint32_t>::value || std::is_same<T, int64_t>::value
|| std::is_same<T, uint64_t>::value;
}
template <typename T>
size_t readFixedWidthData(
T* const val,
const void* const ptr,
const size_t offset,
const size_t maxSize)
{
assert(isFixedWidth<T>());
size_t newOffset = offset + sizeof(*val);
if (newOffset > maxSize) {
throw std::runtime_error(
"Not enough room to read member, need at least "
+ std::to_string(newOffset) + " bytes, but given only "
+ std::to_string(maxSize));
}
*val = *reinterpret_cast<const T*>(static_cast<const char*>(ptr) + offset);
return newOffset;
}
CascadedMetadata deserializeMetadataFromGPUVersion1(
const void* const devicePtr, const size_t size, hipStream_t stream)
{
NUM_INPUTS_TYPE numInputs;
CudaUtils::copy_async(
&numInputs,
(const NUM_INPUTS_TYPE*)(static_cast<const uint8_t*>(devicePtr) + OFFSET_NUM_INPUTS),
1,
DEVICE_TO_HOST,
stream);
CudaUtils::sync(stream);
std::vector<uint8_t> localBuffer(serializedMetadataSize(numInputs));
if (size < localBuffer.size()) {
throw std::runtime_error(
"Insufficient space to deserialize metadata "
"from: "
+ std::to_string(size) + " but require "
+ std::to_string(localBuffer.size()));
}
CudaUtils::copy_async(
(uint8_t*)localBuffer.data(),
(const uint8_t*)devicePtr,
localBuffer.size(),
DEVICE_TO_HOST,
stream);
CudaUtils::sync(stream);
// here we convert to types of fixed width by the C++ standard rather than
// just doing a memcpy of the struct, to ensure portability.
nvcompCascadedFormatOpts format_opts;
format_opts.num_RLEs
= getField<NUM_RLES_TYPE, OFFSET_NUM_RLES>(localBuffer.data());
format_opts.num_deltas
= getField<NUM_DELTAS_TYPE, OFFSET_NUM_DELTAS>(localBuffer.data());
format_opts.use_bp = getField<USE_BITPACKING_TYPE, OFFSET_USE_BITPACKING>(
localBuffer.data());
const COMP_BYTES_TYPE comp_bytes
= getField<COMP_BYTES_TYPE, OFFSET_COMP_BYTES>(localBuffer.data());
const DECOMP_BYTES_TYPE decomp_bytes
= getField<DECOMP_BYTES_TYPE, OFFSET_DECOMP_BYTES>(localBuffer.data());
const IN_TYPE_TYPE in_type
= getField<IN_TYPE_TYPE, OFFSET_IN_TYPE>(localBuffer.data());
CascadedMetadata metadata(
format_opts,
static_cast<nvcompType_t>(in_type),
decomp_bytes,
comp_bytes);
if (numInputs != static_cast<int>(metadata.getNumInputs())) {
throw std::runtime_error(
"Mismatch in numInputs while deserializing "
"metadata: "
+ std::to_string(numInputs) + " vs. "
+ std::to_string(metadata.getNumInputs()));
}
for (size_t i = 0; i < metadata.getNumInputs(); ++i) {
const HEADER_TYPE header
= getField<HEADER_TYPE, OFFSET_HEADERS>(localBuffer.data(), i);
metadata.setHeader(i, header);
}
for (size_t i = 0; i < metadata.getNumInputs(); ++i) {
const OFFSET_TYPE dataOffset
= getField<OFFSET_TYPE, static_cast<OffsetType>(0)>(
localBuffer.data() + getOffsetsOffset(metadata.getNumInputs()), i);
metadata.setDataOffset(i, dataOffset);
}
return metadata;
}
} // namespace
/******************************************************************************
* PUBLIC STATIC METHODS ******************************************************
*****************************************************************************/
size_t
CascadedMetadataOnGPU::getSerializedSizeOf(const CascadedMetadata& metadata)
{
return serializedMetadataSize(metadata.getNumInputs());
}
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
CascadedMetadataOnGPU::CascadedMetadataOnGPU(void* const ptr, size_t maxSize) :
m_ptr(ptr),
m_maxSize(maxSize),
m_numInputs(NULL_NUM_INPUTS)
{
if (ptr == nullptr) {
throw std::runtime_error("Location given to CascadedMetadataOnGPU() must "
"be a valid pointer.");
} else if (m_maxSize < OFFSET_HEADERS) {
throw std::runtime_error(
"Maximum size given to CascadedMetdataOnGPU() "
"must be greater than "
+ std::to_string(OFFSET_HEADERS) + " bytes.");
}
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void CascadedMetadataOnGPU::copyToGPU(
const CascadedMetadata& metadata,
size_t* const serializedSizeDevice,
hipStream_t stream)
{
const size_t requiredSize = serializedMetadataSize(metadata.getNumInputs());
if (m_maxSize < requiredSize) {
throw std::runtime_error(
"Invalid space for serialized metadata on GPU: "
+ std::to_string(m_maxSize) + " when " + std::to_string(requiredSize)
+ " is needed.");
}
hipLaunchKernelGGL(( serializeV1), dim3(1), dim3(64), 0, stream,
m_ptr,
m_maxSize,
metadata.getNumRLEs(),
metadata.getNumDeltas(),
metadata.useBitPacking(),
metadata.getCompressedSize(),
metadata.getUncompressedSize(),
metadata.getValueType(),
metadata.getNumInputs(),
serializedSizeDevice);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
throw std::runtime_error(
"Failed to launch metadata serialization kernel: "
+ std::to_string(err));
}
if (metadata.haveAnyOffsetsBeenSet()) {
if (!metadata.haveAllOffsetsBeenSet()) {
throw std::runtime_error(
"Only some offsets have been set before calling "
"CascadedMetadataOnGPU::copyToGPU(). This is not "
"a valid state for copying CascadedMetadata to the GPU.");
}
for (size_t i = 0; i < metadata.getNumInputs(); ++i) {
const HEADER_TYPE header = metadata.getHeader(i);
CudaUtils::copy_async(
(HEADER_TYPE*)(static_cast<uint8_t*>(m_ptr) + OFFSET_HEADERS + i * sizeof(header)),
&header,
1,
HOST_TO_DEVICE,
stream);
}
for (size_t i = 0; i < metadata.getNumInputs(); ++i) {
const OFFSET_TYPE offset = metadata.getDataOffset(i);
CudaUtils::copy_async(
(OFFSET_TYPE*)(static_cast<uint8_t*>(m_ptr)
+ getOffsetsOffset(metadata.getNumInputs())
+ sizeof(OFFSET_TYPE) * i),
&offset,
1,
HOST_TO_DEVICE,
stream);
}
}
// if successful, set the number of inputs
m_numInputs = metadata.getNumInputs();
}
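// ----------------------------------------------------------------------------
// A hedged usage sketch of this class: one way a caller could serialize
// already-populated metadata into a device buffer. Where "metadata" and
// "devicePtr" come from, and all error handling, are outside the scope of
// the sketch.
// ----------------------------------------------------------------------------
inline size_t exampleSerializeMetadata(
    const CascadedMetadata& metadata,
    void* const devicePtr,
    const size_t maxBytes,
    hipStream_t stream)
{
  CascadedMetadataOnGPU gpuMetadata(devicePtr, maxBytes);
  gpuMetadata.copyToGPU(metadata, stream);
  return gpuMetadata.getSerializedSize();
}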
void CascadedMetadataOnGPU::copyToGPU(
const CascadedMetadata& metadata, hipStream_t stream)
{
copyToGPU(metadata, nullptr, stream);
}
size_t CascadedMetadataOnGPU::getSerializedSize() const
{
verifyInitialized();
return serializedMetadataSize(m_numInputs);
}
void CascadedMetadataOnGPU::saveOffset(
size_t index, const size_t* offsetDevice, hipStream_t stream)
{
verifyInitialized();
if (index >= m_numInputs) {
throw std::runtime_error(
"Invalid input index " + std::to_string(index) + " / "
+ std::to_string(m_numInputs)
+ " given to "
"CascadedMetadataOnGPU::saveOffsets().");
}
hipLaunchKernelGGL(( setOffset), dim3(1), dim3(1), 0, stream, m_ptr, index, offsetDevice);
}
void CascadedMetadataOnGPU::setCompressedSizeFromGPU(
const size_t* sizeOnDevice, hipStream_t stream)
{
// TODO: re-write so that we don't depend on 64-bit architecture
static_assert(
sizeof(size_t) == sizeof(COMP_BYTES_TYPE),
"Requires size_t be 64 bits wide.");
COMP_BYTES_TYPE* const compBytesDevice = reinterpret_cast<COMP_BYTES_TYPE*>(
static_cast<char*>(m_ptr) + OFFSET_COMP_BYTES);
hipError_t err = hipMemcpyAsync(
compBytesDevice,
sizeOnDevice,
sizeof(COMP_BYTES_TYPE),
hipMemcpyDeviceToDevice,
stream);
if (err != hipSuccess) {
throw std::runtime_error(
"Async memcpy in "
"CascadedMetadataOnGPU::setCompressedSizeFromGPU() failed with: "
+ std::to_string(err));
}
}
CascadedMetadata CascadedMetadataOnGPU::copyToHost(hipStream_t stream)
{
// read the version of the serialized metadata.
VERSION_TYPE version;
CudaUtils::copy_async(
&version,
static_cast<const VERSION_TYPE*>(m_ptr),
1,
DEVICE_TO_HOST,
stream);
CudaUtils::sync(stream);
if (version == 1) {
CascadedMetadata metadata
= deserializeMetadataFromGPUVersion1(m_ptr, m_maxSize, stream);
m_numInputs = metadata.getNumInputs();
return metadata;
} else {
throw std::runtime_error(
"Unsupported Metadata version: " + std::to_string(version));
}
}
CascadedMetadata::Header*
CascadedMetadataOnGPU::getHeaderLocation(const size_t index)
{
verifyIndex(index);
return reinterpret_cast<HEADER_TYPE*>(
static_cast<uint8_t*>(m_ptr) + OFFSET_HEADERS)
+ index;
}
const CascadedMetadata::Header*
CascadedMetadataOnGPU::getHeaderLocation(const size_t index) const
{
verifyIndex(index);
return reinterpret_cast<const HEADER_TYPE*>(
static_cast<const uint8_t*>(m_ptr) + OFFSET_HEADERS)
+ index;
}
/******************************************************************************
* PRIVATE METHODS ************************************************************
*****************************************************************************/
void CascadedMetadataOnGPU::verifyInitialized() const
{
if (m_numInputs == NULL_NUM_INPUTS) {
throw std::runtime_error("CascadedMetadataOnGPU::copyToGPU() or "
"CascadedMetdataOnGPU::copyToHost() must be "
"called before other methods.");
}
}
void CascadedMetadataOnGPU::verifyIndex(const size_t index) const
{
verifyInitialized();
if (index >= m_numInputs) {
throw std::runtime_error(
"Invalid index to set header for: " + std::to_string(index) + " / "
+ std::to_string(m_numInputs));
}
}
} // namespace highlevel
} // namespace nvcomp
|
5742cff45302c5b60cae0e91f75a1470a8258ecc.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CascadedCommon.h"
#include "CascadedMetadataOnGPU.h"
#include "CudaUtils.h"
#include "common.h"
#include <cassert>
#include <stdexcept>
#include <string>
#include <vector>
namespace nvcomp
{
namespace highlevel
{
/******************************************************************************
* TYPES **********************************************************************
*****************************************************************************/
namespace
{
using VERSION_TYPE = uint16_t;
using NUM_RLES_TYPE = uint8_t;
using NUM_DELTAS_TYPE = uint8_t;
using USE_BITPACKING_TYPE = uint8_t;
using COMP_BYTES_TYPE = uint64_t;
using DECOMP_BYTES_TYPE = uint64_t;
using IN_TYPE_TYPE = int32_t;
using NUM_INPUTS_TYPE = int32_t;
using OFFSET_TYPE = uint64_t;
using HEADER_TYPE = CascadedMetadata::Header;
} // namespace
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const size_t NULL_NUM_INPUTS = static_cast<size_t>(-1);
constexpr const size_t MAX_NUM_RLES = CascadedMetadata::MAX_NUM_RLES;
enum OffsetType : unsigned int
{
OFFSET_VERSION = 0,
OFFSET_NUM_RLES
= roundUpTo(OFFSET_VERSION + sizeof(VERSION_TYPE), sizeof(NUM_RLES_TYPE)),
OFFSET_NUM_DELTAS
= roundUpTo(OFFSET_NUM_RLES + sizeof(NUM_RLES_TYPE), sizeof(NUM_DELTAS_TYPE)),
OFFSET_USE_BITPACKING = roundUpTo(
OFFSET_NUM_DELTAS + sizeof(NUM_DELTAS_TYPE), sizeof(USE_BITPACKING_TYPE)),
OFFSET_COMP_BYTES = roundUpTo(
OFFSET_USE_BITPACKING + sizeof(USE_BITPACKING_TYPE),
sizeof(COMP_BYTES_TYPE)),
OFFSET_DECOMP_BYTES = roundUpTo(
OFFSET_COMP_BYTES + sizeof(COMP_BYTES_TYPE), sizeof(DECOMP_BYTES_TYPE)),
OFFSET_IN_TYPE = roundUpTo(
OFFSET_DECOMP_BYTES + sizeof(DECOMP_BYTES_TYPE), sizeof(IN_TYPE_TYPE)),
OFFSET_NUM_INPUTS
= roundUpTo(OFFSET_IN_TYPE + sizeof(IN_TYPE_TYPE), sizeof(NUM_INPUTS_TYPE)),
OFFSET_HEADERS
= roundUpTo(OFFSET_NUM_INPUTS + sizeof(NUM_INPUTS_TYPE), sizeof(OFFSET_TYPE))
};
} // namespace
/******************************************************************************
* DEVICE FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
inline constexpr __device__ __host__ unsigned int
serializedMetadataSize(const int numInputs)
{
return OFFSET_HEADERS + sizeof(OFFSET_TYPE) * numInputs
+ sizeof(HEADER_TYPE) * numInputs;
}
template <typename T, OffsetType offset>
__device__ __host__ void setField(uint8_t* const data, T const v)
{
*reinterpret_cast<T*>(data + offset) = v;
}
template <typename T, OffsetType offset>
__device__ __host__ void
setField(uint8_t* const data, T const v, const size_t dynamicOffset)
{
reinterpret_cast<T*>(data + offset)[dynamicOffset] = v;
}
template <typename T, OffsetType offset>
__device__ __host__ T getField(const uint8_t* const data)
{
return *reinterpret_cast<const T*>(data + offset);
}
template <typename T, OffsetType offset>
__device__ __host__ T
getField(const uint8_t* const data, const size_t dynamicOffset)
{
return reinterpret_cast<const T*>(data + offset)[dynamicOffset];
}
__device__ __host__ size_t getOffsetsOffset(size_t numInputs)
{
return roundUpTo(
OFFSET_HEADERS + sizeof(HEADER_TYPE) * numInputs, sizeof(OFFSET_TYPE));
}
} // namespace
/******************************************************************************
* KERNELS ********************************************************************
*****************************************************************************/
namespace
{
__global__ void serializeV1(
void* const dest,
const size_t /* destSize */,
const int numRLEs,
const int numDeltas,
const bool useBitPacking,
const size_t comp_bytes,
const size_t decomp_bytes,
const nvcompType_t in_type,
const int numInputs,
size_t* const serializedSizeDevice)
{
using Chunk = uint32_t;
__shared__ uint8_t localBuffer[serializedMetadataSize(MAX_NUM_RLES + 1)];
assert(blockIdx.x == 0);
// master thread assigns local buffer
if (threadIdx.x == 0) {
setField<VERSION_TYPE, OFFSET_VERSION>(localBuffer, 1);
setField<NUM_RLES_TYPE, OFFSET_NUM_RLES>(localBuffer, numRLEs);
setField<NUM_DELTAS_TYPE, OFFSET_NUM_DELTAS>(localBuffer, numDeltas);
setField<USE_BITPACKING_TYPE, OFFSET_USE_BITPACKING>(
localBuffer, useBitPacking);
setField<COMP_BYTES_TYPE, OFFSET_COMP_BYTES>(localBuffer, comp_bytes);
setField<DECOMP_BYTES_TYPE, OFFSET_DECOMP_BYTES>(localBuffer, decomp_bytes);
setField<IN_TYPE_TYPE, OFFSET_IN_TYPE>(localBuffer, in_type);
setField<NUM_INPUTS_TYPE, OFFSET_NUM_INPUTS>(localBuffer, numInputs);
if (serializedSizeDevice) {
*serializedSizeDevice = serializedMetadataSize(numInputs);
}
}
__syncthreads();
// all threads copy to global memory
for (int idx = threadIdx.x; idx * sizeof(Chunk) < OFFSET_HEADERS;
idx += blockDim.x) {
static_cast<Chunk*>(dest)[idx] = reinterpret_cast<Chunk*>(localBuffer)[idx];
}
}
__global__ void setOffset(
void* const serializedMetadata,
const size_t index,
const size_t* const offsetDevice)
{
assert(blockIdx.x == 0);
assert(threadIdx.x == 0);
uint8_t* const serializedBytes = static_cast<uint8_t*>(serializedMetadata);
const int numInputs
= getField<NUM_INPUTS_TYPE, OFFSET_NUM_INPUTS>(serializedBytes);
// dataOffsets
setField<OFFSET_TYPE, static_cast<OffsetType>(0)>(
serializedBytes + getOffsetsOffset(numInputs),
static_cast<OFFSET_TYPE>(*offsetDevice),
index);
}
} // namespace
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
template <typename T>
constexpr bool isFixedWidth()
{
return std::is_same<T, char>::value || std::is_same<T, int8_t>::value
|| std::is_same<T, uint8_t>::value || std::is_same<T, int16_t>::value
|| std::is_same<T, uint16_t>::value || std::is_same<T, int32_t>::value
|| std::is_same<T, uint32_t>::value || std::is_same<T, int64_t>::value
|| std::is_same<T, uint64_t>::value;
}
template <typename T>
size_t readFixedWidthData(
T* const val,
const void* const ptr,
const size_t offset,
const size_t maxSize)
{
assert(isFixedWidth<T>());
size_t newOffset = offset + sizeof(*val);
if (newOffset > maxSize) {
throw std::runtime_error(
"Not enough room to read member, need at least "
+ std::to_string(newOffset) + " bytes, but given only "
+ std::to_string(maxSize));
}
*val = *reinterpret_cast<const T*>(static_cast<const char*>(ptr) + offset);
return newOffset;
}
CascadedMetadata deserializeMetadataFromGPUVersion1(
const void* const devicePtr, const size_t size, cudaStream_t stream)
{
NUM_INPUTS_TYPE numInputs;
CudaUtils::copy_async(
&numInputs,
(const NUM_INPUTS_TYPE*)(static_cast<const uint8_t*>(devicePtr) + OFFSET_NUM_INPUTS),
1,
DEVICE_TO_HOST,
stream);
CudaUtils::sync(stream);
std::vector<uint8_t> localBuffer(serializedMetadataSize(numInputs));
if (size < localBuffer.size()) {
throw std::runtime_error(
"Insufficient space to deserialize metadata "
"from: "
+ std::to_string(size) + " but require "
+ std::to_string(localBuffer.size()));
}
CudaUtils::copy_async(
(uint8_t*)localBuffer.data(),
(const uint8_t*)devicePtr,
localBuffer.size(),
DEVICE_TO_HOST,
stream);
CudaUtils::sync(stream);
// here we convert to types of fixed width by the C++ standard rather than
// just doing a memcpy of the struct, to ensure portability.
nvcompCascadedFormatOpts format_opts;
format_opts.num_RLEs
= getField<NUM_RLES_TYPE, OFFSET_NUM_RLES>(localBuffer.data());
format_opts.num_deltas
= getField<NUM_DELTAS_TYPE, OFFSET_NUM_DELTAS>(localBuffer.data());
format_opts.use_bp = getField<USE_BITPACKING_TYPE, OFFSET_USE_BITPACKING>(
localBuffer.data());
const COMP_BYTES_TYPE comp_bytes
= getField<COMP_BYTES_TYPE, OFFSET_COMP_BYTES>(localBuffer.data());
const DECOMP_BYTES_TYPE decomp_bytes
= getField<DECOMP_BYTES_TYPE, OFFSET_DECOMP_BYTES>(localBuffer.data());
const IN_TYPE_TYPE in_type
= getField<IN_TYPE_TYPE, OFFSET_IN_TYPE>(localBuffer.data());
CascadedMetadata metadata(
format_opts,
static_cast<nvcompType_t>(in_type),
decomp_bytes,
comp_bytes);
if (numInputs != static_cast<int>(metadata.getNumInputs())) {
throw std::runtime_error(
"Mismatch in numInputs while deserializing "
"metadata: "
+ std::to_string(numInputs) + " vs. "
+ std::to_string(metadata.getNumInputs()));
}
for (size_t i = 0; i < metadata.getNumInputs(); ++i) {
const HEADER_TYPE header
= getField<HEADER_TYPE, OFFSET_HEADERS>(localBuffer.data(), i);
metadata.setHeader(i, header);
}
for (size_t i = 0; i < metadata.getNumInputs(); ++i) {
const OFFSET_TYPE dataOffset
= getField<OFFSET_TYPE, static_cast<OffsetType>(0)>(
localBuffer.data() + getOffsetsOffset(metadata.getNumInputs()), i);
metadata.setDataOffset(i, dataOffset);
}
return metadata;
}
} // namespace
/******************************************************************************
* PUBLIC STATIC METHODS ******************************************************
*****************************************************************************/
size_t
CascadedMetadataOnGPU::getSerializedSizeOf(const CascadedMetadata& metadata)
{
return serializedMetadataSize(metadata.getNumInputs());
}
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
CascadedMetadataOnGPU::CascadedMetadataOnGPU(void* const ptr, size_t maxSize) :
m_ptr(ptr),
m_maxSize(maxSize),
m_numInputs(NULL_NUM_INPUTS)
{
if (ptr == nullptr) {
throw std::runtime_error("Location given to CascadedMetadataOnGPU() must "
"be a valid pointer.");
} else if (m_maxSize < OFFSET_HEADERS) {
throw std::runtime_error(
"Maximum size given to CascadedMetdataOnGPU() "
"must be greater than "
+ std::to_string(OFFSET_HEADERS) + " bytes.");
}
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
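// copyToGPU: writes the serialized header on the device with a single-block
// serializeV1 launch, then, if the host-side metadata already has its offsets
// set, copies each per-chunk header and data offset into its serialized slot.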
void CascadedMetadataOnGPU::copyToGPU(
const CascadedMetadata& metadata,
size_t* const serializedSizeDevice,
cudaStream_t stream)
{
const size_t requiredSize = serializedMetadataSize(metadata.getNumInputs());
if (m_maxSize < requiredSize) {
throw std::runtime_error(
"Invalid space for serialized metadata on GPU: "
+ std::to_string(m_maxSize) + " when " + std::to_string(requiredSize)
+ " is needed.");
}
serializeV1<<<1, 64, 0, stream>>>(
m_ptr,
m_maxSize,
metadata.getNumRLEs(),
metadata.getNumDeltas(),
metadata.useBitPacking(),
metadata.getCompressedSize(),
metadata.getUncompressedSize(),
metadata.getValueType(),
metadata.getNumInputs(),
serializedSizeDevice);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
throw std::runtime_error(
"Failed to launch metadata serialization kernel: "
+ std::to_string(err));
}
if (metadata.haveAnyOffsetsBeenSet()) {
if (!metadata.haveAllOffsetsBeenSet()) {
throw std::runtime_error(
"Only some offsets have been set before calling "
"CascadedMetadataOnGPU::copyToGPU(). This is not "
"a valid state for copying CascadedMetadata to the GPU.");
}
for (size_t i = 0; i < metadata.getNumInputs(); ++i) {
const HEADER_TYPE header = metadata.getHeader(i);
CudaUtils::copy_async(
(HEADER_TYPE*)(static_cast<uint8_t*>(m_ptr) + OFFSET_HEADERS + i * sizeof(header)),
&header,
1,
HOST_TO_DEVICE,
stream);
}
for (size_t i = 0; i < metadata.getNumInputs(); ++i) {
const OFFSET_TYPE offset = metadata.getDataOffset(i);
CudaUtils::copy_async(
(OFFSET_TYPE*)(static_cast<uint8_t*>(m_ptr)
+ getOffsetsOffset(metadata.getNumInputs())
+ sizeof(OFFSET_TYPE) * i),
&offset,
1,
HOST_TO_DEVICE,
stream);
}
}
// if successful, set the number of inputs
m_numInputs = metadata.getNumInputs();
}
void CascadedMetadataOnGPU::copyToGPU(
const CascadedMetadata& metadata, cudaStream_t stream)
{
copyToGPU(metadata, nullptr, stream);
}
size_t CascadedMetadataOnGPU::getSerializedSize() const
{
verifyInitialized();
return serializedMetadataSize(m_numInputs);
}
void CascadedMetadataOnGPU::saveOffset(
size_t index, const size_t* offsetDevice, cudaStream_t stream)
{
verifyInitialized();
if (index >= m_numInputs) {
throw std::runtime_error(
"Invalid input index " + std::to_string(index) + " / "
+ std::to_string(m_numInputs)
+ " given to "
"CascadedMetadataOnGPU::saveOffsets().");
}
setOffset<<<1, 1, 0, stream>>>(m_ptr, index, offsetDevice);
}
void CascadedMetadataOnGPU::setCompressedSizeFromGPU(
const size_t* sizeOnDevice, cudaStream_t stream)
{
// TODO: re-write so that we don't depend on 64-bit architecture
static_assert(
sizeof(size_t) == sizeof(COMP_BYTES_TYPE),
"Requires size_t be 64 bits wide.");
COMP_BYTES_TYPE* const compBytesDevice = reinterpret_cast<COMP_BYTES_TYPE*>(
static_cast<char*>(m_ptr) + OFFSET_COMP_BYTES);
cudaError_t err = cudaMemcpyAsync(
compBytesDevice,
sizeOnDevice,
sizeof(COMP_BYTES_TYPE),
cudaMemcpyDeviceToDevice,
stream);
if (err != cudaSuccess) {
throw std::runtime_error(
"Async memcpy in "
"CascadedMetadataOnGPU::setCompressedSizeFromGPU() failed with: "
+ std::to_string(err));
}
}
CascadedMetadata CascadedMetadataOnGPU::copyToHost(cudaStream_t stream)
{
// read the version of the serialized metadata.
VERSION_TYPE version;
CudaUtils::copy_async(
&version,
static_cast<const VERSION_TYPE*>(m_ptr),
1,
DEVICE_TO_HOST,
stream);
CudaUtils::sync(stream);
if (version == 1) {
CascadedMetadata metadata
= deserializeMetadataFromGPUVersion1(m_ptr, m_maxSize, stream);
m_numInputs = metadata.getNumInputs();
return metadata;
} else {
throw std::runtime_error(
"Unsupported Metadata version: " + std::to_string(version));
}
}
CascadedMetadata::Header*
CascadedMetadataOnGPU::getHeaderLocation(const size_t index)
{
verifyIndex(index);
return reinterpret_cast<HEADER_TYPE*>(
static_cast<uint8_t*>(m_ptr) + OFFSET_HEADERS)
+ index;
}
const CascadedMetadata::Header*
CascadedMetadataOnGPU::getHeaderLocation(const size_t index) const
{
verifyIndex(index);
return reinterpret_cast<const HEADER_TYPE*>(
static_cast<const uint8_t*>(m_ptr) + OFFSET_HEADERS)
+ index;
}
/******************************************************************************
* PRIVATE METHODS ************************************************************
*****************************************************************************/
void CascadedMetadataOnGPU::verifyInitialized() const
{
if (m_numInputs == NULL_NUM_INPUTS) {
throw std::runtime_error("CascadedMetadataOnGPU::copyToGPU() or "
"CascadedMetdataOnGPU::copyToHost() must be "
"called before other methods.");
}
}
void CascadedMetadataOnGPU::verifyIndex(const size_t index) const
{
verifyInitialized();
if (index >= m_numInputs) {
throw std::runtime_error(
"Invalid index to set header for: " + std::to_string(index) + " / "
+ std::to_string(m_numInputs));
}
}
} // namespace highlevel
} // namespace nvcomp
|
f0fba5747853cc56c53535b7d3f523affcef740b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "eks.h"
//========================================================================
static void HandleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
std::cout << hipGetErrorString( err ) << " in " << file << " at line " << line << std::endl;
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//========================================================================
eks::eks()
{
}
//========================================================================
eks::~eks()
{
}
//========================================================================
void eks::read_card(card& cd)
{
/* Read parameters from the input card and initialize the EKS program. */
// Switch for p-pbar collisions (if true) or p-p collisions (if false)
eks_input_variables.ppbar = cd.ppbar_collider;
// Jet physics is normally in the range of five flavors, so we
// set the number of flavors to 5. (This could be changed for special
// purposes, but the program works with a fixed NFL. Scheme switching
// for as mu becomes greater than a heavy quark mass is not
// implemented.)
eks_input_variables.nflavor = 5.0;
eks_input_variables.nfl = 5;
// Physics parameters:
eks_input_variables.rts = cd.square_s;
// Parton resolution parameters in PT and angle.
eks_input_variables.ptresolution = 0.001;
eks_input_variables.angleresolution = 0.001;
eks_input_variables.lambda = 0.226;
// Ratio of renormalization scale MUUV and factorization
// scale MUCO to PJ:
eks_input_variables.muuvoverpj = cd.renomalization_scale;
eks_input_variables.mucooverpj = cd.factorization_scale;
// Scales to tell RENO where to put rapidities and transverse
// momenta for partons 1 and 2.
eks_input_variables.yscale = 1.2;
eks_input_variables.pscale = 0.06 * cd.square_s;
// Limits on PT and ABS(Y).
eks_input_variables.pjmin = 0.003 * cd.square_s;
eks_input_variables.pjmax = 0.5 * cd.square_s;
eks_input_variables.yjmax = 4.0;
// Switches (should all be .TRUE.)
eks_input_variables.stypea = true;
eks_input_variables.stypeb = true;
eks_input_variables.stypec = true;
eks_input_variables.styped = true;
eks_input_variables.sborn = true;
eks_input_variables.svirtual = true;
eks_input_variables.sacoll = true;
eks_input_variables.sasoft = true;
eks_input_variables.sbcoll = true;
eks_input_variables.sbsoft = true;
eks_input_variables.s1coll = true;
eks_input_variables.s1soft = true;
eks_input_variables.s2coll = true;
eks_input_variables.s2soft = true;
eks_input_variables.s2to2 = true;
eks_input_variables.safinite = true;
eks_input_variables.sbfinite = true;
eks_input_variables.s1finite = true;
eks_input_variables.s2finite = true;
if( ! cd.next_to_leading_order )
{
eks_input_variables.svirtual = false;
eks_input_variables.sacoll = false;
eks_input_variables.sasoft = false;
eks_input_variables.sbcoll = false;
eks_input_variables.sbsoft = false;
eks_input_variables.s1coll = false;
eks_input_variables.s1soft = false;
eks_input_variables.s2coll = false;
eks_input_variables.s2soft = false;
eks_input_variables.safinite = false;
eks_input_variables.sbfinite = false;
eks_input_variables.s1finite = false;
eks_input_variables.s2finite = false;
std::cout << "BORN Calculation only!"<<std::endl;
}
// GPU computing
number_of_blocks = cd.number_of_blocks;
number_of_threads = cd.number_of_threads;
//
if(cd.next_to_leading_order)
{
eks_input_variables.ndim = 7;
}
else
{
eks_input_variables.ndim = 4;
}
}
//========================================================================
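// integrand: copies the per-event sampling variables, PDFs, alpha_s and z values
// to the device (hipMemcpy / hipMemcpyToSymbol), launches the GPU_submit kernel
// (defined elsewhere) on number_of_blocks x number_of_threads, copies the
// per-event results in `warehouse` and `event_index` back to the host, and
// returns the elapsed time in milliseconds measured with hip events.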
float eks::integrand(int* event_index, const double* xx, float* warehouse, float* alphas_in, float* pdf_in, float* z, int n_events)
{
// number of input variables
int n_variables = n_events * eks_input_variables.ndim;
// translate double to float
float input[n_variables];
for(int i = 0; i < n_variables; i++)
{
input[i] = xx[i];
}
// float* temp = (float*)malloc(17*11*n_events*sizeof(float))
// //declare input arrays
int* event_index_on_device;
float* warehouse_on_device;
float* pdf_on_device;
// allocate the memory on the GPU
HANDLE_ERROR( hipMalloc( (void**) &warehouse_on_device, 17 * 11 * n_events * sizeof(float) ) );
HANDLE_ERROR( hipMalloc( (void**) &event_index_on_device, n_events * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**) &pdf_on_device, 44*n_events * sizeof(float) ) );
// HANDLE_ERROR( hipMalloc( (void**) &eks_setting_on_device, sizeof(eks_input) ) );
// HANDLE_ERROR( hipMalloc( (void**) &input_variables, n_variables * sizeof(float) ) );
// HANDLE_ERROR( hipMalloc( (void**) &pdf_input, 44 * n_events * sizeof(float) ) );
// HANDLE_ERROR( hipMalloc( (void**) &alphas_input, n_events * sizeof(float) ) );
// HANDLE_ERROR( hipMalloc( (void**) &z_input, 2 * n_events * sizeof(float) ) );
//copy input variables to the GPU
// HANDLE_ERROR( hipMemcpy( warehouse_on_device, warehouse, 17 * 11 * n_events * sizeof(float), hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( pdf_on_device, pdf_in, 44 * n_events * sizeof(float), hipMemcpyHostToDevice ) );
//---------------
HANDLE_ERROR( hipMemcpyToSymbol( eks_setting_on_device, &eks_input_variables, sizeof(eks_input), 0, hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpyToSymbol( input_variables, input, n_variables * sizeof(float), 0, hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpyToSymbol( alphas_input, alphas_in, n_events * sizeof(float), 0, hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpyToSymbol( z_input, z, 2 * n_events * sizeof(float) , 0, hipMemcpyHostToDevice ) );
//copy input variables to the GPU
// HANDLE_ERROR( hipMemcpyToSymbol( eks_setting_on_device, &eks_input_variables, sizeof(eks_input_variables) ) );
// HANDLE_ERROR( hipMemcpyToSymbol( "input_variables", input, n_variables * sizeof(float), 0, hipMemcpyHostToDevice ) );
// HANDLE_ERROR( hipMemcpyToSymbol( "pdf_input", pdf_in, 44 * n_events * sizeof(float), 0, hipMemcpyHostToDevice) );
// HANDLE_ERROR( hipMemcpyToSymbol( "alphas_input", alphas_in, n_events * sizeof(float), 0, hipMemcpyHostToDevice ) );
// HANDLE_ERROR( hipMemcpyToSymbol( "z_input", z, 2 * n_events * sizeof(float), 0, hipMemcpyHostToDevice ) );
// // capture the start time
hipEvent_t start, stop;
hipEventCreate( &start);
hipEventCreate( &stop );
hipEventRecord( start, 0 );
//for debug, copy the device memory back to host to see if copy is correct
//pdf and alphas and z for effparton
float* pdf_temp = new float[44 * n_events]; //for leading order it is 44 pdf per event.
float* alphas_temp = new float[n_events];
float* z_temp = new float[n_events * 2];
HANDLE_ERROR( hipMemcpy(pdf_temp, pdf_on_device, 44 * n_events * sizeof(float), hipMemcpyDeviceToHost) );
HANDLE_ERROR( hipMemcpy(alphas_temp, alphas_input, n_events * sizeof(float), hipMemcpyDeviceToHost) );
HANDLE_ERROR( hipMemcpy(z_temp, z_input, n_events * sizeof(float), hipMemcpyDeviceToHost) );
// run!
hipLaunchKernelGGL(( GPU_submit), dim3(number_of_blocks),dim3(number_of_threads), 0, 0, event_index_on_device, warehouse_on_device, pdf_on_device);
//get stop time, and display the timing results
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
float elapsedTime;
hipEventElapsedTime( &elapsedTime, start, stop );
//std::cout<<"GPU calculation completed================" <<std::endl;
// copy output variables from device to host
HANDLE_ERROR( hipMemcpy(warehouse, warehouse_on_device, 17 * 11 * n_events * sizeof(float), hipMemcpyDeviceToHost) );
HANDLE_ERROR( hipMemcpy(event_index, event_index_on_device, n_events * sizeof(float), hipMemcpyDeviceToHost) );
// HANDLE_ERROR( hipMemcpyFromSymbol( &eks_input_variables, eks_setting_on_device, sizeof(eks_input), 0, hipMemcpyDeviceToHost ) );
// release device memory
hipFree( event_index_on_device );
hipFree( warehouse_on_device );
hipFree( &eks_setting_on_device);
hipFree( input_variables );
hipFree( pdf_on_device );
hipFree( alphas_input );
hipFree( z_input );
return elapsedTime;
//return 0.0;
}
//========================================================================
//========================================================================
eks& eks::operator=(eks& rhs)
{
return rhs;
}
//========================================================================
|
f0fba5747853cc56c53535b7d3f523affcef740b.cu
|
#include "eks.h"
//========================================================================
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
std::cout << cudaGetErrorString( err ) << " in " << file << " at line " << line << std::endl;
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//========================================================================
eks::eks()
{
}
//========================================================================
eks::~eks()
{
}
//========================================================================
void eks::read_card(card& cd)
{
/* Read parameters from the input card and initialize the EKS program. */
// Switch for p-pbar collisions (if true) or p-p collisions (if false)
eks_input_variables.ppbar = cd.ppbar_collider;
// Jet physics is normally in the range of five flavors, so we
// set the number of flavors to 5. (This could be changed for special
// purposes, but the program works with a fixed NFL. Scheme switching
// for as mu becomes greater than a heavy quark mass is not
// implemented.)
eks_input_variables.nflavor = 5.0;
eks_input_variables.nfl = 5;
// Physics parameters:
eks_input_variables.rts = cd.square_s;
// Parton resolution parameters in PT and angle.
eks_input_variables.ptresolution = 0.001;
eks_input_variables.angleresolution = 0.001;
eks_input_variables.lambda = 0.226;
// Ratio of renormalization scale MUUV and factorization
// scale MUCO to PJ:
eks_input_variables.muuvoverpj = cd.renomalization_scale;
eks_input_variables.mucooverpj = cd.factorization_scale;
// Scales to tell RENO where to put rapidities and transverse
// momenta for partons 1 and 2.
eks_input_variables.yscale = 1.2;
eks_input_variables.pscale = 0.06 * cd.square_s;
// Limits on PT and ABS(Y).
eks_input_variables.pjmin = 0.003 * cd.square_s;
eks_input_variables.pjmax = 0.5 * cd.square_s;
eks_input_variables.yjmax = 4.0;
// Switches (should all be .TRUE.)
eks_input_variables.stypea = true;
eks_input_variables.stypeb = true;
eks_input_variables.stypec = true;
eks_input_variables.styped = true;
eks_input_variables.sborn = true;
eks_input_variables.svirtual = true;
eks_input_variables.sacoll = true;
eks_input_variables.sasoft = true;
eks_input_variables.sbcoll = true;
eks_input_variables.sbsoft = true;
eks_input_variables.s1coll = true;
eks_input_variables.s1soft = true;
eks_input_variables.s2coll = true;
eks_input_variables.s2soft = true;
eks_input_variables.s2to2 = true;
eks_input_variables.safinite = true;
eks_input_variables.sbfinite = true;
eks_input_variables.s1finite = true;
eks_input_variables.s2finite = true;
if( ! cd.next_to_leading_order )
{
eks_input_variables.svirtual = false;
eks_input_variables.sacoll = false;
eks_input_variables.sasoft = false;
eks_input_variables.sbcoll = false;
eks_input_variables.sbsoft = false;
eks_input_variables.s1coll = false;
eks_input_variables.s1soft = false;
eks_input_variables.s2coll = false;
eks_input_variables.s2soft = false;
eks_input_variables.safinite = false;
eks_input_variables.sbfinite = false;
eks_input_variables.s1finite = false;
eks_input_variables.s2finite = false;
std::cout << "BORN Calculation only!"<<std::endl;
}
// GPU computing
number_of_blocks = cd.number_of_blocks;
number_of_threads = cd.number_of_threads;
//
if(cd.next_to_leading_order)
{
eks_input_variables.ndim = 7;
}
else
{
eks_input_variables.ndim = 4;
}
}
//========================================================================
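// integrand: copies the per-event sampling variables, PDFs, alpha_s and z values
// to the device (cudaMemcpy / cudaMemcpyToSymbol), launches the GPU_submit kernel
// (defined elsewhere) on number_of_blocks x number_of_threads, copies the
// per-event results in `warehouse` and `event_index` back to the host, and
// returns the elapsed time in milliseconds measured with CUDA events.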
float eks::integrand(int* event_index, const double* xx, float* warehouse, float* alphas_in, float* pdf_in, float* z, int n_events)
{
// number of input variables
int n_variables = n_events * eks_input_variables.ndim;
// translate double to float
float input[n_variables];
for(int i = 0; i < n_variables; i++)
{
input[i] = xx[i];
}
// float* temp = (float*)malloc(17*11*n_events*sizeof(float))
// //declare input arrays
int* event_index_on_device;
float* warehouse_on_device;
float* pdf_on_device;
// allocate the memory on the GPU
HANDLE_ERROR( cudaMalloc( (void**) &warehouse_on_device, 17 * 11 * n_events * sizeof(float) ) );
HANDLE_ERROR( cudaMalloc( (void**) &event_index_on_device, n_events * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**) &pdf_on_device, 44*n_events * sizeof(float) ) );
// HANDLE_ERROR( cudaMalloc( (void**) &eks_setting_on_device, sizeof(eks_input) ) );
// HANDLE_ERROR( cudaMalloc( (void**) &input_variables, n_variables * sizeof(float) ) );
// HANDLE_ERROR( cudaMalloc( (void**) &pdf_input, 44 * n_events * sizeof(float) ) );
// HANDLE_ERROR( cudaMalloc( (void**) &alphas_input, n_events * sizeof(float) ) );
// HANDLE_ERROR( cudaMalloc( (void**) &z_input, 2 * n_events * sizeof(float) ) );
//copy input variables to the GPU
// HANDLE_ERROR( cudaMemcpy( warehouse_on_device, warehouse, 17 * 11 * n_events * sizeof(float), cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( pdf_on_device, pdf_in, 44 * n_events * sizeof(float), cudaMemcpyHostToDevice ) );
//---------------
HANDLE_ERROR( cudaMemcpyToSymbol( eks_setting_on_device, &eks_input_variables, sizeof(eks_input), 0, cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpyToSymbol( input_variables, input, n_variables * sizeof(float), 0, cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpyToSymbol( alphas_input, alphas_in, n_events * sizeof(float), 0, cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpyToSymbol( z_input, z, 2 * n_events * sizeof(float) , 0, cudaMemcpyHostToDevice ) );
//copy input variables to the GPU
// HANDLE_ERROR( cudaMemcpyToSymbol( eks_setting_on_device, &eks_input_variables, sizeof(eks_input_variables) ) );
// HANDLE_ERROR( cudaMemcpyToSymbol( "input_variables", input, n_variables * sizeof(float), 0, cudaMemcpyHostToDevice ) );
// HANDLE_ERROR( cudaMemcpyToSymbol( "pdf_input", pdf_in, 44 * n_events * sizeof(float), 0, cudaMemcpyHostToDevice) );
// HANDLE_ERROR( cudaMemcpyToSymbol( "alphas_input", alphas_in, n_events * sizeof(float), 0, cudaMemcpyHostToDevice ) );
// HANDLE_ERROR( cudaMemcpyToSymbol( "z_input", z, 2 * n_events * sizeof(float), 0, cudaMemcpyHostToDevice ) );
// // capture the start time
cudaEvent_t start, stop;
cudaEventCreate( &start);
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
//for debug, copy the device memory back to host to see if copy is correct
//pdf and alphas and z for effparton
float* pdf_temp = new float[44 * n_events]; //for leading order it is 44 pdf per event.
float* alphas_temp = new float[n_events];
float* z_temp = new float[n_events * 2];
HANDLE_ERROR( cudaMemcpy(pdf_temp, pdf_on_device, 44 * n_events * sizeof(float), cudaMemcpyDeviceToHost) );
HANDLE_ERROR( cudaMemcpy(alphas_temp, alphas_input, n_events * sizeof(float), cudaMemcpyDeviceToHost) );
HANDLE_ERROR( cudaMemcpy(z_temp, z_input, n_events * sizeof(float), cudaMemcpyDeviceToHost) );
// run!
GPU_submit<<<number_of_blocks,number_of_threads>>>(event_index_on_device, warehouse_on_device, pdf_on_device);
//get stop time, and display the timing results
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, stop );
//std::cout<<"GPU calculation completed================" <<std::endl;
// copy output variables from device to host
HANDLE_ERROR( cudaMemcpy(warehouse, warehouse_on_device, 17 * 11 * n_events * sizeof(float), cudaMemcpyDeviceToHost) );
HANDLE_ERROR( cudaMemcpy(event_index, event_index_on_device, n_events * sizeof(float), cudaMemcpyDeviceToHost) );
// HANDLE_ERROR( cudaMemcpyFromSymbol( &eks_input_variables, eks_setting_on_device, sizeof(eks_input), 0, cudaMemcpyDeviceToHost ) );
// release device memory
cudaFree( event_index_on_device );
cudaFree( warehouse_on_device );
cudaFree( &eks_setting_on_device);
cudaFree( input_variables );
cudaFree( pdf_on_device );
cudaFree( alphas_input );
cudaFree( z_input );
return elapsedTime;
//return 0.0;
}
//========================================================================
//========================================================================
eks& eks::operator=(eks& rhs)
{
return rhs;
}
//========================================================================
|
9faf44abe0f397a1a34e29d87bc92f8f1f498d88.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define N 128
#define B 2
__global__ void k(int* in)
{
in[threadIdx.x] = blockIdx.x;
}
int main()
{
int* in = (int*) malloc(N * sizeof(int));
int* din;
hipMalloc((void**) &din, N*sizeof(int));
hipLaunchKernelGGL(( k), dim3(B),dim3(N/B), 0, 0, din);
}
|
9faf44abe0f397a1a34e29d87bc92f8f1f498d88.cu
|
#define N 128
#define B 2
__global__ void k(int* in)
{
in[threadIdx.x] = blockIdx.x;
}
int main()
{
int* in = (int*) malloc(N * sizeof(int));
int* din;
cudaMalloc((void**) &din, N*sizeof(int));
k<<<B,N/B>>>(din);
}
|
070b9faec2071f971b9b6e39a21c31056c033a7c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <hip/hip_cooperative_groups.h>
#include <math.h>
#include <string.h>
#include <fstream>
#include <sstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//# <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 3
#define H 227
#define W 227
#define R 11
#define S 11
#define M 96
#define E 55
#define F 55
#define U 4
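// red_ch: sums the per-channel partial outputs in d_o over num_ch channels into
// d_r. Each thread accumulates a 2x2 tile of the height x width output plane for
// the (image, filter) pair selected by blockIdx.x / blockIdx.y.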
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt, int height, int width)
{
for (int y=0;y<2;y++){
for (int x=0;x<2;x++){
for(int i=0; i<num_ch; i++)
{ int row = threadIdx.y; int col = threadIdx.x;
if((2*row+y<height)&&(2*col+x<width))
d_r[blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)] += d_o[i*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)] ;
}
}
}}
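// ew_gpu_mmul: direct convolution of one input channel (blockIdx.z) of one image
// (blockIdx.x) with one filter (blockIdx.y). Each thread accumulates a 2x2 tile
// of the output, striding over the input window, and clamps negative sums to
// zero (ReLU) before red_ch reduces the per-channel results.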
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{//using namespace cooperative_groups;
float prod;
//cooperative_groups::grid_group g = cooperative_groups::this_grid();
int row = threadIdx.y; int col = threadIdx.x;
//printf("row and col are %d, %d and op idx is %d\n",row,col,(blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col));
//if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width
for (int y=0; y<2; y++){
for (int x=0; x<2; x++){
for (int i=0; i<wt_width; i++){
for (int j=0; j<wt_width; j++){
float ip =d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(2*row+y)+i)*ip_height+(stride*(2*col+x)+j)];
float wt = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(i*wt_width+j)];
prod = ip*wt;
if((2*row+y<height)&&(2*col+x<width)){
d_o[blockIdx.z*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)] += prod;
}
}
}
if(d_o[blockIdx.z*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)]<0)
d_o[blockIdx.z*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)]=0;
}
}
}
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
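// Driver: fills the weights and the input batch with uniform random values, runs
// ew_gpu_mmul followed by the red_ch channel reduction on the GPU, copies the
// result back, and reports the maximum absolute error against reference values
// read from the file "layer_1_<batch_size>".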
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
/*************INITALIZING MATRICES*********************************/
float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
float *WT = (float*) malloc(M*C*R*S*sizeof(float));
//float WT[R][S];
float* d_o;
float* d_i;
float* d_w;
float* d_r;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/* WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
//WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1;
WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0);
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
// IP[n*C*H*W+k*H*W+c*W+d] = (c+1);
// if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
// IP[n*C*H*W+k*H*W+c*W+d] = 0;
// else
IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
}
}
}
}
if(hipSuccess != hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)))
{
printf("error in d_i malloc\n");
}
hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), hipMemcpyHostToDevice);
if(hipSuccess != hipMalloc((void**) &d_w, M*C*R*S*sizeof(float)))
{
printf("error in d_w malloc\n");
}
hipMemcpy(d_w, WT, M*C*R*S*sizeof(float), hipMemcpyHostToDevice);
if(hipSuccess != hipMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float)))
{
printf("error in d_o malloc\n");
}
if(hipSuccess != hipMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
{
printf("error in d_r malloc\n");
}
//cpu_start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
//cpu_end = clock();
dim3 dimGrid(batch_size,96,3);
dim3 dimBlock(28,28,1);
dim3 dimGridRed(batch_size,96,1);
dim3 dimBlockRed(28,28,1);
hipLaunchKernelGGL(( ew_gpu_mmul), dim3(dimGrid), dim3(dimBlock), 0, 0, d_o,d_i,d_w,55,55,4,227,11,96,batch_size,3);
hipDeviceSynchronize();
hipLaunchKernelGGL(( red_ch), dim3(dimGridRed), dim3(dimBlockRed), 0, 0, d_r,d_o,3,batch_size,96,55,55);
hipMemcpy(OPG,d_r,(long int)batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost);
/**print outputs**/
//int e,f,g,h;
int g,h,s,u,t;
float max_error = 0;
string filename = "layer_1_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
//for (t=0;t<C;t++){
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if(error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//}
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
hipFree(d_o);
hipFree(d_i);
hipFree(d_w);
hipFree(d_r);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
|
070b9faec2071f971b9b6e39a21c31056c033a7c.cu
|
#include <stdio.h>
#include <iostream>
#include <cooperative_groups.h>
#include <math.h>
#include <string.h>
#include <fstream>
#include <sstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//# <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 3
#define H 227
#define W 227
#define R 11
#define S 11
#define M 96
#define E 55
#define F 55
#define U 4
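// red_ch: sums the per-channel partial outputs in d_o over num_ch channels into
// d_r. Each thread accumulates a 2x2 tile of the height x width output plane for
// the (image, filter) pair selected by blockIdx.x / blockIdx.y.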
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt, int height, int width)
{
for (int y=0;y<2;y++){
for (int x=0;x<2;x++){
for(int i=0; i<num_ch; i++)
{ int row = threadIdx.y; int col = threadIdx.x;
if((2*row+y<height)&&(2*col+x<width))
d_r[blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)] += d_o[i*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)] ;
}
}
}}
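// ew_gpu_mmul: direct convolution of one input channel (blockIdx.z) of one image
// (blockIdx.x) with one filter (blockIdx.y). Each thread accumulates a 2x2 tile
// of the output, striding over the input window, and clamps negative sums to
// zero (ReLU) before red_ch reduces the per-channel results.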
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{//using namespace cooperative_groups;
float prod;
//cooperative_groups::grid_group g = cooperative_groups::this_grid();
int row = threadIdx.y; int col = threadIdx.x;
//printf("row and col are %d, %d and op idx is %d\n",row,col,(blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col));
//if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width
for (int y=0; y<2; y++){
for (int x=0; x<2; x++){
for (int i=0; i<wt_width; i++){
for (int j=0; j<wt_width; j++){
float ip =d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(2*row+y)+i)*ip_height+(stride*(2*col+x)+j)];
float wt = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(i*wt_width+j)];
prod = ip*wt;
if((2*row+y<height)&&(2*col+x<width)){
d_o[blockIdx.z*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)] += prod;
}
}
}
if(d_o[blockIdx.z*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)]<0)
d_o[blockIdx.z*(num_wt*num_img*height*width)+blockIdx.x*num_wt*height*width+blockIdx.y*height*width+(2*row+y)*width+(2*col+x)]=0;
}
}
}
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
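// Driver: fills the weights and the input batch with uniform random values, runs
// ew_gpu_mmul followed by the red_ch channel reduction on the GPU, copies the
// result back, and reports the maximum absolute error against reference values
// read from the file "layer_1_<batch_size>".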
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
/*************INITALIZING MATRICES*********************************/
float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
float *WT = (float*) malloc(M*C*R*S*sizeof(float));
//float WT[R][S];
float* d_o;
float* d_i;
float* d_w;
float* d_r;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/* WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
//WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1;
WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0);
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
// IP[n*C*H*W+k*H*W+c*W+d] = (c+1);
// if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
// IP[n*C*H*W+k*H*W+c*W+d] = 0;
// else
IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
}
}
}
}
if(cudaSuccess != cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)))
{
printf("error in d_i malloc\n");
}
cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), cudaMemcpyHostToDevice);
if(cudaSuccess != cudaMalloc((void**) &d_w, M*C*R*S*sizeof(float)))
{
printf("error in d_w malloc\n");
}
cudaMemcpy(d_w, WT, M*C*R*S*sizeof(float), cudaMemcpyHostToDevice);
if(cudaSuccess != cudaMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float)))
{
printf("error in d_o malloc\n");
}
if(cudaSuccess != cudaMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
{
printf("error in d_r malloc\n");
}
//cpu_start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
//cpu_end = clock();
dim3 dimGrid(batch_size,96,3);
dim3 dimBlock(28,28,1);
dim3 dimGridRed(batch_size,96,1);
dim3 dimBlockRed(28,28,1);
ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,55,55,4,227,11,96,batch_size,3);
cudaDeviceSynchronize();
red_ch<<<dimGridRed, dimBlockRed>>>(d_r,d_o,3,batch_size,96,55,55);
cudaMemcpy(OPG,d_r,(long int)batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost);
/**print outputs**/
//int e,f,g,h;
int g,h,s,u,t;
float max_error = 0;
string filename = "layer_1_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
//for (t=0;t<C;t++){
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if(error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//}
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
cudaFree(d_o);
cudaFree(d_i);
cudaFree(d_w);
cudaFree(d_r);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
|
e06d858a927e8c80165b0cb2e1fedd09bf41d02f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************
* CUDALERP.cu
* KORAL
*
* Author: Kareem Omar
* [email protected]
* https://github.com/komrad36
*
* Last updated Dec 27, 2016
*******************************************************************/
//
// ## Summary ##
// KORAL is a novel, extremely fast, highly accurate, scale- and
// rotation-invariant, CPU-GPU cooperative detector-descriptor.
//
// Detection is based on the author's custom multi-scale KFAST corner
// detector, with rapid bilinear interpolation performed by the GPU
// asynchronously while the CPU works on KFAST.
//
// ## Usage ##
// Basic use of KORAL is extremely easy, although, of course, for a
// larger high-performance pipeline, users will benefit from
// calling KORAL functions directly and modifying it to suit their needs.
//
// To detect and describe, simply #include "KORAL.h" and
// then do:
//
// KORAL koral(scale_factor, scale_levels);
// koral.go(image, width, height, KFAST_threshold);
//
// where scale_factor is the factor by which each scale level
// is reduced from the previous, scale_levels is the total
// number of such scale levels used, image is a pointer to
// uint8_t (grayscale) image data, and KFAST_threshold
// is the threshold supplied to the KFAST feature detector.
//
// After this call, keypoints are available in a vector at
// koral.kps, while descriptors are available at
// koral.desc.
//
// Portions of KORAL require SSE, AVX, AVX2, and CUDA.
// The author is working on reduced-performance versions
// with lesser requirements, but as the intent of this work
// is primarily novel performance capability, modern
// hardware and this full version are highly recommended.
//
// Description is performed by the GPU using the novel CLATCH
// (CUDA LATCH) binary descriptor kernel.
//
// Rotation invariance is provided by a novel vectorized
// SSE angle weight detector.
//
// All components have been written and carefully tuned by the author
// for maximum performance and have no external dependencies. Some have
// been modified for integration into KORAL,
// but the original standalone projects are all available on
// the author's GitHub (https://github.com/komrad36).
//
// These individual components are:
// -KFAST (https://github.com/komrad36/KFAST)
// -CUDALERP (https://github.com/komrad36/CUDALERP)
// -FeatureAngle (https://github.com/komrad36/FeatureAngle)
// -CLATCH (https://github.com/komrad36/CLATCH)
//
// In addition, the natural next step of matching descriptors
// is available in the author's currently separate
// project, CUDAK2NN (https://github.com/komrad36/CUDAK2NN).
//
// A key insight responsible for much of the performance of
// this insanely fast system is due to Christopher Parker
// (https://github.com/csp256), to whom I am extremely grateful.
//
// The file 'main.cpp' is a simple test driver
// illustrating example usage. It requires OpenCV
// for image read and keypoint display. KORAL itself,
// however, does not require OpenCV or any other
// external dependencies.
//
// Note that KORAL is a work in progress.
// Suggestions and improvements are welcomed.
//
// ## License ##
// The FAST detector was created by Edward Rosten and Tom Drummond
// as described in the 2006 paper by Rosten and Drummond:
// "Machine learning for high-speed corner detection"
// Edward Rosten and Tom Drummond
// https://www.edwardrosten.com/work/rosten_2006_machine.pdf
//
// The FAST detector is BSD licensed:
//
// Copyright(c) 2006, 2008, 2009, 2010 Edward Rosten
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met :
//
//
// *Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// *Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and / or other materials provided with the distribution.
//
// *Neither the name of the University of Cambridge nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//
//
//
// KORAL is licensed under the MIT License : https://opensource.org/licenses/mit-license.php
//
// Copyright(c) 2016 Kareem Omar, Christopher Parker
//
// Permission is hereby granted, free of charge,
// to any person obtaining a copy of this software and associated documentation
// files(the "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and / or sell copies of the Software, and to permit persons to whom
// the Software is furnished to do so, subject to the following conditions :
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
//
// Note again that KORAL is a work in progress.
// Suggestions and improvements are welcomed.
//
#include "koralROS/CUDALERP.h"
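// CUDALERP_kernel: bilinear resampling. Each thread produces two horizontally
// adjacent output pixels; for each it gathers the four neighboring texels with
// tex2Dgather, blends them with the fractional x/y weights, and scales the
// result back to 0..255 with rounding.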
__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(256, 0)
#endif
CUDALERP_kernel(const hipTextureObject_t d_img_tex, const float gxs, const float gys, uint8_t* __restrict const d_out, const size_t pitch, const int neww) {
uint32_t x = (blockIdx.x << 9) + (threadIdx.x << 1);
const uint32_t y = blockIdx.y;
const float fy = (y + 0.5f)*gys - 0.5f;
const float wt_y = fy - floor(fy);
const float invwt_y = 1.0f - wt_y;
#pragma unroll
for (int i = 0; i < 2; ++i, ++x) {
const float fx = (x + 0.5f)*gxs - 0.5f;
// less accurate and not really much (or any) faster
// -----------------
// const float res = tex2D<float>(d_img_tex, fx, fy);
// -----------------
const float4 f = tex2Dgather<float4>(d_img_tex, fx + 0.5f, fy + 0.5f);
const float wt_x = fx - floor(fx);
const float invwt_x = 1.0f - wt_x;
const float xa = invwt_x*f.w + wt_x*f.z;
const float xb = invwt_x*f.x + wt_x*f.y;
const float res = 255.0f*(invwt_y*xa + wt_y*xb) + 0.5f;
// -----------------
if (x < neww) d_out[y*pitch + x] = res;
}
}
void CUDALERP(const hipTextureObject_t d_img_tex, const float gxs, const float gys, uint8_t* __restrict const d_out, const size_t pitch, const uint32_t neww, const uint32_t newh, const hipStream_t stream) {
hipLaunchKernelGGL(( CUDALERP_kernel), dim3(((neww - 1) >> 9) + 1, newh), dim3(256), 0, stream, d_img_tex, gxs, gys, d_out, pitch, neww);
}
|
e06d858a927e8c80165b0cb2e1fedd09bf41d02f.cu
|
/*******************************************************************
* CUDALERP.cu
* KORAL
*
* Author: Kareem Omar
* [email protected]
* https://github.com/komrad36
*
* Last updated Dec 27, 2016
*******************************************************************/
//
// ## Summary ##
// KORAL is a novel, extremely fast, highly accurate, scale- and
// rotation-invariant, CPU-GPU cooperative detector-descriptor.
//
// Detection is based on the author's custom multi-scale KFAST corner
// detector, with rapid bilinear interpolation performed by the GPU
// asynchronously while the CPU works on KFAST.
//
// ## Usage ##
// Basic use of KORAL is extremely easy, although, of course, for a
// larger high-performance pipeline, users will benefit from
// calling KORAL functions directly and modifying it to suit their needs.
//
// To detect and describe, simply #include "KORAL.h" and
// then do:
//
// KORAL koral(scale_factor, scale_levels);
// koral.go(image, width, height, KFAST_threshold);
//
// where scale_factor is the factor by which each scale level
// is reduced from the previous, scale_levels is the total
// number of such scale levels used, image is a pointer to
// uint8_t (grayscale) image data, and KFAST_threshold
// is the threshold supplied to the KFAST feature detector.
//
// After this call, keypoints are available in a vector at
// koral.kps, while descriptors are available at
// koral.desc.
//
// Portions of KORAL require SSE, AVX, AVX2, and CUDA.
// The author is working on reduced-performance versions
// with lesser requirements, but as the intent of this work
// is primarily novel performance capability, modern
// hardware and this full version are highly recommended.
//
// Description is performed by the GPU using the novel CLATCH
// (CUDA LATCH) binary descriptor kernel.
//
// Rotation invariance is provided by a novel vectorized
// SSE angle weight detector.
//
// All components have been written and carefully tuned by the author
// for maximum performance and have no external dependencies. Some have
// been modified for integration into KORAL,
// but the original standalone projects are all available on
// the author's GitHub (https://github.com/komrad36).
//
// These individual components are:
// -KFAST (https://github.com/komrad36/KFAST)
// -CUDALERP (https://github.com/komrad36/CUDALERP)
// -FeatureAngle (https://github.com/komrad36/FeatureAngle)
// -CLATCH (https://github.com/komrad36/CLATCH)
//
// In addition, the natural next step of matching descriptors
// is available in the author's currently separate
// project, CUDAK2NN (https://github.com/komrad36/CUDAK2NN).
//
// A key insight responsible for much of the performance of
// this insanely fast system is due to Christopher Parker
// (https://github.com/csp256), to whom I am extremely grateful.
//
// The file 'main.cpp' is a simple test driver
// illustrating example usage. It requires OpenCV
// for image read and keypoint display. KORAL itself,
// however, does not require OpenCV or any other
// external dependencies.
//
// Note that KORAL is a work in progress.
// Suggestions and improvements are welcomed.
//
// ## License ##
// The FAST detector was created by Edward Rosten and Tom Drummond
// as described in the 2006 paper by Rosten and Drummond:
// "Machine learning for high-speed corner detection"
// Edward Rosten and Tom Drummond
// https://www.edwardrosten.com/work/rosten_2006_machine.pdf
//
// The FAST detector is BSD licensed:
//
// Copyright(c) 2006, 2008, 2009, 2010 Edward Rosten
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met :
//
//
// *Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// *Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and / or other materials provided with the distribution.
//
// *Neither the name of the University of Cambridge nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//
//
//
// KORAL is licensed under the MIT License : https://opensource.org/licenses/mit-license.php
//
// Copyright(c) 2016 Kareem Omar, Christopher Parker
//
// Permission is hereby granted, free of charge,
// to any person obtaining a copy of this software and associated documentation
// files(the "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and / or sell copies of the Software, and to permit persons to whom
// the Software is furnished to do so, subject to the following conditions :
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
//
// Note again that KORAL is a work in progress.
// Suggestions and improvements are welcomed.
//
#include "koralROS/CUDALERP.h"
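// CUDALERP_kernel: bilinear resampling. Each thread produces two horizontally
// adjacent output pixels; for each it gathers the four neighboring texels with
// tex2Dgather, blends them with the fractional x/y weights, and scales the
// result back to 0..255 with rounding.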
__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(256, 0)
#endif
CUDALERP_kernel(const cudaTextureObject_t d_img_tex, const float gxs, const float gys, uint8_t* __restrict const d_out, const size_t pitch, const int neww) {
uint32_t x = (blockIdx.x << 9) + (threadIdx.x << 1);
const uint32_t y = blockIdx.y;
const float fy = (y + 0.5f)*gys - 0.5f;
const float wt_y = fy - floor(fy);
const float invwt_y = 1.0f - wt_y;
#pragma unroll
for (int i = 0; i < 2; ++i, ++x) {
const float fx = (x + 0.5f)*gxs - 0.5f;
// less accurate and not really much (or any) faster
// -----------------
// const float res = tex2D<float>(d_img_tex, fx, fy);
// -----------------
const float4 f = tex2Dgather<float4>(d_img_tex, fx + 0.5f, fy + 0.5f);
const float wt_x = fx - floor(fx);
const float invwt_x = 1.0f - wt_x;
const float xa = invwt_x*f.w + wt_x*f.z;
const float xb = invwt_x*f.x + wt_x*f.y;
const float res = 255.0f*(invwt_y*xa + wt_y*xb) + 0.5f;
// -----------------
if (x < neww) d_out[y*pitch + x] = res;
}
}
void CUDALERP(const cudaTextureObject_t d_img_tex, const float gxs, const float gys, uint8_t* __restrict const d_out, const size_t pitch, const uint32_t neww, const uint32_t newh, const cudaStream_t stream) {
CUDALERP_kernel<<<{((neww - 1) >> 9) + 1, newh}, 256, 0, stream>>>(d_img_tex, gxs, gys, d_out, pitch, neww);
}
|
3d63dd95096b9e9f8077443478ee5d080f788179.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cmath> // Without this, abs is the wrong function!
#include <random>
// #include "rconv2.fftx.codegen.hpp"
#include "rconv3.fftx.codegen.hpp"
#include "fftx3utilities.h"
#include "rconv.h"
enum VerbosityLevel { SHOW_CATEGORIES = 1, SHOW_SUBTESTS = 2, SHOW_ROUNDS = 3};
// using namespace fftx;
std::mt19937 generator;
// unifRealDist is uniform over the reals in (-1/2, 1/2).
std::uniform_real_distribution<double> unifRealDist;
// Return random real number.
double unifReal()
{
return unifRealDist(generator);
}
// Fill a_arr with real numbers distributed uniformly in (-1/2, 1/2).
template<int DIM>
void unifRealArray(fftx::array_t<DIM, double>& a_arr)
{
forall([](double(&v),
const fftx::point_t<DIM>& p)
{
v = unifReal();
}, a_arr);
}
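// convolutionDevice: allocates one device buffer holding the input, output and
// symbol arrays, copies input and symbol to the device, wraps the device
// pointers in fftx::array_t views, runs the supplied transform, and copies the
// output array back to the host.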
template<int DIM>
void convolutionDevice(fftx::handle_t (a_transform)
(fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&),
array_t<DIM, double>& a_input,
array_t<DIM, double>& a_output,
array_t<DIM, double>& a_symbol)
{
auto inputDomain = a_input.m_domain;
auto outputDomain = a_output.m_domain;
auto symbolDomain = a_symbol.m_domain;
auto input_size = inputDomain.size();
auto output_size = outputDomain.size();
auto symbol_size = symbolDomain.size();
auto input_bytes = input_size * sizeof(double);
auto output_bytes = output_size * sizeof(double);
auto symbol_bytes = symbol_size * sizeof(double);
double* bufferPtr;
hipMalloc(&bufferPtr, input_bytes + output_bytes + symbol_bytes);
double* inputPtr = bufferPtr;
bufferPtr += input_size;
double* outputPtr = bufferPtr;
bufferPtr += output_size;
double* symbolPtr = bufferPtr;
hipMemcpy(inputPtr, a_input.m_data.local(), input_bytes,
hipMemcpyHostToDevice);
hipMemcpy(symbolPtr, a_symbol.m_data.local(), symbol_bytes,
hipMemcpyHostToDevice);
fftx::array_t<DIM, double> inputDevice(fftx::global_ptr<double>
(inputPtr, 0, 1), inputDomain);
fftx::array_t<DIM, double> outputDevice(fftx::global_ptr<double>
(outputPtr, 0, 1), outputDomain);
fftx::array_t<DIM, double> symbolDevice(fftx::global_ptr<double>
(symbolPtr, 0, 1), symbolDomain);
a_transform(inputDevice, outputDevice, symbolDevice);
hipMemcpy(a_output.m_data.local(), outputPtr, output_bytes,
hipMemcpyDeviceToHost);
}
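// testConstantSymbol: with the symbol set to the constant 1/N (N = domain size)
// the convolution should reproduce its input, so the maximum absolute
// input/output difference over a_rounds random inputs is returned as the error.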
template<int DIM>
double testConstantSymbol(fftx::handle_t (a_transform)
(fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&),
fftx::box_t<DIM> a_domain,
fftx::box_t<DIM> a_fdomain,
int a_rounds,
int a_verbosity)
{
printf("calling testConstantSymbol<%d>\n", DIM);
array_t<DIM, double> input(a_domain);
array_t<DIM, double> output(a_domain);
array_t<DIM, double> symbol(a_fdomain);
double scaling = 1. / (a_domain.size()*1.);
setConstant(symbol, scaling);
double errConstantSymbol = 0.;
for (int itn = 1; itn <= a_rounds; itn++)
{
unifRealArray(input);
convolutionDevice(a_transform, input, output, symbol);
double err = absMaxDiffArray(input, output);
updateMax(errConstantSymbol, err);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD random input with constant symbol max error %11.5e\n",
DIM, err);
}
}
if (a_verbosity >= SHOW_CATEGORIES)
{
printf("%dD random input with constant symbol in %d rounds: max error %11.5e\n", DIM, a_rounds, errConstantSymbol);
}
return errConstantSymbol;
}
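// testDelta: convolves a constant input with a symbol that is a 1/N-scaled delta
// at the low corner of the frequency domain and checks that the output equals
// the input.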
template<int DIM>
double testDelta(fftx::handle_t (a_transform)
(fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&),
fftx::box_t<DIM> a_domain,
fftx::box_t<DIM> a_fdomain,
int a_verbosity)
{
printf("calling testDelta<%d>\n", DIM);
array_t<DIM, double> input(a_domain);
array_t<DIM, double> output(a_domain);
array_t<DIM, double> symbol(a_fdomain);
setConstant(input, 2.);
point_t<DIM> cornerLo = a_domain.lo;
double scaling = 1. / (a_domain.size()*1.);
forall([cornerLo, scaling](double(&v), const fftx::point_t<DIM>& p)
{
if (p == cornerLo)
{
v = scaling; // WAS 1;
}
else
{
v = 0.;
}
}, symbol);
convolutionDevice(a_transform, input, output, symbol);
double errDelta = absMaxDiffArray(input, output);
if (a_verbosity >= SHOW_CATEGORIES)
{
printf("%dD delta function test: max error %11.5e\n", DIM, errDelta);
}
return errDelta;
}
template<int DIM>
double testPoisson(fftx::handle_t (a_transform)
(fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&),
fftx::box_t<DIM> a_domain,
fftx::box_t<DIM> a_fdomain,
int a_verbosity)
{
printf("calling testPoisson<%d>\n", DIM);
array_t<DIM, double> input(a_domain);
array_t<DIM, double> output(a_domain);
array_t<DIM, double> symbol(a_fdomain);
fftx::point_t<DIM> lo = a_domain.lo;
fftx::point_t<DIM> hi = a_domain.hi;
double center[DIM];
fftx::point_t<DIM> extents = a_domain.extents();
int extentMin = extents[0];
for (int d = 0; d < DIM; d++)
{
center[d] = (lo[d] + hi[d]) * 0.5;
if (extents[d] < extentMin)
{
extentMin = extents[d];
}
}
// Set radius to extentMin/sqrt(2)/2.
double radius2 = (extentMin * extentMin) * (1./8.);
forall([center, radius2](double(&v), const fftx::point_t<DIM>& p)
{
double dist2 = 0.;
for (int d = 0; d < DIM; d++)
{
double displacement2 = p[d] - center[d];
displacement2 *= displacement2;
dist2 += displacement2;
}
if (dist2 < radius2)
{
// v = 1.;
// For periodicity, need sum of rhs over all points to be zero.
v = p[0] - center[0];
}
else
{
v = 0.;
}
}, input);
point_t<DIM> cornerLo = a_domain.lo;
size_t normalize = a_domain.size();
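  // Symbol = 1 / (N * lambda_k), where lambda_k = -4 * sum_d sin^2(pi k_d / N_d)
  // are the eigenvalues of the second-order periodic Laplacian.  The zero mode
  // is set to zero, which is why the right-hand side above was built with zero mean.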
forall([cornerLo, extents, normalize](double(&v), const fftx::point_t<DIM>& p)
{
if (p == cornerLo)
{
v = 0.;
}
else
{
double sin2sum = 0.;
for (int d = 0; d < DIM; d++)
{
double sin1 = sin((p[d]-cornerLo[d])*M_PI/(extents[d]*1.));
sin2sum += sin1 * sin1;
}
v = -1. / ((4 * normalize) * sin2sum);
}
}, symbol);
convolutionDevice(a_transform, input, output, symbol);
array_t<DIM,double> lap2output(a_domain);
laplacian2periodic(lap2output, output);
double errPoisson = absMaxDiffArray(lap2output, input);
if (a_verbosity >= SHOW_CATEGORIES)
{
printf("%dD Poisson test: max error %11.5e\n", DIM, errPoisson);
}
return errPoisson;
}
template<int DIM>
void rconvDimension(fftx::handle_t (a_transform)
(fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&),
fftx::box_t<DIM> a_domain,
fftx::box_t<DIM> a_fdomain,
int a_rounds,
int a_verbosity)
{
std::cout << "***** test " << DIM << "D real convolution on "
<< a_domain << std::endl;
double err = 0.;
updateMax(err,
testConstantSymbol(a_transform, a_domain, a_fdomain,
a_rounds, a_verbosity));
updateMax(err,
testDelta(a_transform, a_domain, a_fdomain,
a_verbosity));
updateMax(err,
testPoisson(a_transform, a_domain, a_fdomain,
a_verbosity));
printf("%dD tests in %d rounds max error %11.5e\n", DIM, a_rounds, err);
}
int main(int argc, char* argv[])
{
// { SHOW_CATEGORIES = 1, SHOW_SUBTESTS = 2, SHOW_ROUNDS = 3};
printf("Usage: %s [verbosity=0] [rounds=20]\n", argv[0]);
int verbosity = 0;
int rounds = 20;
if (argc > 1)
{
verbosity = atoi(argv[1]);
if (argc > 2)
{
rounds = atoi(argv[2]);
}
}
printf("Running with verbosity %d, random %d rounds\n", verbosity, rounds);
/*
Set up random number generator.
*/
std::random_device rd;
generator = std::mt19937(rd());
unifRealDist = std::uniform_real_distribution<double>(-0.5, 0.5);
/*
2-dimensional tests.
*/
// rconv2::init();
// rconvDimension(rconv2::transform, rconv::domain2, rconv::fdomain2,
// rounds, verbosity);
// rconv2::destroy();
/*
3-dimensional tests.
*/
rconv3::init();
rconvDimension(rconv3::transform, rconv::domain3, rconv::fdomain3,
rounds, verbosity);
rconv3::destroy();
printf("%s: All done, exiting\n", argv[0]);
return 0;
}
|
3d63dd95096b9e9f8077443478ee5d080f788179.cu
|
#include <cmath> // Without this, abs is the wrong function!
#include <random>
// #include "rconv2.fftx.codegen.hpp"
#include "rconv3.fftx.codegen.hpp"
#include "fftx3utilities.h"
#include "rconv.h"
enum VerbosityLevel { SHOW_CATEGORIES = 1, SHOW_SUBTESTS = 2, SHOW_ROUNDS = 3};
// using namespace fftx;
std::mt19937 generator;
// unifRealDist is uniform over the reals in (-1/2, 1/2).
std::uniform_real_distribution<double> unifRealDist;
// Return random real number.
double unifReal()
{
return unifRealDist(generator);
}
// Fill a_arr with real numbers distributed uniformly in (-1/2, 1/2).
template<int DIM>
void unifRealArray(fftx::array_t<DIM, double>& a_arr)
{
forall([](double(&v),
const fftx::point_t<DIM>& p)
{
v = unifReal();
}, a_arr);
}
template<int DIM>
void convolutionDevice(fftx::handle_t (a_transform)
(fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&),
array_t<DIM, double>& a_input,
array_t<DIM, double>& a_output,
array_t<DIM, double>& a_symbol)
{
auto inputDomain = a_input.m_domain;
auto outputDomain = a_output.m_domain;
auto symbolDomain = a_symbol.m_domain;
auto input_size = inputDomain.size();
auto output_size = outputDomain.size();
auto symbol_size = symbolDomain.size();
auto input_bytes = input_size * sizeof(double);
auto output_bytes = output_size * sizeof(double);
auto symbol_bytes = symbol_size * sizeof(double);
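  // Single device buffer for input, output, and symbol; the sub-pointers are
  // obtained by stepping bufferPtr forward in element counts below.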
double* bufferPtr;
cudaMalloc(&bufferPtr, input_bytes + output_bytes + symbol_bytes);
double* inputPtr = bufferPtr;
bufferPtr += input_size;
double* outputPtr = bufferPtr;
bufferPtr += output_size;
double* symbolPtr = bufferPtr;
cudaMemcpy(inputPtr, a_input.m_data.local(), input_bytes,
cudaMemcpyHostToDevice);
cudaMemcpy(symbolPtr, a_symbol.m_data.local(), symbol_bytes,
cudaMemcpyHostToDevice);
fftx::array_t<DIM, double> inputDevice(fftx::global_ptr<double>
(inputPtr, 0, 1), inputDomain);
fftx::array_t<DIM, double> outputDevice(fftx::global_ptr<double>
(outputPtr, 0, 1), outputDomain);
fftx::array_t<DIM, double> symbolDevice(fftx::global_ptr<double>
(symbolPtr, 0, 1), symbolDomain);
a_transform(inputDevice, outputDevice, symbolDevice);
  cudaMemcpy(a_output.m_data.local(), outputPtr, output_bytes,
             cudaMemcpyDeviceToHost);
  // Release the single device allocation; inputPtr still points at its base.
  cudaFree(inputPtr);
}
template<int DIM>
double testConstantSymbol(fftx::handle_t (a_transform)
(fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&),
fftx::box_t<DIM> a_domain,
fftx::box_t<DIM> a_fdomain,
int a_rounds,
int a_verbosity)
{
printf("calling testConstantSymbol<%d>\n", DIM);
array_t<DIM, double> input(a_domain);
array_t<DIM, double> output(a_domain);
array_t<DIM, double> symbol(a_fdomain);
double scaling = 1. / (a_domain.size()*1.);
setConstant(symbol, scaling);
double errConstantSymbol = 0.;
for (int itn = 1; itn <= a_rounds; itn++)
{
unifRealArray(input);
convolutionDevice(a_transform, input, output, symbol);
double err = absMaxDiffArray(input, output);
updateMax(errConstantSymbol, err);
if (a_verbosity >= SHOW_ROUNDS)
{
printf("%dD random input with constant symbol max error %11.5e\n",
DIM, err);
}
}
if (a_verbosity >= SHOW_CATEGORIES)
{
printf("%dD random input with constant symbol in %d rounds: max error %11.5e\n", DIM, a_rounds, errConstantSymbol);
}
return errConstantSymbol;
}
template<int DIM>
double testDelta(fftx::handle_t (a_transform)
(fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&),
fftx::box_t<DIM> a_domain,
fftx::box_t<DIM> a_fdomain,
int a_verbosity)
{
printf("calling testDelta<%d>\n", DIM);
array_t<DIM, double> input(a_domain);
array_t<DIM, double> output(a_domain);
array_t<DIM, double> symbol(a_fdomain);
setConstant(input, 2.);
point_t<DIM> cornerLo = a_domain.lo;
double scaling = 1. / (a_domain.size()*1.);
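  // Delta symbol at the zero-frequency corner, scaled by 1/N: applied to the
  // constant input it acts as the identity, so output should equal input.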
forall([cornerLo, scaling](double(&v), const fftx::point_t<DIM>& p)
{
if (p == cornerLo)
{
v = scaling; // WAS 1;
}
else
{
v = 0.;
}
}, symbol);
convolutionDevice(a_transform, input, output, symbol);
double errDelta = absMaxDiffArray(input, output);
if (a_verbosity >= SHOW_CATEGORIES)
{
printf("%dD delta function test: max error %11.5e\n", DIM, errDelta);
}
return errDelta;
}
template<int DIM>
double testPoisson(fftx::handle_t (a_transform)
(fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&),
fftx::box_t<DIM> a_domain,
fftx::box_t<DIM> a_fdomain,
int a_verbosity)
{
printf("calling testPoisson<%d>\n", DIM);
array_t<DIM, double> input(a_domain);
array_t<DIM, double> output(a_domain);
array_t<DIM, double> symbol(a_fdomain);
fftx::point_t<DIM> lo = a_domain.lo;
fftx::point_t<DIM> hi = a_domain.hi;
double center[DIM];
fftx::point_t<DIM> extents = a_domain.extents();
int extentMin = extents[0];
for (int d = 0; d < DIM; d++)
{
center[d] = (lo[d] + hi[d]) * 0.5;
if (extents[d] < extentMin)
{
extentMin = extents[d];
}
}
// Set radius to extentMin/sqrt(2)/2.
double radius2 = (extentMin * extentMin) * (1./8.);
forall([center, radius2](double(&v), const fftx::point_t<DIM>& p)
{
double dist2 = 0.;
for (int d = 0; d < DIM; d++)
{
double displacement2 = p[d] - center[d];
displacement2 *= displacement2;
dist2 += displacement2;
}
if (dist2 < radius2)
{
// v = 1.;
// For periodicity, need sum of rhs over all points to be zero.
v = p[0] - center[0];
}
else
{
v = 0.;
}
}, input);
point_t<DIM> cornerLo = a_domain.lo;
size_t normalize = a_domain.size();
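  // The symbol is 1/(N * lambda_k) with lambda_k = -4 * sum_d sin^2(pi k_d / N_d),
  // i.e. the inverse of the discrete periodic Laplacian (zero mode dropped),
  // consistent with the zero-mean right-hand side built above.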
forall([cornerLo, extents, normalize](double(&v), const fftx::point_t<DIM>& p)
{
if (p == cornerLo)
{
v = 0.;
}
else
{
double sin2sum = 0.;
for (int d = 0; d < DIM; d++)
{
double sin1 = sin((p[d]-cornerLo[d])*M_PI/(extents[d]*1.));
sin2sum += sin1 * sin1;
}
v = -1. / ((4 * normalize) * sin2sum);
}
}, symbol);
convolutionDevice(a_transform, input, output, symbol);
array_t<DIM,double> lap2output(a_domain);
laplacian2periodic(lap2output, output);
double errPoisson = absMaxDiffArray(lap2output, input);
if (a_verbosity >= SHOW_CATEGORIES)
{
printf("%dD Poisson test: max error %11.5e\n", DIM, errPoisson);
}
return errPoisson;
}
template<int DIM>
void rconvDimension(fftx::handle_t (a_transform)
(fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&,
fftx::array_t<DIM, double>&),
fftx::box_t<DIM> a_domain,
fftx::box_t<DIM> a_fdomain,
int a_rounds,
int a_verbosity)
{
std::cout << "***** test " << DIM << "D real convolution on "
<< a_domain << std::endl;
double err = 0.;
updateMax(err,
testConstantSymbol(a_transform, a_domain, a_fdomain,
a_rounds, a_verbosity));
updateMax(err,
testDelta(a_transform, a_domain, a_fdomain,
a_verbosity));
updateMax(err,
testPoisson(a_transform, a_domain, a_fdomain,
a_verbosity));
printf("%dD tests in %d rounds max error %11.5e\n", DIM, a_rounds, err);
}
int main(int argc, char* argv[])
{
// { SHOW_CATEGORIES = 1, SHOW_SUBTESTS = 2, SHOW_ROUNDS = 3};
printf("Usage: %s [verbosity=0] [rounds=20]\n", argv[0]);
int verbosity = 0;
int rounds = 20;
if (argc > 1)
{
verbosity = atoi(argv[1]);
if (argc > 2)
{
rounds = atoi(argv[2]);
}
}
printf("Running with verbosity %d, random %d rounds\n", verbosity, rounds);
/*
Set up random number generator.
*/
std::random_device rd;
generator = std::mt19937(rd());
unifRealDist = std::uniform_real_distribution<double>(-0.5, 0.5);
/*
2-dimensional tests.
*/
// rconv2::init();
// rconvDimension(rconv2::transform, rconv::domain2, rconv::fdomain2,
// rounds, verbosity);
// rconv2::destroy();
/*
3-dimensional tests.
*/
rconv3::init();
rconvDimension(rconv3::transform, rconv::domain3, rconv::fdomain3,
rounds, verbosity);
rconv3::destroy();
printf("%s: All done, exiting\n", argv[0]);
return 0;
}
|
7b522baf2f89601811536ba12a2390070856738f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <math.h>
#include <iostream>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#define USE_GPU 1
/*
enum Piece
{
empty,
white_reg,
white_reg_moved,
white_king,
white_king_moved,
black_reg,
black_reg_moved,
black_king,
black_king_moved
};*/
typedef uint8_t Piece;
const Piece empty = 0;
const Piece white_reg = empty + 1;
const Piece white_reg_moved = white_reg + 1;
const Piece white_king = white_reg_moved + 1;
const Piece white_king_moved = white_king + 1;
const Piece black_reg = white_king_moved + 1;
const Piece black_reg_moved = black_reg + 1;
const Piece black_king = black_reg_moved + 1;
const Piece black_king_moved = black_king + 1;
struct Board {
Piece pieces[4][8];
//bool valid;
};
enum Turn
{
white,
black
};
struct Pair {
unsigned char first;
unsigned char second;
};
const Board bad_board_host = {{empty}};//, false};
__constant__ Board bad_board = {{empty}};//, false};
#define BLOCK_SIZE 512
#define gpuErrChk(stmt) \
do\
{\
hipError_t errCode = stmt; \
if(errCode != hipSuccess)\
{ \
std::cerr << "gpuErrChk: " << hipGetErrorString(errCode)\
<< " " << __FILE__ << " " << __LINE__ << " "\
<< std::endl;\
return -1;\
}\
} while(0)
__device__ Board outputBoard;
__host__ __device__ void makeMoves(Board * boards, Turn turn, unsigned int tx);
__host__ __device__ int ipow(int base, int exp)
{
int result = 1;
while(exp)
{
if(exp & 1)
{
result *= base;
}
exp >>= 1;
base *= base;
}
return result;
}
__host__ __device__ bool boardEquality(const Board *a, const Board *b)
{
for(int x = 0; x < 4; x++)
{
for(int y = 0; y < 8; y++)
{
if(a->pieces[x][y] != b->pieces[x][y])
{
return false;
}
}
}
return true;
}
__host__ bool boardIsValid_host(const Board *a)
{
return !boardEquality(a, &bad_board_host);
}
__device__ bool boardIsValid_device(const Board *a)
{
return !boardEquality(a, &bad_board);
}
__host__ __device__ int analyseBoard(Board *board)
{
int score = 0;
int white_wins = 1;
int black_wins = 1;
for(int x = 0; x < 4; x++)
{
for(int y = 0; y < 8; y++)
{
//kings are worth 2, pieces are worth 1
Piece piece = board->pieces[x][y];
if (piece != empty && piece <= white_king_moved)
{
score += (piece+1)/2;
white_wins = 0;
}
else if (piece != empty)
{
score -= (piece-3)/2;
black_wins = 0;
}
}
}
score = score + white_wins*10000 + black_wins*-10000;
//returns 1,000,000 if invalid board,
return score*(!(white_wins && black_wins)) + 1000000*(white_wins && black_wins);
}
//reduces by 1 turn, with scores at the leaf nodes
//works with 512 spawned threads
__global__ void analyze_score_tree(int * input, int * output){
int tx = threadIdx.x;
unsigned int blockNum = blockIdx.x+blockIdx.y*gridDim.x;
__shared__ int scores[512];
__shared__ int mins[22];
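    // One minimax ply per block: threads 0..21 each take the minimum over the
    // 22 reply scores of one move (skipping the -100000000 "no board" marker),
    // then thread 0 takes the maximum of those 22 minima.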
scores[tx] = input[blockNum*blockDim.x+tx];
__syncthreads();
if(threadIdx.x < 22)
{
int min = 1000000;
for(int i = 0; i < 22; i++)
{
int temp = scores[threadIdx.x*22+i];
if (temp < min && temp != -100000000)
min = temp;
}
mins[threadIdx.x] = min;
}
__syncthreads();
if(threadIdx.x == 0)
{
int max = -100000000;
for(int i = 0; i < 22; i++)
if(mins[i] > max && mins[i] != 1000000)
max = mins[i];
output[blockNum] = max;
}
}
//reduces by 1 turn, with boards at the leaf nodes
//works with 512 spawned threads
__global__ void analyze_board_tree(Board * input, int * output){
int tx = threadIdx.x;
unsigned int blockNum = blockIdx.x+blockIdx.y*gridDim.x;
__shared__ int scores[512];
__shared__ int mins[22];
scores[tx] = analyseBoard(&input[blockNum*blockDim.x+threadIdx.x]);
__syncthreads();
if(threadIdx.x < 22)
{
int min = 1000000;
for(int i = 0; i < 22; i++)
{
int temp = scores[threadIdx.x*22+i];
if (temp < min)
min = temp;
}
mins[threadIdx.x] = min;
}
__syncthreads();
if(threadIdx.x == 0)
{
int max = -100000000;
for(int i = 0; i < 22; i++)
if(mins[i] > max && mins[i] != 1000000)
max = mins[i];
output[blockNum] = max;
}
/*
for(int stride = 2; stride <= 32; stride *= 2)
{
if (board_from_base*(stride) + stride/2 < 22 && board_from_base%stride == 0)
if(scores[base_board+board_from_base*stride+stride/2] < scores[base_board+board_from_base*stride])
scores[base_board+board_from_base*stride] = scores[base_board+board_from_base*stride+stride/2];
__syncthreads();
}
for( int stride = 2; stride <= 32; stride *= 2)
{
int index1 = base_board*stride*22;
int index2 = base_board*stride*22+stride*11;
if(base_board*stride+stride/2 < 22 && base_board%stride == 0)
{
if( scores[index1] < scores[index2] && scores[index2] != 1000000)
scores[base_board*stride*22] = scores[index2];
if (scores[base_board*stride*22] == 1000000)
scores[base_board*stride*22] = -1000000;
}
__syncthreads();
}
if (threadIdx.x == 0)
output[blockNum] = scores[0];*/
}
__global__ void expand(Board * input, Board * output, int len) {
const int shared_size = 484;
__shared__ Board B[shared_size]; //TODO
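    // Each block expands one parent board: thread 0 writes up to 22 white moves
    // 22 slots apart, then each of the first 22 threads generates the black
    // replies for its white move into its own 22-slot stripe.  Slots with no
    // move keep the all-empty bad_board.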
unsigned int tx = threadIdx.x;
unsigned int blockNum = blockIdx.x+blockIdx.y*gridDim.x;
if (blockNum < len && tx == 0)
B[0] = input[blockNum];
else if (blockNum < len && tx < shared_size)
B[tx] = bad_board;
__syncthreads();
  if(tx == 0 && !boardEquality(&B[tx], &bad_board))
makeMoves(B, white, tx);
__syncthreads();
  if(tx < shared_size && !boardEquality(&B[tx], &bad_board))
makeMoves(B, black, tx);
__syncthreads();
if (tx < shared_size && blockNum < len)
output[blockDim.x*blockNum+tx] = B[tx];
else if (blockNum < len)
output[blockDim.x*blockNum+tx] = bad_board;
}
//TODO: deal with 22 move boundary
__host__ __device__
void makeMoves(Board * boards, Turn turn, unsigned int tx)
{
// tx = 0 condition because only the first thread has a valid board to work on.
if(turn == white && tx == 0)
{
int exp_rate = 22;
int move_idx = 0;
Board b = boards[tx];
Board temp = boards[tx];
for(int x = 0; x < 4; x++)
for(int y = 0; y < 8; y++)
{
if (b.pieces[x][y] == white_reg || b.pieces[x][y] == white_king)
{
/*White pieces move (not take) */
if(y%2 && y < 6 && x != 3 && !b.pieces[x+1][y+1])
{
//printf("white at %d,%d move right\n", x, y);
temp.pieces[x+1][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(y%2 && y < 6 && !b.pieces[x][y+1])
{
//printf("white at %d,%d move left\n", x, y);
temp.pieces[x][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && x != 0 && !b.pieces[x-1][y+1])
{
//printf("white at %d,%d move left\n", x, y);
if (y == 6)
temp.pieces[x-1][y+1] = white_king;
else
temp.pieces[x-1][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && !b.pieces[x][y+1])
{
//printf("white at %d,%d move right\n", x, y);
if (y == 6)
temp.pieces[x][y+1] = white_king;
else
temp.pieces[x][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
/*White piece captures a black piece (promotes on reaching the back row)*/
if(y%2 && x!= 3 && b.pieces[x+1][y+1] > white_king_moved && !b.pieces[x+1][y+2])
{
//TODO add double takes here
if (y != 5)
temp.pieces[x+1][y+2] = temp.pieces[x][y];
else
temp.pieces[x+1][y+2] = white_king;
temp.pieces[x][y] = empty;
temp.pieces[x+1][y+1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(y%2 && x != 0 && b.pieces[x][y+1] > white_king_moved && !b.pieces[x-1][y+2])
{
//TODO add double takes here
if (y != 5)
temp.pieces[x-1][y+2] = temp.pieces[x][y];
else
temp.pieces[x-1][y+2] = white_king; // promote on the landing square checked above
temp.pieces[x][y] = empty;
temp.pieces[x][y+1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y < 5 && x != 0 && b.pieces[x-1][y+1] > white_king_moved && !b.pieces[x-1][y+2])
{
//TODO add double takes here
temp.pieces[x-1][y+2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x-1][y+1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y < 5 && x != 3 && b.pieces[x][y+1] > white_king_moved && !b.pieces[x+1][y+2])
{
//TODO add double takes here
temp.pieces[x+1][y+2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x][y+1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
}
if (b.pieces[x][y] == white_king)
{
/*White king move backwards(not take) */
if(y%2 && x != 3 && !b.pieces[x+1][y-1])
{
temp.pieces[x+1][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(y%2 && !b.pieces[x][y-1])
{
temp.pieces[x][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y>0 && x != 0 && !b.pieces[x-1][y-1])
{
temp.pieces[x-1][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y>0 && !b.pieces[x][y-1])
{
temp.pieces[x][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(y%2 && y>1 && x!= 3 && b.pieces[x+1][y-1] > white_king_moved && !b.pieces[x+1][y-2])
{
//TODO add double takes here
temp.pieces[x+1][y-2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x+1][y-1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(y%2 && y>1 && x != 0 && b.pieces[x][y-1] > white_king_moved && !b.pieces[x-1][y-2])
{
//TODO add double takes here
temp.pieces[x-1][y-2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x][y-1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y>0 && x != 0 && b.pieces[x-1][y-1] > white_king_moved && !b.pieces[x-1][y-2])
{
//TODO add double takes here
temp.pieces[x-1][y-2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x-1][y-1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y>0 && x!=3 && b.pieces[x][y-1] > white_king_moved && !b.pieces[x+1][y-2])
{
//TODO add double takes here
temp.pieces[x+1][y-2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x][y-1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
}
}
}
else if (tx < 22)
{
int move_idx = 0;
Board b = boards[tx*22];
Board temp = boards[tx*22];
for(int x = 0; x < 4; x++)
for(int y = 0; y < 8; y++)
{
if (b.pieces[x][y] == black_reg || b.pieces[x][y] == black_king)
{
/*Black pieces move (not take) */
if(y%2 && x != 3 && !b.pieces[x+1][y-1])
{
//printf("black at %d,%d move right\n", x, y);
if (y == 1)
temp.pieces[x+1][y-1] = black_king;
else
temp.pieces[x+1][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(y%2 && !b.pieces[x][y-1])
{
//printf("black at %d,%d move left\n", x, y);
if (y == 1)
temp.pieces[x][y-1] = black_king; // this branch moves to [x][y-1]
else
temp.pieces[x][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y != 0 && x != 0 && !b.pieces[x-1][y-1])
{
//printf("black at %d,%d move left\n", x, y);
temp.pieces[x-1][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y != 0 && !b.pieces[x][y-1])
{
//printf("black at %d,%d move right\n", x, y);
temp.pieces[x][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
/*Black piece captures a white piece*/
if(y%2 && y>1 && x!= 3 && b.pieces[x+1][y-1] > 0 && b.pieces[x+1][y-1] <= white_king_moved && !b.pieces[x+1][y-2])
{
//TODO add double takes here
if (y != 2)
temp.pieces[x+1][y-2] = temp.pieces[x][y];
else
temp.pieces[x+1][y-2] = white_king;
temp.pieces[x][y] = empty;
temp.pieces[x+1][y-1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(y%2 && y>1 && x != 0 && b.pieces[x][y-1] > 0 && b.pieces[x][y-1] <= white_king_moved && !b.pieces[x-1][y-2])
{
//TODO add double takes here
if (y != 2)
temp.pieces[x-1][y-2] = temp.pieces[x][y];
else
temp.pieces[x+1][y-2] = white_king;
temp.pieces[x][y] = empty;
temp.pieces[x][y-1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y>2 && x != 0 && b.pieces[x-1][y-1] <= white_king_moved && b.pieces[x-1][y-1] > 0 && !b.pieces[x-1][y-2])
{
//TODO add double takes here
temp.pieces[x-1][y-2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x-1][y-1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y>2 && x!=3 && b.pieces[x][y-1] <= white_king_moved && b.pieces[x][y-1]>0 && !b.pieces[x+1][y-2])
{
//TODO add double takes here
temp.pieces[x+1][y-2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x][y-1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
}
if (b.pieces[x][y] == black_king)
{
/*Black king moves backwards (not take) */
if(y%2 && y<7 && x != 3 && !b.pieces[x+1][y+1])
{
temp.pieces[x+1][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(y%2 && y<7 && !b.pieces[x][y+1])
{
temp.pieces[x][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && x != 0 && !b.pieces[x-1][y+1])
{
temp.pieces[x-1][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && !b.pieces[x][y+1])
{
temp.pieces[x][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(y%2 && y<6 && x!= 3 && b.pieces[x+1][y+1] <= white_king_moved && b.pieces[x+1][y+1] > 0 && !b.pieces[x+1][y+2])
{
//TODO add double takes here
temp.pieces[x+1][y+2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x+1][y+1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(y%2 && y<6 && x != 0 && b.pieces[x][y+1] <= white_king_moved && b.pieces[x][y+1] > 0 && !b.pieces[x-1][y+2])
{
//TODO add double takes here
temp.pieces[x-1][y+2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x][y+1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y<5 && x != 0 && b.pieces[x-1][y+1] <= white_king_moved && b.pieces[x-1][y+1] > 0 && !b.pieces[x-1][y+2])
{
//TODO add double takes here
temp.pieces[x-1][y+2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x-1][y+1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y<5 && x!=3 && b.pieces[x][y+1] <= white_king_moved && b.pieces[x][y+1] > 0 && !b.pieces[x+1][y+2])
{
//TODO add double takes here
temp.pieces[x+1][y+2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x][y+1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
}
}
}
}
void printBoard(Board b);
int initBoard(Board *b);
int makeMove(Board *board);
int analyseBoard(Board *board, Turn player);
void reverse(Board * b);
int main(int argc, char **argv) {
Board * b = (Board *)malloc(sizeof(Board)*512);
initBoard(b);
for(int i = 0; i <100; i++)
{
clock_t start = clock(), diff;
makeMove(b);
if(i%2)
printBoard(b[0]);
reverse(b);
//printBoard(b[0]);
diff = clock() - start;
int msec = diff * 1000 / CLOCKS_PER_SEC;
//printf("Time taken %d seconds %d milliseconds\n", msec/1000, msec%1000);
}
}
void reverse(Board * b)
{
Piece temp;
for(int i = 0; i < 2; i++)
for(int j = 0; j < 8; j++)
{
temp = b->pieces[i][j];
if(b->pieces[3-i][7-j] > 4)
b->pieces[i][j] = b->pieces[3-i][7-j]-4;
else if(b->pieces[3-i][7-j] <= 4 && b->pieces[3-i][7-j] > 0)
b->pieces[i][j] = b->pieces[3-i][7-j]+4;
else
b->pieces[i][j] = b->pieces[3-i][7-j];
if(temp > 4)
b->pieces[3-i][7-j] = temp-4;
else if(temp <= 4 && temp > 0)
b->pieces[3-i][7-j] = temp+4;
else
b->pieces[3-i][7-j] = temp;
}
}
void printBoard(Board b)
{
printf("Board: --------------------------------------\n");
for(int i = 3; i >= 0; i--)
{
for(int j = 0; j < 4; j++)
{
switch(b.pieces[j][i*2+1])
{
case white_reg:
case white_reg_moved:
printf("_|w|");
break;
case white_king:
case white_king_moved:
printf("_|W|");
break;
case black_reg:
case black_reg_moved:
printf("_|b|");
break;
case black_king:
case black_king_moved:
printf("_|B|");
break;
case empty:
printf("_|_|");
break;
default:
printf("x|x|");
break;
}
}
printf("\n");
for(int j = 0; j < 4; j++)
{
switch(b.pieces[j][i*2])
{
case white_reg:
case white_reg_moved:
printf("w|_|");
break;
case white_king:
case white_king_moved:
printf("W|_|");
break;
case black_reg:
case black_reg_moved:
printf("b|_|");
break;
case black_king:
case black_king_moved:
printf("B|_|");
break;
case empty:
printf("_|_|");
break;
default:
printf("x|x|");
break;
}
}
printf("\n");
}
}
int initBoard(Board *board)
{
if(!board)
{
return -1;
}
    // malloc() does not zero the allocation and rows 3 and 4 are never written
    // below, so clear every square before placing the starting pieces.
    for(int y = 0; y < 8; y++)
        for(int x = 0; x < 4; x++)
            board->pieces[x][y] = empty;
    for(int y = 0; y < 3; y++)
    {
        for(int x = 0; x < 4; x++)
        {
            board->pieces[x][y] = white_reg;
            board->pieces[x][y + 5] = black_reg;
        }
    }
return 0;
}
int makeMove(Board *board)
{
Board *host_output;
Board *host_input;
Board *device_output;
Board *device1_output;
Board *device2_output;
Board *device_input;
int inputSize = 1;
int outputSize = inputSize * 512;
host_input = board;
if(USE_GPU)
{
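        // GPU search: expand the position three times (each expand adds a white
        // move and a black reply, 1 -> 512 -> 512^2 boards, with the third level
        // generated in 512-board chunks), score the leaves with analyze_board_tree,
        // reduce with analyze_score_tree, and pick white's move from the final
        // 22x22 minimax done on the host.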
// cuda malloc
hipMalloc(&device_input, inputSize * sizeof(Board));
hipMalloc(&device1_output, outputSize * sizeof(Board));
hipMalloc(&device2_output, outputSize * 512 * sizeof(Board));
// cuda memcpy
hipMemcpy(device_input, host_input, inputSize * sizeof(*device_input), hipMemcpyHostToDevice);
//launch kernel and check errors
//printf("initializing kernel with grid dim: %d and block dim: %d\n", inputSize, BLOCK_SIZE);
dim3 dimGrid(1);
dim3 dimBlock(BLOCK_SIZE);
hipLaunchKernelGGL(( expand), dim3(dimGrid), dim3(dimBlock), 0, 0, device_input, device1_output, inputSize);
hipPeekAtLastError();
hipDeviceSynchronize();
//set up for second kernel launch
inputSize = outputSize;
outputSize = inputSize * 512;
//launch kernel and check errors
//printf("initializing kernel with grid dim: %d and block dim: %d\n", inputSize, BLOCK_SIZE);
dim3 dimGrid2(512);
hipLaunchKernelGGL(( expand), dim3(dimGrid2), dim3(dimBlock), 0, 0, device1_output, device2_output, inputSize);
hipPeekAtLastError();
hipDeviceSynchronize();
int expansion_rate = 512;
dim3 dimGrid3(1*expansion_rate);
dim3 dimGrid4(512*expansion_rate);
//Board *temp_device_output;
Board *third_level_output;
int * device_first_level_scores;
int * device_second_level_scores;
int * device_third_level_scores;
hipMalloc(&device_second_level_scores, 512*512*sizeof(int));
hipMalloc(&device_third_level_scores, 512*expansion_rate*sizeof(int));
hipMalloc(&device_first_level_scores, 512*sizeof(int));
//gpuErrChk(hipMalloc(&temp_device_output, 512*512*expansion_rate*sizeof(Board)));
gpuErrChk(hipMalloc(&third_level_output, 512*expansion_rate*sizeof(Board)));
for(int i = 0; i < 512*512/expansion_rate; i++)
{
device_input = &device2_output[i*expansion_rate];
hipLaunchKernelGGL(( expand), dim3(dimGrid3), dim3(dimBlock), 0, 0, device_input, third_level_output, expansion_rate);
hipPeekAtLastError();
hipDeviceSynchronize();
//expand<<<dimGrid4, dimBlock>>>(third_level_output, temp_device_output, 512*expansion_rate);
//hipPeekAtLastError();
//hipDeviceSynchronize();
//analyze_board_tree<<<dimGrid4, dimBlock>>>(temp_device_output, device_third_level_scores);
//hipPeekAtLastError();
//hipDeviceSynchronize();
hipLaunchKernelGGL(( analyze_board_tree), dim3(dimGrid3), dim3(dimBlock), 0, 0, third_level_output,
&device_second_level_scores[i*expansion_rate]);
hipPeekAtLastError();
hipDeviceSynchronize();
}
hipLaunchKernelGGL(( analyze_score_tree), dim3(dimGrid2), dim3(dimBlock), 0, 0, device_second_level_scores,
device_first_level_scores);
hipPeekAtLastError();
hipDeviceSynchronize();
int * first_level_scores = (int*)malloc(512*sizeof(int));
Board * second_level_boards = (Board*)malloc(512*512*sizeof(Board));
hipMemcpy(first_level_scores, device_first_level_scores, 512*sizeof(int), hipMemcpyDeviceToHost);
int max = -100000;
int index = -1;
for(int i = 0; i < 22; i++)
{
int min = 1000000;
for(int j = 0; j < 22; j++)
if(first_level_scores[22*i+j] < min && first_level_scores[22*i+j] != -100000000)
min = first_level_scores[22*i+j];
if (min > max && min != 1000000)
{
index = i;
max = min;
}
}
Board boards[512];
boards[0] = host_input[0];
makeMoves(boards, white, 0);
host_input[0] = boards[22*index];
hipFree(device_second_level_scores);
hipFree(device_third_level_scores);
hipFree(device_first_level_scores);
hipFree(third_level_output);
hipFree(device_input);
hipFree(device1_output);
hipFree(device2_output);
free(first_level_scores);
free(second_level_boards);
return 0;
} else // iterative version
{
static int numTurns = 0;
int score = 0;
unsigned long size;
if(!numTurns)
{
std::cin >> numTurns;
}
if(numTurns == 4)
{
size = ipow(512, 3);
} else if(numTurns <= 3)
{
size = ipow(512, numTurns);
} else
{
printf("max 4\n");
return -1;
}
host_output = new (std::nothrow) Board[size];
if(!host_output)
{
fprintf(stderr, "operator new failed on size %lu\n", size);
return -1;
}
host_output[0] = *board;
for(int i = 0; i < numTurns && i < 3; i++)
{
Board *temp_output = new (std::nothrow) Board[size];
if(!temp_output)
{
fprintf(stderr, "new failed on size %lu\n", size);
return -1;
}
for( int j = 0; j < ipow(512, i); j++)
{
if(!boardIsValid_host(&host_output[j]))
{
continue;
}
Board b[512] = {empty};
b[0] = host_output[j];
makeMoves(b, white, 0);
for(int k = 0; k < 512; k++)
{
if(boardIsValid_host(&b[k]))
{
makeMoves(b, black, k);
}
temp_output[512 * j + k] = b[k];
}
}
delete[] host_output;
host_output = temp_output;
}
if(numTurns > 3)
{
for(int i = 0; i < ipow(512,3); i++)
{
Board b[512] = {empty};
//Board *temp_output = new (std::nothrow) Board[ipow(512,2)];
b[0] = host_output[i];
makeMoves(b, white, 0);
for(int j = 0; j < 512; j+=22)
{
if(boardIsValid_host(&host_output[i]))
{
makeMoves(b, black, j);
}
}
for(int j = 0; j < 512; j++)
{
                if(boardIsValid_host(&b[j]))
                {
                    score = ::max(score, analyseBoard(&b[j]));
}
}
//delete[] temp_output;
}
} else
{
int * scores = new int[ipow(512,numTurns - 1)];
int max = 0, idx = -1;
for(int i = numTurns; i > 0; i--)
{
for(int j = 0; j < ipow(512,i); j++)
{
if(boardIsValid_host(&host_output[j]))
{
score = ::max(score, analyseBoard(&host_output[j]));
}
if(!(j % 512))
{
scores[j/512] = score;
if(score > max)
{
max = score;
idx = j/512;
}
score = 0;
}
}
}
//printf("%d, %d\n", max, idx);
Board boards[512];
boards[0] = board[0];
makeMoves(boards, white, 0);
board[0] = boards[0];
}
//printf("Score: %d\n", score);
/*
int sum = 0, last_idx;
for(int i = 0; i < size; i++)
{
if(boardIsValid_host(&host_output[i]))
{
sum++;
last_idx = i;
//printBoard(host_output[i]);
}
}
printf("%d %d\n", sum, last_idx);
printBoard(host_output[last_idx]);
*/
*board = host_output[0];
delete [] host_output; // free only after the result has been copied out
}
return 0;
}
|
7b522baf2f89601811536ba12a2390070856738f.cu
|
#include <algorithm>
#include <math.h>
#include <iostream>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#define USE_GPU 1
/*
enum Piece
{
empty,
white_reg,
white_reg_moved,
white_king,
white_king_moved,
black_reg,
black_reg_moved,
black_king,
black_king_moved
};*/
typedef uint8_t Piece;
const Piece empty = 0;
const Piece white_reg = empty + 1;
const Piece white_reg_moved = white_reg + 1;
const Piece white_king = white_reg_moved + 1;
const Piece white_king_moved = white_king + 1;
const Piece black_reg = white_king_moved + 1;
const Piece black_reg_moved = black_reg + 1;
const Piece black_king = black_reg_moved + 1;
const Piece black_king_moved = black_king + 1;
struct Board {
Piece pieces[4][8];
//bool valid;
};
enum Turn
{
white,
black
};
struct Pair {
unsigned char first;
unsigned char second;
};
const Board bad_board_host = {{empty}};//, false};
__constant__ Board bad_board = {{empty}};//, false};
#define BLOCK_SIZE 512
#define gpuErrChk(stmt) \
do\
{\
cudaError_t errCode = stmt; \
if(errCode != cudaSuccess)\
{ \
std::cerr << "gpuErrChk: " << cudaGetErrorString(errCode)\
<< " " << __FILE__ << " " << __LINE__ << " "\
<< std::endl;\
return -1;\
}\
} while(0)
__device__ Board outputBoard;
__host__ __device__ void makeMoves(Board * boards, Turn turn, unsigned int tx);
__host__ __device__ int ipow(int base, int exp)
{
int result = 1;
while(exp)
{
if(exp & 1)
{
result *= base;
}
exp >>= 1;
base *= base;
}
return result;
}
__host__ __device__ bool boardEquality(const Board *a, const Board *b)
{
for(int x = 0; x < 4; x++)
{
for(int y = 0; y < 8; y++)
{
if(a->pieces[x][y] != b->pieces[x][y])
{
return false;
}
}
}
return true;
}
__host__ bool boardIsValid_host(const Board *a)
{
return !boardEquality(a, &bad_board_host);
}
__device__ bool boardIsValid_device(const Board *a)
{
return !boardEquality(a, &bad_board);
}
__host__ __device__ int analyseBoard(Board *board)
{
int score = 0;
int white_wins = 1;
int black_wins = 1;
for(int x = 0; x < 4; x++)
{
for(int y = 0; y < 8; y++)
{
//kings are worth 2, pieces are worth 1
Piece piece = board->pieces[x][y];
if (piece != empty && piece <= white_king_moved)
{
score += (piece+1)/2;
white_wins = 0;
}
else if (piece != empty)
{
score -= (piece-3)/2;
black_wins = 0;
}
}
}
score = score + white_wins*10000 + black_wins*-10000;
//returns 1,000,000 if invalid board,
return score*(!(white_wins && black_wins)) + 1000000*(white_wins && black_wins);
}
//reduces by 1 turn, with scores at the leaf nodes
//works with 512 spawned threads
__global__ void analyze_score_tree(int * input, int * output){
int tx = threadIdx.x;
unsigned int blockNum = blockIdx.x+blockIdx.y*gridDim.x;
__shared__ int scores[512];
__shared__ int mins[22];
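    // Each block performs one minimax step over a 22x22 block of scores:
    // per-move minima over the opponent's 22 replies, then the max over moves.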
scores[tx] = input[blockNum*blockDim.x+tx];
__syncthreads();
if(threadIdx.x < 22)
{
int min = 1000000;
for(int i = 0; i < 22; i++)
{
int temp = scores[threadIdx.x*22+i];
if (temp < min && temp != -100000000)
min = temp;
}
mins[threadIdx.x] = min;
}
__syncthreads();
if(threadIdx.x == 0)
{
int max = -100000000;
for(int i = 0; i < 22; i++)
if(mins[i] > max && mins[i] != 1000000)
max = mins[i];
output[blockNum] = max;
}
}
//reduces by 1 turn, with boards at the leaf nodes
//works with 512 spawned threads
__global__ void analyze_board_tree(Board * input, int * output){
int tx = threadIdx.x;
unsigned int blockNum = blockIdx.x+blockIdx.y*gridDim.x;
__shared__ int scores[512];
__shared__ int mins[22];
scores[tx] = analyseBoard(&input[blockNum*blockDim.x+threadIdx.x]);
__syncthreads();
if(threadIdx.x < 22)
{
int min = 1000000;
for(int i = 0; i < 22; i++)
{
int temp = scores[threadIdx.x*22+i];
if (temp < min)
min = temp;
}
mins[threadIdx.x] = min;
}
__syncthreads();
if(threadIdx.x == 0)
{
int max = -100000000;
for(int i = 0; i < 22; i++)
if(mins[i] > max && mins[i] != 1000000)
max = mins[i];
output[blockNum] = max;
}
/*
for(int stride = 2; stride <= 32; stride *= 2)
{
if (board_from_base*(stride) + stride/2 < 22 && board_from_base%stride == 0)
if(scores[base_board+board_from_base*stride+stride/2] < scores[base_board+board_from_base*stride])
scores[base_board+board_from_base*stride] = scores[base_board+board_from_base*stride+stride/2];
__syncthreads();
}
for( int stride = 2; stride <= 32; stride *= 2)
{
int index1 = base_board*stride*22;
int index2 = base_board*stride*22+stride*11;
if(base_board*stride+stride/2 < 22 && base_board%stride == 0)
{
if( scores[index1] < scores[index2] && scores[index2] != 1000000)
scores[base_board*stride*22] = scores[index2];
if (scores[base_board*stride*22] == 1000000)
scores[base_board*stride*22] = -1000000;
}
__syncthreads();
}
if (threadIdx.x == 0)
output[blockNum] = scores[0];*/
}
__global__ void expand(Board * input, Board * output, int len) {
const int shared_size = 484;
__shared__ Board B[shared_size]; //TODO
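    // One parent board per block: thread 0 emits up to 22 white moves (stride 22),
    // then threads 0..21 emit the black replies for their move into a 22-wide
    // stripe; slots with no move remain bad_board.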
unsigned int tx = threadIdx.x;
unsigned int blockNum = blockIdx.x+blockIdx.y*gridDim.x;
if (blockNum < len && tx == 0)
B[0] = input[blockNum];
else if (blockNum < len && tx < shared_size)
B[tx] = bad_board;
__syncthreads();
  if(tx == 0 && !boardEquality(&B[tx], &bad_board))
makeMoves(B, white, tx);
__syncthreads();
  if(tx < shared_size && !boardEquality(&B[tx], &bad_board))
makeMoves(B, black, tx);
__syncthreads();
if (tx < shared_size && blockNum < len)
output[blockDim.x*blockNum+tx] = B[tx];
else if (blockNum < len)
output[blockDim.x*blockNum+tx] = bad_board;
}
//TODO: deal with 22 move boundary
__host__ __device__
void makeMoves(Board * boards, Turn turn, unsigned int tx)
{
// tx = 0 condition because only the first thread has a valid board to work on.
if(turn == white && tx == 0)
{
int exp_rate = 22;
int move_idx = 0;
Board b = boards[tx];
Board temp = boards[tx];
for(int x = 0; x < 4; x++)
for(int y = 0; y < 8; y++)
{
if (b.pieces[x][y] == white_reg || b.pieces[x][y] == white_king)
{
/*White pieces move (not take) */
if(y%2 && y < 6 && x != 3 && !b.pieces[x+1][y+1])
{
//printf("white at %d,%d move right\n", x, y);
temp.pieces[x+1][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(y%2 && y < 6 && !b.pieces[x][y+1])
{
//printf("white at %d,%d move left\n", x, y);
temp.pieces[x][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && x != 0 && !b.pieces[x-1][y+1])
{
//printf("white at %d,%d move left\n", x, y);
if (y == 6)
temp.pieces[x-1][y+1] = white_king;
else
temp.pieces[x-1][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && !b.pieces[x][y+1])
{
//printf("white at %d,%d move right\n", x, y);
if (y == 6)
temp.pieces[x][y+1] = white_king;
else
temp.pieces[x][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
/*White piece captures a black piece (promotes on reaching the back row)*/
if(y%2 && x!= 3 && b.pieces[x+1][y+1] > white_king_moved && !b.pieces[x+1][y+2])
{
//TODO add double takes here
if (y != 5)
temp.pieces[x+1][y+2] = temp.pieces[x][y];
else
temp.pieces[x+1][y+2] = white_king;
temp.pieces[x][y] = empty;
temp.pieces[x+1][y+1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(y%2 && x != 0 && b.pieces[x][y+1] > white_king_moved && !b.pieces[x-1][y+2])
{
//TODO add double takes here
if (y != 5)
temp.pieces[x-1][y+2] = temp.pieces[x][y];
else
temp.pieces[x-1][y+2] = white_king; // promote on the landing square checked above
temp.pieces[x][y] = empty;
temp.pieces[x][y+1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y < 5 && x != 0 && b.pieces[x-1][y+1] > white_king_moved && !b.pieces[x-1][y+2])
{
//TODO add double takes here
temp.pieces[x-1][y+2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x-1][y+1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y < 5 && x != 3 && b.pieces[x][y+1] > white_king_moved && !b.pieces[x+1][y+2])
{
//TODO add double takes here
temp.pieces[x+1][y+2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x][y+1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
}
if (b.pieces[x][y] == white_king)
{
/*White king move backwards(not take) */
if(y%2 && x != 3 && !b.pieces[x+1][y-1])
{
temp.pieces[x+1][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(y%2 && !b.pieces[x][y-1])
{
temp.pieces[x][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y>0 && x != 0 && !b.pieces[x-1][y-1])
{
temp.pieces[x-1][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y>0 && !b.pieces[x][y-1])
{
temp.pieces[x][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(y%2 && y>1 && x!= 3 && b.pieces[x+1][y-1] > white_king_moved && !b.pieces[x+1][y-2])
{
//TODO add double takes here
temp.pieces[x+1][y-2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x+1][y-1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(y%2 && y>1 && x != 0 && b.pieces[x][y-1] > white_king_moved && !b.pieces[x-1][y-2])
{
//TODO add double takes here
temp.pieces[x-1][y-2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x][y-1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y>0 && x != 0 && b.pieces[x-1][y-1] > white_king_moved && !b.pieces[x-1][y-2])
{
//TODO add double takes here
temp.pieces[x-1][y-2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x-1][y-1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y>0 && x!=3 && b.pieces[x][y-1] > white_king_moved && !b.pieces[x+1][y-2])
{
//TODO add double takes here
temp.pieces[x+1][y-2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x][y-1] = empty;
boards[tx+move_idx*exp_rate] = temp;
move_idx++;
temp = b;
}
}
}
}
else if (tx < 22)
{
int move_idx = 0;
Board b = boards[tx*22];
Board temp = boards[tx*22];
for(int x = 0; x < 4; x++)
for(int y = 0; y < 8; y++)
{
if (b.pieces[x][y] == black_reg || b.pieces[x][y] == black_king)
{
/*Black pieces move (not take) */
if(y%2 && x != 3 && !b.pieces[x+1][y-1])
{
//printf("black at %d,%d move right\n", x, y);
if (y == 1)
temp.pieces[x+1][y-1] = black_king;
else
temp.pieces[x+1][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(y%2 && !b.pieces[x][y-1])
{
//printf("black at %d,%d move left\n", x, y);
if (y == 1)
temp.pieces[x][y-1] = black_king; // this branch moves to [x][y-1]
else
temp.pieces[x][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y != 0 && x != 0 && !b.pieces[x-1][y-1])
{
//printf("black at %d,%d move left\n", x, y);
temp.pieces[x-1][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y != 0 && !b.pieces[x][y-1])
{
//printf("black at %d,%d move right\n", x, y);
temp.pieces[x][y-1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
/*Black piece captures a white piece*/
if(y%2 && y>1 && x!= 3 && b.pieces[x+1][y-1] > 0 && b.pieces[x+1][y-1] <= white_king_moved && !b.pieces[x+1][y-2])
{
//TODO add double takes here
if (y != 2)
temp.pieces[x+1][y-2] = temp.pieces[x][y];
else
temp.pieces[x+1][y-2] = white_king;
temp.pieces[x][y] = empty;
temp.pieces[x+1][y-1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(y%2 && y>1 && x != 0 && b.pieces[x][y-1] > 0 && b.pieces[x][y-1] <= white_king_moved && !b.pieces[x-1][y-2])
{
//TODO add double takes here
if (y != 2)
temp.pieces[x-1][y-2] = temp.pieces[x][y];
else
temp.pieces[x+1][y-2] = white_king;
temp.pieces[x][y] = empty;
temp.pieces[x][y-1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y>2 && x != 0 && b.pieces[x-1][y-1] <= white_king_moved && b.pieces[x-1][y-1] > 0 && !b.pieces[x-1][y-2])
{
//TODO add double takes here
temp.pieces[x-1][y-2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x-1][y-1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y>2 && x!=3 && b.pieces[x][y-1] <= white_king_moved && b.pieces[x][y-1]>0 && !b.pieces[x+1][y-2])
{
//TODO add double takes here
temp.pieces[x+1][y-2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x][y-1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
}
if (b.pieces[x][y] == black_king)
{
/*Black king moves backwards (not take) */
if(y%2 && y<7 && x != 3 && !b.pieces[x+1][y+1])
{
temp.pieces[x+1][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(y%2 && y<7 && !b.pieces[x][y+1])
{
temp.pieces[x][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && x != 0 && !b.pieces[x-1][y+1])
{
temp.pieces[x-1][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && !b.pieces[x][y+1])
{
temp.pieces[x][y+1] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(y%2 && y<6 && x!= 3 && b.pieces[x+1][y+1] <= white_king_moved && b.pieces[x+1][y+1] > 0 && !b.pieces[x+1][y+2])
{
//TODO add double takes here
temp.pieces[x+1][y+2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x+1][y+1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(y%2 && y<6 && x != 0 && b.pieces[x][y+1] <= white_king_moved && b.pieces[x][y+1] > 0 && !b.pieces[x-1][y+2])
{
//TODO add double takes here
temp.pieces[x-1][y+2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x][y+1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y<5 && x != 0 && b.pieces[x-1][y+1] <= white_king_moved && b.pieces[x-1][y+1] > 0 && !b.pieces[x-1][y+2])
{
//TODO add double takes here
temp.pieces[x-1][y+2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x-1][y+1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
if(!(y%2) && y<5 && x!=3 && b.pieces[x][y+1] <= white_king_moved && b.pieces[x][y+1] > 0 && !b.pieces[x+1][y+2])
{
//TODO add double takes here
temp.pieces[x+1][y+2] = temp.pieces[x][y];
temp.pieces[x][y] = empty;
temp.pieces[x][y+1] = empty;
boards[22*tx+move_idx] = temp;
move_idx++;
temp = b;
}
}
}
}
}
void printBoard(Board b);
int initBoard(Board *b);
int makeMove(Board *board);
int analyseBoard(Board *board, Turn player);
void reverse(Board * b);
int main(int argc, char **argv) {
Board * b = (Board *)malloc(sizeof(Board)*512);
initBoard(b);
for(int i = 0; i <100; i++)
{
clock_t start = clock(), diff;
makeMove(b);
if(i%2)
printBoard(b[0]);
reverse(b);
//printBoard(b[0]);
diff = clock() - start;
int msec = diff * 1000 / CLOCKS_PER_SEC;
//printf("Time taken %d seconds %d milliseconds\n", msec/1000, msec%1000);
}
}
void reverse(Board * b)
{
Piece temp;
for(int i = 0; i < 2; i++)
for(int j = 0; j < 8; j++)
{
temp = b->pieces[i][j];
if(b->pieces[3-i][7-j] > 4)
b->pieces[i][j] = b->pieces[3-i][7-j]-4;
else if(b->pieces[3-i][7-j] <= 4 && b->pieces[3-i][7-j] > 0)
b->pieces[i][j] = b->pieces[3-i][7-j]+4;
else
b->pieces[i][j] = b->pieces[3-i][7-j];
if(temp > 4)
b->pieces[3-i][7-j] = temp-4;
else if(temp <= 4 && temp > 0)
b->pieces[3-i][7-j] = temp+4;
else
b->pieces[3-i][7-j] = temp;
}
}
void printBoard(Board b)
{
printf("Board: --------------------------------------\n");
for(int i = 3; i >= 0; i--)
{
for(int j = 0; j < 4; j++)
{
switch(b.pieces[j][i*2+1])
{
case white_reg:
case white_reg_moved:
printf("_|w|");
break;
case white_king:
case white_king_moved:
printf("_|W|");
break;
case black_reg:
case black_reg_moved:
printf("_|b|");
break;
case black_king:
case black_king_moved:
printf("_|B|");
break;
case empty:
printf("_|_|");
break;
default:
printf("x|x|");
break;
}
}
printf("\n");
for(int j = 0; j < 4; j++)
{
switch(b.pieces[j][i*2])
{
case white_reg:
case white_reg_moved:
printf("w|_|");
break;
case white_king:
case white_king_moved:
printf("W|_|");
break;
case black_reg:
case black_reg_moved:
printf("b|_|");
break;
case black_king:
case black_king_moved:
printf("B|_|");
break;
case empty:
printf("_|_|");
break;
default:
printf("x|x|");
break;
}
}
printf("\n");
}
}
int initBoard(Board *board)
{
if(!board)
{
return -1;
}
    // malloc() does not zero the allocation and rows 3 and 4 are never written
    // below, so clear every square before placing the starting pieces.
    for(int y = 0; y < 8; y++)
        for(int x = 0; x < 4; x++)
            board->pieces[x][y] = empty;
    for(int y = 0; y < 3; y++)
    {
        for(int x = 0; x < 4; x++)
        {
            board->pieces[x][y] = white_reg;
            board->pieces[x][y + 5] = black_reg;
        }
    }
return 0;
}
int makeMove(Board *board)
{
Board *host_output;
Board *host_input;
Board *device_output;
Board *device1_output;
Board *device2_output;
Board *device_input;
int inputSize = 1;
int outputSize = inputSize * 512;
host_input = board;
if(USE_GPU)
{
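        // GPU path: three rounds of expand (1 -> 512 -> 512^2 boards, third level
        // done chunk by chunk), leaf scoring on the device, two reduction kernels,
        // then a final 22x22 minimax on the host to choose white's move.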
// cuda malloc
cudaMalloc(&device_input, inputSize * sizeof(Board));
cudaMalloc(&device1_output, outputSize * sizeof(Board));
cudaMalloc(&device2_output, outputSize * 512 * sizeof(Board));
// cuda memcpy
cudaMemcpy(device_input, host_input, inputSize * sizeof(*device_input), cudaMemcpyHostToDevice);
//launch kernel and check errors
//printf("initializing kernel with grid dim: %d and block dim: %d\n", inputSize, BLOCK_SIZE);
dim3 dimGrid(1);
dim3 dimBlock(BLOCK_SIZE);
expand<<<dimGrid, dimBlock>>>(device_input, device1_output, inputSize);
cudaPeekAtLastError();
cudaDeviceSynchronize();
//set up for second kernel launch
inputSize = outputSize;
outputSize = inputSize * 512;
//launch kernel and check errors
//printf("initializing kernel with grid dim: %d and block dim: %d\n", inputSize, BLOCK_SIZE);
dim3 dimGrid2(512);
expand<<<dimGrid2, dimBlock>>>(device1_output, device2_output, inputSize);
cudaPeekAtLastError();
cudaDeviceSynchronize();
int expansion_rate = 512;
dim3 dimGrid3(1*expansion_rate);
dim3 dimGrid4(512*expansion_rate);
//Board *temp_device_output;
Board *third_level_output;
int * device_first_level_scores;
int * device_second_level_scores;
int * device_third_level_scores;
cudaMalloc(&device_second_level_scores, 512*512*sizeof(int));
cudaMalloc(&device_third_level_scores, 512*expansion_rate*sizeof(int));
cudaMalloc(&device_first_level_scores, 512*sizeof(int));
//gpuErrChk(cudaMalloc(&temp_device_output, 512*512*expansion_rate*sizeof(Board)));
gpuErrChk(cudaMalloc(&third_level_output, 512*expansion_rate*sizeof(Board)));
for(int i = 0; i < 512*512/expansion_rate; i++)
{
device_input = &device2_output[i*expansion_rate];
expand<<<dimGrid3, dimBlock>>>(device_input, third_level_output, expansion_rate);
cudaPeekAtLastError();
cudaDeviceSynchronize();
//expand<<<dimGrid4, dimBlock>>>(third_level_output, temp_device_output, 512*expansion_rate);
//cudaPeekAtLastError();
//cudaDeviceSynchronize();
//analyze_board_tree<<<dimGrid4, dimBlock>>>(temp_device_output, device_third_level_scores);
//cudaPeekAtLastError();
//cudaDeviceSynchronize();
analyze_board_tree<<<dimGrid3, dimBlock>>>(third_level_output,
&device_second_level_scores[i*expansion_rate]);
cudaPeekAtLastError();
cudaDeviceSynchronize();
}
analyze_score_tree<<<dimGrid2, dimBlock>>>(device_second_level_scores,
device_first_level_scores);
cudaPeekAtLastError();
cudaDeviceSynchronize();
int * first_level_scores = (int*)malloc(512*sizeof(int));
Board * second_level_boards = (Board*)malloc(512*512*sizeof(Board));
cudaMemcpy(first_level_scores, device_first_level_scores, 512*sizeof(int), cudaMemcpyDeviceToHost);
int max = -100000;
int index = -1;
for(int i = 0; i < 22; i++)
{
int min = 1000000;
for(int j = 0; j < 22; j++)
if(first_level_scores[22*i+j] < min && first_level_scores[22*i+j] != -100000000)
min = first_level_scores[22*i+j];
if (min > max && min != 1000000)
{
index = i;
max = min;
}
}
Board boards[512];
boards[0] = host_input[0];
makeMoves(boards, white, 0);
host_input[0] = boards[22*index];
cudaFree(device_second_level_scores);
cudaFree(device_third_level_scores);
cudaFree(device_first_level_scores);
cudaFree(third_level_output);
cudaFree(device_input);
cudaFree(device1_output);
cudaFree(device2_output);
free(first_level_scores);
free(second_level_boards);
return 0;
} else // iterative version
{
static int numTurns = 0;
int score = 0;
unsigned long size;
if(!numTurns)
{
std::cin >> numTurns;
}
if(numTurns == 4)
{
size = ipow(512, 3);
} else if(numTurns <= 3)
{
size = ipow(512, numTurns);
} else
{
printf("max 4\n");
return -1;
}
host_output = new (std::nothrow) Board[size];
if(!host_output)
{
fprintf(stderr, "operator new failed on size %lu\n", size);
return -1;
}
host_output[0] = *board;
for(int i = 0; i < numTurns && i < 3; i++)
{
Board *temp_output = new (std::nothrow) Board[size];
if(!temp_output)
{
fprintf(stderr, "new failed on size %lu\n", size);
return -1;
}
for( int j = 0; j < ipow(512, i); j++)
{
if(!boardIsValid_host(&host_output[j]))
{
continue;
}
Board b[512] = {empty};
b[0] = host_output[j];
makeMoves(b, white, 0);
for(int k = 0; k < 512; k++)
{
if(boardIsValid_host(&b[k]))
{
makeMoves(b, black, k);
}
temp_output[512 * j + k] = b[k];
}
}
delete[] host_output;
host_output = temp_output;
}
if(numTurns > 3)
{
for(int i = 0; i < ipow(512,3); i++)
{
Board b[512] = {empty};
//Board *temp_output = new (std::nothrow) Board[ipow(512,2)];
b[0] = host_output[i];
makeMoves(b, white, 0);
for(int j = 0; j < 512; j+=22)
{
if(boardIsValid_host(&host_output[i]))
{
makeMoves(b, black, j);
}
}
for(int j = 0; j < 512; j++)
{
                if(boardIsValid_host(&b[j]))
                {
                    score = std::max(score, analyseBoard(&b[j]));
}
}
//delete[] temp_output;
}
} else
{
int * scores = new int[ipow(512,numTurns - 1)];
int max = 0, idx = -1;
for(int i = numTurns; i > 0; i--)
{
for(int j = 0; j < ipow(512,i); j++)
{
if(boardIsValid_host(&host_output[j]))
{
score = std::max(score, analyseBoard(&host_output[j]));
}
if(!(j % 512))
{
scores[j/512] = score;
if(score > max)
{
max = score;
idx = j/512;
}
score = 0;
}
}
}
//printf("%d, %d\n", max, idx);
Board boards[512];
boards[0] = board[0];
makeMoves(boards, white, 0);
board[0] = boards[0];
}
//printf("Score: %d\n", score);
/*
int sum = 0, last_idx;
for(int i = 0; i < size; i++)
{
if(boardIsValid_host(&host_output[i]))
{
sum++;
last_idx = i;
//printBoard(host_output[i]);
}
}
printf("%d %d\n", sum, last_idx);
printBoard(host_output[last_idx]);
*/
*board = host_output[0];
delete [] host_output; // free only after the result has been copied out
}
return 0;
}
|
e3a2884381c9aa5aa378d9ad4dd47199408e145b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "UpsampleLayer.h"
namespace nvinfer1
{
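// Nearest-neighbour upsampling index map: decode the flat output index into
// (n, c, h, w) for a tensor of shape N x d1 x d2 x d3, divide the spatial
// coordinates by scale_factor, and re-flatten against the smaller input plane.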
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
template <typename Dtype>
__global__ void upscale(const Dtype *input, Dtype *output,
int no_elements, int scale_factor, int d1, int d2, int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
}
template <typename Dtype>
void UpsampleLayerPlugin::forwardGpu(const Dtype* input,Dtype * output,
int N,int C,int H ,int W) {
int numElem = N*C*H*W;
hipLaunchKernelGGL(( upscale), dim3((numElem + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0, input,output, numElem, mScale, C, H, W);
}
size_t type2size(DataType dataType) {
size_t _size = 0;
switch (dataType)
{
case DataType::kFLOAT: _size = sizeof(float);break;
case DataType::kHALF: _size = sizeof(__half);break;
case DataType::kINT8: _size = sizeof(u_int8_t);break;
default:std::cerr << "error data type" << std::endl;
}
return _size;
}
int UpsampleLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
assert(batchSize == 1);
const int channels = mCHW.d[0];
const int64_t in_height = mCHW.d[1];
const int64_t in_width = mCHW.d[2];
const int64_t out_height = mOutputHeight;
const int64_t out_width = mOutputWidth;
int totalElems = batchSize * in_height * in_width * channels;
// Handle no-op resizes efficiently.
if (out_height == in_height && out_width == in_width) {
CUDA_CHECK(hipMemcpyAsync(outputs[0], inputs[0], totalElems * type2size(mDataType), hipMemcpyDeviceToDevice, stream));
CUDA_CHECK(hipStreamSynchronize(stream));
return 0;
}
//CUDA_CHECK(hipStreamSynchronize(stream));
switch (mDataType)
{
case DataType::kFLOAT :
forwardGpu<float>((const float *)inputs[0],(float *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
case DataType::kHALF:
forwardGpu<__half>((const __half *)inputs[0],(__half *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
case DataType::kINT8:
forwardGpu<u_int8_t>((const u_int8_t *)inputs[0],(u_int8_t *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
default:
std::cerr << "error data type" << std::endl;
}
return 0;
};
}
|
e3a2884381c9aa5aa378d9ad4dd47199408e145b.cu
|
#include "UpsampleLayer.h"
namespace nvinfer1
{
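// Maps a flat index of the upscaled output (d1 = C, d2 = H, d3 = W) to the flat index of the
// matching input element: decompose into (n, c, h, w), integer-divide the spatial coordinates
// by scale_factor, and recompose against the smaller input shape (nearest-neighbour upsampling).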
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
template <typename Dtype>
__global__ void upscale(const Dtype *input, Dtype *output,
int no_elements, int scale_factor, int d1, int d2, int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
}
template <typename Dtype>
void UpsampleLayerPlugin::forwardGpu(const Dtype* input,Dtype * output,
int N,int C,int H ,int W) {
int numElem = N*C*H*W;
upscale<<<(numElem + mThreadCount - 1) / mThreadCount, mThreadCount>>>(input,output, numElem, mScale, C, H, W);
}
size_t type2size(DataType dataType) {
size_t _size = 0;
switch (dataType)
{
case DataType::kFLOAT: _size = sizeof(float);break;
case DataType::kHALF: _size = sizeof(__half);break;
case DataType::kINT8: _size = sizeof(u_int8_t);break;
default:std::cerr << "error data type" << std::endl;
}
return _size;
}
int UpsampleLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
assert(batchSize == 1);
const int channels = mCHW.d[0];
const int64_t in_height = mCHW.d[1];
const int64_t in_width = mCHW.d[2];
const int64_t out_height = mOutputHeight;
const int64_t out_width = mOutputWidth;
int totalElems = batchSize * in_height * in_width * channels;
// Handle no-op resizes efficiently.
if (out_height == in_height && out_width == in_width) {
CUDA_CHECK(cudaMemcpyAsync(outputs[0], inputs[0], totalElems * type2size(mDataType), cudaMemcpyDeviceToDevice, stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
return 0;
}
//CUDA_CHECK(cudaStreamSynchronize(stream));
switch (mDataType)
{
case DataType::kFLOAT :
forwardGpu<float>((const float *)inputs[0],(float *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
case DataType::kHALF:
forwardGpu<__half>((const __half *)inputs[0],(__half *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
case DataType::kINT8:
forwardGpu<u_int8_t>((const u_int8_t *)inputs[0],(u_int8_t *)outputs[0],batchSize,mCHW.d[0],mOutputHeight,mOutputWidth);
break;
default:
std::cerr << "error data type" << std::endl;
}
return 0;
};
}
|
f983e6c61e651cdfcead78e7a6900f4b7e00aa12.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <stdio.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) { \
printf("Error: %s : %d,", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
for (int idx = 0; idx < N; idx++) {
C[idx] = A[idx] + B[idx];
}
}
bool checkResult(float *A, float *B, int size) {
double epsilon = 1.0E-8;
for (int idx = 0; idx < size; idx++) {
if (abs(A[idx] - B[idx]) > epsilon) {
return false;
}
//printf("%d : %f %f\n", idx, A[idx], B[idx]);
}
return true;
}
__global__ void sumArraysOnDevice(float *A, float *B, float*C) {
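    // Note: no bounds check on idx — the launch in main() uses exactly nElem threads in a
    // single block, so every idx is valid (assumes nElem fits within one block).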
int idx = blockIdx.x * blockDim.x + threadIdx.x;
C[idx] = A[idx] + B[idx];
}
void initialData(float *ip, int size) {
time_t t;
srand((unsigned int) time(&t));
for (int i=0; i<size; i++) {
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
int main(int argc, char **argv) {
int nElem = 1024;
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
float *h_C1;
h_A = (float*)malloc(nBytes);
h_B = (float*)malloc(nBytes);
h_C = (float*)malloc(nBytes);
h_C1 = (float*)malloc(nBytes);
hipMalloc((float**)&d_A, nBytes);
hipMalloc((float**)&d_B, nBytes);
hipMalloc((float**)&d_C, nBytes);
initialData(h_A, nElem);
initialData(h_B, nElem);
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
sumArraysOnHost(h_A, h_B, h_C, nElem);
hipLaunchKernelGGL(( sumArraysOnDevice), dim3(1), dim3(nElem), 0, 0, d_A, d_B, d_C);
hipMemcpy(h_C1, d_C, nBytes, hipMemcpyDeviceToHost);
if (!checkResult(h_C, h_C1, nElem)) {
printf("Result is not identity!\n");
} else {
printf("Result is identity!\n");
}
free(h_A);
free(h_B);
free(h_C);
free(h_C1);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return 0;
}
|
f983e6c61e651cdfcead78e7a6900f4b7e00aa12.cu
|
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <stdio.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
printf("Error: %s : %d,", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
for (int idx = 0; idx < N; idx++) {
C[idx] = A[idx] + B[idx];
}
}
bool checkResult(float *A, float *B, int size) {
double epsilon = 1.0E-8;
for (int idx = 0; idx < size; idx++) {
if (abs(A[idx] - B[idx]) > epsilon) {
return false;
}
//printf("%d : %f %f\n", idx, A[idx], B[idx]);
}
return true;
}
__global__ void sumArraysOnDevice(float *A, float *B, float*C) {
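    // Note: no bounds check on idx — the launch in main() uses exactly nElem threads in a
    // single block, so every idx is valid (assumes nElem fits within one block).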
int idx = blockIdx.x * blockDim.x + threadIdx.x;
C[idx] = A[idx] + B[idx];
}
void initialData(float *ip, int size) {
time_t t;
srand((unsigned int) time(&t));
for (int i=0; i<size; i++) {
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
int main(int argc, char **argv) {
int nElem = 1024;
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
float *h_C1;
h_A = (float*)malloc(nBytes);
h_B = (float*)malloc(nBytes);
h_C = (float*)malloc(nBytes);
h_C1 = (float*)malloc(nBytes);
cudaMalloc((float**)&d_A, nBytes);
cudaMalloc((float**)&d_B, nBytes);
cudaMalloc((float**)&d_C, nBytes);
initialData(h_A, nElem);
initialData(h_B, nElem);
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
sumArraysOnHost(h_A, h_B, h_C, nElem);
sumArraysOnDevice<<<1, nElem>>>(d_A, d_B, d_C);
cudaMemcpy(h_C1, d_C, nBytes, cudaMemcpyDeviceToHost);
if (!checkResult(h_C, h_C1, nElem)) {
printf("Result is not identity!\n");
} else {
printf("Result is identity!\n");
}
free(h_A);
free(h_B);
free(h_C);
free(h_C1);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
}
|
a0372146216389a531059289e6b17c1db34e9784.hip
|
// !!! This is a file automatically generated by hipify!!!
// Program corresponding to CythonSBM.cu that can be run directly from the command line. For testing purposes.
//#include <cmath>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
// Error handling code used in Nvidia example found here: https://docs.nvidia.com/cuda/hiprand/host-api-overview.html#generator-options
#define CUDA_CALL(x) do { if((x)!=hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
//Function to generate brownian path, which is stored in results. Executes on the GPU, hence the __global__ identifier
__global__ void randomWalk(double *results, int T, int N) {
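    // Single-threaded walk: each step adds an independent standard-normal increment
    // scaled by sqrt(dt) = sqrt(T / N) to the previous position.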
hiprandState_t state;
hiprand_init (1234, 0, 0, &state);
double random;
results[0] = 0.0;
for (int j = 1; j < N; j++) {
random = hiprand_normal_double(&state);
results[j] = results[j-1] + random * sqrt((double) T / N);
}
/*
Generate 2 doubles at once. Test later to see if this is more efficient:
double hiprand_normal2_double (state);
*/
}
int main() {
//Arrays to store the brownian path, one for the host and one for the device
int N = 500;
int T = 1;
double results[N];
double *dev_results;
// Allocate space for results array on device
CUDA_CALL(hipMalloc(&dev_results, N * sizeof(double)));
    //Call GPU function, with only one block and one thread
hipLaunchKernelGGL(( randomWalk), dim3(1), dim3(1), 0, 0, dev_results, T, N);
//copy results array from device to host
CUDA_CALL(hipMemcpy(results, dev_results , N * sizeof(double), hipMemcpyDeviceToHost));
// print out path
for (int i=0; i<N; i++) {
printf("%f ", results[i]);
}
printf("\n");
//clean up
CUDA_CALL(hipFree(dev_results));
return 0;
}
|
a0372146216389a531059289e6b17c1db34e9784.cu
|
// Program corresponding to CythonSBM.cu that can be run directly from the command line. For testing purposes.
//#include <cmath>
#include <curand_kernel.h>
#include <stdio.h>
#include <cuda.h>
// Error handling code used in Nvidia example found here: https://docs.nvidia.com/cuda/curand/host-api-overview.html#generator-options
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
//Function to generate brownian path, which is stored in results. Executes on the GPU, hence the __global__ identifier
__global__ void randomWalk(double *results, int T, int N) {
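    // Single-threaded walk: each step adds an independent standard-normal increment
    // scaled by sqrt(dt) = sqrt(T / N) to the previous position.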
curandState_t state;
curand_init (1234, 0, 0, &state);
double random;
results[0] = 0.0;
for (int j = 1; j < N; j++) {
random = curand_normal_double(&state);
results[j] = results[j-1] + random * sqrt((double) T / N);
}
/*
Generate 2 doubles at once. Test later to see if this is more efficient:
double curand_normal2_double (state);
*/
}
int main() {
//Arrays to store the brownian path, one for the host and one for the device
int N = 500;
int T = 1;
double results[N];
double *dev_results;
// Allocate space for results array on device
CUDA_CALL(cudaMalloc(&dev_results, N * sizeof(double)));
    //Call GPU function, with only one block and one thread
randomWalk<<<1, 1>>>(dev_results, T, N);
//copy results array from device to host
CUDA_CALL(cudaMemcpy(results, dev_results , N * sizeof(double), cudaMemcpyDeviceToHost));
// print out path
for (int i=0; i<N; i++) {
printf("%f ", results[i]);
}
printf("\n");
//clean up
CUDA_CALL(cudaFree(dev_results));
return 0;
}
|
4a2a2188762724006085ffed6a9785c9abd7f4ba.hip
|
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
constexpr char modified_bessel_i1_name[] = "modified_bessel_i1_forward";
void modified_bessel_i1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i1_cuda", [&]() {
jitted_gpu_kernel<modified_bessel_i1_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_i1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i1_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return modified_bessel_i1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_modified_bessel_i1_stub, &modified_bessel_i1_kernel_cuda);
} // namespace at::native
|
4a2a2188762724006085ffed6a9785c9abd7f4ba.cu
|
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
constexpr char modified_bessel_i1_name[] = "modified_bessel_i1_forward";
void modified_bessel_i1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i1_cuda", [&]() {
jitted_gpu_kernel<modified_bessel_i1_name, scalar_t, scalar_t, 1>(iterator, modified_bessel_i1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "modified_bessel_i1_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return modified_bessel_i1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_modified_bessel_i1_stub, &modified_bessel_i1_kernel_cuda);
} // namespace at::native
|
12f7bf043d039cbc5205ab6a1c674310c0254b0f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
#define MAX_K 4
#define MAX_BATCH_SIZE 32
#define MAX_N 12
Tensor FFModel::aggregate(const Tensor* inputs, /* gate_preds, gate_assign, full_gate_pred, n * exp_pred */
int n, float lambda_bal, const char* name)
{
Aggregate* aggr = new Aggregate(*this, inputs, n, lambda_bal, name);
layers.push_back(aggr);
return aggr->outputs[0];
}
Aggregate::Aggregate(FFModel& model,
const Tensor* _inputs,
int _n, float _lambda_bal, const char* name)
: Op(model, OP_AGGREGATE, name, _n+4, _inputs),
n(_n), lambda_bal(_lambda_bal),
profiling(model.config.profiling)
{
  // FIXME: For now, set hard upper limits. Better: size these arrays per launch, but
  // shared memory is assigned per block, so that would need to be checked; see
// https://stackoverflow.com/questions/5531247/allocating-shared-memory/5531640#5531640
assert(n <= MAX_N && "Increase MAX_N in #define");
assert(inputs[0].adim[0] <= MAX_K && "Increase MAX_K in #define");
assert(inputs[0].adim[1] <= MAX_BATCH_SIZE && "Increase MAX_BATCH_SIZE in #define");
assert(n+4 == numInputs);
assert(n > 0);
assert(inputs[0].numDim == 2);
assert(inputs[1].numDim == 2);
assert(inputs[2].numDim == 2);
assert(inputs[3].numDim == 2);
for(int i = 0; i < inputs[0].numDim; i++) {
assert(inputs[0].adim[i] == inputs[1].adim[i]);
assert(inputs[0].adim[i] == inputs[2].adim[i]);
}
assert(inputs[0].adim[1] == inputs[3].adim[1]);
assert(inputs[3].adim[0] == n);
// expert inputs
int num_dim = inputs[4].numDim;
int out_dim = inputs[4].adim[0];
for(int i = 1; i < n; i++) {
assert(inputs[i+4].numDim == num_dim);
assert(inputs[i+4].adim[0] == out_dim);
}
// output
outputs[0].numDim = num_dim;
for(int i = 0; i < num_dim-1; i++)
outputs[0].adim[i] = inputs[4].adim[i];
outputs[0].adim[num_dim-1] = inputs[0].adim[num_dim-1];
numWeights = 0;
}
void Aggregate::create_weights(FFModel& model)
{
// Do nothing
}
void Aggregate::create_output_and_partition(FFModel& model)
{
// Retrieve the task indexspace for the op
std::string pcname = name;
task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
// Can only partition over the sample dim
assert(part_rect.hi[0] == part_rect.lo[0]);
int num_dim = inputs[4].numDim;
int dims[num_dim];
dims[0] = inputs[0].adim[1];
for (int i = 1; i < num_dim; i++)
dims[i] = inputs[4].adim[num_dim-1-i];
outputs[0] = model.create_tensor<2>(dims, DT_FLOAT, this);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
// Compute partition bound for input
for(int i = 0; i < n+4; i++) {
Rect<2> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<2>(
inputs[i], (IndexSpaceT<2>)task_is, input_lps[i], input_grad_lps[i]);
}
}
}
OpMeta* Aggregate::init_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
Aggregate* agg = (Aggregate*) task->args;
FFHandler handle = *((FFHandler*)task->local_args);
AggregateMeta* m = new AggregateMeta(handle, agg->n);
m->profiling = agg->profiling;
return m;
}
void Aggregate::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
ParallelConfig pc; \
std::string pcname = name; \
ff.config.find_parallel_config(DIM, pcname, pc); \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(AGGREGATE_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Aggregate)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
meta[idx++] = fm.get_result<OpMeta*>(*it); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
__global__
void agg_forward_kernel(float** exp_preds,
const int* exp_assign,
const float* gate_net_preds,
float* output,
int n,
const int k, // num chosen experts
int exp_samples, // max samples per expert
const int batch_size,
int out_dim)
{
__shared__ float* chosen_exp_preds[MAX_K*MAX_BATCH_SIZE];
  // Get pred pointers, single thread per block
if(threadIdx.x == 0) {
int expert_idx[MAX_N] = {0};
for(int i = 0; i < batch_size; i++) {
for(int j = 0; j < k; j++) {
// Get pointer to chosen expert predictions
int expert = exp_assign[i*k+j];
if(expert_idx[expert] >= exp_samples) {
// dropped sample
chosen_exp_preds[i*k+j] = 0;
continue;
}
chosen_exp_preds[i*k+j] = exp_preds[expert] + expert_idx[expert]*out_dim;
expert_idx[expert]++;
}
}
}
// set output tensor to 0
CUDA_KERNEL_LOOP(i, batch_size*out_dim)
{
output[i] = 0.0f;
}
__syncthreads();
// compute output
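  // Each index i is one (sample, chosen-expert, output-feature) element; out_id maps it onto
  // the sample's row of the output, and the expert prediction is accumulated there weighted
  // by its gate score (atomicAdd, since the k chosen experts share the same row).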
CUDA_KERNEL_LOOP(i, k*out_dim*batch_size)
{
if(chosen_exp_preds[i/out_dim] != 0) {
float res = gate_net_preds[i/out_dim] * chosen_exp_preds[i/out_dim][i%(out_dim)];
int out_id = (i/(k*out_dim))*out_dim + (i%out_dim);
atomicAdd(output+out_id, res);
}
}
}
__device__
void agg_backward_kernel_gate(const float* output_grad,
float* full_gate_grads,
float** exp_preds,
const int* expert_assign,
const bool* cache_corr,
int* expert_bal, float lambda_bal,
int batch_size, int k, int n, int out_dim)
{
// gate gradient
CUDA_KERNEL_LOOP(i, batch_size*k*out_dim)
{
if (exp_preds[i/out_dim] != 0 && cache_corr[i/(k*out_dim)]) {
int out_id = (i/(k*out_dim))*out_dim + (i%out_dim);
float res = output_grad[out_id] * exp_preds[i/out_dim][i%out_dim];
float* gate_grad_idx = full_gate_grads + (i/(out_dim*k))*n
+ expert_assign[(i/(out_dim*k))*k+(i/out_dim)%k];
atomicAdd(gate_grad_idx, res);
}
}
// balance term
CUDA_KERNEL_LOOP(i, n*batch_size)
{
atomicAdd(full_gate_grads+i, lambda_bal*expert_bal[i%n]);
}
__syncthreads();
// make 0 mean
CUDA_KERNEL_LOOP(i, batch_size*n)
{
int start = (i/n)*n;
float sub = -full_gate_grads[i]/n;
for(int j = 0; j < n; j++) {
atomicAdd(full_gate_grads+start+j, sub);
}
}
}
__device__
void agg_backward_kernel_exp(const float* output_grad,
const float* gate_preds,
float** exp_grads,
int batch_size,
int k,
int out_dim) {
// compute expert gradients
CUDA_KERNEL_LOOP(i, k*out_dim*batch_size)
{
if (exp_grads[i/out_dim] != 0) {
int out_id = (i/(k*out_dim))*out_dim + (i%out_dim);
exp_grads[i/out_dim][i%out_dim] += gate_preds[i/out_dim] * output_grad[out_id];
}
}
}
__global__
void agg_backward_kernel(float** exp_preds,
float** exp_grads,
const int* exp_assign,
const int* true_exp_assign,
const float* gating_net_preds,
float* full_gating_grads,
const float* output_grads,
int n, // num experts
int k, // num chosen experts
int exp_samples, // max samples per expert
float lambda_bal,
int batch_size,
int out_dim)
{
__shared__ float* chosen_exp_preds[MAX_K*MAX_BATCH_SIZE];
__shared__ float* chosen_exp_grads[MAX_K*MAX_BATCH_SIZE];
__shared__ int expert_bal[MAX_N];
__shared__ bool cache_corr[MAX_BATCH_SIZE];
// Get pred pointers, single thread per block
if(threadIdx.x == 0) {
// init arrays
for(int i = 0; i < n; i++) expert_bal[i] = 0;
for(int i = 0; i < batch_size; i++) cache_corr[i] = true;
// Get pointer to chosen expert predictions and expert counts
for(int i = 0; i < batch_size; i++) {
for(int j = 0; j < k; j++) {
int expert = true_exp_assign[k*i + j];
if(expert != exp_assign[k*i + j])
cache_corr[i] = false;
if(expert_bal[expert] >= exp_samples) {
// dropped sample
chosen_exp_preds[i*k+j] = 0;
chosen_exp_grads[i*k+j] = 0;
expert_bal[expert]++;
continue;
}
chosen_exp_preds[i*k+j] = exp_preds[expert] + expert_bal[expert]*out_dim;
chosen_exp_grads[i*k+j] = exp_grads[expert] + expert_bal[expert]*out_dim;
expert_bal[expert]++;
}
}
}
__syncthreads();
// FIXME: These 2 functions could execute independently in parallel
// get expert gradients
agg_backward_kernel_exp(output_grads, gating_net_preds, chosen_exp_grads,
batch_size, k, out_dim);
// get gating net gradients
agg_backward_kernel_gate(output_grads, full_gating_grads, chosen_exp_preds,
exp_assign, cache_corr, expert_bal, (lambda_bal*n)/batch_size, batch_size,
k, n, out_dim);
}
void Aggregate::forward_task(const Task *task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
int n = ((Aggregate*)task->args)->n;
assert((int)regions.size() == n+3);
assert((int)task->regions.size() == n+3);
const AggregateMeta* m = *((AggregateMeta**)task->local_args);
// get gate_pred, gate_assign, output
const AccessorRO<float, 2> acc_gate_pred(regions[0], FID_DATA);
const AccessorRO<int, 2> acc_gate_assign(regions[1], FID_DATA);
const AccessorWO<float, 2> acc_output(regions[n+2], FID_DATA);
Rect<2> rect_gate_pred = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<2> rect_gate_assign = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<2> rect_output = runtime->get_index_space_domain(
ctx, task->regions[n+2].region.get_index_space());
coord_t batch_size = rect_gate_pred.hi[1] - rect_gate_pred.lo[1] + 1;
assert(batch_size == rect_gate_assign.hi[1] - rect_gate_assign.lo[1] + 1);
assert(rect_gate_pred.hi[0] - rect_gate_pred.lo[0] == rect_gate_assign.hi[0] - rect_gate_assign.lo[0]);
assert(batch_size == rect_output.hi[1] - rect_output.lo[1] + 1);
coord_t out_dim = rect_output.hi[0] - rect_output.lo[0] + 1;
// get exp_preds
float* exp_preds[n];
// get first exp_pred and row and out_dim
Domain exp_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
exp_preds[0] = helperGetTensorPointerWO<float>(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
coord_t rows = exp_domain.hi()[1] - exp_domain.lo()[1] + 1;
assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1);
for(int i = 1; i < n; i++) {
exp_domain = runtime->get_index_space_domain(
ctx, task->regions[i+2].region.get_index_space());
exp_preds[i] = helperGetTensorPointerWO<float>(
regions[i+2], task->regions[i+2], FID_DATA, ctx, runtime);
assert(rows == exp_domain.hi()[1] - exp_domain.lo()[1] + 1);
assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1);
}
int k = (int)(rect_gate_assign.hi[0] - rect_gate_assign.lo[0] + 1);
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
checkCUDA(hipblasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
// call forward_kernel
hipMemcpy(m->dev_exp_preds, exp_preds, n*sizeof(float*), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( agg_forward_kernel), dim3(GET_BLOCKS(batch_size*k*out_dim)), dim3(min(CUDA_NUM_THREADS,(int)(batch_size*k*out_dim))), 0, stream,
m->dev_exp_preds, acc_gate_assign.ptr(rect_gate_assign), acc_gate_pred.ptr(rect_gate_pred),
acc_output.ptr(rect_output), n, k, rows, batch_size, out_dim);
}
void Aggregate::backward_task(const Task *task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
const AggregateMeta* m = *((AggregateMeta**)task->local_args);
int n = ((Aggregate*)task->args)->n;
float lambda_bal = ((Aggregate*)task->args)->lambda_bal;
assert((int)regions.size() == 2*n+5);
assert((int)task->regions.size() == 2*n+5);
// get gate_pred, gate_grad, gate_assign, output_grad
const AccessorRO<float, 2> acc_gate_pred(regions[0], FID_DATA);
const AccessorRO<int, 2> acc_gate_assign(regions[1], FID_DATA);
const AccessorRO<int, 2> acc_true_gate_assign(regions[2], FID_DATA);
const AccessorWO<float, 2> full_acc_gate_grad(regions[3], FID_DATA);
const AccessorRO<float, 2> acc_output_grad(regions[2*n+4], FID_DATA);
Rect<2> rect_gate_pred = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<2> rect_gate_assign = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<2> rect_true_gate_assign = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
Rect<2> rect_full_gate_grad = runtime->get_index_space_domain(
ctx, task->regions[3].region.get_index_space());
Rect<2> rect_out_grad = runtime->get_index_space_domain(
ctx, task->regions[2*n+4].region.get_index_space());
coord_t batch_size = rect_gate_pred.hi[1] - rect_gate_pred.lo[1] + 1;
assert(batch_size == rect_gate_assign.hi[1] - rect_gate_assign.lo[1] + 1);
assert(rect_gate_assign == rect_true_gate_assign);
assert(batch_size == rect_out_grad.hi[1] - rect_out_grad.lo[1] + 1);
assert(batch_size == rect_full_gate_grad.hi[1] - rect_full_gate_grad.lo[1] + 1);
coord_t k = rect_gate_assign.hi[0] - rect_gate_assign.lo[0] + 1;
assert(rect_gate_pred.hi[0] - rect_gate_pred.lo[0] + 1 == k);
coord_t out_dim = rect_out_grad.hi[0] - rect_out_grad.lo[0] + 1;
assert(n == rect_full_gate_grad.hi[0] - rect_full_gate_grad.lo[0] + 1);
// get exp_preds
float* exp_preds[n];
// get first exp_pred and row
Domain exp_domain = runtime->get_index_space_domain(
ctx, task->regions[4].region.get_index_space());
exp_preds[0] = helperGetTensorPointerRW<float>(
regions[4], task->regions[4], FID_DATA, ctx, runtime);
coord_t rows = exp_domain.hi()[1] - exp_domain.lo()[1] + 1;
assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1);
for(int i = 1; i < n; i++) {
exp_domain = runtime->get_index_space_domain(
ctx, task->regions[i+4].region.get_index_space());
exp_preds[i] = helperGetTensorPointerRW<float>(
regions[i+4], task->regions[i+4], FID_DATA, ctx, runtime);
assert(rows == exp_domain.hi()[1] - exp_domain.lo()[1] + 1);
assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1);
}
// get chosen_exp_grads
float* exp_grads[n];
for(int i = 0; i < n; i++) {
exp_domain = runtime->get_index_space_domain(
ctx, task->regions[n+i+4].region.get_index_space());
exp_grads[i] = helperGetTensorPointerRW<float>(
regions[n+i+4], task->regions[n+i+4], FID_DATA, ctx, runtime);
assert(rows == exp_domain.hi()[1] - exp_domain.lo()[1] + 1);
assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1);
}
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
checkCUDA(hipblasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
// call backward kernel
hipMemcpy(m->dev_exp_preds, exp_preds, n*sizeof(float*), hipMemcpyHostToDevice);
hipMemcpy(m->dev_exp_grads, exp_grads, n*sizeof(float*), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( agg_backward_kernel), dim3(GET_BLOCKS(batch_size*k*out_dim)), dim3(min(CUDA_NUM_THREADS,(int)(batch_size*k*out_dim))), 0, stream,
m->dev_exp_preds, m->dev_exp_grads, acc_gate_assign.ptr(rect_gate_assign),
acc_true_gate_assign.ptr(rect_true_gate_assign), acc_gate_pred.ptr(rect_gate_pred),
full_acc_gate_grad.ptr(rect_full_gate_grad), acc_output_grad.ptr(rect_out_grad),
n, k, rows, lambda_bal, batch_size, out_dim);
}
void Aggregate::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
OpMeta* mp = meta[idx++]; \
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(AGGREGATE_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Aggregate)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// gate_preds
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// gate_assign
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[1].region));
launcher.add_field(1, FID_DATA);
// exp_preds
for(int i = 0; i < n; i++) {
launcher.add_region_requirement(
RegionRequirement(input_lps[i+4], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[i+4].region));
launcher.add_field(i+2, FID_DATA);
}
// output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(n+2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
void Aggregate::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
OpMeta* mp = meta[idx++]; \
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(AGGREGATE_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Aggregate)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// gate_preds
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// gate_assign
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(1, FID_DATA);
// true gate_assign
launcher.add_region_requirement(
RegionRequirement(input_lps[2], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[2].region));
launcher.add_field(2, FID_DATA);
// full_gate gradients
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[3], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[3].region_grad));
launcher.add_field(3, FID_DATA);
// exp_preds
for(int i = 0; i < n; i++) {
launcher.add_region_requirement(
RegionRequirement(input_lps[i+4], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[i+4].region));
launcher.add_field(i+4, FID_DATA);
}
// exp_preds gradients
for(int i = 0; i < n; i++) {
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[i+4], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[i+4].region_grad));
launcher.add_field(i+n+4, FID_DATA);
}
// output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(2*n+4, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
AggregateMeta::AggregateMeta(FFHandler handler, int n)
: OpMeta(handler)
{
checkCUDA(hipMalloc(&dev_exp_preds, n*sizeof(float*)));
checkCUDA(hipMalloc(&dev_exp_grads, n*sizeof(float*)));
}
AggregateMeta::~AggregateMeta(void)
{
  checkCUDA(hipFree(dev_exp_preds));   // free the device arrays themselves, not the addresses of the member pointers
  checkCUDA(hipFree(dev_exp_grads));
}
bool Aggregate::measure_operator_cost(Simulator* sim,
const ParallelConfig& pc,
CostMetrics& cost_metrics)
{
//TODO: implement
cost_metrics.forward_time = 0.0f;
cost_metrics.backward_time = 0.0f;
cost_metrics.memory_requirement = 0;
return false;
}
|
12f7bf043d039cbc5205ab6a1c674310c0254b0f.cu
|
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
#define MAX_K 4
#define MAX_BATCH_SIZE 32
#define MAX_N 12
Tensor FFModel::aggregate(const Tensor* inputs, /* gate_preds, gate_assign, full_gate_pred, n * exp_pred */
int n, float lambda_bal, const char* name)
{
Aggregate* aggr = new Aggregate(*this, inputs, n, lambda_bal, name);
layers.push_back(aggr);
return aggr->outputs[0];
}
Aggregate::Aggregate(FFModel& model,
const Tensor* _inputs,
int _n, float _lambda_bal, const char* name)
: Op(model, OP_AGGREGATE, name, _n+4, _inputs),
n(_n), lambda_bal(_lambda_bal),
profiling(model.config.profiling)
{
  // FIXME: For now, set hard upper limits. Better: size these arrays per launch, but
  // shared memory is assigned per block, so that would need to be checked; see
// https://stackoverflow.com/questions/5531247/allocating-shared-memory/5531640#5531640
assert(n <= MAX_N && "Increase MAX_N in #define");
assert(inputs[0].adim[0] <= MAX_K && "Increase MAX_K in #define");
assert(inputs[0].adim[1] <= MAX_BATCH_SIZE && "Increase MAX_BATCH_SIZE in #define");
assert(n+4 == numInputs);
assert(n > 0);
assert(inputs[0].numDim == 2);
assert(inputs[1].numDim == 2);
assert(inputs[2].numDim == 2);
assert(inputs[3].numDim == 2);
for(int i = 0; i < inputs[0].numDim; i++) {
assert(inputs[0].adim[i] == inputs[1].adim[i]);
assert(inputs[0].adim[i] == inputs[2].adim[i]);
}
assert(inputs[0].adim[1] == inputs[3].adim[1]);
assert(inputs[3].adim[0] == n);
// expert inputs
int num_dim = inputs[4].numDim;
int out_dim = inputs[4].adim[0];
for(int i = 1; i < n; i++) {
assert(inputs[i+4].numDim == num_dim);
assert(inputs[i+4].adim[0] == out_dim);
}
// output
outputs[0].numDim = num_dim;
for(int i = 0; i < num_dim-1; i++)
outputs[0].adim[i] = inputs[4].adim[i];
outputs[0].adim[num_dim-1] = inputs[0].adim[num_dim-1];
numWeights = 0;
}
void Aggregate::create_weights(FFModel& model)
{
// Do nothing
}
void Aggregate::create_output_and_partition(FFModel& model)
{
// Retrieve the task indexspace for the op
std::string pcname = name;
task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
// Can only partition over the sample dim
assert(part_rect.hi[0] == part_rect.lo[0]);
int num_dim = inputs[4].numDim;
int dims[num_dim];
dims[0] = inputs[0].adim[1];
for (int i = 1; i < num_dim; i++)
dims[i] = inputs[4].adim[num_dim-1-i];
outputs[0] = model.create_tensor<2>(dims, DT_FLOAT, this);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
// Compute partition bound for input
for(int i = 0; i < n+4; i++) {
Rect<2> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<2>(
inputs[i], (IndexSpaceT<2>)task_is, input_lps[i], input_grad_lps[i]);
}
}
}
OpMeta* Aggregate::init_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
Aggregate* agg = (Aggregate*) task->args;
FFHandler handle = *((FFHandler*)task->local_args);
AggregateMeta* m = new AggregateMeta(handle, agg->n);
m->profiling = agg->profiling;
return m;
}
void Aggregate::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
ParallelConfig pc; \
std::string pcname = name; \
ff.config.find_parallel_config(DIM, pcname, pc); \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(AGGREGATE_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Aggregate)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
meta[idx++] = fm.get_result<OpMeta*>(*it); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
__global__
void agg_forward_kernel(float** exp_preds,
const int* exp_assign,
const float* gate_net_preds,
float* output,
int n,
const int k, // num chosen experts
int exp_samples, // max samples per expert
const int batch_size,
int out_dim)
{
__shared__ float* chosen_exp_preds[MAX_K*MAX_BATCH_SIZE];
  // Get pred pointers, single thread per block
if(threadIdx.x == 0) {
int expert_idx[MAX_N] = {0};
for(int i = 0; i < batch_size; i++) {
for(int j = 0; j < k; j++) {
// Get pointer to chosen expert predictions
int expert = exp_assign[i*k+j];
if(expert_idx[expert] >= exp_samples) {
// dropped sample
chosen_exp_preds[i*k+j] = 0;
continue;
}
chosen_exp_preds[i*k+j] = exp_preds[expert] + expert_idx[expert]*out_dim;
expert_idx[expert]++;
}
}
}
// set output tensor to 0
CUDA_KERNEL_LOOP(i, batch_size*out_dim)
{
output[i] = 0.0f;
}
__syncthreads();
// compute output
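  // Each index i is one (sample, chosen-expert, output-feature) element; out_id maps it onto
  // the sample's row of the output, and the expert prediction is accumulated there weighted
  // by its gate score (atomicAdd, since the k chosen experts share the same row).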
CUDA_KERNEL_LOOP(i, k*out_dim*batch_size)
{
if(chosen_exp_preds[i/out_dim] != 0) {
float res = gate_net_preds[i/out_dim] * chosen_exp_preds[i/out_dim][i%(out_dim)];
int out_id = (i/(k*out_dim))*out_dim + (i%out_dim);
atomicAdd(output+out_id, res);
}
}
}
__device__
void agg_backward_kernel_gate(const float* output_grad,
float* full_gate_grads,
float** exp_preds,
const int* expert_assign,
const bool* cache_corr,
int* expert_bal, float lambda_bal,
int batch_size, int k, int n, int out_dim)
{
// gate gradient
CUDA_KERNEL_LOOP(i, batch_size*k*out_dim)
{
if (exp_preds[i/out_dim] != 0 && cache_corr[i/(k*out_dim)]) {
int out_id = (i/(k*out_dim))*out_dim + (i%out_dim);
float res = output_grad[out_id] * exp_preds[i/out_dim][i%out_dim];
float* gate_grad_idx = full_gate_grads + (i/(out_dim*k))*n
+ expert_assign[(i/(out_dim*k))*k+(i/out_dim)%k];
atomicAdd(gate_grad_idx, res);
}
}
// balance term
CUDA_KERNEL_LOOP(i, n*batch_size)
{
atomicAdd(full_gate_grads+i, lambda_bal*expert_bal[i%n]);
}
__syncthreads();
// make 0 mean
CUDA_KERNEL_LOOP(i, batch_size*n)
{
int start = (i/n)*n;
float sub = -full_gate_grads[i]/n;
for(int j = 0; j < n; j++) {
atomicAdd(full_gate_grads+start+j, sub);
}
}
}
__device__
void agg_backward_kernel_exp(const float* output_grad,
const float* gate_preds,
float** exp_grads,
int batch_size,
int k,
int out_dim) {
// compute expert gradients
CUDA_KERNEL_LOOP(i, k*out_dim*batch_size)
{
if (exp_grads[i/out_dim] != 0) {
int out_id = (i/(k*out_dim))*out_dim + (i%out_dim);
exp_grads[i/out_dim][i%out_dim] += gate_preds[i/out_dim] * output_grad[out_id];
}
}
}
__global__
void agg_backward_kernel(float** exp_preds,
float** exp_grads,
const int* exp_assign,
const int* true_exp_assign,
const float* gating_net_preds,
float* full_gating_grads,
const float* output_grads,
int n, // num experts
int k, // num chosen experts
int exp_samples, // max samples per expert
float lambda_bal,
int batch_size,
int out_dim)
{
__shared__ float* chosen_exp_preds[MAX_K*MAX_BATCH_SIZE];
__shared__ float* chosen_exp_grads[MAX_K*MAX_BATCH_SIZE];
__shared__ int expert_bal[MAX_N];
__shared__ bool cache_corr[MAX_BATCH_SIZE];
// Get pred pointers, single thread per block
if(threadIdx.x == 0) {
// init arrays
for(int i = 0; i < n; i++) expert_bal[i] = 0;
for(int i = 0; i < batch_size; i++) cache_corr[i] = true;
// Get pointer to chosen expert predictions and expert counts
for(int i = 0; i < batch_size; i++) {
for(int j = 0; j < k; j++) {
int expert = true_exp_assign[k*i + j];
if(expert != exp_assign[k*i + j])
cache_corr[i] = false;
if(expert_bal[expert] >= exp_samples) {
// dropped sample
chosen_exp_preds[i*k+j] = 0;
chosen_exp_grads[i*k+j] = 0;
expert_bal[expert]++;
continue;
}
chosen_exp_preds[i*k+j] = exp_preds[expert] + expert_bal[expert]*out_dim;
chosen_exp_grads[i*k+j] = exp_grads[expert] + expert_bal[expert]*out_dim;
expert_bal[expert]++;
}
}
}
__syncthreads();
// FIXME: These 2 functions could execute independently in parallel
// get expert gradients
agg_backward_kernel_exp(output_grads, gating_net_preds, chosen_exp_grads,
batch_size, k, out_dim);
// get gating net gradients
agg_backward_kernel_gate(output_grads, full_gating_grads, chosen_exp_preds,
exp_assign, cache_corr, expert_bal, (lambda_bal*n)/batch_size, batch_size,
k, n, out_dim);
}
void Aggregate::forward_task(const Task *task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
int n = ((Aggregate*)task->args)->n;
assert((int)regions.size() == n+3);
assert((int)task->regions.size() == n+3);
const AggregateMeta* m = *((AggregateMeta**)task->local_args);
// get gate_pred, gate_assign, output
const AccessorRO<float, 2> acc_gate_pred(regions[0], FID_DATA);
const AccessorRO<int, 2> acc_gate_assign(regions[1], FID_DATA);
const AccessorWO<float, 2> acc_output(regions[n+2], FID_DATA);
Rect<2> rect_gate_pred = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<2> rect_gate_assign = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<2> rect_output = runtime->get_index_space_domain(
ctx, task->regions[n+2].region.get_index_space());
coord_t batch_size = rect_gate_pred.hi[1] - rect_gate_pred.lo[1] + 1;
assert(batch_size == rect_gate_assign.hi[1] - rect_gate_assign.lo[1] + 1);
assert(rect_gate_pred.hi[0] - rect_gate_pred.lo[0] == rect_gate_assign.hi[0] - rect_gate_assign.lo[0]);
assert(batch_size == rect_output.hi[1] - rect_output.lo[1] + 1);
coord_t out_dim = rect_output.hi[0] - rect_output.lo[0] + 1;
// get exp_preds
float* exp_preds[n];
// get first exp_pred and row and out_dim
Domain exp_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
exp_preds[0] = helperGetTensorPointerWO<float>(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
coord_t rows = exp_domain.hi()[1] - exp_domain.lo()[1] + 1;
assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1);
for(int i = 1; i < n; i++) {
exp_domain = runtime->get_index_space_domain(
ctx, task->regions[i+2].region.get_index_space());
exp_preds[i] = helperGetTensorPointerWO<float>(
regions[i+2], task->regions[i+2], FID_DATA, ctx, runtime);
assert(rows == exp_domain.hi()[1] - exp_domain.lo()[1] + 1);
assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1);
}
int k = (int)(rect_gate_assign.hi[0] - rect_gate_assign.lo[0] + 1);
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
checkCUDA(cublasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
// call forward_kernel
cudaMemcpy(m->dev_exp_preds, exp_preds, n*sizeof(float*), cudaMemcpyHostToDevice);
agg_forward_kernel<<<GET_BLOCKS(batch_size*k*out_dim), min(CUDA_NUM_THREADS,(int)(batch_size*k*out_dim)), 0, stream>>>(
m->dev_exp_preds, acc_gate_assign.ptr(rect_gate_assign), acc_gate_pred.ptr(rect_gate_pred),
acc_output.ptr(rect_output), n, k, rows, batch_size, out_dim);
}
void Aggregate::backward_task(const Task *task,
const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime)
{
const AggregateMeta* m = *((AggregateMeta**)task->local_args);
int n = ((Aggregate*)task->args)->n;
float lambda_bal = ((Aggregate*)task->args)->lambda_bal;
assert((int)regions.size() == 2*n+5);
assert((int)task->regions.size() == 2*n+5);
// get gate_pred, gate_grad, gate_assign, output_grad
const AccessorRO<float, 2> acc_gate_pred(regions[0], FID_DATA);
const AccessorRO<int, 2> acc_gate_assign(regions[1], FID_DATA);
const AccessorRO<int, 2> acc_true_gate_assign(regions[2], FID_DATA);
const AccessorWO<float, 2> full_acc_gate_grad(regions[3], FID_DATA);
const AccessorRO<float, 2> acc_output_grad(regions[2*n+4], FID_DATA);
Rect<2> rect_gate_pred = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<2> rect_gate_assign = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<2> rect_true_gate_assign = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
Rect<2> rect_full_gate_grad = runtime->get_index_space_domain(
ctx, task->regions[3].region.get_index_space());
Rect<2> rect_out_grad = runtime->get_index_space_domain(
ctx, task->regions[2*n+4].region.get_index_space());
coord_t batch_size = rect_gate_pred.hi[1] - rect_gate_pred.lo[1] + 1;
assert(batch_size == rect_gate_assign.hi[1] - rect_gate_assign.lo[1] + 1);
assert(rect_gate_assign == rect_true_gate_assign);
assert(batch_size == rect_out_grad.hi[1] - rect_out_grad.lo[1] + 1);
assert(batch_size == rect_full_gate_grad.hi[1] - rect_full_gate_grad.lo[1] + 1);
coord_t k = rect_gate_assign.hi[0] - rect_gate_assign.lo[0] + 1;
assert(rect_gate_pred.hi[0] - rect_gate_pred.lo[0] + 1 == k);
coord_t out_dim = rect_out_grad.hi[0] - rect_out_grad.lo[0] + 1;
assert(n == rect_full_gate_grad.hi[0] - rect_full_gate_grad.lo[0] + 1);
// get exp_preds
float* exp_preds[n];
// get first exp_pred and row
Domain exp_domain = runtime->get_index_space_domain(
ctx, task->regions[4].region.get_index_space());
exp_preds[0] = helperGetTensorPointerRW<float>(
regions[4], task->regions[4], FID_DATA, ctx, runtime);
coord_t rows = exp_domain.hi()[1] - exp_domain.lo()[1] + 1;
assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1);
for(int i = 1; i < n; i++) {
exp_domain = runtime->get_index_space_domain(
ctx, task->regions[i+4].region.get_index_space());
exp_preds[i] = helperGetTensorPointerRW<float>(
regions[i+4], task->regions[i+4], FID_DATA, ctx, runtime);
assert(rows == exp_domain.hi()[1] - exp_domain.lo()[1] + 1);
assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1);
}
// get chosen_exp_grads
float* exp_grads[n];
for(int i = 0; i < n; i++) {
exp_domain = runtime->get_index_space_domain(
ctx, task->regions[n+i+4].region.get_index_space());
exp_grads[i] = helperGetTensorPointerRW<float>(
regions[n+i+4], task->regions[n+i+4], FID_DATA, ctx, runtime);
assert(rows == exp_domain.hi()[1] - exp_domain.lo()[1] + 1);
assert(out_dim == exp_domain.hi()[0] - exp_domain.lo()[0] + 1);
}
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
checkCUDA(cublasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
// call backward kernel
cudaMemcpy(m->dev_exp_preds, exp_preds, n*sizeof(float*), cudaMemcpyHostToDevice);
cudaMemcpy(m->dev_exp_grads, exp_grads, n*sizeof(float*), cudaMemcpyHostToDevice);
agg_backward_kernel<<<GET_BLOCKS(batch_size*k*out_dim), min(CUDA_NUM_THREADS,(int)(batch_size*k*out_dim)), 0, stream>>>(
m->dev_exp_preds, m->dev_exp_grads, acc_gate_assign.ptr(rect_gate_assign),
acc_true_gate_assign.ptr(rect_true_gate_assign), acc_gate_pred.ptr(rect_gate_pred),
full_acc_gate_grad.ptr(rect_full_gate_grad), acc_output_grad.ptr(rect_out_grad),
n, k, rows, lambda_bal, batch_size, out_dim);
}
void Aggregate::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
OpMeta* mp = meta[idx++]; \
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(AGGREGATE_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Aggregate)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// gate_preds
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// gate_assign
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[1].region));
launcher.add_field(1, FID_DATA);
// exp_preds
for(int i = 0; i < n; i++) {
launcher.add_region_requirement(
RegionRequirement(input_lps[i+4], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[i+4].region));
launcher.add_field(i+2, FID_DATA);
}
// output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(n+2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
void Aggregate::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
OpMeta* mp = meta[idx++]; \
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(AGGREGATE_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Aggregate)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// gate_preds
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// gate_assign
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(1, FID_DATA);
// true gate_assign
launcher.add_region_requirement(
RegionRequirement(input_lps[2], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[2].region));
launcher.add_field(2, FID_DATA);
// full_gate gradients
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[3], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[3].region_grad));
launcher.add_field(3, FID_DATA);
// exp_preds
for(int i = 0; i < n; i++) {
launcher.add_region_requirement(
RegionRequirement(input_lps[i+4], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[i+4].region));
launcher.add_field(i+4, FID_DATA);
}
// exp_preds gradients
for(int i = 0; i < n; i++) {
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[i+4], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[i+4].region_grad));
launcher.add_field(i+n+4, FID_DATA);
}
// output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(2*n+4, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
AggregateMeta::AggregateMeta(FFHandler handler, int n)
: OpMeta(handler)
{
checkCUDA(cudaMalloc(&dev_exp_preds, n*sizeof(float*)));
checkCUDA(cudaMalloc(&dev_exp_grads, n*sizeof(float*)));
}
AggregateMeta::~AggregateMeta(void)
{
  checkCUDA(cudaFree(dev_exp_preds));   // free the device arrays themselves, not the addresses of the member pointers
  checkCUDA(cudaFree(dev_exp_grads));
}
bool Aggregate::measure_operator_cost(Simulator* sim,
const ParallelConfig& pc,
CostMetrics& cost_metrics)
{
//TODO: implement
cost_metrics.forward_time = 0.0f;
cost_metrics.backward_time = 0.0f;
cost_metrics.memory_requirement = 0;
return false;
}
|
9edac379485378956f28e474a0247a2593aa61d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>, created on 16.01.2019
//
#include <loops/special_kernels.h>
namespace sd {
static Nd4jLong __device__ __noinline__ getIndexOffset_(Nd4jLong index, Nd4jLong *shapeInfo) {
return shape::getIndexOffset(index, shapeInfo);
}
static Nd4jLong __device__ __noinline__ subArrayOffset(Nd4jLong index, Nd4jLong *shapeInfoA, Nd4jLong *shapeInfoB) {
return shape::subArrayOffset(index, shapeInfoA, shapeInfoB);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// tileKernel:
// input: (inputBuffer and inputShape) - NDArray buffer and shape to tile
// output: (outputBuffer and outputShape) - NDArray to tile input
// resultLength - length for output array
template<typename T>
static __global__ void
tileKernel(void const *inputBuffer, Nd4jLong *inputShape, void *outputBuffer, Nd4jLong *outputShape,
Nd4jLong resultLength) {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
        // Original CPU-side tiling logic, transformed into a CUDA-based kernel
auto tid = blockIdx.x * blockDim.x + threadIdx.x; // copy linear sequence of elements, so one-level threading
int totalThreads = gridDim.x * blockDim.x;
if (shape::order(outputShape) == 'c') { // ews == 1 always here
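            // Output is 'c'-ordered with ews == 1, so the loop counter i is already the linear
            // offset into the output buffer; only the matching input offset needs to be computed.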
for (int i = tid; i < resultLength; i += totalThreads) {
auto yOffset = subArrayOffset(i, outputShape, inputShape);
*(reinterpret_cast<T *>(outputBuffer) + i) = *(reinterpret_cast<T const *>(inputBuffer) + yOffset);
}
} else {
for (int i = tid; i < resultLength; i += totalThreads) {
auto xOffset = getIndexOffset_(i, outputShape);
auto yOffset = subArrayOffset(i, outputShape, inputShape);
*(reinterpret_cast<T *>(outputBuffer) + xOffset) = *(reinterpret_cast<T const *>(inputBuffer) + yOffset);
}
}
}
BUILD_SINGLE_TEMPLATE(template __global__ void tileKernel,(void const* inputBuffer, Nd4jLong* inputShape, void* outputBuffer, Nd4jLong* outputShape, Nd4jLong resultLength), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename T>
void tileKernelH(void const *inputBuffer, Nd4jLong *inputShape, void *outputBuffer, Nd4jLong *outputShape, Nd4jLong resultLength, hipStream_t *stream) {
dim3 launchDims(256, 512, 8192);
tileKernel<T> << < launchDims.x, launchDims.y, launchDims.z, *stream>>>(inputBuffer, inputShape, outputBuffer, outputShape, resultLength);
}
BUILD_SINGLE_TEMPLATE(template void tileKernelH, (void const* inputBuffer, Nd4jLong* inputShape, void* outputBuffer, Nd4jLong* outputShape, Nd4jLong resultLength, hipStream_t *stream), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// enhancement of tileKernel to support different input and output data types: X - output type, Y - input type
template<typename X, typename Y>
static __global__ void
tileKernelDouble(void const *inputBuffer, Nd4jLong *inputShape, void *outputBuffer, Nd4jLong *outputShape, Nd4jLong resultLength, Nd4jLong ews) {
char ordering = shape::order(outputShape);
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
if (ordering == 'c' && ews == 1) { // contiguous case: ews == 1
for (int i = tid; i < resultLength; i += totalThreads) {
auto yOffset = subArrayOffset(i, outputShape, inputShape);
*(reinterpret_cast<X *>(outputBuffer) + i) = static_cast<X>(*(reinterpret_cast<Y const *>(inputBuffer) + yOffset));
}
} else if (ordering == 'c' && ews > 1) {
for (int i = tid; i < resultLength; i += totalThreads) {
auto yOffset = subArrayOffset(i, outputShape, inputShape);
*(reinterpret_cast<X *>(outputBuffer) + i * ews) = static_cast<X>(*(reinterpret_cast<Y const *>(inputBuffer) + yOffset));
}
} else {
for (int i = tid; i < resultLength; i += totalThreads) {
auto xOffset = getIndexOffset_(i, outputShape);
auto yOffset = subArrayOffset(i, outputShape, inputShape);
*(reinterpret_cast<X *>(outputBuffer) + xOffset) = static_cast<X>(*(reinterpret_cast<Y const *>(inputBuffer) + yOffset));
}
}
}
BUILD_SINGLE_TEMPLATE_TWICE(template __global__ void tileKernelDouble, (void const* inputBuffer, Nd4jLong* inputShape, void* outputBuffer, Nd4jLong* outputShape, Nd4jLong resultLength, Nd4jLong ews), LIBND4J_TYPES);
template<typename X, typename Y>
void tileKernelHH(void const *inputBuffer, Nd4jLong *inputShape, void *outputBuffer, Nd4jLong *outputShape, Nd4jLong resultLength, Nd4jLong ews, hipStream_t *stream) {
dim3 launchDims(256, 512, 8192);
hipLaunchKernelGGL(( tileKernelDouble<X, Y>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, inputBuffer, inputShape, outputBuffer, outputShape, resultLength, ews);
}
BUILD_SINGLE_TEMPLATE_TWICE(template void tileKernelHH, (void const* inputBuffer, Nd4jLong* inputShape, void* outputBuffer, Nd4jLong* outputShape, Nd4jLong resultLength, Nd4jLong ews, hipStream_t *stream),LIBND4J_TYPES);
}
|
9edac379485378956f28e474a0247a2593aa61d6.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>, created on 16.01.2019
//
#include <loops/special_kernels.h>
namespace sd {
static Nd4jLong __device__ __noinline__ getIndexOffset_(Nd4jLong index, Nd4jLong *shapeInfo) {
return shape::getIndexOffset(index, shapeInfo);
}
static Nd4jLong __device__ __noinline__ subArrayOffset(Nd4jLong index, Nd4jLong *shapeInfoA, Nd4jLong *shapeInfoB) {
return shape::subArrayOffset(index, shapeInfoA, shapeInfoB);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// tileKernel:
// input: (inputBuffer and inputShape) - NDArray buffer and shape to tile
// output: (outputBuffer and outputShape) - NDArray to tile input
// resultLength - length for output array
template<typename T>
static __global__ void
tileKernel(void const *inputBuffer, Nd4jLong *inputShape, void *outputBuffer, Nd4jLong *outputShape,
Nd4jLong resultLength) {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Original code, transformed into a CUDA-based kernel
auto tid = blockIdx.x * blockDim.x + threadIdx.x; // copy linear sequence of elements, so one-level threading
int totalThreads = gridDim.x * blockDim.x;
if (shape::order(outputShape) == 'c') { // ews == 1 always here
for (int i = tid; i < resultLength; i += totalThreads) {
auto yOffset = subArrayOffset(i, outputShape, inputShape);
*(reinterpret_cast<T *>(outputBuffer) + i) = *(reinterpret_cast<T const *>(inputBuffer) + yOffset);
}
} else {
for (int i = tid; i < resultLength; i += totalThreads) {
auto xOffset = getIndexOffset_(i, outputShape);
auto yOffset = subArrayOffset(i, outputShape, inputShape);
*(reinterpret_cast<T *>(outputBuffer) + xOffset) = *(reinterpret_cast<T const *>(inputBuffer) + yOffset);
}
}
}
BUILD_SINGLE_TEMPLATE(template __global__ void tileKernel,(void const* inputBuffer, Nd4jLong* inputShape, void* outputBuffer, Nd4jLong* outputShape, Nd4jLong resultLength), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename T>
void tileKernelH(void const *inputBuffer, Nd4jLong *inputShape, void *outputBuffer, Nd4jLong *outputShape, Nd4jLong resultLength, cudaStream_t *stream) {
dim3 launchDims(256, 512, 8192);
tileKernel<T> << < launchDims.x, launchDims.y, launchDims.z, *stream>>>(inputBuffer, inputShape, outputBuffer, outputShape, resultLength);
}
BUILD_SINGLE_TEMPLATE(template void tileKernelH, (void const* inputBuffer, Nd4jLong* inputShape, void* outputBuffer, Nd4jLong* outputShape, Nd4jLong resultLength, cudaStream_t *stream), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// enhancement of tileKernel to support different input and output data types: X - output type, Y - input type
template<typename X, typename Y>
static __global__ void
tileKernelDouble(void const *inputBuffer, Nd4jLong *inputShape, void *outputBuffer, Nd4jLong *outputShape, Nd4jLong resultLength, Nd4jLong ews) {
char ordering = shape::order(outputShape);
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
if (ordering == 'c' && ews == 1) { // contiguous case: ews == 1
for (int i = tid; i < resultLength; i += totalThreads) {
auto yOffset = subArrayOffset(i, outputShape, inputShape);
*(reinterpret_cast<X *>(outputBuffer) + i) = static_cast<X>(*(reinterpret_cast<Y const *>(inputBuffer) + yOffset));
}
} else if (ordering == 'c' && ews > 1) {
for (int i = tid; i < resultLength; i += totalThreads) {
auto yOffset = subArrayOffset(i, outputShape, inputShape);
*(reinterpret_cast<X *>(outputBuffer) + i * ews) = static_cast<X>(*(reinterpret_cast<Y const *>(inputBuffer) + yOffset));
}
} else {
for (int i = tid; i < resultLength; i += totalThreads) {
auto xOffset = getIndexOffset_(i, outputShape);
auto yOffset = subArrayOffset(i, outputShape, inputShape);
*(reinterpret_cast<X *>(outputBuffer) + xOffset) = static_cast<X>(*(reinterpret_cast<Y const *>(inputBuffer) + yOffset));
}
}
}
BUILD_SINGLE_TEMPLATE_TWICE(template __global__ void tileKernelDouble, (void const* inputBuffer, Nd4jLong* inputShape, void* outputBuffer, Nd4jLong* outputShape, Nd4jLong resultLength, Nd4jLong ews), LIBND4J_TYPES);
template<typename X, typename Y>
void tileKernelHH(void const *inputBuffer, Nd4jLong *inputShape, void *outputBuffer, Nd4jLong *outputShape, Nd4jLong resultLength, Nd4jLong ews, cudaStream_t *stream) {
dim3 launchDims(256, 512, 8192);
tileKernelDouble<X, Y><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(inputBuffer, inputShape, outputBuffer, outputShape, resultLength, ews);
}
BUILD_SINGLE_TEMPLATE_TWICE(template void tileKernelHH, (void const* inputBuffer, Nd4jLong* inputShape, void* outputBuffer, Nd4jLong* outputShape, Nd4jLong resultLength, Nd4jLong ews, cudaStream_t *stream),LIBND4J_TYPES);
}
|
651be4b1361dc23cf3d18186d9dd7febd6d78c6b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cudaDSaturation_backPropagate_kernel(double* x, double* dx, unsigned int size, int shifting, double threshold)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride) {
if (shifting > 0)
dx[i] /= (1 << shifting);
else if (shifting < 0)
dx[i] *= (1 << (-shifting));
if (threshold != 0.0) {
dx[i] *= (x[i] > -threshold && x[i] < threshold)
? 1.0 : 0.0;
}
}
}
|
651be4b1361dc23cf3d18186d9dd7febd6d78c6b.cu
|
#include "includes.h"
__global__ void cudaDSaturation_backPropagate_kernel(double* x, double* dx, unsigned int size, int shifting, double threshold)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride) {
if (shifting > 0)
dx[i] /= (1 << shifting);
else if (shifting < 0)
dx[i] *= (1 << (-shifting));
if (threshold != 0.0) {
dx[i] *= (x[i] > -threshold && x[i] < threshold)
? 1.0 : 0.0;
}
}
}
|
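A minimal host-side sketch of how a grid-stride kernel like cudaDSaturation_backPropagate_kernel is typically launched; it is not taken from either file above. The element count, launch configuration, and the shifting/threshold values are assumptions, error checking is omitted, and the sketch assumes it is compiled with, or linked against, the kernel definition shown above.
#include <cuda_runtime.h>
#include <vector>
#include <cstdio>
// Forward declaration; the definition is the kernel shown above (assumed available at link time).
__global__ void cudaDSaturation_backPropagate_kernel(double* x, double* dx, unsigned int size, int shifting, double threshold);
int main() {
    const unsigned int size = 1u << 20;                 // assumed element count
    std::vector<double> hX(size, 0.5), hDx(size, 1.0);  // made-up activations and incoming gradients
    double *dX = nullptr, *dDx = nullptr;
    cudaMalloc(&dX, size * sizeof(double));
    cudaMalloc(&dDx, size * sizeof(double));
    cudaMemcpy(dX, hX.data(), size * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dDx, hDx.data(), size * sizeof(double), cudaMemcpyHostToDevice);
    // A grid-stride kernel accepts any grid size; 256 threads per block is a common default.
    const unsigned int threads = 256;
    const unsigned int blocks = (size + threads - 1) / threads;
    cudaDSaturation_backPropagate_kernel<<<blocks, threads>>>(dX, dDx, size, /*shifting=*/1, /*threshold=*/0.9);
    cudaDeviceSynchronize();
    cudaMemcpy(hDx.data(), dDx, size * sizeof(double), cudaMemcpyDeviceToHost);
    printf("dx[0] after backprop: %f\n", hDx[0]);       // 1.0 / 2 = 0.5 with these inputs
    cudaFree(dX);
    cudaFree(dDx);
    return 0;
}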
84a8946d3553f2a9946252ff1d984b7fe8f00ea0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_uvector.hpp>
#include <raft/handle.hpp>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/path_retrieval.hpp>
namespace cugraph {
namespace detail {
template <typename vertex_t, typename weight_t>
__global__ void get_traversed_cost_kernel(vertex_t const* vertices,
vertex_t const* preds,
vertex_t const* vtx_map,
weight_t const* info_weights,
weight_t* out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
for (vertex_t i = threadIdx.x + blockIdx.x * blockDim.x; i < num_vertices;
i += gridDim.x * blockDim.x) {
weight_t sum = info_weights[i];
vertex_t pred = preds[i];
while (pred != stop_vertex) {
vertex_t pos = vtx_map[pred];
sum += info_weights[pos];
pred = preds[pos];
}
out[i] = sum;
}
}
template <typename vertex_t, typename weight_t>
void get_traversed_cost_impl(raft::handle_t const& handle,
vertex_t const* vertices,
vertex_t const* preds,
weight_t const* info_weights,
weight_t* out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
auto stream = handle.get_stream();
vertex_t max_blocks = handle.get_device_properties().maxGridSize[0];
vertex_t max_threads = handle.get_device_properties().maxThreadsPerBlock;
dim3 nthreads, nblocks;
nthreads.x = std::min<vertex_t>(num_vertices, max_threads);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = std::min<vertex_t>((num_vertices + nthreads.x - 1) / nthreads.x, max_blocks);
nblocks.y = 1;
nblocks.z = 1;
rmm::device_uvector<vertex_t> vtx_map_v(num_vertices, stream);
rmm::device_uvector<vertex_t> vtx_keys_v(num_vertices, stream);
vertex_t* vtx_map = vtx_map_v.data();
vertex_t* vtx_keys = vtx_keys_v.data();
raft::copy(vtx_keys, vertices, num_vertices, stream);
thrust::sequence(rmm::exec_policy(stream)->on(stream), vtx_map, vtx_map + num_vertices);
thrust::stable_sort_by_key(
rmm::exec_policy(stream)->on(stream), vtx_keys, vtx_keys + num_vertices, vtx_map);
hipLaunchKernelGGL(( get_traversed_cost_kernel), dim3(nblocks), dim3(nthreads), 0, 0,
vertices, preds, vtx_map, info_weights, out, stop_vertex, num_vertices);
}
} // namespace detail
template <typename vertex_t, typename weight_t>
void get_traversed_cost(raft::handle_t const& handle,
vertex_t const* vertices,
vertex_t const* preds,
weight_t const* info_weights,
weight_t* out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
CUGRAPH_EXPECTS(num_vertices > 0, "num_vertices should be strictly positive");
CUGRAPH_EXPECTS(out != nullptr, "out should be of size num_vertices");
cugraph::detail::get_traversed_cost_impl(
handle, vertices, preds, info_weights, out, stop_vertex, num_vertices);
}
template void get_traversed_cost<int32_t, float>(raft::handle_t const& handle,
int32_t const* vertices,
int32_t const* preds,
float const* info_weights,
float* out,
int32_t stop_vertex,
int32_t num_vertices);
template void get_traversed_cost<int32_t, double>(raft::handle_t const& handle,
int32_t const* vertices,
int32_t const* preds,
double const* info_weights,
double* out,
int32_t stop_vertex,
int32_t num_vertices);
template void get_traversed_cost<int64_t, float>(raft::handle_t const& handle,
int64_t const* vertices,
int64_t const* preds,
float const* info_weights,
float* out,
int64_t stop_vertex,
int64_t num_vertices);
template void get_traversed_cost<int64_t, double>(raft::handle_t const& handle,
int64_t const* vertices,
int64_t const* preds,
double const* info_weights,
double* out,
int64_t stop_vertex,
int64_t num_vertices);
} // namespace cugraph
|
84a8946d3553f2a9946252ff1d984b7fe8f00ea0.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_uvector.hpp>
#include <raft/handle.hpp>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/path_retrieval.hpp>
namespace cugraph {
namespace detail {
template <typename vertex_t, typename weight_t>
__global__ void get_traversed_cost_kernel(vertex_t const* vertices,
vertex_t const* preds,
vertex_t const* vtx_map,
weight_t const* info_weights,
weight_t* out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
for (vertex_t i = threadIdx.x + blockIdx.x * blockDim.x; i < num_vertices;
i += gridDim.x * blockDim.x) {
weight_t sum = info_weights[i];
vertex_t pred = preds[i];
while (pred != stop_vertex) {
vertex_t pos = vtx_map[pred];
sum += info_weights[pos];
pred = preds[pos];
}
out[i] = sum;
}
}
template <typename vertex_t, typename weight_t>
void get_traversed_cost_impl(raft::handle_t const& handle,
vertex_t const* vertices,
vertex_t const* preds,
weight_t const* info_weights,
weight_t* out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
auto stream = handle.get_stream();
vertex_t max_blocks = handle.get_device_properties().maxGridSize[0];
vertex_t max_threads = handle.get_device_properties().maxThreadsPerBlock;
dim3 nthreads, nblocks;
nthreads.x = std::min<vertex_t>(num_vertices, max_threads);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = std::min<vertex_t>((num_vertices + nthreads.x - 1) / nthreads.x, max_blocks);
nblocks.y = 1;
nblocks.z = 1;
rmm::device_uvector<vertex_t> vtx_map_v(num_vertices, stream);
rmm::device_uvector<vertex_t> vtx_keys_v(num_vertices, stream);
vertex_t* vtx_map = vtx_map_v.data();
vertex_t* vtx_keys = vtx_keys_v.data();
raft::copy(vtx_keys, vertices, num_vertices, stream);
thrust::sequence(rmm::exec_policy(stream)->on(stream), vtx_map, vtx_map + num_vertices);
thrust::stable_sort_by_key(
rmm::exec_policy(stream)->on(stream), vtx_keys, vtx_keys + num_vertices, vtx_map);
get_traversed_cost_kernel<<<nblocks, nthreads>>>(
vertices, preds, vtx_map, info_weights, out, stop_vertex, num_vertices);
}
} // namespace detail
template <typename vertex_t, typename weight_t>
void get_traversed_cost(raft::handle_t const& handle,
vertex_t const* vertices,
vertex_t const* preds,
weight_t const* info_weights,
weight_t* out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
CUGRAPH_EXPECTS(num_vertices > 0, "num_vertices should be strictly positive");
CUGRAPH_EXPECTS(out != nullptr, "out should be of size num_vertices");
cugraph::detail::get_traversed_cost_impl(
handle, vertices, preds, info_weights, out, stop_vertex, num_vertices);
}
template void get_traversed_cost<int32_t, float>(raft::handle_t const& handle,
int32_t const* vertices,
int32_t const* preds,
float const* info_weights,
float* out,
int32_t stop_vertex,
int32_t num_vertices);
template void get_traversed_cost<int32_t, double>(raft::handle_t const& handle,
int32_t const* vertices,
int32_t const* preds,
double const* info_weights,
double* out,
int32_t stop_vertex,
int32_t num_vertices);
template void get_traversed_cost<int64_t, float>(raft::handle_t const& handle,
int64_t const* vertices,
int64_t const* preds,
float const* info_weights,
float* out,
int64_t stop_vertex,
int64_t num_vertices);
template void get_traversed_cost<int64_t, double>(raft::handle_t const& handle,
int64_t const* vertices,
int64_t const* preds,
double const* info_weights,
double* out,
int64_t stop_vertex,
int64_t num_vertices);
} // namespace cugraph
|
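The predecessor walk inside get_traversed_cost_kernel is easier to see on a toy input. Below is a host-only sketch of the same accumulation using made-up data (a 4-vertex path whose root's predecessor is stop_vertex = -1); it is an illustration, not code from either file above, and it does not touch the cugraph or raft APIs.
#include <cstdio>
int main() {
  const int num_vertices = 4;
  const int stop_vertex  = -1;
  // Vertex 0 is the root (its predecessor is stop_vertex) and vertex i's predecessor is i-1.
  // The vertex ids are already sorted, so the sorted-position map vtx_map is the identity.
  int    vtx_map[num_vertices]      = {0, 1, 2, 3};
  int    preds[num_vertices]        = {stop_vertex, 0, 1, 2};
  double info_weights[num_vertices] = {1.0, 2.0, 3.0, 4.0};
  double out[num_vertices];
  for (int i = 0; i < num_vertices; ++i) {   // the kernel assigns one GPU thread per vertex
    double sum = info_weights[i];
    int pred   = preds[i];
    while (pred != stop_vertex) {            // walk the predecessor chain up to the root
      int pos = vtx_map[pred];
      sum += info_weights[pos];
      pred = preds[pos];
    }
    out[i] = sum;
  }
  for (int i = 0; i < num_vertices; ++i)
    printf("vertex %d: accumulated cost %.1f\n", i, out[i]);  // prints 1.0, 3.0, 6.0, 10.0
  return 0;
}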
7885f1ff44c92d04d56635df5b871d326019431a.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_utils.cuh>
#include <metrics/dispersion.cuh>
#include <random/rng.cuh>
#include <vector>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
template <typename T>
struct DispersionInputs {
T tolerance;
int dim, clusters;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os,
const DispersionInputs<T> &dims) {
return os;
}
template <typename T>
class DispersionTest : public ::testing::TestWithParam<DispersionInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<DispersionInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.clusters * params.dim;
CUDA_CHECK(hipStreamCreate(&stream));
allocator.reset(new raft::mr::device::default_allocator);
allocate(data, len);
allocate(counts, params.clusters);
allocate(exp_mean, params.dim);
allocate(act_mean, params.dim);
r.uniform(data, len, (T)-1.0, (T)1.0, stream);
r.uniformInt(counts, params.clusters, 1, 100, stream);
std::vector<int> h_counts(params.clusters, 0);
updateHost(&(h_counts[0]), counts, params.clusters, stream);
npoints = 0;
for (const auto &val : h_counts) {
npoints += val;
}
actualVal = dispersion(data, counts, act_mean, params.clusters, npoints,
params.dim, allocator, stream);
expectedVal = T(0);
std::vector<T> h_data(len, T(0));
updateHost(&(h_data[0]), data, len, stream);
std::vector<T> mean(params.dim, T(0));
for (int i = 0; i < params.clusters; ++i) {
for (int j = 0; j < params.dim; ++j) {
mean[j] += h_data[i * params.dim + j] * T(h_counts[i]);
}
}
for (int i = 0; i < params.dim; ++i) {
mean[i] /= T(npoints);
}
updateDevice(exp_mean, &(mean[0]), params.dim, stream);
for (int i = 0; i < params.clusters; ++i) {
for (int j = 0; j < params.dim; ++j) {
auto diff = h_data[i * params.dim + j] - mean[j];
expectedVal += diff * diff * T(h_counts[i]);
}
}
expectedVal = sqrt(expectedVal);
CUDA_CHECK(hipStreamSynchronize(stream));
}
void TearDown() override {
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(counts));
CUDA_CHECK(hipFree(exp_mean));
CUDA_CHECK(hipFree(act_mean));
}
protected:
DispersionInputs<T> params;
T *data, *exp_mean, *act_mean;
int *counts;
hipStream_t stream;
int npoints;
std::shared_ptr<deviceAllocator> allocator;
T expectedVal, actualVal;
};
const std::vector<DispersionInputs<float>> inputsf = {
{0.001f, 10, 1000, 1234ULL},
{0.001f, 100, 100, 1234ULL},
{0.001f, 1000, 1000, 1234ULL}};
typedef DispersionTest<float> DispersionTestF;
TEST_P(DispersionTestF, Result) {
auto eq = CompareApprox<float>(params.tolerance);
ASSERT_TRUE(devArrMatch(exp_mean, act_mean, params.dim, eq));
ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(DispersionTests, DispersionTestF,
::testing::ValuesIn(inputsf));
const std::vector<DispersionInputs<double>> inputsd = {
{0.001, 10, 1000, 1234ULL},
{0.001, 100, 100, 1234ULL},
{0.001, 1000, 1000, 1234ULL}};
typedef DispersionTest<double> DispersionTestD;
TEST_P(DispersionTestD, Result) {
auto eq = CompareApprox<double>(params.tolerance);
ASSERT_TRUE(devArrMatch(exp_mean, act_mean, params.dim, eq));
ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(DispersionTests, DispersionTestD,
::testing::ValuesIn(inputsd));
} // end namespace Metrics
} // end namespace MLCommon
|
7885f1ff44c92d04d56635df5b871d326019431a.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_utils.cuh>
#include <metrics/dispersion.cuh>
#include <random/rng.cuh>
#include <vector>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
template <typename T>
struct DispersionInputs {
T tolerance;
int dim, clusters;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os,
const DispersionInputs<T> &dims) {
return os;
}
template <typename T>
class DispersionTest : public ::testing::TestWithParam<DispersionInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<DispersionInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.clusters * params.dim;
CUDA_CHECK(cudaStreamCreate(&stream));
allocator.reset(new raft::mr::device::default_allocator);
allocate(data, len);
allocate(counts, params.clusters);
allocate(exp_mean, params.dim);
allocate(act_mean, params.dim);
r.uniform(data, len, (T)-1.0, (T)1.0, stream);
r.uniformInt(counts, params.clusters, 1, 100, stream);
std::vector<int> h_counts(params.clusters, 0);
updateHost(&(h_counts[0]), counts, params.clusters, stream);
npoints = 0;
for (const auto &val : h_counts) {
npoints += val;
}
actualVal = dispersion(data, counts, act_mean, params.clusters, npoints,
params.dim, allocator, stream);
expectedVal = T(0);
std::vector<T> h_data(len, T(0));
updateHost(&(h_data[0]), data, len, stream);
std::vector<T> mean(params.dim, T(0));
for (int i = 0; i < params.clusters; ++i) {
for (int j = 0; j < params.dim; ++j) {
mean[j] += h_data[i * params.dim + j] * T(h_counts[i]);
}
}
for (int i = 0; i < params.dim; ++i) {
mean[i] /= T(npoints);
}
updateDevice(exp_mean, &(mean[0]), params.dim, stream);
for (int i = 0; i < params.clusters; ++i) {
for (int j = 0; j < params.dim; ++j) {
auto diff = h_data[i * params.dim + j] - mean[j];
expectedVal += diff * diff * T(h_counts[i]);
}
}
expectedVal = sqrt(expectedVal);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void TearDown() override {
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(counts));
CUDA_CHECK(cudaFree(exp_mean));
CUDA_CHECK(cudaFree(act_mean));
}
protected:
DispersionInputs<T> params;
T *data, *exp_mean, *act_mean;
int *counts;
cudaStream_t stream;
int npoints;
std::shared_ptr<deviceAllocator> allocator;
T expectedVal, actualVal;
};
const std::vector<DispersionInputs<float>> inputsf = {
{0.001f, 10, 1000, 1234ULL},
{0.001f, 100, 100, 1234ULL},
{0.001f, 1000, 1000, 1234ULL}};
typedef DispersionTest<float> DispersionTestF;
TEST_P(DispersionTestF, Result) {
auto eq = CompareApprox<float>(params.tolerance);
ASSERT_TRUE(devArrMatch(exp_mean, act_mean, params.dim, eq));
ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(DispersionTests, DispersionTestF,
::testing::ValuesIn(inputsf));
const std::vector<DispersionInputs<double>> inputsd = {
{0.001, 10, 1000, 1234ULL},
{0.001, 100, 100, 1234ULL},
{0.001, 1000, 1000, 1234ULL}};
typedef DispersionTest<double> DispersionTestD;
TEST_P(DispersionTestD, Result) {
auto eq = CompareApprox<double>(params.tolerance);
ASSERT_TRUE(devArrMatch(exp_mean, act_mean, params.dim, eq));
ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(DispersionTests, DispersionTestD,
::testing::ValuesIn(inputsd));
} // end namespace Metrics
} // end namespace MLCommon
|
9244a8b41d2685d5cdae365362148626b2bc67d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Modified from
// https://github.com/hszhao/semseg/blob/master/lib/psa/src
#include <THH/THH.h>
#include <torch/serialize/tensor.h>
#include <THH/THHDeviceUtils.cuh>
#include "psamask_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void PSAMaskForwardCUDAKernelLauncher(const int psa_type, const Tensor input,
Tensor output, const int num_,
const int h_feature, const int w_feature,
const int h_mask, const int w_mask,
const int half_h_mask,
const int half_w_mask) {
int nthreads = num_ * h_feature * w_feature;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (psa_type == 0)
AT_DISPATCH_FLOATING_TYPES(
input.scalar_type(), "psamask_collect_forward_cuda", [&] {
hipLaunchKernelGGL(( psamask_collect_forward_cuda<scalar_t>), dim3(nthreads), dim3(512), 0, stream,
nthreads, h_feature, w_feature, h_mask, w_mask, half_h_mask,
half_w_mask, input.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>());
});
else
AT_DISPATCH_FLOATING_TYPES(
input.scalar_type(), "psamask_distribute_forward_cuda", [&] {
hipLaunchKernelGGL(( psamask_distribute_forward_cuda<scalar_t>)
, dim3(nthreads), dim3(512), 0, stream,
nthreads, h_feature, w_feature, h_mask, w_mask, half_h_mask,
half_w_mask, input.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>());
});
}
void PSAMaskBackwardCUDAKernelLauncher(
const int psa_type, const Tensor grad_output, Tensor grad_input,
const int num_, const int h_feature, const int w_feature, const int h_mask,
const int w_mask, const int half_h_mask, const int half_w_mask) {
int nthreads = num_ * h_feature * w_feature;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (psa_type == 0)
AT_DISPATCH_FLOATING_TYPES(
grad_input.scalar_type(), "psamask_collect_backward_cuda", [&] {
hipLaunchKernelGGL(( psamask_collect_backward_cuda<scalar_t>), dim3(nthreads), dim3(512), 0, stream,
nthreads, h_feature, w_feature, h_mask, w_mask, half_h_mask,
half_w_mask, grad_output.data_ptr<scalar_t>(),
grad_input.data_ptr<scalar_t>());
});
else
AT_DISPATCH_FLOATING_TYPES(
grad_input.scalar_type(), "psamask_distribute_backward_cuda", [&] {
hipLaunchKernelGGL(( psamask_distribute_backward_cuda<scalar_t>)
, dim3(nthreads), dim3(512), 0, stream,
nthreads, h_feature, w_feature, h_mask, w_mask, half_h_mask,
half_w_mask, grad_output.data_ptr<scalar_t>(),
grad_input.data_ptr<scalar_t>());
});
}
|
9244a8b41d2685d5cdae365362148626b2bc67d6.cu
|
// Modified from
// https://github.com/hszhao/semseg/blob/master/lib/psa/src
#include <THC/THC.h>
#include <torch/serialize/tensor.h>
#include <THC/THCDeviceUtils.cuh>
#include "psamask_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void PSAMaskForwardCUDAKernelLauncher(const int psa_type, const Tensor input,
Tensor output, const int num_,
const int h_feature, const int w_feature,
const int h_mask, const int w_mask,
const int half_h_mask,
const int half_w_mask) {
int nthreads = num_ * h_feature * w_feature;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (psa_type == 0)
AT_DISPATCH_FLOATING_TYPES(
input.scalar_type(), "psamask_collect_forward_cuda", [&] {
psamask_collect_forward_cuda<scalar_t><<<nthreads, 512, 0, stream>>>(
nthreads, h_feature, w_feature, h_mask, w_mask, half_h_mask,
half_w_mask, input.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>());
});
else
AT_DISPATCH_FLOATING_TYPES(
input.scalar_type(), "psamask_distribute_forward_cuda", [&] {
psamask_distribute_forward_cuda<scalar_t>
<<<nthreads, 512, 0, stream>>>(
nthreads, h_feature, w_feature, h_mask, w_mask, half_h_mask,
half_w_mask, input.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>());
});
}
void PSAMaskBackwardCUDAKernelLauncher(
const int psa_type, const Tensor grad_output, Tensor grad_input,
const int num_, const int h_feature, const int w_feature, const int h_mask,
const int w_mask, const int half_h_mask, const int half_w_mask) {
int nthreads = num_ * h_feature * w_feature;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (psa_type == 0)
AT_DISPATCH_FLOATING_TYPES(
grad_input.scalar_type(), "psamask_collect_backward_cuda", [&] {
psamask_collect_backward_cuda<scalar_t><<<nthreads, 512, 0, stream>>>(
nthreads, h_feature, w_feature, h_mask, w_mask, half_h_mask,
half_w_mask, grad_output.data_ptr<scalar_t>(),
grad_input.data_ptr<scalar_t>());
});
else
AT_DISPATCH_FLOATING_TYPES(
grad_input.scalar_type(), "psamask_distribute_backward_cuda", [&] {
psamask_distribute_backward_cuda<scalar_t>
<<<nthreads, 512, 0, stream>>>(
nthreads, h_feature, w_feature, h_mask, w_mask, half_h_mask,
half_w_mask, grad_output.data_ptr<scalar_t>(),
grad_input.data_ptr<scalar_t>());
});
}
|
cb0c46370cb6285d4c80d746965f444d9fe87396.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "add.h"
// Necessary headers in VS - my guess: not compiling directly with nvcc
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//---
/*
This is the function that each thread will execute on the GPU. The
fact that it executes on the device is indicated by the __global__
modifier in front of the return type of the function. After that,
the signature of the function isn't special - in particular, the
pointers we pass in should point to memory on the device, but this
is not indicated by the function's signature.
*/
__global__ void add(int *a, int *b, int *c) {
/*
Each thread knows its identity in the system. This identity is
made available in code via indices blockIdx and threadIdx. We
write blockIdx.x because block indices are multidimensional. In
this case, we have linear arrays of data, so we only need one
dimension. If this doesn't make sense, don't worry - the important
thing is that the first step in the function is converting the
thread's identity into an index into the data.
*/
int thread_id = blockIdx.x;
/*
We make sure that the thread_id isn't too large, and then we
assign c = a + b using the index we calculated above.
The big picture is that each thread is responsible for adding one
element from a and one element from b. Each thread is able to run
in parallel, so we get speedup.
*/
if (thread_id < N) {
c[thread_id] = a[thread_id] + b[thread_id];
}
}
|
cb0c46370cb6285d4c80d746965f444d9fe87396.cu
|
#include "add.h"
// Necessary headers in VS - my guess: not compiling directly with nvcc
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//---
/*
This is the function that each thread will execute on the GPU. The
fact that it executes on the device is indicated by the __global__
modifier in front of the return type of the function. After that,
the signature of the function isn't special - in particular, the
pointers we pass in should point to memory on the device, but this
is not indicated by the function's signature.
*/
__global__ void add(int *a, int *b, int *c) {
/*
Each thread knows its identity in the system. This identity is
made available in code via indices blockIdx and threadIdx. We
write blockIdx.x because block indices are multidimensional. In
this case, we have linear arrays of data, so we only need one
dimension. If this doesn't make sense, don't worry - the important
thing is that the first step in the function is converting the
thread's identity into an index into the data.
*/
int thread_id = blockIdx.x;
/*
We make sure that the thread_id isn't too large, and then we
assign c = a + b using the index we calculated above.
The big picture is that each thread is responsible for adding one
element from a and one element from b. Each thread is able to run
in parallel, so we get speedup.
*/
if (thread_id < N) {
c[thread_id] = a[thread_id] + b[thread_id];
}
}
|
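The comments in add above describe the one-thread-per-element mapping; a minimal host-side driver makes the full round trip concrete. This is a sketch under assumptions, not code from either file above: add.h is assumed to define N (the kernel's bounds check already relies on it), the prototype merely repeats the kernel shown above, and error checking is omitted.
#include <cstdio>
#include <cuda_runtime.h>
#include "add.h"                       // assumed to define N, as in the original project
__global__ void add(int *a, int *b, int *c);   // the kernel defined in the file above
int main() {
    static int ha[N], hb[N], hc[N];
    for (int i = 0; i < N; ++i) { ha[i] = i; hb[i] = 2 * i; }
    int *da, *db, *dc;
    cudaMalloc(&da, N * sizeof(int));
    cudaMalloc(&db, N * sizeof(int));
    cudaMalloc(&dc, N * sizeof(int));
    cudaMemcpy(da, ha, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, N * sizeof(int), cudaMemcpyHostToDevice);
    // The kernel indexes data with blockIdx.x, so launch one single-thread block per element.
    add<<<N, 1>>>(da, db, dc);
    cudaMemcpy(hc, dc, N * sizeof(int), cudaMemcpyDeviceToHost);
    printf("c[N-1] = %d (expected %d)\n", hc[N - 1], ha[N - 1] + hb[N - 1]);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    return 0;
}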
3cb772794dda18ce625a79484c468407bf0674d1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "dBinaryCrossEntropyCost.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *predictions = NULL;
hipMalloc(&predictions, XSIZE*YSIZE);
float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE);
float *dY = NULL;
hipMalloc(&dY, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
dBinaryCrossEntropyCost), dim3(gridBlock),dim3(threadBlock), 0, 0, predictions,target,dY,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
dBinaryCrossEntropyCost), dim3(gridBlock),dim3(threadBlock), 0, 0, predictions,target,dY,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
dBinaryCrossEntropyCost), dim3(gridBlock),dim3(threadBlock), 0, 0, predictions,target,dY,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
3cb772794dda18ce625a79484c468407bf0674d1.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "dBinaryCrossEntropyCost.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *predictions = NULL;
cudaMalloc(&predictions, XSIZE*YSIZE);
float *target = NULL;
cudaMalloc(&target, XSIZE*YSIZE);
float *dY = NULL;
cudaMalloc(&dY, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
dBinaryCrossEntropyCost<<<gridBlock,threadBlock>>>(predictions,target,dY,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
dBinaryCrossEntropyCost<<<gridBlock,threadBlock>>>(predictions,target,dY,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
dBinaryCrossEntropyCost<<<gridBlock,threadBlock>>>(predictions,target,dY,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
9eea3c80fb3972bc9bcb34e7a3f0e9a88ba14319.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <chrono>
#include <iostream>
#include <stdio.h>
#include "merge_sort.cuh"
void init_array(int* in, int size, int max_level);
void print_vector(int* in, int size);
int main()
{
int size = 100;
int max_val = 100;
int* test = new int[size];
int* out = new int[size];
init_array(test, size, max_val);
print_vector(test, size);
auto start_cpu = std::chrono::high_resolution_clock::now();
mergeSortAsc(test, size, out);
auto end_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> time_span_cpu = std::chrono::duration_cast<std::chrono::duration<double>>(end_cpu - start_cpu);
print_vector(out, size);
std::cout << "Merge sort, CPU time elapsed (millisec) " << time_span_cpu.count() << std::endl;
system("pause");
return 0;
}
void init_array(int* in, int size, int max_level)
{
for (int i = 0; i < size; i++)
{
in[i] = floor(max_level*((double)rand() / (RAND_MAX)));
}
}
void print_vector(int* in, int size)
{
for (int i = 0; i < size; i++)
{
std::cout << in[i] << " ";
}
std::cout << std::endl;
}
|
9eea3c80fb3972bc9bcb34e7a3f0e9a88ba14319.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <chrono>
#include <iostream>
#include <stdio.h>
#include "merge_sort.cuh"
void init_array(int* in, int size, int max_level);
void print_vector(int* in, int size);
int main()
{
int size = 100;
int max_val = 100;
int* test = new int[size];
int* out = new int[size];
init_array(test, size, max_val);
print_vector(test, size);
auto start_cpu = std::chrono::high_resolution_clock::now();
mergeSortAsc(test, size, out);
auto end_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> time_span_cpu = std::chrono::duration_cast<std::chrono::duration<double>>(end_cpu - start_cpu);
print_vector(out, size);
std::cout << "Merge sort, CPU time elapsed (millisec) " << time_span_cpu.count() << std::endl;
system("pause");
return 0;
}
void init_array(int* in, int size, int max_level)
{
for (int i = 0; i < size; i++)
{
in[i] = floor(max_level*((double)rand() / (RAND_MAX)));
}
}
void print_vector(int* in, int size)
{
for (int i = 0; i < size; i++)
{
std::cout << in[i] << " ";
}
std::cout << std::endl;
}
|
24cc8914ec758b1e59106ab00b384d032989dab5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
typedef unsigned long long uint64_t;
typedef unsigned int uint32_t;
// constants for craft
#define ROUND_NUM 8
#define KEY0 0xb36cab650838eb24LLU
#define KEY1 0xe5c67f9611b68427LLU
#define INPUT_DIFF 0x00000000005a0a00LLU
#define COND 0x00000000005a0a00LLU
// constants for gpu
#define ENUM_NUM 22
#define BLOCK_NUM 128 //2^7
#define THREAD_NUM 128 // 2^7
#define THREADS_SHIFT 14 // (7+7)
#define ONE (0x1LLU)
#define MASK16 (0xfLLU)
// the output of g
#define LFSR(g) (((g >> 39) ^ (g >> 34) ^ (g >> 24) ^ (g >> 19) ^ (g >> 14) ^ (g >> 5)) & ONE)
#define LEFTROTBY4(w, shift) ((w) << ((shift) << 2))
#define GETBY4(w, p) (((w) >> ((p) << 2)) & 0xfLLU)
#define FROM_I_TO_J(w, i, j) (LEFTROTBY4(GETBY4(w, i), j))
#define Sbox_FROM_I_TO_J(w, i, j) (LEFTROTBY4(S[GETBY4(w, i)], j))
/* Cell Shuffle in the tk schedule */
// const int Q[16] =
// {0xc, 0xa, 0xf, 0x5, 0xe, 0x8, 0x9, 0x2, 0xb, 0x3, 0x7, 0x4, 0x6, 0x0, 0x1, 0xd};
#define Q(T) (FROM_I_TO_J(T, 12, 0) ^ FROM_I_TO_J(T, 10, 1) ^ FROM_I_TO_J(T, 15, 2) ^ FROM_I_TO_J(T, 5, 3) ^ FROM_I_TO_J(T, 14, 4) ^ FROM_I_TO_J(T, 8, 5) ^ FROM_I_TO_J(T, 9, 6) ^ FROM_I_TO_J(T, 2, 7) ^ FROM_I_TO_J(T, 11, 8) ^ FROM_I_TO_J(T, 3, 9) ^ FROM_I_TO_J(T, 7, 10) ^ FROM_I_TO_J(T, 4, 11) ^ FROM_I_TO_J(T, 6, 12) ^ FROM_I_TO_J(T, 0, 13) ^ FROM_I_TO_J(T, 1, 14) ^ FROM_I_TO_J(T, 13, 15))
/* Cell Shuffle in the linear layer */
// const int P[16] =
// {0xf, 0xc, 0xd, 0xe, 0xa, 0x9, 0x8, 0xb, 0x6, 0x5, 0x4, 0x7, 0x1, 0x2, 0x3, 0x0};
/* S-box */
// const int S[16] =
// {0xc, 0xa, 0xd, 0x3, 0xe, 0xb, 0xf, 0x7, 0x8, 0x9, 0x1, 0x5, 0x0, 0x2, 0x4, 0x6};
#define PS(st) (Sbox_FROM_I_TO_J(st, 15, 0) ^ Sbox_FROM_I_TO_J(st, 12, 1) ^ Sbox_FROM_I_TO_J(st, 13, 2) ^ Sbox_FROM_I_TO_J(st, 14, 3) ^ Sbox_FROM_I_TO_J(st, 10, 4) ^ Sbox_FROM_I_TO_J(st, 9, 5) ^ Sbox_FROM_I_TO_J(st, 8, 6) ^ Sbox_FROM_I_TO_J(st, 11, 7) ^ Sbox_FROM_I_TO_J(st, 6, 8) ^ Sbox_FROM_I_TO_J(st, 5, 9) ^ Sbox_FROM_I_TO_J(st, 4, 10) ^ Sbox_FROM_I_TO_J(st, 7, 11) ^ Sbox_FROM_I_TO_J(st, 1, 12) ^ Sbox_FROM_I_TO_J(st, 2, 13) ^ Sbox_FROM_I_TO_J(st, 3, 14) ^ Sbox_FROM_I_TO_J(st, 0, 15))
/* Round constants generated by 3-bit LFSR, XOR-ed at state[5] */
// const int RC3[32] =
// {0x1, 0x4, 0x2, 0x5, 0x6, 0x7, 0x3, 0x1, 0x4, 0x2, 0x5, 0x6, 0x7, 0x3, 0x1, 0x4,
// 0x2, 0x5, 0x6, 0x7, 0x3, 0x1, 0x4, 0x2, 0x5, 0x6, 0x7, 0x3, 0x1, 0x4, 0x2, 0x5};
// Round constants generated by 4-bit LFSR, XOR-ed at state[4]
// const int RC4 [32] =
// {0x1, 0x8, 0x4, 0x2, 0x9, 0xc, 0x6, 0xb, 0x5, 0xa, 0xd, 0xe, 0xf, 0x7, 0x3, 0x1,
// 0x8, 0x4, 0x2, 0x9, 0xc, 0x6, 0xb, 0x5, 0xa, 0xd, 0xe, 0xf, 0x7, 0x3, 0x1, 0x8};
__device__ __constant__ uint64_t
S[16] = {0xc, 0xa, 0xd, 0x3, 0xe, 0xb, 0xf, 0x7, 0x8, 0x9, 0x1, 0x5, 0x0, 0x2, 0x4, 0x6};
__device__ __constant__ uint64_t
RC[32] = {0x110000, 0x480000, 0x240000, 0x520000, 0x690000, 0x7c0000, 0x360000, 0x1b0000,
0x450000, 0x2a0000, 0x5d0000, 0x6e0000, 0x7f0000, 0x370000, 0x130000, 0x410000,
0x280000, 0x540000, 0x620000, 0x790000, 0x3c0000, 0x160000, 0x4b0000, 0x250000,
0x5a0000, 0x6d0000, 0x7e0000, 0x3f0000, 0x170000, 0x430000, 0x210000, 0x580000};
// requirements
// tweak[7] == key1[10]
// tweak[10] == key1[10]
// tweak[0] == key1[13]
// tweak[13] == key1[13]
// tweak[3] == key1[9]
// tweak[9] == key1[9]
// set values in tweak to 0.
__device__ __constant__ uint64_t
tweak_mask = ((~0x0LLU) ^ LEFTROTBY4(MASK16, 3) ^ LEFTROTBY4(MASK16, 9) ^ LEFTROTBY4(MASK16, 0) ^ LEFTROTBY4(MASK16, 13) ^ LEFTROTBY4(MASK16, 7) ^ LEFTROTBY4(MASK16, 10));
// prepare values in tweak to be set.
__device__ __constant__ uint64_t
key1_setto_tweak = (LEFTROTBY4(GETBY4(KEY1, 9), 3) | LEFTROTBY4(GETBY4(KEY1, 9), 9) | LEFTROTBY4(GETBY4(KEY1, 13), 0) | LEFTROTBY4(GETBY4(KEY1, 13), 13) | LEFTROTBY4(GETBY4(KEY1, 10), 7) | LEFTROTBY4(GETBY4(KEY1, 10), 10));
__device__ static __forceinline__ uint64_t
round(const uint64_t old_st, const uint64_t tk0, const uint64_t tk1, const uint64_t tk2, const uint64_t tk3, const int r) {
/* MC */
uint64_t row3 = (old_st >> 48);
uint64_t st = old_st ^ ((old_st >> 32) & 0xffff) ^ row3 ^ (row3 << 16);
/* Add constants */
st ^= RC[r];
/* Add tweakey */
switch (r % 4) {
case 0:
st ^= tk0;
break;
case 1:
st ^= tk1;
break;
case 2:
st ^= tk2;
break;
case 3:
st ^= tk3;
break;
}
/* PN and SB */
return PS(st);
}
__global__ void
test_correct_pairs(const uint64_t *d_init_values, uint32_t *d_correct_num) {
// receive input data
const uint64_t cur_idx = blockDim.x * blockIdx.x + threadIdx.x;
uint64_t p0_lfsr = d_init_values[cur_idx * 2];
uint64_t tweak_lfsr = d_init_values[cur_idx * 2 + 1];
uint32_t counter_for_correct_pairs = 0;
uint64_t p0, p1, tweak;
uint64_t tk0, tk1, tk2, tk3, qt;
// main loop in each device
for (uint64_t data_i = 0; data_i < (ONE << ENUM_NUM); data_i++) {
// plain p0 is from LFSR g(U) = u0+u5+u15+u20+u25+u34.
if (LFSR(p0_lfsr) == 0) {
p0_lfsr = (p0_lfsr << 1);
} else {
p0_lfsr = ((p0_lfsr << 1) ^ ONE);
}
p0 = p0_lfsr;
// plain p1
p1 = p0 ^ INPUT_DIFF;
// tweak is also from LFSR g(U) = u0+u5+u15+u20+u25+u34.
if (LFSR(tweak_lfsr) == 0) {
tweak_lfsr = (tweak_lfsr << 1);
} else {
tweak_lfsr = ((tweak_lfsr << 1) ^ ONE);
}
// set hypotheses
tweak = ((tweak_lfsr & tweak_mask) | key1_setto_tweak);
// initialize key
tk0 = KEY0 ^ tweak;
tk1 = KEY1 ^ tweak;
qt = Q(tweak);
tk2 = KEY0 ^ qt;
tk3 = KEY1 ^ qt;
// forward
for (int r = 0; r < ROUND_NUM; r++) {
p0 = round(p0, tk0, tk1, tk2, tk3, r);
p1 = round(p1, tk0, tk1, tk2, tk3, r);
}
// test correct pairs
if ((p0 ^ p1) == COND) {
// printf("p0: %lx, c0: %lx, tweak: %lx\n", p0_lfsr, p0, tweak);
// printf("p1: %lx, c1: %lx\n\n", p0_lfsr ^ INPUT_DIFF, p1);
counter_for_correct_pairs++;
}
}
d_correct_num[cur_idx] = counter_for_correct_pairs;
}
static inline void print_word(uint64_t w) {
printf("0x");
for (int i = 15; i >= 0; i--) {
if (i < 15 && (i & 0x3) == 0x3) {
printf(" ");
}
printf("%llx", (w >> (i << 2)) & 0xfLLU);
}
}
int main(int argc, char** argv) {
// parsing inputs
// argv[1]: gpu device number; argv[2]: seed for random number.
int dev_id, seed;
if (argc >= 3) {
dev_id = atoi(argv[1]);
seed = atoi(argv[2]);
} else {
dev_id = 3;
seed = time(0);
}
hipSetDevice(dev_id);
srand(seed);
printf("finish setting device %d with seed %d\n\n", dev_id, seed);
// generate initial states for each thread.
const uint64_t thr_num = BLOCK_NUM * THREAD_NUM;
uint64_t h_init_values[2 * thr_num];
for (uint64_t i = 0; i < 2 * thr_num; i++) {
h_init_values[i] = rand();
uint64_t temp = rand();
h_init_values[i] ^= (temp << 32);
}
printf("finish initializing random values!\n");
// allocate memory.
hipError_t err = hipSuccess;
uint64_t *d_init_values = NULL;
err = hipMalloc((void **) &d_init_values, 2 * thr_num * sizeof(uint64_t));
if (err != hipSuccess) {
fprintf(stderr, "Failed to allocate initial values (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_init_values, h_init_values, 2 * thr_num * sizeof(uint64_t), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "Failed to copy init_values from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
uint32_t *d_correct_num = NULL;
err = hipMalloc((void **) &d_correct_num, thr_num * sizeof(uint32_t));
if (err != hipSuccess) {
printf("Failed to allocate correct_num (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("finish allocating memory!\n");
// test for correct pairs.
printf("enum num (on each device): %d\ntotal thread num: %d (block num %d * thread num %d)\n", ENUM_NUM, BLOCK_NUM * THREAD_NUM, BLOCK_NUM, THREAD_NUM);
// real start
hipEvent_t start1;
hipEventCreate(&start1);
hipEvent_t stop1;
hipEventCreate(&stop1);
hipEventRecord(start1, NULL);
printf("begin testing correct pairs!\n\n");
printf("\ndata num : 2 ^ %d\n", ENUM_NUM + THREADS_SHIFT);
printf("Round : %d\n", ROUND_NUM);
printf("key0 : ");
print_word(KEY0);
printf("\nkey1 : ");
print_word(KEY1);
printf("\ndiff at key0[9] : %llx\n", GETBY4(KEY0, 9) ^ GETBY4(KEY1, 9));
printf("input diff : ");
print_word(INPUT_DIFF);
printf("\noutput diff : ");
print_word(COND);
printf("\n");
hipLaunchKernelGGL(( test_correct_pairs), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, d_init_values, d_correct_num);
hipEventRecord(stop1, NULL);
hipEventSynchronize(stop1);
float msecTotal1 = 0.0f;
hipEventElapsedTime(&msecTotal1, start1, stop1);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed to launch test_correct_pairs kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
uint32_t h_correct_num[thr_num];
err = hipMemcpy(h_correct_num, d_correct_num, thr_num * sizeof(uint32_t), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "Failed to copy correct numbers from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("computation is done!\n");
// get probability.
uint64_t all_num = 0;
for (uint64_t i = 0; i < thr_num; i++) {
all_num += h_correct_num[i];
}
printf("\n\nright pairs : %llu\n", all_num);
printf("pro : %f\n\n", log2(float(all_num)/float(uint64_t(1) << (ENUM_NUM + THREADS_SHIFT))));
printf("\ntime:%.3lf sec.\n---------------------------------------\n", msecTotal1 / 1000.0);
hipFree(d_init_values);
hipFree(d_correct_num);
}
|
24cc8914ec758b1e59106ab00b384d032989dab5.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda_runtime.h>
typedef unsigned long long uint64_t;
typedef unsigned int uint32_t;
// constants for craft
#define ROUND_NUM 8
#define KEY0 0xb36cab650838eb24LLU
#define KEY1 0xe5c67f9611b68427LLU
#define INPUT_DIFF 0x00000000005a0a00LLU
#define COND 0x00000000005a0a00LLU
// constants for gpu
#define ENUM_NUM 22
#define BLOCK_NUM 128 //2^7
#define THREAD_NUM 128 // 2^7
#define THREADS_SHIFT 14 // (7+7)
#define ONE (0x1LLU)
#define MASK16 (0xfLLU)
// the output of g
#define LFSR(g) (((g >> 39) ^ (g >> 34) ^ (g >> 24) ^ (g >> 19) ^ (g >> 14) ^ (g >> 5)) & ONE)
#define LEFTROTBY4(w, shift) ((w) << ((shift) << 2))
#define GETBY4(w, p) (((w) >> ((p) << 2)) & 0xfLLU)
#define FROM_I_TO_J(w, i, j) (LEFTROTBY4(GETBY4(w, i), j))
#define Sbox_FROM_I_TO_J(w, i, j) (LEFTROTBY4(S[GETBY4(w, i)], j))
/* Cell Shuffle in the tk schedule */
// const int Q[16] =
// {0xc, 0xa, 0xf, 0x5, 0xe, 0x8, 0x9, 0x2, 0xb, 0x3, 0x7, 0x4, 0x6, 0x0, 0x1, 0xd};
#define Q(T) (FROM_I_TO_J(T, 12, 0) ^ FROM_I_TO_J(T, 10, 1) ^ FROM_I_TO_J(T, 15, 2) ^ FROM_I_TO_J(T, 5, 3) ^ FROM_I_TO_J(T, 14, 4) ^ FROM_I_TO_J(T, 8, 5) ^ FROM_I_TO_J(T, 9, 6) ^ FROM_I_TO_J(T, 2, 7) ^ FROM_I_TO_J(T, 11, 8) ^ FROM_I_TO_J(T, 3, 9) ^ FROM_I_TO_J(T, 7, 10) ^ FROM_I_TO_J(T, 4, 11) ^ FROM_I_TO_J(T, 6, 12) ^ FROM_I_TO_J(T, 0, 13) ^ FROM_I_TO_J(T, 1, 14) ^ FROM_I_TO_J(T, 13, 15))
/* Cell Shuffle in the linear layer */
// const int P[16] =
// {0xf, 0xc, 0xd, 0xe, 0xa, 0x9, 0x8, 0xb, 0x6, 0x5, 0x4, 0x7, 0x1, 0x2, 0x3, 0x0};
/* S-box */
// const int S[16] =
// {0xc, 0xa, 0xd, 0x3, 0xe, 0xb, 0xf, 0x7, 0x8, 0x9, 0x1, 0x5, 0x0, 0x2, 0x4, 0x6};
#define PS(st) (Sbox_FROM_I_TO_J(st, 15, 0) ^ Sbox_FROM_I_TO_J(st, 12, 1) ^ Sbox_FROM_I_TO_J(st, 13, 2) ^ Sbox_FROM_I_TO_J(st, 14, 3) ^ Sbox_FROM_I_TO_J(st, 10, 4) ^ Sbox_FROM_I_TO_J(st, 9, 5) ^ Sbox_FROM_I_TO_J(st, 8, 6) ^ Sbox_FROM_I_TO_J(st, 11, 7) ^ Sbox_FROM_I_TO_J(st, 6, 8) ^ Sbox_FROM_I_TO_J(st, 5, 9) ^ Sbox_FROM_I_TO_J(st, 4, 10) ^ Sbox_FROM_I_TO_J(st, 7, 11) ^ Sbox_FROM_I_TO_J(st, 1, 12) ^ Sbox_FROM_I_TO_J(st, 2, 13) ^ Sbox_FROM_I_TO_J(st, 3, 14) ^ Sbox_FROM_I_TO_J(st, 0, 15))
/* Round constants generated by 3-bit LFSR, XOR-ed at state[5] */
// const int RC3[32] =
// {0x1, 0x4, 0x2, 0x5, 0x6, 0x7, 0x3, 0x1, 0x4, 0x2, 0x5, 0x6, 0x7, 0x3, 0x1, 0x4,
// 0x2, 0x5, 0x6, 0x7, 0x3, 0x1, 0x4, 0x2, 0x5, 0x6, 0x7, 0x3, 0x1, 0x4, 0x2, 0x5};
// Round constants generated by 4-bit LFSR, XOR-ed at state[4]
// const int RC4 [32] =
// {0x1, 0x8, 0x4, 0x2, 0x9, 0xc, 0x6, 0xb, 0x5, 0xa, 0xd, 0xe, 0xf, 0x7, 0x3, 0x1,
// 0x8, 0x4, 0x2, 0x9, 0xc, 0x6, 0xb, 0x5, 0xa, 0xd, 0xe, 0xf, 0x7, 0x3, 0x1, 0x8};
__device__ __constant__ uint64_t
S[16] = {0xc, 0xa, 0xd, 0x3, 0xe, 0xb, 0xf, 0x7, 0x8, 0x9, 0x1, 0x5, 0x0, 0x2, 0x4, 0x6};
__device__ __constant__ uint64_t
RC[32] = {0x110000, 0x480000, 0x240000, 0x520000, 0x690000, 0x7c0000, 0x360000, 0x1b0000,
0x450000, 0x2a0000, 0x5d0000, 0x6e0000, 0x7f0000, 0x370000, 0x130000, 0x410000,
0x280000, 0x540000, 0x620000, 0x790000, 0x3c0000, 0x160000, 0x4b0000, 0x250000,
0x5a0000, 0x6d0000, 0x7e0000, 0x3f0000, 0x170000, 0x430000, 0x210000, 0x580000};
// requirements
// tweak[7] == key1[10]
// tweak[10] == key1[10]
// tweak[0] == key1[13]
// tweak[13] == key1[13]
// tweak[3] == key1[9]
// tweak[9] == key1[9]
// set values in tweak to 0.
__device__ __constant__ uint64_t
tweak_mask = ((~0x0LLU) ^ LEFTROTBY4(MASK16, 3) ^ LEFTROTBY4(MASK16, 9) ^ LEFTROTBY4(MASK16, 0) ^ LEFTROTBY4(MASK16, 13) ^ LEFTROTBY4(MASK16, 7) ^ LEFTROTBY4(MASK16, 10));
// prepare values in tweak to be set.
__device__ __constant__ uint64_t
key1_setto_tweak = (LEFTROTBY4(GETBY4(KEY1, 9), 3) | LEFTROTBY4(GETBY4(KEY1, 9), 9) | LEFTROTBY4(GETBY4(KEY1, 13), 0) | LEFTROTBY4(GETBY4(KEY1, 13), 13) | LEFTROTBY4(GETBY4(KEY1, 10), 7) | LEFTROTBY4(GETBY4(KEY1, 10), 10));
__device__ static __forceinline__ uint64_t
round(const uint64_t old_st, const uint64_t tk0, const uint64_t tk1, const uint64_t tk2, const uint64_t tk3, const int r) {
/* MC */
uint64_t row3 = (old_st >> 48);
uint64_t st = old_st ^ ((old_st >> 32) & 0xffff) ^ row3 ^ (row3 << 16);
/* Add constants */
st ^= RC[r];
/* Add tweakey */
switch (r % 4) {
case 0:
st ^= tk0;
break;
case 1:
st ^= tk1;
break;
case 2:
st ^= tk2;
break;
case 3:
st ^= tk3;
break;
}
/* PN and SB */
return PS(st);
}
__global__ void
test_correct_pairs(const uint64_t *d_init_values, uint32_t *d_correct_num) {
// receive input data
const uint64_t cur_idx = blockDim.x * blockIdx.x + threadIdx.x;
uint64_t p0_lfsr = d_init_values[cur_idx * 2];
uint64_t tweak_lfsr = d_init_values[cur_idx * 2 + 1];
uint32_t counter_for_correct_pairs = 0;
uint64_t p0, p1, tweak;
uint64_t tk0, tk1, tk2, tk3, qt;
// main loop in each device
for (uint64_t data_i = 0; data_i < (ONE << ENUM_NUM); data_i++) {
// plain p0 is from LFSR g(U) = u0+u5+u15+u20+u25+u34.
if (LFSR(p0_lfsr) == 0) {
p0_lfsr = (p0_lfsr << 1);
} else {
p0_lfsr = ((p0_lfsr << 1) ^ ONE);
}
p0 = p0_lfsr;
// plain p1
p1 = p0 ^ INPUT_DIFF;
// tweak is also from LFSR g(U) = u0+u5+u15+u20+u25+u34.
if (LFSR(tweak_lfsr) == 0) {
tweak_lfsr = (tweak_lfsr << 1);
} else {
tweak_lfsr = ((tweak_lfsr << 1) ^ ONE);
}
// set hypotheses
tweak = ((tweak_lfsr & tweak_mask) | key1_setto_tweak);
// initialize key
tk0 = KEY0 ^ tweak;
tk1 = KEY1 ^ tweak;
qt = Q(tweak);
tk2 = KEY0 ^ qt;
tk3 = KEY1 ^ qt;
// forward
for (int r = 0; r < ROUND_NUM; r++) {
p0 = round(p0, tk0, tk1, tk2, tk3, r);
p1 = round(p1, tk0, tk1, tk2, tk3, r);
}
// test correct pairs
if ((p0 ^ p1) == COND) {
// printf("p0: %lx, c0: %lx, tweak: %lx\n", p0_lfsr, p0, tweak);
// printf("p1: %lx, c1: %lx\n\n", p0_lfsr ^ INPUT_DIFF, p1);
counter_for_correct_pairs++;
}
}
d_correct_num[cur_idx] = counter_for_correct_pairs;
}
static inline void print_word(uint64_t w) {
printf("0x");
for (int i = 15; i >= 0; i--) {
if (i < 15 && (i & 0x3) == 0x3) {
printf(" ");
}
printf("%llx", (w >> (i << 2)) & 0xfLLU);
}
}
int main(int argc, char** argv) {
// parsing inputs
// argv[1]: gpu device number; argv[2]: seed for random number.
int dev_id, seed;
if (argc >= 3) {
dev_id = atoi(argv[1]);
seed = atoi(argv[2]);
} else {
dev_id = 3;
seed = time(0);
}
cudaSetDevice(dev_id);
srand(seed);
printf("finish setting device %d with seed %d\n\n", dev_id, seed);
// generate initial states for each thread.
const uint64_t thr_num = BLOCK_NUM * THREAD_NUM;
uint64_t h_init_values[2 * thr_num];
for (uint64_t i = 0; i < 2 * thr_num; i++) {
h_init_values[i] = rand();
uint64_t temp = rand();
h_init_values[i] ^= (temp << 32);
}
printf("finish initializing random values!\n");
// allocate memory.
cudaError_t err = cudaSuccess;
uint64_t *d_init_values = NULL;
err = cudaMalloc((void **) &d_init_values, 2 * thr_num * sizeof(uint64_t));
if (err != cudaSuccess) {
fprintf(stderr, "Failed to allocate initial values (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_init_values, h_init_values, 2 * thr_num * sizeof(uint64_t), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to copy init_values from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
uint32_t *d_correct_num = NULL;
err = cudaMalloc((void **) &d_correct_num, thr_num * sizeof(uint32_t));
if (err != cudaSuccess) {
printf("Failed to allocate correct_num (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("finish allocating memory!\n");
// test for correct pairs.
printf("enum num (on each device): %d\ntotal thread num: %d (block num %d * thread num %d)\n", ENUM_NUM, BLOCK_NUM * THREAD_NUM, BLOCK_NUM, THREAD_NUM);
	// start timing and launch
cudaEvent_t start1;
cudaEventCreate(&start1);
cudaEvent_t stop1;
cudaEventCreate(&stop1);
cudaEventRecord(start1, NULL);
printf("begin testing correct pairs!\n\n");
printf("\ndata num : 2 ^ %d\n", ENUM_NUM + THREADS_SHIFT);
printf("Round : %d\n", ROUND_NUM);
printf("key0 : ");
print_word(KEY0);
printf("\nkey1 : ");
print_word(KEY1);
printf("\ndiff at key0[9] : %llx\n", GETBY4(KEY0, 9) ^ GETBY4(KEY1, 9));
printf("input diff : ");
print_word(INPUT_DIFF);
printf("\noutput diff : ");
print_word(COND);
printf("\n");
test_correct_pairs<<<BLOCK_NUM, THREAD_NUM>>>(d_init_values, d_correct_num);
cudaEventRecord(stop1, NULL);
cudaEventSynchronize(stop1);
float msecTotal1 = 0.0f;
cudaEventElapsedTime(&msecTotal1, start1, stop1);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch test_correct_pairs kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
uint32_t h_correct_num[thr_num];
err = cudaMemcpy(h_correct_num, d_correct_num, thr_num * sizeof(uint32_t), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to copy correct numbers from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("computation is done!\n");
	// aggregate per-thread counts and estimate the differential probability.
uint64_t all_num = 0;
for (uint64_t i = 0; i < thr_num; i++) {
all_num += h_correct_num[i];
}
printf("\n\nright pairs : %llu\n", all_num);
printf("pro : %f\n\n", log2(float(all_num)/float(uint64_t(1) << (ENUM_NUM + THREADS_SHIFT))));
printf("\ntime:%.3lf sec.\n---------------------------------------\n", msecTotal1 / 1000.0);
cudaFree(d_init_values);
cudaFree(d_correct_num);
}
|
fcf5c0bb1d50babc057d7a7d1145a0ee8c680d38.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// nvcc ./memcpy.cu -arch=sm_62 && ./a.out
# include <stdio.h>
# include <sys/time.h>
# include <cstdlib>
# include <iostream>
# include <ctime>
# include <vector>
# include <sys/types.h>
# include <hip/hip_runtime.h>
struct TimeLogger
{
struct EventPoint
{
int line_number;
std::string event_name;
timeval time;
};
timeval start;
std::vector<EventPoint> time_points;
void Start()
{
gettimeofday(&start, NULL);
time_points.clear();
}
void Log(int line, std::string name)
{
EventPoint ep;
ep.line_number = line;
ep.event_name = name;
gettimeofday(&ep.time, NULL);
time_points.push_back(ep);
}
double Diff_ms(timeval start1, timeval end)
{
double seconds = end.tv_sec - start1.tv_sec;
double useconds = end.tv_usec - start1.tv_usec;
double mtime = ((seconds)* 1000.0 + useconds / 1000.0) + 0.5;
return mtime;
}
double Display()
{
//std::cout << "\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n";
timeval last = start;
for (size_t i = 0; i < time_points.size(); i++)
{
double ms = Diff_ms(last, time_points[i].time);
double ams = Diff_ms(start, time_points[i].time);
std::cout << ams << " is " << " Accumulated time......." <<
ms << " ms for " << time_points[i].event_name << "\n";
last = time_points[i].time;
}
return Diff_ms(start, last);
}
};
TimeLogger g_time_logger;
# define LOG(x) g_time_logger.Log(__LINE__, #x)
FILE* Open(const char* fileName, const char* mode)
{
# ifdef _WIN32
FILE* fp = 0;
fopen_s(&fp, fileName, mode);
return fp;
# else
return fopen(fileName, mode);
# endif
}
void DumpTga(const char* fileName, int width, int height, int bpp, unsigned char* data)
{
unsigned char header[18] = { 0 };
for (int i = 0; i < 18; i++) header[i] = 0;
header[2] = (bpp == 1) ? 3 : 2;
header[12] = width & 255;
header[13] = width >> 8;
header[14] = height & 255;
header[15] = height >> 8;
header[16] = bpp * 8;
FILE* fp = Open(fileName, "wb");
fwrite(header, 18, 1, fp);
fwrite(data, width*height, bpp, fp);
fclose(fp);
}
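// Example use of DumpTga above (hypothetical buffer): DumpTga("frame.tga", 256, 256, 3, pixel_data)
// writes an uncompressed true-color TGA; with bpp == 1 the header is marked grayscale instead.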
unsigned char* ReadTGA(const char* fileName, int& width, int &height, int &bpp, unsigned char* pixels = 0)
{
width = height = bpp = 0;
FILE* fp = Open(fileName, "rb");
if (!fp) return pixels;
unsigned char header[18] = { 0 };
fread(header, 18, 1, fp);
if (header[2] != 2 && header[2] != 3)
return pixels;
width = header[12] + header[13] * 256;
height = header[14] + header[15] * 256;
bpp = header[16] >> 3;
//unsigned char* pixels = 0;
if(!pixels)
pixels = new unsigned char[width*height*bpp];
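	// Quirk: peek at the first 76 bytes; if they start with a "Render..." text block
	// (presumably an ID/comment some renderer prepends), skip it and re-read the pixel data
	// from here, otherwise keep these bytes as pixel data and read the remainder of the image.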
fread(pixels, 76, 1, fp);
if (pixels[0] == 'R'
&& pixels[1] == 'e'
&& pixels[2] == 'n'
&& pixels[3] == 'd'
&& pixels[4] == 'e'
&& pixels[5] == 'r'
)
{
fread(pixels, width*height, bpp, fp);
}
else
{
fread(pixels + 76, width*height*bpp - 76, 1, fp);
}
fclose(fp);
return pixels;
}
const int width = (1 << 12);
const int height = width / 2;
const int bpp = 3;
__device__ __managed__ unsigned char img[width*height*bpp];
__device__ __managed__ unsigned char outH[width*height*bpp];
__device__ __managed__ unsigned char outV[width*height*bpp];
__device__ __managed__ unsigned char out[width*height*bpp];
__global__ void Gradient(unsigned char* img, int x, int y)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int index = (col + row*width)*bpp;
col = (col + x) % width;
row = (row + y) % height;
unsigned char r = col & 255;
unsigned char g = row & 255;
unsigned char b = ((row >> 8) << 4) | (col >> 8);
img[index] = b;
img[index+1] = g;
img[index+2] = r;
}
__global__ void DownScale(unsigned char* src, unsigned char* dst, int rows, int cols)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = (col + row * cols)*bpp;
row = row << 1;
col = col << 1;
int src_index = (col + row * cols*2)*bpp;
unsigned char b = src[src_index];
unsigned char g = src[src_index + 1];
unsigned char r = src[src_index + 2];
dst[dst_index] = (256+b)&255;
dst[dst_index + 1] = g;
dst[dst_index + 2] = r;
}
__global__ void Blur(unsigned char* src, unsigned char* dst, int rows, int cols)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = (col + row * cols)*bpp;
row = row << 1;
col = col << 1;
int col0 = max(col - 2, 0);
int col1 = max(col - 1, 0);
int col2 = col;
int col3 = min(col + 1, cols - 1);
int col4 = min(col + 2, cols - 1);
int row0 = max(row - 2, 0);
int row1 = max(row - 1, 0);
int row2 = row;
int row3 = min(row + 1, rows - 1);
int row4 = min(row + 2, rows - 1);
# define C00 1
# define C01 4
# define C02 6
# define C03 4
# define C04 1
# define C10 4
# define C11 16
# define C12 24
# define C13 16
# define C14 4
# define C20 6
# define C21 24
# define C22 36
# define C23 24
# define C24 6
# define C30 4
# define C31 16
# define C32 24
# define C33 16
# define C34 4
# define C40 1
# define C41 4
# define C42 6
# define C43 4
# define C44 1
/*
# define R(x,y) src[bpp*(row##y*cols + col##x )]
# define G(x,y) src[bpp*(row##y*cols + col##x ) + 1]
# define B(x,y) src[bpp*(row##y*cols + col##x ) + 2]
# define DEF(c, y, x) unsigned char c##y##x = c(x,y)
# define PIX(y, x) DEF(R,y,x); DEF(G,y,x); DEF(B, y,x);
PIX(0, 0); PIX(0, 1); PIX(0, 2); PIX(0, 3); PIX(0, 4);
PIX(1, 0); PIX(1, 1); PIX(1, 2); PIX(1, 3); PIX(1, 4);
PIX(2, 0); PIX(2, 1); PIX(2, 2); PIX(2, 3); PIX(2, 4);
PIX(3, 0); PIX(3, 1); PIX(3, 2); PIX(3, 3); PIX(3, 4);
PIX(4, 0); PIX(4, 1); PIX(4, 2); PIX(4, 3); PIX(4, 4);*/
unsigned char R00 = src[bpp*(row0*cols*2 + col0)];
unsigned char R01 = src[bpp*(row0*cols*2 + col1)];
unsigned char R02 = src[bpp*(row0*cols*2 + col2)];
unsigned char R03 = src[bpp*(row0*cols*2 + col3)];
unsigned char R04 = src[bpp*(row0*cols*2 + col4)];
unsigned char R10 = src[bpp*(row1*cols*2 + col0)];
unsigned char R11 = src[bpp*(row1*cols*2 + col1)];
unsigned char R12 = src[bpp*(row1*cols*2 + col2)];
unsigned char R13 = src[bpp*(row1*cols*2 + col3)];
unsigned char R14 = src[bpp*(row1*cols*2 + col4)];
unsigned char R20 = src[bpp*(row2*cols*2 + col0)];
unsigned char R21 = src[bpp*(row2*cols*2 + col1)];
unsigned char R22 = src[bpp*(row2*cols*2 + col2)];
unsigned char R23 = src[bpp*(row2*cols*2 + col3)];
unsigned char R24 = src[bpp*(row2*cols*2 + col4)];
unsigned char R30 = src[bpp*(row3*cols*2 + col0)];
unsigned char R31 = src[bpp*(row3*cols*2 + col1)];
unsigned char R32 = src[bpp*(row3*cols*2 + col2)];
unsigned char R33 = src[bpp*(row3*cols*2 + col3)];
unsigned char R34 = src[bpp*(row3*cols*2 + col4)];
unsigned char R40 = src[bpp*(row4*cols*2 + col0)];
unsigned char R41 = src[bpp*(row4*cols*2 + col1)];
unsigned char R42 = src[bpp*(row4*cols*2 + col2)];
unsigned char R43 = src[bpp*(row4*cols*2 + col3)];
unsigned char R44 = src[bpp*(row4*cols*2 + col4)];
unsigned char G00 = src[1+bpp*(row0*cols*2 + col0)];
unsigned char G01 = src[1+bpp*(row0*cols*2 + col1)];
unsigned char G02 = src[1+bpp*(row0*cols*2 + col2)];
unsigned char G03 = src[1+bpp*(row0*cols*2 + col3)];
unsigned char G04 = src[1+bpp*(row0*cols*2 + col4)];
unsigned char G10 = src[1+bpp*(row1*cols*2 + col0)];
unsigned char G11 = src[1+bpp*(row1*cols*2 + col1)];
unsigned char G12 = src[1+bpp*(row1*cols*2 + col2)];
unsigned char G13 = src[1+bpp*(row1*cols*2 + col3)];
unsigned char G14 = src[1+bpp*(row1*cols*2 + col4)];
unsigned char G20 = src[1+bpp*(row2*cols*2 + col0)];
unsigned char G21 = src[1+bpp*(row2*cols*2 + col1)];
unsigned char G22 = src[1+bpp*(row2*cols*2 + col2)];
unsigned char G23 = src[1+bpp*(row2*cols*2 + col3)];
unsigned char G24 = src[1+bpp*(row2*cols*2 + col4)];
unsigned char G30 = src[1+bpp*(row3*cols*2 + col0)];
unsigned char G31 = src[1+bpp*(row3*cols*2 + col1)];
unsigned char G32 = src[1+bpp*(row3*cols*2 + col2)];
unsigned char G33 = src[1+bpp*(row3*cols*2 + col3)];
unsigned char G34 = src[1+bpp*(row3*cols*2 + col4)];
unsigned char G40 = src[1+bpp*(row4*cols*2 + col0)];
unsigned char G41 = src[1+bpp*(row4*cols*2 + col1)];
unsigned char G42 = src[1+bpp*(row4*cols*2 + col2)];
unsigned char G43 = src[1+bpp*(row4*cols*2 + col3)];
unsigned char G44 = src[1+bpp*(row4*cols*2 + col4)];
unsigned char B00 = src[2+bpp*(row0*cols*2 + col0)];
unsigned char B01 = src[2+bpp*(row0*cols*2 + col1)];
unsigned char B02 = src[2+bpp*(row0*cols*2 + col2)];
unsigned char B03 = src[2+bpp*(row0*cols*2 + col3)];
unsigned char B04 = src[2+bpp*(row0*cols*2 + col4)];
unsigned char B10 = src[2+bpp*(row1*cols*2 + col0)];
unsigned char B11 = src[2+bpp*(row1*cols*2 + col1)];
unsigned char B12 = src[2+bpp*(row1*cols*2 + col2)];
unsigned char B13 = src[2+bpp*(row1*cols*2 + col3)];
unsigned char B14 = src[2+bpp*(row1*cols*2 + col4)];
unsigned char B20 = src[2+bpp*(row2*cols*2 + col0)];
unsigned char B21 = src[2+bpp*(row2*cols*2 + col1)];
unsigned char B22 = src[2+bpp*(row2*cols*2 + col2)];
unsigned char B23 = src[2+bpp*(row2*cols*2 + col3)];
unsigned char B24 = src[2+bpp*(row2*cols*2 + col4)];
unsigned char B30 = src[2+bpp*(row3*cols*2 + col0)];
unsigned char B31 = src[2+bpp*(row3*cols*2 + col1)];
unsigned char B32 = src[2+bpp*(row3*cols*2 + col2)];
unsigned char B33 = src[2+bpp*(row3*cols*2 + col3)];
unsigned char B34 = src[2+bpp*(row3*cols*2 + col4)];
unsigned char B40 = src[2+bpp*(row4*cols*2 + col0)];
unsigned char B41 = src[2+bpp*(row4*cols*2 + col1)];
unsigned char B42 = src[2+bpp*(row4*cols*2 + col2)];
unsigned char B43 = src[2+bpp*(row4*cols*2 + col3)];
unsigned char B44 = src[2+bpp*(row4*cols*2 + col4)];
# define RC(yx) R##yx*C##yx
# define GC(yx) G##yx*C##yx
# define BC(yx) B##yx*C##yx
short R0 = RC(00) + RC(01) + RC(02) + RC(03) + RC(04);
short R1 = RC(10) + RC(11) + RC(12) + RC(13) + RC(14);
short R2 = RC(20) + RC(21) + RC(22) + RC(23) + RC(24);
short R3 = RC(30) + RC(31) + RC(32) + RC(33) + RC(34);
short R4 = RC(40) + RC(41) + RC(42) + RC(43) + RC(44);
short G0 = GC(00) + GC(01) + GC(02) + GC(03) + GC(04);
short G1 = GC(10) + GC(11) + GC(12) + GC(13) + GC(14);
short G2 = GC(20) + GC(21) + GC(22) + GC(23) + GC(24);
short G3 = GC(30) + GC(31) + GC(32) + GC(33) + GC(34);
short G4 = GC(40) + GC(41) + GC(42) + GC(43) + GC(44);
short B0 = BC(00) + BC(01) + BC(02) + BC(03) + BC(04);
short B1 = BC(10) + BC(11) + BC(12) + BC(13) + BC(14);
short B2 = BC(20) + BC(21) + BC(22) + BC(23) + BC(24);
short B3 = BC(30) + BC(31) + BC(32) + BC(33) + BC(34);
short B4 = BC(40) + BC(41) + BC(42) + BC(43) + BC(44);
dst[dst_index+0] = (R0 + R1 + R2 + R3 + R4 + 128) >> 8;
dst[dst_index+1] = (G0 + G1 + G2 + G3 + G4 + 128) >> 8;
dst[dst_index+2] = (B0 + B1 + B2 + B3 + B4 + 128) >> 8;
}
__global__ void PyrUp(unsigned char* input, unsigned char* src, unsigned char* dst, int rows, int cols)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = (col + row * cols)*bpp;
row = row << 1;
col = col << 1;
//int col0 = max(col - 2, 0);
int col1 = max(col - 1, 0);
int col2 = col;
int col3 = min(col + 1, cols - 1);
//int col4 = min(col + 2, cols - 1);
//int row0 = max(row - 2, 0);
int row1 = max(row - 1, 0);
int row2 = row;
int row3 = min(row + 1, rows - 1);
//int row4 = min(row + 2, rows - 1);
//unsigned char R00 = src[bpp*(row0*cols * 2 + col0)];
//unsigned char R01 = src[bpp*(row0*cols * 2 + col1)];
//unsigned char R02 = src[bpp*(row0*cols * 2 + col2)];
//unsigned char R03 = src[bpp*(row0*cols * 2 + col3)];
//unsigned char R04 = src[bpp*(row0*cols * 2 + col4)];
//unsigned char R10 = src[bpp*(row1*cols * 2 + col0)];
unsigned char R11 = src[bpp*(row1*cols * 2 + col1)];
unsigned char R12 = src[bpp*(row1*cols * 2 + col2)];
unsigned char R13 = src[bpp*(row1*cols * 2 + col3)];
//unsigned char R14 = src[bpp*(row1*cols * 2 + col4)];
//unsigned char R20 = src[bpp*(row2*cols * 2 + col0)];
unsigned char R21 = src[bpp*(row2*cols * 2 + col1)];
unsigned char R22 = src[bpp*(row2*cols * 2 + col2)];
unsigned char R23 = src[bpp*(row2*cols * 2 + col3)];
//unsigned char R24 = src[bpp*(row2*cols * 2 + col4)];
//unsigned char R30 = src[bpp*(row3*cols * 2 + col0)];
unsigned char R31 = src[bpp*(row3*cols * 2 + col1)];
unsigned char R32 = src[bpp*(row3*cols * 2 + col2)];
unsigned char R33 = src[bpp*(row3*cols * 2 + col3)];
//unsigned char R34 = src[bpp*(row3*cols * 2 + col4)];
//unsigned char R40 = src[bpp*(row4*cols * 2 + col0)];
//unsigned char R41 = src[bpp*(row4*cols * 2 + col1)];
//unsigned char R42 = src[bpp*(row4*cols * 2 + col2)];
//unsigned char R43 = src[bpp*(row4*cols * 2 + col3)];
//unsigned char R44 = src[bpp*(row4*cols * 2 + col4)];
//unsigned char G00 = src[1 + bpp*(row0*cols * 2 + col0)];
//unsigned char G01 = src[1 + bpp*(row0*cols * 2 + col1)];
//unsigned char G02 = src[1 + bpp*(row0*cols * 2 + col2)];
//unsigned char G03 = src[1 + bpp*(row0*cols * 2 + col3)];
//unsigned char G04 = src[1 + bpp*(row0*cols * 2 + col4)];
//unsigned char G10 = src[1 + bpp*(row1*cols * 2 + col0)];
unsigned char G11 = src[1 + bpp*(row1*cols * 2 + col1)];
unsigned char G12 = src[1 + bpp*(row1*cols * 2 + col2)];
unsigned char G13 = src[1 + bpp*(row1*cols * 2 + col3)];
//unsigned char G14 = src[1 + bpp*(row1*cols * 2 + col4)];
//unsigned char G20 = src[1 + bpp*(row2*cols * 2 + col0)];
unsigned char G21 = src[1 + bpp*(row2*cols * 2 + col1)];
unsigned char G22 = src[1 + bpp*(row2*cols * 2 + col2)];
unsigned char G23 = src[1 + bpp*(row2*cols * 2 + col3)];
//unsigned char G24 = src[1 + bpp*(row2*cols * 2 + col4)];
//unsigned char G30 = src[1 + bpp*(row3*cols * 2 + col0)];
unsigned char G31 = src[1 + bpp*(row3*cols * 2 + col1)];
unsigned char G32 = src[1 + bpp*(row3*cols * 2 + col2)];
unsigned char G33 = src[1 + bpp*(row3*cols * 2 + col3)];
//unsigned char G34 = src[1 + bpp*(row3*cols * 2 + col4)];
//unsigned char G40 = src[1 + bpp*(row4*cols * 2 + col0)];
//unsigned char G41 = src[1 + bpp*(row4*cols * 2 + col1)];
//unsigned char G42 = src[1 + bpp*(row4*cols * 2 + col2)];
//unsigned char G43 = src[1 + bpp*(row4*cols * 2 + col3)];
//unsigned char G44 = src[1 + bpp*(row4*cols * 2 + col4)];
//unsigned char B00 = src[2 + bpp*(row0*cols * 2 + col0)];
//unsigned char B01 = src[2 + bpp*(row0*cols * 2 + col1)];
//unsigned char B02 = src[2 + bpp*(row0*cols * 2 + col2)];
//unsigned char B03 = src[2 + bpp*(row0*cols * 2 + col3)];
//unsigned char B04 = src[2 + bpp*(row0*cols * 2 + col4)];
//unsigned char B10 = src[2 + bpp*(row1*cols * 2 + col0)];
unsigned char B11 = src[2 + bpp*(row1*cols * 2 + col1)];
unsigned char B12 = src[2 + bpp*(row1*cols * 2 + col2)];
unsigned char B13 = src[2 + bpp*(row1*cols * 2 + col3)];
//unsigned char B14 = src[2 + bpp*(row1*cols * 2 + col4)];
//unsigned char B20 = src[2 + bpp*(row2*cols * 2 + col0)];
unsigned char B21 = src[2 + bpp*(row2*cols * 2 + col1)];
unsigned char B22 = src[2 + bpp*(row2*cols * 2 + col2)];
unsigned char B23 = src[2 + bpp*(row2*cols * 2 + col3)];
//unsigned char B24 = src[2 + bpp*(row2*cols * 2 + col4)];
//unsigned char B30 = src[2 + bpp*(row3*cols * 2 + col0)];
unsigned char B31 = src[2 + bpp*(row3*cols * 2 + col1)];
unsigned char B32 = src[2 + bpp*(row3*cols * 2 + col2)];
unsigned char B33 = src[2 + bpp*(row3*cols * 2 + col3)];
//unsigned char B34 = src[2 + bpp*(row3*cols * 2 + col4)];
//unsigned char B40 = src[2 + bpp*(row4*cols * 2 + col0)];
//unsigned char B41 = src[2 + bpp*(row4*cols * 2 + col1)];
//unsigned char B42 = src[2 + bpp*(row4*cols * 2 + col2)];
//unsigned char B43 = src[2 + bpp*(row4*cols * 2 + col3)];
//unsigned char B44 = src[2 + bpp*(row4*cols * 2 + col4)];
# define RC(yx) R##yx*C##yx
# define GC(yx) G##yx*C##yx
# define BC(yx) B##yx*C##yx
//short R0 = RC(00) + RC(01) + RC(02) + RC(03) + RC(04);
short R1 = /*RC(10) +*/ RC(11) + RC(12) + RC(13)/*+ RC(14)*/;
short R2 = /*RC(20) +*/ RC(21) + RC(22) + RC(23)/*+ RC(24)*/;
short R3 = /*RC(30) +*/ RC(31) + RC(32) + RC(33)/*+ RC(34)*/;
//short R4 = RC(40) + RC(41) + RC(42) + RC(43) + RC(44);
//short G0 = GC(00) + GC(01) + GC(02) + GC(03) + GC(04);
short G1 = /*GC(10) +*/ GC(11) + GC(12) + GC(13)/* + GC(14)*/;
short G2 = /*GC(20) +*/ GC(21) + GC(22) + GC(23)/* + GC(24)*/;
short G3 = /*GC(30) +*/ GC(31) + GC(32) + GC(33)/* + GC(34)*/;
//short G4 = GC(40) + GC(41) + GC(42) + GC(43) + GC(44);
//short B0 = BC(00) + BC(01) + BC(02) + BC(03) + BC(04);
short B1 = /*BC(10)*/ + BC(11) + BC(12) + BC(13)/* + BC(14)*/;
short B2 = /*BC(20)*/ + BC(21) + BC(22) + BC(23)/* + BC(24)*/;
short B3 = /*BC(30)*/ + BC(31) + BC(32) + BC(33)/* + BC(34)*/;
//short B4 = BC(40) + BC(41) + BC(42) + BC(43) + BC(44);
//dst[dst_index + 0] = (R0 + R1 + R2 + R3 + R4 + 128) >> 8;
//dst[dst_index + 1] = (G0 + G1 + G2 + G3 + G4 + 128) >> 8;
//dst[dst_index + 2] = (B0 + B1 + B2 + B3 + B4 + 128) >> 8;
short R = (R1 + R2 + R3 + 128 - input[dst_index + 0] + 128) >> 9;
short G = (G1 + G2 + G3 + 128 - input[dst_index + 1] + 128) >> 9;
short B = (B1 + B2 + B3 + 128 - input[dst_index + 2] + 128) >> 9;
dst[dst_index + 0] = R;
dst[dst_index + 1] = G;
dst[dst_index + 2] = B;
}
# define C0 1
# define C1 4
# define C2 6
# define C3 4
# define C4 1
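// The two kernels below form a separable 1-4-6-4-1 binomial (Gaussian-like) blur:
// BlurHorizontal filters along rows while halving the column count, then BlurVertical
// filters along columns while halving the row count, together approximating a 5x5
// Gaussian pyramid-down step.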
__global__ void BlurHorizontal(unsigned char* src, unsigned char* dst, int rows, int cols)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = (col + row * cols/2)*bpp;
//row = row << 1;
col = col << 1;
	int src_cols = cols;
int col0 = max(col - 2, 0);
int col1 = max(col - 1, 0);
int col2 = col;
int col3 = min(col + 1, cols - 1);
int col4 = min(col + 2, cols - 1);
unsigned char R0 = src[bpp*(row*src_cols + col0)];
unsigned char R1 = src[bpp*(row*src_cols + col1)];
unsigned char R2 = src[bpp*(row*src_cols + col2)];
unsigned char R3 = src[bpp*(row*src_cols + col3)];
unsigned char R4 = src[bpp*(row*src_cols + col4)];
unsigned char G0 = src[1 + bpp*(row*src_cols + col0)];
unsigned char G1 = src[1 + bpp*(row*src_cols + col1)];
unsigned char G2 = src[1 + bpp*(row*src_cols + col2)];
unsigned char G3 = src[1 + bpp*(row*src_cols + col3)];
unsigned char G4 = src[1 + bpp*(row*src_cols + col4)];
unsigned char B0 = src[2 + bpp*(row*src_cols + col0)];
unsigned char B1 = src[2 + bpp*(row*src_cols + col1)];
unsigned char B2 = src[2 + bpp*(row*src_cols + col2)];
unsigned char B3 = src[2 + bpp*(row*src_cols + col3)];
unsigned char B4 = src[2 + bpp*(row*src_cols + col4)];
dst[dst_index + 0] = min(255, (R0*C0 + R1*C1 + R2*C2 + R3*C3 + R4*C4 + 8) >> 4);
dst[dst_index + 1] = min(255, (G0*C0 + G1*C1 + G2*C2 + G3*C3 + G4*C4 + 8) >> 4);
dst[dst_index + 2] = min(255, (B0*C0 + B1*C1 + B2*C2 + B3*C3 + B4*C4 + 8) >> 4);
}
__global__ void BlurVertical(unsigned char* src, unsigned char* dst, int rows, int cols)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = (col + row * cols)*bpp;
row = row << 1;
//col = col << 1;
//int dst_rows = rows;
//int dst_cols = cols;
//int src_rows = rows * 2;
int src_cols = cols;
int row0 = max(row - 2, 0);
int row1 = max(row - 1, 0);
int row2 = row;
int row3 = min(row + 1, rows - 1);
int row4 = min(row + 2, rows - 1);
unsigned char R0 = src[bpp*(row0*src_cols + col)];
unsigned char R1 = src[bpp*(row1*src_cols + col)];
unsigned char R2 = src[bpp*(row2*src_cols + col)];
unsigned char R3 = src[bpp*(row3*src_cols + col)];
unsigned char R4 = src[bpp*(row4*src_cols + col)];
unsigned char G0 = src[1 + bpp*(row0*src_cols + col)];
unsigned char G1 = src[1 + bpp*(row1*src_cols + col)];
unsigned char G2 = src[1 + bpp*(row2*src_cols + col)];
unsigned char G3 = src[1 + bpp*(row3*src_cols + col)];
unsigned char G4 = src[1 + bpp*(row4*src_cols + col)];
unsigned char B0 = src[2 + bpp*(row0*src_cols + col)];
unsigned char B1 = src[2 + bpp*(row1*src_cols + col)];
unsigned char B2 = src[2 + bpp*(row2*src_cols + col)];
unsigned char B3 = src[2 + bpp*(row3*src_cols + col)];
unsigned char B4 = src[2 + bpp*(row4*src_cols + col)];
dst[dst_index + 0] = min(255, (R0*C0 + R1*C1 + R2*C2 + R3*C3 + R4*C4 + 8) >> 4);
dst[dst_index + 1] = min(255, (G0*C0 + G1*C1 + G2*C2 + G3*C3 + G4*C4 + 8) >> 4);
dst[dst_index + 2] = min(255, (B0*C0 + B1*C1 + B2*C2 + B3*C3 + B4*C4 + 8) >> 4);
}
int main()
{
int rows = 1300, cols = 1024, c = 3;
ReadTGA("src.tga", cols, rows, c, img);
dim3 threadsPerBlock(32, 32);
dim3 threadsPerBlockH(32, 32); //(16,64 ) = 10+ ms, (32, 32) = 8+ms, (64, 16) = 10+ms
dim3 threadsPerBlockV(512, 2);
dim3 numBlocks((cols/2) / threadsPerBlock.x, (rows/2) / threadsPerBlock.y);
dim3 numBlocksH((cols / 2) / threadsPerBlockH.x, (rows) / threadsPerBlockH.y);
dim3 numBlocksV((cols / 2) / threadsPerBlockV.x, (rows / 2) / threadsPerBlockV.y);
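	// Note: the grid sizes above use integer division, so they assume the image dimensions are
	// multiples of the block sizes (e.g. rows/2 divisible by 32 for numBlocks); otherwise the
	// rightmost/bottom pixels are simply not processed.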
//cv::cuda::GpuMat gpuOut;
g_time_logger.Start();
for (int i = 0; i < 10; i++)
{
for (int j = 0; j < 10; j++)
{
//Blur << < numBlocks, threadsPerBlock >> > (img, out, rows/2, cols/2);
BlurHorizontal << < numBlocksH, threadsPerBlockH >> > (img, outH, rows, cols);
hipDeviceSynchronize();
LOG(h_blur);
BlurVertical << < numBlocksV, threadsPerBlockV >> > (outH, outV, rows/2, cols/2);
hipDeviceSynchronize();
LOG(v_blur);
//PyrUp << < numBlocksV, threadsPerBlock >> > (img, outV, out, rows / 2, cols / 2);
//hipDeviceSynchronize();
//LOG(pyr_up);
//GpuMat(int rows, int cols, int type, void *data
//cuda::GpuMat gpuIn(rows, cols, CV_8UC3, img);
//cuda::pyrDown(gpuIn, gpuOut);
}
}
g_time_logger.Display();
DumpTga("blurH.tga", cols / 2, rows, bpp, outH);
DumpTga("blurV.tga", cols / 2, rows / 2, bpp, outV);
DumpTga("pyrUp.tga", cols, rows, bpp, out);
//DumpTga("copy.tga", cols, rows, bpp, img);
return 0;
}
|
fcf5c0bb1d50babc057d7a7d1145a0ee8c680d38.cu
|
// nvcc ./memcpy.cu -arch=sm_62 && ./a.out
# include <stdio.h>
# include <sys/time.h>
# include <cstdlib>
# include <iostream>
# include <ctime>
# include <vector>
# include <sys/types.h>
# include <cuda_runtime.h>
struct TimeLogger
{
struct EventPoint
{
int line_number;
std::string event_name;
timeval time;
};
timeval start;
std::vector<EventPoint> time_points;
void Start()
{
gettimeofday(&start, NULL);
time_points.clear();
}
void Log(int line, std::string name)
{
EventPoint ep;
ep.line_number = line;
ep.event_name = name;
gettimeofday(&ep.time, NULL);
time_points.push_back(ep);
}
double Diff_ms(timeval start1, timeval end)
{
double seconds = end.tv_sec - start1.tv_sec;
double useconds = end.tv_usec - start1.tv_usec;
double mtime = ((seconds)* 1000.0 + useconds / 1000.0) + 0.5;
return mtime;
}
double Display()
{
//std::cout << "\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n";
timeval last = start;
for (size_t i = 0; i < time_points.size(); i++)
{
double ms = Diff_ms(last, time_points[i].time);
double ams = Diff_ms(start, time_points[i].time);
std::cout << ams << " is " << " Accumulated time......." <<
ms << " ms for " << time_points[i].event_name << "\n";
last = time_points[i].time;
}
return Diff_ms(start, last);
}
};
TimeLogger g_time_logger;
# define LOG(x) g_time_logger.Log(__LINE__, #x)
FILE* Open(const char* fileName, const char* mode)
{
# ifdef _WIN32
FILE* fp = 0;
fopen_s(&fp, fileName, mode);
return fp;
# else
return fopen(fileName, mode);
# endif
}
void DumpTga(const char* fileName, int width, int height, int bpp, unsigned char* data)
{
unsigned char header[18] = { 0 };
for (int i = 0; i < 18; i++) header[i] = 0;
header[2] = (bpp == 1) ? 3 : 2;
header[12] = width & 255;
header[13] = width >> 8;
header[14] = height & 255;
header[15] = height >> 8;
header[16] = bpp * 8;
FILE* fp = Open(fileName, "wb");
fwrite(header, 18, 1, fp);
fwrite(data, width*height, bpp, fp);
fclose(fp);
}
unsigned char* ReadTGA(const char* fileName, int& width, int &height, int &bpp, unsigned char* pixels = 0)
{
width = height = bpp = 0;
FILE* fp = Open(fileName, "rb");
if (!fp) return pixels;
unsigned char header[18] = { 0 };
fread(header, 18, 1, fp);
if (header[2] != 2 && header[2] != 3)
return pixels;
width = header[12] + header[13] * 256;
height = header[14] + header[15] * 256;
bpp = header[16] >> 3;
//unsigned char* pixels = 0;
if(!pixels)
pixels = new unsigned char[width*height*bpp];
fread(pixels, 76, 1, fp);
if (pixels[0] == 'R'
&& pixels[1] == 'e'
&& pixels[2] == 'n'
&& pixels[3] == 'd'
&& pixels[4] == 'e'
&& pixels[5] == 'r'
)
{
fread(pixels, width*height, bpp, fp);
}
else
{
fread(pixels + 76, width*height*bpp - 76, 1, fp);
}
fclose(fp);
return pixels;
}
const int width = (1 << 12);
const int height = width / 2;
const int bpp = 3;
__device__ __managed__ unsigned char img[width*height*bpp];
__device__ __managed__ unsigned char outH[width*height*bpp];
__device__ __managed__ unsigned char outV[width*height*bpp];
__device__ __managed__ unsigned char out[width*height*bpp];
__global__ void Gradient(unsigned char* img, int x, int y)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int index = (col + row*width)*bpp;
col = (col + x) % width;
row = (row + y) % height;
unsigned char r = col & 255;
unsigned char g = row & 255;
unsigned char b = ((row >> 8) << 4) | (col >> 8);
img[index] = b;
img[index+1] = g;
img[index+2] = r;
}
__global__ void DownScale(unsigned char* src, unsigned char* dst, int rows, int cols)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = (col + row * cols)*bpp;
row = row << 1;
col = col << 1;
int src_index = (col + row * cols*2)*bpp;
unsigned char b = src[src_index];
unsigned char g = src[src_index + 1];
unsigned char r = src[src_index + 2];
dst[dst_index] = (256+b)&255;
dst[dst_index + 1] = g;
dst[dst_index + 2] = r;
}
__global__ void Blur(unsigned char* src, unsigned char* dst, int rows, int cols)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = (col + row * cols)*bpp;
row = row << 1;
col = col << 1;
int col0 = max(col - 2, 0);
int col1 = max(col - 1, 0);
int col2 = col;
int col3 = min(col + 1, cols - 1);
int col4 = min(col + 2, cols - 1);
int row0 = max(row - 2, 0);
int row1 = max(row - 1, 0);
int row2 = row;
int row3 = min(row + 1, rows - 1);
int row4 = min(row + 2, rows - 1);
# define C00 1
# define C01 4
# define C02 6
# define C03 4
# define C04 1
# define C10 4
# define C11 16
# define C12 24
# define C13 16
# define C14 4
# define C20 6
# define C21 24
# define C22 36
# define C23 24
# define C24 6
# define C30 4
# define C31 16
# define C32 24
# define C33 16
# define C34 4
# define C40 1
# define C41 4
# define C42 6
# define C43 4
# define C44 1
/*
# define R(x,y) src[bpp*(row##y*cols + col##x )]
# define G(x,y) src[bpp*(row##y*cols + col##x ) + 1]
# define B(x,y) src[bpp*(row##y*cols + col##x ) + 2]
# define DEF(c, y, x) unsigned char c##y##x = c(x,y)
# define PIX(y, x) DEF(R,y,x); DEF(G,y,x); DEF(B, y,x);
PIX(0, 0); PIX(0, 1); PIX(0, 2); PIX(0, 3); PIX(0, 4);
PIX(1, 0); PIX(1, 1); PIX(1, 2); PIX(1, 3); PIX(1, 4);
PIX(2, 0); PIX(2, 1); PIX(2, 2); PIX(2, 3); PIX(2, 4);
PIX(3, 0); PIX(3, 1); PIX(3, 2); PIX(3, 3); PIX(3, 4);
PIX(4, 0); PIX(4, 1); PIX(4, 2); PIX(4, 3); PIX(4, 4);*/
unsigned char R00 = src[bpp*(row0*cols*2 + col0)];
unsigned char R01 = src[bpp*(row0*cols*2 + col1)];
unsigned char R02 = src[bpp*(row0*cols*2 + col2)];
unsigned char R03 = src[bpp*(row0*cols*2 + col3)];
unsigned char R04 = src[bpp*(row0*cols*2 + col4)];
unsigned char R10 = src[bpp*(row1*cols*2 + col0)];
unsigned char R11 = src[bpp*(row1*cols*2 + col1)];
unsigned char R12 = src[bpp*(row1*cols*2 + col2)];
unsigned char R13 = src[bpp*(row1*cols*2 + col3)];
unsigned char R14 = src[bpp*(row1*cols*2 + col4)];
unsigned char R20 = src[bpp*(row2*cols*2 + col0)];
unsigned char R21 = src[bpp*(row2*cols*2 + col1)];
unsigned char R22 = src[bpp*(row2*cols*2 + col2)];
unsigned char R23 = src[bpp*(row2*cols*2 + col3)];
unsigned char R24 = src[bpp*(row2*cols*2 + col4)];
unsigned char R30 = src[bpp*(row3*cols*2 + col0)];
unsigned char R31 = src[bpp*(row3*cols*2 + col1)];
unsigned char R32 = src[bpp*(row3*cols*2 + col2)];
unsigned char R33 = src[bpp*(row3*cols*2 + col3)];
unsigned char R34 = src[bpp*(row3*cols*2 + col4)];
unsigned char R40 = src[bpp*(row4*cols*2 + col0)];
unsigned char R41 = src[bpp*(row4*cols*2 + col1)];
unsigned char R42 = src[bpp*(row4*cols*2 + col2)];
unsigned char R43 = src[bpp*(row4*cols*2 + col3)];
unsigned char R44 = src[bpp*(row4*cols*2 + col4)];
unsigned char G00 = src[1+bpp*(row0*cols*2 + col0)];
unsigned char G01 = src[1+bpp*(row0*cols*2 + col1)];
unsigned char G02 = src[1+bpp*(row0*cols*2 + col2)];
unsigned char G03 = src[1+bpp*(row0*cols*2 + col3)];
unsigned char G04 = src[1+bpp*(row0*cols*2 + col4)];
unsigned char G10 = src[1+bpp*(row1*cols*2 + col0)];
unsigned char G11 = src[1+bpp*(row1*cols*2 + col1)];
unsigned char G12 = src[1+bpp*(row1*cols*2 + col2)];
unsigned char G13 = src[1+bpp*(row1*cols*2 + col3)];
unsigned char G14 = src[1+bpp*(row1*cols*2 + col4)];
unsigned char G20 = src[1+bpp*(row2*cols*2 + col0)];
unsigned char G21 = src[1+bpp*(row2*cols*2 + col1)];
unsigned char G22 = src[1+bpp*(row2*cols*2 + col2)];
unsigned char G23 = src[1+bpp*(row2*cols*2 + col3)];
unsigned char G24 = src[1+bpp*(row2*cols*2 + col4)];
unsigned char G30 = src[1+bpp*(row3*cols*2 + col0)];
unsigned char G31 = src[1+bpp*(row3*cols*2 + col1)];
unsigned char G32 = src[1+bpp*(row3*cols*2 + col2)];
unsigned char G33 = src[1+bpp*(row3*cols*2 + col3)];
unsigned char G34 = src[1+bpp*(row3*cols*2 + col4)];
unsigned char G40 = src[1+bpp*(row4*cols*2 + col0)];
unsigned char G41 = src[1+bpp*(row4*cols*2 + col1)];
unsigned char G42 = src[1+bpp*(row4*cols*2 + col2)];
unsigned char G43 = src[1+bpp*(row4*cols*2 + col3)];
unsigned char G44 = src[1+bpp*(row4*cols*2 + col4)];
unsigned char B00 = src[2+bpp*(row0*cols*2 + col0)];
unsigned char B01 = src[2+bpp*(row0*cols*2 + col1)];
unsigned char B02 = src[2+bpp*(row0*cols*2 + col2)];
unsigned char B03 = src[2+bpp*(row0*cols*2 + col3)];
unsigned char B04 = src[2+bpp*(row0*cols*2 + col4)];
unsigned char B10 = src[2+bpp*(row1*cols*2 + col0)];
unsigned char B11 = src[2+bpp*(row1*cols*2 + col1)];
unsigned char B12 = src[2+bpp*(row1*cols*2 + col2)];
unsigned char B13 = src[2+bpp*(row1*cols*2 + col3)];
unsigned char B14 = src[2+bpp*(row1*cols*2 + col4)];
unsigned char B20 = src[2+bpp*(row2*cols*2 + col0)];
unsigned char B21 = src[2+bpp*(row2*cols*2 + col1)];
unsigned char B22 = src[2+bpp*(row2*cols*2 + col2)];
unsigned char B23 = src[2+bpp*(row2*cols*2 + col3)];
unsigned char B24 = src[2+bpp*(row2*cols*2 + col4)];
unsigned char B30 = src[2+bpp*(row3*cols*2 + col0)];
unsigned char B31 = src[2+bpp*(row3*cols*2 + col1)];
unsigned char B32 = src[2+bpp*(row3*cols*2 + col2)];
unsigned char B33 = src[2+bpp*(row3*cols*2 + col3)];
unsigned char B34 = src[2+bpp*(row3*cols*2 + col4)];
unsigned char B40 = src[2+bpp*(row4*cols*2 + col0)];
unsigned char B41 = src[2+bpp*(row4*cols*2 + col1)];
unsigned char B42 = src[2+bpp*(row4*cols*2 + col2)];
unsigned char B43 = src[2+bpp*(row4*cols*2 + col3)];
unsigned char B44 = src[2+bpp*(row4*cols*2 + col4)];
# define RC(yx) R##yx*C##yx
# define GC(yx) G##yx*C##yx
# define BC(yx) B##yx*C##yx
short R0 = RC(00) + RC(01) + RC(02) + RC(03) + RC(04);
short R1 = RC(10) + RC(11) + RC(12) + RC(13) + RC(14);
short R2 = RC(20) + RC(21) + RC(22) + RC(23) + RC(24);
short R3 = RC(30) + RC(31) + RC(32) + RC(33) + RC(34);
short R4 = RC(40) + RC(41) + RC(42) + RC(43) + RC(44);
short G0 = GC(00) + GC(01) + GC(02) + GC(03) + GC(04);
short G1 = GC(10) + GC(11) + GC(12) + GC(13) + GC(14);
short G2 = GC(20) + GC(21) + GC(22) + GC(23) + GC(24);
short G3 = GC(30) + GC(31) + GC(32) + GC(33) + GC(34);
short G4 = GC(40) + GC(41) + GC(42) + GC(43) + GC(44);
short B0 = BC(00) + BC(01) + BC(02) + BC(03) + BC(04);
short B1 = BC(10) + BC(11) + BC(12) + BC(13) + BC(14);
short B2 = BC(20) + BC(21) + BC(22) + BC(23) + BC(24);
short B3 = BC(30) + BC(31) + BC(32) + BC(33) + BC(34);
short B4 = BC(40) + BC(41) + BC(42) + BC(43) + BC(44);
dst[dst_index+0] = (R0 + R1 + R2 + R3 + R4 + 128) >> 8;
dst[dst_index+1] = (G0 + G1 + G2 + G3 + G4 + 128) >> 8;
dst[dst_index+2] = (B0 + B1 + B2 + B3 + B4 + 128) >> 8;
}
__global__ void PyrUp(unsigned char* input, unsigned char* src, unsigned char* dst, int rows, int cols)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = (col + row * cols)*bpp;
row = row << 1;
col = col << 1;
//int col0 = max(col - 2, 0);
int col1 = max(col - 1, 0);
int col2 = col;
int col3 = min(col + 1, cols - 1);
//int col4 = min(col + 2, cols - 1);
//int row0 = max(row - 2, 0);
int row1 = max(row - 1, 0);
int row2 = row;
int row3 = min(row + 1, rows - 1);
//int row4 = min(row + 2, rows - 1);
//unsigned char R00 = src[bpp*(row0*cols * 2 + col0)];
//unsigned char R01 = src[bpp*(row0*cols * 2 + col1)];
//unsigned char R02 = src[bpp*(row0*cols * 2 + col2)];
//unsigned char R03 = src[bpp*(row0*cols * 2 + col3)];
//unsigned char R04 = src[bpp*(row0*cols * 2 + col4)];
//unsigned char R10 = src[bpp*(row1*cols * 2 + col0)];
unsigned char R11 = src[bpp*(row1*cols * 2 + col1)];
unsigned char R12 = src[bpp*(row1*cols * 2 + col2)];
unsigned char R13 = src[bpp*(row1*cols * 2 + col3)];
//unsigned char R14 = src[bpp*(row1*cols * 2 + col4)];
//unsigned char R20 = src[bpp*(row2*cols * 2 + col0)];
unsigned char R21 = src[bpp*(row2*cols * 2 + col1)];
unsigned char R22 = src[bpp*(row2*cols * 2 + col2)];
unsigned char R23 = src[bpp*(row2*cols * 2 + col3)];
//unsigned char R24 = src[bpp*(row2*cols * 2 + col4)];
//unsigned char R30 = src[bpp*(row3*cols * 2 + col0)];
unsigned char R31 = src[bpp*(row3*cols * 2 + col1)];
unsigned char R32 = src[bpp*(row3*cols * 2 + col2)];
unsigned char R33 = src[bpp*(row3*cols * 2 + col3)];
//unsigned char R34 = src[bpp*(row3*cols * 2 + col4)];
//unsigned char R40 = src[bpp*(row4*cols * 2 + col0)];
//unsigned char R41 = src[bpp*(row4*cols * 2 + col1)];
//unsigned char R42 = src[bpp*(row4*cols * 2 + col2)];
//unsigned char R43 = src[bpp*(row4*cols * 2 + col3)];
//unsigned char R44 = src[bpp*(row4*cols * 2 + col4)];
//unsigned char G00 = src[1 + bpp*(row0*cols * 2 + col0)];
//unsigned char G01 = src[1 + bpp*(row0*cols * 2 + col1)];
//unsigned char G02 = src[1 + bpp*(row0*cols * 2 + col2)];
//unsigned char G03 = src[1 + bpp*(row0*cols * 2 + col3)];
//unsigned char G04 = src[1 + bpp*(row0*cols * 2 + col4)];
//unsigned char G10 = src[1 + bpp*(row1*cols * 2 + col0)];
unsigned char G11 = src[1 + bpp*(row1*cols * 2 + col1)];
unsigned char G12 = src[1 + bpp*(row1*cols * 2 + col2)];
unsigned char G13 = src[1 + bpp*(row1*cols * 2 + col3)];
//unsigned char G14 = src[1 + bpp*(row1*cols * 2 + col4)];
//unsigned char G20 = src[1 + bpp*(row2*cols * 2 + col0)];
unsigned char G21 = src[1 + bpp*(row2*cols * 2 + col1)];
unsigned char G22 = src[1 + bpp*(row2*cols * 2 + col2)];
unsigned char G23 = src[1 + bpp*(row2*cols * 2 + col3)];
//unsigned char G24 = src[1 + bpp*(row2*cols * 2 + col4)];
//unsigned char G30 = src[1 + bpp*(row3*cols * 2 + col0)];
unsigned char G31 = src[1 + bpp*(row3*cols * 2 + col1)];
unsigned char G32 = src[1 + bpp*(row3*cols * 2 + col2)];
unsigned char G33 = src[1 + bpp*(row3*cols * 2 + col3)];
//unsigned char G34 = src[1 + bpp*(row3*cols * 2 + col4)];
//unsigned char G40 = src[1 + bpp*(row4*cols * 2 + col0)];
//unsigned char G41 = src[1 + bpp*(row4*cols * 2 + col1)];
//unsigned char G42 = src[1 + bpp*(row4*cols * 2 + col2)];
//unsigned char G43 = src[1 + bpp*(row4*cols * 2 + col3)];
//unsigned char G44 = src[1 + bpp*(row4*cols * 2 + col4)];
//unsigned char B00 = src[2 + bpp*(row0*cols * 2 + col0)];
//unsigned char B01 = src[2 + bpp*(row0*cols * 2 + col1)];
//unsigned char B02 = src[2 + bpp*(row0*cols * 2 + col2)];
//unsigned char B03 = src[2 + bpp*(row0*cols * 2 + col3)];
//unsigned char B04 = src[2 + bpp*(row0*cols * 2 + col4)];
//unsigned char B10 = src[2 + bpp*(row1*cols * 2 + col0)];
unsigned char B11 = src[2 + bpp*(row1*cols * 2 + col1)];
unsigned char B12 = src[2 + bpp*(row1*cols * 2 + col2)];
unsigned char B13 = src[2 + bpp*(row1*cols * 2 + col3)];
//unsigned char B14 = src[2 + bpp*(row1*cols * 2 + col4)];
//unsigned char B20 = src[2 + bpp*(row2*cols * 2 + col0)];
unsigned char B21 = src[2 + bpp*(row2*cols * 2 + col1)];
unsigned char B22 = src[2 + bpp*(row2*cols * 2 + col2)];
unsigned char B23 = src[2 + bpp*(row2*cols * 2 + col3)];
//unsigned char B24 = src[2 + bpp*(row2*cols * 2 + col4)];
//unsigned char B30 = src[2 + bpp*(row3*cols * 2 + col0)];
unsigned char B31 = src[2 + bpp*(row3*cols * 2 + col1)];
unsigned char B32 = src[2 + bpp*(row3*cols * 2 + col2)];
unsigned char B33 = src[2 + bpp*(row3*cols * 2 + col3)];
//unsigned char B34 = src[2 + bpp*(row3*cols * 2 + col4)];
//unsigned char B40 = src[2 + bpp*(row4*cols * 2 + col0)];
//unsigned char B41 = src[2 + bpp*(row4*cols * 2 + col1)];
//unsigned char B42 = src[2 + bpp*(row4*cols * 2 + col2)];
//unsigned char B43 = src[2 + bpp*(row4*cols * 2 + col3)];
//unsigned char B44 = src[2 + bpp*(row4*cols * 2 + col4)];
# define RC(yx) R##yx*C##yx
# define GC(yx) G##yx*C##yx
# define BC(yx) B##yx*C##yx
//short R0 = RC(00) + RC(01) + RC(02) + RC(03) + RC(04);
short R1 = /*RC(10) +*/ RC(11) + RC(12) + RC(13)/*+ RC(14)*/;
short R2 = /*RC(20) +*/ RC(21) + RC(22) + RC(23)/*+ RC(24)*/;
short R3 = /*RC(30) +*/ RC(31) + RC(32) + RC(33)/*+ RC(34)*/;
//short R4 = RC(40) + RC(41) + RC(42) + RC(43) + RC(44);
//short G0 = GC(00) + GC(01) + GC(02) + GC(03) + GC(04);
short G1 = /*GC(10) +*/ GC(11) + GC(12) + GC(13)/* + GC(14)*/;
short G2 = /*GC(20) +*/ GC(21) + GC(22) + GC(23)/* + GC(24)*/;
short G3 = /*GC(30) +*/ GC(31) + GC(32) + GC(33)/* + GC(34)*/;
//short G4 = GC(40) + GC(41) + GC(42) + GC(43) + GC(44);
//short B0 = BC(00) + BC(01) + BC(02) + BC(03) + BC(04);
short B1 = /*BC(10)*/ + BC(11) + BC(12) + BC(13)/* + BC(14)*/;
short B2 = /*BC(20)*/ + BC(21) + BC(22) + BC(23)/* + BC(24)*/;
short B3 = /*BC(30)*/ + BC(31) + BC(32) + BC(33)/* + BC(34)*/;
//short B4 = BC(40) + BC(41) + BC(42) + BC(43) + BC(44);
//dst[dst_index + 0] = (R0 + R1 + R2 + R3 + R4 + 128) >> 8;
//dst[dst_index + 1] = (G0 + G1 + G2 + G3 + G4 + 128) >> 8;
//dst[dst_index + 2] = (B0 + B1 + B2 + B3 + B4 + 128) >> 8;
short R = (R1 + R2 + R3 + 128 - input[dst_index + 0] + 128) >> 9;
short G = (G1 + G2 + G3 + 128 - input[dst_index + 1] + 128) >> 9;
short B = (B1 + B2 + B3 + 128 - input[dst_index + 2] + 128) >> 9;
dst[dst_index + 0] = R;
dst[dst_index + 1] = G;
dst[dst_index + 2] = B;
}
# define C0 1
# define C1 4
# define C2 6
# define C3 4
# define C4 1
__global__ void BlurHorizontal(unsigned char* src, unsigned char* dst, int rows, int cols)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = (col + row * cols/2)*bpp;
//row = row << 1;
col = col << 1;
	int src_cols = cols;
int col0 = max(col - 2, 0);
int col1 = max(col - 1, 0);
int col2 = col;
int col3 = min(col + 1, cols - 1);
int col4 = min(col + 2, cols - 1);
unsigned char R0 = src[bpp*(row*src_cols + col0)];
unsigned char R1 = src[bpp*(row*src_cols + col1)];
unsigned char R2 = src[bpp*(row*src_cols + col2)];
unsigned char R3 = src[bpp*(row*src_cols + col3)];
unsigned char R4 = src[bpp*(row*src_cols + col4)];
unsigned char G0 = src[1 + bpp*(row*src_cols + col0)];
unsigned char G1 = src[1 + bpp*(row*src_cols + col1)];
unsigned char G2 = src[1 + bpp*(row*src_cols + col2)];
unsigned char G3 = src[1 + bpp*(row*src_cols + col3)];
unsigned char G4 = src[1 + bpp*(row*src_cols + col4)];
unsigned char B0 = src[2 + bpp*(row*src_cols + col0)];
unsigned char B1 = src[2 + bpp*(row*src_cols + col1)];
unsigned char B2 = src[2 + bpp*(row*src_cols + col2)];
unsigned char B3 = src[2 + bpp*(row*src_cols + col3)];
unsigned char B4 = src[2 + bpp*(row*src_cols + col4)];
dst[dst_index + 0] = min(255, (R0*C0 + R1*C1 + R2*C2 + R3*C3 + R4*C4 + 8) >> 4);
dst[dst_index + 1] = min(255, (G0*C0 + G1*C1 + G2*C2 + G3*C3 + G4*C4 + 8) >> 4);
dst[dst_index + 2] = min(255, (B0*C0 + B1*C1 + B2*C2 + B3*C3 + B4*C4 + 8) >> 4);
}
__global__ void BlurVertical(unsigned char* src, unsigned char* dst, int rows, int cols)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = (col + row * cols)*bpp;
row = row << 1;
//col = col << 1;
//int dst_rows = rows;
//int dst_cols = cols;
//int src_rows = rows * 2;
int src_cols = cols;
int row0 = max(row - 2, 0);
int row1 = max(row - 1, 0);
int row2 = row;
int row3 = min(row + 1, rows - 1);
int row4 = min(row + 2, rows - 1);
unsigned char R0 = src[bpp*(row0*src_cols + col)];
unsigned char R1 = src[bpp*(row1*src_cols + col)];
unsigned char R2 = src[bpp*(row2*src_cols + col)];
unsigned char R3 = src[bpp*(row3*src_cols + col)];
unsigned char R4 = src[bpp*(row4*src_cols + col)];
unsigned char G0 = src[1 + bpp*(row0*src_cols + col)];
unsigned char G1 = src[1 + bpp*(row1*src_cols + col)];
unsigned char G2 = src[1 + bpp*(row2*src_cols + col)];
unsigned char G3 = src[1 + bpp*(row3*src_cols + col)];
unsigned char G4 = src[1 + bpp*(row4*src_cols + col)];
unsigned char B0 = src[2 + bpp*(row0*src_cols + col)];
unsigned char B1 = src[2 + bpp*(row1*src_cols + col)];
unsigned char B2 = src[2 + bpp*(row2*src_cols + col)];
unsigned char B3 = src[2 + bpp*(row3*src_cols + col)];
unsigned char B4 = src[2 + bpp*(row4*src_cols + col)];
dst[dst_index + 0] = min(255, (R0*C0 + R1*C1 + R2*C2 + R3*C3 + R4*C4 + 8) >> 4);
dst[dst_index + 1] = min(255, (G0*C0 + G1*C1 + G2*C2 + G3*C3 + G4*C4 + 8) >> 4);
dst[dst_index + 2] = min(255, (B0*C0 + B1*C1 + B2*C2 + B3*C3 + B4*C4 + 8) >> 4);
}
int main()
{
int rows = 1300, cols = 1024, c = 3;
ReadTGA("src.tga", cols, rows, c, img);
dim3 threadsPerBlock(32, 32);
dim3 threadsPerBlockH(32, 32); //(16,64 ) = 10+ ms, (32, 32) = 8+ms, (64, 16) = 10+ms
dim3 threadsPerBlockV(512, 2);
dim3 numBlocks((cols/2) / threadsPerBlock.x, (rows/2) / threadsPerBlock.y);
dim3 numBlocksH((cols / 2) / threadsPerBlockH.x, (rows) / threadsPerBlockH.y);
dim3 numBlocksV((cols / 2) / threadsPerBlockV.x, (rows / 2) / threadsPerBlockV.y);
//cv::cuda::GpuMat gpuOut;
g_time_logger.Start();
for (int i = 0; i < 10; i++)
{
for (int j = 0; j < 10; j++)
{
//Blur << < numBlocks, threadsPerBlock >> > (img, out, rows/2, cols/2);
BlurHorizontal << < numBlocksH, threadsPerBlockH >> > (img, outH, rows, cols);
cudaDeviceSynchronize();
LOG(h_blur);
BlurVertical << < numBlocksV, threadsPerBlockV >> > (outH, outV, rows/2, cols/2);
cudaDeviceSynchronize();
LOG(v_blur);
//PyrUp << < numBlocksV, threadsPerBlock >> > (img, outV, out, rows / 2, cols / 2);
//cudaDeviceSynchronize();
//LOG(pyr_up);
//GpuMat(int rows, int cols, int type, void *data
//cuda::GpuMat gpuIn(rows, cols, CV_8UC3, img);
//cuda::pyrDown(gpuIn, gpuOut);
}
}
g_time_logger.Display();
DumpTga("blurH.tga", cols / 2, rows, bpp, outH);
DumpTga("blurV.tga", cols / 2, rows / 2, bpp, outV);
DumpTga("pyrUp.tga", cols, rows, bpp, out);
//DumpTga("copy.tga", cols, rows, bpp, img);
return 0;
}
|
942fab59243e8755d1e0b85070a3ef65045c27e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <algorithm>
#include <cstdlib>
int main(void)
{
unsigned int size = 4096*4096;
thrust::host_vector<int32_t> input_host(size);
for(int i=0; i < size; i++){
input_host[i] = i;
}
//std::generate(input_host.begin(), input_host.end(), rand);
for(int i=0; i < 100; i++){
printf("%d,", input_host[i]);
}
printf("\n");
	// transfer to device and compute an exclusive prefix scan
thrust::device_vector<int32_t> input_device = input_host;
thrust::device_vector<int32_t> output_device(size);
thrust::plus<int32_t> binary_op;
	thrust::exclusive_scan(input_device.begin(), input_device.end(), output_device.begin(), 0, binary_op); // scan into output_device
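	// e.g. for input 0,1,2,3,... an exclusive scan yields 0,0,1,3,6,... (each output is the
	// sum of all preceding inputs), which matches the first values printed below.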
thrust::host_vector<int32_t> output_host = output_device;
for(int i=0; i < 100; i++){
printf("%d,", output_host[i]);
}
printf("\n");
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
for(int i=0; i < 100; i++){
//int x = thrust::reduce(input_device.begin(), input_device.end(), 0, binary_op);
		thrust::exclusive_scan(input_device.begin(), input_device.end(), output_device.begin(), 0, binary_op); // scan into output_device
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime , start, stop);
printf("Avg. time is %f ms\n", elapsedTime/100);
return 0;
}
|
942fab59243e8755d1e0b85070a3ef65045c27e5.cu
|
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <algorithm>
#include <cstdlib>
int main(void)
{
unsigned int size = 4096*4096;
thrust::host_vector<int32_t> input_host(size);
for(int i=0; i < size; i++){
input_host[i] = i;
}
//std::generate(input_host.begin(), input_host.end(), rand);
for(int i=0; i < 100; i++){
printf("%d,", input_host[i]);
}
printf("\n");
	// transfer to device and compute an exclusive prefix scan
thrust::device_vector<int32_t> input_device = input_host;
thrust::device_vector<int32_t> output_device(size);
thrust::plus<int32_t> binary_op;
	thrust::exclusive_scan(input_device.begin(), input_device.end(), output_device.begin(), 0, binary_op); // scan into output_device
thrust::host_vector<int32_t> output_host = output_device;
for(int i=0; i < 100; i++){
printf("%d,", output_host[i]);
}
printf("\n");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for(int i=0; i < 100; i++){
//int x = thrust::reduce(input_device.begin(), input_device.end(), 0, binary_op);
		thrust::exclusive_scan(input_device.begin(), input_device.end(), output_device.begin(), 0, binary_op); // scan into output_device
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime , start, stop);
printf("Avg. time is %f ms\n", elapsedTime/100);
return 0;
}
|
faadc7d5f530ffc563c55be6c592790bf80ec0dd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "cudaCommon.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// Input and result
if((nlhs != 1) || (nrhs != 4)) { mexErrMsgTxt("Form: halo = GPU_dbghalo(GPU_Type, direction, depth, side)"); }
int returnCode = CHECK_CUDA_ERROR("entering GPU_test");
if(returnCode != SUCCESSFUL) return;
int dir = (int)*mxGetPr(prhs[1]);
int depth=(int)*mxGetPr(prhs[2]);
int side =(int)*mxGetPr(prhs[3]);
MGArray thetag;
returnCode = MGA_accessMatlabArrays(prhs, 0, 0, &thetag);
if(returnCode != SUCCESSFUL) {
CHECK_IMOGEN_ERROR(returnCode);
return;
}
MGArray skel = thetag;
skel.dim[dir-1] = depth;
MGArray *haloinfo;
returnCode = MGA_allocArrays(&haloinfo, 1, &skel);
returnCode = MGA_wholeFaceToLinear(&thetag, dir, side, 0, depth, &haloinfo->devicePtr[0]);
MGA_returnOneArray(plhs, haloinfo);
free(haloinfo);
CHECK_IMOGEN_ERROR(returnCode);
return;
}
|
faadc7d5f530ffc563c55be6c592790bf80ec0dd.cu
|
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
#include "cudaCommon.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// Input and result
if((nlhs != 1) || (nrhs != 4)) { mexErrMsgTxt("Form: halo = GPU_dbghalo(GPU_Type, direction, depth, side)"); }
int returnCode = CHECK_CUDA_ERROR("entering GPU_test");
if(returnCode != SUCCESSFUL) return;
int dir = (int)*mxGetPr(prhs[1]);
int depth=(int)*mxGetPr(prhs[2]);
int side =(int)*mxGetPr(prhs[3]);
MGArray thetag;
returnCode = MGA_accessMatlabArrays(prhs, 0, 0, &thetag);
if(returnCode != SUCCESSFUL) {
CHECK_IMOGEN_ERROR(returnCode);
return;
}
MGArray skel = thetag;
skel.dim[dir-1] = depth;
MGArray *haloinfo;
returnCode = MGA_allocArrays(&haloinfo, 1, &skel);
returnCode = MGA_wholeFaceToLinear(&thetag, dir, side, 0, depth, &haloinfo->devicePtr[0]);
MGA_returnOneArray(plhs, haloinfo);
free(haloinfo);
CHECK_IMOGEN_ERROR(returnCode);
return;
}
|
168a33d76d3e02bfe199763913f9e9f05883d250.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/cross_entropy_op.h"
#include "caffe2/operators/operator_fallback_gpu.h"
namespace caffe2 {
namespace {
__global__ void LabelCrossEntropyKernel(
const int N, const int D, const float* Xdata, const int* labeldata,
const float log_threshold, float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
Ydata[i] = -logf(fmaxf(Xdata[i * D + labeldata[i]], log_threshold));
}
}
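// The kernel above computes, per row i, Y[i] = -log(max(X[i, label[i]], log_threshold)),
// i.e. the negative log-likelihood of the labeled class with a floor to avoid log(0).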
__global__ void LabelCrossEntropyGradientKernel(
const int N, const int D, const float* Xdata, const int* labeldata,
const float* dYdata, const float log_threshold, float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = - dYdata[i] / fmaxf(Xdata[idx], log_threshold);
}
}
} // namespace
template <>
bool LabelCrossEntropyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& label = Input(1);
auto* Y = Output(0);
int N, D;
if (X.ndim() > 1) {
N = X.dim32(0);
D = X.size_from_dim(1);
} else {
N = 1;
D = X.dim32(0);
}
CAFFE_ENFORCE(
(label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
CAFFE_ENFORCE_EQ(label.dim32(0), N);
Y->Resize(vector<int64_t>(size_t(1), N));
hipLaunchKernelGGL(( LabelCrossEntropyKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
X.data<float>(),
label.data<int>(),
kLOG_THRESHOLD(),
Y->template mutable_data<float>());
return true;
}
template <>
bool LabelCrossEntropyGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& label = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
int N, D;
if (X.ndim() > 1) {
N = X.dim32(0);
D = X.size_from_dim(1);
} else {
N = 1;
D = X.dim32(0);
}
CAFFE_ENFORCE(
(label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
CAFFE_ENFORCE_EQ(label.dim32(0), N);
CAFFE_ENFORCE_EQ(dY.ndim(), 1);
CAFFE_ENFORCE_EQ(dY.dim32(0), N);
dX->ResizeLike(X);
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->template mutable_data<float>(), &context_);
hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
X.data<float>(),
label.data<int>(),
dY.data<float>(),
kLOG_THRESHOLD(),
dX->template mutable_data<float>());
return true;
}
namespace {
__global__ void MakeTwoClassKernel(
const int N, const float* Xdata, float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
Ydata[i * 2] = 1.0 - Xdata[i];
Ydata[i * 2 + 1] = Xdata[i];
}
}
__global__ void MakeTwoClassGradientKernel(
const int N, const float* dYdata, float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
dXdata[i] = dYdata[i * 2 + 1] - dYdata[i * 2];
}
}
} // namespace
template <>
bool MakeTwoClassOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
auto shape = X.dims().vec();
shape.push_back(2);
CAFFE_ENFORCE_LT(X.size(), std::numeric_limits<int>::max() / 2);
Y->Resize(shape);
int N = X.size();
hipLaunchKernelGGL(( MakeTwoClassKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, X.data<float>(), Y->template mutable_data<float>());
return true;
}
template <>
bool MakeTwoClassGradientOp<float, CUDAContext>::RunOnDevice() {
auto& dY = Input(0);
auto* dX = Output(0);
auto shape = dY.dims().vec();
CAFFE_ENFORCE_GE(shape.size(), 1);
CAFFE_ENFORCE_EQ(shape.back(), 2);
shape.pop_back();
CAFFE_ENFORCE_LT(dY.size(), std::numeric_limits<int>::max());
dX->Resize(shape);
int N = dX->size();
hipLaunchKernelGGL(( MakeTwoClassGradientKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, dY.data<float>(), dX->template mutable_data<float>());
return true;
}
namespace {
__device__ float sigmoid_xent_forward(float lgt, float tgt) {
return lgt * (tgt - (lgt >= 0)) - log(1 + exp(lgt - 2 * lgt * (lgt >= 0)));
}
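// Note: sigmoid_xent_forward above is an overflow-safe rewrite of tgt*lgt - log(1 + exp(lgt))
// (the per-element log-likelihood of a sigmoid output), split on the sign of the logit.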
__device__ float sigmoid_xent_backward(float lgt, float tgt) {
return tgt - 1. / (1. + exp(-lgt));
}
__device__ float sigmoid_partition(float lgt) {
// computes log(1 + exp(lgt)) with only exp(x) function when x >= 0
return lgt * (lgt >= 0) + log(1 + exp(lgt - 2 * lgt * (lgt >= 0)));
}
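// sigmoid_partition uses the identity log(1 + exp(x)) = max(x, 0) + log(1 + exp(-|x|)),
// so exp() is only ever evaluated at non-positive arguments.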
__device__ float sigmoid_xent_forward_with_log_d_trick(float lgt, float tgt) {
return (2 * tgt - 1.) * (lgt - sigmoid_partition(lgt));
}
__device__ float sigmoid_xent_backward_with_log_d_trick(float lgt, float tgt) {
return (2 * tgt - 1.) / (1. + exp(lgt));
}
__device__ float unjoined_sigmoid_xent_forward(float lgt, float tgt) {
return lgt * tgt + (tgt - 1) * lgt * (lgt >= 0) -
(1 - tgt) * log(1 + exp(lgt - 2 * lgt * (lgt >= 0)));
}
__device__ float unjoined_sigmoid_xent_backward(float lgt, float tgt) {
return tgt - (1. - tgt) / (1. + exp(-lgt));
}
__global__ void SigmoidCrossEntropyWithLogitsKernel(
const int outer_size,
const int inner_size,
const bool log_D_trick,
const bool unjoined_lr_loss,
const float* logits_ptr,
const float* targets_ptr,
float* out_ptr) {
int i = blockIdx.x;
int last_idx = (i + 1) * inner_size;
float value = 0;
for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx;
in_idx += blockDim.x) {
if (unjoined_lr_loss) {
value += unjoined_sigmoid_xent_forward(
logits_ptr[in_idx], targets_ptr[in_idx]);
} else {
value +=
(log_D_trick
? sigmoid_xent_forward_with_log_d_trick(
logits_ptr[in_idx], targets_ptr[in_idx])
: sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]));
}
}
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
float sum = BlockReduce(temp_storage).Sum(value);
if (threadIdx.x == 0) {
out_ptr[i] = -sum / inner_size;
}
}
__global__ void SigmoidCrossEntropyGradientWithLogitsKernel(
const int outer_size,
const int inner_size,
const bool log_D_trick,
const bool unjoined_lr_loss,
const float* g_ptr,
const float* logits_ptr,
const float* targets_ptr,
float* out_ptr) {
CUDA_1D_KERNEL_LOOP(in_idx, outer_size * inner_size) {
int i = in_idx / inner_size;
auto g_factor = -g_ptr[i] / inner_size;
if (unjoined_lr_loss) {
out_ptr[in_idx] = g_factor *
unjoined_sigmoid_xent_backward(
logits_ptr[in_idx], targets_ptr[in_idx]);
} else {
out_ptr[in_idx] = g_factor *
(log_D_trick ? sigmoid_xent_backward_with_log_d_trick(
logits_ptr[in_idx], targets_ptr[in_idx])
: sigmoid_xent_backward(
logits_ptr[in_idx], targets_ptr[in_idx]));
}
}
}
} // namespace
template <>
bool SigmoidCrossEntropyWithLogitsOp<float, CUDAContext>::RunOnDevice() {
auto& logits = Input(0);
auto& targets = Input(1);
CAFFE_ENFORCE(logits.dims() == targets.dims());
const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1;
const auto outer_size = logits.size() / inner_size;
auto* out = Output(0);
if (logits.ndim() == 0) {
out->Resize(std::vector<int64_t>{});
} else {
std::vector<int64_t> dims(logits.dims().begin(), logits.dims().end() - 1);
out->Resize(dims);
}
auto* out_ptr = out->template mutable_data<float>();
auto* logits_ptr = logits.data<float>();
auto* targets_ptr = targets.data<float>();
if (logits.size() <= 0) {
// nothing to do, not even launching kernel
return true;
}
hipLaunchKernelGGL(( SigmoidCrossEntropyWithLogitsKernel),
dim3(outer_size),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
outer_size,
inner_size,
log_D_trick_,
unjoined_lr_loss_,
logits_ptr,
targets_ptr,
out_ptr);
return true;
}
template <>
bool SigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>::
RunOnDevice() {
auto& g = Input(0);
auto& logits = Input(1);
auto& targets = Input(2);
CAFFE_ENFORCE(logits.dims() == targets.dims());
const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1;
const auto outer_size = logits.size() / inner_size;
CAFFE_ENFORCE(g.size() == outer_size);
auto* out = Output(0);
out->ResizeLike(logits);
auto* out_ptr = out->template mutable_data<float>();
auto* logits_ptr = logits.data<float>();
auto* targets_ptr = targets.data<float>();
auto* g_ptr = g.data<float>();
hipLaunchKernelGGL(( SigmoidCrossEntropyGradientWithLogitsKernel),
dim3(CAFFE_GET_BLOCKS(outer_size * inner_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
outer_size,
inner_size,
log_D_trick_,
unjoined_lr_loss_,
g_ptr,
logits_ptr,
targets_ptr,
out_ptr);
return true;
}
namespace {
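// Same block-per-row reduction as above, except each per-element term is
// scaled by its weight before being summed.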
__global__ void WeightedSigmoidCrossEntropyWithLogitsKernel(
const int outer_size,
const int inner_size,
const float* logits_ptr,
const float* targets_ptr,
const float* weights_ptr,
float* out_ptr) {
int i = blockIdx.x;
int last_idx = (i + 1) * inner_size;
float value = 0;
for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx;
in_idx += blockDim.x) {
value += sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]) *
weights_ptr[in_idx];
}
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
float sum = BlockReduce(temp_storage).Sum(value);
if (threadIdx.x == 0) {
out_ptr[i] = -sum / inner_size;
}
}
__global__ void WeightedSigmoidCrossEntropyGradientWithLogitsKernel(
const int outer_size,
const int inner_size,
const float* g_ptr,
const float* logits_ptr,
const float* targets_ptr,
const float* weights_ptr,
float* out_ptr) {
CUDA_1D_KERNEL_LOOP(in_idx, outer_size * inner_size) {
int i = in_idx / inner_size;
auto g_factor = -g_ptr[i] / inner_size;
out_ptr[in_idx] = g_factor *
sigmoid_xent_backward(logits_ptr[in_idx], targets_ptr[in_idx]) *
weights_ptr[in_idx];
}
}
} // namespace
template <>
bool WeightedSigmoidCrossEntropyWithLogitsOp<float, CUDAContext>::
RunOnDevice() {
auto& logits = Input(0);
auto& targets = Input(1);
auto& weights = Input(2);
CAFFE_ENFORCE(logits.dims() == targets.dims());
CAFFE_ENFORCE(weights.dims() == targets.dims());
const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1;
const auto outer_size = logits.size() / inner_size;
auto* out = Output(0);
if (logits.ndim() == 0) {
out->Resize(std::vector<int64_t>{});
} else {
std::vector<int64_t> dims(logits.dims().begin(), logits.dims().end() - 1);
out->Resize(dims);
}
auto* out_ptr = out->template mutable_data<float>();
auto* logits_ptr = logits.data<float>();
auto* targets_ptr = targets.data<float>();
auto* weights_ptr = weights.data<float>();
hipLaunchKernelGGL(( WeightedSigmoidCrossEntropyWithLogitsKernel),
dim3(outer_size),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
outer_size, inner_size, logits_ptr, targets_ptr, weights_ptr, out_ptr);
return true;
}
template <>
bool WeightedSigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>::
RunOnDevice() {
auto& g = Input(0);
auto& logits = Input(1);
auto& targets = Input(2);
auto& weights = Input(3);
CAFFE_ENFORCE(logits.dims() == targets.dims());
CAFFE_ENFORCE(weights.dims() == targets.dims());
const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1;
const auto outer_size = logits.size() / inner_size;
CAFFE_ENFORCE(g.size() == outer_size);
auto* out = Output(0);
out->ResizeLike(logits);
auto* out_ptr = out->template mutable_data<float>();
auto* logits_ptr = logits.data<float>();
auto* targets_ptr = targets.data<float>();
auto* weights_ptr = weights.data<float>();
auto* g_ptr = g.data<float>();
hipLaunchKernelGGL(( WeightedSigmoidCrossEntropyGradientWithLogitsKernel),
dim3(CAFFE_GET_BLOCKS(outer_size * inner_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
outer_size,
inner_size,
g_ptr,
logits_ptr,
targets_ptr,
weights_ptr,
out_ptr);
return true;
}
REGISTER_CUDA_OPERATOR(LabelCrossEntropy,
LabelCrossEntropyOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(LabelCrossEntropyGradient,
LabelCrossEntropyGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SigmoidCrossEntropyWithLogits,
SigmoidCrossEntropyWithLogitsOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SigmoidCrossEntropyWithLogitsGradient,
SigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
WeightedSigmoidCrossEntropyWithLogits,
WeightedSigmoidCrossEntropyWithLogitsOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
WeightedSigmoidCrossEntropyWithLogitsGradient,
WeightedSigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MakeTwoClass,
MakeTwoClassOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MakeTwoClassGradient,
MakeTwoClassGradientOp<float, CUDAContext>);
//TODO(surya) Add full GPU/CUDA support for the CrossEntropyOp
REGISTER_CUDA_OPERATOR(CrossEntropy, GPUFallbackOp);
REGISTER_CUDA_OPERATOR(CrossEntropyGradient, GPUFallbackOp);
} // namespace caffe2
|
168a33d76d3e02bfe199763913f9e9f05883d250.cu
|
#include <assert.h>
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/cross_entropy_op.h"
#include "caffe2/operators/operator_fallback_gpu.h"
namespace caffe2 {
namespace {
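// One thread per row: the label index is range-checked on the device and the
// selected probability is clamped at log_threshold so -log never yields inf.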
__global__ void LabelCrossEntropyKernel(
const int N, const int D, const float* Xdata, const int* labeldata,
const float log_threshold, float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
Ydata[i] = -logf(fmaxf(Xdata[i * D + labeldata[i]], log_threshold));
}
}
__global__ void LabelCrossEntropyGradientKernel(
const int N, const int D, const float* Xdata, const int* labeldata,
const float* dYdata, const float log_threshold, float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = - dYdata[i] / fmaxf(Xdata[idx], log_threshold);
}
}
} // namespace
template <>
bool LabelCrossEntropyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& label = Input(1);
auto* Y = Output(0);
int N, D;
if (X.ndim() > 1) {
N = X.dim32(0);
D = X.size_from_dim(1);
} else {
N = 1;
D = X.dim32(0);
}
CAFFE_ENFORCE(
(label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
CAFFE_ENFORCE_EQ(label.dim32(0), N);
Y->Resize(vector<int64_t>(size_t(1), N));
LabelCrossEntropyKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
X.data<float>(),
label.data<int>(),
kLOG_THRESHOLD(),
Y->template mutable_data<float>());
return true;
}
template <>
bool LabelCrossEntropyGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& label = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
int N, D;
if (X.ndim() > 1) {
N = X.dim32(0);
D = X.size_from_dim(1);
} else {
N = 1;
D = X.dim32(0);
}
CAFFE_ENFORCE(
(label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1));
CAFFE_ENFORCE_EQ(label.dim32(0), N);
CAFFE_ENFORCE_EQ(dY.ndim(), 1);
CAFFE_ENFORCE_EQ(dY.dim32(0), N);
dX->ResizeLike(X);
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->template mutable_data<float>(), &context_);
LabelCrossEntropyGradientKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
X.data<float>(),
label.data<int>(),
dY.data<float>(),
kLOG_THRESHOLD(),
dX->template mutable_data<float>());
return true;
}
namespace {
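// MakeTwoClass expands a probability p into the pair (1 - p, p); its gradient
// is correspondingly dX = dY[., 1] - dY[., 0].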
__global__ void MakeTwoClassKernel(
const int N, const float* Xdata, float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
Ydata[i * 2] = 1.0 - Xdata[i];
Ydata[i * 2 + 1] = Xdata[i];
}
}
__global__ void MakeTwoClassGradientKernel(
const int N, const float* dYdata, float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
dXdata[i] = dYdata[i * 2 + 1] - dYdata[i * 2];
}
}
} // namespace
template <>
bool MakeTwoClassOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
auto shape = X.dims().vec();
shape.push_back(2);
CAFFE_ENFORCE_LT(X.size(), std::numeric_limits<int>::max() / 2);
Y->Resize(shape);
int N = X.size();
MakeTwoClassKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, X.data<float>(), Y->template mutable_data<float>());
return true;
}
template <>
bool MakeTwoClassGradientOp<float, CUDAContext>::RunOnDevice() {
auto& dY = Input(0);
auto* dX = Output(0);
auto shape = dY.dims().vec();
CAFFE_ENFORCE_GE(shape.size(), 1);
CAFFE_ENFORCE_EQ(shape.back(), 2);
shape.pop_back();
CAFFE_ENFORCE_LT(dY.size(), std::numeric_limits<int>::max());
dX->Resize(shape);
int N = dX->size();
MakeTwoClassGradientKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, dY.data<float>(), dX->template mutable_data<float>());
return true;
}
namespace {
__device__ float sigmoid_xent_forward(float lgt, float tgt) {
return lgt * (tgt - (lgt >= 0)) - log(1 + exp(lgt - 2 * lgt * (lgt >= 0)));
}
__device__ float sigmoid_xent_backward(float lgt, float tgt) {
return tgt - 1. / (1. + exp(-lgt));
}
__device__ float sigmoid_partition(float lgt) {
// computes log(1 + exp(lgt)) with only exp(x) function when x >= 0
return lgt * (lgt >= 0) + log(1 + exp(lgt - 2 * lgt * (lgt >= 0)));
}
__device__ float sigmoid_xent_forward_with_log_d_trick(float lgt, float tgt) {
return (2 * tgt - 1.) * (lgt - sigmoid_partition(lgt));
}
__device__ float sigmoid_xent_backward_with_log_d_trick(float lgt, float tgt) {
return (2 * tgt - 1.) / (1. + exp(lgt));
}
__device__ float unjoined_sigmoid_xent_forward(float lgt, float tgt) {
return lgt * tgt + (tgt - 1) * lgt * (lgt >= 0) -
(1 - tgt) * log(1 + exp(lgt - 2 * lgt * (lgt >= 0)));
}
__device__ float unjoined_sigmoid_xent_backward(float lgt, float tgt) {
return tgt - (1. - tgt) / (1. + exp(-lgt));
}
__global__ void SigmoidCrossEntropyWithLogitsKernel(
const int outer_size,
const int inner_size,
const bool log_D_trick,
const bool unjoined_lr_loss,
const float* logits_ptr,
const float* targets_ptr,
float* out_ptr) {
int i = blockIdx.x;
int last_idx = (i + 1) * inner_size;
float value = 0;
for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx;
in_idx += blockDim.x) {
if (unjoined_lr_loss) {
value += unjoined_sigmoid_xent_forward(
logits_ptr[in_idx], targets_ptr[in_idx]);
} else {
value +=
(log_D_trick
? sigmoid_xent_forward_with_log_d_trick(
logits_ptr[in_idx], targets_ptr[in_idx])
: sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]));
}
}
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
float sum = BlockReduce(temp_storage).Sum(value);
if (threadIdx.x == 0) {
out_ptr[i] = -sum / inner_size;
}
}
__global__ void SigmoidCrossEntropyGradientWithLogitsKernel(
const int outer_size,
const int inner_size,
const bool log_D_trick,
const bool unjoined_lr_loss,
const float* g_ptr,
const float* logits_ptr,
const float* targets_ptr,
float* out_ptr) {
CUDA_1D_KERNEL_LOOP(in_idx, outer_size * inner_size) {
int i = in_idx / inner_size;
auto g_factor = -g_ptr[i] / inner_size;
if (unjoined_lr_loss) {
out_ptr[in_idx] = g_factor *
unjoined_sigmoid_xent_backward(
logits_ptr[in_idx], targets_ptr[in_idx]);
} else {
out_ptr[in_idx] = g_factor *
(log_D_trick ? sigmoid_xent_backward_with_log_d_trick(
logits_ptr[in_idx], targets_ptr[in_idx])
: sigmoid_xent_backward(
logits_ptr[in_idx], targets_ptr[in_idx]));
}
}
}
} // namespace
template <>
bool SigmoidCrossEntropyWithLogitsOp<float, CUDAContext>::RunOnDevice() {
auto& logits = Input(0);
auto& targets = Input(1);
CAFFE_ENFORCE(logits.dims() == targets.dims());
const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1;
const auto outer_size = logits.size() / inner_size;
auto* out = Output(0);
if (logits.ndim() == 0) {
out->Resize(std::vector<int64_t>{});
} else {
std::vector<int64_t> dims(logits.dims().begin(), logits.dims().end() - 1);
out->Resize(dims);
}
auto* out_ptr = out->template mutable_data<float>();
auto* logits_ptr = logits.data<float>();
auto* targets_ptr = targets.data<float>();
if (logits.size() <= 0) {
// nothing to do, not even launching kernel
return true;
}
SigmoidCrossEntropyWithLogitsKernel<<<
outer_size,
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
outer_size,
inner_size,
log_D_trick_,
unjoined_lr_loss_,
logits_ptr,
targets_ptr,
out_ptr);
return true;
}
template <>
bool SigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>::
RunOnDevice() {
auto& g = Input(0);
auto& logits = Input(1);
auto& targets = Input(2);
CAFFE_ENFORCE(logits.dims() == targets.dims());
const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1;
const auto outer_size = logits.size() / inner_size;
CAFFE_ENFORCE(g.size() == outer_size);
auto* out = Output(0);
out->ResizeLike(logits);
auto* out_ptr = out->template mutable_data<float>();
auto* logits_ptr = logits.data<float>();
auto* targets_ptr = targets.data<float>();
auto* g_ptr = g.data<float>();
SigmoidCrossEntropyGradientWithLogitsKernel<<<
CAFFE_GET_BLOCKS(outer_size * inner_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
outer_size,
inner_size,
log_D_trick_,
unjoined_lr_loss_,
g_ptr,
logits_ptr,
targets_ptr,
out_ptr);
return true;
}
namespace {
__global__ void WeightedSigmoidCrossEntropyWithLogitsKernel(
const int outer_size,
const int inner_size,
const float* logits_ptr,
const float* targets_ptr,
const float* weights_ptr,
float* out_ptr) {
int i = blockIdx.x;
int last_idx = (i + 1) * inner_size;
float value = 0;
for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx;
in_idx += blockDim.x) {
value += sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]) *
weights_ptr[in_idx];
}
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
float sum = BlockReduce(temp_storage).Sum(value);
if (threadIdx.x == 0) {
out_ptr[i] = -sum / inner_size;
}
}
__global__ void WeightedSigmoidCrossEntropyGradientWithLogitsKernel(
const int outer_size,
const int inner_size,
const float* g_ptr,
const float* logits_ptr,
const float* targets_ptr,
const float* weights_ptr,
float* out_ptr) {
CUDA_1D_KERNEL_LOOP(in_idx, outer_size * inner_size) {
int i = in_idx / inner_size;
auto g_factor = -g_ptr[i] / inner_size;
out_ptr[in_idx] = g_factor *
sigmoid_xent_backward(logits_ptr[in_idx], targets_ptr[in_idx]) *
weights_ptr[in_idx];
}
}
} // namespace
template <>
bool WeightedSigmoidCrossEntropyWithLogitsOp<float, CUDAContext>::
RunOnDevice() {
auto& logits = Input(0);
auto& targets = Input(1);
auto& weights = Input(2);
CAFFE_ENFORCE(logits.dims() == targets.dims());
CAFFE_ENFORCE(weights.dims() == targets.dims());
const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1;
const auto outer_size = logits.size() / inner_size;
auto* out = Output(0);
if (logits.ndim() == 0) {
out->Resize(std::vector<int64_t>{});
} else {
std::vector<int64_t> dims(logits.dims().begin(), logits.dims().end() - 1);
out->Resize(dims);
}
auto* out_ptr = out->template mutable_data<float>();
auto* logits_ptr = logits.data<float>();
auto* targets_ptr = targets.data<float>();
auto* weights_ptr = weights.data<float>();
WeightedSigmoidCrossEntropyWithLogitsKernel<<<
outer_size,
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
outer_size, inner_size, logits_ptr, targets_ptr, weights_ptr, out_ptr);
return true;
}
template <>
bool WeightedSigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>::
RunOnDevice() {
auto& g = Input(0);
auto& logits = Input(1);
auto& targets = Input(2);
auto& weights = Input(3);
CAFFE_ENFORCE(logits.dims() == targets.dims());
CAFFE_ENFORCE(weights.dims() == targets.dims());
const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1;
const auto outer_size = logits.size() / inner_size;
CAFFE_ENFORCE(g.size() == outer_size);
auto* out = Output(0);
out->ResizeLike(logits);
auto* out_ptr = out->template mutable_data<float>();
auto* logits_ptr = logits.data<float>();
auto* targets_ptr = targets.data<float>();
auto* weights_ptr = weights.data<float>();
auto* g_ptr = g.data<float>();
WeightedSigmoidCrossEntropyGradientWithLogitsKernel<<<
CAFFE_GET_BLOCKS(outer_size * inner_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
outer_size,
inner_size,
g_ptr,
logits_ptr,
targets_ptr,
weights_ptr,
out_ptr);
return true;
}
REGISTER_CUDA_OPERATOR(LabelCrossEntropy,
LabelCrossEntropyOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(LabelCrossEntropyGradient,
LabelCrossEntropyGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SigmoidCrossEntropyWithLogits,
SigmoidCrossEntropyWithLogitsOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SigmoidCrossEntropyWithLogitsGradient,
SigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
WeightedSigmoidCrossEntropyWithLogits,
WeightedSigmoidCrossEntropyWithLogitsOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
WeightedSigmoidCrossEntropyWithLogitsGradient,
WeightedSigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MakeTwoClass,
MakeTwoClassOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MakeTwoClassGradient,
MakeTwoClassGradientOp<float, CUDAContext>);
//TODO(surya) Add full GPU/CUDA support for the CrossEntropyOp
REGISTER_CUDA_OPERATOR(CrossEntropy, GPUFallbackOp);
REGISTER_CUDA_OPERATOR(CrossEntropyGradient, GPUFallbackOp);
} // namespace caffe2
|
df0f01a0bf88121f34d7fe0b07a8ef961be4250b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
if ( c >= numCols ||
r >= numRows )
{
return;
}
float result = 0.f;
/*
// use shared memory for filter
extern __shared__ float filterShared[];
if (threadIdx.x < filterWidth && threadIdx.y < filterWidth) {
// only the upper left corner threads do the value copy
// to do this, must have blockDim.x >= filterWidth and blockDim.y >= filterWidth
int filterIdx = threadIdx.y * filterWidth + threadIdx.x;
filterShared[filterIdx] = filter[filterIdx];
}
__syncthreads();
float* filter0 = filterShared;
*/
const float* filter0 = &(*filter);
/*
// use global memory for filter
//For every value in the filter around the pixel (c, r)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = min(max(r + filter_r, 0), numRows - 1);
int image_c = min(max(c + filter_c, 0), numCols - 1);
float image_value = (float)inputChannel[image_r * numCols + image_c];
float filter_value = filter0[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[r * numCols + c] = (char)result;
*/
// use shared memory for image
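// Each thread stores its own pixel into a
// (blockDim.y + filterWidth - 1) x (blockDim.x + filterWidth - 1) tile;
// threads within filterWidth/2 of a block edge also load the halo pixels
// (edge and corner) they border, clamped to the image bounds.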
int numRows1 = blockDim.y + filterWidth - 1, numCols1 = blockDim.x + filterWidth - 1;
extern __shared__ unsigned char inputChannelShared[];
int filterWidthHalf = filterWidth/2; // actually (filterWidth - 1) / 2 since filterWidth % 2 == 1
int c1 = threadIdx.x + filterWidthHalf, r1 = threadIdx.y + filterWidthHalf;
inputChannelShared[r1 * numCols1 + c1] = inputChannel[r * numCols + c];
int cShift = 0, rShift = 0;
if (threadIdx.x - filterWidthHalf < 0) {
cShift = -1;
} else if (threadIdx.x + filterWidthHalf >= blockDim.x) {
cShift = 1;
}
if (threadIdx.y - filterWidthHalf < 0) {
rShift = -1;
} else if (threadIdx.y + filterWidthHalf >= blockDim.y) {
rShift = 1;
}
int rShifted = min(max(r + rShift * filterWidthHalf, 0), numRows - 1);
int cShifted = min(max(c + cShift * filterWidthHalf, 0), numCols - 1);
if (cShift != 0) {
inputChannelShared[r1 * numCols1 + c1 + cShift * filterWidthHalf] = inputChannel[r * numCols + cShifted];
}
if (rShift != 0) {
inputChannelShared[(r1 + rShift * filterWidthHalf) * numCols1 + c1] = inputChannel[rShifted * numCols + c];
}
if (cShift != 0 && rShift != 0) {
inputChannelShared[(r1 + rShift * filterWidthHalf) * numCols1 + c1 + cShift * filterWidthHalf] = inputChannel[rShifted * numCols + cShifted];
}
__syncthreads();
//For every value in the filter around the pixel (c, r)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = r1 + filter_r;
int image_c = c1 + filter_c;
float image_value = (float)inputChannelShared[image_r * numCols1 + image_c];
float filter_value = filter0[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[r * numCols + c] = (unsigned char)result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
int absolute_image_position_x = blockIdx.x * blockDim.x + threadIdx.x;
int absolute_image_position_y = blockIdx.y * blockDim.y + threadIdx.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
int position_flatten = absolute_image_position_y * numCols + absolute_image_position_x;
uchar4 rgba = inputImageRGBA[position_flatten];
redChannel[position_flatten] = rgba.x;
greenChannel[position_flatten] = rgba.y;
blueChannel[position_flatten] = rgba.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth,
hipMemcpyHostToDevice));
}
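// Largest divisor of n that does not exceed max_devisor (falls back to 1).
// Only referenced by the commented-out block-size search below; the active
// path uses a fixed 16x16 block.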
int find_devisor(int n, int max_devisor) {
for (int i = max_devisor; i > 0; i--) {
if (n % i == 0) {
return i;
}
}
return 1;
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
/*
int max_devisor = 20;
int blockDimX = find_devisor(numRows, max_devisor);
int blockDimY = find_devisor(numCols, max_devisor);
int gridDimX = numRows / blockDimX;
int gridDimY = numCols / blockDimY;
*/
int blockDimX = 16;
int blockDimY = 16;
int gridDimX = numCols / blockDimX + int(numCols % blockDimX > 0);
int gridDimY = numRows / blockDimY + int(numRows % blockDimY > 0);
printf("%d\t%d,\t%d\t%d,\t%d\t%d\n", numRows, numCols, blockDimX, blockDimY, gridDimX, gridDimY);
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(blockDimX, blockDimY, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(gridDimX, gridDimY, 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
/*
int sharedMemSize = sizeof(float) * filterWidth * filterWidth;
*/
int numRows1 = blockDimY + filterWidth - 1, numCols1 = blockDimX + filterWidth - 1;
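// Shared memory is sized in bytes: the tile holds unsigned char pixels, so
// numRows1 * numCols1 * sizeof(unsigned char) == numRows1 * numCols1.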
int sharedMemSize = numRows1 * numCols1;
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), sharedMemSize, 0, d_red,
d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), sharedMemSize, 0, d_green,
d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), sharedMemSize, 0, d_blue,
d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
}
|
df0f01a0bf88121f34d7fe0b07a8ef961be4250b.cu
|
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
if ( c >= numCols ||
r >= numRows )
{
return;
}
float result = 0.f;
/*
// use shared memory for filter
extern __shared__ float filterShared[];
if (threadIdx.x < filterWidth && threadIdx.y < filterWidth) {
// only the upper left corner threads do the value copy
// to do this, must have blockDim.x >= filterWidth and blockDim.y >= filterWidth
int filterIdx = threadIdx.y * filterWidth + threadIdx.x;
filterShared[filterIdx] = filter[filterIdx];
}
__syncthreads();
float* filter0 = filterShared;
*/
const float* filter0 = &(*filter);
/*
// use global memory for filter
//For every value in the filter around the pixel (c, r)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = min(max(r + filter_r, 0), numRows - 1);
int image_c = min(max(c + filter_c, 0), numCols - 1);
float image_value = (float)inputChannel[image_r * numCols + image_c];
float filter_value = filter0[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[r * numCols + c] = (char)result;
*/
// use shared memory for image
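// Note: the halo loads below shift by exactly filterWidth/2, and the left/right
// (top/bottom) cases are mutually exclusive, so this scheme assumes
// blockDim.x and blockDim.y are at least filterWidth - 1; the launcher below
// uses 16x16 blocks.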
int numRows1 = blockDim.y + filterWidth - 1, numCols1 = blockDim.x + filterWidth - 1;
extern __shared__ unsigned char inputChannelShared[];
int filterWidthHalf = filterWidth/2; // actually (filterWidth - 1) / 2 since filterWidth % 2 == 1
int c1 = threadIdx.x + filterWidthHalf, r1 = threadIdx.y + filterWidthHalf;
inputChannelShared[r1 * numCols1 + c1] = inputChannel[r * numCols + c];
int cShift = 0, rShift = 0;
if (threadIdx.x - filterWidthHalf < 0) {
cShift = -1;
} else if (threadIdx.x + filterWidthHalf >= blockDim.x) {
cShift = 1;
}
if (threadIdx.y - filterWidthHalf < 0) {
rShift = -1;
} else if (threadIdx.y + filterWidthHalf >= blockDim.y) {
rShift = 1;
}
int rShifted = min(max(r + rShift * filterWidthHalf, 0), numRows - 1);
int cShifted = min(max(c + cShift * filterWidthHalf, 0), numCols - 1);
if (cShift != 0) {
inputChannelShared[r1 * numCols1 + c1 + cShift * filterWidthHalf] = inputChannel[r * numCols + cShifted];
}
if (rShift != 0) {
inputChannelShared[(r1 + rShift * filterWidthHalf) * numCols1 + c1] = inputChannel[rShifted * numCols + c];
}
if (cShift != 0 && rShift != 0) {
inputChannelShared[(r1 + rShift * filterWidthHalf) * numCols1 + c1 + cShift * filterWidthHalf] = inputChannel[rShifted * numCols + cShifted];
}
__syncthreads();
//For every value in the filter around the pixel (c, r)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = r1 + filter_r;
int image_c = c1 + filter_c;
float image_value = (float)inputChannelShared[image_r * numCols1 + image_c];
float filter_value = filter0[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[r * numCols + c] = (unsigned char)result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
int absolute_image_position_x = blockIdx.x * blockDim.x + threadIdx.x;
int absolute_image_position_y = blockIdx.y * blockDim.y + threadIdx.y;
if ( absolute_image_position_x >= numCols ||
absolute_image_position_y >= numRows )
{
return;
}
int position_flatten = absolute_image_position_y * numCols + absolute_image_position_x;
uchar4 rgba = inputImageRGBA[position_flatten];
redChannel[position_flatten] = rgba.x;
greenChannel[position_flatten] = rgba.y;
blueChannel[position_flatten] = rgba.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth,
cudaMemcpyHostToDevice));
}
int find_devisor(int n, int max_devisor) {
for (int i = max_devisor; i > 0; i--) {
if (n % i == 0) {
return i;
}
}
return 1;
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
/*
int max_devisor = 20;
int blockDimX = find_devisor(numRows, max_devisor);
int blockDimY = find_devisor(numCols, max_devisor);
int gridDimX = numRows / blockDimX;
int gridDimY = numCols / blockDimY;
*/
int blockDimX = 16;
int blockDimY = 16;
int gridDimX = numCols / blockDimX + int(numCols % blockDimX > 0);
int gridDimY = numRows / blockDimY + int(numRows % blockDimY > 0);
printf("%d\t%d,\t%d\t%d,\t%d\t%d\n", numRows, numCols, blockDimX, blockDimY, gridDimX, gridDimY);
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(blockDimX, blockDimY, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(gridDimX, gridDimY, 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
/*
int sharedMemSize = sizeof(float) * filterWidth * filterWidth;
*/
int numRows1 = blockDimY + filterWidth - 1, numCols1 = blockDimX + filterWidth - 1;
int sharedMemSize = numRows1 * numCols1;
gaussian_blur<<<gridSize, blockSize, sharedMemSize>>>(d_red,
d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize, sharedMemSize>>>(d_green,
d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize, sharedMemSize>>>(d_blue,
d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
|
92e9800527bd59e7691f311f81316e4a6fda3964.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "ClothTOP.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
surface<void, cudaSurfaceType2D> outputSurface;
surface<void, cudaSurfaceType2D> outputSurface2;
__device__ float4 operator+(const float4 & a, const float4 & b) {
return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
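// Layout written by PosNormKernel: 4 texels per index in the output surface.
// Texel 0 holds the position, texel 1 the triangle indices (stored in reverse
// order), texel 2 the normal; texel 3 is left untouched.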
__global__ void
PosNormKernel(int npoints, int ntris, Vec4* pos, int* indices, Vec4* normals, Vec3* uvs, int imgw, int imgh)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int idx = x + y * imgw;
int offset1 = 4 * idx;
int offset2 = 4 * idx + 1;
int offset3 = 4 * idx + 2;
if (x < imgw && y < imgh )
{
if (idx < npoints)
{
surf2Dwrite(make_float4(pos[idx].x, pos[idx].y, pos[idx].z, 1), outputSurface, (int)sizeof(float4) * (offset1 % imgw), (offset1 / imgw));
surf2Dwrite(make_float4(normals[idx].x, normals[idx].y, normals[idx].z, 0.0f), outputSurface, (int)sizeof(float4) * (offset3 % imgw), (offset3 / imgw));
}
if (idx < ntris)
{
surf2Dwrite(make_float4((float)indices[3*idx + 2], (float)indices[3 * idx + 1], (float)indices[3 * idx + 0], 0.0f), outputSurface, (int)sizeof(float4) * (offset2 % imgw), (offset2 / imgw));
}
}
}
__global__ void
PosNormKernelUV(int npoints, int ntris, Vec4* pos, int* indices, Vec4* normals, Vec3* uvs, int imgw, int imgh)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int idx = x + y * imgw;
int uv_idx_x = sizeof(float4) * (int)(uvs[idx].x * imgw);
int uv_idx_y = (int)((uvs[idx].y) * imgh);
if (x < imgw && y < imgh && idx < npoints)
{
surf2Dwrite(make_float4(pos[idx].x, pos[idx].y, pos[idx].z, 1), outputSurface, uv_idx_x, uv_idx_y);
surf2Dwrite(make_float4(normals[idx].x, normals[idx].y, normals[idx].z, 0.0f), outputSurface2, uv_idx_x, uv_idx_y);
}
}
void launch_PosNormKernel(int outputmode, dim3 grid, dim3 block,int npoints, int ntris, void** mapped_g_buff, hipArray* output1, hipArray* output2, int imgw, int imgh)
{
Vec4* pos = (Vec4*)mapped_g_buff[0];
int* indices = (int*)mapped_g_buff[1];
Vec4* normals = (Vec4*)mapped_g_buff[2];
Vec3* uvs = (Vec3*)mapped_g_buff[3];
cudaCheck(hipBindSurfaceToArray(outputSurface, output1));
cudaCheck(hipBindSurfaceToArray(outputSurface2, output2));
if(outputmode == 0)
PosNormKernel << < grid, block >> > (npoints, ntris, pos, indices, normals, uvs, imgw, imgh);
else
PosNormKernelUV << < grid, block >> > (npoints, ntris, pos, indices, normals, uvs, imgw, imgh);
}
|
92e9800527bd59e7691f311f81316e4a6fda3964.cu
|
#include "ClothTOP.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
surface<void, cudaSurfaceType2D> outputSurface;
surface<void, cudaSurfaceType2D> outputSurface2;
__device__ float4 operator+(const float4 & a, const float4 & b) {
return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
__global__ void
PosNormKernel(int npoints, int ntris, Vec4* pos, int* indices, Vec4* normals, Vec3* uvs, int imgw, int imgh)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int idx = x + y * imgw;
int offset1 = 4 * idx;
int offset2 = 4 * idx + 1;
int offset3 = 4 * idx + 2;
if (x < imgw && y < imgh )
{
if (idx < npoints)
{
surf2Dwrite(make_float4(pos[idx].x, pos[idx].y, pos[idx].z, 1), outputSurface, (int)sizeof(float4) * (offset1 % imgw), (offset1 / imgw));
surf2Dwrite(make_float4(normals[idx].x, normals[idx].y, normals[idx].z, 0.0f), outputSurface, (int)sizeof(float4) * (offset3 % imgw), (offset3 / imgw));
}
if (idx < ntris)
{
surf2Dwrite(make_float4((float)indices[3*idx + 2], (float)indices[3 * idx + 1], (float)indices[3 * idx + 0], 0.0f), outputSurface, (int)sizeof(float4) * (offset2 % imgw), (offset2 / imgw));
}
}
}
__global__ void
PosNormKernelUV(int npoints, int ntris, Vec4* pos, int* indices, Vec4* normals, Vec3* uvs, int imgw, int imgh)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int idx = x + y * imgw;
int uv_idx_x = sizeof(float4) * (int)(uvs[idx].x * imgw);
int uv_idx_y = (int)((uvs[idx].y) * imgh);
if (x < imgw && y < imgh && idx < npoints)
{
surf2Dwrite(make_float4(pos[idx].x, pos[idx].y, pos[idx].z, 1), outputSurface, uv_idx_x, uv_idx_y);
surf2Dwrite(make_float4(normals[idx].x, normals[idx].y, normals[idx].z, 0.0f), outputSurface2, uv_idx_x, uv_idx_y);
}
}
void launch_PosNormKernel(int outputmode, dim3 grid, dim3 block,int npoints, int ntris, void** mapped_g_buff, cudaArray* output1, cudaArray* output2, int imgw, int imgh)
{
Vec4* pos = (Vec4*)mapped_g_buff[0];
int* indices = (int*)mapped_g_buff[1];
Vec4* normals = (Vec4*)mapped_g_buff[2];
Vec3* uvs = (Vec3*)mapped_g_buff[3];
cudaCheck(cudaBindSurfaceToArray(outputSurface, output1));
cudaCheck(cudaBindSurfaceToArray(outputSurface2, output2));
if(outputmode == 0)
PosNormKernel << < grid, block >> > (npoints, ntris, pos, indices, normals, uvs, imgw, imgh);
else
PosNormKernelUV << < grid, block >> > (npoints, ntris, pos, indices, normals, uvs, imgw, imgh);
}
|
4e1394ed92179bd47563726c8974c27810f41821.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "gpuerrchk.cuh"
#include "assert.h"
#include "real.h"
#include "ch9_strat3.cuh"
#include <iostream>
#include "rstring.h"
#include <string>
#define DAT_SIZE 10000000
void test(){
std::string buffer_string = rstring(DAT_SIZE);
const char* buffer= buffer_string.c_str();
unsigned int histo[]= {0,0,0,0,0,0,0};
char* d_buffer;
unsigned int* d_histo;
gpuErrchk(hipMalloc((void**) &d_buffer,sizeof(char)*DAT_SIZE));
gpuErrchk(hipMemcpy(d_buffer,buffer,sizeof(char)*DAT_SIZE,hipMemcpyHostToDevice));
gpuErrchk(hipMalloc((void**) &d_histo,sizeof(int)*7 ));
ch9_strat3(d_buffer,d_histo, (size_t) DAT_SIZE, 7);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk(hipMemcpy(histo,d_histo,sizeof(int)*7,hipMemcpyDeviceToHost));
gpuErrchk(hipFree(d_buffer));
gpuErrchk(hipFree(d_histo));
}
int main(){
test();
std::cout<<"DONE!" <<std::endl;
}
|
4e1394ed92179bd47563726c8974c27810f41821.cu
|
#include "gpuerrchk.cuh"
#include "assert.h"
#include "real.h"
#include "ch9_strat3.cuh"
#include <iostream>
#include "rstring.h"
#include <string>
#define DAT_SIZE 10000000
void test(){
std::string buffer_string = rstring(DAT_SIZE);
const char* buffer= buffer_string.c_str();
unsigned int histo[]= {0,0,0,0,0,0,0};
char* d_buffer;
unsigned int* d_histo;
gpuErrchk(cudaMalloc((void**) &d_buffer,sizeof(char)*DAT_SIZE));
gpuErrchk(cudaMemcpy(d_buffer,buffer,sizeof(char)*DAT_SIZE,cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void**) &d_histo,sizeof(int)*7 ));
ch9_strat3(d_buffer,d_histo, (size_t) DAT_SIZE, 7);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk(cudaMemcpy(histo,d_histo,sizeof(int)*7,cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(d_buffer));
gpuErrchk(cudaFree(d_histo));
}
int main(){
test();
std::cout<<"DONE!" <<std::endl;
}
|
b231cd4c3597eac64a3783bb761caab3eb13deb4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <caffe2/core/context_gpu.h>
#include <caffe2/operator/affine_scale_op.h>
namespace caffe2 {
namespace {
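// Per-example affine transform: C is the number of elements per example, so
// M and S are indexed by i / C. The inverse kernel adds 1e-8 to S to avoid
// division by zero.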
__global__ void AffineScaleKernel(const int N, const int C, const float* X,
const float* M, const float* S, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = X[i] * S[i / C] + M[i / C]; }
}
__global__ void AffineScaleInverseKernel(const int N, const int C,
const float* X, const float* M,
const float* S, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = (X[i] - M[i / C]) / (S[i / C] + 1e-8); }
}
} // namespace
template <>
bool AffineScaleOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& M = Input(1);
auto& S = Input(2);
auto* Y = Output(0);
DCHECK_EQ(M.size(), X.dim(0));
DCHECK_EQ(S.size(), X.dim(0));
Y->ResizeLike(X);
if (X.size() > 0) {
auto size = X.size() / X.dim(0);
if (inverse_) {
hipLaunchKernelGGL(( AffineScaleInverseKernel), dim3(CAFFE_GET_BLOCKS(X.size())),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
X.size(), size, X.data<float>(), M.data<float>(), S.data<float>(),
Y->mutable_data<float>());
} else {
hipLaunchKernelGGL(( AffineScaleKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
X.size(), size, X.data<float>(), M.data<float>(), S.data<float>(),
Y->mutable_data<float>());
}
}
return true;
}
namespace {
__global__ void AffineScaleGradientKernel(const int N, const int C,
const float* dY, const float* S,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = dY[i] * S[i / C]; }
}
__global__ void AffineScaleInverseGradientKernel(const int N, const int C,
const float* dY,
const float* S, float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = dY[i] / (S[i / C] + 1e-8); }
}
} // namespace
template <>
bool AffineScaleGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& S = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
DCHECK_EQ(S.size(), X.dim(0));
DCHECK_EQ(dY.size(), X.size());
dX->ResizeLike(X);
if (X.size() > 0) {
auto size = X.size() / X.dim(0);
if (inverse_) {
hipLaunchKernelGGL(( AffineScaleInverseGradientKernel), dim3(CAFFE_GET_BLOCKS(dY.size())),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
dY.size(), size, dY.data<float>(), S.data<float>(),
dX->mutable_data<float>());
} else {
hipLaunchKernelGGL(( AffineScaleGradientKernel), dim3(CAFFE_GET_BLOCKS(dY.size())),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
dY.size(), size, dY.data<float>(), S.data<float>(),
dX->mutable_data<float>());
}
}
return true;
}
REGISTER_CUDA_OPERATOR(AffineScale, AffineScaleOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(AffineScaleGradient,
AffineScaleGradientOp<float, CUDAContext>);
} // namespace caffe2
|
b231cd4c3597eac64a3783bb761caab3eb13deb4.cu
|
#include <caffe2/core/context_gpu.h>
#include <caffe2/operator/affine_scale_op.h>
namespace caffe2 {
namespace {
__global__ void AffineScaleKernel(const int N, const int C, const float* X,
const float* M, const float* S, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = X[i] * S[i / C] + M[i / C]; }
}
__global__ void AffineScaleInverseKernel(const int N, const int C,
const float* X, const float* M,
const float* S, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = (X[i] - M[i / C]) / (S[i / C] + 1e-8); }
}
} // namespace
template <>
bool AffineScaleOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& M = Input(1);
auto& S = Input(2);
auto* Y = Output(0);
DCHECK_EQ(M.size(), X.dim(0));
DCHECK_EQ(S.size(), X.dim(0));
Y->ResizeLike(X);
if (X.size() > 0) {
auto size = X.size() / X.dim(0);
if (inverse_) {
AffineScaleInverseKernel<<<CAFFE_GET_BLOCKS(X.size()),
CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
X.size(), size, X.data<float>(), M.data<float>(), S.data<float>(),
Y->mutable_data<float>());
} else {
AffineScaleKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
X.size(), size, X.data<float>(), M.data<float>(), S.data<float>(),
Y->mutable_data<float>());
}
}
return true;
}
namespace {
__global__ void AffineScaleGradientKernel(const int N, const int C,
const float* dY, const float* S,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = dY[i] * S[i / C]; }
}
__global__ void AffineScaleInverseGradientKernel(const int N, const int C,
const float* dY,
const float* S, float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = dY[i] / (S[i / C] + 1e-8); }
}
} // namespace
template <>
bool AffineScaleGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& S = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
DCHECK_EQ(S.size(), X.dim(0));
DCHECK_EQ(dY.size(), X.size());
dX->ResizeLike(X);
if (X.size() > 0) {
auto size = X.size() / X.dim(0);
if (inverse_) {
AffineScaleInverseGradientKernel<<<CAFFE_GET_BLOCKS(dY.size()),
CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
dY.size(), size, dY.data<float>(), S.data<float>(),
dX->mutable_data<float>());
} else {
AffineScaleGradientKernel<<<CAFFE_GET_BLOCKS(dY.size()),
CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
dY.size(), size, dY.data<float>(), S.data<float>(),
dX->mutable_data<float>());
}
}
return true;
}
REGISTER_CUDA_OPERATOR(AffineScale, AffineScaleOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(AffineScaleGradient,
AffineScaleGradientOp<float, CUDAContext>);
} // namespace caffe2
|
6a01a375094fa1c9e5d17d618f963203c39c591f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <quda_matrix.h>
#include <su3_project.cuh>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <index_helper.cuh>
#define DOUBLE_TOL 1e-15
#define SINGLE_TOL 2e-6
namespace quda {
#ifdef GPU_GAUGE_TOOLS
template <typename Float, typename GaugeOr, typename GaugeDs>
struct GaugeAPEArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
GaugeOr origin;
const Float alpha;
const Float tolerance;
GaugeDs dest;
GaugeAPEArg(GaugeOr &origin, GaugeDs &dest, const GaugeField &data, const Float alpha, const Float tolerance)
: origin(origin), dest(dest), alpha(alpha), tolerance(tolerance) {
#ifdef MULTI_GPU
for ( int dir = 0; dir < 4; ++dir ) {
border[dir] = data.R()[dir];
X[dir] = data.X()[dir] - border[dir] * 2;
}
#else
for(int dir=0; dir<4; ++dir) X[dir] = data.X()[dir];
#endif
threads = X[0]*X[1]*X[2]*X[3];
}
};
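// Accumulates the sum of the upper and lower staples around the link at
// (x, dir), using only the spatial directions mu != dir (mu < 3), so temporal
// staples never enter the smearing.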
template <typename Float, typename GaugeOr, typename GaugeDs, typename Float2>
__host__ __device__ void computeStaple(GaugeAPEArg<Float,GaugeOr,GaugeDs>& arg, int idx, int parity, int dir, Matrix<Float2,3> &staple) {
typedef typename ComplexTypeId<Float>::Type Cmplx;
// compute spacetime dimensions and parity
int X[4];
for(int dr=0; dr<4; ++dr) X[dr] = arg.X[dr];
int x[4];
getCoords(x, idx, X, parity);
#ifdef MULTI_GPU
for(int dr=0; dr<4; ++dr) {
x[dr] += arg.border[dr];
X[dr] += 2*arg.border[dr];
}
#endif
setZero(&staple);
for (int mu=0; mu<3; mu++) { // I believe most users won't want to include time staples in smearing
if (mu == dir) {
continue;
}
int nu = dir;
{
int dx[4] = {0, 0, 0, 0};
Matrix<Cmplx,3> U1;
arg.origin.load((Float*)(U1.data),linkIndexShift(x,dx,X), mu, parity);
Matrix<Cmplx,3> U2;
dx[mu]++;
arg.origin.load((Float*)(U2.data),linkIndexShift(x,dx,X), nu, 1-parity);
Matrix<Cmplx,3> U3;
dx[mu]--;
dx[nu]++;
arg.origin.load((Float*)(U3.data),linkIndexShift(x,dx,X), mu, 1-parity);
Matrix<Cmplx,3> tmpS;
tmpS = U1 * U2;
tmpS = tmpS * conj(U3);
staple = staple + tmpS;
dx[mu]--;
dx[nu]--;
arg.origin.load((Float*)(U1.data),linkIndexShift(x,dx,X), mu, 1-parity);
arg.origin.load((Float*)(U2.data),linkIndexShift(x,dx,X), nu, 1-parity);
dx[nu]++;
arg.origin.load((Float*)(U3.data),linkIndexShift(x,dx,X), mu, parity);
tmpS = conj(U1);
tmpS = tmpS * U2;
tmpS = tmpS * U3;
staple = staple + tmpS;
}
}
}
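// One thread per lattice site (both parities packed into the thread range):
// for each spatial link U it forms TestU = (1 - alpha) * I + (alpha / 4) * S * conj(U)
// from the staple sum S, reprojects TestU onto SU(3) with polarSu3, and stores TestU * U.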
template<typename Float, typename GaugeOr, typename GaugeDs>
__global__ void computeAPEStep(GaugeAPEArg<Float,GaugeOr,GaugeDs> arg){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= arg.threads) return;
typedef typename ComplexTypeId<Float>::Type Cmplx;
int parity = 0;
if(idx >= arg.threads/2) {
parity = 1;
idx -= arg.threads/2;
}
int X[4];
for(int dr=0; dr<4; ++dr) X[dr] = arg.X[dr];
int x[4];
getCoords(x, idx, X, parity);
#ifdef MULTI_GPU
for(int dr=0; dr<4; ++dr) {
x[dr] += arg.border[dr];
X[dr] += 2*arg.border[dr];
}
#endif
int dx[4] = {0, 0, 0, 0};
for (int dir=0; dir < 3; dir++) { //Only spatial dimensions are smeared
Matrix<Cmplx,3> U, S, TestU, I;
computeStaple<Float,GaugeOr,GaugeDs,Cmplx>(arg,idx,parity,dir,S);
arg.origin.load((Float*)(U.data),linkIndexShift(x,dx,X), dir, parity);
S = S * (arg.alpha/((Float) (2.*(3. - 1.))));
setIdentity(&I);
TestU = I*(1.-arg.alpha) + S*conj(U);
polarSu3<Cmplx,Float>(TestU, arg.tolerance);
U = TestU*U;
arg.dest.save((Float*)(U.data),linkIndexShift(x,dx,X), dir, parity);
}
}
template<typename Float, typename GaugeOr, typename GaugeDs>
class GaugeAPE : Tunable {
GaugeAPEArg<Float,GaugeOr,GaugeDs> arg;
const QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
GaugeAPE(GaugeAPEArg<Float,GaugeOr, GaugeDs> &arg, QudaFieldLocation location)
: arg(arg), location(location) {}
virtual ~GaugeAPE () {}
void apply(const hipStream_t &stream){
if (location == QUDA_CUDA_FIELD_LOCATION) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( computeAPEStep), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg);
} else {
errorQuda("CPU not supported yet\n");
//computeAPEStepCPU(arg);
}
}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune(){}
void postTune(){}
long long flops() const { return (1)*6*arg.threads; }
long long bytes() const { return (1)*6*arg.threads*sizeof(Float); } // Only correct if there is no link reconstruction
}; // GaugeAPE
template<typename Float,typename GaugeOr, typename GaugeDs>
void APEStep(GaugeOr origin, GaugeDs dest, const GaugeField& dataOr, Float alpha, QudaFieldLocation location) {
if (dataOr.Precision() == QUDA_DOUBLE_PRECISION) {
GaugeAPEArg<Float,GaugeOr,GaugeDs> arg(origin, dest, dataOr, alpha, DOUBLE_TOL);
GaugeAPE<Float,GaugeOr,GaugeDs> gaugeAPE(arg, location);
gaugeAPE.apply(0);
} else {
GaugeAPEArg<Float,GaugeOr,GaugeDs> arg(origin, dest, dataOr, alpha, SINGLE_TOL);
GaugeAPE<Float,GaugeOr,GaugeDs> gaugeAPE(arg, location);
gaugeAPE.apply(0);
}
hipDeviceSynchronize();
}
template<typename Float>
void APEStep(GaugeField &dataDs, const GaugeField& dataOr, Float alpha, QudaFieldLocation location) {
if(dataDs.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type GDs;
if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_12){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_8){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else{
errorQuda("Reconstruction type %d of origin gauge field not supported", dataOr.Reconstruct());
}
} else if(dataDs.Reconstruct() == QUDA_RECONSTRUCT_12){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type GDs;
if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_NO){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_12){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_8){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else{
errorQuda("Reconstruction type %d of origin gauge field not supported", dataOr.Reconstruct());
}
} else if(dataDs.Reconstruct() == QUDA_RECONSTRUCT_8){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type GDs;
if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_NO){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_12){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_8){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else{
errorQuda("Reconstruction type %d of origin gauge field not supported", dataOr.Reconstruct());
}
} else {
errorQuda("Reconstruction type %d of destination gauge field not supported", dataDs.Reconstruct());
}
}
#endif
void APEStep(GaugeField &dataDs, const GaugeField& dataOr, double alpha, QudaFieldLocation location) {
#ifdef GPU_GAUGE_TOOLS
if(dataOr.Precision() != dataDs.Precision()) {
errorQuda("Origin and destination fields must have the same precision\n");
}
if(dataDs.Precision() == QUDA_HALF_PRECISION){
errorQuda("Half precision not supported\n");
}
if (!dataOr.isNative())
errorQuda("Order %d with %d reconstruct not supported", dataOr.Order(), dataOr.Reconstruct());
if (!dataDs.isNative())
errorQuda("Order %d with %d reconstruct not supported", dataDs.Order(), dataDs.Reconstruct());
if (dataDs.Precision() == QUDA_SINGLE_PRECISION){
APEStep<float>(dataDs, dataOr, (float) alpha, location);
} else if(dataDs.Precision() == QUDA_DOUBLE_PRECISION) {
APEStep<double>(dataDs, dataOr, alpha, location);
} else {
errorQuda("Precision %d not supported", dataDs.Precision());
}
return;
#else
errorQuda("Gauge tools are not built");
#endif
}
}
|
6a01a375094fa1c9e5d17d618f963203c39c591f.cu
|
#include <quda_internal.h>
#include <quda_matrix.h>
#include <su3_project.cuh>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <index_helper.cuh>
#define DOUBLE_TOL 1e-15
#define SINGLE_TOL 2e-6
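// Tolerance handed to the SU(3) polar projection (polarSu3), selected by floating-point precision below.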
namespace quda {
#ifdef GPU_GAUGE_TOOLS
template <typename Float, typename GaugeOr, typename GaugeDs>
struct GaugeAPEArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
GaugeOr origin;
const Float alpha;
const Float tolerance;
GaugeDs dest;
GaugeAPEArg(GaugeOr &origin, GaugeDs &dest, const GaugeField &data, const Float alpha, const Float tolerance)
: origin(origin), dest(dest), alpha(alpha), tolerance(tolerance) {
#ifdef MULTI_GPU
for ( int dir = 0; dir < 4; ++dir ) {
border[dir] = data.R()[dir];
X[dir] = data.X()[dir] - border[dir] * 2;
}
#else
for(int dir=0; dir<4; ++dir) X[dir] = data.X()[dir];
#endif
threads = X[0]*X[1]*X[2]*X[3];
}
};
template <typename Float, typename GaugeOr, typename GaugeDs, typename Float2>
__host__ __device__ void computeStaple(GaugeAPEArg<Float,GaugeOr,GaugeDs>& arg, int idx, int parity, int dir, Matrix<Float2,3> &staple) {
typedef typename ComplexTypeId<Float>::Type Cmplx;
// compute spacetime dimensions and parity
int X[4];
for(int dr=0; dr<4; ++dr) X[dr] = arg.X[dr];
int x[4];
getCoords(x, idx, X, parity);
#ifdef MULTI_GPU
for(int dr=0; dr<4; ++dr) {
x[dr] += arg.border[dr];
X[dr] += 2*arg.border[dr];
}
#endif
setZero(&staple);
for (int mu=0; mu<3; mu++) { // I believe most users won't want to include time staples in smearing
if (mu == dir) {
continue;
}
int nu = dir;
{
int dx[4] = {0, 0, 0, 0};
Matrix<Cmplx,3> U1;
arg.origin.load((Float*)(U1.data),linkIndexShift(x,dx,X), mu, parity);
Matrix<Cmplx,3> U2;
dx[mu]++;
arg.origin.load((Float*)(U2.data),linkIndexShift(x,dx,X), nu, 1-parity);
Matrix<Cmplx,3> U3;
dx[mu]--;
dx[nu]++;
arg.origin.load((Float*)(U3.data),linkIndexShift(x,dx,X), mu, 1-parity);
Matrix<Cmplx,3> tmpS;
tmpS = U1 * U2;
tmpS = tmpS * conj(U3);
staple = staple + tmpS;
dx[mu]--;
dx[nu]--;
arg.origin.load((Float*)(U1.data),linkIndexShift(x,dx,X), mu, 1-parity);
arg.origin.load((Float*)(U2.data),linkIndexShift(x,dx,X), nu, 1-parity);
dx[nu]++;
arg.origin.load((Float*)(U3.data),linkIndexShift(x,dx,X), mu, parity);
tmpS = conj(U1);
tmpS = tmpS * U2;
tmpS = tmpS * U3;
staple = staple + tmpS;
}
}
}
template<typename Float, typename GaugeOr, typename GaugeDs>
__global__ void computeAPEStep(GaugeAPEArg<Float,GaugeOr,GaugeDs> arg){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= arg.threads) return;
typedef typename ComplexTypeId<Float>::Type Cmplx;
int parity = 0;
if(idx >= arg.threads/2) {
parity = 1;
idx -= arg.threads/2;
}
int X[4];
for(int dr=0; dr<4; ++dr) X[dr] = arg.X[dr];
int x[4];
getCoords(x, idx, X, parity);
#ifdef MULTI_GPU
for(int dr=0; dr<4; ++dr) {
x[dr] += arg.border[dr];
X[dr] += 2*arg.border[dr];
}
#endif
int dx[4] = {0, 0, 0, 0};
for (int dir=0; dir < 3; dir++) { //Only spatial dimensions are smeared
Matrix<Cmplx,3> U, S, TestU, I;
computeStaple<Float,GaugeOr,GaugeDs,Cmplx>(arg,idx,parity,dir,S);
arg.origin.load((Float*)(U.data),linkIndexShift(x,dx,X), dir, parity);
S = S * (arg.alpha/((Float) (2.*(3. - 1.))));
setIdentity(&I);
TestU = I*(1.-arg.alpha) + S*conj(U);
polarSu3<Cmplx,Float>(TestU, arg.tolerance);
U = TestU*U;
arg.dest.save((Float*)(U.data),linkIndexShift(x,dx,X), dir, parity);
}
}
template<typename Float, typename GaugeOr, typename GaugeDs>
class GaugeAPE : Tunable {
GaugeAPEArg<Float,GaugeOr,GaugeDs> arg;
const QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
GaugeAPE(GaugeAPEArg<Float,GaugeOr, GaugeDs> &arg, QudaFieldLocation location)
: arg(arg), location(location) {}
virtual ~GaugeAPE () {}
void apply(const cudaStream_t &stream){
if (location == QUDA_CUDA_FIELD_LOCATION) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
computeAPEStep<<<tp.grid,tp.block,tp.shared_bytes>>>(arg);
} else {
errorQuda("CPU not supported yet\n");
//computeAPEStepCPU(arg);
}
}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune(){}
void postTune(){}
long long flops() const { return (1)*6*arg.threads; }
long long bytes() const { return (1)*6*arg.threads*sizeof(Float); } // Only correct if there is no link reconstruction
}; // GaugeAPE
template<typename Float,typename GaugeOr, typename GaugeDs>
void APEStep(GaugeOr origin, GaugeDs dest, const GaugeField& dataOr, Float alpha, QudaFieldLocation location) {
if (dataOr.Precision() == QUDA_DOUBLE_PRECISION) {
GaugeAPEArg<Float,GaugeOr,GaugeDs> arg(origin, dest, dataOr, alpha, DOUBLE_TOL);
GaugeAPE<Float,GaugeOr,GaugeDs> gaugeAPE(arg, location);
gaugeAPE.apply(0);
} else {
GaugeAPEArg<Float,GaugeOr,GaugeDs> arg(origin, dest, dataOr, alpha, SINGLE_TOL);
GaugeAPE<Float,GaugeOr,GaugeDs> gaugeAPE(arg, location);
gaugeAPE.apply(0);
}
cudaDeviceSynchronize();
}
template<typename Float>
void APEStep(GaugeField &dataDs, const GaugeField& dataOr, Float alpha, QudaFieldLocation location) {
if(dataDs.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type GDs;
if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_12){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_8){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else{
errorQuda("Reconstruction type %d of origin gauge field not supported", dataOr.Reconstruct());
}
} else if(dataDs.Reconstruct() == QUDA_RECONSTRUCT_12){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type GDs;
if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_NO){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_12){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_8){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else{
errorQuda("Reconstruction type %d of origin gauge field not supported", dataOr.Reconstruct());
}
} else if(dataDs.Reconstruct() == QUDA_RECONSTRUCT_8){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type GDs;
if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_NO){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_12){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else if(dataOr.Reconstruct() == QUDA_RECONSTRUCT_8){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type GOr;
APEStep(GOr(dataOr), GDs(dataDs), dataOr, alpha, location);
}else{
errorQuda("Reconstruction type %d of origin gauge field not supported", dataOr.Reconstruct());
}
} else {
errorQuda("Reconstruction type %d of destination gauge field not supported", dataDs.Reconstruct());
}
}
#endif
void APEStep(GaugeField &dataDs, const GaugeField& dataOr, double alpha, QudaFieldLocation location) {
#ifdef GPU_GAUGE_TOOLS
if(dataOr.Precision() != dataDs.Precision()) {
errorQuda("Origin and destination fields must have the same precision\n");
}
if(dataDs.Precision() == QUDA_HALF_PRECISION){
errorQuda("Half precision not supported\n");
}
if (!dataOr.isNative())
errorQuda("Order %d with %d reconstruct not supported", dataOr.Order(), dataOr.Reconstruct());
if (!dataDs.isNative())
errorQuda("Order %d with %d reconstruct not supported", dataDs.Order(), dataDs.Reconstruct());
if (dataDs.Precision() == QUDA_SINGLE_PRECISION){
APEStep<float>(dataDs, dataOr, (float) alpha, location);
} else if(dataDs.Precision() == QUDA_DOUBLE_PRECISION) {
APEStep<double>(dataDs, dataOr, alpha, location);
} else {
errorQuda("Precision %d not supported", dataDs.Precision());
}
return;
#else
errorQuda("Gauge tools are not built");
#endif
}
}
|
1117422239ee1d165554fd07b57ce7af32c01615.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuTv1dOperator.h"
#include "cuNDArray_operators.h"
#include "cuNDArray_elemwise.h"
#include "vector_td_utilities.h"
#include "complext.h"
#include "check_CUDA.h"
#include "cudaDeviceManager.h"
#include <iostream>
using namespace Gadgetron;
template<class REAL, class T, unsigned int D> static inline __device__ REAL gradient(const T* __restrict__ in, const vector_td<int,D>& dims, vector_td<int,D>& co){
T xi = in[co_to_idx((co+dims)%dims,dims)];
co[D-1]+=1;
T dt = in[co_to_idx((co+dims)%dims,dims)];
REAL grad = norm(xi-dt);
co[D-1]-=1;
return sqrt(grad);
}
template<class REAL, class T, unsigned int D> static __global__ void tvGradient_kernel(const T* __restrict__ in, T* __restrict__ out, const vector_td<int,D> dims,REAL limit,REAL weight){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if( idx < prod(dims) ){
T xi = in[idx];
T result=T(0);
vector_td<int,D> co = idx_to_co(idx, dims);
REAL grad = gradient<REAL,T,D>(in,dims,co);
if (grad > limit) {
result += xi/grad;
co[D-1]+=1;
result -= in[co_to_idx((co+dims)%dims,dims)]/grad;
co[D-1]-=1;
}
co[D-1]-=1;
grad = gradient<REAL,T,D>(in,dims,co);
if (grad > limit) {
result +=(xi-in[co_to_idx((co+dims)%dims,dims)])/grad;
}
co[D-1]+=1;
out[idx] += weight*result;
}
}
template<class T, unsigned int D> void cuTv1DOperator<T,D>::gradient (cuNDArray<T> * in,cuNDArray<T> * out, bool accumulate){
if (!accumulate) clear(out);
const typename intd<D>::Type dims = vector_td<int,D>( from_std_vector<size_t,D>(*(in->get_dimensions())));
int elements = in->get_number_of_elements();
int threadsPerBlock =::min(prod(dims),cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = ::max(1,prod(dims)/cudaDeviceManager::Instance()->max_blockdim());
dim3 dimGrid(totalBlocksPerGrid);
for (int i =0; i < (elements/prod(dims)); i++){
hipLaunchKernelGGL(( tvGradient_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, in->get_data_ptr()+i*prod(dims),out->get_data_ptr()+i*prod(dims),dims,limit_,this->weight_);
}
hipDeviceSynchronize();
CHECK_FOR_CUDA_ERROR();
}
template<class REAL, class T, unsigned int D> static __global__ void tvMagnitude_kernel(const T* in,T* out,const vector_td<int,D> dims,REAL limit,REAL weight)
{
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if( idx < prod(dims) ){
vector_td<int,D> co = idx_to_co(idx, dims);
REAL grad = gradient<REAL,T,D>(in,dims,co);
out[idx] = grad*weight;
}
}
template<class T, unsigned int D> typename realType<T>::Type cuTv1DOperator<T,D>::magnitude (cuNDArray<T> * in){
cuNDArray<T> out(*in);
const typename intd<D>::Type dims = vector_td<int,D>( from_std_vector<size_t,D>(*(in->get_dimensions())));
int elements = in->get_number_of_elements();
int threadsPerBlock =::min(prod(dims),cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = ::max(1,prod(dims)/cudaDeviceManager::Instance()->max_blockdim());
dim3 dimGrid(totalBlocksPerGrid);
for (int i =0; i < (elements/prod(dims)); i++){
hipLaunchKernelGGL(( tvMagnitude_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, in->get_data_ptr()+i*prod(dims),out.get_data_ptr()+i*prod(dims),dims,limit_,this->weight_);
}
hipDeviceSynchronize();
CHECK_FOR_CUDA_ERROR();
return asum(&out);
}
template class EXPORTGPUOPERATORS cuTv1DOperator<float,1>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float,2>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float,3>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float,4>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double,1>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double,2>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double,3>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double,4>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float_complext,1>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float_complext,2>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float_complext,3>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float_complext,4>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double_complext,1>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double_complext,2>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double_complext,3>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double_complext,4>;
|
1117422239ee1d165554fd07b57ce7af32c01615.cu
|
#include "cuTv1dOperator.h"
#include "cuNDArray_operators.h"
#include "cuNDArray_elemwise.h"
#include "vector_td_utilities.h"
#include "complext.h"
#include "check_CUDA.h"
#include "cudaDeviceManager.h"
#include <iostream>
using namespace Gadgetron;
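// gradient(): magnitude of the forward difference along the last dimension (D-1), with periodic wrap-around via (co+dims)%dims.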
template<class REAL, class T, unsigned int D> static inline __device__ REAL gradient(const T* __restrict__ in, const vector_td<int,D>& dims, vector_td<int,D>& co){
T xi = in[co_to_idx((co+dims)%dims,dims)];
co[D-1]+=1;
T dt = in[co_to_idx((co+dims)%dims,dims)];
REAL grad = norm(xi-dt);
co[D-1]-=1;
return sqrt(grad);
}
template<class REAL, class T, unsigned int D> static __global__ void tvGradient_kernel(const T* __restrict__ in, T* __restrict__ out, const vector_td<int,D> dims,REAL limit,REAL weight){
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if( idx < prod(dims) ){
T xi = in[idx];
T result=T(0);
vector_td<int,D> co = idx_to_co(idx, dims);
REAL grad = gradient<REAL,T,D>(in,dims,co);
if (grad > limit) {
result += xi/grad;
co[D-1]+=1;
result -= in[co_to_idx((co+dims)%dims,dims)]/grad;
co[D-1]-=1;
}
co[D-1]-=1;
grad = gradient<REAL,T,D>(in,dims,co);
if (grad > limit) {
result +=(xi-in[co_to_idx((co+dims)%dims,dims)])/grad;
}
co[D-1]+=1;
out[idx] += weight*result;
}
}
template<class T, unsigned int D> void cuTv1DOperator<T,D>::gradient (cuNDArray<T> * in,cuNDArray<T> * out, bool accumulate){
if (!accumulate) clear(out);
const typename intd<D>::Type dims = vector_td<int,D>( from_std_vector<size_t,D>(*(in->get_dimensions())));
int elements = in->get_number_of_elements();
int threadsPerBlock =std::min(prod(dims),cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = std::max(1,prod(dims)/cudaDeviceManager::Instance()->max_blockdim());
dim3 dimGrid(totalBlocksPerGrid);
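// One launch per D-dimensional sub-array: the data pointer advances by prod(dims) elements for each batch.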
for (int i =0; i < (elements/prod(dims)); i++){
tvGradient_kernel<<<dimGrid,dimBlock>>>(in->get_data_ptr()+i*prod(dims),out->get_data_ptr()+i*prod(dims),dims,limit_,this->weight_);
}
cudaDeviceSynchronize();
CHECK_FOR_CUDA_ERROR();
}
template<class REAL, class T, unsigned int D> static __global__ void tvMagnitude_kernel(const T* in,T* out,const vector_td<int,D> dims,REAL limit,REAL weight)
{
const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if( idx < prod(dims) ){
vector_td<int,D> co = idx_to_co(idx, dims);
REAL grad = gradient<REAL,T,D>(in,dims,co);
out[idx] = grad*weight;
}
}
template<class T, unsigned int D> typename realType<T>::Type cuTv1DOperator<T,D>::magnitude (cuNDArray<T> * in){
cuNDArray<T> out(*in);
const typename intd<D>::Type dims = vector_td<int,D>( from_std_vector<size_t,D>(*(in->get_dimensions())));
int elements = in->get_number_of_elements();
int threadsPerBlock =std::min(prod(dims),cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = std::max(1,prod(dims)/cudaDeviceManager::Instance()->max_blockdim());
dim3 dimGrid(totalBlocksPerGrid);
for (int i =0; i < (elements/prod(dims)); i++){
tvMagnitude_kernel<<<dimGrid,dimBlock>>>(in->get_data_ptr()+i*prod(dims),out.get_data_ptr()+i*prod(dims),dims,limit_,this->weight_);
}
cudaDeviceSynchronize();
CHECK_FOR_CUDA_ERROR();
return asum(&out);
}
template class EXPORTGPUOPERATORS cuTv1DOperator<float,1>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float,2>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float,3>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float,4>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double,1>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double,2>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double,3>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double,4>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float_complext,1>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float_complext,2>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float_complext,3>;
template class EXPORTGPUOPERATORS cuTv1DOperator<float_complext,4>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double_complext,1>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double_complext,2>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double_complext,3>;
template class EXPORTGPUOPERATORS cuTv1DOperator<double_complext,4>;
|
902d659d5eac82bc41114325085c3fe154d25427.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include "fmha.h"
#include "fmha_dgrad_kernel_1xN_reload.h"
using Kernel_traits = FMHA_kernel_traits< 128, 64, 16, 1, 4, 0x08u>;
extern "C" __global__ void fmha_dgrad_fp16_128_64_sm80_kernel(Fused_multihead_attention_fprop_params params) {
fmha::compute_dv_1xN<Kernel_traits>(params);
fmha::compute_dq_dk_1xN<Kernel_traits>(params);
}
void run_fmha_dgrad_fp16_128_64_sm80(const Fused_multihead_attention_fprop_params ¶ms, hipStream_t stream) {
constexpr int smem_size_softmax = Kernel_traits::Cta_tile_p::M * Kernel_traits::Cta_tile_p::WARPS_N * sizeof(float);
constexpr int smem_size_q = Kernel_traits::Smem_tile_q::BYTES_PER_TILE;
constexpr int smem_size_v = Kernel_traits::Smem_tile_v::BYTES_PER_TILE;
constexpr int smem_size_o = Kernel_traits::Smem_tile_o::BYTES_PER_TILE;
using Smem_tile_s = fmha::Smem_tile_mma_transposed< Kernel_traits::Cta_tile_p>;
constexpr int smem_size_s = Smem_tile_s::BYTES_PER_TILE;
static_assert(smem_size_s == 16 * 128 * 2);
static_assert(smem_size_o == 16 * 64 * 4 * Kernel_traits::Cta_tile_p::WARPS_N);
constexpr int smem_size_dv = smem_size_s + 2 * smem_size_q + smem_size_v + smem_size_softmax;
constexpr int smem_size_dq_dk = smem_size_s + smem_size_o + smem_size_q + smem_size_v;
constexpr int smem_size = ::max(smem_size_dv, smem_size_dq_dk);
if( smem_size >= 48 * 1024 ) {
FMHA_CHECK_CUDA(hipFuncSetAttribute(
fmha_dgrad_fp16_128_64_sm80_kernel, hipFuncAttributeMaxDynamicSharedMemorySize, smem_size));
}
dim3 grid(params.h, params.b);
hipLaunchKernelGGL(( fmha_dgrad_fp16_128_64_sm80_kernel), dim3(grid), dim3(Kernel_traits::THREADS), smem_size, stream, params);
}
|
902d659d5eac82bc41114325085c3fe154d25427.cu
|
/******************************************************************************
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include "fmha.h"
#include "fmha_dgrad_kernel_1xN_reload.h"
using Kernel_traits = FMHA_kernel_traits< 128, 64, 16, 1, 4, 0x08u>;
extern "C" __global__ void fmha_dgrad_fp16_128_64_sm80_kernel(Fused_multihead_attention_fprop_params params) {
fmha::compute_dv_1xN<Kernel_traits>(params);
fmha::compute_dq_dk_1xN<Kernel_traits>(params);
}
void run_fmha_dgrad_fp16_128_64_sm80(const Fused_multihead_attention_fprop_params ¶ms, cudaStream_t stream) {
constexpr int smem_size_softmax = Kernel_traits::Cta_tile_p::M * Kernel_traits::Cta_tile_p::WARPS_N * sizeof(float);
constexpr int smem_size_q = Kernel_traits::Smem_tile_q::BYTES_PER_TILE;
constexpr int smem_size_v = Kernel_traits::Smem_tile_v::BYTES_PER_TILE;
constexpr int smem_size_o = Kernel_traits::Smem_tile_o::BYTES_PER_TILE;
using Smem_tile_s = fmha::Smem_tile_mma_transposed< Kernel_traits::Cta_tile_p>;
constexpr int smem_size_s = Smem_tile_s::BYTES_PER_TILE;
static_assert(smem_size_s == 16 * 128 * 2);
static_assert(smem_size_o == 16 * 64 * 4 * Kernel_traits::Cta_tile_p::WARPS_N);
constexpr int smem_size_dv = smem_size_s + 2 * smem_size_q + smem_size_v + smem_size_softmax;
constexpr int smem_size_dq_dk = smem_size_s + smem_size_o + smem_size_q + smem_size_v;
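// The single kernel runs both the dV pass and the dQ/dK pass, so the dynamic shared-memory request must cover the larger of the two footprints; 48 KB or more requires the cudaFuncSetAttribute opt-in below.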
constexpr int smem_size = std::max(smem_size_dv, smem_size_dq_dk);
if( smem_size >= 48 * 1024 ) {
FMHA_CHECK_CUDA(cudaFuncSetAttribute(
fmha_dgrad_fp16_128_64_sm80_kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size));
}
dim3 grid(params.h, params.b);
fmha_dgrad_fp16_128_64_sm80_kernel<<<grid, Kernel_traits::THREADS, smem_size, stream>>>(params);
}
|
30b0c94b0f401e4e32550bdfefd15e5e851cb64a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/*
* NV12ToARGB color space conversion CUDA kernel
*
* This sample uses CUDA to convert a simple NV12 (YUV 4:2:0 planar)
* source to output in ARGB format
*/
#include "opencv2/opencv_modules.hpp"
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/cudev/common.hpp"
using namespace cv;
using namespace cv::cudev;
void videoDecPostProcessFrame(const GpuMat& decodedFrame, GpuMat& _outFrame, int width, int height, hipStream_t stream);
namespace
{
__constant__ float constHueColorSpaceMat[9] = {1.1644f, 0.0f, 1.596f, 1.1644f, -0.3918f, -0.813f, 1.1644f, 2.0172f, 0.0f};
__device__ static void YUV2RGB(const uint* yuvi, float* red, float* green, float* blue)
{
float luma, chromaCb, chromaCr;
// Prepare for hue adjustment
luma = (float)yuvi[0];
chromaCb = (float)((int)yuvi[1] - 512.0f);
chromaCr = (float)((int)yuvi[2] - 512.0f);
// Convert YUV To RGB with hue adjustment
*red = (luma * constHueColorSpaceMat[0]) +
(chromaCb * constHueColorSpaceMat[1]) +
(chromaCr * constHueColorSpaceMat[2]);
*green = (luma * constHueColorSpaceMat[3]) +
(chromaCb * constHueColorSpaceMat[4]) +
(chromaCr * constHueColorSpaceMat[5]);
*blue = (luma * constHueColorSpaceMat[6]) +
(chromaCb * constHueColorSpaceMat[7]) +
(chromaCr * constHueColorSpaceMat[8]);
}
__device__ static uint RGBA_pack_10bit(float red, float green, float blue, uint alpha)
{
uint ARGBpixel = 0;
// Clamp final 10 bit results
red = ::fmin(::fmax(red, 0.0f), 1023.f);
green = ::fmin(::fmax(green, 0.0f), 1023.f);
blue = ::fmin(::fmax(blue, 0.0f), 1023.f);
// Convert to 8 bit unsigned integers per color component
ARGBpixel = (((uint)blue >> 2) |
(((uint)green >> 2) << 8) |
(((uint)red >> 2) << 16) |
(uint)alpha);
return ARGBpixel;
}
// CUDA kernel for outputting the final ARGB output from NV12
#define COLOR_COMPONENT_BIT_SIZE 10
#define COLOR_COMPONENT_MASK 0x3FF
__global__ void NV12_to_RGB(const uchar* srcImage, size_t nSourcePitch,
uint* dstImage, size_t nDestPitch,
uint width, uint height)
{
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
const int x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
uint yuv101010Pel[2];
yuv101010Pel[0] = (srcImage[y * nSourcePitch + x ]) << 2;
yuv101010Pel[1] = (srcImage[y * nSourcePitch + x + 1]) << 2;
const size_t chromaOffset = nSourcePitch * height;
const int y_chroma = y >> 1;
if (y & 1) // odd scanline ?
{
uint chromaCb = srcImage[chromaOffset + y_chroma * nSourcePitch + x ];
uint chromaCr = srcImage[chromaOffset + y_chroma * nSourcePitch + x + 1];
if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
{
chromaCb = (chromaCb + srcImage[chromaOffset + (y_chroma + 1) * nSourcePitch + x ] + 1) >> 1;
chromaCr = (chromaCr + srcImage[chromaOffset + (y_chroma + 1) * nSourcePitch + x + 1] + 1) >> 1;
}
yuv101010Pel[0] |= (chromaCb << ( COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= (chromaCb << ( COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
else
{
yuv101010Pel[0] |= ((uint)srcImage[chromaOffset + y_chroma * nSourcePitch + x ] << ( COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= ((uint)srcImage[chromaOffset + y_chroma * nSourcePitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= ((uint)srcImage[chromaOffset + y_chroma * nSourcePitch + x ] << ( COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= ((uint)srcImage[chromaOffset + y_chroma * nSourcePitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
// this step performs the color conversion
uint yuvi[6];
float red[2], green[2], blue[2];
yuvi[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK );
yuvi[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
yuvi[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK );
yuvi[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
// YUV to RGB Transformation conversion
YUV2RGB(&yuvi[0], &red[0], &green[0], &blue[0]);
YUV2RGB(&yuvi[3], &red[1], &green[1], &blue[1]);
// Clamp the results to RGBA
const size_t dstImagePitch = nDestPitch >> 2;
dstImage[y * dstImagePitch + x ] = RGBA_pack_10bit(red[0], green[0], blue[0], ((uint)0xff << 24));
dstImage[y * dstImagePitch + x + 1 ] = RGBA_pack_10bit(red[1], green[1], blue[1], ((uint)0xff << 24));
}
}
void videoDecPostProcessFrame(const GpuMat& decodedFrame, GpuMat& outFrame, int width, int height, hipStream_t stream)
{
// Final Stage: NV12toARGB color space conversion
outFrame.create(height, width, CV_8UC4);
dim3 block(32, 8);
dim3 grid(divUp(width, 2 * block.x), divUp(height, block.y));
hipLaunchKernelGGL(( NV12_to_RGB), dim3(grid), dim3(block), 0, stream, decodedFrame.ptr<uchar>(), decodedFrame.step,
outFrame.ptr<uint>(), outFrame.step,
width, height);
CV_CUDEV_SAFE_CALL( hipGetLastError() );
if (stream == 0)
CV_CUDEV_SAFE_CALL( hipDeviceSynchronize() );
}
#endif
|
30b0c94b0f401e4e32550bdfefd15e5e851cb64a.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/*
* NV12ToARGB color space conversion CUDA kernel
*
* This sample uses CUDA to convert a simple NV12 (YUV 4:2:0 planar)
* source to output in ARGB format
*/
#include "opencv2/opencv_modules.hpp"
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/cudev/common.hpp"
using namespace cv;
using namespace cv::cudev;
void videoDecPostProcessFrame(const GpuMat& decodedFrame, GpuMat& _outFrame, int width, int height, cudaStream_t stream);
namespace
{
__constant__ float constHueColorSpaceMat[9] = {1.1644f, 0.0f, 1.596f, 1.1644f, -0.3918f, -0.813f, 1.1644f, 2.0172f, 0.0f};
__device__ static void YUV2RGB(const uint* yuvi, float* red, float* green, float* blue)
{
float luma, chromaCb, chromaCr;
// Prepare for hue adjustment
luma = (float)yuvi[0];
chromaCb = (float)((int)yuvi[1] - 512.0f);
chromaCr = (float)((int)yuvi[2] - 512.0f);
// Convert YUV To RGB with hue adjustment
*red = (luma * constHueColorSpaceMat[0]) +
(chromaCb * constHueColorSpaceMat[1]) +
(chromaCr * constHueColorSpaceMat[2]);
*green = (luma * constHueColorSpaceMat[3]) +
(chromaCb * constHueColorSpaceMat[4]) +
(chromaCr * constHueColorSpaceMat[5]);
*blue = (luma * constHueColorSpaceMat[6]) +
(chromaCb * constHueColorSpaceMat[7]) +
(chromaCr * constHueColorSpaceMat[8]);
}
__device__ static uint RGBA_pack_10bit(float red, float green, float blue, uint alpha)
{
uint ARGBpixel = 0;
// Clamp final 10 bit results
red = ::fmin(::fmax(red, 0.0f), 1023.f);
green = ::fmin(::fmax(green, 0.0f), 1023.f);
blue = ::fmin(::fmax(blue, 0.0f), 1023.f);
// Convert to 8 bit unsigned integers per color component
ARGBpixel = (((uint)blue >> 2) |
(((uint)green >> 2) << 8) |
(((uint)red >> 2) << 16) |
(uint)alpha);
return ARGBpixel;
}
// CUDA kernel for outputting the final ARGB output from NV12
#define COLOR_COMPONENT_BIT_SIZE 10
#define COLOR_COMPONENT_MASK 0x3FF
__global__ void NV12_to_RGB(const uchar* srcImage, size_t nSourcePitch,
uint* dstImage, size_t nDestPitch,
uint width, uint height)
{
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
const int x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
uint yuv101010Pel[2];
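// Each 32-bit word packs one pixel's Y, Cb and Cr as 10-bit fields (8-bit source values scaled by 4): Y in bits 0-9, Cb in bits 10-19, Cr in bits 20-29.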
yuv101010Pel[0] = (srcImage[y * nSourcePitch + x ]) << 2;
yuv101010Pel[1] = (srcImage[y * nSourcePitch + x + 1]) << 2;
const size_t chromaOffset = nSourcePitch * height;
const int y_chroma = y >> 1;
if (y & 1) // odd scanline ?
{
uint chromaCb = srcImage[chromaOffset + y_chroma * nSourcePitch + x ];
uint chromaCr = srcImage[chromaOffset + y_chroma * nSourcePitch + x + 1];
if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
{
chromaCb = (chromaCb + srcImage[chromaOffset + (y_chroma + 1) * nSourcePitch + x ] + 1) >> 1;
chromaCr = (chromaCr + srcImage[chromaOffset + (y_chroma + 1) * nSourcePitch + x + 1] + 1) >> 1;
}
yuv101010Pel[0] |= (chromaCb << ( COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= (chromaCb << ( COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
else
{
yuv101010Pel[0] |= ((uint)srcImage[chromaOffset + y_chroma * nSourcePitch + x ] << ( COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= ((uint)srcImage[chromaOffset + y_chroma * nSourcePitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= ((uint)srcImage[chromaOffset + y_chroma * nSourcePitch + x ] << ( COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= ((uint)srcImage[chromaOffset + y_chroma * nSourcePitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
// this step performs the color conversion
uint yuvi[6];
float red[2], green[2], blue[2];
yuvi[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK );
yuvi[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
yuvi[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK );
yuvi[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
// YUV to RGB Transformation conversion
YUV2RGB(&yuvi[0], &red[0], &green[0], &blue[0]);
YUV2RGB(&yuvi[3], &red[1], &green[1], &blue[1]);
// Clamp the results to RGBA
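// nDestPitch is in bytes; shifting right by 2 converts it to a pitch in uint (4-byte ARGB) elements for indexing dstImage.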
const size_t dstImagePitch = nDestPitch >> 2;
dstImage[y * dstImagePitch + x ] = RGBA_pack_10bit(red[0], green[0], blue[0], ((uint)0xff << 24));
dstImage[y * dstImagePitch + x + 1 ] = RGBA_pack_10bit(red[1], green[1], blue[1], ((uint)0xff << 24));
}
}
void videoDecPostProcessFrame(const GpuMat& decodedFrame, GpuMat& outFrame, int width, int height, cudaStream_t stream)
{
// Final Stage: NV12toARGB color space conversion
outFrame.create(height, width, CV_8UC4);
dim3 block(32, 8);
dim3 grid(divUp(width, 2 * block.x), divUp(height, block.y));
NV12_to_RGB<<<grid, block, 0, stream>>>(decodedFrame.ptr<uchar>(), decodedFrame.step,
outFrame.ptr<uint>(), outFrame.step,
width, height);
CV_CUDEV_SAFE_CALL( cudaGetLastError() );
if (stream == 0)
CV_CUDEV_SAFE_CALL( cudaDeviceSynchronize() );
}
#endif
|
f00e8c0185ae3c3dc90cdbc5492316f96dbe70cd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathReduce.hip"
#else
accreal THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
void THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::lower_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<scalar_t, int64_t>());
}
void THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::upper_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<scalar_t, int64_t>());
}
scalar_t THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
THArgCheck(
THTensor_(nElement)(self) > 0,
1,
"cannot perform reduction function min "
"on tensor with no elements because the "
"operation does not have an identity"
);
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::upper_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<scalar_t>(val);
}
scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
THArgCheck(
THTensor_(nElement)(self) > 0,
1,
"cannot perform reduction function max "
"on tensor with no elements because the "
"operation does not have an identity"
);
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::lower_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<scalar_t>(val);
}
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
dimension = at::maybe_wrap_dim(dimension, src);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
// NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel.
dim3 threads(32);
hipLaunchKernelGGL((THCTensor_kernel_renorm<scalar_t, accreal>),
dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
accreal THCTensor_(std_all)(THCState *state, THCTensor *self, bool unbiased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(var_all)(state, self, unbiased)));
}
accreal THCTensor_(var_all)(THCState *state, THCTensor *self, bool unbiased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (unbiased ? 1 : 0)))
);
THCudaCheck(hipGetLastError());
return val;
}
#endif
accreal THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
#endif
#endif
|
f00e8c0185ae3c3dc90cdbc5492316f96dbe70cd.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathReduce.cu"
#else
accreal THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
void THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::lower_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<scalar_t, int64_t>());
}
void THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::upper_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<scalar_t, int64_t>());
}
scalar_t THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
THArgCheck(
THTensor_(nElement)(self) > 0,
1,
"cannot perform reduction function min "
"on tensor with no elements because the "
"operation does not have an identity"
);
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::upper_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<scalar_t>(val);
}
scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
THArgCheck(
THTensor_(nElement)(self) > 0,
1,
"cannot perform reduction function max "
"on tensor with no elements because the "
"operation does not have an identity"
);
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::lower_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<scalar_t>(val);
}
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
dimension = at::maybe_wrap_dim(dimension, src);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
THCTensor *self_;
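// Transpose the renorm dimension to dim 0 and clone so each slice is contiguous; the kernel is then launched with one block per slice of that dimension.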
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
// NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel.
dim3 threads(32);
THCTensor_kernel_renorm<scalar_t, accreal>
<<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>
(THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
cudaError_t errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
accreal THCTensor_(std_all)(THCState *state, THCTensor *self, bool unbiased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(var_all)(state, self, unbiased)));
}
accreal THCTensor_(var_all)(THCState *state, THCTensor *self, bool unbiased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (unbiased ? 1 : 0)))
);
THCudaCheck(cudaGetLastError());
return val;
}
#endif
accreal THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
#endif
#endif
|
43f3416eb8fe07e7a3fdfbcc747da2b99743975b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vectorAddKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *inputA = NULL;
hipMalloc(&inputA, XSIZE*YSIZE);
float *inputB = NULL;
hipMalloc(&inputB, XSIZE*YSIZE);
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
int length = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(vectorAddKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, inputA, inputB, output, length);
hipDeviceSynchronize();
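// Warm-up: 10 untimed launches so the timed loop below excludes one-time startup cost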
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(vectorAddKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, inputA, inputB, output, length);
}
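// Timed region: 1000 launches; there is no device synchronization before 'end', so this mostly measures launch/enqueue overhead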
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(vectorAddKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, inputA, inputB, output, length);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
43f3416eb8fe07e7a3fdfbcc747da2b99743975b.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vectorAddKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *inputA = NULL;
cudaMalloc(&inputA, XSIZE*YSIZE);
float *inputB = NULL;
cudaMalloc(&inputB, XSIZE*YSIZE);
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
int length = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vectorAddKernel<<<gridBlock,threadBlock>>>(inputA,inputB,output,length);
cudaDeviceSynchronize();
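// Warm-up: 10 untimed launches so the timed loop below excludes one-time startup cost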
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vectorAddKernel<<<gridBlock,threadBlock>>>(inputA,inputB,output,length);
}
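// Timed region: 1000 launches; there is no device synchronization before 'end', so this mostly measures launch/enqueue overhead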
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vectorAddKernel<<<gridBlock,threadBlock>>>(inputA,inputB,output,length);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
f9ddcc846a5256b5837c08cc4366388eca8b03cb.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* Copyright (c) 2018 University of Maryland, College Park
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file mask_rcnn_target.cu
* \brief MaskRcnnTarget Operator
* \author Mahyar Najibi, Bharat Singh
*/
#include "./mask_rcnn_target-inl.h"
#include "../coco_api/common/maskApi.h"
#include <set>
#include <math.h>
#include <unistd.h>
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include "./operator_common.h"
#include "./mshadow_op.h"
#include <time.h>
namespace mxnet {
namespace op {
namespace mask_utils {
// Mask Utility Functions
inline void convertPoly2Mask(const float* roi, const float* poly, const int mask_size, float* mask, float category)
{
/* !
Converts a polygon to a pre-defined mask wrt to an roi
*****Inputs****
roi: The RoI bounding box
poly: The polygon points the pre-defined format(see below)
mask_size: The mask size
*****Outputs****
mask: the flattened mask_size x mask_size binary mask written for this RoI
*/
float w = roi[3] - roi[1];
float h = roi[4] - roi[2];
w = ::max((float)1, w);
h = ::max((float)1, h);
int n_seg = poly[1];
int offset = 2 + n_seg;
RLE* rles;
rlesInit(&rles, n_seg);
for(int i = 0; i < n_seg; i++){
int cur_len = poly[i+2];
double* xys = new double[cur_len];
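// Translate each polygon vertex by the RoI origin and scale it into mask_size x mask_size mask coordinates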
for(int j = 0; j < cur_len; j++){
if (j % 2 == 0)
xys[j] = (poly[offset+j+1] - roi[2]) * mask_size / h;
else
xys[j] = (poly[offset+j-1] - roi[1]) * mask_size / w;
}
rleFrPoly(rles + i, xys, cur_len/2, mask_size, mask_size);
delete [] xys;
offset += cur_len;
}
// Decode RLE to mask
byte* byte_mask = new byte[mask_size*mask_size*n_seg];
rleDecode(rles, byte_mask, n_seg);
// Flatten mask
for(int j = 0; j < mask_size*mask_size; j++)
{
float cur_byte = 0;
for(int i = 0; i< n_seg; i++){
int offset = i * mask_size * mask_size + j;
if(byte_mask[offset]==1){
cur_byte = 1;
break;
}
}
mask[j] = cur_byte;
}
// Free the RLE structures and the temporary byte mask to avoid leaking memory
rlesFree(&rles, n_seg);
delete [] byte_mask;
}
} // namespace utils
template<typename xpu>
class MaskRcnnTargetGPUOp : public Operator{
public:
float* cmask_outs, *cmask_cls;
float* crois, *cmask_boxes, *cgt_masks, *cmask_ids;
explicit MaskRcnnTargetGPUOp(MaskRcnnTargetParam param) {
this->param_ = param;
this->cmask_outs = new float[param_.batch_size*param_.num_proposals*param_.mask_size*param_.mask_size];
this->crois = new float[param_.batch_size*param_.num_proposals*5];
this->cgt_masks = new float[param_.batch_size*param_.max_num_gts*param_.max_polygon_len];
this->cmask_ids = new float[param_.batch_size*param_.num_proposals];
this->cmask_cls = new float[param_.batch_size*param_.num_proposals*param_.mask_size*param_.mask_size];
}
~MaskRcnnTargetGPUOp() {
delete [] this->cmask_outs;
delete [] this->crois;
delete [] this->cgt_masks;
delete [] this->cmask_ids;
delete [] this->cmask_cls;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
CHECK_EQ(in_data.size(), 3);
CHECK_EQ(out_data.size(), 2);
using namespace mshadow;
using namespace mshadow::expr;
// The polygon format for each ground-truth object is as follows:
// [category, num_seg, len_seg1, len_seg2,....,len_segn, seg1_x1,seg1_y1,...,seg1_xm,seg1_ym,seg2_x1,seg2_y1,...]
// Get input
Stream<gpu> *s = ctx.get_stream<gpu>();
Tensor<gpu, 2> rois = in_data[mask::kRoIs].get<gpu, 2, real_t>(s);
Tensor<gpu, 3> gt_masks = in_data[mask::kMaskPolys].get<gpu, 3, real_t>(s);
Tensor<gpu, 2> mask_ids = in_data[mask::kMaskIds].get<gpu, 2, real_t>(s);
// Copy to CPU
hipMemcpy(crois, rois.dptr_, param_.batch_size*param_.num_proposals*5*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(cgt_masks, gt_masks.dptr_, param_.batch_size*param_.max_num_gts*param_.max_polygon_len*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(cmask_ids, mask_ids.dptr_, param_.batch_size*param_.num_proposals*sizeof(float), hipMemcpyDeviceToHost);
// Initialize the mask memory
int mask_mem_size = param_.batch_size*param_.num_proposals*param_.mask_size*param_.mask_size;
for(int i = 0; i < mask_mem_size; i++){
cmask_outs[i] = param_.ignore_label;
}
// Initialize the mask classes to 0
for(int i=0; i< mask_mem_size; i++)
cmask_cls[i] = 0;
// Convert each proposal's matched ground-truth polygon into a binary mask target (in parallel over proposals)
#pragma omp parallel for num_threads(8)
for(int i = 0; i < param_.batch_size * param_.num_proposals; i++){
int mask_id = cmask_ids[i];
if (mask_id == -1) {
continue;
}
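// crois stores 5 floats per proposal: [batch_index, x1, y1, x2, y2]; mask_id indexes the matched ground-truth polygon for that image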
int imid = crois[5*i];
int poly_offset = imid * param_.max_num_gts * param_.max_polygon_len + mask_id * param_.max_polygon_len;
// Convert the mask polygon to a binary mask
float category = cgt_masks[poly_offset];
mask_utils::convertPoly2Mask(crois + i * 5, cgt_masks + poly_offset, param_.mask_size, \
cmask_outs + i*param_.mask_size*param_.mask_size, category);
// In our poly encoding the first element is the category
int mask_area = param_.mask_size*param_.mask_size;
for (int j = i*mask_area; j < (i+1)*mask_area; j++)
cmask_cls[j] = category;
}
// Get output
Stream<gpu> *so = ctx.get_stream<gpu>();
Tensor<gpu, 3> out_masks = out_data[mask::kMaskTargets].get<gpu, 3, real_t>(so);
Tensor<gpu, 3> mask_cls = out_data[mask::kMaskCls].get<gpu, 3, real_t>(so);
// Copy output to the GPU
hipMemcpy(out_masks.dptr_, cmask_outs, \
param_.batch_size*param_.num_proposals*param_.mask_size*param_.mask_size*sizeof(float), \
hipMemcpyHostToDevice);
hipMemcpy(mask_cls.dptr_, cmask_cls, \
param_.batch_size*param_.num_proposals*param_.mask_size*param_.mask_size*sizeof(float), hipMemcpyHostToDevice);
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 3);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 2> grois = in_grad[mask::kRoIs].get<xpu, 2, real_t>(s);
Tensor<xpu, 3> gmask_polys = in_grad[mask::kMaskPolys].get<xpu, 3, real_t>(s);
Tensor<xpu, 2> gmask_ids = in_grad[mask::kMaskIds].get<xpu, 2, real_t>(s);
Assign(grois, req[mask::kRoIs], 0);
Assign(gmask_polys, req[mask::kMaskPolys], 0);
Assign(gmask_ids, req[mask::kMaskIds], 0);
}
private:
MaskRcnnTargetParam param_;
}; // class MaskRcnnTarget
template<>
Operator *CreateOp<gpu>(MaskRcnnTargetParam param) {
return new MaskRcnnTargetGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
|
f9ddcc846a5256b5837c08cc4366388eca8b03cb.cu
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* Copyright (c) 2018 University of Maryland, College Park
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file mask_rcnn_target.cu
* \brief MaskRcnnTarget Operator
* \author Mahyar Najibi, Bharat Singh
*/
#include "./mask_rcnn_target-inl.h"
#include "../coco_api/common/maskApi.h"
#include <set>
#include <math.h>
#include <unistd.h>
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include "./operator_common.h"
#include "./mshadow_op.h"
#include <time.h>
namespace mxnet {
namespace op {
namespace mask_utils {
// Mask Utility Functions
inline void convertPoly2Mask(const float* roi, const float* poly, const int mask_size, float* mask, float category)
{
/* !
Converts a polygon to a pre-defined mask wrt to an roi
*****Inputs****
roi: The RoI bounding box
poly: The polygon points the pre-defined format(see below)
mask_size: The mask size
*****Outputs****
mask: the flattened mask_size x mask_size binary mask written for this RoI
*/
float w = roi[3] - roi[1];
float h = roi[4] - roi[2];
w = std::max((float)1, w);
h = std::max((float)1, h);
int n_seg = poly[1];
int offset = 2 + n_seg;
RLE* rles;
rlesInit(&rles, n_seg);
for(int i = 0; i < n_seg; i++){
int cur_len = poly[i+2];
double* xys = new double[cur_len];
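// Translate each polygon vertex by the RoI origin and scale it into mask_size x mask_size mask coordinates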
for(int j = 0; j < cur_len; j++){
if (j % 2 == 0)
xys[j] = (poly[offset+j+1] - roi[2]) * mask_size / h;
else
xys[j] = (poly[offset+j-1] - roi[1]) * mask_size / w;
}
rleFrPoly(rles + i, xys, cur_len/2, mask_size, mask_size);
delete [] xys;
offset += cur_len;
}
// Decode RLE to mask
byte* byte_mask = new byte[mask_size*mask_size*n_seg];
rleDecode(rles, byte_mask, n_seg);
// Flatten mask
for(int j = 0; j < mask_size*mask_size; j++)
{
float cur_byte = 0;
for(int i = 0; i< n_seg; i++){
int offset = i * mask_size * mask_size + j;
if(byte_mask[offset]==1){
cur_byte = 1;
break;
}
}
mask[j] = cur_byte;
}
// Free the RLE structures and the temporary byte mask to avoid leaking memory
rlesFree(&rles, n_seg);
delete [] byte_mask;
}
} // namespace utils
template<typename xpu>
class MaskRcnnTargetGPUOp : public Operator{
public:
float* cmask_outs, *cmask_cls;
float* crois, *cmask_boxes, *cgt_masks, *cmask_ids;
explicit MaskRcnnTargetGPUOp(MaskRcnnTargetParam param) {
this->param_ = param;
this->cmask_outs = new float[param_.batch_size*param_.num_proposals*param_.mask_size*param_.mask_size];
this->crois = new float[param_.batch_size*param_.num_proposals*5];
this->cgt_masks = new float[param_.batch_size*param_.max_num_gts*param_.max_polygon_len];
this->cmask_ids = new float[param_.batch_size*param_.num_proposals];
this->cmask_cls = new float[param_.batch_size*param_.num_proposals*param_.mask_size*param_.mask_size];
}
~MaskRcnnTargetGPUOp() {
delete [] this->cmask_outs;
delete [] this->crois;
delete [] this->cgt_masks;
delete [] this->cmask_ids;
delete [] this->cmask_cls;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
CHECK_EQ(in_data.size(), 3);
CHECK_EQ(out_data.size(), 2);
using namespace mshadow;
using namespace mshadow::expr;
// The polygon format for each ground-truth object is as follows:
// [category, num_seg, len_seg1, len_seg2,....,len_segn, seg1_x1,seg1_y1,...,seg1_xm,seg1_ym,seg2_x1,seg2_y1,...]
// Get input
Stream<gpu> *s = ctx.get_stream<gpu>();
Tensor<gpu, 2> rois = in_data[mask::kRoIs].get<gpu, 2, real_t>(s);
Tensor<gpu, 3> gt_masks = in_data[mask::kMaskPolys].get<gpu, 3, real_t>(s);
Tensor<gpu, 2> mask_ids = in_data[mask::kMaskIds].get<gpu, 2, real_t>(s);
// Copy to CPU
cudaMemcpy(crois, rois.dptr_, param_.batch_size*param_.num_proposals*5*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(cgt_masks, gt_masks.dptr_, param_.batch_size*param_.max_num_gts*param_.max_polygon_len*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(cmask_ids, mask_ids.dptr_, param_.batch_size*param_.num_proposals*sizeof(float), cudaMemcpyDeviceToHost);
// Initialize the mask memory
int mask_mem_size = param_.batch_size*param_.num_proposals*param_.mask_size*param_.mask_size;
for(int i = 0; i < mask_mem_size; i++){
cmask_outs[i] = param_.ignore_label;
}
// Initialize the mask classes to 0
for(int i=0; i< mask_mem_size; i++)
cmask_cls[i] = 0;
// Convert each proposal's matched ground-truth polygon into a binary mask target (in parallel over proposals)
#pragma omp parallel for num_threads(8)
for(int i = 0; i < param_.batch_size * param_.num_proposals; i++){
int mask_id = cmask_ids[i];
if (mask_id == -1) {
continue;
}
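// crois stores 5 floats per proposal: [batch_index, x1, y1, x2, y2]; mask_id indexes the matched ground-truth polygon for that image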
int imid = crois[5*i];
int poly_offset = imid * param_.max_num_gts * param_.max_polygon_len + mask_id * param_.max_polygon_len;
// Convert the mask polygon to a binary mask
float category = cgt_masks[poly_offset];
mask_utils::convertPoly2Mask(crois + i * 5, cgt_masks + poly_offset, param_.mask_size, \
cmask_outs + i*param_.mask_size*param_.mask_size, category);
// In our poly encoding the first element is the category
int mask_area = param_.mask_size*param_.mask_size;
for (int j = i*mask_area; j < (i+1)*mask_area; j++)
cmask_cls[j] = category;
}
// Get output
Stream<gpu> *so = ctx.get_stream<gpu>();
Tensor<gpu, 3> out_masks = out_data[mask::kMaskTargets].get<gpu, 3, real_t>(so);
Tensor<gpu, 3> mask_cls = out_data[mask::kMaskCls].get<gpu, 3, real_t>(so);
// Copy output to the GPU
cudaMemcpy(out_masks.dptr_, cmask_outs, \
param_.batch_size*param_.num_proposals*param_.mask_size*param_.mask_size*sizeof(float), \
cudaMemcpyHostToDevice);
cudaMemcpy(mask_cls.dptr_, cmask_cls, \
param_.batch_size*param_.num_proposals*param_.mask_size*param_.mask_size*sizeof(float), cudaMemcpyHostToDevice);
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 3);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 2> grois = in_grad[mask::kRoIs].get<xpu, 2, real_t>(s);
Tensor<xpu, 3> gmask_polys = in_grad[mask::kMaskPolys].get<xpu, 3, real_t>(s);
Tensor<xpu, 2> gmask_ids = in_grad[mask::kMaskIds].get<xpu, 2, real_t>(s);
Assign(grois, req[mask::kRoIs], 0);
Assign(gmask_polys, req[mask::kMaskPolys], 0);
Assign(gmask_ids, req[mask::kMaskIds], 0);
}
private:
MaskRcnnTargetParam param_;
}; // class MaskRcnnTarget
template<>
Operator *CreateOp<gpu>(MaskRcnnTargetParam param) {
return new MaskRcnnTargetGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
|