hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
f99b6c23412ae47df0b7da7a5b9c7a83d61679f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void iReduceSum(int *idata, int *odata, unsigned int ncols) {
int i;
unsigned int tid = threadIdx.x;
extern __shared__ int sdata[];
unsigned int startPos = blockDim.x + threadIdx.x;
int colsPerThread = ncols/blockDim.x;
int blockOffset = threadIdx.x *(ncols/blockDim.x);
int myPart = 0;
for(i=0;i<colsPerThread;i++) {
myPart+=idata[blockOffset+startPos+i];
}
sdata[tid]=myPart;
__syncthreads();
unsigned int s;
for(s=1;s<blockDim.x;s*=2){
if(tid%(2*s) == 0){
sdata[tid]+=sdata[tid+s];
}
__syncthreads();
}
if(tid==0)odata[blockIdx.x]=sdata[0];
} | f99b6c23412ae47df0b7da7a5b9c7a83d61679f0.cu | #include "includes.h"
__global__ void iReduceSum(int *idata, int *odata, unsigned int ncols) {
int i;
unsigned int tid = threadIdx.x;
extern __shared__ int sdata[];
unsigned int startPos = blockDim.x + threadIdx.x;
int colsPerThread = ncols/blockDim.x;
int blockOffset = threadIdx.x *(ncols/blockDim.x);
int myPart = 0;
for(i=0;i<colsPerThread;i++) {
myPart+=idata[blockOffset+startPos+i];
}
sdata[tid]=myPart;
__syncthreads();
unsigned int s;
for(s=1;s<blockDim.x;s*=2){
if(tid%(2*s) == 0){
sdata[tid]+=sdata[tid+s];
}
__syncthreads();
}
if(tid==0)odata[blockIdx.x]=sdata[0];
} |
f180aee78ca8f2fbbccdeababe502d8637481e9e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ctranslate2/ops/layer_norm.h"
#include "cuda/helpers.h"
#include "cuda/utils.h"
namespace at {
namespace native {
// Forward declaration of the CUDA kernels.
template <typename T, typename SizeT>
__global__ void LayerNormForwardCUDAKernel(SizeT N,
float eps,
const T* X,
const T* gamma,
const T* beta,
T* Y);
}
}
namespace ctranslate2 {
namespace ops {
#define CUDA_NUM_THREADS 512
template <Device D, typename T>
void LayerNorm::compute(const StorageView* beta,
const StorageView* gamma,
const StorageView& input,
const dim_t axis,
const dim_t outer_size,
const dim_t axis_size,
const dim_t,
StorageView& output) const {
if (axis != input.rank() - 1 || !beta || !gamma)
throw std::invalid_argument("Generalized LayerNorm is currently not implemented on GPU");
hipLaunchKernelGGL(( at::native::LayerNormForwardCUDAKernel<cuda::device_type<T>, cuda::index_t>)
, dim3(outer_size), dim3(CUDA_NUM_THREADS), 0, cuda::get_cuda_stream(),
axis_size,
_epsilon,
cuda::device_cast(input.data<T>()),
cuda::device_cast(gamma->data<T>()),
cuda::device_cast(beta->data<T>()),
cuda::device_cast(output.data<T>()));
}
#define DECLARE_IMPL(T) \
template void \
LayerNorm::compute<Device::CUDA, T>(const StorageView* beta, \
const StorageView* gamma, \
const StorageView& input, \
const dim_t axis, \
const dim_t outer_size, \
const dim_t axis_size, \
const dim_t inner_size, \
StorageView& output) const;
DECLARE_IMPL(float)
DECLARE_IMPL(float16_t)
DECLARE_IMPL(bfloat16_t)
}
}
// The following CUDA kernels are adapted from:
// https://github.com/pytorch/pytorch/blob/295feb4e9af6cf4e7b9cff056de29a9dc17f50db/aten/src/ATen/native/cuda/layer_norm_kernel.cu
// which has the following license notice:
/*
From PyTorch:
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
From Caffe2:
Copyright (c) 2016-present, Facebook Inc. All rights reserved.
All contributions by Facebook:
Copyright (c) 2016 Facebook Inc.
All contributions by Google:
Copyright (c) 2015 Google Inc.
All rights reserved.
All contributions by Yangqing Jia:
Copyright (c) 2015 Yangqing Jia
All rights reserved.
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors
All rights reserved.
All other contributions:
Copyright(c) 2015, 2016 the respective contributors
All rights reserved.
Caffe2 uses a copyright model similar to Caffe: each contributor holds
copyright over their contributions to Caffe2. The project versioning records
all such contribution and copyright details. If a contributor wants to further
mark their specific copyright on a particular contribution, they should
indicate their copyright solely in the commit message of the change when it is
committed.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
and IDIAP Research Institute nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <hipcub/hipcub.hpp>
namespace at {
namespace native {
template <typename T, typename SizeT>
__global__ void LayerNormForwardCUDAKernel(SizeT N,
float eps,
const T* X,
const T* gamma,
const T* beta,
T* Y) {
typedef hipcub::BlockReduce<float, CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage m_temp_storage;
__shared__ typename BlockReduce::TempStorage v_temp_storage;
__shared__ float s_mean;
__shared__ float s_variance;
const SizeT i = blockIdx.x;
float sum1 = 0;
float sum2 = 0;
for (SizeT j = threadIdx.x; j < N; j += blockDim.x) {
const SizeT index = i * N + j;
sum1 += float(X[index]);
sum2 += float(X[index]) * float(X[index]);
}
sum1 = BlockReduce(m_temp_storage).Sum(sum1);
sum2 = BlockReduce(v_temp_storage).Sum(sum2);
if (threadIdx.x == 0) {
const float scale = float(1) / float(N);
sum1 *= scale;
sum2 = fmaxf(sum2 * scale - sum1 * sum1, float(0));
s_mean = sum1;
s_variance = rsqrtf(sum2 + eps);
}
__syncthreads();
for (SizeT j = threadIdx.x; j < N; j += blockDim.x) {
const SizeT index = i * N + j;
Y[index] = (float(X[index]) - s_mean) * s_variance * float(gamma[j]) + float(beta[j]);
}
}
}
}
| f180aee78ca8f2fbbccdeababe502d8637481e9e.cu | #include "ctranslate2/ops/layer_norm.h"
#include "cuda/helpers.h"
#include "cuda/utils.h"
namespace at {
namespace native {
// Forward declaration of the CUDA kernels.
template <typename T, typename SizeT>
__global__ void LayerNormForwardCUDAKernel(SizeT N,
float eps,
const T* X,
const T* gamma,
const T* beta,
T* Y);
}
}
namespace ctranslate2 {
namespace ops {
#define CUDA_NUM_THREADS 512
template <Device D, typename T>
void LayerNorm::compute(const StorageView* beta,
const StorageView* gamma,
const StorageView& input,
const dim_t axis,
const dim_t outer_size,
const dim_t axis_size,
const dim_t,
StorageView& output) const {
if (axis != input.rank() - 1 || !beta || !gamma)
throw std::invalid_argument("Generalized LayerNorm is currently not implemented on GPU");
at::native::LayerNormForwardCUDAKernel<cuda::device_type<T>, cuda::index_t>
<<<outer_size, CUDA_NUM_THREADS, 0, cuda::get_cuda_stream()>>>(
axis_size,
_epsilon,
cuda::device_cast(input.data<T>()),
cuda::device_cast(gamma->data<T>()),
cuda::device_cast(beta->data<T>()),
cuda::device_cast(output.data<T>()));
}
#define DECLARE_IMPL(T) \
template void \
LayerNorm::compute<Device::CUDA, T>(const StorageView* beta, \
const StorageView* gamma, \
const StorageView& input, \
const dim_t axis, \
const dim_t outer_size, \
const dim_t axis_size, \
const dim_t inner_size, \
StorageView& output) const;
DECLARE_IMPL(float)
DECLARE_IMPL(float16_t)
DECLARE_IMPL(bfloat16_t)
}
}
// The following CUDA kernels are adapted from:
// https://github.com/pytorch/pytorch/blob/295feb4e9af6cf4e7b9cff056de29a9dc17f50db/aten/src/ATen/native/cuda/layer_norm_kernel.cu
// which has the following license notice:
/*
From PyTorch:
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
From Caffe2:
Copyright (c) 2016-present, Facebook Inc. All rights reserved.
All contributions by Facebook:
Copyright (c) 2016 Facebook Inc.
All contributions by Google:
Copyright (c) 2015 Google Inc.
All rights reserved.
All contributions by Yangqing Jia:
Copyright (c) 2015 Yangqing Jia
All rights reserved.
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors
All rights reserved.
All other contributions:
Copyright(c) 2015, 2016 the respective contributors
All rights reserved.
Caffe2 uses a copyright model similar to Caffe: each contributor holds
copyright over their contributions to Caffe2. The project versioning records
all such contribution and copyright details. If a contributor wants to further
mark their specific copyright on a particular contribution, they should
indicate their copyright solely in the commit message of the change when it is
committed.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
and IDIAP Research Institute nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <cub/block/block_reduce.cuh>
namespace at {
namespace native {
template <typename T, typename SizeT>
__global__ void LayerNormForwardCUDAKernel(SizeT N,
float eps,
const T* X,
const T* gamma,
const T* beta,
T* Y) {
typedef cub::BlockReduce<float, CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage m_temp_storage;
__shared__ typename BlockReduce::TempStorage v_temp_storage;
__shared__ float s_mean;
__shared__ float s_variance;
const SizeT i = blockIdx.x;
float sum1 = 0;
float sum2 = 0;
for (SizeT j = threadIdx.x; j < N; j += blockDim.x) {
const SizeT index = i * N + j;
sum1 += float(X[index]);
sum2 += float(X[index]) * float(X[index]);
}
sum1 = BlockReduce(m_temp_storage).Sum(sum1);
sum2 = BlockReduce(v_temp_storage).Sum(sum2);
if (threadIdx.x == 0) {
const float scale = float(1) / float(N);
sum1 *= scale;
sum2 = fmaxf(sum2 * scale - sum1 * sum1, float(0));
s_mean = sum1;
s_variance = rsqrtf(sum2 + eps);
}
__syncthreads();
for (SizeT j = threadIdx.x; j < N; j += blockDim.x) {
const SizeT index = i * N + j;
Y[index] = (float(X[index]) - s_mean) * s_variance * float(gamma[j]) + float(beta[j]);
}
}
}
}
|
18b1b6399e863432b46afe9f133cccd839d57837.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "lite/core/op_registry.h"
#include "lite/kernels/cuda/bilinear_interp_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
using Tensor = lite::Tensor;
template <typename T>
__global__ void BilinearInterp(const T* in,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
T* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_h,
const float ratio_w,
const bool align_corners,
const int align_mode) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
bool align_flag = (align_mode == 0 && !align_corners);
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int out_img_idx = tid % out_img_w;
int in_img_idy = align_flag
? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5)
: static_cast<int>(ratio_h * out_img_idy);
in_img_idy = (in_img_idy > 0) ? in_img_idy : 0;
int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
T src_h = ratio_h * (out_img_idy + 0.5) - 0.5;
src_h = (src_h > 0) ? src_h : 0;
T h1lambda =
align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy;
T h2lambda = 1.f - h1lambda;
int in_img_idx = align_flag
? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
: static_cast<int>(ratio_w * out_img_idx);
in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
src_w = (src_w > 0) ? src_w : 0;
T w1lambda =
align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
// bilinear interpolation
out[out_id_h * output_w + out_id_w] =
h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) +
h1lambda * (w2lambda * in_pos[h_id * in_img_w] +
w1lambda * in_pos[h_id * in_img_w + w_id]);
}
}
void BilinearInterpCompute::Run() {
auto& param = this->Param<param_t>();
auto& ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
Tensor* input = param.X;
Tensor* output = param.Out;
Tensor* out_size = param.OutSize;
auto* input_data = input->data<float>();
const int n = input->dims()[0];
const int c = input->dims()[1];
const int in_h = input->dims()[2];
const int in_w = input->dims()[3];
int out_h = param.out_h;
int out_w = param.out_w;
float scale = param.scale;
bool align_corners = param.align_corners;
if (scale > 0) {
out_h = static_cast<int>(in_h * scale);
out_w = static_cast<int>(in_w * scale);
}
if (out_size != nullptr) {
Tensor sizes;
float* size_data = sizes.mutable_data<float>();
float* outsize_data = out_size->mutable_data<float>(TARGET(kCUDA));
hipMemcpy(
size_data, outsize_data, sizeof(float) * 2, hipMemcpyDeviceToHost);
out_h = static_cast<int>(size_data[0]);
out_w = static_cast<int>(size_data[1]);
}
auto output_data = output->mutable_data<float>(TARGET(kCUDA));
if (in_h == out_h && in_w == out_w) {
hipMemcpy(output_data,
input_data,
sizeof(float) * n * c * in_h * in_w,
hipMemcpyHostToDevice);
return;
}
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
int in_hw = in_h * in_w;
int out_hw = out_h * out_w;
int in_chw = c * in_hw;
int out_chw = c * out_hw;
int pixel_num = n * out_chw;
int threads = 512;
int blocks = (pixel_num + threads - 1) / threads;
blocks = blocks > 8 ? 8 : blocks;
int align_mode = param.align_mode;
hipLaunchKernelGGL(( BilinearInterp), dim3(blocks), dim3(threads), 0, stream, input_data,
in_h,
in_w,
n,
in_chw,
output_data,
out_h,
out_w,
n,
out_chw,
c,
ratio_h,
ratio_w,
align_corners,
align_mode);
hipError_t error = hipGetLastError();
if (error != hipSuccess) LOG(INFO) << hipGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(bilinear_interp,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::BilinearInterpCompute,
def)
.BindInput("X",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindInput("OutSize",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindOutput("Out",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.Finalize();
| 18b1b6399e863432b46afe9f133cccd839d57837.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "lite/core/op_registry.h"
#include "lite/kernels/cuda/bilinear_interp_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
using Tensor = lite::Tensor;
template <typename T>
__global__ void BilinearInterp(const T* in,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
T* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_h,
const float ratio_w,
const bool align_corners,
const int align_mode) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
bool align_flag = (align_mode == 0 && !align_corners);
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int out_img_idx = tid % out_img_w;
int in_img_idy = align_flag
? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5)
: static_cast<int>(ratio_h * out_img_idy);
in_img_idy = (in_img_idy > 0) ? in_img_idy : 0;
int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
T src_h = ratio_h * (out_img_idy + 0.5) - 0.5;
src_h = (src_h > 0) ? src_h : 0;
T h1lambda =
align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy;
T h2lambda = 1.f - h1lambda;
int in_img_idx = align_flag
? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
: static_cast<int>(ratio_w * out_img_idx);
in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
src_w = (src_w > 0) ? src_w : 0;
T w1lambda =
align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
// bilinear interpolation
out[out_id_h * output_w + out_id_w] =
h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) +
h1lambda * (w2lambda * in_pos[h_id * in_img_w] +
w1lambda * in_pos[h_id * in_img_w + w_id]);
}
}
void BilinearInterpCompute::Run() {
auto& param = this->Param<param_t>();
auto& ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
Tensor* input = param.X;
Tensor* output = param.Out;
Tensor* out_size = param.OutSize;
auto* input_data = input->data<float>();
const int n = input->dims()[0];
const int c = input->dims()[1];
const int in_h = input->dims()[2];
const int in_w = input->dims()[3];
int out_h = param.out_h;
int out_w = param.out_w;
float scale = param.scale;
bool align_corners = param.align_corners;
if (scale > 0) {
out_h = static_cast<int>(in_h * scale);
out_w = static_cast<int>(in_w * scale);
}
if (out_size != nullptr) {
Tensor sizes;
float* size_data = sizes.mutable_data<float>();
float* outsize_data = out_size->mutable_data<float>(TARGET(kCUDA));
cudaMemcpy(
size_data, outsize_data, sizeof(float) * 2, cudaMemcpyDeviceToHost);
out_h = static_cast<int>(size_data[0]);
out_w = static_cast<int>(size_data[1]);
}
auto output_data = output->mutable_data<float>(TARGET(kCUDA));
if (in_h == out_h && in_w == out_w) {
cudaMemcpy(output_data,
input_data,
sizeof(float) * n * c * in_h * in_w,
cudaMemcpyHostToDevice);
return;
}
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
int in_hw = in_h * in_w;
int out_hw = out_h * out_w;
int in_chw = c * in_hw;
int out_chw = c * out_hw;
int pixel_num = n * out_chw;
int threads = 512;
int blocks = (pixel_num + threads - 1) / threads;
blocks = blocks > 8 ? 8 : blocks;
int align_mode = param.align_mode;
BilinearInterp<<<blocks, threads, 0, stream>>>(input_data,
in_h,
in_w,
n,
in_chw,
output_data,
out_h,
out_w,
n,
out_chw,
c,
ratio_h,
ratio_w,
align_corners,
align_mode);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) LOG(INFO) << cudaGetErrorString(error);
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(bilinear_interp,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::BilinearInterpCompute,
def)
.BindInput("X",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindInput("OutSize",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.BindOutput("Out",
{LiteType::GetTensorTy(TARGET(kCUDA),
PRECISION(kFloat),
DATALAYOUT(kNCHW))})
.Finalize();
|
e1463676ab4cad23f0de4c0f82b41eceb855c342.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHULENN.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#define LENSOFTMAX_THREADS 128
template <typename T, typename AccumT, typename IndexT>
__global__ void culenn_LenSoftMax_updateOutput_kernel(
T *output, T *input, int nframe, int dim, IndexT *len)
{
__shared__ AccumT buffer[LENSOFTMAX_THREADS+1];
T *input_k = input + blockIdx.x*dim + blockIdx.y + blockIdx.z;
T *output_k = output + blockIdx.x*dim + blockIdx.y + blockIdx.z;
int i_start = threadIdx.x;
int i_end = ScalarConvert<IndexT, int>::to(len[blockIdx.x]);
int i_step = blockDim.x;
// max?
buffer[threadIdx.x] = -THCNumerics<AccumT>::max();
for (int i=i_start; i<i_end; i+=i_step)
{
T z = input_k[i];
AccumT zAcc = ScalarConvert<T, AccumT>::to(z);
if (buffer[threadIdx.x] < zAcc)
buffer[threadIdx.x] = zAcc;
}
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
AccumT max_k = -THCNumerics<AccumT>::max();
for (int i=0; i<blockDim.x; i++)
{
if (max_k < buffer[i])
max_k = buffer[i];
}
buffer[LENSOFTMAX_THREADS] = max_k;
}
__syncthreads();
// sum?
T max_k = ScalarConvert<AccumT, T>::to(buffer[LENSOFTMAX_THREADS]);
buffer[threadIdx.x] = ScalarConvert<int, AccumT>::to(0);
for (int i=i_start; i<i_end; i+=i_step) {
T z = THCNumerics<T>::exp(input_k[i]-max_k);
buffer[threadIdx.x] += ScalarConvert<T, AccumT>::to(z);
output_k[i] = z;
}
T zv = ScalarConvert<int, T>::to(0);
for (int i=i_start+i_end; i<dim; i+=i_step) {
output_k[i] = zv;
}
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
AccumT sum_k = ScalarConvert<int, AccumT>::to(0);
for (int i=0; i<blockDim.x; i++)
sum_k += buffer[i];
buffer[LENSOFTMAX_THREADS] = sum_k;
}
__syncthreads();
// softmax
T sum_k = ScalarConvert<AccumT, T>::to(buffer[LENSOFTMAX_THREADS]);
for (int i=i_start; i<i_end; i+=i_step)
output_k[i] = output_k[i] / sum_k;
}
template <typename T, typename AccumT, typename IndexT>
__global__ void culenn_LenSoftMax_updateGradInput_kernel(
T *gradInput, T *output, T *gradOutput, int nframe, int dim, IndexT *len)
{
__shared__ AccumT buffer[LENSOFTMAX_THREADS];
T *gradInput_k = gradInput + blockIdx.x*dim + blockIdx.y + blockIdx.z;
T *output_k = output + blockIdx.x*dim + blockIdx.y + blockIdx.z;
T *gradOutput_k = gradOutput + blockIdx.x*dim + blockIdx.y + blockIdx.z;
int i_start = threadIdx.x;
int i_end = ScalarConvert<IndexT, int>::to(len[blockIdx.x]);
int i_step = blockDim.x;
// sum?
buffer[threadIdx.x] = ScalarConvert<int, AccumT>::to(0);
for (int i=i_start; i<i_end; i+=i_step)
buffer[threadIdx.x] += ScalarConvert<T, AccumT>::to(gradOutput_k[i] * output_k[i]);
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
AccumT sum_k = ScalarConvert<int, AccumT>::to(0);
for (int i=0; i<blockDim.x; i++)
sum_k += buffer[i];
buffer[0] = sum_k;
}
__syncthreads();
T sum_k = ScalarConvert<AccumT, T>::to(buffer[0]);
for (int i=i_start; i<i_end; i+=i_step)
gradInput_k[i] = output_k[i] * (gradOutput_k[i] - sum_k);
T zv = ScalarConvert<int, T>::to(0);
for (int i=i_start+i_end; i<dim; i+=i_step) {
gradInput_k[i] = zv;
}
}
#include "generic/LenSoftMax.cu"
#include "THHGenerateFloatTypes.h"
#undef LENSOFTMAX_THREADS
| e1463676ab4cad23f0de4c0f82b41eceb855c342.cu | #include "THCULENN.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#define LENSOFTMAX_THREADS 128
template <typename T, typename AccumT, typename IndexT>
__global__ void culenn_LenSoftMax_updateOutput_kernel(
T *output, T *input, int nframe, int dim, IndexT *len)
{
__shared__ AccumT buffer[LENSOFTMAX_THREADS+1];
T *input_k = input + blockIdx.x*dim + blockIdx.y + blockIdx.z;
T *output_k = output + blockIdx.x*dim + blockIdx.y + blockIdx.z;
int i_start = threadIdx.x;
int i_end = ScalarConvert<IndexT, int>::to(len[blockIdx.x]);
int i_step = blockDim.x;
// max?
buffer[threadIdx.x] = -THCNumerics<AccumT>::max();
for (int i=i_start; i<i_end; i+=i_step)
{
T z = input_k[i];
AccumT zAcc = ScalarConvert<T, AccumT>::to(z);
if (buffer[threadIdx.x] < zAcc)
buffer[threadIdx.x] = zAcc;
}
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
AccumT max_k = -THCNumerics<AccumT>::max();
for (int i=0; i<blockDim.x; i++)
{
if (max_k < buffer[i])
max_k = buffer[i];
}
buffer[LENSOFTMAX_THREADS] = max_k;
}
__syncthreads();
// sum?
T max_k = ScalarConvert<AccumT, T>::to(buffer[LENSOFTMAX_THREADS]);
buffer[threadIdx.x] = ScalarConvert<int, AccumT>::to(0);
for (int i=i_start; i<i_end; i+=i_step) {
T z = THCNumerics<T>::exp(input_k[i]-max_k);
buffer[threadIdx.x] += ScalarConvert<T, AccumT>::to(z);
output_k[i] = z;
}
T zv = ScalarConvert<int, T>::to(0);
for (int i=i_start+i_end; i<dim; i+=i_step) {
output_k[i] = zv;
}
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
AccumT sum_k = ScalarConvert<int, AccumT>::to(0);
for (int i=0; i<blockDim.x; i++)
sum_k += buffer[i];
buffer[LENSOFTMAX_THREADS] = sum_k;
}
__syncthreads();
// softmax
T sum_k = ScalarConvert<AccumT, T>::to(buffer[LENSOFTMAX_THREADS]);
for (int i=i_start; i<i_end; i+=i_step)
output_k[i] = output_k[i] / sum_k;
}
template <typename T, typename AccumT, typename IndexT>
__global__ void culenn_LenSoftMax_updateGradInput_kernel(
T *gradInput, T *output, T *gradOutput, int nframe, int dim, IndexT *len)
{
__shared__ AccumT buffer[LENSOFTMAX_THREADS];
T *gradInput_k = gradInput + blockIdx.x*dim + blockIdx.y + blockIdx.z;
T *output_k = output + blockIdx.x*dim + blockIdx.y + blockIdx.z;
T *gradOutput_k = gradOutput + blockIdx.x*dim + blockIdx.y + blockIdx.z;
int i_start = threadIdx.x;
int i_end = ScalarConvert<IndexT, int>::to(len[blockIdx.x]);
int i_step = blockDim.x;
// sum?
buffer[threadIdx.x] = ScalarConvert<int, AccumT>::to(0);
for (int i=i_start; i<i_end; i+=i_step)
buffer[threadIdx.x] += ScalarConvert<T, AccumT>::to(gradOutput_k[i] * output_k[i]);
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
AccumT sum_k = ScalarConvert<int, AccumT>::to(0);
for (int i=0; i<blockDim.x; i++)
sum_k += buffer[i];
buffer[0] = sum_k;
}
__syncthreads();
T sum_k = ScalarConvert<AccumT, T>::to(buffer[0]);
for (int i=i_start; i<i_end; i+=i_step)
gradInput_k[i] = output_k[i] * (gradOutput_k[i] - sum_k);
T zv = ScalarConvert<int, T>::to(0);
for (int i=i_start+i_end; i<dim; i+=i_step) {
gradInput_k[i] = zv;
}
}
#include "generic/LenSoftMax.cu"
#include "THCGenerateFloatTypes.h"
#undef LENSOFTMAX_THREADS
|
Shape.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/TypeProperties.h>
#include <ATen/Dispatch.h>
#include <c10/core/MemoryFormat.h>
#include <c10/util/Optional.h>
#include <THH/THH.h>
namespace at {
namespace native {
constexpr int CAT_ARRAY_BATCH_SIZE = 1024;
constexpr int CAT_ARRAY_MAX_INPUT_DIMS = 4;
namespace {
inline bool getCatGrid(ptrdiff_t nTensors, dim3& grid) {
const int numSM = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
//X dim of grid for cat array cooperates on a single tensor in the cat.
//Given half of the GPU, full utilization will always occur.
grid = dim3( 2LL * numSM, (long long) nTensors );
return true;
}
// Similar to any other IndexToOffset calculation for copying along a given
// dimension.
template <typename IndexType, int Dims>
struct CatArrIndexToOffset {
static inline __device__ IndexType compute(
const IndexType outputSize[Dims],
const IndexType outputStride[Dims],
const IndexType dimSize,
const unsigned int concatDim,
IndexType linearIndex) {
// linearIndex is not really linear index, but instead the offset in
// input tensor. If the input tensor is contiguous, then this offset
// is the linear index, but if the input tensor is channels last, then
// it is the linear index of the permuted contiguous tensor
IndexType offset = 0;
#pragma unroll
for (int i = Dims - 1; i >= 1; --i) {
IndexType curDimSize = i == concatDim ? dimSize : outputSize[i];
IndexType nextDimIndex = linearIndex / curDimSize;
IndexType curDimIndex = linearIndex - curDimSize * nextDimIndex;
IndexType curDimOffset = curDimIndex * outputStride[i];
offset += curDimOffset;
linearIndex = nextDimIndex;
}
return offset + linearIndex * outputStride[0];
}
};
template <typename T, typename IndexType>
struct CatArrInputTensor {
T* input;
IndexType offset;
IndexType dimSize;
IndexType nElements;
};
template<typename IndexType, unsigned int MaxDims>
struct OutputTensorSizeStride {
IndexType outputSize[MaxDims];
IndexType outputStride[MaxDims];
};
/**
* Kernel used to concatenated grimDim.y tensors into an output tensor. Uses a
* grid-stride loop based off of the blockIdx.x, threadIdx.x for each input to
* copy each element from each input tensor into the output.
*
* output: base pointer to the storage associated with the output tensor
* inputs: GPU-allocated array of input metadata for each input to concatenate
* in the kernel
* os: the size/stride vectors for the output tensor
* concatDim: dimension along which we are concatenating
* dimStride: the stride of the output tensor at the concatDim
*
* The most important assumption made is that the input tensors are contiguous.
*/
template <typename T, typename IndexType, int Dims>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void CatArrayBatchedCopy(
T* output,
CatArrInputTensor<T, IndexType>* inputs,
OutputTensorSizeStride<IndexType, CAT_ARRAY_MAX_INPUT_DIMS> os,
const int concatDim,
IndexType dimStride) {
IndexType tid = blockIdx.x * blockDim.x + threadIdx.x;
IndexType nElements = inputs[blockIdx.y].nElements;
if(tid >= nElements) return;
T* data = inputs[blockIdx.y].input;
IndexType offset = inputs[blockIdx.y].offset;
IndexType dimSize = inputs[blockIdx.y].dimSize;
IndexType dataOffset = offset * dimStride;
IndexType stride = gridDim.x * blockDim.x;
while( tid < nElements){
IndexType elementOffset = CatArrIndexToOffset<IndexType, Dims>::compute(
os.outputSize, os.outputStride, dimSize, concatDim, tid);
output[dataOffset + elementOffset] = data[tid];
tid += stride;
}
}
void check_shape_except_dim(const Tensor &first, const Tensor &second,
int dimension, int index)
{
int first_dims = first.dim();
int second_dims = second.dim();
TORCH_CHECK(first_dims == second_dims,
"Tensors must have same number of dimensions: got ", first_dims,
" and ", second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = at::native::size(first, dim);
int64_t second_dim_size = at::native::size(second, dim);
TORCH_CHECK(first_dim_size == second_dim_size,
"Sizes of tensors must match except in dimension ", dim, ". Got ",
static_cast<long long>(first_dim_size), " and ",
static_cast<long long>(second_dim_size), " (The offending index is ",
index, ")");
}
}
template <typename scalar_t>
void parallel_cat(Tensor &out, const TensorList &inputs, int64_t dimension,
int nDims, c10::MemoryFormat memory_format) {
// First, let's set up our kernel parameters. We start with a raw pointer to
// the storage for the output Tensor.
scalar_t *data = out.data_ptr<scalar_t>();
// Kernel Parameter
long tensorMetadataSize =
sizeof(CatArrInputTensor<scalar_t, unsigned int>) * CAT_ARRAY_BATCH_SIZE;
auto d_inputs_storage = at::empty(
{tensorMetadataSize}, out.options().dtype(at::kByte));
auto d_inputs = static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(
d_inputs_storage.data_ptr());
OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param;
// Next, let's initialize the size, stride arrays for the output Tensor.
if (memory_format == c10::MemoryFormat::Contiguous) {
for (int i = 0; i < nDims; ++i) {
param.outputSize[i] = at::native::size(out, i);
param.outputStride[i] = out.stride(i);
}
} else if (memory_format == c10::MemoryFormat::ChannelsLast || memory_format == c10::MemoryFormat::ChannelsLast3d) {
// permute the semantics of dims from NCHW to NHWC so that the input
// tensor is now contiguous
param.outputSize[0] = at::native::size(out, 0);
param.outputStride[0] = out.stride(0);
for (int i = 1; i < nDims - 1; ++i) {
param.outputSize[i] = at::native::size(out, i + 1);
param.outputStride[i] = out.stride(i + 1);
}
param.outputSize[nDims - 1] = at::native::size(out, 1);
param.outputStride[nDims - 1] = out.stride(1);
} else {
TORCH_CHECK(false, "unsupported memory format");
}
at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Now we loop
int batchCounter = 0;
int64_t offset = 0;
for (int i = 0; i < inputs.size() ; i += CAT_ARRAY_BATCH_SIZE) {
// Re-allocate stackInputs every iteration to avoid read-after-write hazard
{
auto stackInputs_storage = at::empty({tensorMetadataSize},
out.options().dtype(at::kByte).device(at::kCPU).pinned_memory(true));
auto stackInputs =
static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(
stackInputs_storage.data_ptr());
for (batchCounter = 0;
batchCounter < CAT_ARRAY_BATCH_SIZE &&
(i+batchCounter) < inputs.size();
++batchCounter) {
int64_t dimSize = at::native::size(inputs[i+batchCounter], dimension);
stackInputs[batchCounter].input =
inputs[i+batchCounter].data_ptr<scalar_t>();
stackInputs[batchCounter].offset = offset;
stackInputs[batchCounter].dimSize = dimSize;
stackInputs[batchCounter].nElements = inputs[i+batchCounter].numel();
// update offset
offset += dimSize;
}
at::native::copy_(d_inputs_storage, stackInputs_storage,
/* non_blocking= */ true);
}
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, which the kernel's internal indexing
// is based on.
dim3 applyBlock = dim3(32*16);
//Get grid where x dim fills half gpu and y dim is number of tensors.
//This will have cating two tensors fill the entire grid, but prevent
//many threads from needlessly load meta data if their sizes is small.
dim3 catGrid;
getCatGrid(batchCounter, catGrid);
if (memory_format != c10::MemoryFormat::Contiguous) {
switch (dimension) {
case 0:
break;
case 1:
dimension = nDims - dimension;
break;
default:
dimension--;
}
}
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
hipLaunchKernelGGL(( CatArrayBatchedCopy<scalar_t, unsigned int, DIMS>), \
catGrid, dim3(applyBlock), 0, stream.stream(), \
data, d_inputs, param, dimension, param.outputStride[dimension]);
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
#undef HANDLE_CASE
AT_CUDA_CHECK(hipGetLastError());
}
}
} // namespace
Tensor cat_cuda(TensorList inputs, int64_t dimension) {
ScalarType high_type = result_type(inputs);
Tensor out = at::empty({0}, inputs.front().options().dtype(high_type));
cat_out_cuda(out, inputs, dimension);
return out;
}
inline c10::MemoryFormat compute_output_memory_format(const TensorList &inputs) {
c10::optional<c10::MemoryFormat> format = c10::nullopt;
for (auto &t : inputs) {
auto f = t.suggest_memory_format();
if (!format.has_value()) {
format = f;
continue;
}
if (format.value() == f) {
continue;
}
bool contiguous = (format.value() == c10::MemoryFormat::Contiguous || f == c10::MemoryFormat::Contiguous || format.value() != f);
if (contiguous) {
return c10::MemoryFormat::Contiguous;
}
}
return format.value();
}
Tensor& cat_out_cuda(Tensor& out, TensorList inputs, int64_t dimension) {
// previously, size [0] tensors were the only possible empty tensors; thus, it
// wasn't possible to cat empty tensors unless all the other tensors were
// 1-dimensional, so we allowed these tensors to be "skipped". We maintain
// this behavior for backwards compatibility, but only for this specific size
// (i.e. other empty sizes are not skipped).
// FIXME: warn if this is the case
auto should_skip = [](const Tensor &t) {
return t.dim() == 1 && at::native::size(t, 0) == 0;
};
bool hasSkippedInput = false;
const Tensor *notSkippedTensor = NULL; // non-owning reference
int nDims = 0;
// Check for type promotion
TORCH_CHECK(canCast(result_type(inputs), out.scalar_type()), "input types ",
" can't be cast to the desired output type ",
out.scalar_type());
// Inputs cannot alias the output tensor
for (int i = 0; i < inputs.size(); i++) {
auto lap = at::get_overlap_status(out, inputs[i]);
TORCH_CHECK(lap != at::MemOverlapStatus::PARTIAL &&
lap != at::MemOverlapStatus::FULL,
"unsupported operation: the input tensors cannot refer to any "
"of the output memory locations. Found overlap in input "
"tensor ", i);
}
for (int i = 0; i < inputs.size(); i++)
{
if (should_skip(inputs[i])) {
hasSkippedInput = true;
continue;
}
nDims = inputs[i].dim();
notSkippedTensor = &inputs[i];
}
// If all inputs are empty tensors, return an empty tensor
if (notSkippedTensor == NULL) {
return out;
}
TORCH_CHECK(inputs.size() > 0, "invalid number of inputs ", inputs.size());
TORCH_CHECK(dimension >= 0, "invalid dimension ", dimension);
for (const Tensor& t: inputs) {
TORCH_CHECK(t.device() == notSkippedTensor->device(),
"All input tensors must be on the same device. Received ",
t.device(), " and ", notSkippedTensor->device());
}
c10::MemoryFormat memory_format = compute_output_memory_format(inputs);
std::vector<int64_t> size(notSkippedTensor->sizes().vec());
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < inputs.size(); i++) {
const Tensor &tensor = inputs[i];
if (should_skip(tensor)) {
continue;
}
check_shape_except_dim(*notSkippedTensor, tensor, dimension, i);
cat_dim_size += at::native::size(tensor, dimension);
}
// Compute the size of the result
size[dimension] = cat_dim_size;
out.resize_(size, memory_format);
if (out.numel() == 0) {
return out;
}
// We parallelize the copy if all 6 conditions pass:
//
// 1. There is more than one input tensor
// 2. No empty inputs
// 3. The out tensor is 32-bit indexable
// 4. The number of dimensions is <= 4
// 5. All input tensors are contiguous (output tensor may be non-contig)
// 6. All input tensors can use 32-bit indexing
const bool all32BitIndexable = std::all_of(inputs.begin(), inputs.end(),
[] (const Tensor& t) {
return at::cuda::detail::canUse32BitIndexMath(t);
});
const bool allContiguous = std::all_of(inputs.begin(), inputs.end(),
[=](const Tensor& t) {
return !t.defined() || t.is_contiguous(memory_format);
});
ScalarType firstType = inputs[0].scalar_type();
const bool allSameType = std::all_of(inputs.begin(), inputs.end(),
[firstType](const Tensor& t) {
return t.scalar_type() == firstType;
});
if (inputs.size() > 1 &&
!hasSkippedInput &&
out.dim() <= CAT_ARRAY_MAX_INPUT_DIMS &&
at::cuda::detail::canUse32BitIndexMath(out) &&
allContiguous &&
all32BitIndexable &&
allSameType) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
out.scalar_type(), "cat_cuda", [&]() {
parallel_cat<scalar_t>(out, inputs, dimension, nDims, memory_format);
});
} else {
int64_t offset = 0;
for (int j = 0; j < inputs.size(); j++)
{
if (should_skip(inputs[j])) continue;
int64_t dimSize = at::native::size(inputs[j], dimension);
Tensor nt = at::narrow(out, dimension, offset, dimSize);
copy_(nt, inputs[j]);
offset += dimSize;
}
}
return out;
}
} // namespace native
} // namespace at
| Shape.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/TypeProperties.h>
#include <ATen/Dispatch.h>
#include <c10/core/MemoryFormat.h>
#include <c10/util/Optional.h>
#include <THC/THC.h>
namespace at {
namespace native {
constexpr int CAT_ARRAY_BATCH_SIZE = 1024;
constexpr int CAT_ARRAY_MAX_INPUT_DIMS = 4;
namespace {
inline bool getCatGrid(ptrdiff_t nTensors, dim3& grid) {
const int numSM = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
//X dim of grid for cat array cooperates on a single tensor in the cat.
//Given half of the GPU, full utilization will always occur.
grid = dim3( 2LL * numSM, (long long) nTensors );
return true;
}
// Similar to any other IndexToOffset calculation for copying along a given
// dimension.
template <typename IndexType, int Dims>
struct CatArrIndexToOffset {
static inline __device__ IndexType compute(
const IndexType outputSize[Dims],
const IndexType outputStride[Dims],
const IndexType dimSize,
const unsigned int concatDim,
IndexType linearIndex) {
// linearIndex is not really linear index, but instead the offset in
// input tensor. If the input tensor is contiguous, then this offset
// is the linear index, but if the input tensor is channels last, then
// it is the linear index of the permuted contiguous tensor
IndexType offset = 0;
#pragma unroll
for (int i = Dims - 1; i >= 1; --i) {
IndexType curDimSize = i == concatDim ? dimSize : outputSize[i];
IndexType nextDimIndex = linearIndex / curDimSize;
IndexType curDimIndex = linearIndex - curDimSize * nextDimIndex;
IndexType curDimOffset = curDimIndex * outputStride[i];
offset += curDimOffset;
linearIndex = nextDimIndex;
}
return offset + linearIndex * outputStride[0];
}
};
template <typename T, typename IndexType>
struct CatArrInputTensor {
T* input;
IndexType offset;
IndexType dimSize;
IndexType nElements;
};
template<typename IndexType, unsigned int MaxDims>
struct OutputTensorSizeStride {
IndexType outputSize[MaxDims];
IndexType outputStride[MaxDims];
};
/**
* Kernel used to concatenated grimDim.y tensors into an output tensor. Uses a
* grid-stride loop based off of the blockIdx.x, threadIdx.x for each input to
* copy each element from each input tensor into the output.
*
* output: base pointer to the storage associated with the output tensor
* inputs: GPU-allocated array of input metadata for each input to concatenate
* in the kernel
* os: the size/stride vectors for the output tensor
* concatDim: dimension along which we are concatenating
* dimStride: the stride of the output tensor at the concatDim
*
* The most important assumption made is that the input tensors are contiguous.
*/
template <typename T, typename IndexType, int Dims>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void CatArrayBatchedCopy(
T* output,
CatArrInputTensor<T, IndexType>* inputs,
OutputTensorSizeStride<IndexType, CAT_ARRAY_MAX_INPUT_DIMS> os,
const int concatDim,
IndexType dimStride) {
IndexType tid = blockIdx.x * blockDim.x + threadIdx.x;
IndexType nElements = inputs[blockIdx.y].nElements;
if(tid >= nElements) return;
T* data = inputs[blockIdx.y].input;
IndexType offset = inputs[blockIdx.y].offset;
IndexType dimSize = inputs[blockIdx.y].dimSize;
IndexType dataOffset = offset * dimStride;
IndexType stride = gridDim.x * blockDim.x;
while( tid < nElements){
IndexType elementOffset = CatArrIndexToOffset<IndexType, Dims>::compute(
os.outputSize, os.outputStride, dimSize, concatDim, tid);
output[dataOffset + elementOffset] = data[tid];
tid += stride;
}
}
void check_shape_except_dim(const Tensor &first, const Tensor &second,
int dimension, int index)
{
int first_dims = first.dim();
int second_dims = second.dim();
TORCH_CHECK(first_dims == second_dims,
"Tensors must have same number of dimensions: got ", first_dims,
" and ", second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = at::native::size(first, dim);
int64_t second_dim_size = at::native::size(second, dim);
TORCH_CHECK(first_dim_size == second_dim_size,
"Sizes of tensors must match except in dimension ", dim, ". Got ",
static_cast<long long>(first_dim_size), " and ",
static_cast<long long>(second_dim_size), " (The offending index is ",
index, ")");
}
}
template <typename scalar_t>
void parallel_cat(Tensor &out, const TensorList &inputs, int64_t dimension,
int nDims, c10::MemoryFormat memory_format) {
// First, let's set up our kernel parameters. We start with a raw pointer to
// the storage for the output Tensor.
scalar_t *data = out.data_ptr<scalar_t>();
// Kernel Parameter
long tensorMetadataSize =
sizeof(CatArrInputTensor<scalar_t, unsigned int>) * CAT_ARRAY_BATCH_SIZE;
auto d_inputs_storage = at::empty(
{tensorMetadataSize}, out.options().dtype(at::kByte));
auto d_inputs = static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(
d_inputs_storage.data_ptr());
OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param;
// Next, let's initialize the size, stride arrays for the output Tensor.
if (memory_format == c10::MemoryFormat::Contiguous) {
for (int i = 0; i < nDims; ++i) {
param.outputSize[i] = at::native::size(out, i);
param.outputStride[i] = out.stride(i);
}
} else if (memory_format == c10::MemoryFormat::ChannelsLast || memory_format == c10::MemoryFormat::ChannelsLast3d) {
// permute the semantics of dims from NCHW to NHWC so that the input
// tensor is now contiguous
param.outputSize[0] = at::native::size(out, 0);
param.outputStride[0] = out.stride(0);
for (int i = 1; i < nDims - 1; ++i) {
param.outputSize[i] = at::native::size(out, i + 1);
param.outputStride[i] = out.stride(i + 1);
}
param.outputSize[nDims - 1] = at::native::size(out, 1);
param.outputStride[nDims - 1] = out.stride(1);
} else {
TORCH_CHECK(false, "unsupported memory format");
}
at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream();
// Now we loop
int batchCounter = 0;
int64_t offset = 0;
for (int i = 0; i < inputs.size() ; i += CAT_ARRAY_BATCH_SIZE) {
// Re-allocate stackInputs every iteration to avoid read-after-write hazard
{
auto stackInputs_storage = at::empty({tensorMetadataSize},
out.options().dtype(at::kByte).device(at::kCPU).pinned_memory(true));
auto stackInputs =
static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(
stackInputs_storage.data_ptr());
for (batchCounter = 0;
batchCounter < CAT_ARRAY_BATCH_SIZE &&
(i+batchCounter) < inputs.size();
++batchCounter) {
int64_t dimSize = at::native::size(inputs[i+batchCounter], dimension);
stackInputs[batchCounter].input =
inputs[i+batchCounter].data_ptr<scalar_t>();
stackInputs[batchCounter].offset = offset;
stackInputs[batchCounter].dimSize = dimSize;
stackInputs[batchCounter].nElements = inputs[i+batchCounter].numel();
// update offset
offset += dimSize;
}
at::native::copy_(d_inputs_storage, stackInputs_storage,
/* non_blocking= */ true);
}
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, which the kernel's internal indexing
// is based on.
dim3 applyBlock = dim3(32*16);
//Get grid where x dim fills half gpu and y dim is number of tensors.
//This will have cating two tensors fill the entire grid, but prevent
//many threads from needlessly load meta data if their sizes is small.
dim3 catGrid;
getCatGrid(batchCounter, catGrid);
if (memory_format != c10::MemoryFormat::Contiguous) {
switch (dimension) {
case 0:
break;
case 1:
dimension = nDims - dimension;
break;
default:
dimension--;
}
}
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
CatArrayBatchedCopy<scalar_t, unsigned int, DIMS><<<\
catGrid, applyBlock, 0, stream.stream()>>>(\
data, d_inputs, param, dimension, param.outputStride[dimension]);
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
#undef HANDLE_CASE
AT_CUDA_CHECK(cudaGetLastError());
}
}
} // namespace
Tensor cat_cuda(TensorList inputs, int64_t dimension) {
ScalarType high_type = result_type(inputs);
Tensor out = at::empty({0}, inputs.front().options().dtype(high_type));
cat_out_cuda(out, inputs, dimension);
return out;
}
inline c10::MemoryFormat compute_output_memory_format(const TensorList &inputs) {
c10::optional<c10::MemoryFormat> format = c10::nullopt;
for (auto &t : inputs) {
auto f = t.suggest_memory_format();
if (!format.has_value()) {
format = f;
continue;
}
if (format.value() == f) {
continue;
}
bool contiguous = (format.value() == c10::MemoryFormat::Contiguous || f == c10::MemoryFormat::Contiguous || format.value() != f);
if (contiguous) {
return c10::MemoryFormat::Contiguous;
}
}
return format.value();
}
Tensor& cat_out_cuda(Tensor& out, TensorList inputs, int64_t dimension) {
// previously, size [0] tensors were the only possible empty tensors; thus, it
// wasn't possible to cat empty tensors unless all the other tensors were
// 1-dimensional, so we allowed these tensors to be "skipped". We maintain
// this behavior for backwards compatibility, but only for this specific size
// (i.e. other empty sizes are not skipped).
// FIXME: warn if this is the case
auto should_skip = [](const Tensor &t) {
return t.dim() == 1 && at::native::size(t, 0) == 0;
};
bool hasSkippedInput = false;
const Tensor *notSkippedTensor = NULL; // non-owning reference
int nDims = 0;
// Check for type promotion
TORCH_CHECK(canCast(result_type(inputs), out.scalar_type()), "input types ",
" can't be cast to the desired output type ",
out.scalar_type());
// Inputs cannot alias the output tensor
for (int i = 0; i < inputs.size(); i++) {
auto lap = at::get_overlap_status(out, inputs[i]);
TORCH_CHECK(lap != at::MemOverlapStatus::PARTIAL &&
lap != at::MemOverlapStatus::FULL,
"unsupported operation: the input tensors cannot refer to any "
"of the output memory locations. Found overlap in input "
"tensor ", i);
}
for (int i = 0; i < inputs.size(); i++)
{
if (should_skip(inputs[i])) {
hasSkippedInput = true;
continue;
}
nDims = inputs[i].dim();
notSkippedTensor = &inputs[i];
}
// If all inputs are empty tensors, return an empty tensor
if (notSkippedTensor == NULL) {
return out;
}
TORCH_CHECK(inputs.size() > 0, "invalid number of inputs ", inputs.size());
TORCH_CHECK(dimension >= 0, "invalid dimension ", dimension);
for (const Tensor& t: inputs) {
TORCH_CHECK(t.device() == notSkippedTensor->device(),
"All input tensors must be on the same device. Received ",
t.device(), " and ", notSkippedTensor->device());
}
c10::MemoryFormat memory_format = compute_output_memory_format(inputs);
std::vector<int64_t> size(notSkippedTensor->sizes().vec());
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < inputs.size(); i++) {
const Tensor &tensor = inputs[i];
if (should_skip(tensor)) {
continue;
}
check_shape_except_dim(*notSkippedTensor, tensor, dimension, i);
cat_dim_size += at::native::size(tensor, dimension);
}
// Compute the size of the result
size[dimension] = cat_dim_size;
out.resize_(size, memory_format);
if (out.numel() == 0) {
return out;
}
// We parallelize the copy if all 6 conditions pass:
//
// 1. There is more than one input tensor
// 2. No empty inputs
// 3. The out tensor is 32-bit indexable
// 4. The number of dimensions is <= 4
// 5. All input tensors are contiguous (output tensor may be non-contig)
// 6. All input tensors can use 32-bit indexing
const bool all32BitIndexable = std::all_of(inputs.begin(), inputs.end(),
[] (const Tensor& t) {
return at::cuda::detail::canUse32BitIndexMath(t);
});
const bool allContiguous = std::all_of(inputs.begin(), inputs.end(),
[=](const Tensor& t) {
return !t.defined() || t.is_contiguous(memory_format);
});
ScalarType firstType = inputs[0].scalar_type();
const bool allSameType = std::all_of(inputs.begin(), inputs.end(),
[firstType](const Tensor& t) {
return t.scalar_type() == firstType;
});
if (inputs.size() > 1 &&
!hasSkippedInput &&
out.dim() <= CAT_ARRAY_MAX_INPUT_DIMS &&
at::cuda::detail::canUse32BitIndexMath(out) &&
allContiguous &&
all32BitIndexable &&
allSameType) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
out.scalar_type(), "cat_cuda", [&]() {
parallel_cat<scalar_t>(out, inputs, dimension, nDims, memory_format);
});
} else {
int64_t offset = 0;
for (int j = 0; j < inputs.size(); j++)
{
if (should_skip(inputs[j])) continue;
int64_t dimSize = at::native::size(inputs[j], dimension);
Tensor nt = at::narrow(out, dimension, offset, dimSize);
copy_(nt, inputs[j]);
offset += dimSize;
}
}
return out;
}
} // namespace native
} // namespace at
|
cffbd4559bfe1109419551e9633e9967622516ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "util.h"
#include "kernel.h"
#include "hip/hip_runtime.h"
#include "math.h"
__constant__ FluxKernelArgs flux_ctx;
__constant__ DtKernelArgs dt_ctx;
__constant__ RKKernelArgs rk_ctx;
void init_allocate(){
for (int i=0; i<3; i++){
hipHostMalloc(&BCArgs[i], sizeof(collBCKernelArgs), hipHostMallocWriteCombined);
//hipHostMalloc(&BCArgs[i], sizeof(collBCKernelArgs));
hipHostMalloc(&fluxArgs[i], sizeof(FluxKernelArgs), hipHostMallocWriteCombined);
hipHostMalloc(&RKArgs[i], sizeof(RKKernelArgs), hipHostMallocWriteCombined);
hipHostMalloc(&dtArgs, sizeof(DtKernelArgs), hipHostMallocWriteCombined);
hipHostMalloc(&dt_host, sizeof(float), hipHostMallocWriteCombined);
//hipHostMalloc(&fluxArgs[i], sizeof(FluxKernelArgs));
}
}
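// Runge-Kutta update kernel: step 0 stores Q = U + dt*R; otherwise the new estimate is averaged with the stored Q (second-order update).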
__global__ void RKKernel(int step){
float dt = rk_ctx.dt[0];
// dt = 0.0006;
//printf("TIME %.6f\n", dt);
float u0,u1,u2,u3,r0,r1,r2,r3,q0,q1,q2,q3;
int global_border = rk_ctx.global_border;
// Global indexes
int xid = blockIdx.x*blockDim.x + threadIdx.x - global_border;
int yid = blockIdx.y*blockDim.y + threadIdx.y - global_border;
if ( xid < 0 || xid >= rk_ctx.nx || yid < 0 || yid >= rk_ctx.ny ) return;
u0 = global_index(rk_ctx.U0.ptr, rk_ctx.U0.pitch, xid, yid, global_border)[0];
u1 = global_index(rk_ctx.U1.ptr, rk_ctx.U1.pitch, xid, yid, global_border)[0];
u2 = global_index(rk_ctx.U2.ptr, rk_ctx.U2.pitch, xid, yid, global_border)[0];
u3 = global_index(rk_ctx.U3.ptr, rk_ctx.U3.pitch, xid, yid, global_border)[0];
r0 = global_index(rk_ctx.R0.ptr, rk_ctx.R0.pitch, xid, yid, global_border)[0];
r1 = global_index(rk_ctx.R1.ptr, rk_ctx.R1.pitch, xid, yid, global_border)[0];
r2 = global_index(rk_ctx.R2.ptr, rk_ctx.R2.pitch, xid, yid, global_border)[0];
r3 = global_index(rk_ctx.R3.ptr, rk_ctx.R3.pitch, xid, yid, global_border)[0];
if (step == 0) {
q0 = u0 + dt*r0;
q1 = u1 + dt*r1;
q2 = u2 + dt*r2;
q3 = u3 + dt*r3;
}
else {
q0 = global_index(rk_ctx.Q0.ptr, rk_ctx.Q0.pitch, xid, yid, global_border)[0];
q1 = global_index(rk_ctx.Q1.ptr, rk_ctx.Q1.pitch, xid, yid, global_border)[0];
q2 = global_index(rk_ctx.Q2.ptr, rk_ctx.Q2.pitch, xid, yid, global_border)[0];
q3 = global_index(rk_ctx.Q3.ptr, rk_ctx.Q3.pitch, xid, yid, global_border)[0];
q0 = 0.5f*(q0 + (u0 + dt*r0));
q1 = 0.5f*(q1 + (u1 + dt*r1));
q2 = 0.5f*(q2 + (u2 + dt*r2));
q3 = 0.5f*(q3 + (u3 + dt*r3));
}
global_index(rk_ctx.Q0.ptr, rk_ctx.Q0.pitch, xid, yid, global_border)[0] = q0;
global_index(rk_ctx.Q1.ptr, rk_ctx.Q1.pitch, xid, yid, global_border)[0] = q1;
global_index(rk_ctx.Q2.ptr, rk_ctx.Q2.pitch, xid, yid, global_border)[0] = q2;
global_index(rk_ctx.Q3.ptr, rk_ctx.Q3.pitch, xid, yid, global_border)[0] = q3;
}
void callRKKernel(dim3 grid, dim3 block, int step, RKKernelArgs* h_ctx){
hipMemcpyToSymbolAsync(rk_ctx, h_ctx, sizeof(RKKernelArgs), 0, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( RKKernel), dim3(grid), dim3(block), 0, 0, step);
}
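// Shared-memory min-reduction over dt_ctx.L; the minimum is scaled by dt_ctx.scale and stored as the time step dt_ctx.dt[0].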
__global__ void DtKernel(int nThreads){
extern __shared__ float sdata[];
volatile float* sdata_volatile = sdata;
unsigned int tid = threadIdx.x;
int threads = nThreads;
float dt;
//printf("THREADID %i",tid);
sdata[tid] = FLT_MAX;
for (unsigned int i=tid; i<dt_ctx.nElements; i += threads)
sdata[tid] = min(sdata[tid], dt_ctx.L[i]);
__syncthreads();
// if (tid == 0){
// printf("START\n");
// for (int k=0; k<nThreads; k++)
// printf(" %.5f\t",sdata[k]);
// }
//Now, reduce all elements into a single element
if (threads >= 512) {
if (tid < 256) sdata[tid] = min(sdata[tid], sdata[tid + 256]);
__syncthreads();
}
if (threads >= 256) {
if (tid < 128) sdata[tid] = min(sdata[tid], sdata[tid + 128]);
__syncthreads();
}
if (threads >= 128) {
if (tid < 64) sdata[tid] = min(sdata[tid], sdata[tid + 64]);
__syncthreads();
}
if (tid < 32) {
if (threads >= 64) sdata_volatile[tid] = min(sdata_volatile[tid], sdata_volatile[tid + 32]);
if (tid < 16) {
if (threads >= 32) sdata_volatile[tid] = min(sdata_volatile[tid], sdata_volatile[tid + 16]);
if (threads >= 16) sdata_volatile[tid] = min(sdata_volatile[tid], sdata_volatile[tid + 8]);
if (threads >= 8) sdata_volatile[tid] = min(sdata_volatile[tid], sdata_volatile[tid + 4]);
if (threads >= 4) sdata_volatile[tid] = min(sdata_volatile[tid], sdata_volatile[tid + 2]);
if (threads >= 2) sdata_volatile[tid] = min(sdata_volatile[tid], sdata_volatile[tid + 1]);
}
if (tid == 0) {
dt = sdata_volatile[tid];
if (dt == FLT_MAX) {
//If no water at all, and no sources,
//we really do not need to simulate,
//but using FLT_MAX will make things crash...
dt = 1.0e-7f;
}
dt_ctx.dt[tid] = dt*dt_ctx.scale;
// printf("TID %i",tid);
}
}
}
void callDtKernel(int nThreads, DtKernelArgs* h_ctx){
hipMemcpyToSymbolAsync(dt_ctx, h_ctx, sizeof(DtKernelArgs), 0, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( DtKernel), dim3(1),dim3(nThreads),sizeof(float)*nThreads, 0, nThreads);
}
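// Evaluate the x-direction flux together with the velocities and sound speed needed for the wave-speed estimates.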
inline __device__ void fluxAndLambdaFuncF(float& rho, float& U1, float& U2, float& U3,
const float& gamma,
float& F0, float& F1, float& F2, float& F3,
float& u, float& v,float& c){
float pressure, E;
// Values needed to compute the eigenvalues
u = U1/rho;
v = U2/rho;
E = U3;
pressure = (gamma - 1.0f)*(E-0.5f*rho*(u*u + v*v));
c = sqrtf(gamma*pressure/rho);
// Flux computation
F0 = U1;
F1 = U1*u + pressure;
F2 = U1*v;
F3 = u*(E+pressure);
}
inline __device__ void fluxAndLambdaFuncG(float& rho, float& U1, float& U2, float& U3,
const float& gamma,
float& G0, float& G1, float& G2, float& G3,
float& u, float& v,float& c){
float pressure, E;
// Values needed to compute the eigenvalues
u = U1/rho;
v = U2/rho;
E = U3;
pressure = (gamma - 1.0f)*(E-0.5f*rho*(u*u + v*v));
c = sqrtf(gamma*pressure/rho);
//if (pressure < 0)
//printf("ZERO alert compute G and Lambda gamma:%.3f pressure: %.3f rho:%.3f rho_u:%.3f rho_v%.3f E%.3f\n", gamma,pressure,rho,U1,U2,E);
// Flux computation
G0 = U2;
G1 = U2*u;
G2 = U2*v + pressure;
G3 = v*(E+pressure);
}
inline __device__ float minEigenVal(float a, float b) {
return fminf(fminf(a, b), 0.0f);
}
inline __device__ float maxEigenVal(float a, float b) {
return fmaxf(fmaxf(a, b), 0.0f);
}
inline __device__ float sign(float& a) {
/**
* The following works by bit hacks. In non-obfuscated code, something like
* float r = ((int&)a & 0x7FFFFFFF)!=0; //set r to one or zero
* (int&)r |= ((int&)a & 0x80000000); //Copy sign bit of a
* return r;
*/
#ifndef NEW_SIGN
return (signed((int&)a & 0x80000000) >> 31 ) | ((int&)a & 0x7FFFFFFF)!=0;
#else
float r = ((int&)a & 0x7FFFFFFF)!=0;
return copysignf(r, a);
#endif
}
inline __device__ float minmod(float a, float b, float c){
return 0.25f
*sign(a)
*(sign(a) + sign(b))
*(sign(b) + sign(c))
*fminf( fminf(fabsf(a), fabsf(b)), fabsf(c) );
/* if ( a > 0 && b > 0 && c > 0)
return fminf(c,fminf(a,b));
else if ( a < 0 && b < 0 && c < 0)
return fmaxf(c,fmaxf(a,b));
else
return 0.0;
*/
}
inline __device__ float limiter(float u_plus, float u_center, float u_minus){
return minmod(flux_ctx.theta*(u_plus-u_center),(u_plus-u_minus)*0.5f, flux_ctx.theta*(u_center-u_minus));
}
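// Reconstruct limited slopes in x and y for all four conserved variables of the current cell.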
inline __device__ void reconstructPointVal(float (&U)[4][BLOCKDIM_X][SM_BLOCKDIM_Y], float (&Ux)[4][BLOCKDIM_X][SM_BLOCKDIM_Y], float (&Uy)[4][BLOCKDIM_X][SM_BLOCKDIM_Y], unsigned int i, unsigned int j){
float u_center,u_south,u_north,u_east,u_west;
float ux_out, uy_out;
for (int l=0; l<4; l++){
u_center = U[l][i][j];
u_south = U[l][i][j-1];
u_north = U[l][i][j+1];
u_west = U[l][i-1][j];
u_east = U[l][i+1][j];
// Compute the limited slopes used to form the interface values; each cell computes its own slopes
ux_out = 0.5f*limiter(u_east, u_center, u_west);
uy_out = 0.5f*limiter(u_north, u_center, u_south);
Ux[l][i][j] = ux_out;
Uy[l][i][j] = uy_out;
}
}
inline __device__ float computeFluxWest(float (&U)[4][BLOCKDIM_X][SM_BLOCKDIM_Y], float (&Ux)[4][BLOCKDIM_X][SM_BLOCKDIM_Y], unsigned int i, unsigned int j){
float U0m, U1m, U2m, U3m;
float U0p, U1p, U2p, U3p;
float FG0p, FG1p, FG2p, FG3p;
float FG0m, FG1m, FG2m, FG3m;
float up,vp,cp,um,vm,cm;
float am, ap;
// The eastern reconstruction point of u(i-1,j)
U0m = U[0][i-1][j] + Ux[0][i-1][j];
U1m = U[1][i-1][j] + Ux[1][i-1][j];
U2m = U[2][i-1][j] + Ux[2][i-1][j];
U3m = U[3][i-1][j] + Ux[3][i-1][j];
// The western reconstruction point of u(i,j)
U0p = U[0][i][j] - Ux[0][i][j];
U1p = U[1][i][j] - Ux[1][i][j];
U2p = U[2][i][j] - Ux[2][i][j];
U3p = U[3][i][j] - Ux[3][i][j];
fluxAndLambdaFuncF(U0p, U1p, U2p, U3p, flux_ctx.gamma, FG0p, FG1p, FG2p, FG3p, up, vp, cp);
fluxAndLambdaFuncF(U0m, U1m, U2m, U3m, flux_ctx.gamma, FG0m, FG1m, FG2m, FG3m, um, vm, cm);
am = minEigenVal(um-cm, up-cp);
ap = maxEigenVal(um+cm, up+cp);
// printf("am: %.3f ap: %.3f Ux: %.3f Um: %.3f Up: %.3f U1m: %.3f U2m %.3f U3m: %.3f U3p:%.3f gamma: %.3f up: %.3f um: %.3f vp: %.3f vm: %.3f cp: %.3f cm: %.3f\n", am,ap,Ux[0][i][j], U0m, U0p, U1m, U2m,U3m, U3p, flux_ctx.gamma, up, um, vp,vm, cp, cm);
__syncthreads();
Ux[0][i][j] = ((ap*FG0m -am*FG0p) + ap*am*(U0p-U0m))/(ap-am);
Ux[1][i][j] = ((ap*FG1m -am*FG1p) + ap*am*(U1p-U1m))/(ap-am);
Ux[2][i][j] = ((ap*FG2m -am*FG2p) + ap*am*(U2p-U2m))/(ap-am);
Ux[3][i][j] = ((ap*FG3m -am*FG3p) + ap*am*(U3p-U3m))/(ap-am);
return flux_ctx.dx/fmaxf(ap, -am);
}
inline __device__ float computeFluxSouth(float (&U)[4][BLOCKDIM_X][SM_BLOCKDIM_Y],float (&Uy)[4][BLOCKDIM_X][SM_BLOCKDIM_Y], unsigned int i, unsigned int j){
float U0m, U1m, U2m, U3m;
float U0p, U1p, U2p, U3p;
float FG0p, FG1p, FG2p, FG3p;
float FG0m, FG1m, FG2m, FG3m;
float up,vp,cp,um,vm,cm;
float am, ap;
// The northern reconstruction point of u(i,j-1)
U0m = U[0][i][j-1] + Uy[0][i][j-1];
U1m = U[1][i][j-1] + Uy[1][i][j-1];
U2m = U[2][i][j-1] + Uy[2][i][j-1];
U3m = U[3][i][j-1] + Uy[3][i][j-1];
// The southern reconstruction point of u(i,j)
U0p = U[0][i][j] - Uy[0][i][j];
U1p = U[1][i][j] - Uy[1][i][j];
U2p = U[2][i][j] - Uy[2][i][j];
U3p = U[3][i][j] - Uy[3][i][j];
fluxAndLambdaFuncG(U0p, U1p, U2p, U3p, flux_ctx.gamma, FG0p, FG1p, FG2p, FG3p, up, vp, cp);
fluxAndLambdaFuncG(U0m, U1m, U2m, U3m, flux_ctx.gamma, FG0m, FG1m, FG2m, FG3m, um, vm, cm);
am = minEigenVal(vm-cm, vp-cp);
ap = maxEigenVal(vm+cm, vp+cp);
__syncthreads();
Uy[0][i][j] = ((ap*FG0m -am*FG0p) + ap*am*(U0p-U0m))/(ap-am);
Uy[1][i][j] = ((ap*FG1m -am*FG1p) + ap*am*(U1p-U1m))/(ap-am);
Uy[2][i][j] = ((ap*FG2m -am*FG2p) + ap*am*(U2p-U2m))/(ap-am);
Uy[3][i][j] = ((ap*FG3m -am*FG3p) + ap*am*(U3p-U3m))/(ap-am);
return flux_ctx.dy/fmaxf(ap, -am);
}
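// Flux kernel: each block loads a tile of the conserved variables (with halo) into shared memory, reconstructs the
// limited slopes, evaluates the numerical fluxes across the west and south faces, writes the residuals R0-R3, and on
// step 0 reduces the local time-step limits into flux_ctx.L.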
__global__ void fluxKernel(int step){
int global_border = flux_ctx.global_border;
float dx = flux_ctx.dx;
float dy = flux_ctx.dy;
// Global indexes, multiply by tiledim because each block has a halo/border
int xid = blockIdx.x*flux_ctx.innerDimX + threadIdx.x - global_border;
int yid = blockIdx.y*flux_ctx.innerDimY + threadIdx.y - global_border;
//xid = fminf(xid, flux_ctx.nx+global_border-1);
//yid = fminf(yid, flux_ctx.ny+global_border-1);
// Local id
int i = threadIdx.x;
int j = threadIdx.y;
float r = FLT_MAX;
float r0, r1, r2, r3;
const int nthreads = BLOCKDIM_X*BLOCKDIM_Y;
__shared__ float timeStep[BLOCKDIM_X][BLOCKDIM_Y];
timeStep[i][j] = FLT_MAX;
__shared__ float local_U[4][BLOCKDIM_X][SM_BLOCKDIM_Y];
__shared__ float local_Ux[4][BLOCKDIM_X][SM_BLOCKDIM_Y];
__shared__ float local_Uy[4][BLOCKDIM_X][SM_BLOCKDIM_Y];
local_U[0][i][j] = global_index(flux_ctx.U0.ptr, flux_ctx.U0.pitch, xid, yid, global_border)[0];
local_U[1][i][j] = global_index(flux_ctx.U1.ptr, flux_ctx.U1.pitch, xid, yid, global_border)[0];
local_U[2][i][j] = global_index(flux_ctx.U2.ptr, flux_ctx.U2.pitch, xid, yid, global_border)[0];
local_U[3][i][j] = global_index(flux_ctx.U3.ptr, flux_ctx.U3.pitch, xid, yid, global_border)[0];
__syncthreads();
if ( i > 0 && i < BLOCKDIM_X - 1 && j > 0 && j < BLOCKDIM_Y - 1){
reconstructPointVal(local_U, local_Ux, local_Uy, i, j);
}
__syncthreads();
if ( i > 1 && i < TILEDIM_X + 1 && j > 1 && j < TILEDIM_Y)
r = min(r, computeFluxWest(local_U, local_Ux, i, j));
if ( i > 1 && i < TILEDIM_X && j > 1 && j < TILEDIM_Y + 1)
r = computeFluxSouth(local_U, local_Uy, i, j);
int p = threadIdx.y*blockDim.x+threadIdx.x;
__syncthreads();
if (xid > -1 && xid < flux_ctx.nx && yid > -1 && yid < flux_ctx.ny){
if ( i > 1 && i < TILEDIM_X && j > 1 && j < TILEDIM_Y){
r0 = (local_Ux[0][i][j] - local_Ux[0][i+1][j])/dx + (local_Uy[0][i][j] - local_Uy[0][i][j+1])/dy;
r1 = (local_Ux[1][i][j] - local_Ux[1][i+1][j])/dx + (local_Uy[1][i][j] - local_Uy[1][i][j+1])/dy;
r2 = (local_Ux[2][i][j] - local_Ux[2][i+1][j])/dx + (local_Uy[2][i][j] - local_Uy[2][i][j+1])/dy;
r3 = (local_Ux[3][i][j] - local_Ux[3][i+1][j])/dx + (local_Uy[3][i][j] - local_Uy[3][i][j+1])/dy;
global_index(flux_ctx.R0.ptr, flux_ctx.R0.pitch, xid, yid, global_border)[0] = r0;//local_Ux[0][i][j];
global_index(flux_ctx.R1.ptr, flux_ctx.R1.pitch, xid, yid, global_border)[0] = r1;
global_index(flux_ctx.R2.ptr, flux_ctx.R2.pitch, xid, yid, global_border)[0] = r2;
global_index(flux_ctx.R3.ptr, flux_ctx.R3.pitch, xid, yid, global_border)[0] = r3;//local_Uy[0][i][j];
timeStep[0][p] = r;
}
}
//Now, find and write out the maximal eigenvalue in this block
if (step==0) {
// __syncthreads();
volatile float* B_volatile = timeStep[0];
//int p = threadIdx.y*blockDim.x+threadIdx.x; //reuse p for indexing
//printf(" %i ", p);
//Write the maximum eigenvalues computed by this thread into shared memory
//Only consider eigenvalues within the internal domain
/* if (xid < flux_ctx.nx && yid < flux_ctx.ny && xid >= 0 && yid >=0){
timeStep[0][p] = r;
}
*/
__syncthreads();
//First use all threads to reduce min(1024, nthreads) values into 64 values
//This first outer test is a compile-time test simply to remove statements if nthreads is less than 512.
if (nthreads >= 512) {
//This inner test (p < 512) first checks that the current thread should
//be active in the reduction from min(1024, nthreads) elements to 512. Makes little sense here, but
//a lot of sense for the last test where there should only be 64 active threads.
//The second part of this test ((p+512) < nthreads) removes the threads that would generate an
//out-of-bounds access to shared memory
if (p < 512 && (p+512) < nthreads) timeStep[0][p] = fminf(timeStep[0][p], timeStep[0][p + 512]); //min(1024, nthreads)=>512
__syncthreads();
}
if (nthreads >= 256) {
if (p < 256 && (p+256) < nthreads) timeStep[0][p] = fminf(timeStep[0][p], timeStep[0][p + 256]); //min(512, nthreads)=>256
__syncthreads();
}
if (nthreads >= 128) {
if (p < 128 && (p+128) < nthreads) timeStep[0][p] = fminf(timeStep[0][p], timeStep[0][p + 128]); //min(256, nthreads)=>128
__syncthreads();
}
if (nthreads >= 64) {
if (p < 64 && (p+64) < nthreads) timeStep[0][p] = fminf(timeStep[0][p], timeStep[0][p + 64]); //min(128, nthreads)=>64
__syncthreads();
}
//Let the last warp reduce 64 values into a single value
//Will generate out-of-bounds errors for nthreads < 64
if (p < 32) {
if (nthreads >= 64) B_volatile[p] = fminf(B_volatile[p], B_volatile[p + 32]); //64=>32
if (nthreads >= 32) B_volatile[p] = fminf(B_volatile[p], B_volatile[p + 16]); //32=>16
if (nthreads >= 16) B_volatile[p] = fminf(B_volatile[p], B_volatile[p + 8]); //16=>8
if (nthreads >= 8) B_volatile[p] = fminf(B_volatile[p], B_volatile[p + 4]); //8=>4
if (nthreads >= 4) B_volatile[p] = fminf(B_volatile[p], B_volatile[p + 2]); //4=>2
if (nthreads >= 2) B_volatile[p] = fminf(B_volatile[p], B_volatile[p + 1]); //2=>1
}
if (threadIdx.y + threadIdx.x == 0) flux_ctx.L[blockIdx.x*gridDim.y + blockIdx.y] = B_volatile[0];
}
}
void callFluxKernel(dim3 grid, dim3 block, int step, FluxKernelArgs* h_ctx){
hipMemcpyToSymbolAsync(flux_ctx, h_ctx, sizeof(FluxKernelArgs), 0, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( fluxKernel), dim3(grid), dim3(block), 0, 0, step);
}
// Set periodic boundary condition
__global__ void setBCPeriodic(gpu_raw_ptr U, unsigned int NX, unsigned int NY, int border){
int threads = blockDim.x*blockDim.y;
float* B_in;
float* B_out;
int nx = NX-2*border;
int ny = NY-2*border;
int tid = threadIdx.y*blockDim.x+threadIdx.x;
int kin;
int kk;
// SOUTH
for (int b = 0; b < border; b++){
B_out = global_index(U.ptr, U.pitch, 0, -1 - b, border);
B_in = global_index(U.ptr, U.pitch, 0, ny -1 - b, border);
for (int k = tid; k < nx+border*2; k+=threads){
kk = k-border;
kin = min(kk,nx-1);
kin = max(kin,0);
B_out[kk] = B_in[kin];
}
}
// NORTH
for (int b = 0; b < border; b++){
B_out = global_index(U.ptr, U.pitch, 0, ny + b, border);
B_in = global_index(U.ptr, U.pitch, 0, 0 + b, border);
for (int k = tid; k < nx+border*2; k+=threads){
kk = k-border;
kin = min(kk,nx-1);
kin = max(kin,0);
B_out[kk] = B_in[kin];
}
}
// WEST
for (int k = tid; k < ny+border*2; k+= threads){
kk = k-border;
B_out = global_index(U.ptr, U.pitch, 0, kk, border);
kin = min(kk,ny-1);
kin = max(kin,0);
for (int b = 0; b < border; b++)
B_out[-1-b] = global_index(U.ptr, U.pitch, nx -1 - b, kin, border)[0];
}
// EAST
for (int k = tid; k < ny+border*2; k+= threads){
kk = k-border;
B_out = global_index(U.ptr, U.pitch, nx, kk, border);
kin = min(kk,ny-1);
kin = max(kin,0);
for (int b = 0; b < border; b++)
B_out[b] = global_index(U.ptr, U.pitch, 0 + b, kin,border)[0];
}
}
void callSetBCPeriodic(dim3 grid, dim3 block, gpu_raw_ptr U, unsigned int NX, unsigned int NY, int border){
hipLaunchKernelGGL(( setBCPeriodic), dim3(grid), dim3(block), 0, 0, U, NX, NY, border);
}
void callCollectiveSetBCPeriodic(dim3 grid, dim3 block, const collBCKernelArgs* arg){
callSetBCPeriodic(grid, block, arg->U0, arg->NX, arg->NY, arg->global_border);
callSetBCPeriodic(grid, block, arg->U1, arg->NX, arg->NY, arg->global_border);
callSetBCPeriodic(grid, block, arg->U2, arg->NX, arg->NY, arg->global_border);
callSetBCPeriodic(grid, block, arg->U3, arg->NX, arg->NY, arg->global_border);
}
// Set open boundary condition
__global__ void setBCOpen(gpu_raw_ptr U, unsigned int NX, unsigned int NY, int border){
int threads = blockDim.x*blockDim.y;
float* B_in;
float* B_out;
int nx = NX-2*border;
int ny = NY-2*border;
int tid = threadIdx.y*blockDim.x+threadIdx.x;
int kin;
int kk;
// SOUTH
for (int b = 0; b < border; b++){
B_out = global_index(U.ptr, U.pitch, 0, -1 - b, border);
B_in = global_index(U.ptr, U.pitch, 0, 0, border);
for (int k = tid; k < nx+border*2; k+=threads){
kk = k-border;
kin = min(kk,nx-1);
kin = max(kin,0);
B_out[kk] = B_in[kin];
}
}
// NORTH
for (int b = 0; b < border; b++){
B_out = global_index(U.ptr, U.pitch, 0, ny + b, border);
B_in = global_index(U.ptr, U.pitch, 0, ny - 1, border);
for (int k = tid; k < nx+border*2; k+=threads){
kk = k-border;
kin = min(kk,nx-1);
kin = max(kin,0);
B_out[kk] = B_in[kin];
}
}
// WEST
for (int k = tid; k < ny+border*2; k+= threads){
kk = k-border;
B_out = global_index(U.ptr, U.pitch, 0, kk, border);
kin = min(kk,nx-1);
kin = max(kin,0);
for (int b = 0; b < border; b++)
B_out[-1-b] = global_index(U.ptr, U.pitch, 0, kin, border)[0];
}
// EAST
for (int k = tid; k < ny+border*2; k+= threads){
kk = k-border;
B_out = global_index(U.ptr, U.pitch, nx, kk, border);
kin = min(kk,nx-1);
kin = max(kin,0);
for (int b = 0; b < border; b++)
B_out[b] = global_index(U.ptr, U.pitch, nx - 1, kin,border)[0];
}
}
void callSetBCOpen(dim3 grid, dim3 block, gpu_raw_ptr U, unsigned int NX, unsigned int NY, int border){
hipLaunchKernelGGL(( setBCOpen), dim3(grid), dim3(block), 0, 0, U, NX, NY, border);
}
void callCollectiveSetBCOpen(dim3 grid, dim3 block, const collBCKernelArgs* arg){
//hipMemcpyToSymbolAsync(bc_ctx, arg->, sizeof(collBCKernelArgs), 0, hipMemcpyHostToDevice);
callSetBCOpen(grid, block, arg->U0, arg->NX, arg->NY, arg->global_border);
callSetBCOpen(grid, block, arg->U1, arg->NX, arg->NY, arg->global_border);
callSetBCOpen(grid, block, arg->U2, arg->NX, arg->NY, arg->global_border);
callSetBCOpen(grid, block, arg->U3, arg->NX, arg->NY, arg->global_border);
}
// Set wall boundary condition
__global__ void setBCWall(gpu_raw_ptr U, unsigned int NX, unsigned int NY, int border){
int threads = blockDim.x*blockDim.y;
float* B_in;
float* B_out;
int nx = NX-2*border;
int ny = NY-2*border;
int tid = threadIdx.y*blockDim.x+threadIdx.x;
int kin;
// SOUTH
for (int b = 0; b < border; b++){
B_out = global_index(U.ptr, U.pitch, 0, -1 - b, border);
B_in = global_index(U.ptr, U.pitch, 0, 0 + b, border);
for (int k = tid-2; k < nx+border; k+=threads){
kin = min(k,nx-1);
kin = max(kin,0);
B_out[k] = B_in[kin];
}
}
// NORTH
for (int b = 0; b < border; b++){
B_out = global_index(U.ptr, U.pitch, 0, ny + b, border);
B_in = global_index(U.ptr, U.pitch, 0, ny - 1 - b, border);
for (int k = tid-2; k < nx+border; k+=threads){
kin = min(k,nx-1);
kin = max(kin,0);
B_out[k] = B_in[kin];
}
}
// WEST
for (int k = tid-2; k < ny; k+= threads){
printf("k: %i", k);
B_out = global_index(U.ptr, U.pitch, 0, k, border);
kin = min(k,nx-1);
kin = max(kin,0);
for (int b = 0; b < border; b++)
B_out[-1-b] = global_index(U.ptr, U.pitch, 0 + b, kin, border)[0];
}
// EAST
for (unsigned int k = tid; k < ny; k+= threads){
B_out = global_index(U.ptr, U.pitch, nx, k, border);
kin = min(k,nx-1);
kin = max(kin,0);
for (int b = 0; b < border; b++)
B_out[b] = global_index(U.ptr, U.pitch, nx - 1 - b, kin,border)[0];
}
}
void callSetBCWall(dim3 grid, dim3 block, gpu_raw_ptr U, unsigned int NX, unsigned int NY, int border){
hipLaunchKernelGGL(( setBCWall), dim3(grid), dim3(block), 0, 0, U, NX, NY, border);
}
void callCollectiveSetBCWall(dim3 grid, dim3 block, const collBCKernelArgs* arg){
callSetBCWall(grid, block, arg->U0, arg->NX, arg->NY, arg->global_border);
callSetBCWall(grid, block, arg->U1, arg->NX, arg->NY, arg->global_border);
callSetBCWall(grid, block, arg->U2, arg->NX, arg->NY, arg->global_border);
callSetBCWall(grid, block, arg->U3, arg->NX, arg->NY, arg->global_border);
}
| cffbd4559bfe1109419551e9633e9967622516ab.cu | #include "util.h"
#include "kernel.h"
#include "cuda.h"
#include "math.h"
__constant__ FluxKernelArgs flux_ctx;
__constant__ DtKernelArgs dt_ctx;
__constant__ RKKernelArgs rk_ctx;
void init_allocate(){
for (int i=0; i<3; i++){
cudaHostAlloc(&BCArgs[i], sizeof(collBCKernelArgs), cudaHostAllocWriteCombined);
//cudaMallocHost(&BCArgs[i], sizeof(collBCKernelArgs));
cudaHostAlloc(&fluxArgs[i], sizeof(FluxKernelArgs), cudaHostAllocWriteCombined);
cudaHostAlloc(&RKArgs[i], sizeof(RKKernelArgs), cudaHostAllocWriteCombined);
cudaHostAlloc(&dtArgs, sizeof(DtKernelArgs), cudaHostAllocWriteCombined);
cudaHostAlloc(&dt_host, sizeof(float), cudaHostAllocWriteCombined);
//cudaMallocHost(&fluxArgs[i], sizeof(FluxKernelArgs));
}
}
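// Runge-Kutta update kernel: step 0 stores Q = U + dt*R; otherwise the new estimate is averaged with the stored Q (second-order update).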
__global__ void RKKernel(int step){
float dt = rk_ctx.dt[0];
// dt = 0.0006;
//printf("TIME %.6f\n", dt);
float u0,u1,u2,u3,r0,r1,r2,r3,q0,q1,q2,q3;
int global_border = rk_ctx.global_border;
// Global indexes
int xid = blockIdx.x*blockDim.x + threadIdx.x - global_border;
int yid = blockIdx.y*blockDim.y + threadIdx.y - global_border;
if ( xid < 0 || xid >= rk_ctx.nx || yid < 0 || yid >= rk_ctx.ny ) return;
u0 = global_index(rk_ctx.U0.ptr, rk_ctx.U0.pitch, xid, yid, global_border)[0];
u1 = global_index(rk_ctx.U1.ptr, rk_ctx.U1.pitch, xid, yid, global_border)[0];
u2 = global_index(rk_ctx.U2.ptr, rk_ctx.U2.pitch, xid, yid, global_border)[0];
u3 = global_index(rk_ctx.U3.ptr, rk_ctx.U3.pitch, xid, yid, global_border)[0];
r0 = global_index(rk_ctx.R0.ptr, rk_ctx.R0.pitch, xid, yid, global_border)[0];
r1 = global_index(rk_ctx.R1.ptr, rk_ctx.R1.pitch, xid, yid, global_border)[0];
r2 = global_index(rk_ctx.R2.ptr, rk_ctx.R2.pitch, xid, yid, global_border)[0];
r3 = global_index(rk_ctx.R3.ptr, rk_ctx.R3.pitch, xid, yid, global_border)[0];
if (step == 0) {
q0 = u0 + dt*r0;
q1 = u1 + dt*r1;
q2 = u2 + dt*r2;
q3 = u3 + dt*r3;
}
else {
q0 = global_index(rk_ctx.Q0.ptr, rk_ctx.Q0.pitch, xid, yid, global_border)[0];
q1 = global_index(rk_ctx.Q1.ptr, rk_ctx.Q1.pitch, xid, yid, global_border)[0];
q2 = global_index(rk_ctx.Q2.ptr, rk_ctx.Q2.pitch, xid, yid, global_border)[0];
q3 = global_index(rk_ctx.Q3.ptr, rk_ctx.Q3.pitch, xid, yid, global_border)[0];
q0 = 0.5f*(q0 + (u0 + dt*r0));
q1 = 0.5f*(q1 + (u1 + dt*r1));
q2 = 0.5f*(q2 + (u2 + dt*r2));
q3 = 0.5f*(q3 + (u3 + dt*r3));
}
global_index(rk_ctx.Q0.ptr, rk_ctx.Q0.pitch, xid, yid, global_border)[0] = q0;
global_index(rk_ctx.Q1.ptr, rk_ctx.Q1.pitch, xid, yid, global_border)[0] = q1;
global_index(rk_ctx.Q2.ptr, rk_ctx.Q2.pitch, xid, yid, global_border)[0] = q2;
global_index(rk_ctx.Q3.ptr, rk_ctx.Q3.pitch, xid, yid, global_border)[0] = q3;
}
void callRKKernel(dim3 grid, dim3 block, int step, RKKernelArgs* h_ctx){
cudaMemcpyToSymbolAsync(rk_ctx, h_ctx, sizeof(RKKernelArgs), 0, cudaMemcpyHostToDevice);
RKKernel<<<grid, block>>>(step);
}
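// Shared-memory min-reduction over dt_ctx.L; the minimum is scaled by dt_ctx.scale and stored as the time step dt_ctx.dt[0].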
__global__ void DtKernel(int nThreads){
extern __shared__ float sdata[];
volatile float* sdata_volatile = sdata;
unsigned int tid = threadIdx.x;
int threads = nThreads;
float dt;
//printf("THREADID %i",tid);
sdata[tid] = FLT_MAX;
for (unsigned int i=tid; i<dt_ctx.nElements; i += threads)
sdata[tid] = min(sdata[tid], dt_ctx.L[i]);
__syncthreads();
// if (tid == 0){
// printf("START\n");
// for (int k=0; k<nThreads; k++)
// printf(" %.5f\t",sdata[k]);
// }
//Now, reduce all elements into a single element
if (threads >= 512) {
if (tid < 256) sdata[tid] = min(sdata[tid], sdata[tid + 256]);
__syncthreads();
}
if (threads >= 256) {
if (tid < 128) sdata[tid] = min(sdata[tid], sdata[tid + 128]);
__syncthreads();
}
if (threads >= 128) {
if (tid < 64) sdata[tid] = min(sdata[tid], sdata[tid + 64]);
__syncthreads();
}
if (tid < 32) {
if (threads >= 64) sdata_volatile[tid] = min(sdata_volatile[tid], sdata_volatile[tid + 32]);
if (tid < 16) {
if (threads >= 32) sdata_volatile[tid] = min(sdata_volatile[tid], sdata_volatile[tid + 16]);
if (threads >= 16) sdata_volatile[tid] = min(sdata_volatile[tid], sdata_volatile[tid + 8]);
if (threads >= 8) sdata_volatile[tid] = min(sdata_volatile[tid], sdata_volatile[tid + 4]);
if (threads >= 4) sdata_volatile[tid] = min(sdata_volatile[tid], sdata_volatile[tid + 2]);
if (threads >= 2) sdata_volatile[tid] = min(sdata_volatile[tid], sdata_volatile[tid + 1]);
}
if (tid == 0) {
dt = sdata_volatile[tid];
if (dt == FLT_MAX) {
//If no water at all, and no sources,
//we really do not need to simulate,
//but using FLT_MAX will make things crash...
dt = 1.0e-7f;
}
dt_ctx.dt[tid] = dt*dt_ctx.scale;
// printf("TID %i",tid);
}
}
}
void callDtKernel(int nThreads, DtKernelArgs* h_ctx){
cudaMemcpyToSymbolAsync(dt_ctx, h_ctx, sizeof(DtKernelArgs), 0, cudaMemcpyHostToDevice);
DtKernel<<<1,nThreads,sizeof(float)*nThreads>>>(nThreads);
}
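// Evaluate the x-direction flux together with the velocities and sound speed needed for the wave-speed estimates.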
inline __device__ void fluxAndLambdaFuncF(float& rho, float& U1, float& U2, float& U3,
const float& gamma,
float& F0, float& F1, float& F2, float& F3,
float& u, float& v,float& c){
float pressure, E;
// Values needed to compute the eigenvalues
u = U1/rho;
v = U2/rho;
E = U3;
pressure = (gamma - 1.0f)*(E-0.5f*rho*(u*u + v*v));
c = sqrtf(gamma*pressure/rho);
// Flux computation
F0 = U1;
F1 = U1*u + pressure;
F2 = U1*v;
F3 = u*(E+pressure);
}
inline __device__ void fluxAndLambdaFuncG(float& rho, float& U1, float& U2, float& U3,
const float& gamma,
float& G0, float& G1, float& G2, float& G3,
float& u, float& v,float& c){
float pressure, E;
// Values needed to compute the eigenvalues
u = U1/rho;
v = U2/rho;
E = U3;
pressure = (gamma - 1.0f)*(E-0.5f*rho*(u*u + v*v));
c = sqrtf(gamma*pressure/rho);
//if (pressure < 0)
//printf("ZERO alert compute G and Lambda gamma:%.3f pressure: %.3f rho:%.3f rho_u:%.3f rho_v%.3f E%.3f\n", gamma,pressure,rho,U1,U2,E);
// Flux computation
G0 = U2;
G1 = U2*u;
G2 = U2*v + pressure;
G3 = v*(E+pressure);
}
inline __device__ float minEigenVal(float a, float b) {
return fminf(fminf(a, b), 0.0f);
}
inline __device__ float maxEigenVal(float a, float b) {
return fmaxf(fmaxf(a, b), 0.0f);
}
inline __device__ float sign(float& a) {
/**
* The following works by bit hacks. In non-obfuscated code, something like
* float r = ((int&)a & 0x7FFFFFFF)!=0; //set r to one or zero
* (int&)r |= ((int&)a & 0x80000000); //Copy sign bit of a
* return r;
*/
#ifndef NEW_SIGN
return (signed((int&)a & 0x80000000) >> 31 ) | ((int&)a & 0x7FFFFFFF)!=0;
#else
float r = ((int&)a & 0x7FFFFFFF)!=0;
return copysignf(r, a);
#endif
}
inline __device__ float minmod(float a, float b, float c){
return 0.25f
*sign(a)
*(sign(a) + sign(b))
*(sign(b) + sign(c))
*fminf( fminf(fabsf(a), fabsf(b)), fabsf(c) );
/* if ( a > 0 && b > 0 && c > 0)
return fminf(c,fminf(a,b));
else if ( a < 0 && b < 0 && c < 0)
return fmaxf(c,fmaxf(a,b));
else
return 0.0;
*/
}
inline __device__ float limiter(float u_plus, float u_center, float u_minus){
return minmod(flux_ctx.theta*(u_plus-u_center),(u_plus-u_minus)*0.5f, flux_ctx.theta*(u_center-u_minus));
}
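// Reconstruct limited slopes in x and y for all four conserved variables of the current cell.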
inline __device__ void reconstructPointVal(float (&U)[4][BLOCKDIM_X][SM_BLOCKDIM_Y], float (&Ux)[4][BLOCKDIM_X][SM_BLOCKDIM_Y], float (&Uy)[4][BLOCKDIM_X][SM_BLOCKDIM_Y], unsigned int i, unsigned int j){
float u_center,u_south,u_north,u_east,u_west;
float ux_out, uy_out;
for (int l=0; l<4; l++){
u_center = U[l][i][j];
u_south = U[l][i][j-1];
u_north = U[l][i][j+1];
u_west = U[l][i-1][j];
u_east = U[l][i+1][j];
// Compute the limited slopes used to form the interface values; each cell computes its own slopes
ux_out = 0.5f*limiter(u_east, u_center, u_west);
uy_out = 0.5f*limiter(u_north, u_center, u_south);
Ux[l][i][j] = ux_out;
Uy[l][i][j] = uy_out;
}
}
inline __device__ float computeFluxWest(float (&U)[4][BLOCKDIM_X][SM_BLOCKDIM_Y], float (&Ux)[4][BLOCKDIM_X][SM_BLOCKDIM_Y], unsigned int i, unsigned int j){
float U0m, U1m, U2m, U3m;
float U0p, U1p, U2p, U3p;
float FG0p, FG1p, FG2p, FG3p;
float FG0m, FG1m, FG2m, FG3m;
float up,vp,cp,um,vm,cm;
float am, ap;
// The eastern reconstruction point of u(i-1,j)
U0m = U[0][i-1][j] + Ux[0][i-1][j];
U1m = U[1][i-1][j] + Ux[1][i-1][j];
U2m = U[2][i-1][j] + Ux[2][i-1][j];
U3m = U[3][i-1][j] + Ux[3][i-1][j];
// The western reconstruction point of u(i,j)
U0p = U[0][i][j] - Ux[0][i][j];
U1p = U[1][i][j] - Ux[1][i][j];
U2p = U[2][i][j] - Ux[2][i][j];
U3p = U[3][i][j] - Ux[3][i][j];
fluxAndLambdaFuncF(U0p, U1p, U2p, U3p, flux_ctx.gamma, FG0p, FG1p, FG2p, FG3p, up, vp, cp);
fluxAndLambdaFuncF(U0m, U1m, U2m, U3m, flux_ctx.gamma, FG0m, FG1m, FG2m, FG3m, um, vm, cm);
am = minEigenVal(um-cm, up-cp);
ap = maxEigenVal(um+cm, up+cp);
// printf("am: %.3f ap: %.3f Ux: %.3f Um: %.3f Up: %.3f U1m: %.3f U2m %.3f U3m: %.3f U3p:%.3f gamma: %.3f up: %.3f um: %.3f vp: %.3f vm: %.3f cp: %.3f cm: %.3f\n", am,ap,Ux[0][i][j], U0m, U0p, U1m, U2m,U3m, U3p, flux_ctx.gamma, up, um, vp,vm, cp, cm);
__syncthreads();
Ux[0][i][j] = ((ap*FG0m -am*FG0p) + ap*am*(U0p-U0m))/(ap-am);
Ux[1][i][j] = ((ap*FG1m -am*FG1p) + ap*am*(U1p-U1m))/(ap-am);
Ux[2][i][j] = ((ap*FG2m -am*FG2p) + ap*am*(U2p-U2m))/(ap-am);
Ux[3][i][j] = ((ap*FG3m -am*FG3p) + ap*am*(U3p-U3m))/(ap-am);
return flux_ctx.dx/fmaxf(ap, -am);
}
inline __device__ float computeFluxSouth(float (&U)[4][BLOCKDIM_X][SM_BLOCKDIM_Y],float (&Uy)[4][BLOCKDIM_X][SM_BLOCKDIM_Y], unsigned int i, unsigned int j){
float U0m, U1m, U2m, U3m;
float U0p, U1p, U2p, U3p;
float FG0p, FG1p, FG2p, FG3p;
float FG0m, FG1m, FG2m, FG3m;
float up,vp,cp,um,vm,cm;
float am, ap;
// The northern reconstruction point of u(i,j-1)
U0m = U[0][i][j-1] + Uy[0][i][j-1];
U1m = U[1][i][j-1] + Uy[1][i][j-1];
U2m = U[2][i][j-1] + Uy[2][i][j-1];
U3m = U[3][i][j-1] + Uy[3][i][j-1];
// The southern reconstruction point of u(i,j)
U0p = U[0][i][j] - Uy[0][i][j];
U1p = U[1][i][j] - Uy[1][i][j];
U2p = U[2][i][j] - Uy[2][i][j];
U3p = U[3][i][j] - Uy[3][i][j];
fluxAndLambdaFuncG(U0p, U1p, U2p, U3p, flux_ctx.gamma, FG0p, FG1p, FG2p, FG3p, up, vp, cp);
fluxAndLambdaFuncG(U0m, U1m, U2m, U3m, flux_ctx.gamma, FG0m, FG1m, FG2m, FG3m, um, vm, cm);
am = minEigenVal(vm-cm, vp-cp);
ap = maxEigenVal(vm+cm, vp+cp);
__syncthreads();
Uy[0][i][j] = ((ap*FG0m -am*FG0p) + ap*am*(U0p-U0m))/(ap-am);
Uy[1][i][j] = ((ap*FG1m -am*FG1p) + ap*am*(U1p-U1m))/(ap-am);
Uy[2][i][j] = ((ap*FG2m -am*FG2p) + ap*am*(U2p-U2m))/(ap-am);
Uy[3][i][j] = ((ap*FG3m -am*FG3p) + ap*am*(U3p-U3m))/(ap-am);
return flux_ctx.dy/fmaxf(ap, -am);
}
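// Flux kernel: each block loads a tile of the conserved variables (with halo) into shared memory, reconstructs the
// limited slopes, evaluates the numerical fluxes across the west and south faces, writes the residuals R0-R3, and on
// step 0 reduces the local time-step limits into flux_ctx.L.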
__global__ void fluxKernel(int step){
int global_border = flux_ctx.global_border;
float dx = flux_ctx.dx;
float dy = flux_ctx.dy;
// Global indexes, multiply by tiledim because each block has a halo/border
int xid = blockIdx.x*flux_ctx.innerDimX + threadIdx.x - global_border;
int yid = blockIdx.y*flux_ctx.innerDimY + threadIdx.y - global_border;
//xid = fminf(xid, flux_ctx.nx+global_border-1);
//yid = fminf(yid, flux_ctx.ny+global_border-1);
// Local id
int i = threadIdx.x;
int j = threadIdx.y;
float r = FLT_MAX;
float r0, r1, r2, r3;
const int nthreads = BLOCKDIM_X*BLOCKDIM_Y;
__shared__ float timeStep[BLOCKDIM_X][BLOCKDIM_Y];
timeStep[i][j] = FLT_MAX;
__shared__ float local_U[4][BLOCKDIM_X][SM_BLOCKDIM_Y];
__shared__ float local_Ux[4][BLOCKDIM_X][SM_BLOCKDIM_Y];
__shared__ float local_Uy[4][BLOCKDIM_X][SM_BLOCKDIM_Y];
local_U[0][i][j] = global_index(flux_ctx.U0.ptr, flux_ctx.U0.pitch, xid, yid, global_border)[0];
local_U[1][i][j] = global_index(flux_ctx.U1.ptr, flux_ctx.U1.pitch, xid, yid, global_border)[0];
local_U[2][i][j] = global_index(flux_ctx.U2.ptr, flux_ctx.U2.pitch, xid, yid, global_border)[0];
local_U[3][i][j] = global_index(flux_ctx.U3.ptr, flux_ctx.U3.pitch, xid, yid, global_border)[0];
__syncthreads();
if ( i > 0 && i < BLOCKDIM_X - 1 && j > 0 && j < BLOCKDIM_Y - 1){
reconstructPointVal(local_U, local_Ux, local_Uy, i, j);
}
__syncthreads();
if ( i > 1 && i < TILEDIM_X + 1 && j > 1 && j < TILEDIM_Y)
r = min(r, computeFluxWest(local_U, local_Ux, i, j));
if ( i > 1 && i < TILEDIM_X && j > 1 && j < TILEDIM_Y + 1)
r = computeFluxSouth(local_U, local_Uy, i, j);
int p = threadIdx.y*blockDim.x+threadIdx.x;
__syncthreads();
if (xid > -1 && xid < flux_ctx.nx && yid > -1 && yid < flux_ctx.ny){
if ( i > 1 && i < TILEDIM_X && j > 1 && j < TILEDIM_Y){
r0 = (local_Ux[0][i][j] - local_Ux[0][i+1][j])/dx + (local_Uy[0][i][j] - local_Uy[0][i][j+1])/dy;
r1 = (local_Ux[1][i][j] - local_Ux[1][i+1][j])/dx + (local_Uy[1][i][j] - local_Uy[1][i][j+1])/dy;
r2 = (local_Ux[2][i][j] - local_Ux[2][i+1][j])/dx + (local_Uy[2][i][j] - local_Uy[2][i][j+1])/dy;
r3 = (local_Ux[3][i][j] - local_Ux[3][i+1][j])/dx + (local_Uy[3][i][j] - local_Uy[3][i][j+1])/dy;
global_index(flux_ctx.R0.ptr, flux_ctx.R0.pitch, xid, yid, global_border)[0] = r0;//local_Ux[0][i][j];
global_index(flux_ctx.R1.ptr, flux_ctx.R1.pitch, xid, yid, global_border)[0] = r1;
global_index(flux_ctx.R2.ptr, flux_ctx.R2.pitch, xid, yid, global_border)[0] = r2;
global_index(flux_ctx.R3.ptr, flux_ctx.R3.pitch, xid, yid, global_border)[0] = r3;//local_Uy[0][i][j];
timeStep[0][p] = r;
}
}
//Now, find and write out the maximal eigenvalue in this block
if (step==0) {
// __syncthreads();
volatile float* B_volatile = timeStep[0];
//int p = threadIdx.y*blockDim.x+threadIdx.x; //reuse p for indexing
//printf(" %i ", p);
//Write the maximum eigenvalues computed by this thread into shared memory
//Only consider eigenvalues within the internal domain
/* if (xid < flux_ctx.nx && yid < flux_ctx.ny && xid >= 0 && yid >=0){
timeStep[0][p] = r;
}
*/
__syncthreads();
//First use all threads to reduce min(1024, nthreads) values into 64 values
//This first outer test is a compile-time test simply to remove statements if nthreads is less than 512.
if (nthreads >= 512) {
//This inner test (p < 512) first checks that the current thread should
//be active in the reduction from min(1024, nthreads) elements to 512. Makes little sense here, but
//a lot of sense for the last test where there should only be 64 active threads.
//The second part of this test ((p+512) < nthreads) removes the threads that would generate an
//out-of-bounds access to shared memory
if (p < 512 && (p+512) < nthreads) timeStep[0][p] = fminf(timeStep[0][p], timeStep[0][p + 512]); //min(1024, nthreads)=>512
__syncthreads();
}
if (nthreads >= 256) {
if (p < 256 && (p+256) < nthreads) timeStep[0][p] = fminf(timeStep[0][p], timeStep[0][p + 256]); //min(512, nthreads)=>256
__syncthreads();
}
if (nthreads >= 128) {
if (p < 128 && (p+128) < nthreads) timeStep[0][p] = fminf(timeStep[0][p], timeStep[0][p + 128]); //min(256, nthreads)=>128
__syncthreads();
}
if (nthreads >= 64) {
if (p < 64 && (p+64) < nthreads) timeStep[0][p] = fminf(timeStep[0][p], timeStep[0][p + 64]); //min(128, nthreads)=>64
__syncthreads();
}
//Let the last warp reduce 64 values into a single value
//Will generate out-of-bounds errors for nthreads < 64
if (p < 32) {
if (nthreads >= 64) B_volatile[p] = fminf(B_volatile[p], B_volatile[p + 32]); //64=>32
if (nthreads >= 32) B_volatile[p] = fminf(B_volatile[p], B_volatile[p + 16]); //32=>16
if (nthreads >= 16) B_volatile[p] = fminf(B_volatile[p], B_volatile[p + 8]); //16=>8
if (nthreads >= 8) B_volatile[p] = fminf(B_volatile[p], B_volatile[p + 4]); //8=>4
if (nthreads >= 4) B_volatile[p] = fminf(B_volatile[p], B_volatile[p + 2]); //4=>2
if (nthreads >= 2) B_volatile[p] = fminf(B_volatile[p], B_volatile[p + 1]); //2=>1
}
if (threadIdx.y + threadIdx.x == 0) flux_ctx.L[blockIdx.x*gridDim.y + blockIdx.y] = B_volatile[0];
}
}
void callFluxKernel(dim3 grid, dim3 block, int step, FluxKernelArgs* h_ctx){
cudaMemcpyToSymbolAsync(flux_ctx, h_ctx, sizeof(FluxKernelArgs), 0, cudaMemcpyHostToDevice);
fluxKernel<<<grid, block>>>(step);
}
// Set periodic boundary condition
__global__ void setBCPeriodic(gpu_raw_ptr U, unsigned int NX, unsigned int NY, int border){
int threads = blockDim.x*blockDim.y;
float* B_in;
float* B_out;
int nx = NX-2*border;
int ny = NY-2*border;
int tid = threadIdx.y*blockDim.x+threadIdx.x;
int kin;
int kk;
// SOUTH
for (int b = 0; b < border; b++){
B_out = global_index(U.ptr, U.pitch, 0, -1 - b, border);
B_in = global_index(U.ptr, U.pitch, 0, ny -1 - b, border);
for (int k = tid; k < nx+border*2; k+=threads){
kk = k-border;
kin = min(kk,nx-1);
kin = max(kin,0);
B_out[kk] = B_in[kin];
}
}
// NORTH
for (int b = 0; b < border; b++){
B_out = global_index(U.ptr, U.pitch, 0, ny + b, border);
B_in = global_index(U.ptr, U.pitch, 0, 0 + b, border);
for (int k = tid; k < nx+border*2; k+=threads){
kk = k-border;
kin = min(kk,nx-1);
kin = max(kin,0);
B_out[kk] = B_in[kin];
}
}
// WEST
for (int k = tid; k < ny+border*2; k+= threads){
kk = k-border;
B_out = global_index(U.ptr, U.pitch, 0, kk, border);
kin = min(kk,ny-1);
kin = max(kin,0);
for (int b = 0; b < border; b++)
B_out[-1-b] = global_index(U.ptr, U.pitch, nx -1 - b, kin, border)[0];
}
// EAST
for (int k = tid; k < ny+border*2; k+= threads){
kk = k-border;
B_out = global_index(U.ptr, U.pitch, nx, kk, border);
kin = min(kk,ny-1);
kin = max(kin,0);
for (int b = 0; b < border; b++)
B_out[b] = global_index(U.ptr, U.pitch, 0 + b, kin,border)[0];
}
}
void callSetBCPeriodic(dim3 grid, dim3 block, gpu_raw_ptr U, unsigned int NX, unsigned int NY, int border){
setBCPeriodic<<<grid, block>>>(U, NX, NY, border);
}
void callCollectiveSetBCPeriodic(dim3 grid, dim3 block, const collBCKernelArgs* arg){
callSetBCPeriodic(grid, block, arg->U0, arg->NX, arg->NY, arg->global_border);
callSetBCPeriodic(grid, block, arg->U1, arg->NX, arg->NY, arg->global_border);
callSetBCPeriodic(grid, block, arg->U2, arg->NX, arg->NY, arg->global_border);
callSetBCPeriodic(grid, block, arg->U3, arg->NX, arg->NY, arg->global_border);
}
// Set open boundary condition
__global__ void setBCOpen(gpu_raw_ptr U, unsigned int NX, unsigned int NY, int border){
int threads = blockDim.x*blockDim.y;
float* B_in;
float* B_out;
int nx = NX-2*border;
int ny = NY-2*border;
int tid = threadIdx.y*blockDim.x+threadIdx.x;
int kin;
int kk;
// SOUTH
for (int b = 0; b < border; b++){
B_out = global_index(U.ptr, U.pitch, 0, -1 - b, border);
B_in = global_index(U.ptr, U.pitch, 0, 0, border);
for (int k = tid; k < nx+border*2; k+=threads){
kk = k-border;
kin = min(kk,nx-1);
kin = max(kin,0);
B_out[kk] = B_in[kin];
}
}
// NORTH
for (int b = 0; b < border; b++){
B_out = global_index(U.ptr, U.pitch, 0, ny + b, border);
B_in = global_index(U.ptr, U.pitch, 0, ny - 1, border);
for (int k = tid; k < nx+border*2; k+=threads){
kk = k-border;
kin = min(kk,nx-1);
kin = max(kin,0);
B_out[kk] = B_in[kin];
}
}
// WEST
for (int k = tid; k < ny+border*2; k+= threads){
kk = k-border;
B_out = global_index(U.ptr, U.pitch, 0, kk, border);
kin = min(kk,nx-1);
kin = max(kin,0);
for (int b = 0; b < border; b++)
B_out[-1-b] = global_index(U.ptr, U.pitch, 0, kin, border)[0];
}
// EAST
for (int k = tid; k < ny+border*2; k+= threads){
kk = k-border;
B_out = global_index(U.ptr, U.pitch, nx, kk, border);
kin = min(kk,nx-1);
kin = max(kin,0);
for (int b = 0; b < border; b++)
B_out[b] = global_index(U.ptr, U.pitch, nx - 1, kin,border)[0];
}
}
void callSetBCOpen(dim3 grid, dim3 block, gpu_raw_ptr U, unsigned int NX, unsigned int NY, int border){
setBCOpen<<<grid, block>>>(U, NX, NY, border);
}
void callCollectiveSetBCOpen(dim3 grid, dim3 block, const collBCKernelArgs* arg){
//cudaMemcpyToSymbolAsync(bc_ctx, arg->, sizeof(collBCKernelArgs), 0, cudaMemcpyHostToDevice);
callSetBCOpen(grid, block, arg->U0, arg->NX, arg->NY, arg->global_border);
callSetBCOpen(grid, block, arg->U1, arg->NX, arg->NY, arg->global_border);
callSetBCOpen(grid, block, arg->U2, arg->NX, arg->NY, arg->global_border);
callSetBCOpen(grid, block, arg->U3, arg->NX, arg->NY, arg->global_border);
}
// Set wall boundary condition
__global__ void setBCWall(gpu_raw_ptr U, unsigned int NX, unsigned int NY, int border){
int threads = blockDim.x*blockDim.y;
float* B_in;
float* B_out;
int nx = NX-2*border;
int ny = NY-2*border;
int tid = threadIdx.y*blockDim.x+threadIdx.x;
int kin;
// SOUTH
for (int b = 0; b < border; b++){
B_out = global_index(U.ptr, U.pitch, 0, -1 - b, border);
B_in = global_index(U.ptr, U.pitch, 0, 0 + b, border);
for (int k = tid-2; k < nx+border; k+=threads){
kin = min(k,nx-1);
kin = max(kin,0);
B_out[k] = B_in[kin];
}
}
// NORTH
for (int b = 0; b < border; b++){
B_out = global_index(U.ptr, U.pitch, 0, ny + b, border);
B_in = global_index(U.ptr, U.pitch, 0, ny - 1 - b, border);
for (int k = tid-2; k < nx+border; k+=threads){
kin = min(k,nx-1);
kin = max(kin,0);
B_out[k] = B_in[kin];
}
}
// WEST
for (int k = tid-2; k < ny; k+= threads){
printf("k: %i", k);
B_out = global_index(U.ptr, U.pitch, 0, k, border);
kin = min(k,nx-1);
kin = max(kin,0);
for (int b = 0; b < border; b++)
B_out[-1-b] = global_index(U.ptr, U.pitch, 0 + b, kin, border)[0];
}
// EAST
for (unsigned int k = tid; k < ny; k+= threads){
B_out = global_index(U.ptr, U.pitch, nx, k, border);
kin = min(k,nx-1);
kin = max(kin,0);
for (int b = 0; b < border; b++)
B_out[b] = global_index(U.ptr, U.pitch, nx - 1 - b, kin,border)[0];
}
}
void callSetBCWall(dim3 grid, dim3 block, gpu_raw_ptr U, unsigned int NX, unsigned int NY, int border){
setBCWall<<<grid, block>>>(U, NX, NY, border);
}
void callCollectiveSetBCWall(dim3 grid, dim3 block, const collBCKernelArgs* arg){
callSetBCWall(grid, block, arg->U0, arg->NX, arg->NY, arg->global_border);
callSetBCWall(grid, block, arg->U1, arg->NX, arg->NY, arg->global_border);
callSetBCWall(grid, block, arg->U2, arg->NX, arg->NY, arg->global_border);
callSetBCWall(grid, block, arg->U3, arg->NX, arg->NY, arg->global_border);
}
|
3413c366426cbe8184789d259dceb5b621012846.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2011-2016, Robert Wang, email: robertwgh (at) gmail.com
All rights reserved. https://github.com/robertwgh/cuLDPC
CUDA implementation of LDPC decoding algorithm.
The details of implementation can be found from the following papers:
1. Wang, G., Wu, M., Sun, Y., & Cavallaro, J. R. (2011, June). A massively parallel implementation of QC-LDPC decoder on GPU. In Application Specific Processors (SASP), 2011 IEEE 9th Symposium on (pp. 82-85). IEEE.
2. Wang, G., Wu, M., Yin, B., & Cavallaro, J. R. (2013, December). High throughput low latency LDPC decoding on GPU for SDR systems. In Global Conference on Signal and Information Processing (GlobalSIP), 2013 IEEE (pp. 1258-1261). IEEE.
The current release is close to the GlobalSIP2013 paper.
Created: 10/1/2010
Revision: 08/01/2013
4/20/2016 prepare for release on Github.
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include "util/helper_cuda.h"
#include "util/timer.h"
#include "cuLDPC.h"
#include "cuLDPC_matrix.h"
// TODO: Including a .cu is strange
#include "cuLDPC_kernel.hip"
float snr ;
long seed ;
float rate ;
int iter ;
// Extern function and variable definition
extern "C"
{
void structure_encode (int s [], int code [], int h[BLK_ROW][BLK_COL]);
void info_gen (int info_bin []);
void modulation (int code [], float trans []);
void awgn (float trans [], float recv []);
void error_check (float trans [], float recv []);
void llr_init (float llr [], float recv []);
int parity_check (float app[]);
error_result cuda_error_check (int info[], int hard_decision[]);
float sigma ;
int *info_bin ;
};
int printDevices();
int runTest();
int printDevices()
{
int deviceCount = 0;
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0)
{
printf("error: no devices supporting CUDA. \n");
}
hipDeviceProp_t deviceProperty;
int currentDeviceID = 0;
checkCudaErrors(hipGetDeviceProperties(&deviceProperty, currentDeviceID));
printf("\ndevice name: %s", deviceProperty.name);
printf("\n");
printf("device sharedMemPerBlock: %Iu \n", deviceProperty.sharedMemPerBlock);
printf("device totalGlobalMem: %Iu \n", deviceProperty.totalGlobalMem);
printf("device regsPerBlock: %d \n", deviceProperty.regsPerBlock);
printf("device warpSize: %d \n", deviceProperty.warpSize);
printf("device memPitch: %Iu \n", deviceProperty.memPitch);
printf("device maxThreadsPerBlock: %d \n", deviceProperty.maxThreadsPerBlock);
printf("device maxThreadsDim[0]: %d \n", deviceProperty.maxThreadsDim[0]);
printf("device maxThreadsDim[1]: %d \n", deviceProperty.maxThreadsDim[1]);
printf("device maxThreadsDim[2]: %d \n", deviceProperty.maxThreadsDim[2]);
printf("device maxGridSize[0]: %d \n", deviceProperty.maxGridSize[0]);
printf("device maxGridSize[1]: %d \n", deviceProperty.maxGridSize[1]);
printf("device maxGridSize[2]: %d \n", deviceProperty.maxGridSize[2]);
printf("device totalConstMem: %Iu \n", deviceProperty.totalConstMem);
printf("device major: %d \n", deviceProperty.major);
printf("device minor: %d \n", deviceProperty.minor);
printf("device clockRate: %d \n", deviceProperty.clockRate);
printf("device textureAlignment: %Iu \n", deviceProperty.textureAlignment);
printf("device deviceOverlap: %d \n", deviceProperty.deviceOverlap);
printf("device multiProcessorCount: %d \n", deviceProperty.multiProcessorCount);
printf("device zero-copy data transfers: %d \n", deviceProperty.canMapHostMemory);
printf("\n");
return hipSuccess;
}
int main()
{
printf("CUDA LDPC Decoder\r\nComputing...\r\n");
printDevices();
printf("number of host CPUs:\t%d\n", omp_get_num_procs());
hipSetDevice(DEVICE_ID);
runTest();
return 0;
}
// TODO: Do we need to define it extern?
// extern "C" int h_base [BLK_ROW][BLK_COL];
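// Build the compact representations of the parity-check matrix, allocate host and device buffers,
// and run the simulation loop over SNR points, CUDA streams and decoding iterations.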
int runTest()
{
h_element h_compact1 [H_COMPACT1_COL][H_COMPACT1_ROW]; // for update dt, R
h_element h_element_temp;
// init the compact matrix
for(int i = 0; i < H_COMPACT1_COL; i++)
{
for(int j = 0; j < H_COMPACT1_ROW; j ++)
{
h_element_temp.x = 0;
h_element_temp.y = 0;
h_element_temp.value = -1;
h_element_temp.valid = 0;
h_compact1[i][j] = h_element_temp; // h[i][0-11], the same column
}
}
// Scan the h matrix and generate its compact representation
for(int i = 0; i < BLK_ROW; i++)
{
int k = 0;
for(int j = 0; j < BLK_COL; j ++)
{
if(h_base[i][j] != -1)
{
h_element_temp.x = i;
h_element_temp.y = j;
h_element_temp.value = h_base[i][j];
h_element_temp.valid = 1;
h_compact1[k][i] = h_element_temp;
k++;
}
}
printf("row %d, #element=%d\n", i, k);
}
// h_compact2
h_element h_compact2 [H_COMPACT2_ROW][H_COMPACT2_COL]; // for update llr
// init the compact matrix
for(int i = 0; i < H_COMPACT2_ROW; i++)
{
for(int j = 0; j < H_COMPACT2_COL; j ++)
{
h_element_temp.x = 0;
h_element_temp.y = 0;
h_element_temp.value = -1;
h_element_temp.valid = 0;
h_compact2[i][j] = h_element_temp;
}
}
for(int j = 0; j < BLK_COL; j++)
{
int k=0;
for(int i = 0; i < BLK_ROW; i ++)
{
if(h_base[i][j] != -1)
{
// although h is transposed, the (x,y) is still (iBlkRow, iBlkCol)
h_element_temp.x = i;
h_element_temp.y = j;
h_element_temp.value = h_base[i][j];
h_element_temp.valid = 1;
h_compact2[k][j] = h_element_temp;
k++;
}
}
}
//int memorySize_h_base = BLK_ROW * BLK_COL * sizeof(int);
int memorySize_h_compact1 = H_COMPACT1_ROW * H_COMPACT1_COL * sizeof(h_element);
int memorySize_h_compact2 = H_COMPACT2_ROW * H_COMPACT2_COL * sizeof(h_element);
int memorySize_infobits = INFO_LEN * sizeof(int);
int memorySize_codeword = CODEWORD_LEN * sizeof(int);
int memorySize_llr = CODEWORD_LEN * sizeof(float);
int memorySize_et = MCW * CW * sizeof(int);
info_bin = (int *) malloc(memorySize_infobits) ;
int *codeword = (int *) malloc(memorySize_codeword) ;
float *trans = (float *) malloc(memorySize_llr) ;
float *recv = (float *) malloc(memorySize_llr) ;
float *APP = (float *) malloc(memorySize_llr) ;
float *llr = (float *) malloc(memorySize_llr) ;
int * et = (int*) malloc(memorySize_et);
rate = (float)0.5f;
seed = 69012 ;
srand (seed);
// Create streams
hipStream_t *streams = (hipStream_t *) malloc(NSTREAMS * sizeof(hipStream_t));
for (int i = 0; i < NSTREAMS; i++)
{
checkCudaErrors(hipStreamCreate(&(streams[i])));
}
//////////////////////////////////////////////////////////////////////////////////
// All the variables with the _cuda suffix are used in host code for the CUDA computation
int memorySize_infobits_cuda = MCW * CW * memorySize_infobits ;
int memorySize_llr_cuda = MCW * CW * CODEWORD_LEN * sizeof(float);
int memorySize_dt_cuda = MCW * CW * ROW * BLK_COL * sizeof(float);
int memorySize_R_cuda = MCW * CW * ROW * BLK_COL * sizeof(float);
int memorySize_hard_decision_cuda = MCW * CW * CODEWORD_LEN * sizeof(int);
int memorySize_et_cuda = MCW * CW * sizeof(int);
int *info_bin_cuda[NSTREAMS];
float *llr_cuda[NSTREAMS];
int * hard_decision_cuda[NSTREAMS];
// Allocate pinned memory for llr and hard_decision data.
#if USE_PINNED_MEM == 1 // pinned memory
for(int i = 0; i < NSTREAMS; i ++)
{
info_bin_cuda[i] = (int *) malloc(memorySize_infobits_cuda);
checkCudaErrors(hipHostMalloc((void **)&llr_cuda[i],
memorySize_llr_cuda, hipHostMallocDefault));
checkCudaErrors(hipHostMalloc((void **)&hard_decision_cuda[i],
memorySize_hard_decision_cuda, hipHostMallocDefault));
}
#else // pageable memory
hard_decision_cuda = (int *) malloc(memorySize_hard_decision_cuda);
llr_cuda = (float *) malloc(memorySize_llr_cuda);
#endif
// create device memory
float * dev_llr[NSTREAMS];
float * dev_dt[NSTREAMS];
float * dev_R[NSTREAMS];
int * dev_hard_decision[NSTREAMS];
int * dev_et[NSTREAMS];
bool b_et;
error_result this_error;
int total_frame_error = 0;
int total_bit_error = 0;
int total_codeword = 0;
int num_of_iteration_for_et = 0;
int iter_cnt=0, iter_num =0;
float aver_iter=0.0f;
for(int i = 0; i < NSTREAMS; i ++)
{
checkCudaErrors(hipMalloc((void **)&dev_llr[i], memorySize_llr_cuda));
checkCudaErrors(hipMalloc((void **)&dev_dt[i], memorySize_dt_cuda));
checkCudaErrors(hipMalloc((void **)&dev_R[i], memorySize_R_cuda));
checkCudaErrors(hipMalloc((void **)&dev_hard_decision[i],
memorySize_hard_decision_cuda));
checkCudaErrors(hipMalloc((void **)&dev_et[i], memorySize_et_cuda));
}
for(int snri = 0; snri < NUM_SNR; snri++)
{
snr = snr_array[snri];
sigma = 1.0f/sqrt(2.0f*rate*pow(10.0f,(snr/10.0f)));
total_codeword = 0;
total_frame_error = 0;
total_bit_error = 0;
iter_num = 0;
aver_iter = 0.0f;
iter_cnt = 0;
// In this version of the code I don't care about the BER performance, so this loop is not strictly needed.
while ( (total_frame_error <= MIN_FER) && (total_codeword <= MIN_CODEWORD))
{
total_codeword += CW * MCW;
for(int i = 0; i < CW * MCW; i++)
{
// generate random data
info_gen (info_bin);
// encode the data
structure_encode (info_bin, codeword, h_base);
// BPSK modulation
modulation (codeword, trans);
// additive white Gaussian noise
awgn (trans, recv);
#ifdef PRINT_MSG
// Error check
error_check (trans, recv);
#endif
// LLR init
llr_init (llr, recv);
// copy the info_bin and llr to the total memory
for(int j = 0; j < NSTREAMS; j ++)
{
memcpy(info_bin_cuda[j] + i * INFO_LEN, info_bin, memorySize_infobits);
memcpy(llr_cuda[j] + i * CODEWORD_LEN, llr, memorySize_llr);
}
}
#if MEASURE_CUDA_TIME == 1
// start the timer
hipEvent_t start_kernel, stop_kernel, start_h2d, stop_h2d, start_d2h, stop_d2h;
hipEvent_t start_memset, stop_memset;
hipEventCreate(&start_kernel);
hipEventCreate(&stop_kernel);
hipEventCreate(&start_h2d);
hipEventCreate(&stop_h2d);
hipEventCreate(&start_d2h);
hipEventCreate(&stop_d2h);
hipEventCreate(&start_memset);
hipEventCreate(&stop_memset);
float time_kernel = 0.0, time_kernel_temp = 0.0;
float time_h2d=0.0, time_h2d_temp = 0.0;
float time_d2h=0.0, time_d2h_temp = 0.0;
float time_memset = 0.0f, time_memset_temp = 0.0;
#endif
// This data transfer happens only once for the whole simulation,
// so its time is not counted in the total time.
// constant memory init.
checkCudaErrors(hipMemcpyToSymbol(dev_h_compact1,
h_compact1,
memorySize_h_compact1));
// constant memory init.
checkCudaErrors(hipMemcpyToSymbol(dev_h_compact2,
h_compact2,
memorySize_h_compact2));
int blockSizeX = (Z + 32 - 1)/ 32 * 32;
// Define CUDA kernel dimension
dim3 dimGridKernel1(BLK_ROW, MCW, 1); // dim of the thread blocks
dim3 dimBlockKernel1(blockSizeX, CW, 1);
int threadsPerBlockKernel1 = blockSizeX * CW;
int sharedRCacheSize = threadsPerBlockKernel1 * NON_EMPTY_ELMENT * sizeof(float);
dim3 dimGridKernel2(BLK_COL, MCW, 1);
dim3 dimBlockKernel2(blockSizeX, CW, 1);
int threadsPerBlockKernel2 = blockSizeX * CW;
int sharedDtCacheSize = threadsPerBlockKernel2 * NON_EMPTY_ELMENT_VNP * sizeof(float);
dim3 dimGridKernel3(MCW, CW, 1);
dim3 dimBlockKernel3(Z, BLK_ROW, 1);
// initialize and start the cpu timer
#if MEASURE_CPU_TIME == 1
float cpu_run_time = 0.0;
Timer cpu_timer;
cpu_timer.start();
#endif
// run the kernel
for(int j = 0; j < MAX_SIM; j++)
{
#if MEASURE_CUDA_TIME == 1
hipEventRecord(start_h2d,0);
//hipEventSynchronize(start_h2d);
#endif
// Transfer LLR data into device.
#if USE_PINNED_MEM == 1
for(int iSt = 0; iSt < NSTREAMS; iSt ++)
{
checkCudaErrors(hipMemcpyAsync(dev_llr[iSt],
llr_cuda[iSt],
memorySize_llr_cuda,
hipMemcpyHostToDevice,
streams[iSt]));
hipStreamSynchronize(streams[iSt]);
}
//hipDeviceSynchronize();
#else
checkCudaErrors(hipMemcpy(dev_llr,
llr_cuda,
memorySize_llr_cuda,
hipMemcpyHostToDevice));
#endif
#if MEASURE_CUDA_TIME == 1
hipEventRecord(stop_h2d, 0);
hipEventSynchronize(stop_h2d);
hipEventElapsedTime(&time_h2d_temp, start_h2d, stop_h2d);
time_h2d+=time_h2d_temp;
#endif
#if MEASURE_CUDA_TIME == 1
hipEventRecord(start_memset,0);
#endif
#if ETA == 1 // early termination algorithm
checkCudaErrors(hipMemset(dev_et, 0, memorySize_et_cuda));
#endif
#if MEASURE_CUDA_TIME == 1
hipEventRecord(stop_memset,0);
hipEventSynchronize(stop_memset);
hipEventElapsedTime(&time_memset_temp, start_memset, stop_memset);
time_memset += time_memset_temp;
#endif
for(int iSt = 0; iSt < NSTREAMS; iSt++)
{
checkCudaErrors(hipMemcpyAsync(dev_llr[iSt],
llr_cuda[iSt],
memorySize_llr_cuda,
hipMemcpyHostToDevice,
streams[iSt]));
// kernel launch
for(int ii = 0; ii < MAX_ITERATION; ii++)
{
// run check-node processing kernel
// TODO: Why run a special kernel the first iteration?
if(ii == 0) {
hipLaunchKernelGGL(( ldpc_cnp_kernel_1st_iter),
dim3( dimGridKernel1),
dim3( dimBlockKernel1),
0,
streams[iSt],
dev_llr[iSt],
dev_dt[iSt],
dev_R[iSt],
dev_et[iSt]);
} else {
hipLaunchKernelGGL(( ldpc_cnp_kernel),
dim3( dimGridKernel1),
dim3( dimBlockKernel1),
sharedRCacheSize,
streams[iSt],
dev_llr[iSt],
dev_dt[iSt],
dev_R[iSt],
dev_et[iSt],
threadsPerBlockKernel1);
}
// run variable-node processing kernel
// for the last iteration we run a special
// kernel. this is because we can make a hard
// decision instead of writing back the belief
// for the value of each bit.
if(ii < MAX_ITERATION - 1) {
hipLaunchKernelGGL(( ldpc_vnp_kernel_normal),
dim3( dimGridKernel2),
dim3( dimBlockKernel2),
0,
streams[iSt],
dev_llr[iSt],
dev_dt[iSt],
dev_et[iSt]);
} else {
hipLaunchKernelGGL(( ldpc_vnp_kernel_last_iter),
dim3( dimGridKernel2),
dim3( dimBlockKernel2),
0,
streams[iSt],
dev_llr[iSt],
dev_dt[iSt],
dev_hard_decision[iSt],
dev_et[iSt]);
}
}
// copy the decoded data from device to host
checkCudaErrors(hipMemcpyAsync(hard_decision_cuda[iSt],
dev_hard_decision[iSt],
memorySize_hard_decision_cuda,
hipMemcpyDeviceToHost,
streams[iSt]));
num_of_iteration_for_et = MAX_ITERATION;
}
hipDeviceSynchronize();
#if MEASURE_CUDA_TIME == 1
hipEventRecord(stop_d2h, 0);
hipEventSynchronize(stop_d2h);
hipEventElapsedTime(&time_d2h_temp, start_d2h, stop_d2h);
time_d2h+=time_d2h_temp;
#endif
#ifdef DISPLAY_BER
for(int iSt = 0; iSt < NSTREAMS; iSt ++)
{
this_error = cuda_error_check(info_bin_cuda[iSt], hard_decision_cuda[iSt]);
total_bit_error += this_error.bit_error;
total_frame_error += this_error.frame_error;
}
#endif
#if ETA == 1
iter_num += num_of_iteration_for_et;
iter_cnt ++;
aver_iter = (float)iter_num * 1.0f / iter_cnt;
#endif
} // end of MAX-SIM
#if MEASURE_CUDA_TIME == 1
hipEventDestroy(start_kernel);
hipEventDestroy(stop_kernel);
hipEventDestroy(start_h2d);
hipEventDestroy(stop_h2d);
hipEventDestroy(start_d2h);
hipEventDestroy(stop_d2h);
#endif
#if MEASURE_CPU_TIME == 1
hipDeviceSynchronize();
cpu_timer.stop();
cpu_run_time += cpu_timer.stop_get();
printf ("\n=================================\n\r");
printf ("GPU CUDA Demo\n");
printf ("SNR = %1.1f dB\n", snr);
printf ("# codewords = %d, # streams = %d, CW=%d, MCW=%d\r\n",
total_codeword * NSTREAMS, NSTREAMS, CW, MCW);
printf("number of iterations = %1.1f \r\n", aver_iter);
printf("CPU time: %f ms, for %d simulations.\n", cpu_run_time, MAX_SIM);
float throughput = (float)CODEWORD_LEN * NSTREAMS * MCW * CW * MAX_SIM;
throughput /= cpu_run_time * 1000;
printf("Throughput = %f Mbps\r\n",
(float)CODEWORD_LEN * NSTREAMS * MCW * CW * MAX_SIM / cpu_run_time / 1000);
#endif
#if MEASURE_CUDA_TIME == 1
printf("Throughput (kernel only) = %f Mbps\r\n",
(float) CODEWORD_LEN * MCW * CW * MAX_SIM / time_kernel / 1000);
printf("Throughput (kernel + transer time) = %f Mbps\r\n",
(float) CODEWORD_LEN * MCW * CW * MAX_SIM / (time_kernel + time_h2d+ time_d2h + time_memset) / 1000);
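// effective H2D bandwidth in MB/s: bytes per transfer divided by the average transfer time (1e3 converts ms to s, 1<<20 converts bytes to MB)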
float bandwidthInMBs = 1e3f * memorySize_llr_cuda;
bandwidthInMBs /= (time_h2d / MAX_SIM) * (float) (1 << 20);
printf("\nh2d (llr): size=%f MB, bandwidthInMBs = %f MB/s\n",
memorySize_llr_cuda /1e6, bandwidthInMBs);
bandwidthInMBs = (1e3f * memorySize_hard_decision_cuda) / ( (time_d2h/MAX_SIM) * (float)(1 << 20));
printf("d2h (hd): size=%f MB, bandwidthInMBs = %f MB/s\n",
memorySize_hard_decision_cuda /1e6, bandwidthInMBs);
printf ("kernel time = %f ms \nh2d time = %f ms \nd2h time = %f ms\n",
time_kernel, time_h2d, time_d2h);
printf ("memset time = %f ms \n", time_memset);
printf ("time difference = %f ms \n",
cpu_run_time - time_kernel - time_h2d - time_d2h - time_memset);
#endif
#ifdef DISPLAY_BER
printf ("# codewords = %d, CW=%d, MCW=%d\r\n",total_codeword, CW, MCW);
printf ("total bit error = %d\n", total_bit_error);
printf ("BER = %1.2e, FER = %1.2e\n",
(float) total_bit_error/total_codeword/INFO_LEN,
(float) total_frame_error/total_codeword);
#endif
} // end of the MAX frame error.
}// end of the snr loop
for(int iSt = 0; iSt < NSTREAMS; iSt ++)
{
checkCudaErrors(hipFree(dev_llr[iSt]));
checkCudaErrors(hipFree(dev_dt[iSt]));
checkCudaErrors(hipFree(dev_R[iSt]));
checkCudaErrors(hipFree(dev_hard_decision[iSt]));
checkCudaErrors(hipFree(dev_et[iSt]));
free(info_bin_cuda[iSt]);
checkCudaErrors(hipStreamDestroy(streams[iSt]));
}
free(info_bin);
free(codeword);
free(trans);
free(recv);
free(llr);
free(et);
#if USE_PINNED_MEM == 1
for(int iSt = 0; iSt < NSTREAMS; iSt ++)
{
checkCudaErrors(hipHostFree(llr_cuda[iSt]));
checkCudaErrors(hipHostFree(hard_decision_cuda[iSt]));
}
#else
free(llr_cuda);
free(hard_decision_cuda);
#endif
return 0;
}
| 3413c366426cbe8184789d259dceb5b621012846.cu | /* Copyright (c) 2011-2016, Robert Wang, email: robertwgh (at) gmail.com
All rights reserved. https://github.com/robertwgh/cuLDPC
CUDA implementation of LDPC decoding algorithm.
The details of implementation can be found from the following papers:
1. Wang, G., Wu, M., Sun, Y., & Cavallaro, J. R. (2011, June). A massively parallel implementation of QC-LDPC decoder on GPU. In Application Specific Processors (SASP), 2011 IEEE 9th Symposium on (pp. 82-85). IEEE.
2. Wang, G., Wu, M., Yin, B., & Cavallaro, J. R. (2013, December). High throughput low latency LDPC decoding on GPU for SDR systems. In Global Conference on Signal and Information Processing (GlobalSIP), 2013 IEEE (pp. 1258-1261). IEEE.
The current release is close to the GlobalSIP2013 paper.
Created: 10/1/2010
Revision: 08/01/2013
4/20/2016 prepare for release on Github.
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
// CUDA runtime
#include <cuda_runtime.h>
#include "util/helper_cuda.h"
#include "util/timer.h"
#include "cuLDPC.h"
#include "cuLDPC_matrix.h"
// TODO: Including a .cu is strange
#include "cuLDPC_kernel.cu"
float snr ;
long seed ;
float rate ;
int iter ;
// Extern function and variable definition
extern "C"
{
void structure_encode (int s [], int code [], int h[BLK_ROW][BLK_COL]);
void info_gen (int info_bin []);
void modulation (int code [], float trans []);
void awgn (float trans [], float recv []);
void error_check (float trans [], float recv []);
void llr_init (float llr [], float recv []);
int parity_check (float app[]);
error_result cuda_error_check (int info[], int hard_decision[]);
float sigma ;
int *info_bin ;
};
int printDevices();
int runTest();
int printDevices()
{
int deviceCount = 0;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0)
{
printf("error: no devices supporting CUDA. \n");
}
cudaDeviceProp deviceProperty;
int currentDeviceID = 0;
checkCudaErrors(cudaGetDeviceProperties(&deviceProperty, currentDeviceID));
printf("\ndevice name: %s", deviceProperty.name);
printf("\n");
printf("device sharedMemPerBlock: %Iu \n", deviceProperty.sharedMemPerBlock);
printf("device totalGlobalMem: %Iu \n", deviceProperty.totalGlobalMem);
printf("device regsPerBlock: %d \n", deviceProperty.regsPerBlock);
printf("device warpSize: %d \n", deviceProperty.warpSize);
printf("device memPitch: %Iu \n", deviceProperty.memPitch);
printf("device maxThreadsPerBlock: %d \n", deviceProperty.maxThreadsPerBlock);
printf("device maxThreadsDim[0]: %d \n", deviceProperty.maxThreadsDim[0]);
printf("device maxThreadsDim[1]: %d \n", deviceProperty.maxThreadsDim[1]);
printf("device maxThreadsDim[2]: %d \n", deviceProperty.maxThreadsDim[2]);
printf("device maxGridSize[0]: %d \n", deviceProperty.maxGridSize[0]);
printf("device maxGridSize[1]: %d \n", deviceProperty.maxGridSize[1]);
printf("device maxGridSize[2]: %d \n", deviceProperty.maxGridSize[2]);
printf("device totalConstMem: %Iu \n", deviceProperty.totalConstMem);
printf("device major: %d \n", deviceProperty.major);
printf("device minor: %d \n", deviceProperty.minor);
printf("device clockRate: %d \n", deviceProperty.clockRate);
printf("device textureAlignment: %Iu \n", deviceProperty.textureAlignment);
printf("device deviceOverlap: %d \n", deviceProperty.deviceOverlap);
printf("device multiProcessorCount: %d \n", deviceProperty.multiProcessorCount);
printf("device zero-copy data transfers: %d \n", deviceProperty.canMapHostMemory);
printf("\n");
return cudaSuccess;
}
int main()
{
printf("CUDA LDPC Decoder\r\nComputing...\r\n");
printDevices();
printf("number of host CPUs:\t%d\n", omp_get_num_procs());
cudaSetDevice(DEVICE_ID);
runTest();
return 0;
}
// TODO: Do we need to define it extern?
// extern "C" int h_base [BLK_ROW][BLK_COL];
int runTest()
{
h_element h_compact1 [H_COMPACT1_COL][H_COMPACT1_ROW]; // for update dt, R
h_element h_element_temp;
// init the compact matrix
for(int i = 0; i < H_COMPACT1_COL; i++)
{
for(int j = 0; j < H_COMPACT1_ROW; j ++)
{
h_element_temp.x = 0;
h_element_temp.y = 0;
h_element_temp.value = -1;
h_element_temp.valid = 0;
h_compact1[i][j] = h_element_temp; // h[i][0-11], the same column
}
}
// scan the h matrix and generate the compact form of h
for(int i = 0; i < BLK_ROW; i++)
{
int k = 0;
for(int j = 0; j < BLK_COL; j ++)
{
if(h_base[i][j] != -1)
{
h_element_temp.x = i;
h_element_temp.y = j;
h_element_temp.value = h_base[i][j];
h_element_temp.valid = 1;
h_compact1[k][i] = h_element_temp;
k++;
}
}
printf("row %d, #element=%d\n", i, k);
}
// h_compact2
h_element h_compact2 [H_COMPACT2_ROW][H_COMPACT2_COL]; // for update llr
// init the compact matrix
for(int i = 0; i < H_COMPACT2_ROW; i++)
{
for(int j = 0; j < H_COMPACT2_COL; j ++)
{
h_element_temp.x = 0;
h_element_temp.y = 0;
h_element_temp.value = -1;
h_element_temp.valid = 0;
h_compact2[i][j] = h_element_temp;
}
}
for(int j = 0; j < BLK_COL; j++)
{
int k=0;
for(int i = 0; i < BLK_ROW; i ++)
{
if(h_base[i][j] != -1)
{
// although h is transposed, the (x,y) is still (iBlkRow, iBlkCol)
h_element_temp.x = i;
h_element_temp.y = j;
h_element_temp.value = h_base[i][j];
h_element_temp.valid = 1;
h_compact2[k][j] = h_element_temp;
k++;
}
}
}
//int memorySize_h_base = BLK_ROW * BLK_COL * sizeof(int);
int memorySize_h_compact1 = H_COMPACT1_ROW * H_COMPACT1_COL * sizeof(h_element);
int memorySize_h_compact2 = H_COMPACT2_ROW * H_COMPACT2_COL * sizeof(h_element);
int memorySize_infobits = INFO_LEN * sizeof(int);
int memorySize_codeword = CODEWORD_LEN * sizeof(int);
int memorySize_llr = CODEWORD_LEN * sizeof(float);
int memorySize_et = MCW * CW * sizeof(int);
info_bin = (int *) malloc(memorySize_infobits) ;
int *codeword = (int *) malloc(memorySize_codeword) ;
float *trans = (float *) malloc(memorySize_llr) ;
float *recv = (float *) malloc(memorySize_llr) ;
float *APP = (float *) malloc(memorySize_llr) ;
float *llr = (float *) malloc(memorySize_llr) ;
int * et = (int*) malloc(memorySize_et);
rate = (float)0.5f;
seed = 69012 ;
srand (seed);
// Create streams
cudaStream_t *streams = (cudaStream_t *) malloc(NSTREAMS * sizeof(cudaStream_t));
for (int i = 0; i < NSTREAMS; i++)
{
checkCudaErrors(cudaStreamCreate(&(streams[i])));
}
//////////////////////////////////////////////////////////////////////////////////
// all the variables with the _cuda suffix are used in host code for the CUDA computation
int memorySize_infobits_cuda = MCW * CW * memorySize_infobits ;
int memorySize_llr_cuda = MCW * CW * CODEWORD_LEN * sizeof(float);
int memorySize_dt_cuda = MCW * CW * ROW * BLK_COL * sizeof(float);
int memorySize_R_cuda = MCW * CW * ROW * BLK_COL * sizeof(float);
int memorySize_hard_decision_cuda = MCW * CW * CODEWORD_LEN * sizeof(int);
int memorySize_et_cuda = MCW * CW * sizeof(int);
int *info_bin_cuda[NSTREAMS];
float *llr_cuda[NSTREAMS];
int * hard_decision_cuda[NSTREAMS];
// Allocate pinned memory for llr and hard_decision data.
#if USE_PINNED_MEM == 1 // pinned memory
for(int i = 0; i < NSTREAMS; i ++)
{
info_bin_cuda[i] = (int *) malloc(memorySize_infobits_cuda);
checkCudaErrors(cudaHostAlloc((void **)&llr_cuda[i],
memorySize_llr_cuda, cudaHostAllocDefault));
checkCudaErrors(cudaHostAlloc((void **)&hard_decision_cuda[i],
memorySize_hard_decision_cuda, cudaHostAllocDefault));
}
#else // pageable memory
hard_decision_cuda = (int *) malloc(memorySize_hard_decision_cuda);
llr_cuda = (float *) malloc(memorySize_llr_cuda);
#endif
// create device memory
float * dev_llr[NSTREAMS];
float * dev_dt[NSTREAMS];
float * dev_R[NSTREAMS];
int * dev_hard_decision[NSTREAMS];
int * dev_et[NSTREAMS];
bool b_et;
error_result this_error;
int total_frame_error = 0;
int total_bit_error = 0;
int total_codeword = 0;
int num_of_iteration_for_et = 0;
int iter_cnt=0, iter_num =0;
float aver_iter=0.0f;
for(int i = 0; i < NSTREAMS; i ++)
{
checkCudaErrors(cudaMalloc((void **)&dev_llr[i], memorySize_llr_cuda));
checkCudaErrors(cudaMalloc((void **)&dev_dt[i], memorySize_dt_cuda));
checkCudaErrors(cudaMalloc((void **)&dev_R[i], memorySize_R_cuda));
checkCudaErrors(cudaMalloc((void **)&dev_hard_decision[i],
memorySize_hard_decision_cuda));
checkCudaErrors(cudaMalloc((void **)&dev_et[i], memorySize_et_cuda));
}
for(int snri = 0; snri < NUM_SNR; snri++)
{
snr = snr_array[snri];
sigma = 1.0f/sqrt(2.0f*rate*pow(10.0f,(snr/10.0f)));
total_codeword = 0;
total_frame_error = 0;
total_bit_error = 0;
iter_num = 0;
aver_iter = 0.0f;
iter_cnt = 0;
// In this version of the code, BER performance is not the main concern, so this loop is not strictly needed.
while ( (total_frame_error <= MIN_FER) && (total_codeword <= MIN_CODEWORD))
{
total_codeword += CW * MCW;
for(int i = 0; i < CW * MCW; i++)
{
// generate random data
info_gen (info_bin);
// encode the data
structure_encode (info_bin, codeword, h_base);
// BPSK modulation
modulation (codeword, trans);
// additive white Gaussian noise
awgn (trans, recv);
#ifdef PRINT_MSG
// Error check
error_check (trans, recv);
#endif
// LLR init
llr_init (llr, recv);
// copy the info_bin and llr to the total memory
for(int j = 0; j < NSTREAMS; j ++)
{
memcpy(info_bin_cuda[j] + i * INFO_LEN, info_bin, memorySize_infobits);
memcpy(llr_cuda[j] + i * CODEWORD_LEN, llr, memorySize_llr);
}
}
#if MEASURE_CUDA_TIME == 1
// start the timer
cudaEvent_t start_kernel, stop_kernel, start_h2d, stop_h2d, start_d2h, stop_d2h;
cudaEvent_t start_memset, stop_memset;
cudaEventCreate(&start_kernel);
cudaEventCreate(&stop_kernel);
cudaEventCreate(&start_h2d);
cudaEventCreate(&stop_h2d);
cudaEventCreate(&start_d2h);
cudaEventCreate(&stop_d2h);
cudaEventCreate(&start_memset);
cudaEventCreate(&stop_memset);
float time_kernel = 0.0, time_kernel_temp = 0.0;
float time_h2d=0.0, time_h2d_temp = 0.0;
float time_d2h=0.0, time_d2h_temp = 0.0;
float time_memset = 0.0f, time_memset_temp = 0.0;
#endif
// This transfer happens only once for the whole simulation,
// so its time is not counted in the total.
// constant memory init.
checkCudaErrors(cudaMemcpyToSymbol(dev_h_compact1,
h_compact1,
memorySize_h_compact1));
// constant memory init.
checkCudaErrors(cudaMemcpyToSymbol(dev_h_compact2,
h_compact2,
memorySize_h_compact2));
int blockSizeX = (Z + 32 - 1)/ 32 * 32;
// Define CUDA kernel dimension
dim3 dimGridKernel1(BLK_ROW, MCW, 1); // dim of the thread blocks
dim3 dimBlockKernel1(blockSizeX, CW, 1);
int threadsPerBlockKernel1 = blockSizeX * CW;
int sharedRCacheSize = threadsPerBlockKernel1 * NON_EMPTY_ELMENT * sizeof(float);
dim3 dimGridKernel2(BLK_COL, MCW, 1);
dim3 dimBlockKernel2(blockSizeX, CW, 1);
int threadsPerBlockKernel2 = blockSizeX * CW;
int sharedDtCacheSize = threadsPerBlockKernel2 * NON_EMPTY_ELMENT_VNP * sizeof(float);
dim3 dimGridKernel3(MCW, CW, 1);
dim3 dimBlockKernel3(Z, BLK_ROW, 1);
// initialize and start the cpu timer
#if MEASURE_CPU_TIME == 1
float cpu_run_time = 0.0;
Timer cpu_timer;
cpu_timer.start();
#endif
// run the kernel
for(int j = 0; j < MAX_SIM; j++)
{
#if MEASURE_CUDA_TIME == 1
cudaEventRecord(start_h2d,0);
//cudaEventSynchronize(start_h2d);
#endif
// Transfer LLR data into device.
#if USE_PINNED_MEM == 1
for(int iSt = 0; iSt < NSTREAMS; iSt ++)
{
checkCudaErrors(cudaMemcpyAsync(dev_llr[iSt],
llr_cuda[iSt],
memorySize_llr_cuda,
cudaMemcpyHostToDevice,
streams[iSt]));
cudaStreamSynchronize(streams[iSt]);
}
//cudaDeviceSynchronize();
#else
checkCudaErrors(cudaMemcpy(dev_llr,
llr_cuda,
memorySize_llr_cuda,
cudaMemcpyHostToDevice));
#endif
#if MEASURE_CUDA_TIME == 1
cudaEventRecord(stop_h2d, 0);
cudaEventSynchronize(stop_h2d);
cudaEventElapsedTime(&time_h2d_temp, start_h2d, stop_h2d);
time_h2d+=time_h2d_temp;
#endif
#if MEASURE_CUDA_TIME == 1
cudaEventRecord(start_memset,0);
#endif
#if ETA == 1 // early termination algorithm
checkCudaErrors(cudaMemset(dev_et, 0, memorySize_et_cuda));
#endif
#if MEASURE_CUDA_TIME == 1
cudaEventRecord(stop_memset,0);
cudaEventSynchronize(stop_memset);
cudaEventElapsedTime(&time_memset_temp, start_memset, stop_memset);
time_memset += time_memset_temp;
#endif
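// for each stream: queue an async H2D copy of the LLRs, run the CNP/VNP decoding iterations, then queue an async D2H copy of the hard decisions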
for(int iSt = 0; iSt < NSTREAMS; iSt++)
{
checkCudaErrors(cudaMemcpyAsync(dev_llr[iSt],
llr_cuda[iSt],
memorySize_llr_cuda,
cudaMemcpyHostToDevice,
streams[iSt]));
// kernel launch
for(int ii = 0; ii < MAX_ITERATION; ii++)
{
// run check-node processing kernel
// TODO: Why run a special kernel the first iteration?
if(ii == 0) {
ldpc_cnp_kernel_1st_iter<<<
dimGridKernel1,
dimBlockKernel1,
0,
streams[iSt]>>>
(dev_llr[iSt],
dev_dt[iSt],
dev_R[iSt],
dev_et[iSt]);
} else {
ldpc_cnp_kernel<<<
dimGridKernel1,
dimBlockKernel1,
sharedRCacheSize,
streams[iSt]>>>
(dev_llr[iSt],
dev_dt[iSt],
dev_R[iSt],
dev_et[iSt],
threadsPerBlockKernel1);
}
// run variable-node processing kernel
// for the last iteration we run a special
// kernel. this is because we can make a hard
// decision instead of writing back the belief
// for the value of each bit.
if(ii < MAX_ITERATION - 1) {
ldpc_vnp_kernel_normal<<<
dimGridKernel2,
dimBlockKernel2,
0,
streams[iSt]>>>
(dev_llr[iSt],
dev_dt[iSt],
dev_et[iSt]);
} else {
ldpc_vnp_kernel_last_iter<<<
dimGridKernel2,
dimBlockKernel2,
0,
streams[iSt]>>>
(dev_llr[iSt],
dev_dt[iSt],
dev_hard_decision[iSt],
dev_et[iSt]);
}
}
// copy the decoded data from device to host
checkCudaErrors(cudaMemcpyAsync(hard_decision_cuda[iSt],
dev_hard_decision[iSt],
memorySize_hard_decision_cuda,
cudaMemcpyDeviceToHost,
streams[iSt]));
num_of_iteration_for_et = MAX_ITERATION;
}
cudaDeviceSynchronize();
#if MEASURE_CUDA_TIME == 1
cudaEventRecord(stop_d2h, 0);
cudaEventSynchronize(stop_d2h);
cudaEventElapsedTime(&time_d2h_temp, start_d2h, stop_d2h);
time_d2h+=time_d2h_temp;
#endif
#ifdef DISPLAY_BER
for(int iSt = 0; iSt < NSTREAMS; iSt ++)
{
this_error = cuda_error_check(info_bin_cuda[iSt], hard_decision_cuda[iSt]);
total_bit_error += this_error.bit_error;
total_frame_error += this_error.frame_error;
}
#endif
#if ETA == 1
iter_num += num_of_iteration_for_et;
iter_cnt ++;
aver_iter = (float)iter_num * 1.0f / iter_cnt;
#endif
} // end of MAX-SIM
#if MEASURE_CUDA_TIME == 1
cudaEventDestroy(start_kernel);
cudaEventDestroy(stop_kernel);
cudaEventDestroy(start_h2d);
cudaEventDestroy(stop_h2d);
cudaEventDestroy(start_d2h);
cudaEventDestroy(stop_d2h);
#endif
#if MEASURE_CPU_TIME == 1
cudaDeviceSynchronize();
cpu_timer.stop();
cpu_run_time += cpu_timer.stop_get();
printf ("\n=================================\n\r");
printf ("GPU CUDA Demo\n");
printf ("SNR = %1.1f dB\n", snr);
printf ("# codewords = %d, # streams = %d, CW=%d, MCW=%d\r\n",
total_codeword * NSTREAMS, NSTREAMS, CW, MCW);
printf("number of iterations = %1.1f \r\n", aver_iter);
printf("CPU time: %f ms, for %d simulations.\n", cpu_run_time, MAX_SIM);
float throughput = (float)CODEWORD_LEN * NSTREAMS * MCW * CW * MAX_SIM;
throughput /= cpu_run_time * 1000;
printf("Throughput = %f Mbps\r\n",
(float)CODEWORD_LEN * NSTREAMS * MCW * CW * MAX_SIM / cpu_run_time / 1000);
#endif
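// NOTE: start_kernel/stop_kernel and start_d2h are never recorded above, so time_kernel stays 0
// and the d2h elapsed time is taken from an unrecorded event; the kernel-only and d2h figures below are not meaningful as written.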
#if MEASURE_CUDA_TIME == 1
printf("Throughput (kernel only) = %f Mbps\r\n",
(float) CODEWORD_LEN * MCW * CW * MAX_SIM / time_kernel / 1000);
printf("Throughput (kernel + transer time) = %f Mbps\r\n",
(float) CODEWORD_LEN * MCW * CW * MAX_SIM / (time_kernel + time_h2d+ time_d2h + time_memset) / 1000);
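// effective H2D bandwidth in MB/s: bytes per transfer divided by the average transfer time (1e3 converts ms to s, 1<<20 converts bytes to MB)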
float bandwidthInMBs = 1e3f * memorySize_llr_cuda;
bandwidthInMBs /= (time_h2d / MAX_SIM) * (float) (1 << 20);
printf("\nh2d (llr): size=%f MB, bandwidthInMBs = %f MB/s\n",
memorySize_llr_cuda /1e6, bandwidthInMBs);
bandwidthInMBs = (1e3f * memorySize_hard_decision_cuda) / ( (time_d2h/MAX_SIM) * (float)(1 << 20));
printf("d2h (hd): size=%f MB, bandwidthInMBs = %f MB/s\n",
memorySize_hard_decision_cuda /1e6, bandwidthInMBs);
printf ("kernel time = %f ms \nh2d time = %f ms \nd2h time = %f ms\n",
time_kernel, time_h2d, time_d2h);
printf ("memset time = %f ms \n", time_memset);
printf ("time difference = %f ms \n",
cpu_run_time - time_kernel - time_h2d - time_d2h - time_memset);
#endif
#ifdef DISPLAY_BER
printf ("# codewords = %d, CW=%d, MCW=%d\r\n",total_codeword, CW, MCW);
printf ("total bit error = %d\n", total_bit_error);
printf ("BER = %1.2e, FER = %1.2e\n",
(float) total_bit_error/total_codeword/INFO_LEN,
(float) total_frame_error/total_codeword);
#endif
} // end of the MAX frame error.
}// end of the snr loop
for(int iSt = 0; iSt < NSTREAMS; iSt ++)
{
checkCudaErrors(cudaFree(dev_llr[iSt]));
checkCudaErrors(cudaFree(dev_dt[iSt]));
checkCudaErrors(cudaFree(dev_R[iSt]));
checkCudaErrors(cudaFree(dev_hard_decision[iSt]));
checkCudaErrors(cudaFree(dev_et[iSt]));
free(info_bin_cuda[iSt]);
checkCudaErrors(cudaStreamDestroy(streams[iSt]));
}
free(info_bin);
free(codeword);
free(trans);
free(recv);
free(llr);
free(et);
#if USE_PINNED_MEM == 1
for(int iSt = 0; iSt < NSTREAMS; iSt ++)
{
checkCudaErrors(cudaFreeHost(llr_cuda[iSt]));
checkCudaErrors(cudaFreeHost(hard_decision_cuda[iSt]));
}
#else
free(llr_cuda);
free(hard_decision_cuda);
#endif
return 0;
}
|
4538943c336372e0c5419dcba2d23673827e065e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <malloc.h>
#include "head.h"
#define tpb 256
void Allocate(double **subT, double **subNUT, double **subMUT, double **subNDT, double **subMDT, double **d_subT, double **d_subBT, double **d_subNUT, double **d_subMUT, double **d_subNDT, double **d_subMDT){
hipError_t Error;
*subT = (double *)malloc(subN*sizeof(double));
*subNUT = (double *)malloc(n*sizeof(double));
*subMUT = (double *)malloc(n*sizeof(double));
*subNDT = (double *)malloc(n*sizeof(double));
*subMDT = (double *)malloc(n*sizeof(double));
hipMalloc((void**)d_subT,subN*sizeof(double));
hipMalloc((void**)d_subBT,subBN*sizeof(double));
hipMalloc((void**)d_subNUT,n*sizeof(double));
hipMalloc((void**)d_subMUT,n*sizeof(double));
hipMalloc((void**)d_subNDT,n*sizeof(double));
Error = hipMalloc((void**)d_subMDT,n*sizeof(double));
if (DEBUG) printf("CUDA Error(malloc d_subMDT) = %s\n", hipGetErrorString(Error));
}
void Send_To_Device(int phase, double **subT, double **subNUT, double **subNDT, double **d_subT, double **d_subNUT, double **d_subNDT){
hipError_t Error;
if(phase == 0){
Error = hipMemcpy(*d_subT, *subT, subN*sizeof(double), hipMemcpyHostToDevice);
if (DEBUG) printf("CUDA Error(copy subT->d_subT) = %s\n", hipGetErrorString(Error));
}
if(phase == 1){
Error = hipMemcpy(*d_subNUT, *subNUT, n*sizeof(double), hipMemcpyHostToDevice);
if (DEBUG) printf("CUDA Error(copy subNUT->d_subNUT) = %s\n", hipGetErrorString(Error));
Error = hipMemcpy(*d_subNDT, *subNDT, n*sizeof(double), hipMemcpyHostToDevice);
if (DEBUG) printf("CUDA Error(copy subNDT->d_subNDT) = %s\n", hipGetErrorString(Error));
}
}
void Send_To_Host(int phase, double **subT, double **subMUT, double **subMDT, double **d_subT, double **d_subMUT, double **d_subMDT){
hipError_t Error;
if(phase == 0){
Error = hipMemcpy(*subT, *d_subT, subN*sizeof(double), hipMemcpyDeviceToHost);
if (DEBUG) printf("CUDA Error(copy d_subT->subT) = %s\n", hipGetErrorString(Error));
}
if(phase == 1){
Error = hipMemcpy(*subMUT, *d_subMUT, n*sizeof(double), hipMemcpyDeviceToHost);
if (DEBUG) printf("CUDA Error(copy d_subMUT->subMUT) = %s\n", hipGetErrorString(Error));
Error = hipMemcpy(*subMDT, *d_subMDT, n*sizeof(double), hipMemcpyDeviceToHost);
if (DEBUG) printf("CUDA Error(copy d_subMDT->subMDT) = %s\n", hipGetErrorString(Error));
}
}
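// boundary0: copy the subdomain interior into the halo-padded buffer d_subBT, wrap the left/right
// columns periodically, and extract the top/bottom rows into d_subMUT/d_subMDT for exchange with the neighboring subdomains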
__global__ void boundary0(double *d_subT, double *d_subBT, double*d_subMUT, double *d_subMDT){
int i = blockDim.x * blockIdx.x +threadIdx.x;
int x, id;
if(i<subN){
x = i/n;
id = i+(n+2)+1+2*x;
d_subBT[id] = d_subT[i];
}
if(i<subn){
d_subBT[(i+1)*(n+2)] = d_subT[i*n+n-1];
d_subBT[(i+1)*(n+2)+n+1] = d_subT[i*n];
}
if(i<n){
d_subMUT[i] = d_subT[i];
d_subMDT[i] = d_subT[(subn-1)*n+i];
}
}
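// boundary1: fill the top and bottom halo rows of the padded buffer with the rows received from the neighboring subdomains (d_subNDT/d_subNUT)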
__global__ void boundary1(double *d_subBT, double*d_subNUT, double *d_subNDT){
int i = blockDim.x * blockIdx.x +threadIdx.x;
if(i<n){
d_subBT[i+1] = d_subNDT[i];
d_subBT[(subn+1)*(n+2)+i+1] = d_subNUT[i];
}
}
void CUDA_bdy(int phase, double **d_subT, double **d_subBT, double **d_subNUT, double **d_subMUT, double **d_subNDT, double **d_subMDT){
int bpg0 = (subN+tpb-1)/tpb;
int bpg1 = (n+tpb-1)/tpb;
if(phase == 0)hipLaunchKernelGGL(( boundary0), dim3(bpg0), dim3(tpb), 0, 0, *d_subT, *d_subBT, *d_subMUT, *d_subMDT);
if(phase == 1)hipLaunchKernelGGL(( boundary1), dim3(bpg1), dim3(tpb), 0, 0, *d_subBT, *d_subNUT, *d_subNDT);
hipDeviceSynchronize();
}
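// Forward_Euler: explicit forward-Euler update of the 2D heat equation, a 5-point stencil with coefficient 0.1
// reading from the halo-padded buffer d_subBT and writing the updated interior into d_subT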
__global__ void Forward_Euler(double *d_subT, double *d_subBT){
int i = blockDim.x * blockIdx.x +threadIdx.x;
int id, x;
if(i<subN){
x = i/n;
id = i+(n+2)+1+2*x;
d_subT[i] = d_subBT[id] + 0.1*(d_subBT[id-(n+2)] + d_subBT[id+(n+2)] + d_subBT[id-1] + d_subBT[id+1] - 4*d_subBT[id]);
}
}
void CUDA_FE(double **d_subT, double **d_subBT){
int bpg = (subN+tpb-1)/tpb;
hipLaunchKernelGGL(( Forward_Euler), dim3(bpg), dim3(tpb), 0, 0, *d_subT, *d_subBT);
}
void Free(double **subT, double **subNUT, double **subMUT, double **subNDT, double **subMDT, double **d_subT, double **d_subBT, double **d_subNUT, double **d_subMUT, double **d_subNDT, double **d_subMDT){
free(*subT);
free(*subMUT);free(*subMDT);
free(*subNUT);free(*subNDT);
hipFree(*d_subT);hipFree(*d_subBT);
hipFree(*d_subNUT);hipFree(*d_subMUT);
hipFree(*d_subNDT);hipFree(*d_subMDT);
}
| 4538943c336372e0c5419dcba2d23673827e065e.cu | #include <stdio.h>
#include <malloc.h>
#include "head.h"
#define tpb 256
void Allocate(double **subT, double **subNUT, double **subMUT, double **subNDT, double **subMDT, double **d_subT, double **d_subBT, double **d_subNUT, double **d_subMUT, double **d_subNDT, double **d_subMDT){
cudaError_t Error;
*subT = (double *)malloc(subN*sizeof(double));
*subNUT = (double *)malloc(n*sizeof(double));
*subMUT = (double *)malloc(n*sizeof(double));
*subNDT = (double *)malloc(n*sizeof(double));
*subMDT = (double *)malloc(n*sizeof(double));
cudaMalloc((void**)d_subT,subN*sizeof(double));
cudaMalloc((void**)d_subBT,subBN*sizeof(double));
cudaMalloc((void**)d_subNUT,n*sizeof(double));
cudaMalloc((void**)d_subMUT,n*sizeof(double));
cudaMalloc((void**)d_subNDT,n*sizeof(double));
Error = cudaMalloc((void**)d_subMDT,n*sizeof(double));
if (DEBUG) printf("CUDA Error(malloc d_subMDT) = %s\n", cudaGetErrorString(Error));
}
void Send_To_Device(int phase, double **subT, double **subNUT, double **subNDT, double **d_subT, double **d_subNUT, double **d_subNDT){
cudaError_t Error;
if(phase == 0){
Error = cudaMemcpy(*d_subT, *subT, subN*sizeof(double), cudaMemcpyHostToDevice);
if (DEBUG) printf("CUDA Error(copy subT->d_subT) = %s\n", cudaGetErrorString(Error));
}
if(phase == 1){
Error = cudaMemcpy(*d_subNUT, *subNUT, n*sizeof(double), cudaMemcpyHostToDevice);
if (DEBUG) printf("CUDA Error(copy subNUT->d_subNUT) = %s\n", cudaGetErrorString(Error));
Error = cudaMemcpy(*d_subNDT, *subNDT, n*sizeof(double), cudaMemcpyHostToDevice);
if (DEBUG) printf("CUDA Error(copy subNDT->d_subNDT) = %s\n", cudaGetErrorString(Error));
}
}
void Send_To_Host(int phase, double **subT, double **subMUT, double **subMDT, double **d_subT, double **d_subMUT, double **d_subMDT){
cudaError_t Error;
if(phase == 0){
Error = cudaMemcpy(*subT, *d_subT, subN*sizeof(double), cudaMemcpyDeviceToHost);
if (DEBUG) printf("CUDA Error(copy d_subT->subT) = %s\n", cudaGetErrorString(Error));
}
if(phase == 1){
Error = cudaMemcpy(*subMUT, *d_subMUT, n*sizeof(double), cudaMemcpyDeviceToHost);
if (DEBUG) printf("CUDA Error(copy d_subMUT->subMUT) = %s\n", cudaGetErrorString(Error));
Error = cudaMemcpy(*subMDT, *d_subMDT, n*sizeof(double), cudaMemcpyDeviceToHost);
if (DEBUG) printf("CUDA Error(copy d_subMDT->subMDT) = %s\n", cudaGetErrorString(Error));
}
}
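// boundary0: copy the subdomain interior into the halo-padded buffer d_subBT, wrap the left/right
// columns periodically, and extract the top/bottom rows into d_subMUT/d_subMDT for exchange with the neighboring subdomains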
__global__ void boundary0(double *d_subT, double *d_subBT, double*d_subMUT, double *d_subMDT){
int i = blockDim.x * blockIdx.x +threadIdx.x;
int x, id;
if(i<subN){
x = i/n;
id = i+(n+2)+1+2*x;
d_subBT[id] = d_subT[i];
}
if(i<subn){
d_subBT[(i+1)*(n+2)] = d_subT[i*n+n-1];
d_subBT[(i+1)*(n+2)+n+1] = d_subT[i*n];
}
if(i<n){
d_subMUT[i] = d_subT[i];
d_subMDT[i] = d_subT[(subn-1)*n+i];
}
}
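// boundary1: fill the top and bottom halo rows of the padded buffer with the rows received from the neighboring subdomains (d_subNDT/d_subNUT)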
__global__ void boundary1(double *d_subBT, double*d_subNUT, double *d_subNDT){
int i = blockDim.x * blockIdx.x +threadIdx.x;
if(i<n){
d_subBT[i+1] = d_subNDT[i];
d_subBT[(subn+1)*(n+2)+i+1] = d_subNUT[i];
}
}
void CUDA_bdy(int phase, double **d_subT, double **d_subBT, double **d_subNUT, double **d_subMUT, double **d_subNDT, double **d_subMDT){
int bpg0 = (subN+tpb-1)/tpb;
int bpg1 = (n+tpb-1)/tpb;
if(phase == 0) boundary0<<<bpg0, tpb>>>(*d_subT, *d_subBT, *d_subMUT, *d_subMDT);
if(phase == 1) boundary1<<<bpg1, tpb>>>(*d_subBT, *d_subNUT, *d_subNDT);
cudaDeviceSynchronize();
}
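// Forward_Euler: explicit forward-Euler update of the 2D heat equation, a 5-point stencil with coefficient 0.1
// reading from the halo-padded buffer d_subBT and writing the updated interior into d_subT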
__global__ void Forward_Euler(double *d_subT, double *d_subBT){
int i = blockDim.x * blockIdx.x +threadIdx.x;
int id, x;
if(i<subN){
x = i/n;
id = i+(n+2)+1+2*x;
d_subT[i] = d_subBT[id] + 0.1*(d_subBT[id-(n+2)] + d_subBT[id+(n+2)] + d_subBT[id-1] + d_subBT[id+1] - 4*d_subBT[id]);
}
}
void CUDA_FE(double **d_subT, double **d_subBT){
int bpg = (subN+tpb-1)/tpb;
Forward_Euler<<<bpg, tpb>>>(*d_subT, *d_subBT);
}
void Free(double **subT, double **subNUT, double **subMUT, double **subNDT, double **subMDT, double **d_subT, double **d_subBT, double **d_subNUT, double **d_subMUT, double **d_subNDT, double **d_subMDT){
free(*subT);
free(*subMUT);free(*subMDT);
free(*subNUT);free(*subNDT);
cudaFree(*d_subT);cudaFree(*d_subBT);
cudaFree(*d_subNUT);cudaFree(*d_subMUT);
cudaFree(*d_subNDT);cudaFree(*d_subMDT);
}
|
f9d09d146035d3b37b89392839833d57c945d7fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include "CUDACore/cudaCheck.h"
#include "CUDACore/requireDevices.h"
#include "test_common.h"
using namespace Eigen;
using Matrix5d = Matrix<double, 5, 5>;
__host__ __device__ void eigenValues(Matrix3d *m, Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret) {
#if TEST_DEBUG
printf("Matrix(0,0): %f\n", (*m)(0, 0));
printf("Matrix(1,1): %f\n", (*m)(1, 1));
printf("Matrix(2,2): %f\n", (*m)(2, 2));
#endif
SelfAdjointEigenSolver<Matrix3d> es;
es.computeDirect(*m);
(*ret) = es.eigenvalues();
return;
}
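// device-side wrappers: each kernel below runs the corresponding Eigen operation (eigenvalues, inverse, product)
// once on the GPU so the result can be compared against the host computation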
__global__ void kernel(Matrix3d *m, Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret) {
eigenValues(m, ret);
}
__global__ void kernelInverse3x3(Matrix3d *in, Matrix3d *out) { (*out) = in->inverse(); }
__global__ void kernelInverse4x4(Matrix4d *in, Matrix4d *out) { (*out) = in->inverse(); }
__global__ void kernelInverse5x5(Matrix5d *in, Matrix5d *out) { (*out) = in->inverse(); }
template <typename M1, typename M2, typename M3>
__global__ void kernelMultiply(M1 *J, M2 *C, M3 *result) {
// Map<M3> res(result->data());
#if TEST_DEBUG
printf("*** GPU IN ***\n");
#endif
printIt(J);
printIt(C);
// res.noalias() = (*J) * (*C);
// printIt(&res);
(*result) = (*J) * (*C);
#if TEST_DEBUG
printf("*** GPU OUT ***\n");
#endif
return;
}
template <int row1, int col1, int row2, int col2>
void testMultiply() {
std::cout << "TEST MULTIPLY" << std::endl;
std::cout << "Product of type " << row1 << "x" << col1 << " * " << row2 << "x" << col2 << std::endl;
Eigen::Matrix<double, row1, col1> J;
fillMatrix(J);
Eigen::Matrix<double, row2, col2> C;
fillMatrix(C);
Eigen::Matrix<double, row1, col2> multiply_result = J * C;
#if TEST_DEBUG
std::cout << "Input J:" << std::endl;
printIt(&J);
std::cout << "Input C:" << std::endl;
printIt(&C);
std::cout << "Output:" << std::endl;
printIt(&multiply_result);
#endif
// GPU
Eigen::Matrix<double, row1, col1> *JGPU = nullptr;
Eigen::Matrix<double, row2, col2> *CGPU = nullptr;
Eigen::Matrix<double, row1, col2> *multiply_resultGPU = nullptr;
Eigen::Matrix<double, row1, col2> *multiply_resultGPUret = new Eigen::Matrix<double, row1, col2>();
cudaCheck(hipMalloc((void **)&JGPU, sizeof(Eigen::Matrix<double, row1, col1>)));
cudaCheck(hipMalloc((void **)&CGPU, sizeof(Eigen::Matrix<double, row2, col2>)));
cudaCheck(hipMalloc((void **)&multiply_resultGPU, sizeof(Eigen::Matrix<double, row1, col2>)));
cudaCheck(hipMemcpy(JGPU, &J, sizeof(Eigen::Matrix<double, row1, col1>), hipMemcpyHostToDevice));
cudaCheck(hipMemcpy(CGPU, &C, sizeof(Eigen::Matrix<double, row2, col2>), hipMemcpyHostToDevice));
cudaCheck(hipMemcpy(
multiply_resultGPU, &multiply_result, sizeof(Eigen::Matrix<double, row1, col2>), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernelMultiply), dim3(1), dim3(1), 0, 0, JGPU, CGPU, multiply_resultGPU);
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(
multiply_resultGPUret, multiply_resultGPU, sizeof(Eigen::Matrix<double, row1, col2>), hipMemcpyDeviceToHost));
printIt(multiply_resultGPUret);
assert(isEqualFuzzy(multiply_result, (*multiply_resultGPUret)));
}
void testInverse3x3() {
std::cout << "TEST INVERSE 3x3" << std::endl;
Matrix3d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix3d m_inv = m.inverse();
Matrix3d *mGPU = nullptr;
Matrix3d *mGPUret = nullptr;
Matrix3d *mCPUret = new Matrix3d();
#if TEST_DEBUG
std::cout << "Here is the matrix m:" << std::endl << m << std::endl;
std::cout << "Its inverse is:" << std::endl << m.inverse() << std::endl;
#endif
cudaCheck(hipMalloc((void **)&mGPU, sizeof(Matrix3d)));
cudaCheck(hipMalloc((void **)&mGPUret, sizeof(Matrix3d)));
cudaCheck(hipMemcpy(mGPU, &m, sizeof(Matrix3d), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernelInverse3x3), dim3(1), dim3(1), 0, 0, mGPU, mGPUret);
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(mCPUret, mGPUret, sizeof(Matrix3d), hipMemcpyDeviceToHost));
#if TEST_DEBUG
std::cout << "Its GPU inverse is:" << std::endl << (*mCPUret) << std::endl;
#endif
assert(isEqualFuzzy(m_inv, *mCPUret));
}
void testInverse4x4() {
std::cout << "TEST INVERSE 4x4" << std::endl;
Matrix4d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix4d m_inv = m.inverse();
Matrix4d *mGPU = nullptr;
Matrix4d *mGPUret = nullptr;
Matrix4d *mCPUret = new Matrix4d();
#if TEST_DEBUG
std::cout << "Here is the matrix m:" << std::endl << m << std::endl;
std::cout << "Its inverse is:" << std::endl << m.inverse() << std::endl;
#endif
cudaCheck(hipMalloc((void **)&mGPU, sizeof(Matrix4d)));
cudaCheck(hipMalloc((void **)&mGPUret, sizeof(Matrix4d)));
cudaCheck(hipMemcpy(mGPU, &m, sizeof(Matrix4d), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernelInverse4x4), dim3(1), dim3(1), 0, 0, mGPU, mGPUret);
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(mCPUret, mGPUret, sizeof(Matrix4d), hipMemcpyDeviceToHost));
#if TEST_DEBUG
std::cout << "Its GPU inverse is:" << std::endl << (*mCPUret) << std::endl;
#endif
assert(isEqualFuzzy(m_inv, *mCPUret));
}
void testInverse5x5() {
std::cout << "TEST INVERSE 5x5" << std::endl;
Matrix5d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix5d m_inv = m.inverse();
Matrix5d *mGPU = nullptr;
Matrix5d *mGPUret = nullptr;
Matrix5d *mCPUret = new Matrix5d();
#if TEST_DEBUG
std::cout << "Here is the matrix m:" << std::endl << m << std::endl;
std::cout << "Its inverse is:" << std::endl << m.inverse() << std::endl;
#endif
cudaCheck(hipMalloc((void **)&mGPU, sizeof(Matrix5d)));
cudaCheck(hipMalloc((void **)&mGPUret, sizeof(Matrix5d)));
cudaCheck(hipMemcpy(mGPU, &m, sizeof(Matrix5d), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernelInverse5x5), dim3(1), dim3(1), 0, 0, mGPU, mGPUret);
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(mCPUret, mGPUret, sizeof(Matrix5d), hipMemcpyDeviceToHost));
#if TEST_DEBUG
std::cout << "Its GPU inverse is:" << std::endl << (*mCPUret) << std::endl;
#endif
assert(isEqualFuzzy(m_inv, *mCPUret));
}
void testEigenvalues() {
std::cout << "TEST EIGENVALUES" << std::endl;
Matrix3d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix3d *m_gpu = nullptr;
Matrix3d *mgpudebug = new Matrix3d();
Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret =
new Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType;
Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret1 =
new Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType;
Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret_gpu = nullptr;
eigenValues(&m, ret);
#if TEST_DEBUG
std::cout << "Generated Matrix M 3x3:\n" << m << std::endl;
std::cout << "The eigenvalues of M are:" << std::endl << (*ret) << std::endl;
std::cout << "*************************\n\n" << std::endl;
#endif
cudaCheck(hipMalloc((void **)&m_gpu, sizeof(Matrix3d)));
cudaCheck(hipMalloc((void **)&ret_gpu, sizeof(Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType)));
cudaCheck(hipMemcpy(m_gpu, &m, sizeof(Matrix3d), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, m_gpu, ret_gpu);
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(mgpudebug, m_gpu, sizeof(Matrix3d), hipMemcpyDeviceToHost));
cudaCheck(
hipMemcpy(ret1, ret_gpu, sizeof(Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType), hipMemcpyDeviceToHost));
#if TEST_DEBUG
std::cout << "GPU Generated Matrix M 3x3:\n" << (*mgpudebug) << std::endl;
std::cout << "GPU The eigenvalues of M are:" << std::endl << (*ret1) << std::endl;
std::cout << "*************************\n\n" << std::endl;
#endif
assert(isEqualFuzzy(*ret, *ret1));
}
int main(int argc, char *argv[]) {
cms::hiptest::requireDevices();
testEigenvalues();
testInverse3x3();
testInverse4x4();
testInverse5x5();
testMultiply<1, 2, 2, 1>();
testMultiply<1, 2, 2, 2>();
testMultiply<1, 2, 2, 3>();
testMultiply<1, 2, 2, 4>();
testMultiply<1, 2, 2, 5>();
testMultiply<2, 1, 1, 2>();
testMultiply<2, 1, 1, 3>();
testMultiply<2, 1, 1, 4>();
testMultiply<2, 1, 1, 5>();
testMultiply<2, 2, 2, 2>();
testMultiply<2, 3, 3, 1>();
testMultiply<2, 3, 3, 2>();
testMultiply<2, 3, 3, 4>();
testMultiply<2, 3, 3, 5>();
testMultiply<3, 2, 2, 3>();
testMultiply<2, 3, 3, 3>(); // DOES NOT COMPILE W/O PATCHING EIGEN
testMultiply<3, 3, 3, 3>();
testMultiply<8, 8, 8, 8>();
testMultiply<3, 4, 4, 3>();
testMultiply<2, 4, 4, 2>();
testMultiply<3, 4, 4, 2>(); // DOES NOT COMPILE W/O PATCHING EIGEN
return 0;
}
| f9d09d146035d3b37b89392839833d57c945d7fe.cu | #include "hip/hip_runtime.h"
#include <iostream>
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include "CUDACore/cudaCheck.h"
#include "CUDACore/requireDevices.h"
#include "test_common.h"
using namespace Eigen;
using Matrix5d = Matrix<double, 5, 5>;
__host__ __device__ void eigenValues(Matrix3d *m, Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret) {
#if TEST_DEBUG
printf("Matrix(0,0): %f\n", (*m)(0, 0));
printf("Matrix(1,1): %f\n", (*m)(1, 1));
printf("Matrix(2,2): %f\n", (*m)(2, 2));
#endif
SelfAdjointEigenSolver<Matrix3d> es;
es.computeDirect(*m);
(*ret) = es.eigenvalues();
return;
}
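// device-side wrappers: each kernel below runs the corresponding Eigen operation (eigenvalues, inverse, product)
// once on the GPU so the result can be compared against the host computation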
__global__ void kernel(Matrix3d *m, Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret) {
eigenValues(m, ret);
}
__global__ void kernelInverse3x3(Matrix3d *in, Matrix3d *out) { (*out) = in->inverse(); }
__global__ void kernelInverse4x4(Matrix4d *in, Matrix4d *out) { (*out) = in->inverse(); }
__global__ void kernelInverse5x5(Matrix5d *in, Matrix5d *out) { (*out) = in->inverse(); }
template <typename M1, typename M2, typename M3>
__global__ void kernelMultiply(M1 *J, M2 *C, M3 *result) {
// Map<M3> res(result->data());
#if TEST_DEBUG
printf("*** GPU IN ***\n");
#endif
printIt(J);
printIt(C);
// res.noalias() = (*J) * (*C);
// printIt(&res);
(*result) = (*J) * (*C);
#if TEST_DEBUG
printf("*** GPU OUT ***\n");
#endif
return;
}
template <int row1, int col1, int row2, int col2>
void testMultiply() {
std::cout << "TEST MULTIPLY" << std::endl;
std::cout << "Product of type " << row1 << "x" << col1 << " * " << row2 << "x" << col2 << std::endl;
Eigen::Matrix<double, row1, col1> J;
fillMatrix(J);
Eigen::Matrix<double, row2, col2> C;
fillMatrix(C);
Eigen::Matrix<double, row1, col2> multiply_result = J * C;
#if TEST_DEBUG
std::cout << "Input J:" << std::endl;
printIt(&J);
std::cout << "Input C:" << std::endl;
printIt(&C);
std::cout << "Output:" << std::endl;
printIt(&multiply_result);
#endif
// GPU
Eigen::Matrix<double, row1, col1> *JGPU = nullptr;
Eigen::Matrix<double, row2, col2> *CGPU = nullptr;
Eigen::Matrix<double, row1, col2> *multiply_resultGPU = nullptr;
Eigen::Matrix<double, row1, col2> *multiply_resultGPUret = new Eigen::Matrix<double, row1, col2>();
cudaCheck(hipMalloc((void **)&JGPU, sizeof(Eigen::Matrix<double, row1, col1>)));
cudaCheck(hipMalloc((void **)&CGPU, sizeof(Eigen::Matrix<double, row2, col2>)));
cudaCheck(hipMalloc((void **)&multiply_resultGPU, sizeof(Eigen::Matrix<double, row1, col2>)));
cudaCheck(hipMemcpy(JGPU, &J, sizeof(Eigen::Matrix<double, row1, col1>), hipMemcpyHostToDevice));
cudaCheck(hipMemcpy(CGPU, &C, sizeof(Eigen::Matrix<double, row2, col2>), hipMemcpyHostToDevice));
cudaCheck(hipMemcpy(
multiply_resultGPU, &multiply_result, sizeof(Eigen::Matrix<double, row1, col2>), hipMemcpyHostToDevice));
kernelMultiply<<<1, 1, 0, 0>>>(JGPU, CGPU, multiply_resultGPU);
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(
multiply_resultGPUret, multiply_resultGPU, sizeof(Eigen::Matrix<double, row1, col2>), hipMemcpyDeviceToHost));
printIt(multiply_resultGPUret);
assert(isEqualFuzzy(multiply_result, (*multiply_resultGPUret)));
}
void testInverse3x3() {
std::cout << "TEST INVERSE 3x3" << std::endl;
Matrix3d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix3d m_inv = m.inverse();
Matrix3d *mGPU = nullptr;
Matrix3d *mGPUret = nullptr;
Matrix3d *mCPUret = new Matrix3d();
#if TEST_DEBUG
std::cout << "Here is the matrix m:" << std::endl << m << std::endl;
std::cout << "Its inverse is:" << std::endl << m.inverse() << std::endl;
#endif
cudaCheck(hipMalloc((void **)&mGPU, sizeof(Matrix3d)));
cudaCheck(hipMalloc((void **)&mGPUret, sizeof(Matrix3d)));
cudaCheck(hipMemcpy(mGPU, &m, sizeof(Matrix3d), hipMemcpyHostToDevice));
kernelInverse3x3<<<1, 1, 0, 0>>>(mGPU, mGPUret);
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(mCPUret, mGPUret, sizeof(Matrix3d), hipMemcpyDeviceToHost));
#if TEST_DEBUG
std::cout << "Its GPU inverse is:" << std::endl << (*mCPUret) << std::endl;
#endif
assert(isEqualFuzzy(m_inv, *mCPUret));
}
void testInverse4x4() {
std::cout << "TEST INVERSE 4x4" << std::endl;
Matrix4d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix4d m_inv = m.inverse();
Matrix4d *mGPU = nullptr;
Matrix4d *mGPUret = nullptr;
Matrix4d *mCPUret = new Matrix4d();
#if TEST_DEBUG
std::cout << "Here is the matrix m:" << std::endl << m << std::endl;
std::cout << "Its inverse is:" << std::endl << m.inverse() << std::endl;
#endif
cudaCheck(hipMalloc((void **)&mGPU, sizeof(Matrix4d)));
cudaCheck(hipMalloc((void **)&mGPUret, sizeof(Matrix4d)));
cudaCheck(hipMemcpy(mGPU, &m, sizeof(Matrix4d), hipMemcpyHostToDevice));
kernelInverse4x4<<<1, 1, 0, 0>>>(mGPU, mGPUret);
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(mCPUret, mGPUret, sizeof(Matrix4d), hipMemcpyDeviceToHost));
#if TEST_DEBUG
std::cout << "Its GPU inverse is:" << std::endl << (*mCPUret) << std::endl;
#endif
assert(isEqualFuzzy(m_inv, *mCPUret));
}
void testInverse5x5() {
std::cout << "TEST INVERSE 5x5" << std::endl;
Matrix5d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix5d m_inv = m.inverse();
Matrix5d *mGPU = nullptr;
Matrix5d *mGPUret = nullptr;
Matrix5d *mCPUret = new Matrix5d();
#if TEST_DEBUG
std::cout << "Here is the matrix m:" << std::endl << m << std::endl;
std::cout << "Its inverse is:" << std::endl << m.inverse() << std::endl;
#endif
cudaCheck(hipMalloc((void **)&mGPU, sizeof(Matrix5d)));
cudaCheck(hipMalloc((void **)&mGPUret, sizeof(Matrix5d)));
cudaCheck(hipMemcpy(mGPU, &m, sizeof(Matrix5d), hipMemcpyHostToDevice));
kernelInverse5x5<<<1, 1, 0, 0>>>(mGPU, mGPUret);
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(mCPUret, mGPUret, sizeof(Matrix5d), hipMemcpyDeviceToHost));
#if TEST_DEBUG
std::cout << "Its GPU inverse is:" << std::endl << (*mCPUret) << std::endl;
#endif
assert(isEqualFuzzy(m_inv, *mCPUret));
}
void testEigenvalues() {
std::cout << "TEST EIGENVALUES" << std::endl;
Matrix3d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix3d *m_gpu = nullptr;
Matrix3d *mgpudebug = new Matrix3d();
Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret =
new Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType;
Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret1 =
new Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType;
Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret_gpu = nullptr;
eigenValues(&m, ret);
#if TEST_DEBUG
std::cout << "Generated Matrix M 3x3:\n" << m << std::endl;
std::cout << "The eigenvalues of M are:" << std::endl << (*ret) << std::endl;
std::cout << "*************************\n\n" << std::endl;
#endif
cudaCheck(hipMalloc((void **)&m_gpu, sizeof(Matrix3d)));
cudaCheck(hipMalloc((void **)&ret_gpu, sizeof(Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType)));
cudaCheck(hipMemcpy(m_gpu, &m, sizeof(Matrix3d), hipMemcpyHostToDevice));
kernel<<<1, 1, 0, 0>>>(m_gpu, ret_gpu);
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(mgpudebug, m_gpu, sizeof(Matrix3d), hipMemcpyDeviceToHost));
cudaCheck(
hipMemcpy(ret1, ret_gpu, sizeof(Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType), hipMemcpyDeviceToHost));
#if TEST_DEBUG
std::cout << "GPU Generated Matrix M 3x3:\n" << (*mgpudebug) << std::endl;
std::cout << "GPU The eigenvalues of M are:" << std::endl << (*ret1) << std::endl;
std::cout << "*************************\n\n" << std::endl;
#endif
assert(isEqualFuzzy(*ret, *ret1));
}
int main(int argc, char *argv[]) {
cms::hiptest::requireDevices();
testEigenvalues();
testInverse3x3();
testInverse4x4();
testInverse5x5();
testMultiply<1, 2, 2, 1>();
testMultiply<1, 2, 2, 2>();
testMultiply<1, 2, 2, 3>();
testMultiply<1, 2, 2, 4>();
testMultiply<1, 2, 2, 5>();
testMultiply<2, 1, 1, 2>();
testMultiply<2, 1, 1, 3>();
testMultiply<2, 1, 1, 4>();
testMultiply<2, 1, 1, 5>();
testMultiply<2, 2, 2, 2>();
testMultiply<2, 3, 3, 1>();
testMultiply<2, 3, 3, 2>();
testMultiply<2, 3, 3, 4>();
testMultiply<2, 3, 3, 5>();
testMultiply<3, 2, 2, 3>();
testMultiply<2, 3, 3, 3>(); // DOES NOT COMPILE W/O PATCHING EIGEN
testMultiply<3, 3, 3, 3>();
testMultiply<8, 8, 8, 8>();
testMultiply<3, 4, 4, 3>();
testMultiply<2, 4, 4, 2>();
testMultiply<3, 4, 4, 2>(); // DOES NOT COMPILE W/O PATCHING EIGEN
return 0;
}
|
03adfede8e205a7dff52a9421c3172b76943ecc3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#define array_size 268435456
//## KERNEL FOR VECTOR ADDITION IN 1 STREAMING MULTIPROCESSOR ##//
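// NOTE: the original file launches vector_add below but never defines it; this is a minimal
// assumed implementation (a thread-stride loop, since a single block of block_size threads
// is launched over the whole array) added so that the launch has a matching kernel.
__global__ void vector_add(float *out, const float *a, const float *b, int n){
for(int i = threadIdx.x; i < n; i += blockDim.x){
out[i] = a[i] + b[i];
}
}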
extern double mysecond();
int main(){
float *a, *b, *out;
float *d_a, *d_b, *d_out;
double t;
// Allocate host memory
a = (float*)malloc(sizeof(float) * array_size);
b = (float*)malloc(sizeof(float) * array_size);
out = (float*)malloc(sizeof(float) * array_size);
// Initialize array
for(int i = 0; i < array_size; i++){
a[i] = 1.0f;
b[i] = 2.0f;
}
// Allocate device memory
hipMalloc((void**)&d_a,sizeof(float)*array_size);
hipMalloc((void**)&d_b,sizeof(float)*array_size);
hipMalloc((void**)&d_out,sizeof(float)*array_size);
t = mysecond();
// Transfer data from host to device memory
hipMemcpy(d_a,a, sizeof(float)*array_size, hipMemcpyHostToDevice);
hipMemcpy(d_b,b, sizeof(float)*array_size, hipMemcpyHostToDevice);
t = (mysecond() - t);
printf ("\nElapsed time for copy from host to device = %g\n", t);
int block_size = 256;
t = mysecond();
// Vector addition
hipLaunchKernelGGL(( vector_add), dim3(1),dim3(block_size), 0, 0, d_out, d_a, d_b, array_size);
hipDeviceSynchronize();
t = (mysecond() - t);
printf ("\nElapsed time for vector addition in 1 block = %g\n", t);
t = mysecond();
// Transfer data from device to host memory
hipMemcpy(out, d_out, sizeof(float)*array_size, hipMemcpyDeviceToHost);
t = (mysecond() - t);
printf ("\nElapsed time for copy from device to host = %g\n", t);
// Deallocate device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_out);
// Deallocate host memory
free(a);
free(b);
free(out);
printf ("\nBLock size (number of threads): %d \n", block_size);
printf ("\nNumber of blocks : 1 \n");
}
double mysecond()
{
struct timeval tp;
struct timezone tzp;
gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6);
}
| 03adfede8e205a7dff52a9421c3172b76943ecc3.cu | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#define array_size 268435456
//## KERNEL FOR VECTOR ADDITION IN 1 STREAMING MULTIPROCESSOR ##//
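// NOTE: the original file launches vector_add below but never defines it; this is a minimal
// assumed implementation (a thread-stride loop, since a single block of block_size threads
// is launched over the whole array) added so that the launch has a matching kernel.
__global__ void vector_add(float *out, const float *a, const float *b, int n){
for(int i = threadIdx.x; i < n; i += blockDim.x){
out[i] = a[i] + b[i];
}
}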
extern double mysecond();
int main(){
float *a, *b, *out;
float *d_a, *d_b, *d_out;
double t;
// Allocate host memory
a = (float*)malloc(sizeof(float) * array_size);
b = (float*)malloc(sizeof(float) * array_size);
out = (float*)malloc(sizeof(float) * array_size);
// Initialize array
for(int i = 0; i < array_size; i++){
a[i] = 1.0f;
b[i] = 2.0f;
}
// Allocate device memory
cudaMalloc((void**)&d_a,sizeof(float)*array_size);
cudaMalloc((void**)&d_b,sizeof(float)*array_size);
cudaMalloc((void**)&d_out,sizeof(float)*array_size);
t = mysecond();
// Transfer data from host to device memory
cudaMemcpy(d_a,a, sizeof(float)*array_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b, sizeof(float)*array_size, cudaMemcpyHostToDevice);
t = (mysecond() - t);
printf ("\nElapsed time for copy from host to device = %g\n", t);
int block_size = 256;
t = mysecond();
// Vector addition
vector_add<<<1,block_size>>>(d_out, d_a, d_b, array_size);
cudaDeviceSynchronize();
t = (mysecond() - t);
printf ("\nElapsed time for vector addition in 1 block = %g\n", t);
t = mysecond();
// Transfer data from device to host memory
cudaMemcpy(out, d_out, sizeof(float)*array_size, cudaMemcpyDeviceToHost);
t = (mysecond() - t);
printf ("\nElapsed time for copy from device to host = %g\n", t);
// Deallocate device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_out);
// Deallocate host memory
free(a);
free(b);
free(out);
printf ("\nBLock size (number of threads): %d \n", block_size);
printf ("\nNumber of blocks : 1 \n");
}
double mysecond()
{
struct timeval tp;
struct timezone tzp;
gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6);
}
|
b12b945b9f8c58ae6a506ce4bbfd52737a0bcd2f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "KernelCall.h"
// constant
__global__ void kernelProcessing(uchar *rc, uchar *gc, uchar *bc, uchar *destR, uchar *destG, uchar *destB, int rows, int cols, int eyeWidth);
__global__ void kernelProcessing2(uint *ch, uint *dest, int rows, int cols, int eyeWidth);
__global__ void kernelProcessing3(uint *dest, int rows, int cols, int eyeWidth);
__global__ void kernelMapping(cv::cuda::GpuMat cvOrigin, cv::cuda::GpuMat cvRes, int rows, int cols, uint *gpuMap);
cv::Mat KernelCall(cv::Mat origin, uchar *cuRc, uchar *cuBc, uchar *cuGc, uchar *cuDestR, uchar *cuDestB, uchar *cuDestG) {
int size = origin.rows*origin.cols;
uchar *rChannel = new uchar[size];
uchar *gChannel = new uchar[size];
uchar *bChannel = new uchar[size];
uchar *resR = new uchar[800 * 480];
uchar *resB = new uchar[800 * 480];
uchar *resG = new uchar[800 * 480];
DS_timer timer_a(4);
timer_a.setTimerName(0, "array divide");
timer_a.setTimerName(1, "memcpy cpu to device");
timer_a.setTimerName(2, "array merge");
timer_a.setTimerName(3, "memcpy device to cpu");
//overhead
timer_a.onTimer(0);
for (int y = 0; y < origin.rows; y++) {
for (int x = 0; x < origin.cols; x++) {
rChannel[y*origin.cols + x] = origin.at<cv::Vec3b>(y, x)[2];
gChannel[y*origin.cols + x] = origin.at<cv::Vec3b>(y, x)[1];
bChannel[y*origin.cols + x] = origin.at<cv::Vec3b>(y, x)[0];
}
}
timer_a.offTimer(0);
//uchar *dest = new uchar[800*480*3];
//memcopy to host to device
timer_a.onTimer(1);
hipMemcpy(cuRc, rChannel, sizeof(uchar)*size, hipMemcpyHostToDevice);
hipMemcpy(cuBc, bChannel, sizeof(uchar)*size, hipMemcpyHostToDevice);
hipMemcpy(cuGc, gChannel, sizeof(uchar)*size, hipMemcpyHostToDevice);
timer_a.offTimer(1);
//kernel dimension
dim3 blockDim(32, 16);
dim3 gridDim(ceil((float)origin.cols / 32), ceil((float)origin.rows / 16));
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
hipEventRecord(start);
kernelProcessing << <gridDim, blockDim >> > (cuRc, cuGc, cuBc, cuDestR, cuDestG, cuDestB, origin.rows, origin.cols, 80);
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
float time;
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
std::cout <<"per frame calc"<< time << " ms " << std::endl << std::endl;
timer_a.onTimer(3);
hipMemcpy(resR, cuDestR, sizeof(uchar)* 800 * 480, hipMemcpyDeviceToHost);
hipMemcpy(resB, cuDestB, sizeof(uchar)* 800 * 480, hipMemcpyDeviceToHost);
hipMemcpy(resG, cuDestG, sizeof(uchar)* 800 * 480, hipMemcpyDeviceToHost);
hipMemset(cuDestR, 0, sizeof(uchar)* 800 * 480);
hipMemset(cuDestB, 0, sizeof(uchar)* 800 * 480);
hipMemset(cuDestG, 0, sizeof(uchar)* 800 * 480);
timer_a.offTimer(3);
cv::Mat resM(480, 800, CV_8UC3);
resM.setTo(0);
timer_a.onTimer(2);
for (int y = 0; y < resM.rows; y++) {
for (int x = 0; x < resM.cols; x++) {
resM.at<cv::Vec3b>(y, x)[0] = (resB[y*resM.cols + x]!= 0)? resB[y*resM.cols + x]: resB[y*resM.cols + x-1]/2+ resB[y*resM.cols + x + 1]/2;
resM.at<cv::Vec3b>(y, x)[1] = (resG[y*resM.cols + x]!= 0) ? resG[y*resM.cols + x] : resG[y*resM.cols + x - 1]/2 + resG[y*resM.cols + x + 1]/2;
resM.at<cv::Vec3b>(y, x)[2] = (resR[y*resM.cols + x]!= 0) ? resR[y*resM.cols + x] : resR[y*resM.cols + x - 1]/2 + resR[y*resM.cols + x + 1]/2;
}
}
timer_a.offTimer(2);
timer_a.printTimer();
//imshow("resM", resM);
delete[] rChannel; delete[] bChannel; delete[] gChannel; delete[] resR; delete[] resG; delete[] resB;
return resM;
}
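// KernelCall2: same processing pipeline as KernelCall, but packs B,G,R into a single uint per pixel (0x00BBGGRR)
// so only one channel array has to be copied and processed instead of three separate planes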
cv::Mat KernelCall2(cv::Mat origin, uint *ch, uint *Dest) {
int size = origin.rows*origin.cols;
uint *Channel = new uint[size];
uint *res = new uint[800 * 480];
DS_timer timer_a(4);
timer_a.setTimerName(0, "array divide");
timer_a.setTimerName(1, "memcpy cpu to device");
timer_a.setTimerName(2, "array merge");
timer_a.setTimerName(3, "memcpy device to cpu");
//overhead
timer_a.onTimer(0);
for (int y = 0; y < origin.rows; y++) {
for (int x = 0; x < origin.cols; x++) {
Channel[y*origin.cols + x] = origin.at<cv::Vec3b>(y, x)[0]<<16;
Channel[y*origin.cols + x] |= origin.at<cv::Vec3b>(y, x)[1]<<8;
Channel[y*origin.cols + x] |= origin.at<cv::Vec3b>(y, x)[2];
}
}
timer_a.offTimer(0);
//memcopy to host to device
timer_a.onTimer(1);
hipMemcpy(ch, Channel, sizeof(uint)*size, hipMemcpyHostToDevice);
timer_a.offTimer(1);
//kernel dimension
dim3 blockDim(32, 16);
dim3 gridDim(ceil((float)origin.cols / 32), ceil((float)origin.rows / 256));
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
hipEventRecord(start);
kernelProcessing2 << <gridDim, blockDim >> > (ch, Dest, origin.rows, origin.cols, 80);
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
float time;
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
std::cout << "per frame calc" << time << " ms " << std::endl;
timer_a.onTimer(3);
hipMemcpy(res, Dest, sizeof(uint) * 800 * 480, hipMemcpyDeviceToHost);
hipMemset(Dest, 0, sizeof(uint) * 800 * 480);
timer_a.offTimer(3);
cv::Mat resM(480, 800, CV_8UC3);
resM.setTo(0);
timer_a.onTimer(2);
for (int y = 0; y < resM.rows; y++) {
for (int x = 0; x < resM.cols; x++) {
resM.at<cv::Vec3b>(y, x)[0] = (res[y*resM.cols + x] & 0xFF0000)>>16;
resM.at<cv::Vec3b>(y, x)[1] = (res[y*resM.cols + x] & 0x00FF00)>>8;
resM.at<cv::Vec3b>(y, x)[2] = (res[y*resM.cols + x] & 0x0000FF);
}
}
timer_a.offTimer(2);
timer_a.printTimer();
//imshow("resM", resM);
delete[] Channel; delete[] res;
return resM;
}
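// KernelCall3: runs the distortion kernel once to build a source-index lookup table (map) on the GPU;
// the map is then applied per frame by mapping() on the CPU or mappingInGpu() on the GPU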
uint* KernelCall3(cv::Mat origin, uint *map_) {
uint *res = new uint[800 * 480];
dim3 blockDim(32, 16);
dim3 gridDim(ceil((float)origin.cols / 32), ceil((float)origin.rows / 16));
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
hipEventRecord(start);
kernelProcessing3 << <gridDim, blockDim >> > (map_, origin.rows, origin.cols, 80);
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
float time;
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
std::cout << "per frame calc " << time << " ms " << std::endl;
hipMemcpy(res, map_, sizeof(uint) * 800 * 480, hipMemcpyDeviceToHost);
return res;
}
cv::Mat mapping(cv::Mat origin, uint *map_) {
cv::Mat resM(480, 800, CV_8UC3);
resM.setTo(0);
std::chrono::system_clock::time_point start = std::chrono::system_clock::now();
for (int y = 0; y < resM.rows; y++) {
for (int x = 0; x < resM.cols; x++) {
int dx = 0; int dy = 0;
if (origin.cols > origin.rows) {
dx = map_[y*resM.cols + x] / origin.cols;
dy = map_[y*resM.cols + x] % origin.cols;
}
else {
dy = map_[y*resM.cols + x] / (origin.rows + 1);
dx = map_[y*resM.cols + x] % (origin.rows + 1);
}
resM.at<cv::Vec3b>(y, x) = origin.at<cv::Vec3b>(dy, dx);
}
}
std::chrono::system_clock::time_point end = std::chrono::system_clock::now();
std::chrono::microseconds micro = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
double millis = ((double)(micro.count())) / 1000;
std::cout <<"Mapping : " << millis << " ms" << std::endl;
return resM;
}
cv::Mat mappingInGpu(cv::Mat origin, uint *gpuMap, cv::cuda::GpuMat cvOrigin, cv::cuda::GpuMat cvRes) {
cv::Mat resM(480, 800, CV_8UC3);
resM.setTo(0);
cvOrigin.upload(origin);
cvRes.upload(resM);
dim3 blockDim(16, 16);
dim3 gridDim(ceil((float)resM.cols / 16), ceil((float)resM.rows / 16));
std::chrono::system_clock::time_point start = std::chrono::system_clock::now();
kernelMapping << <gridDim, blockDim >> > (cvOrigin, cvRes, origin.rows, origin.cols, gpuMap);
hipDeviceSynchronize();
std::chrono::system_clock::time_point end = std::chrono::system_clock::now();
std::chrono::microseconds micro = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
double millis = ((double)(micro.count())) / 1000;
std::cout << "Mapping : " << millis << " ms" << std::endl;
cvRes.download(resM);
return resM;
}
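// kernelProcessing: per-pixel barrel-distortion remap of the separate R/G/B planes into a side-by-side
// 800x480 left/right-eye output; each thread processes 16 consecutive rows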
__global__ void kernelProcessing(uchar *rc, uchar *gc, uchar *bc, uchar *destR, uchar *destG, uchar *destB, int rows, int cols, int eyeWidth) {
//croping is just memcpy
/*not use croping loop*/
//barrel Distortion
double pA = 0.007715;
double pB = 0.026731;
double pC = 0.0;
double pD = 1 - pA - pB - pC;
double cx = (double)cols / 2;
double cy = (double)rows / 2;
int idxX = blockDim.x * blockIdx.x + threadIdx.x;
int idxY = (blockDim.y * blockIdx.y + threadIdx.y)*16 ;
if (idxX > cols || idxY > rows) return;
int d = MIN(cx, cy);
for (int i = 0; i < 16; i++) {
double dx = (idxX - cx) / d;
double dy = (idxY+i - cy) / d;
double dr = sqrt(dx*dx + dy * dy);
double sr = (pA*dr*dr*dr + pB * dr*dr + pC * dr + pD)*dr;
double factor = abs(dr / sr);
double srcXd = cx + (dx*factor*d);
double srcYd = cy + (dy*factor*d);
int nx = ceil(srcXd);
int ny = ceil(srcYd);
double dCol = (double)cols / 400.0;
double dRow = (double)rows / 480.0;
//left 0 to cols-eyeWidth, right eyeWidth to cols
if (nx < 0 || ny < 0 || nx >= cols || ny >= rows || idxX % (int)dCol != 0 || (idxY + i) % (int)dRow != 0) return;
if ((idxY + i) > rows) return;
if (idxX < cols - eyeWidth) {
destR[(int)(ny / dRow) * 800 + (int)(nx / dCol)] = rc[(idxY + i)*cols + idxX];
destG[(int)(ny / dRow) * 800 + (int)(nx / dCol)] = gc[(idxY + i)*cols + idxX];
destB[(int)(ny/dRow)* 800 + (int)(nx/dCol)] = bc[(idxY + i)*cols + idxX];
}
if (idxX > eyeWidth) {
destR[(int)(ny / dRow) * 800 + (int)(nx / dCol) + 400] = rc[(idxY + i)*cols + idxX];
destG[(int)(ny / dRow) * 800 + (int)(nx / dCol) + 400] = gc[(idxY + i)*cols + idxX];
destB[(int)(ny / dRow)* 800 + (int)(nx / dCol) + 400] = bc[(idxY + i)*cols + idxX];
}
__syncthreads();
}
__syncthreads();
//resizing
//merge
//return
}
__global__ void kernelProcessing2(uint *ch, uint *dest, int rows, int cols, int eyeWidth) {
	//cropping is just memcpy
	/*not use cropping loop*/
//barrel Distortion
double pA = 0.007715;
double pB = 0.026731;
double pC = 0.0;
double pD = 1 - pA - pB - pC;
double cx = (double)cols / 2;
double cy = (double)rows / 2;
int idxX = blockDim.x * blockIdx.x + threadIdx.x;
int idxY = (blockDim.y * blockIdx.y + threadIdx.y)*16;
if (idxX > cols || idxY > rows) return;
int d = MIN(cx, cy);
for (int i = 0; i < 16; i++) {
double dx = (idxX - cx) / d;
double dy = (idxY + i - cy) / d;
double dr = sqrt(dx*dx + dy * dy);
double sr = (pA*dr*dr*dr + pB * dr*dr + pC * dr + pD)*dr;
double factor = abs(dr / sr);
double srcXd = cx + (dx*factor*d);
double srcYd = cy + (dy*factor*d);
int nx = ceil(srcXd);
int ny = ceil(srcYd);
double dCol = (double)cols / 400.0;
double dRow = (double)rows / 480.0;
//left 0 to cols-eyeWidth, right eyeWidth to cols
if (nx < 0 || ny < 0 || nx >= cols || ny >= rows || idxX % (int)dCol != 0 || (idxY + i) % (int)dRow != 0) return;
if ((idxY + i) > rows) return;
if (idxX < cols - eyeWidth) {
dest[(int)(ny / dRow) * 800 + (int)(nx / dCol)] = ch[(idxY + i)*cols + idxX];
}
if (idxX > eyeWidth) {
dest[(int)(ny / dRow) * 800 + (int)(nx / dCol) + 400] = ch[(idxY + i)*cols + idxX];
}
__syncthreads();
}
__syncthreads();
//return
}
__global__ void kernelProcessing3(uint *dest, int rows, int cols, int eyeWidth) {
double pA = 0.007715;
double pB = 0.026731;
double pC = 0.0;
double pD = 1 - pA - pB - pC;
double cx = (double)cols / 2;
double cy = (double)rows / 2;
int idxX = blockDim.x * blockIdx.x + threadIdx.x;
int idxY = blockDim.y * blockIdx.y + threadIdx.y;
if (idxX > cols || idxY > rows) return;
int d = MIN(cx, cy);
double dx = (idxX - cx) / d;
double dy = (idxY - cy) / d;
double dr = sqrt(dx*dx + dy * dy);
double sr = (pA*dr*dr*dr + pB * dr*dr + pC * dr + pD)*dr;
double factor = abs(dr / sr);
double srcXd = cx + (dx*factor*d);
double srcYd = cy + (dy*factor*d);
int nx = ceil(srcXd);
int ny = ceil(srcYd);
double dCol = (double)cols / 400.0;
double dRow = (double)rows / 480.0;
//left 0 to cols-eyeWidth, right eyeWidth to cols
if (nx < 0 || ny < 0 || nx >= cols || ny >= rows || idxX % (int)dCol != 0 || idxY % (int)dRow != 0) return;
if (idxX < cols - eyeWidth) {
dest[(int)(ny / dRow) * 800 + (int)(nx / dCol)] = (cols>rows)?idxX*cols+idxY:idxY*(rows+1)+idxX;
}
if (idxX > eyeWidth) {
dest[(int)(ny / dRow) * 800 + (int)(nx / dCol) + 400] = (cols>rows)?idxX*cols+idxY:idxY*(rows+1)+idxX;
}
__syncthreads();
}
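/* Note added for clarity: kernelProcessing3 does not write pixel values; each destination
   cell receives the source pixel position packed into a single uint, i.e. idxX*cols + idxY
   when cols > rows and idxY*(rows+1) + idxX otherwise. Worked example with assumed sizes
   cols = 1280, rows = 720: the source pixel at column idxX = 100, row idxY = 50 is stored
   as 100*1280 + 50 = 128050, and mapping()/kernelMapping recover it as
   128050 / 1280 = 100 (column dx) and 128050 % 1280 = 50 (row dy). */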
__global__ void kernelMapping(cv::cuda::GpuMat cvOrigin, cv::cuda::GpuMat cvRes, int rows, int cols, uint *gpuMap) {
int idxX = blockDim.x * blockIdx.x + threadIdx.x;
int idxY = blockDim.y * blockIdx.y + threadIdx.y;
if (idxX > cols || idxY > rows) return;
int dx = 0; int dy = 0;
if (cols > rows) {
dx = gpuMap[idxY*cvRes.cols + idxX] / cvOrigin.cols;
dy = gpuMap[idxY*cvRes.cols + idxX] % cvOrigin.cols;
}
else {
		dy = gpuMap[idxY*cvRes.cols + idxX] / (cvOrigin.rows + 1);
		dx = gpuMap[idxY*cvRes.cols + idxX] % (cvOrigin.rows + 1);
}
//cvRes.ptr(idxY)[idxX] = cvOrigin.ptr(dy)[dx];
cvRes.data[idxY*800+idxX] = cvOrigin.data[dy * cvOrigin.cols + dx];
__syncthreads();
} | b12b945b9f8c58ae6a506ce4bbfd52737a0bcd2f.cu | #include "KernelCall.h"
//argument values that are not memory addresses (i.e. passed by value) are automatically placed in constant memory
__global__ void kernelProcessing(uchar *rc, uchar *gc, uchar *bc, uchar *destR, uchar *destG, uchar *destB, int rows, int cols, int eyeWidth);
__global__ void kernelProcessing2(uint *ch, uint *dest, int rows, int cols, int eyeWidth);
__global__ void kernelProcessing3(uint *dest, int rows, int cols, int eyeWidth);
__global__ void kernelMapping(cv::cuda::GpuMat cvOrigin, cv::cuda::GpuMat cvRes, int rows, int cols, uint *gpuMap);
cv::Mat KernelCall(cv::Mat origin, uchar *cuRc, uchar *cuBc, uchar *cuGc, uchar *cuDestR, uchar *cuDestB, uchar *cuDestG) {
int size = origin.rows*origin.cols;
uchar *rChannel = new uchar[size];
uchar *gChannel = new uchar[size];
uchar *bChannel = new uchar[size];
uchar *resR = new uchar[800 * 480];
uchar *resB = new uchar[800 * 480];
uchar *resG = new uchar[800 * 480];
DS_timer timer_a(4);
timer_a.setTimerName(0, "array divide");
timer_a.setTimerName(1, "memcpy cpu to device");
timer_a.setTimerName(2, "array merge");
timer_a.setTimerName(3, "memcpy device to cpu");
//overhead
timer_a.onTimer(0);
for (int y = 0; y < origin.rows; y++) {
for (int x = 0; x < origin.cols; x++) {
rChannel[y*origin.cols + x] = origin.at<cv::Vec3b>(y, x)[2];
gChannel[y*origin.cols + x] = origin.at<cv::Vec3b>(y, x)[1];
bChannel[y*origin.cols + x] = origin.at<cv::Vec3b>(y, x)[0];
}
}
timer_a.offTimer(0);
//uchar *dest = new uchar[800*480*3];
	//memcpy from host to device
timer_a.onTimer(1);
cudaMemcpy(cuRc, rChannel, sizeof(uchar)*size, cudaMemcpyHostToDevice);
cudaMemcpy(cuBc, bChannel, sizeof(uchar)*size, cudaMemcpyHostToDevice);
cudaMemcpy(cuGc, gChannel, sizeof(uchar)*size, cudaMemcpyHostToDevice);
timer_a.offTimer(1);
//kernel dimension
dim3 blockDim(32, 16);
dim3 gridDim(ceil((float)origin.cols / 32), ceil((float)origin.rows / 16));
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
cudaEventRecord(start);
kernelProcessing << <gridDim, blockDim >> > (cuRc, cuGc, cuBc, cuDestR, cuDestG, cuDestB, origin.rows, origin.cols, 80);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time;
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
std::cout <<"per frame calc"<< time << " ms " << std::endl << std::endl;
timer_a.onTimer(3);
cudaMemcpy(resR, cuDestR, sizeof(uchar)* 800 * 480, cudaMemcpyDeviceToHost);
cudaMemcpy(resB, cuDestB, sizeof(uchar)* 800 * 480, cudaMemcpyDeviceToHost);
cudaMemcpy(resG, cuDestG, sizeof(uchar)* 800 * 480, cudaMemcpyDeviceToHost);
cudaMemset(cuDestR, 0, sizeof(uchar)* 800 * 480);
cudaMemset(cuDestB, 0, sizeof(uchar)* 800 * 480);
cudaMemset(cuDestG, 0, sizeof(uchar)* 800 * 480);
timer_a.offTimer(3);
cv::Mat resM(480, 800, CV_8UC3);
resM.setTo(0);
timer_a.onTimer(2);
for (int y = 0; y < resM.rows; y++) {
for (int x = 0; x < resM.cols; x++) {
resM.at<cv::Vec3b>(y, x)[0] = (resB[y*resM.cols + x]!= 0)? resB[y*resM.cols + x]: resB[y*resM.cols + x-1]/2+ resB[y*resM.cols + x + 1]/2;
resM.at<cv::Vec3b>(y, x)[1] = (resG[y*resM.cols + x]!= 0) ? resG[y*resM.cols + x] : resG[y*resM.cols + x - 1]/2 + resG[y*resM.cols + x + 1]/2;
resM.at<cv::Vec3b>(y, x)[2] = (resR[y*resM.cols + x]!= 0) ? resR[y*resM.cols + x] : resR[y*resM.cols + x - 1]/2 + resR[y*resM.cols + x + 1]/2;
}
}
timer_a.offTimer(2);
timer_a.printTimer();
//imshow("resM", resM);
delete[] rChannel; delete[] bChannel; delete[] gChannel; delete[] resR; delete[] resG; delete[] resB;
return resM;
}
cv::Mat KernelCall2(cv::Mat origin, uint *ch, uint *Dest) {
int size = origin.rows*origin.cols;
uint *Channel = new uint[size];
uint *res = new uint[800 * 480];
DS_timer timer_a(4);
timer_a.setTimerName(0, "array divide");
timer_a.setTimerName(1, "memcpy cpu to device");
timer_a.setTimerName(2, "array merge");
timer_a.setTimerName(3, "memcpy device to cpu");
//overhead
timer_a.onTimer(0);
for (int y = 0; y < origin.rows; y++) {
for (int x = 0; x < origin.cols; x++) {
Channel[y*origin.cols + x] = origin.at<cv::Vec3b>(y, x)[0]<<16;
Channel[y*origin.cols + x] |= origin.at<cv::Vec3b>(y, x)[1]<<8;
Channel[y*origin.cols + x] |= origin.at<cv::Vec3b>(y, x)[2];
}
}
timer_a.offTimer(0);
	//memcpy from host to device
timer_a.onTimer(1);
cudaMemcpy(ch, Channel, sizeof(uint)*size, cudaMemcpyHostToDevice);
timer_a.offTimer(1);
//kernel dimension
dim3 blockDim(32, 16);
dim3 gridDim(ceil((float)origin.cols / 32), ceil((float)origin.rows / 256));
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
cudaEventRecord(start);
kernelProcessing2 << <gridDim, blockDim >> > (ch, Dest, origin.rows, origin.cols, 80);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time;
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
std::cout << "per frame calc" << time << " ms " << std::endl;
timer_a.onTimer(3);
cudaMemcpy(res, Dest, sizeof(uint) * 800 * 480, cudaMemcpyDeviceToHost);
cudaMemset(Dest, 0, sizeof(uint) * 800 * 480);
timer_a.offTimer(3);
cv::Mat resM(480, 800, CV_8UC3);
resM.setTo(0);
timer_a.onTimer(2);
for (int y = 0; y < resM.rows; y++) {
for (int x = 0; x < resM.cols; x++) {
resM.at<cv::Vec3b>(y, x)[0] = (res[y*resM.cols + x] & 0xFF0000)>>16;
resM.at<cv::Vec3b>(y, x)[1] = (res[y*resM.cols + x] & 0x00FF00)>>8;
resM.at<cv::Vec3b>(y, x)[2] = (res[y*resM.cols + x] & 0x0000FF);
}
}
timer_a.offTimer(2);
timer_a.printTimer();
//imshow("resM", resM);
delete[] Channel; delete[] res;
return resM;
}
uint* KernelCall3(cv::Mat origin, uint *map_) {
uint *res = new uint[800 * 480];
dim3 blockDim(32, 16);
dim3 gridDim(ceil((float)origin.cols / 32), ceil((float)origin.rows / 16));
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
cudaEventRecord(start);
kernelProcessing3 << <gridDim, blockDim >> > (map_, origin.rows, origin.cols, 80);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time;
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
std::cout << "per frame calc " << time << " ms " << std::endl;
cudaMemcpy(res, map_, sizeof(uint) * 800 * 480, cudaMemcpyDeviceToHost);
return res;
}
cv::Mat mapping(cv::Mat origin, uint *map_) {
cv::Mat resM(480, 800, CV_8UC3);
resM.setTo(0);
std::chrono::system_clock::time_point start = std::chrono::system_clock::now();
for (int y = 0; y < resM.rows; y++) {
for (int x = 0; x < resM.cols; x++) {
int dx = 0; int dy = 0;
if (origin.cols > origin.rows) {
dx = map_[y*resM.cols + x] / origin.cols;
dy = map_[y*resM.cols + x] % origin.cols;
}
else {
dy = map_[y*resM.cols + x] / (origin.rows + 1);
dx = map_[y*resM.cols + x] % (origin.rows + 1);
}
resM.at<cv::Vec3b>(y, x) = origin.at<cv::Vec3b>(dy, dx);
}
}
std::chrono::system_clock::time_point end = std::chrono::system_clock::now();
std::chrono::microseconds micro = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
double millis = ((double)(micro.count())) / 1000;
std::cout <<"Mapping : " << millis << " ms" << std::endl;
return resM;
}
cv::Mat mappingInGpu(cv::Mat origin, uint *gpuMap, cv::cuda::GpuMat cvOrigin, cv::cuda::GpuMat cvRes) {
cv::Mat resM(480, 800, CV_8UC3);
resM.setTo(0);
cvOrigin.upload(origin);
cvRes.upload(resM);
dim3 blockDim(16, 16);
dim3 gridDim(ceil((float)resM.cols / 16), ceil((float)resM.rows / 16));
std::chrono::system_clock::time_point start = std::chrono::system_clock::now();
kernelMapping << <gridDim, blockDim >> > (cvOrigin, cvRes, origin.rows, origin.cols, gpuMap);
cudaDeviceSynchronize();
std::chrono::system_clock::time_point end = std::chrono::system_clock::now();
std::chrono::microseconds micro = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
double millis = ((double)(micro.count())) / 1000;
std::cout << "Mapping : " << millis << " ms" << std::endl;
cvRes.download(resM);
return resM;
}
__global__ void kernelProcessing(uchar *rc, uchar *gc, uchar *bc, uchar *destR, uchar *destG, uchar *destB, int rows, int cols, int eyeWidth) {
	//cropping is just memcpy
	/*not use cropping loop*/
//barrel Distortion
double pA = 0.007715;
double pB = 0.026731;
double pC = 0.0;
double pD = 1 - pA - pB - pC;
double cx = (double)cols / 2;
double cy = (double)rows / 2;
int idxX = blockDim.x * blockIdx.x + threadIdx.x;
int idxY = (blockDim.y * blockIdx.y + threadIdx.y)*16 ;
if (idxX > cols || idxY > rows) return;
int d = MIN(cx, cy);
for (int i = 0; i < 16; i++) {
double dx = (idxX - cx) / d;
double dy = (idxY+i - cy) / d;
double dr = sqrt(dx*dx + dy * dy);
double sr = (pA*dr*dr*dr + pB * dr*dr + pC * dr + pD)*dr;
double factor = abs(dr / sr);
double srcXd = cx + (dx*factor*d);
double srcYd = cy + (dy*factor*d);
int nx = ceil(srcXd);
int ny = ceil(srcYd);
double dCol = (double)cols / 400.0;
double dRow = (double)rows / 480.0;
//left 0 to cols-eyeWidth, right eyeWidth to cols
if (nx < 0 || ny < 0 || nx >= cols || ny >= rows || idxX % (int)dCol != 0 || (idxY + i) % (int)dRow != 0) return;
if ((idxY + i) > rows) return;
if (idxX < cols - eyeWidth) {
destR[(int)(ny / dRow) * 800 + (int)(nx / dCol)] = rc[(idxY + i)*cols + idxX];
destG[(int)(ny / dRow) * 800 + (int)(nx / dCol)] = gc[(idxY + i)*cols + idxX];
destB[(int)(ny/dRow)* 800 + (int)(nx/dCol)] = bc[(idxY + i)*cols + idxX];
}
if (idxX > eyeWidth) {
destR[(int)(ny / dRow) * 800 + (int)(nx / dCol) + 400] = rc[(idxY + i)*cols + idxX];
destG[(int)(ny / dRow) * 800 + (int)(nx / dCol) + 400] = gc[(idxY + i)*cols + idxX];
destB[(int)(ny / dRow)* 800 + (int)(nx / dCol) + 400] = bc[(idxY + i)*cols + idxX];
}
__syncthreads();
}
__syncthreads();
//resizing
	//merge (do this here as well)
//return
}
__global__ void kernelProcessing2(uint *ch, uint *dest, int rows, int cols, int eyeWidth) {
	//cropping is just memcpy
	/*not use cropping loop*/
//barrel Distortion
double pA = 0.007715;
double pB = 0.026731;
double pC = 0.0;
double pD = 1 - pA - pB - pC;
double cx = (double)cols / 2;
double cy = (double)rows / 2;
int idxX = blockDim.x * blockIdx.x + threadIdx.x;
int idxY = (blockDim.y * blockIdx.y + threadIdx.y)*16;
if (idxX > cols || idxY > rows) return;
int d = MIN(cx, cy);
for (int i = 0; i < 16; i++) {
double dx = (idxX - cx) / d;
double dy = (idxY + i - cy) / d;
double dr = sqrt(dx*dx + dy * dy);
double sr = (pA*dr*dr*dr + pB * dr*dr + pC * dr + pD)*dr;
double factor = abs(dr / sr);
double srcXd = cx + (dx*factor*d);
double srcYd = cy + (dy*factor*d);
int nx = ceil(srcXd);
int ny = ceil(srcYd);
double dCol = (double)cols / 400.0;
double dRow = (double)rows / 480.0;
//left 0 to cols-eyeWidth, right eyeWidth to cols
if (nx < 0 || ny < 0 || nx >= cols || ny >= rows || idxX % (int)dCol != 0 || (idxY + i) % (int)dRow != 0) return;
if ((idxY + i) > rows) return;
if (idxX < cols - eyeWidth) {
dest[(int)(ny / dRow) * 800 + (int)(nx / dCol)] = ch[(idxY + i)*cols + idxX];
}
if (idxX > eyeWidth) {
dest[(int)(ny / dRow) * 800 + (int)(nx / dCol) + 400] = ch[(idxY + i)*cols + idxX];
}
__syncthreads();
}
__syncthreads();
//return
}
__global__ void kernelProcessing3(uint *dest, int rows, int cols, int eyeWidth) {
double pA = 0.007715;
double pB = 0.026731;
double pC = 0.0;
double pD = 1 - pA - pB - pC;
double cx = (double)cols / 2;
double cy = (double)rows / 2;
int idxX = blockDim.x * blockIdx.x + threadIdx.x;
int idxY = blockDim.y * blockIdx.y + threadIdx.y;
if (idxX > cols || idxY > rows) return;
int d = MIN(cx, cy);
double dx = (idxX - cx) / d;
double dy = (idxY - cy) / d;
double dr = sqrt(dx*dx + dy * dy);
double sr = (pA*dr*dr*dr + pB * dr*dr + pC * dr + pD)*dr;
double factor = abs(dr / sr);
double srcXd = cx + (dx*factor*d);
double srcYd = cy + (dy*factor*d);
int nx = ceil(srcXd);
int ny = ceil(srcYd);
double dCol = (double)cols / 400.0;
double dRow = (double)rows / 480.0;
//left 0 to cols-eyeWidth, right eyeWidth to cols
if (nx < 0 || ny < 0 || nx >= cols || ny >= rows || idxX % (int)dCol != 0 || idxY % (int)dRow != 0) return;
if (idxX < cols - eyeWidth) {
dest[(int)(ny / dRow) * 800 + (int)(nx / dCol)] = (cols>rows)?idxX*cols+idxY:idxY*(rows+1)+idxX;
}
if (idxX > eyeWidth) {
dest[(int)(ny / dRow) * 800 + (int)(nx / dCol) + 400] = (cols>rows)?idxX*cols+idxY:idxY*(rows+1)+idxX;
}
__syncthreads();
}
__global__ void kernelMapping(cv::cuda::GpuMat cvOrigin, cv::cuda::GpuMat cvRes, int rows, int cols, uint *gpuMap) {
int idxX = blockDim.x * blockIdx.x + threadIdx.x;
int idxY = blockDim.y * blockIdx.y + threadIdx.y;
if (idxX > cols || idxY > rows) return;
int dx = 0; int dy = 0;
if (cols > rows) {
dx = gpuMap[idxY*cvRes.cols + idxX] / cvOrigin.cols;
dy = gpuMap[idxY*cvRes.cols + idxX] % cvOrigin.cols;
}
else {
		dy = gpuMap[idxY*cvRes.cols + idxX] / (cvOrigin.rows + 1);
		dx = gpuMap[idxY*cvRes.cols + idxX] % (cvOrigin.rows + 1);
}
//cvRes.ptr(idxY)[idxX] = cvOrigin.ptr(dy)[dx];
cvRes.data[idxY*800+idxX] = cvOrigin.data[dy * cvOrigin.cols + dx];
__syncthreads();
} |
bdd6c0263ad59b7d1c0509963e9437e280a5b673.hip | // !!! This is a file automatically generated by hipify!!!
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <time.h>
#include <omp.h>
#include <stdbool.h>
#include <time.h>
#include <hip/hip_runtime.h>
// compilation: nvcc gm.cu -o gm -w -Xcompiler " -openmp"
// nvcc gm.cu -o gm -w -Xcompiler " -openmp" -gencode arch=compute_61,code=sm_61 -lcudadevrt -rdc=true -O3
// nvcc gm.cu -o gm -w -Xcompiler " -openmp" -gencode arch=compute_52,code=sm_52 -lcudadevrt -rdc=true -O3
__device__ int next_pivot_row = 0;
//global variable declarations
int max_degree = 0;
int module = 0;
struct map_row {
int len;
int *col;
};
struct map {
int len;
struct map_row *row;
};
//----------------------------------------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------------
void matrix_alloc_int(int ***m, int row, int col) {
	//Allocates an int matrix with the given dimensions.
*m = (int **)malloc(row * sizeof(int *));
if (*m != NULL)
for (int i = 0; i<row; i++)
(*m)[i] = (int *)calloc(col, sizeof(int));
}
void matrix_free_int(int ***m, int row, int col) {
	//Frees an int matrix with the given dimensions.
for (int i = 0; i<row; i++)
free((*m)[i]);
free(*m);
}
//copies vector vet2 into vet1, both of length len
void vctcpy(int *vet1, const int *vet2, int len) {
for (int i = 0; i < len; i++)
vet1[i] = vet2[i];
return;
}
/*recursive function that computes all possible monomials in n variables with degree <= m
and stores them in the array vet. Monomials are represented as arrays of integers where
the value at each position is the degree of the variable in that position.
Example: n=3, x^2*y*z = [2,1,1].
The array vet must already be allocated correctly. The other parameters are needed
by the recursive structure of the function, and on the first call they must be:
- turn = 0, the position of the current variable in the monomial
- monomial = an already allocated array of n integers used to build the monomials
- *pos = 0, pointer to an integer, the first free position in the array vet
*/
void monomial_computation_rec(int n, int m, int **vet, int turn, int *monomial, int *pos) {
	//for each variable, try every degree from 0 to m
for (int degree = 0; degree <= m; degree++) {
		//if this is the first variable, reset the monomial
if (turn == 0) {
			//clear the monomial, keeping only the degree of the first variable
monomial[0] = degree;
for (int v = 1; v < n; v++)
monomial[v] = 0;
}
		//otherwise, for the other variables, add their degree to the monomial
else
monomial[turn] = degree;
		//compute the degree of the monomial by summing the degrees of the variables
int sum = 0;
for (int v = 0; v <= turn; v++)
sum += monomial[v];
		//if the degree of the monomial exceeds the maximum there is no point in searching for
		//further monomials starting from this one, because they would all have a degree greater or equal
if (sum > m)
break;
		//if this is the last variable, copy the monomial into the array vet and increment the index pos
if (turn == (n - 1)) {
vctcpy(vet[(*pos)], monomial, n);
(*pos)++;
}
		//otherwise call itself again, moving on to the next variable (turn)
else
monomial_computation_rec(n, m, vet, turn + 1, monomial, pos);
}
return;
}
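/* Illustrative example (not in the original source): with n = 2 variables (x, y) and m = 2,
   the recursion fills vet, in this order, with
   [0,0], [0,1], [0,2], [1,0], [1,1], [2,0]
   i.e. 1, y, y^2, x, x*y, x^2 - the 6 monomials of degree <= 2. */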
/*returns an array containing all the len monomials in n variables with degree <= m
len is the number of possible monomials in n variables with degree <= m
monomials are integer arrays of length n where the value at each position is the degree
of the variable in that position. Example: n=3, x^2*y*z = [2,1,1]
len is passed as an argument to avoid recomputing it internally
*/
int **monomial_computation(int n, int m, int len) {
int **vet, *monomial;
	//allocate the memory for the array
matrix_alloc_int(&vet,len,n);
	//support structures needed for the computation
monomial = (int *)malloc(n * sizeof(int));
int pos = 0;
	//the computation is done by the recursive function with the proper parameters
monomial_computation_rec(n, m, vet, 0, monomial, &pos);
free(monomial);
return vet;
}
//computes the factorial of n (returns -1 if n is negative)
long long factorial(int n) {
long long k;
	if (n<0) //the factorial does not exist for negative n
{
		return -1; //error code
}
	else { //otherwise compute the factorial
if (n == 0 || n == 1) {
return 1;
}
else {
k = 1;
for (int i = 2; i <= n; i++) {
k *= i;
}
return k;
}
}
}
//returns the number of possible monomials in n variables with degree exactly m
int combination(int n, int m) {
long long num, den;
	//compute {(m+n-1)! / (m!*(n-1)!)}
	//if n>=m it simplifies to {(m+n-1)*(m+n-2)* ... *(n) / m!}
if (n >= m) {
num = 1;
for (int k = m; k > 0; k--)
num = num * (n + k - 1);
den = factorial(m);
}
	//if m>n it simplifies to {(m+n-1)*(m+n-2)* ... *(m+1) / (n-1)!}
else {
num = 1;
for (int k = n; k > 1; k--)
num = num * (m + k - 1);
den = factorial(n - 1);
}
return (num / den);
}
//returns the number of all possible monomials in n variables with degree <= m
int monomial_combinations(int n, int m) {
int result = 0;
	//result = sum (for j from 0 to m) of {(j+n-1)! / (j!*(n-1)!)}
for (int j = 0; j <= m; j++)
result += combination(n, j);
return result;
}
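/* Illustrative example (not in the original source): with n = 3 variables and maximum degree
   m = 2, combination(3,0) = 1, combination(3,1) = 3 and combination(3,2) = 6
   (x^2, y^2, z^2, x*y, x*z, y*z), so monomial_combinations(3,2) = 1 + 3 + 6 = 10. */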
void allocation(int **matrix, int *row, int *col, int *numero_variabili, char **variabili, int *tipo_ordinamento, int *modulo, int *max_degree, FILE *input_file) {
	/*
	Reads the following information from the input:
	- modulus of the coefficients
	- maximum degree
	- number of starting polynomials
	- type of monomial ordering
	- variables used in the polynomials
	with this information it allocates the main matrix (the matrix that will contain the polynomials) and determines the number of variables used.
	*/
fscanf(input_file, "%d", modulo); //leggo il modulo
fgetc(input_file);
fscanf(input_file, "%d", max_degree); //leggo il grado massimo
fgetc(input_file);
fscanf(input_file, "%d", row); //leggo numero dei polinomi di partenza
fgetc(input_file);
fscanf(input_file, "%d", tipo_ordinamento); //leggo tipo di ordinamento
fgetc(input_file);
int i, j, k, pos_pol, num_pol;
char c;
i = 0;
pos_pol = 0;
*variabili = (char *)malloc(sizeof(char));
c = fgetc(input_file);
while (c != '\n') {
(*variabili)[i] = c;
i++;
(*numero_variabili)++;
*variabili = (char *)realloc(*variabili, (i + 1) * sizeof(char));
c = fgetc(input_file);
}
*col = monomial_combinations(*numero_variabili, *max_degree);
*matrix = (int *)calloc((*row) * (*col), sizeof(int));
}
void print_matrix(int *matrix, int row, int col, FILE *output_file) {
for (int x = 0; x < row; x++) {
for (int y = 0; y < col; y++) {
fprintf(output_file, "%d ", matrix[ (x*col) + y]);
}
fprintf(output_file, "\n\n\n");
}
fprintf(output_file, "\n");
}
//compares two monomials in *arg variables according to the grevlex ordering
//returns a positive integer if monom1 > monom2, zero if they are equal, a negative one otherwise
//monomials are always represented as arrays whose length equals the number of variables
//several casts are needed so that the argument types are compatible with qsort_r
int grevlex_comparison(const void *monom1, const void *monom2, void *arg) {
int degree1 = 0, degree2 = 0, n, *mon1, *mon2;
n = *((int *)arg);
mon1 = *((int **)monom1);
mon2 = *((int **)monom2);
	//compute the degrees of the monomials
for (int v = 0; v < n; v++) {
degree1 += mon1[v];
degree2 += mon2[v];
}
if (degree1 > degree2)
return 1;
else if (degree1 < degree2)
return -1;
	//if the degree is equal, look at the last non-zero entry
	//of the array obtained by subtracting the two monomials
else {
int *temp = (int *)malloc(n * sizeof(int));
int result;
for (int v = 0; v < n; v++)
temp[v] = mon1[v] - mon2[v];
for (int v = (n - 1); v >= 0; v--) {
if (temp[v] != 0) {
result = -temp[v];
free(temp);
				//to avoid calling free twice on the pointer, set it to NULL after the free
temp = NULL;
return result;
}
}
free(temp);
}
return 0;
}
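/* Illustrative example (not in the original source), n = 3 with variables (x, y, z):
   comparing x*z = [1,0,1] with y^2 = [0,2,0] the total degrees tie (2 vs 2); the difference
   is [1,-2,1], whose last non-zero entry is +1, so the function returns -1 and x*z is
   considered smaller than y^2 under grevlex. */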
//compares two monomials in *arg variables according to the grevlex ordering
//returns a positive integer if monom1 > monom2, zero if they are equal, a negative one otherwise
//monomials are always represented as arrays whose length equals the number of variables
//several casts are needed so that the argument types are compatible with qsort_r
int grevlex_comparison_mcvs(void *arg, const void *monom1, const void *monom2) {
int degree1 = 0, degree2 = 0, n, *mon1, *mon2;
n = *((int *)arg);
mon1 = *((int **)monom1);
mon2 = *((int **)monom2);
	//compute the degrees of the monomials
for (int v = 0; v < n; v++) {
degree1 += mon1[v];
degree2 += mon2[v];
}
if (degree1 > degree2)
return 1;
else if (degree1 < degree2)
return -1;
	//if the degree is equal, look at the last non-zero entry
	//of the array obtained by subtracting the two monomials
else {
int *temp = (int *)malloc(n * sizeof(int));
int result;
for (int v = 0; v < n; v++)
temp[v] = mon1[v] - mon2[v];
for (int v = (n - 1); v >= 0; v--) {
if (temp[v] != 0) {
result = -temp[v];
free(temp);
				//to avoid calling free twice on the pointer, set it to NULL after the free
temp = NULL;
return result;
}
}
free(temp);
}
return 0;
}
int order(int(**ord) (void*, const void *, const void *), int n) {
	//initializes the pointer ord to the appropriate comparison function. The number n selects which function to use.
switch (n) {
case 0:
*ord = grevlex_comparison_mcvs;
return 0;
break;
default:
return -1;
break;
}
}
//n mod p
//Reduction of n modulo p.
long long mod(long long n, long long p) {
long long v = n, x = 0;
if (v >= p) {
v = n % p;
}
else {
if (v < 0) {
x = n / p;
v = n - (x*p);
v += p;
}
}
return v;
}
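/* Illustrative example (not in the original source): mod(-5, 7) = 2 and mod(10, 7) = 3,
   so every coefficient is always brought back into the range [0, p). */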
//https://git.devuan.org/jaretcantu/eudev/commit/a9e12476ed32256690eb801099c41526834b6390
//missing from the stdlib, counterpart of qsort_r
//performs a binary search for key in the array base of length nmemb, whose elements
//have size size, and returns a pointer to the element equal to key if present, NULL otherwise.
//compar is the comparison function used to compare key with the elements of base
//arg is the third argument of compar
void *bsearch_r(const void *key, const void *base, size_t nmemb, size_t size,
int(*compar) (void *, const void *, const void *),
void *arg) {
size_t l, u, idx;
const void *p;
int comparison;
l = 0;
u = nmemb;
while (l < u) {
idx = (l + u) / 2;
p = (void *)(((const char *)base) + (idx * size));
comparison = compar(arg, key, p);
if (comparison < 0)
u = idx;
else if (comparison > 0)
l = idx + 1;
else
return (void *)p;
}
return NULL;
}
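/* Illustrative usage (mirrors the call made in parse() below, names are those of the
   surrounding code): given the array monomi of len monomials sorted with
   grevlex_comparison_mcvs, the column of a monomial grade is recovered as

	int **found = (int **)bsearch_r((void *)&grade, (void *)monomi, len,
	                                sizeof(int *), grevlex_comparison_mcvs, &numero_variabili);
	int col = found - monomi;   // NULL (monomial not found) is not checked, as in the original code
*/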
/* mon: string representing a monomial (no string terminator character)
 * len: number of characters in mon
 * val: variable where the coefficient of the monomial is stored
 * num_var: number of variables in the system
 * vet: array of characters where each character is a variable (previously read from the input)
 * grade: array where the degrees of the variables are stored, following the order of vet
 * module: field over which the system is represented
 */
int parse_mon(char * mon, int len, int * val, int num_var, char *vet, int *grade, int module) {
	//parse the coefficient first
int index = 0;
	//if the first character is a letter (a variable) the coefficient is 1
if (isalpha(mon[index]))
*val = 1;
	//otherwise read the coefficient
else {
		//if it is neither a letter nor a digit the input format is wrong
if (!isdigit(mon[index]))
return -1;
char *coefficient = (char *)malloc(sizeof(char));
while (index < len && isdigit(mon[index])) {
coefficient = (char *)realloc(coefficient, (index + 1) * sizeof(char));
coefficient[index] = mon[index];
index++;
}
		//append the terminating character
coefficient = (char *)realloc(coefficient, (index + 1) * sizeof(char));
coefficient[index] = '\0';
		//convert the coefficient to a numeric value and reduce it modulo module
*val = mod(atoll(coefficient), module);
free(coefficient);
}
	//assume degree zero for every variable, updated later on
for (int k = 0; k < num_var; k++)
grade[k] = 0;
	//parse the variables
char variable;
int variable_degree;
int variable_index;
int exponent_index;
char *exponent;
while (index < len) {
variable_index = -1;
variable_degree = 0;
		//skip the multiplication sign
if (mon[index] == '*' || mon[index] == ' ')
index++;
		//read the variable
if (index < len && isalpha(mon[index])) {
variable = mon[index];
			//look for the position of the variable in vet
for (int i = 0; i < num_var; i++)
if (vet[i] == mon[index]) {
variable_index = i;
					//if present, it has at least degree 1
variable_degree = 1;
break;
}
			//if the variable is not found in vet report an error
if (variable_index == -1)
return -1;
index++;
}
		//if the exponent character is present read the exponent
if (index < len && mon[index] == '^') {
index++;
exponent_index = 0;
			//if it is not a digit report an error
if (index > len || !isdigit(mon[index]))
return -1;
exponent = (char *)malloc(sizeof(char));
while (index < len && isdigit(mon[index])) {
exponent = (char *)realloc(exponent, (exponent_index + 1) * sizeof(char));
exponent[exponent_index] = mon[index];
exponent_index++;
index++;
}
			//append the string terminator character
exponent = (char *)realloc(exponent, (exponent_index + 1) * sizeof(char));
exponent[exponent_index] = '\0';
			//read the exponent
variable_degree = atoi(exponent);
free(exponent);
}
		//if there is a variable
if (variable_index != -1)
			//store the degree of the variable at the correct position in grade
grade[variable_index] = variable_degree;
}
return 0;
}
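/* Illustrative example (not in the original source): with num_var = 3, vet = {'x','y','z'}
   and module = 7, parsing the monomial "10*x^2*z" yields val = 10 mod 7 = 3 and
   grade = {2, 0, 1}. */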
int parse(int numero_variabili, char *variabili, int *matrix, int row, int **monomi, int len, int module, int(*ord) (void*, const void *, const void *), FILE *input_file) {
	/*
	Parses the starting polynomials as follows.
	One monomial is read at a time.
	The monomial is decomposed by the function parse_mon.
	The coefficient of the monomial is stored in the main matrix (coefficient matrix) at the correct position.
	The correct position is given by vet_grd.
	All monomials of all the starting polynomials are read.
	In case of a format error in the input the function stops and returns the error code -1.
	*/
int pos_pol = 0, i, col;
char c, *mon;
int cof = 0;
c = fgetc(input_file);
int linear_index = 0;
int *grade;
	//until the end of the file is reached or the declared number of polynomials has been read
while (c != EOF && pos_pol < row) {
mon = (char *)malloc(sizeof(char));
grade = (int *)calloc(numero_variabili, sizeof(int));
i = 0;
while (c != '+' && c != EOF && c != '\n') {
mon = (char *)realloc(mon, (i + 1) * sizeof(char));
mon[i] = c;
i++;
c = fgetc(input_file);
}
		//if nothing was stored in mon (i = 0) skip the parsing
if (i != 0 && parse_mon(mon, i, &cof, numero_variabili, variabili, grade, module) == -1) {
return -1;
}
		//insert the monomial at the correct position
col = int((int **)(bsearch_r((void *)&grade, (void *)monomi, len, (sizeof(int*)), ord, &numero_variabili)) - monomi);
linear_index = (pos_pol * len) + col;
matrix[linear_index] = cof;
if (c == '\n') {
pos_pol++;
}
free(mon);
free(grade);
c = fgetc(input_file);
cof = 0;
}
return 0;
}
int init_matrix(int *matrix, int row, int col, int **vet_grd, char *variabili, int num_var, int(*ord) (void*, const void *, const void *), FILE *input_file) {
	//Initializes the main (coefficient) matrix with the coefficients of the polynomials given as input.
return parse(num_var, variabili, matrix, row, vet_grd, col, module, ord, input_file);
}
void setup_struct_map(struct map *map, int **monomi, int len, int n, int m, int (*compar) (void*, const void *, const void *) ){
int sum, index=len;
	// initialize the map structure, the map has len rows.
map->len = len;
map->row = (map_row *)malloc( map->len * sizeof(struct map_row) );
	//for each monomial in vet
int row, col, i, v;
for (row = 0; row < len; row++){
index = 0;
		//declared inside the loop for parallelization
int *temp = (int *)malloc(n * sizeof(int));
int *save = (int *)calloc(len, sizeof(int));
		//try to multiply it by every monomial in vet
for (col = 0; col < len; col++) {
sum = 0;
			//perform the product (sum is the sum of the degrees)
for (v = 0; v < n; v++) {
temp[v] = monomi[row][v] + monomi[col][v];
sum += temp[v];
}
			//if the degree of the product is > the maximum degree, all the remaining products
			//on this row are > the maximum degree as well
if (sum > m) {
				// at this point col is the index of the first element of the map that cannot be represented, so row "row" only has col cells and not len as before.
index = col;
break;
}
			//otherwise look up the product in vet and store its index in save
else{
save[col] = (int **)(bsearch_r((void *) &temp, (void *) monomi, len, (sizeof(int*)), compar, &n)) - monomi;
}
}
		// once the loop over the columns is done the structure can be initialized, because all the elements to insert are known
		// the current row has exactly index elements different from -1, so the row will have a length equal to the previously computed index
		// allocate the row as an array of index elements
map->row[row].len = index;
map->row[row].col = (int *)malloc( map->row[row].len * sizeof(int) );
		// now the generated elements have to be copied into the structure
for(i=0; i<map->row[row].len; i++)
map->row[row].col[i] = save[i];
free(temp);
free(save);
}
}
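/* Illustrative note (not in the original source, and it assumes monomi is already sorted with
   grevlex, as bsearch_r requires): map.row[i].col[j] holds the index, inside the sorted monomial
   array, of the product monomi[i] * monomi[j]; a row is truncated at the first product whose
   degree exceeds the maximum, which is why map.row[i].len can be smaller than len.
   For n = 2, m = 2 the sorted monomials are {1, y, x, y^2, x*y, x^2}; row 1 (the monomial y)
   gets len = 3 and col = {1, 3, 4}, i.e. y*1 = y, y*y = y^2, y*x = x*y. */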
void init_degree_vector(int *degree, int num_var){
	//initializes the vector degree with the number of monomials of each degree i <= the maximum degree
int i,j,c;
for(i=0; i<max_degree+1; i++){
c = combination(num_var,i);
degree[i] = c;
}
}
int grado_monomio(int posizione, int **vet, int num_var){
	//Computes the degree of the monomial from the position it occupies in the (sorted) array of positions for the chosen ordering.
	//(the position must be correct).
int i,grado;
grado = 0;
for(i=0; i<num_var; i++){
grado += vet[posizione][i];
}
return grado;
}
void matrix_degree(int *m, int row, int col, int *m_deg, int **vet, int num_var){
	//m_deg is a vector whose length equals the maximum degree.
	//the function computes the degrees of the polynomials present in the matrix.
	//Each cell of the vector m_deg represents a degree: it is set to 1 if that degree appears in the matrix, 0 otherwise.
int i,j,last,grado, linear_index = 0;
for(i=0; i<row; i++){
for(j=col-1; j>0; j--){
linear_index = i*col + j;
if( m[linear_index] != 0 ){
				last = j; //position of the last coefficient of the row
break;
}
}
grado = grado_monomio(last, vet, num_var);
m_deg[grado] = 1;
}
}
void moltiplica_matrice(int **m, int *row, int col, struct map map, int * degree, int **vet, int num_var, int expansion_degree){
int riga;
int grado_massimo_riga, grado_massimo_monomio,i,j,last,new_row = 0;
last = -1;
int linear_index = 0;
long long total_dim = 0;
int *last_index = (int*)calloc(*row, sizeof(int));
int *numero_polinomi = (int*)calloc(*row, sizeof(int));
int numero_nuovi_polinomi = 0;
for(riga=0; riga<*row; riga++){
for(i=col-1; i>0; i--){
linear_index = riga * col + i;
if( (*m)[linear_index] != 0 ){ //(*m)[riga][i] != 0
last = i;
break;
}
}
		//get the degree of the monomial just found
		//scan the list of starting positions of the monomials with the same degree
last_index[riga] = last;
if( last != -1 ){
grado_massimo_riga = grado_monomio(last,vet,num_var);
			//compute the maximum degree a multiplier monomial can have
grado_massimo_monomio = max_degree - grado_massimo_riga;
			// at this point we know how many monomials we can multiply by, and therefore
			// the number of rows that have to be added to the matrix
if( expansion_degree != 0 ){
if( grado_massimo_monomio > expansion_degree ){
grado_massimo_monomio = expansion_degree;
}
}
for(i=1; i<(grado_massimo_monomio+1); i++){
new_row += degree[i];
numero_nuovi_polinomi += degree[i];
}
numero_polinomi[riga] = numero_nuovi_polinomi;
numero_nuovi_polinomi = 0;
}
}
//--------------------------------------------------------------
//printf("nuove righe %d, totale righe %d", new_row, (*row+new_row) );
	//resize the matrix
total_dim = (*row * col) + (new_row * col);
*m = (int *)realloc( *m , total_dim * sizeof(int) );
	//zero out the new rows
for(i=*row; i<*row+new_row; i++){
for(j=0; j<col; j++){
(*m)[i*col+j] = 0;
}
}
int len = *row;
for(riga=0; riga<len; riga++){
if( last_index[riga] != -1 ){
			for(i=1; i<(numero_polinomi[riga]+1); i++){ //loop over all the monomials we can multiply by
				for(j=0; j<(last_index[riga]+1); j++){ //loop up to the last element of the row
					//(*m)[*row][ map.row[i].col[j] ] = (*m)[riga][j]; //shift to the correct position indicated by the map
linear_index = *row * col + map.row[i].col[j];
(*m)[linear_index] = (*m)[riga*col+j];
}
				*row = *row + 1; //increase the row count
}
}
}
free(last_index);
free(numero_polinomi);
}
void moltiplica_riga_forn(int **m, int *row, int col, int riga, struct map map, int * degree, int **vet, int num_var, int stop_degree){
int grado_massimo_riga, grado_massimo_monomio,i,j,last,new_row;
last = -1;
int linear_index = 0;
long long total_dim = 0;
	//find the position of the last non-zero coefficient of the polynomial stored in this row.
for(i=col-1; i>0; i--){
linear_index = riga * col + i;
if( (*m)[linear_index] != 0 ){ //(*m)[riga][i] != 0
last = i;
break;
}
}
	//get the degree of the monomial just found
	//scan the list of starting positions of the monomials with the same degree
if( last != -1 ){
grado_massimo_riga = grado_monomio(last,vet,num_var);
		//compute the maximum degree a multiplier monomial can have
grado_massimo_monomio = max_degree - grado_massimo_riga;
		// at this point we know how many monomials we can multiply by, and therefore
		// the number of rows that have to be added to the matrix
new_row = 0;
for(i=1; i<(grado_massimo_monomio+1); i++){
new_row += degree[i];
}
total_dim = (*row * col) + (new_row * col);
*m = (int *)realloc( *m , total_dim * sizeof(int) );
		//zero out the new rows
for(i=*row; i<*row+new_row; i++){
for(j=0; j<col; j++){
(*m)[i*col+j] = 0;
}
}
		for(i=1; i<(new_row+1); i++){ //loop over all the monomials we can multiply by
			for(j=0; j<(last+1); j++){ //loop up to the last element of the row
				//(*m)[*row][ map.row[i].col[j] ] = (*m)[riga][j]; //shift to the correct position indicated by the map
linear_index = *row * col + map.row[i].col[j];
(*m)[linear_index] = (*m)[riga*col+j];
}
			*row = *row + 1; //increase the row count
}
}
}
//Swaps two rows of the matrix m.
__device__ void swap_rows_GPU(int *m, int row, int col, int j, int i){
int k;
long long tmp;
if( j!=i ){
for(k=0;k<col;k++){
tmp = m[i*col+k]; //m[i][k];
m[i*col+k] = m[j*col+k]; //m[i][k] = m[j][k];
m[j*col+k] = tmp; //m[j][k] = tmp;
}
}
}
//n mod p
//Reduction of n modulo p.
__device__ int mod_long_GPU(long long n, long long p) {
long long v = n, x = 0;
if (v >= p) {
v = n % p;
}
else {
if (v < 0) {
x = n / p;
v = n - (x*p);
v += p;
}
}
int r = v;
return r;
}
//n mod p
//Reduction of n modulo p.
__device__ int mod_GPU(int n, int p) {
int v = n, x = 0;
if (v >= p) {
v = n % p;
}
else {
if (v < 0) {
x = n / p;
v = n - (x*p);
v += p;
}
}
return v;
}
//multiplicative inverse of n modulo p (with p prime).
__device__ int invers_GPU(int n, int p){
int b0 = p, t, q;
int x0 = 0, x1 = 1;
if (p == 1) return 1;
while (n > 1) {
q = n / p;
t = p, p = (n % p), n = t;
t = x0, x0 = x1 - q * x0, x1 = t;
}
if (x1 < 0) x1 += b0;
return x1;
}
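/* Illustrative example (not in the original source): invers_GPU(3, 7) = 5, since
   3 * 5 = 15 and 15 mod 7 = 1; the kernels multiply by this inverse (via mul_mod_GPU below)
   to scale the pivot row before subtracting it from the other rows. */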
// a + b mod p
//sum of a and b modulo p
__device__ int add_mod_GPU(int a, int b, int p){
return mod_GPU((a+b),p);
}
// a - b mod p
//subtraction of a and b modulo p
__device__ int sub_mod_GPU(int a, int b, int p){
long long aa,bb;
aa = a;
bb = b;
return mod_long_GPU((aa-bb),p);
}
// a * b mod p
//product of a and b modulo p
__device__ int mul_mod_GPU(int a, int b, int p){
long long aa,bb;
aa = a;
bb = b;
return mod_long_GPU((aa*bb),p);
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*
Optimizations
	- branch divergence
Correctness
	- cell kernel (incorrect results)
Additions
	- solving for and printing the values of the unknowns
Tests
	- performance for different grid and block sizes
*/
__global__ void kernel_riduzione_blocco_base(int *matrix, int row, int col, int module, int pivot_colonna, int inv, int pivot_riga, int thread_height, int block_dim){
int a = 0, s = 0;
	int col_index = blockIdx.x * blockDim.x + threadIdx.x; //column index in the original matrix for the current thread
int interation = 0;
if(col_index < pivot_colonna){
		int reached_row = ( pivot_riga + 1) + ((blockIdx.y + 1) * thread_height); //row reached by the current thread
		if(reached_row > row){
			interation = thread_height - (reached_row - row); //number of iterations when the matrix does not line up with the grid size
}else{
interation = thread_height;
}
int block_row_offset = (pivot_riga + 1) + (blockIdx.y * thread_height);
int row_offset = block_row_offset;
for(int i=0; i<interation; i++){
if( matrix[row_offset * col + pivot_colonna] != 0 ){
				s = mul_mod_GPU(inv, matrix[row_offset * col + pivot_colonna], module); //all threads on the same row compute the same result
a = mul_mod_GPU(s, matrix[pivot_riga * col + col_index], module);
matrix[row_offset * col + col_index] = sub_mod_GPU(matrix[row_offset * col + col_index], a, module);
}
row_offset ++;
}
}
}
__global__ void kernel_riduzione_blocco(int *matrix, int row, int col, int module, int pivot_colonna, int inv, int pivot_riga, int thread_height, int block_dim){
extern __shared__ int smem [];
int *smem_riga_pivot = (int*)smem;
int *smem_col_pivot = (int*)&smem_riga_pivot[block_dim];
int a = 0, s = 0, interation = 0;
	int col_index = blockIdx.x * blockDim.x + threadIdx.x; //column index in the original matrix for the current thread
//-------------
	//initialize smem with the pivot row
	smem_riga_pivot[threadIdx.x] = matrix[pivot_riga * col + col_index]; //each thread copies a single element of the row into shared memory, one thread per row cell
//------------
	//initialize smem with the pivot column
	//compute the number of cells (of the pivot column) each thread has to copy
int cell_to_copy = 1;
if(thread_height > blockDim.x){
cell_to_copy = thread_height / blockDim.x + 1;
}
int base_row = (pivot_riga + 1) + blockIdx.y * thread_height;
int index = 0;
	//copy the portion of the column into smem
for(int i=0; i<cell_to_copy; i++){
index = (threadIdx.x * cell_to_copy) + i;
if(base_row + index < row && index < thread_height){
smem_col_pivot[index] = matrix[(base_row + index) * col + pivot_colonna];
}
}
	//synchronize all the threads in the block so that smem is consistent
__syncthreads();
if(col_index < pivot_colonna){
		//compute the number of rows the thread has to iterate over, for the case where the matrix size does not line up with thread_height
		int reached_row = ( pivot_riga + 1) + ((blockIdx.y + 1) * thread_height); //row reached by the current thread
		if(reached_row > row){
			interation = thread_height - (reached_row - row); //sizes do not line up
		}else{
			interation = thread_height; //normal case
}
int row_offset = (pivot_riga + 1) + (blockIdx.y * thread_height);
for(int i=0; i<interation; i++){
int pivot_element = smem_col_pivot[i];
if( pivot_element != 0 ){
				s = mul_mod_GPU(inv, pivot_element, module); //all threads on the same row compute the same result
a = mul_mod_GPU(s, smem_riga_pivot[threadIdx.x], module);
matrix[row_offset * col + col_index] = sub_mod_GPU(matrix[row_offset * col + col_index], a, module);
}
row_offset ++;
}
}
}
__global__ void kernel_riduzione_riga_base(int *matrix, int row, int col, int module, int start, int pivot_colonna, int inv, int pivot_riga, int cell_per_thread){
int a = 0, s = 0;
int last_row = row - 1;
int row_index = (pivot_riga + 1) + (blockDim.x * blockIdx.x + threadIdx.x);
if(row_index >= start && row_index < row){
int row_linear_index = row_index * col + pivot_colonna;
if( matrix[row_linear_index] != 0 ){
s = mul_mod_GPU(inv,matrix[row_linear_index],module);
for(int k = 0; k < pivot_colonna+1; k++ ){
a = mul_mod_GPU(s,matrix[pivot_riga*col+k],module);
//a = mul_mod_GPU(s, smem[k], module);
matrix[row_index*col+k] = sub_mod_GPU(matrix[row_index*col+k],a,module);
}
}
}
}
__global__ void kernel_riduzione_riga(int *matrix, int row, int col, int module, int start, int pivot_colonna, int inv, int pivot_riga, int cell_per_thread){
extern __shared__ int smem[];
if( (threadIdx.x * cell_per_thread) <= pivot_colonna){
int row_offset = pivot_riga*col;
int thread_offset = threadIdx.x * cell_per_thread;
		//fill smem with the pivot row, each thread copies a portion of the row of size "cell_per_thread".
for(int i=0; i<cell_per_thread; i++){
if(thread_offset + i <= pivot_colonna){
smem[thread_offset + i] = matrix[row_offset + thread_offset + i];
}
}
}
__syncthreads();
int a = 0, s = 0;
int last_row = row - 1;
int row_index = (pivot_riga + 1) + (blockDim.x * blockIdx.x + threadIdx.x);
if(row_index >= start && row_index < row){
int row_linear_index = row_index * col + pivot_colonna;
if( matrix[row_linear_index] != 0 ){
s = mul_mod_GPU(inv,matrix[row_linear_index],module);
for(int k = 0; k < pivot_colonna+1; k++ ){
//a = mul_mod_GPU(s,matrix[pivot_riga*col+k],module);
a = mul_mod_GPU(s, smem[k], module);
matrix[row_index*col+k] = sub_mod_GPU(matrix[row_index*col+k],a,module);
}
}
}
}
__global__ void kernel_riduzione_cella(int *matrix, int row, int col, int module, int inv, int pivot_colonna, int pivot_riga){
int last_row = row - 1;
int starting_row = pivot_riga + 1;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y + starting_row;
	if( idx < pivot_colonna && idy < row && idy > pivot_riga){ //stop the threads before pivot_colonna to avoid overwriting the value needed for s
int div = matrix[idy*col+pivot_colonna];
if( div != 0 ){
int s = mul_mod_GPU(inv, div, module);
int a = mul_mod_GPU(s, matrix[pivot_riga*col+idx], module);
matrix[idy*col+idx] = sub_mod_GPU(matrix[idy*col+idx], a, module);
}
}
}
__global__ void gauss_kernel_celle(int *matrix, int row, int col, int module){
int pivot_riga = 0,r = 0,righe_trovate = 0,i,k;
int s,inv,a;
int flag=0,invarianti=0,flag2=0,tmp;
for(int pivot_colonna = col-1; pivot_colonna >= 0; pivot_colonna-- ){
r = righe_trovate;
while( r < row && matrix[r*col+pivot_colonna] == 0 ){ //m[r][pivot_colonna]
r++;
}
		// either the first row with a non-zero element at position (r, pivot_colonna) has been found, or no row has a non-zero element in column pivot_colonna
		if( r < row ){ //a non-zero value has been found
			if( r != righe_trovate ){
				swap_rows_GPU(matrix,row,col,righe_trovate,r); //move the row just found into the correct position
flag = 1;
}
pivot_riga = righe_trovate;
righe_trovate++;
			inv = invers_GPU(matrix[pivot_riga*col+pivot_colonna],module); //inverse of the element in m[r][pivot_colonna]
			//kernel for cell-wise reduction
int block_dim = 16;
dim3 threads(block_dim, block_dim, 1);
int block_size = 256;
int numero_righe = row - righe_trovate;
int grid_y = numero_righe/block_dim + 1;
int grid_x = col/block_dim + 1;
dim3 blocks(grid_x, grid_y,1);
hipLaunchKernelGGL(( kernel_riduzione_cella), dim3(blocks), dim3(threads), 0, 0, matrix, row, col, module, inv, pivot_colonna, pivot_riga);
hipDeviceSynchronize();
			//the whole column (pivot_colonna) has to be zeroed
for(int x = pivot_riga + 1; x < row; x++){
matrix[x*col+pivot_colonna] = 0;
}
}
}
}
__global__ void reset_pivot_col(int *matrix, int row, int col, int pivot_riga, int pivot_colonna, int thread_height, int block_dim){
int start_row = (pivot_riga + 1) + ( (blockIdx.x * (thread_height*block_dim)) + (threadIdx.x * thread_height));
int reached_row = (pivot_riga + 1) + ( (blockIdx.x * (thread_height*block_dim)) + ( (threadIdx.x + 1) * thread_height));
int iteration = thread_height;
if(reached_row > row){
iteration = thread_height - (reached_row - row);
if(iteration > thread_height){
iteration = 0;
}
}
for(int i=0; i<iteration; i++){
matrix[(start_row + i)*col+pivot_colonna] = 0;
}
}
__global__ void swap_rows(int *matrix, int row, int col, int j, int i){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid >= col){
return;
}
int ii = i*col+tid;
int jj = j*col+tid;
int tmp = matrix[ii];
matrix[ii] = matrix[jj];
matrix[jj] = tmp;
}
__global__ void find_pivot(int *matrix, int row, int col, int r, int pivot_colonna){
/*
while( r < row && matrix[r*col+pivot_colonna] == 0 ){
r++;
}
pointer_r = r;
*/
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int thread_row = r+tid;
if(thread_row >= row)
return;
if(matrix[thread_row*col+pivot_colonna] != 0){
atomicMin(&next_pivot_row, thread_row);
}
}
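/* Note added for clarity: find_pivot relies on the caller resetting the global next_pivot_row
   to row before the launch (see gauss_kernel_blocco / gauss_kernel_righe); every thread that
   sees a non-zero entry in the pivot column proposes its row index through atomicMin, so after
   the kernel next_pivot_row is either the first usable pivot row or row when the column has no
   non-zero entry from row r downwards. */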
__global__ void gauss_kernel_blocco(int *matrix, int row, int col, int module){
int pivot_riga = 0,r = 0,righe_trovate = 0,i,k;
int s,inv,a;
float total_time_for_reduction = 0, total_time_for_reset = 0.0;
double elapsed = 0.0;
clock_t start, stop;
int tick = 0;
int block_dim = 0;
int threads_per_block = 0;
int block_x_axis, block_y_axis = 0;
int *p;
for(int pivot_colonna = col-1; pivot_colonna >= 0; pivot_colonna-- ){
r = righe_trovate;
///////////////////////////FIND PIVOT///////////////////////////////////////////////
block_dim = 256;
int row_to_check = row - righe_trovate;
threads_per_block = ( row_to_check < block_dim ? row_to_check : block_dim);
dim3 t_find(threads_per_block);
if( threads_per_block == block_dim && row_to_check != block_dim){
block_x_axis = (row_to_check / block_dim) + 1;
}else{
block_x_axis = 1;
}
dim3 b_find(block_x_axis);
next_pivot_row = row;
hipLaunchKernelGGL(( find_pivot), dim3(b_find), dim3(t_find), 0, 0, matrix, row, col, r, pivot_colonna);
hipDeviceSynchronize();
r = next_pivot_row;
/////////////////////////////////////////////////////////////////////////////////
		// either the first row with a non-zero element at position (r, pivot_colonna) has been found, or no row has a non-zero element in column pivot_colonna
if( r < row ){
if( r != righe_trovate ){
////////////////////////SWAP ROWS////////////////////////////////////////////////////////
block_dim = 256;
threads_per_block = ( col < block_dim ? col : block_dim);
dim3 t_swap(threads_per_block);
if( threads_per_block == block_dim && col != block_dim){
block_x_axis = (col / block_dim) + 1;
}else{
block_x_axis = 1;
}
dim3 b_swap(block_x_axis);
				//move the row just found into the correct position
hipLaunchKernelGGL(( swap_rows), dim3(b_swap), dim3(t_swap), 0, 0, matrix, row, col, righe_trovate, r);
hipDeviceSynchronize();
////////////////////////////////////////////////////////////////////////////////////////
}
pivot_riga = righe_trovate;
righe_trovate++;
inv = invers_GPU(matrix[pivot_riga*col+pivot_colonna], module);
////////////////////////////////////////REDUCTION BY BLOCK////////////////////////////////////
block_dim = 128;
int col_to_reduce = pivot_colonna;
threads_per_block = ( col_to_reduce < block_dim ? col_to_reduce : block_dim);
dim3 threads(threads_per_block);
if( threads_per_block == block_dim && col_to_reduce != block_dim){
block_x_axis = (col_to_reduce / block_dim) + 1;
}else{
block_x_axis = 1;
}
int thread_height = 32;
int row_to_reduce = row - righe_trovate;
block_y_axis = (row_to_reduce / thread_height) + 1;
dim3 blocks(block_x_axis, block_y_axis);
int shared = (block_dim * sizeof(int)) + (thread_height * sizeof(int));
hipLaunchKernelGGL(( kernel_riduzione_blocco), dim3(blocks), dim3(threads), shared, 0, matrix, row, col, module, pivot_colonna, inv, pivot_riga, thread_height, block_dim);
hipDeviceSynchronize();
//////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////RESET PIVOT COL////////////////////////////////////////
thread_height = 50;
block_dim = 32;
row_to_reduce = row-pivot_riga;
threads_per_block = (row_to_reduce < thread_height ? 1 : block_dim);
block_x_axis = (threads_per_block == block_dim && row_to_reduce != block_dim) ? (row_to_reduce/(thread_height*block_dim)+1) : 1;
dim3 t(threads_per_block);
dim3 b(block_x_axis);
hipLaunchKernelGGL(( reset_pivot_col), dim3(b), dim3(t), 0, 0, matrix, row, col, pivot_riga, pivot_colonna, thread_height, block_dim);
hipDeviceSynchronize();
//////////////////////////////////////////////////////////////////////////////////////
}
}
}
__global__ void gauss_kernel_blocco_base(int *matrix, int row, int col, int module){
int pivot_riga = 0,r = 0,righe_trovate = 0,i,k;
int s,inv,a;
float total_time_for_reduction = 0, total_time_for_reset = 0.0;
double elapsed = 0.0;
clock_t start, stop;
int tick = 0;
int block_dim = 0;
int threads_per_block = 0;
int block_x_axis, block_y_axis = 0;
int *p;
for(int pivot_colonna = col-1; pivot_colonna >= 0; pivot_colonna-- ){
r = righe_trovate;
///////////////////////////FIND PIVOT///////////////////////////////////////////////
while( r < row && matrix[r*col+pivot_colonna] == 0 ){ //m[r][pivot_colonna]
r++;
}
/////////////////////////////////////////////////////////////////////////////////
		// either the first row with a non-zero element at position (r, pivot_colonna) has been found, or no row has a non-zero element in column pivot_colonna
if( r < row ){
if( r != righe_trovate ){
////////////////////////SWAP ROWS////////////////////////////////////////////////////////
swap_rows_GPU(matrix,row,col,righe_trovate,r);
////////////////////////////////////////////////////////////////////////////////////////
}
pivot_riga = righe_trovate;
righe_trovate++;
inv = invers_GPU(matrix[pivot_riga*col+pivot_colonna], module);
////////////////////////////////////////REDUCTION BY BLOCK////////////////////////////////////
block_dim = 128;
int col_to_reduce = pivot_colonna;
threads_per_block = ( col_to_reduce < block_dim ? col_to_reduce : block_dim);
dim3 threads(threads_per_block);
if( threads_per_block == block_dim && col_to_reduce != block_dim){
block_x_axis = (col_to_reduce / block_dim) + 1;
}else{
block_x_axis = 1;
}
int thread_height = 256;
int row_to_reduce = row - righe_trovate;
block_y_axis = (row_to_reduce / thread_height) + 1;
dim3 blocks(block_x_axis, block_y_axis);
hipLaunchKernelGGL(( kernel_riduzione_blocco_base), dim3(blocks), dim3(threads), 0, 0, matrix, row, col, module, pivot_colonna, inv, pivot_riga, thread_height, block_dim);
hipDeviceSynchronize();
//////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////RESET PIVOT COL////////////////////////////////////////
			//the whole column (pivot_colonna) has to be zeroed
for(int x = pivot_riga + 1; x < row; x++){
matrix[x*col+pivot_colonna] = 0;
}
//////////////////////////////////////////////////////////////////////////////////////
}
}
}
__global__ void gauss_kernel_righe(int *matrix, int row, int col, int module){
int pivot_riga = 0,r = 0,righe_trovate = 0,i,k;
int s,inv,a;
int flag=0,invarianti=0,flag2=0,tmp;
int block_dim = 0;
int threads_per_block = 0;
int block_x_axis, block_y_axis = 0;
for(int pivot_colonna = col-1; pivot_colonna >= 0; pivot_colonna-- ){
r = righe_trovate;
/////////////////////////////////////
block_dim = 256;
int row_to_check = row - righe_trovate;
threads_per_block = ( row_to_check < block_dim ? row_to_check : block_dim);
dim3 t_find(threads_per_block);
if( threads_per_block == block_dim && row_to_check != block_dim){
block_x_axis = (row_to_check / block_dim) + 1;
}else{
block_x_axis = 1;
}
dim3 b_find(block_x_axis);
next_pivot_row = row;
hipLaunchKernelGGL(( find_pivot), dim3(b_find), dim3(t_find), 0, 0, matrix, row, col, r, pivot_colonna);
hipDeviceSynchronize();
r = next_pivot_row;
///////////////////////////////////
		if( r < row ){ //a non-zero value has been found
if( r != righe_trovate ){
block_dim = 256;
threads_per_block = ( col < block_dim ? col : block_dim);
dim3 t_swap(threads_per_block);
if( threads_per_block == block_dim && col != block_dim){
block_x_axis = (col / block_dim) + 1;
}else{
block_x_axis = 1;
}
dim3 b_swap(block_x_axis);
				//move the row just found into the correct position
hipLaunchKernelGGL(( swap_rows), dim3(b_swap), dim3(t_swap), 0, 0, matrix, row, col, righe_trovate, r);
hipDeviceSynchronize();
}
pivot_riga = righe_trovate;
righe_trovate++;
			inv = invers_GPU(matrix[pivot_riga*col+pivot_colonna],module); //inverse of the element in m[r][pivot_colonna]
			int block_dim = 1024;
			//kernel for row-wise reduction
int numero_righe = row - righe_trovate;
int t = (numero_righe < block_dim ? numero_righe : block_dim);
int b = 1;
if( t == block_dim && numero_righe != block_dim ){
b = numero_righe / block_dim + 1;
}
dim3 threads(t);
dim3 blocks(b);
int pivot_length = pivot_colonna + 1;
int cell_per_thread = ( t >= pivot_length ) ? 1 : ( pivot_length / t) + 1;
int shared_mem = pivot_length * sizeof(int);
hipLaunchKernelGGL(( kernel_riduzione_riga), dim3(blocks), dim3(threads), shared_mem, 0, matrix, row, col, module, righe_trovate, pivot_colonna, inv, pivot_riga, cell_per_thread);
hipDeviceSynchronize();
}
}
}
__global__ void gauss_kernel_righe_base(int *matrix, int row, int col, int module){
int pivot_riga = 0,r = 0,righe_trovate = 0,i,k;
int s,inv,a;
int flag=0,invarianti=0,flag2=0,tmp;
int block_dim = 0;
int threads_per_block = 0;
int block_x_axis, block_y_axis = 0;
for(int pivot_colonna = col-1; pivot_colonna >= 0; pivot_colonna-- ){
r = righe_trovate;
while( r < row && matrix[r*col+pivot_colonna] == 0 ){ //m[r][pivot_colonna]
r++;
}
// either we found the first row with a non-zero element at (r, pivot_colonna), or no row has a non-zero element in column pivot_colonna
if( r < row ){ //a non-zero entry was found
if( r != righe_trovate ){
swap_rows_GPU(matrix,row,col,righe_trovate,r); //move the row just found into its correct position
flag = 1;
}
pivot_riga = righe_trovate;
righe_trovate++;
inv = invers_GPU(matrix[pivot_riga*col+pivot_colonna],module); //multiplicative inverse of the element in m[pivot_riga][pivot_colonna]
int block_dim = 1024;
//row-reduction kernel
int numero_righe = row - righe_trovate;
int t = (numero_righe < block_dim ? numero_righe : block_dim);
int b = 1;
if( t == block_dim && numero_righe != block_dim ){
b = numero_righe / block_dim + 1;
}
dim3 threads(t);
dim3 blocks(b);
int pivot_length = pivot_colonna + 1;
int cell_per_thread = ( t >= pivot_length ) ? 1 : ( pivot_length / t) + 1;
hipLaunchKernelGGL(( kernel_riduzione_riga_base), dim3(blocks), dim3(threads), 0, 0, matrix, row, col, module, righe_trovate, pivot_colonna, inv, pivot_riga, cell_per_thread);
hipDeviceSynchronize();
}
}
}
double gauss_GPU(int *m, int row, int col, int module){
int matrix_length = row * col;
int matrix_length_bytes = matrix_length * sizeof(int);
clock_t start, end;
double elapsed = 0.0;
int *m_d;
gpuErrchk(hipMalloc( (void **) &m_d, matrix_length_bytes));
gpuErrchk(hipMemcpy(m_d, m, matrix_length_bytes, hipMemcpyHostToDevice));
start = clock();
hipLaunchKernelGGL(( gauss_kernel_righe_base), dim3(1),dim3(1), 0, 0, m_d, row, col, module);
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipMemcpy(m, m_d, matrix_length_bytes, hipMemcpyDeviceToHost));
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
gpuErrchk(hipFree(m_d));
return elapsed;
}
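/*
 * Illustrative sketch (not part of the original flow): the clock() interval measured in
 * gauss_GPU above also includes the device-to-host copy. Assuming one wanted to time the
 * kernel alone, HIP events could be used as below; ev_start, ev_stop and kernel_ms are
 * hypothetical names.
 */
#if 0
float kernel_ms = 0.0f;
hipEvent_t ev_start, ev_stop;
hipEventCreate(&ev_start);
hipEventCreate(&ev_stop);
hipEventRecord(ev_start, 0);
hipLaunchKernelGGL(( gauss_kernel_righe_base), dim3(1),dim3(1), 0, 0, m_d, row, col, module);
hipEventRecord(ev_stop, 0);
hipEventSynchronize(ev_stop);
hipEventElapsedTime(&kernel_ms, ev_start, ev_stop); //elapsed kernel time in milliseconds
hipEventDestroy(ev_start);
hipEventDestroy(ev_stop);
#endif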
int null_rows(int *m, int row, int col){
//counts the number of all-zero rows in matrix m.
int i,j,last,null_rows;
null_rows = 0;
for(i=0; i<row; i++){
last = -1;
for(j=col-1; j>-1; j--){
if(m[i*col+j] != 0 ){
last = j;
break;
}
}
if( last == -1 )
null_rows++;
}
return null_rows;
}
void eliminate_null_rows(int **m, int *row, int col){
//Removes the all-zero rows from matrix m.
//N.B. this procedure drops the trailing null rows of the matrix.
//It MUST be called after Gaussian reduction, which moves every null row to the bottom of the matrix.
//Calling it before Gauss may discard rows that are not null.
int null_row = null_rows(*m,*row,col);
int new_rows = *row - null_row;
if(null_row != 0){
*m = (int *)realloc( *m , (new_rows*col) * sizeof (int));
*row = new_rows;
}
}
void print_matrix_degree(int *m_deg, FILE *output_file){
//prints the degree vector of the matrix.
int i;
fprintf(output_file, "Gradi della matrice = {");
for(i=0; i<max_degree+1; i++)
if( m_deg[i] != 0 ) fprintf(output_file, " %d ",i);
fprintf(output_file, "}\n");
}
int target_degree(int *v){
//Checks whether vector v satisfies the termination condition, i.e. all degrees {1,2,3,...,max_degree} are present.
//Returns 0 if the condition is satisfied, -1 otherwise.
int i,flag;
flag = 0;
for(i=1; i<max_degree+1; i++){
if( v[i] != 1 ){
flag = -1;
break;
}
}
return flag;
}
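/*
 * Worked example: with max_degree = 3, a degree vector v = {v[0], 1, 1, 1} makes
 * target_degree return 0 (degrees 1, 2 and 3 all appear), while v = {v[0], 1, 0, 1}
 * returns -1 because degree 2 is missing; v[0] is ignored since the loop starts at 1.
 */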
void execute_standard(int **matrix, int * row, int col, struct map map, int *degree, int **monomi, int numero_variabili, int n_loops, int expansion, FILE *output_file){
clock_t start, end;
double elapsed;
//allocate the array that will hold the degree vectors of each round
int **m_deg_array = (int **)malloc(sizeof(int*));
m_deg_array[0] = (int *)calloc(max_degree+1, sizeof(int));
int n_round = 0;
int *m_deg = m_deg_array[0];
int missing_degree = max_degree;
fprintf(output_file, "Inizio computazione, metodo standard\n");
matrix_degree(*matrix, *row, col, m_deg, monomi, numero_variabili);
int flag, old_v, new_v;
flag = old_v = new_v = 0;
old_v = *row;
//assume a normal expansion
int expansion_degree = max_degree;
int st = 0;
while( flag != 1 ){
n_round++;
fprintf(output_file, "\n -Eseguo moltiplicazione, ");
fflush(stdout);
start = clock();
//find missing degree to multiply matrix
for(int i=max_degree; i>0; i--){
if( m_deg[i] == 0 ){
missing_degree = i;
break;
}
}
moltiplica_matrice(matrix, row, col, map, degree, monomi, numero_variabili, missing_degree);
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "numero righe: %d (%f sec)", *row, elapsed);
fprintf(output_file, "\n -Eseguo Gauss, ");
fflush(stdout);
//start = clock();
//apply Gaussian reduction
elapsed = gauss_GPU(*matrix, *row, col, module);
//drop the null rows of the matrix
eliminate_null_rows(matrix, row, col);
//append the degrees of the current round to the array
//n_round+1 because the degrees computed before the first round are stored as well
m_deg_array = (int **)realloc(m_deg_array, sizeof(int*)*(n_round+1));
m_deg_array[n_round] = (int *)calloc(max_degree+1, sizeof(int));
m_deg = m_deg_array[n_round];
//end = clock();
//elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "numero righe: %d (%f sec)\n", *row, elapsed);
matrix_degree(*matrix,*row, col, m_deg, monomi, numero_variabili);
print_matrix_degree(m_deg, output_file);
new_v = *row;
st = new_v;
if( target_degree(m_deg) == 0 )
flag = 1;
else{
old_v = new_v;
}
for(int i=max_degree; i>0; i--){
if( m_deg[i] == 0 ){
expansion_degree = i;
break;
}
}
}
for (int i = 0; i < n_round+1; i++)
free(m_deg_array[i]);
free(m_deg_array);
}
void print_incognite(int *m, int row, int col, int num_var, int **vet, FILE *output_file){
int grado,last;
for(int r = row - (num_var+1); r<row; r++){
//find the position of the last non-zero element of row r
for( int i=col-1; i>=0; i-- ){
if( m[r*col+i] != 0 ){ //m[r][i] != 0
last = i;
break;
}
}
//compute the degree of row r
grado = grado_monomio(last,vet,num_var);
//if row r has degree 1, print the whole matrix row
if( grado == 1 ){
for( int j=0; j<last+1; j++ ){
fprintf(output_file, "%d ", m[r*col+j]); //m[r][j]
}
fprintf(output_file, "\n\n");
}
}
fprintf(output_file, "\n");
}
int main (int argc, char *argv[]){
FILE *input_file = NULL, *output_file = NULL;
//parse the command-line options for the input/output files
for (int parsed = 1; parsed < argc; parsed++) {
if (parsed < argc && !strcmp(argv[parsed], "--input")) {
parsed++;
input_file = fopen(argv[parsed], "r");
if (!input_file) {
perror("Errore nell'apertura del file di input");
return (-1);
}
}
else if (parsed < argc && !strcmp(argv[parsed], "--output")) {
parsed++;
output_file = fopen(argv[parsed], "w");
if (!output_file) {
perror("Errore nell'apertura del file di output");
return (-1);
}
}
}
if (!input_file)
input_file = stdin;
if (!output_file)
output_file = stdout;
/*
int peak_clk = 1;
hipError_t err = hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, 0);
if (err != hipSuccess) {printf("cuda err: %d at line %d\n", (int)err, __LINE__); return 1;}
printf("peak clock rate: %dkHz", peak_clk);
*/
int row, col, numero_variabili, tipo_ordinamento;
int *matrix;
char *variabili;
row = col = numero_variabili = 0;
int (*ord) (void*, const void *, const void *);
int *d_row, **map;
struct map smap;
clock_t start, end;
double elapsed = 0.0;
start = clock();
//allocate the main matrix; read from the input: the modulus, the maximum degree and the number of variables
allocation(&matrix, &row, &col, &numero_variabili, &variabili, &tipo_ordinamento, &module, &max_degree, input_file);
int matrix_lentght = row * col; //number of elements of the matrix
if( order(&ord, tipo_ordinamento) != 0 ){
fprintf(stderr, "Ordinamento insesistente!!!\n\nTERMINAZIONE PROGRAMMA");
return 0;
}
int * degree = (int *)calloc(max_degree+1, sizeof(int));
int numero_monomi = col;
int **monomi;
//build the vector of all possible monomials with num_var variables and maximum degree max_degree
monomi = monomial_computation(numero_variabili, max_degree, numero_monomi);
//sort the monomial vector according to the chosen (interchangeable) ordering
qsort_s(monomi, numero_monomi, sizeof(int*), ord, &numero_variabili);
//matrix initialization (reading the input data)
if (init_matrix(matrix, row, col, monomi, variabili, numero_variabili, ord, input_file) == -1) {
fprintf(stderr, "Errore di input !!!\n\nTERMINAZIONE PROGRAMMA"); //abort the program if the input is malformed
return 0;
}
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "\nInizializzazione in %f sec\n", elapsed);
start = clock();
setup_struct_map(&smap, monomi, numero_monomi, numero_variabili, max_degree, ord);
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "\nMappa creata in %f sec, %d x %d \n\n", elapsed, col, col);
//RISOLUZIONE PROBLEMA
start = clock();
//initialize the vector of polynomial degrees
init_degree_vector(degree, numero_variabili);
int n_loops = 30, expansion = 1;
//run multiplication and Gaussian reduction until a solution is found
execute_standard(&matrix, &row, col, smap, degree, monomi, numero_variabili, n_loops, expansion, output_file);
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "\nTarget raggiunto, soluzione trovata in %f sec\n\n", elapsed);
//print_matrix(matrix, row, col, output_file);
print_incognite(matrix, row, col, numero_variabili, monomi, output_file);
for(int i=0; i<row*col; i++){
if(matrix[i] > module){
printf("OVERFLOW\n");
}
}
free(matrix);
free(degree);
hipDeviceReset();
return 0;
}
| bdd6c0263ad59b7d1c0509963e9437e280a5b673.cu | #define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <time.h>
#include <omp.h>
#include <stdbool.h>
#include <time.h>
#include <cuda_runtime.h>
// compilazione nvcc gm.cu -o gm -w -Xcompiler " -openmp"
// nvcc gm.cu -o gm -w -Xcompiler " -openmp" -gencode arch=compute_61,code=sm_61 -lcudadevrt -rdc=true -O3
// nvcc gm.cu -o gm -w -Xcompiler " -openmp" -gencode arch=compute_52,code=sm_52 -lcudadevrt -rdc=true -O3
__device__ int next_pivot_row = 0;
//dichiarazione variabili globali
int max_degree = 0;
int module = 0;
struct map_row {
int len;
int *col;
};
struct map {
int len;
struct map_row *row;
};
//----------------------------------------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------------
void matrix_alloc_int(int ***m, int row, int col) {
//Allocazione di una matrice di tipo int con dimensioni indicate.
*m = (int **)malloc(row * sizeof(int *));
if (*m != NULL)
for (int i = 0; i<row; i++)
(*m)[i] = (int *)calloc(col, sizeof(int));
}
void matrix_free_int(int ***m, int row, int col) {
//Deallocazione di una matrice di tipo int con dimensioni indicate.
for (int i = 0; i<row; i++)
free((*m)[i]);
free(*m);
}
//copia il vettore vet2 in vet1, entrambi di lunghezza len
void vctcpy(int *vet1, const int *vet2, int len) {
for (int i = 0; i < len; i++)
vet1[i] = vet2[i];
return;
}
/*recursive function that computes all possible monomials with n variables and degree <= m
and stores them in the array vet. Monomials are represented as integer arrays where
the value at each position is the degree of the variable in that position.
Example: n=3, x^2*y*z = [2,1,1].
The array vet must already be correctly allocated. The remaining parameters support the
recursive structure of the function and on the first call must be:
- turn = 0, the position of the current variable inside the monomial
- monomial = already-allocated integer array of length n, used to build the monomials
- *pos = 0, pointer to an int holding the first free position in vet
*/
void monomial_computation_rec(int n, int m, int **vet, int turn, int *monomial, int *pos) {
//per ogni variabile provo tutti i gradi da 0 a m
for (int degree = 0; degree <= m; degree++) {
//se questa è la prima variabile azzero il monomio
if (turn == 0) {
//azzero il monomio lasciando solo il grado della prima variabile
monomial[0] = degree;
for (int v = 1; v < n; v++)
monomial[v] = 0;
}
//altrimenti le altre variabili aggiungo il proprio grado al monomio
else
monomial[turn] = degree;
//ottengo il grado del monomio sommando i gradi delle variabili
int sum = 0;
for (int v = 0; v <= turn; v++)
sum += monomial[v];
//se il grado del monomio supera quello massimo non ha senso continuare a cercare
//altri monomi partendo da questo, perchè tutti avranno grado maggiore o uguale
if (sum > m)
break;
//se questa è l'ultima variabile copia il monomio nell'array vet and incrementa l'indice pos
if (turn == (n - 1)) {
vctcpy(vet[(*pos)], monomial, n);
(*pos)++;
}
//altrimenti richiama se stessa cambiando la variabile (turn)
else
monomial_computation_rec(n, m, vet, turn + 1, monomial, pos);
}
return;
}
/*returns an array containing all len monomials with n variables and degree <= m
len is the number of possible monomials with n variables and degree <= m
monomials are integer arrays of length n where the value at each position is
the degree of the variable in that position. Example: n=3, x^2*y*z = [2,1,1]
len is passed as an argument to avoid recomputing it internally
*/
int **monomial_computation(int n, int m, int len) {
int **vet, *monomial;
//alloco la memoria per l'array
matrix_alloc_int(&vet,len,n);
//strutture di supporto necessarie per il calcolo
monomial = (int *)malloc(n * sizeof(int));
int pos = 0;
//il calcolo è fatto dalla funzione ricorsiva correttemente parametrizzata
monomial_computation_rec(n, m, vet, 0, monomial, &pos);
free(monomial);
return vet;
}
//calcola il fattoriale di n (se n è negativo return -1)
long long factorial(int n) {
long long k;
if (n<0) //se n è negativo non esiste il fattoriale
{
return -1; //codice di errore
}
else { //altrimenti calcolo il fattoriale
if (n == 0 || n == 1) {
return 1;
}
else {
k = 1;
for (int i = 2; i <= n; i++) {
k *= i;
}
return k;
}
}
}
//returns the number of possible monomials with n variables and degree exactly m
int combination(int n, int m) {
long long num, den;
//calcolo {(m+n-1)! / m!*(n-1)!}
//se n>=m semplificato a {(j+n-1)*(j+n-2)* ... *(n) / j!}
if (n >= m) {
num = 1;
for (int k = m; k > 0; k--)
num = num * (n + k - 1);
den = factorial(m);
}
//se m>n semplificato a {(j+n-1)*(j+n-2)* ... *(j) / (n-1)!}
else {
num = 1;
for (int k = n; k > 1; k--)
num = num * (m + k - 1);
den = factorial(n - 1);
}
return (num / den);
}
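/*
 * Worked example: combination(3, 2) computes num = (3+2-1)*(3+1-1) = 12 and den = 2! = 2,
 * returning 6, i.e. the six monomials of degree exactly 2 in three variables
 * (x^2, y^2, z^2, xy, xz, yz).
 */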
//returns the number of all possible monomials with n variables and degree <= m
int monomial_combinations(int n, int m) {
int result = 0;
//result = Sommatoria (per j da 1 a m) {(j+n-1)! / j!*(n-1)!}
for (int j = 0; j <= m; j++)
result += combination(n, j);
return result;
}
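/*
 * Worked example: monomial_combinations(3, 2) sums combination(3,0) + combination(3,1) +
 * combination(3,2) = 1 + 3 + 6 = 10, the number of monomials of degree <= 2 in three variables.
 */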
void allocation(int **matrix, int *row, int *col, int *numero_variabili, char **variabili, int *tipo_ordinamento, int *modulo, int *max_degree, FILE *input_file) {
/*
Reads the following information from the input:
- modulus of the coefficients
- maximum degree
- number of starting polynomials
- ordering type
- variables used in the polynomials
with this information it allocates the main matrix (the one that will hold the polynomials) and determines the number of variables used.
*/
fscanf(input_file, "%d", modulo); //leggo il modulo
fgetc(input_file);
fscanf(input_file, "%d", max_degree); //leggo il grado massimo
fgetc(input_file);
fscanf(input_file, "%d", row); //leggo numero dei polinomi di partenza
fgetc(input_file);
fscanf(input_file, "%d", tipo_ordinamento); //leggo tipo di ordinamento
fgetc(input_file);
int i, j, k, pos_pol, num_pol;
char c;
i = 0;
pos_pol = 0;
*variabili = (char *)malloc(sizeof(char));
c = fgetc(input_file);
while (c != '\n') {
(*variabili)[i] = c;
i++;
(*numero_variabili)++;
*variabili = (char *)realloc(*variabili, (i + 1) * sizeof(char));
c = fgetc(input_file);
}
*col = monomial_combinations(*numero_variabili, *max_degree);
*matrix = (int *)calloc((*row) * (*col), sizeof(int));
}
void print_matrix(int *matrix, int row, int col, FILE *output_file) {
for (int x = 0; x < row; x++) {
for (int y = 0; y < col; y++) {
fprintf(output_file, "%d ", matrix[ (x*col) + y]);
}
fprintf(output_file, "\n\n\n");
}
fprintf(output_file, "\n");
}
//compares two monomials in *arg variables according to the grevlex ordering
//returns a positive integer if monom1 > monom2, zero if they are equal, a negative one otherwise
//monomials are always represented as arrays whose length equals the number of variables
//several casts are needed so that the argument types are compatible with qsort_r
int grevlex_comparison(const void *monom1, const void *monom2, void *arg) {
int degree1 = 0, degree2 = 0, n, *mon1, *mon2;
n = *((int *)arg);
mon1 = *((int **)monom1);
mon2 = *((int **)monom2);
//calcolo i gradi dei monomi
for (int v = 0; v < n; v++) {
degree1 += mon1[v];
degree2 += mon2[v];
}
if (degree1 > degree2)
return 1;
else if (degree1 < degree2)
return -1;
//if the degrees are equal, look at the last non-zero entry
//of the array obtained by subtracting the two monomials
else {
int *temp = (int *)malloc(n * sizeof(int));
int result;
for (int v = 0; v < n; v++)
temp[v] = mon1[v] - mon2[v];
for (int v = (n - 1); v >= 0; v--) {
if (temp[v] != 0) {
result = -temp[v];
free(temp);
//per evitare di fare free due volte sul puntatore lo setto a NULL dopo la free
temp = NULL;
return result;
}
}
free(temp);
}
return 0;
}
//compares two monomials in *arg variables according to the grevlex ordering
//returns a positive integer if monom1 > monom2, zero if they are equal, a negative one otherwise
//monomials are always represented as arrays whose length equals the number of variables
//several casts are needed so that the argument types are compatible with qsort_r
int grevlex_comparison_mcvs(void *arg, const void *monom1, const void *monom2) {
int degree1 = 0, degree2 = 0, n, *mon1, *mon2;
n = *((int *)arg);
mon1 = *((int **)monom1);
mon2 = *((int **)monom2);
//calcolo i gradi dei monomi
for (int v = 0; v < n; v++) {
degree1 += mon1[v];
degree2 += mon2[v];
}
if (degree1 > degree2)
return 1;
else if (degree1 < degree2)
return -1;
//if the degrees are equal, look at the last non-zero entry
//of the array obtained by subtracting the two monomials
else {
int *temp = (int *)malloc(n * sizeof(int));
int result;
for (int v = 0; v < n; v++)
temp[v] = mon1[v] - mon2[v];
for (int v = (n - 1); v >= 0; v--) {
if (temp[v] != 0) {
result = -temp[v];
free(temp);
//per evitare di fare free due volte sul puntatore lo setto a NULL dopo la free
temp = NULL;
return result;
}
}
free(temp);
}
return 0;
}
int order(int(**ord) (void*, const void *, const void *), int n) {
//inizializza il puntatore ord alla funzione di ordinamento adeguata. Il numero n indica quale funzione scegliere.
switch (n) {
case 0:
*ord = grevlex_comparison_mcvs;
return 0;
break;
default:
return -1;
break;
}
}
//n mod p
//Reduction of n modulo p.
long long mod(long long n, long long p) {
long long v = n, x = 0;
if (v >= p) {
v = n % p;
}
else {
if (v < 0) {
x = n / p;
v = n - (x*p);
v += p;
}
}
return v;
}
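/*
 * Worked examples: mod(10, 7) returns 3; mod(-5, 7) returns 2, because the negative branch
 * adds p back after the truncating division.
 */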
//https://git.devuan.org/jaretcantu/eudev/commit/a9e12476ed32256690eb801099c41526834b6390
//missing from the stdlib, counterpart of qsort_r
//performs a binary search for key in the array base of length nmemb, whose elements
//have size size, and returns a pointer to the element equal to key if present, otherwise NULL.
//compar is the comparison function used to compare key against base
//arg is the third argument of compar
void *bsearch_r(const void *key, const void *base, size_t nmemb, size_t size,
int(*compar) (void *, const void *, const void *),
void *arg) {
size_t l, u, idx;
const void *p;
int comparison;
l = 0;
u = nmemb;
while (l < u) {
idx = (l + u) / 2;
p = (void *)(((const char *)base) + (idx * size));
comparison = compar(arg, key, p);
if (comparison < 0)
u = idx;
else if (comparison > 0)
l = idx + 1;
else
return (void *)p;
}
return NULL;
}
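/*
 * Usage sketch (mirrors the call made later in parse()): given the sorted monomial array
 * monomi of length len and a degree vector grade, the column of that monomial is found with
 *
 *   int **hit = (int **)bsearch_r((void *)&grade, (void *)monomi, len, sizeof(int *),
 *                                 grevlex_comparison_mcvs, &numero_variabili);
 *   int col_index = hit - monomi; //position of the monomial grade inside monomi
 *
 * hit and col_index are illustrative names; the real call sites compute the difference inline.
 */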
/* mon: string representing a monomial (there is no string terminator character)
* len: number of characters in mon
* val: variable in which the coefficient of the monomial is stored
* num_var: number of variables in the system
* vet: character vector in which each character is a variable (previously read from input)
* grade: vector in which the degrees of the variables are stored, following the order of vet
* module: field over which the system is represented
*/
int parse_mon(char * mon, int len, int * val, int num_var, char *vet, int *grade, int module) {
//parsing prima del coefficiente
int index = 0;
//se il primo carattere è una lettera (variabile) il coefficiente è 1
if (isalpha(mon[index]))
*val = 1;
//altrimenti leggo il coefficiente
else {
//se non è nè lettera nè cifra il formato input è sbagliato
if (!isdigit(mon[index]))
return -1;
char *coefficient = (char *)malloc(sizeof(char));
while (index < len && isdigit(mon[index])) {
coefficient = (char *)realloc(coefficient, (index + 1) * sizeof(char));
coefficient[index] = mon[index];
index++;
}
//aggiungo il carattere di temrinazione
coefficient = (char *)realloc(coefficient, (index + 1) * sizeof(char));
coefficient[index] = '\0';
//traduco il coefficiente in valore numerico e calcolo il modulo
*val = mod(atoll(coefficient), module);
free(coefficient);
}
//assumo grado zero di ogni variabile, aggiornato in seguito
for (int k = 0; k < num_var; k++)
grade[k] = 0;
//parsing delle incognite
char variable;
int variable_degree;
int variable_index;
int exponent_index;
char *exponent;
while (index < len) {
variable_index = -1;
variable_degree = 0;
//salto il moltiplicatore
if (mon[index] == '*' || mon[index] == ' ')
index++;
//leggo la variabile
if (index < len && isalpha(mon[index])) {
variable = mon[index];
//cerco la posizione della variabile in vet
for (int i = 0; i < num_var; i++)
if (vet[i] == mon[index]) {
variable_index = i;
//se è presente ha almeno grado 1
variable_degree = 1;
break;
}
//se non trovo la variabile in vet segnalo errore
if (variable_index == -1)
return -1;
index++;
}
//se c'è il carattere dell'elevato leggo l'esponente
if (index < len && mon[index] == '^') {
index++;
exponent_index = 0;
//se non è una cifra segnalo errore
if (index > len || !isdigit(mon[index]))
return -1;
exponent = (char *)malloc(sizeof(char));
while (index < len && isdigit(mon[index])) {
exponent = (char *)realloc(exponent, (exponent_index + 1) * sizeof(char));
exponent[exponent_index] = mon[index];
exponent_index++;
index++;
}
//metto il carattere di terminazoine stringa
exponent = (char *)realloc(exponent, (exponent_index + 1) * sizeof(char));
exponent[exponent_index] = '\0';
//leggo l'esponente
variable_degree = atoi(exponent);
free(exponent);
}
//se c'è la variabile
if (variable_index != -1)
//metto in grade il grado della variabile nella posizione corretta
grade[variable_index] = variable_degree;
}
return 0;
}
int parse(int numero_variabili, char *variabili, int *matrix, int row, int **monomi, int len, int module, int(*ord) (void*, const void *, const void *), FILE *input_file) {
/*
Parses the starting polynomials as follows.
One monomial is read at a time.
The monomial is decomposed by the function parse_mon.
The coefficient of the monomial is placed in the main (coefficient) matrix at the correct position.
The correct position is given by vet_grd.
All monomials of all starting polynomials are read.
If the input format is wrong the function stops, returning the error code -1.
*/
int pos_pol = 0, i, col;
char c, *mon;
int cof = 0;
c = fgetc(input_file);
int linear_index = 0;
int *grade;
//finchè non termino il file o non ho terminato il numero di polinomi dichiarati
while (c != EOF && pos_pol < row) {
mon = (char *)malloc(sizeof(char));
grade = (int *)calloc(numero_variabili, sizeof(int));
i = 0;
while (c != '+' && c != EOF && c != '\n') {
mon = (char *)realloc(mon, (i + 1) * sizeof(char));
mon[i] = c;
i++;
c = fgetc(input_file);
}
//se non ho salvato niente in mon (i = 0) non faccio il parsing
if (i != 0 && parse_mon(mon, i, &cof, numero_variabili, variabili, grade, module) == -1) {
return -1;
}
//inserire monomio in posizione corretta
col = int((int **)(bsearch_r((void *)&grade, (void *)monomi, len, (sizeof(int*)), ord, &numero_variabili)) - monomi);
linear_index = (pos_pol * len) + col;
matrix[linear_index] = cof;
if (c == '\n') {
pos_pol++;
}
free(mon);
free(grade);
c = fgetc(input_file);
cof = 0;
}
return 0;
}
int init_matrix(int *matrix, int row, int col, int **vet_grd, char *variabili, int num_var, int(*ord) (void*, const void *, const void *), FILE *input_file) {
//Inizializza la matrice principale (dei coefficienti) con i coefficienti dei polinomi forniti come input.
return parse(num_var, variabili, matrix, row, vet_grd, col, module, ord, input_file);
}
void setup_struct_map(struct map *map, int **monomi, int len, int n, int m, int (*compar) (void*, const void *, const void *) ){
int sum, index=len;
// inizializzo la struttura map, la mappa ha len righe.
map->len = len;
map->row = (map_row *)malloc( map->len * sizeof(struct map_row) );
//per ogni monomio in vet
int row, col, i, v;
for (row = 0; row < len; row++){
index = 0;
//dichiarati dentro per la parallelizzazione
int *temp = (int *)malloc(n * sizeof(int));
int *save = (int *)calloc(len, sizeof(int));
//provo a moltiplicarlo con ogni monomio in vet
for (col = 0; col < len; col++) {
sum = 0;
//eseguo il prodotto (sum è la somma dei gradi)
for (v = 0; v < n; v++) {
temp[v] = monomi[row][v] + monomi[col][v];
sum += temp[v];
}
//se il grado del prodotto > grado massimo tutti i restanti prodotti
//su quella riga sono > grado massimo
if (sum > m) {
// a questo punto col è l'indice del primo elemento della mappa che non è possibile rappresentare, quindi la riga row ha solo col numero di celle e non len come prima.
index = col;
break;
}
//altrimenti cerco il prodotto in vet e metto l'indice in save
else{
save[col] = (int **)(bsearch_r((void *) &temp, (void *) monomi, len, (sizeof(int*)), compar, &n)) - monomi;
}
}
// terminato il ciclo sulle colonne posso inizializzare la struttura perchè conosco tutti gli elementi da inserire
// la riga attuale ha esattamente index elementi diversi da -1, quindi la riga avrà lunghezza pari a index precedentemente calcolato
// alloco la riga con un array da index elementi
map->row[row].len = index;
map->row[row].col = (int *)malloc( map->row[row].len * sizeof(int) );
// a questo map devo copiare gli elementi generati dento alla struttura
for(i=0; i<map->row[row].len; i++)
map->row[row].col[i] = save[i];
free(temp);
free(save);
}
}
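/*
 * Worked example: with one variable and max_degree = 2 the sorted monomials are {1, x, x^2}
 * at indices 0, 1, 2. Then map.row[1].col = {1, 2} (x*1 = x, x*x = x^2) and
 * map.row[2].col = {2} (x^2*1 = x^2); any product of degree > 2 triggers the `sum > m` break.
 * Row 0 keeps len = 0 because its inner loop never breaks, but that row is never consulted
 * by moltiplica_matrice, which starts multiplying from monomial index 1.
 */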
void init_degree_vector(int *degree, int num_var){
//inizializza il vettore degree con il numero di monomi di grado i-esimo <= del grado massimo
int i,j,c;
for(i=0; i<max_degree+1; i++){
c = combination(num_var,i);
degree[i] = c;
}
}
int grado_monomio(int posizione, int **vet, int num_var){
//Computes the degree of the monomial from the position it occupies in the (sorted) vector of monomials, according to the chosen ordering.
//(the given position must be correct).
int i,grado;
grado = 0;
for(i=0; i<num_var; i++){
grado += vet[posizione][i];
}
return grado;
}
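/*
 * Worked example: with num_var = 3 and vet[posizione] = [2,1,1] (the monomial x^2*y*z),
 * grado_monomio returns 2+1+1 = 4.
 */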
void matrix_degree(int *m, int row, int col, int *m_deg, int **vet, int num_var){
//m_deg is a vector of length max_degree+1.
//the function computes the degrees of the polynomials stored in the matrix.
//Each cell of m_deg represents a degree: it is set to 1 if that degree appears in the matrix, 0 otherwise.
int i,j,last,grado, linear_index = 0;
for(i=0; i<row; i++){
for(j=col-1; j>0; j--){
linear_index = i*col + j;
if( m[linear_index] != 0 ){
last = j; //posizione dell'ultimo coefficiente della riga
break;
}
}
grado = grado_monomio(last, vet, num_var);
m_deg[grado] = 1;
}
}
void moltiplica_matrice(int **m, int *row, int col, struct map map, int * degree, int **vet, int num_var, int expansion_degree){
int riga;
int grado_massimo_riga, grado_massimo_monomio,i,j,last,new_row = 0;
last = -1;
int linear_index = 0;
long long total_dim = 0;
int *last_index = (int*)calloc(*row, sizeof(int));
int *numero_polinomi = (int*)calloc(*row, sizeof(int));
int numero_nuovi_polinomi = 0;
for(riga=0; riga<*row; riga++){
for(i=col-1; i>0; i--){
linear_index = riga * col + i;
if( (*m)[linear_index] != 0 ){ //(*m)[riga][i] != 0
last = i;
break;
}
}
//risalgo al grado del monomio appena trovato
//scorro la lista delle posizioni di inizio dei monomi con lo stesso grado
last_index[riga] = last;
if( last != -1 ){
grado_massimo_riga = grado_monomio(last,vet,num_var);
//calcolo il grado massimo che deve avere il monomio per cui moltiplicare
grado_massimo_monomio = max_degree - grado_massimo_riga;
// a questo punto conosco per quanti monomi devo moltiplicare e quindi
// conosco il numero di righe che devo aggiungere alla matrice
if( expansion_degree != 0 ){
if( grado_massimo_monomio > expansion_degree ){
grado_massimo_monomio = expansion_degree;
}
}
for(i=1; i<(grado_massimo_monomio+1); i++){
new_row += degree[i];
numero_nuovi_polinomi += degree[i];
}
numero_polinomi[riga] = numero_nuovi_polinomi;
numero_nuovi_polinomi = 0;
}
}
//--------------------------------------------------------------
//printf("nuove righe %d, totale righe %d", new_row, (*row+new_row) );
//ridimensionamento della matrice
total_dim = (*row * col) + (new_row * col);
*m = (int *)realloc( *m , total_dim * sizeof(int) );
//azzeramento delle nuove righe
for(i=*row; i<*row+new_row; i++){
for(j=0; j<col; j++){
(*m)[i*col+j] = 0;
}
}
int len = *row;
for(riga=0; riga<len; riga++){
if( last_index[riga] != -1 ){
for(i=1; i<(numero_polinomi[riga]+1); i++){ //scorre tutti i monomi per i quali posso moltiplicare
for(j=0; j<(last_index[riga]+1); j++){ //scorre fino all'ultimo elemento della riga
//(*m)[*row][ map.row[i].col[j] ] = (*m)[riga][j]; //shift nella posizione corretta indicata dalla mappa
linear_index = *row * col + map.row[i].col[j];
(*m)[linear_index] = (*m)[riga*col+j];
}
*row = *row + 1; //aumento del conteggio delle righe
}
}
}
free(last_index);
free(numero_polinomi);
}
void moltiplica_riga_forn(int **m, int *row, int col, int riga, struct map map, int * degree, int **vet, int num_var, int stop_degree){
int grado_massimo_riga, grado_massimo_monomio,i,j,last,new_row;
last = -1;
int linear_index = 0;
long long total_dim = 0;
//cerco la posizione dell'ultimo coefficiente non nullo del polinomio rappresentato nella riga.
for(i=col-1; i>0; i--){
linear_index = riga * col + i;
if( (*m)[linear_index] != 0 ){ //(*m)[riga][i] != 0
last = i;
break;
}
}
//risalgo al grado del monomio appena trovato
//scorro la lista delle posizioni di inizio dei monomi con lo stesso grado
if( last != -1 ){
grado_massimo_riga = grado_monomio(last,vet,num_var);
//calcolo il grado massimo che deve avere il monomio per cui moltiplicare
grado_massimo_monomio = max_degree - grado_massimo_riga;
// a questo punto conosco per quanti monomi devo moltiplicare e quindi
// conosco il numero di righe che devo aggiungere alla matrice
new_row = 0;
for(i=1; i<(grado_massimo_monomio+1); i++){
new_row += degree[i];
}
total_dim = (*row * col) + (new_row * col);
*m = (int *)realloc( *m , total_dim * sizeof(int) );
//azzeramento delle nuove righe
for(i=*row; i<*row+new_row; i++){
for(j=0; j<col; j++){
(*m)[i*col+j] = 0;
}
}
for(i=1; i<(new_row+1); i++){ //scorre tutti i monomi per i quali posso moltiplicare
for(j=0; j<(last+1); j++){ //scorre fino all'ultimo elemento della riga
//(*m)[*row][ map.row[i].col[j] ] = (*m)[riga][j]; //shift nella posizione corretta indicata dalla mappa
linear_index = *row * col + map.row[i].col[j];
(*m)[linear_index] = (*m)[riga*col+j];
}
*row = *row + 1; //aumento del conteggio delle righe
}
}
}
//Scambio di due righe della matrice m.
__device__ void swap_rows_GPU(int *m, int row, int col, int j, int i){
int k;
long long tmp;
if( j!=i ){
for(k=0;k<col;k++){
tmp = m[i*col+k]; //m[i][k];
m[i*col+k] = m[j*col+k]; //m[i][k] = m[j][k];
m[j*col+k] = tmp; //m[j][k] = tmp;
}
}
}
//n mod p
//Riduzione di n in modulo p.
__device__ int mod_long_GPU(long long n, long long p) {
long long v = n, x = 0;
if (v >= p) {
v = n % p;
}
else {
if (v < 0) {
x = n / p;
v = n - (x*p);
v += p;
}
}
int r = v;
return r;
}
//n mod p
//Riduzione di n in modulo p.
__device__ int mod_GPU(int n, int p) {
int v = n, x = 0;
if (v >= p) {
v = n % p;
}
else {
if (v < 0) {
x = n / p;
v = n - (x*p);
v += p;
}
}
return v;
}
//multiplicative inverse of n modulo p (with p prime).
__device__ int invers_GPU(int n, int p){
int b0 = p, t, q;
int x0 = 0, x1 = 1;
if (p == 1) return 1;
while (n > 1) {
q = n / p;
t = p, p = (n % p), n = t;
t = x0, x0 = x1 - q * x0, x1 = t;
}
if (x1 < 0) x1 += b0;
return x1;
}
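/*
 * Worked example: invers_GPU(3, 7) returns 5, since 3 * 5 = 15 and 15 mod 7 = 1.
 * The function assumes p is prime, so the inverse exists whenever n is not divisible by p.
 */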
// a + b mod p
//sommatoria di a e b in modulo p
__device__ int add_mod_GPU(int a, int b, int p){
return mod_GPU((a+b),p);
}
// a - b mod p
//sottrazione di a e b in modulo p
__device__ int sub_mod_GPU(int a, int b, int p){
long long aa,bb;
aa = a;
bb = b;
return mod_long_GPU((aa-bb),p);
}
// a * b mod p
//prodotto di a e b in modulo p
__device__ int mul_mod_GPU(int a, int b, int p){
long long aa,bb;
aa = a;
bb = b;
return mod_long_GPU((aa*bb),p);
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*
Optimizations
- branch divergence
Correctness
- cell kernel (wrong results)
Additions
- solving and printing the values of the unknowns
Tests
- performance while varying grid and block dimensions
*/
__global__ void kernel_riduzione_blocco_base(int *matrix, int row, int col, int module, int pivot_colonna, int inv, int pivot_riga, int thread_height, int block_dim){
int a = 0, s = 0;
int col_index = blockIdx.x * blockDim.x + threadIdx.x; //indice della colonna della matrice originale per il thread corrente
int interation = 0;
if(col_index < pivot_colonna){
int reached_row = ( pivot_riga + 1) + ((blockIdx.y + 1) * thread_height); //riga raggiunta dal thread corrente
if(reached_row > row){
interation = thread_height - (reached_row - row); //numero di iterazioni nel caso in cui la matrice non collima con la dimensione della grid
}else{
interation = thread_height;
}
int block_row_offset = (pivot_riga + 1) + (blockIdx.y * thread_height);
int row_offset = block_row_offset;
for(int i=0; i<interation; i++){
if( matrix[row_offset * col + pivot_colonna] != 0 ){
s = mul_mod_GPU(inv, matrix[row_offset * col + pivot_colonna], module); //tutti i thread sulla stessa riga calcolano lo stesso risultato
a = mul_mod_GPU(s, matrix[pivot_riga * col + col_index], module);
matrix[row_offset * col + col_index] = sub_mod_GPU(matrix[row_offset * col + col_index], a, module);
}
row_offset ++;
}
}
}
__global__ void kernel_riduzione_blocco(int *matrix, int row, int col, int module, int pivot_colonna, int inv, int pivot_riga, int thread_height, int block_dim){
extern __shared__ int smem [];
int *smem_riga_pivot = (int*)smem;
int *smem_col_pivot = (int*)&smem_riga_pivot[block_dim];
int a = 0, s = 0, interation = 0;
int col_index = blockIdx.x * blockDim.x + threadIdx.x; //indice della colonna della matrice originale per il thread corrente
//-------------
//inizzializzazione smem per pivot riga
smem_riga_pivot[threadIdx.x] = matrix[pivot_riga * col + col_index]; //ogni thread copia un solo elemento nella riga in shared, un thread per cella di riga
//------------
//inizializzazione smem per pivot colonna
//calcolo del numero di celle (colonna_pivot) che ogni thred deve copiare
int cell_to_copy = 1;
if(thread_height > blockDim.x){
cell_to_copy = thread_height / blockDim.x + 1;
}
int base_row = (pivot_riga + 1) + blockIdx.y * thread_height;
int index = 0;
//copia della porzione di colonna in smem
for(int i=0; i<cell_to_copy; i++){
index = (threadIdx.x * cell_to_copy) + i;
if(base_row + index < row && index < thread_height){
smem_col_pivot[index] = matrix[(base_row + index) * col + pivot_colonna];
}
}
//sincronizza tutti i thread del blocco in modo tale che la smem sia consistente
__syncthreads();
if(col_index < pivot_colonna){
//calcolo del numero di righe sulle quali deve iterare il thread, caso in cui la dimensione della matrice non collima con thread_height
int reached_row = ( pivot_riga + 1) + ((blockIdx.y + 1) * thread_height); //riga raggiunta dal thread corrente
if(reached_row > row){
interation = thread_height - (reached_row - row); //dimensione non collima
}else{
interation = thread_height; //caso normale
}
int row_offset = (pivot_riga + 1) + (blockIdx.y * thread_height);
for(int i=0; i<interation; i++){
int pivot_element = smem_col_pivot[i];
if( pivot_element != 0 ){
s = mul_mod_GPU(inv, pivot_element, module); //tutti i thread sulla stessa riga calcolano lo stesso risultato
a = mul_mod_GPU(s, smem_riga_pivot[threadIdx.x], module);
matrix[row_offset * col + col_index] = sub_mod_GPU(matrix[row_offset * col + col_index], a, module);
}
row_offset ++;
}
}
}
__global__ void kernel_riduzione_riga_base(int *matrix, int row, int col, int module, int start, int pivot_colonna, int inv, int pivot_riga, int cell_per_thread){
int a = 0, s = 0;
int last_row = row - 1;
int row_index = (pivot_riga + 1) + (blockDim.x * blockIdx.x + threadIdx.x);
if(row_index >= start && row_index < row){
int row_linear_index = row_index * col + pivot_colonna;
if( matrix[row_linear_index] != 0 ){
s = mul_mod_GPU(inv,matrix[row_linear_index],module);
for(int k = 0; k < pivot_colonna+1; k++ ){
a = mul_mod_GPU(s,matrix[pivot_riga*col+k],module);
//a = mul_mod_GPU(s, smem[k], module);
matrix[row_index*col+k] = sub_mod_GPU(matrix[row_index*col+k],a,module);
}
}
}
}
__global__ void kernel_riduzione_riga(int *matrix, int row, int col, int module, int start, int pivot_colonna, int inv, int pivot_riga, int cell_per_thread){
extern __shared__ int smem[];
if( (threadIdx.x * cell_per_thread) <= pivot_colonna){
int row_offset = pivot_riga*col;
int thread_offset = threadIdx.x * cell_per_thread;
//allocazione della smem con la riga di pivot, ogni thread copia una porzione di riga pari a "cell_per_thread".
for(int i=0; i<cell_per_thread; i++){
if(thread_offset + i <= pivot_colonna){
smem[thread_offset + i] = matrix[row_offset + thread_offset + i];
}
}
}
__syncthreads();
int a = 0, s = 0;
int last_row = row - 1;
int row_index = (pivot_riga + 1) + (blockDim.x * blockIdx.x + threadIdx.x);
if(row_index >= start && row_index < row){
int row_linear_index = row_index * col + pivot_colonna;
if( matrix[row_linear_index] != 0 ){
s = mul_mod_GPU(inv,matrix[row_linear_index],module);
for(int k = 0; k < pivot_colonna+1; k++ ){
//a = mul_mod_GPU(s,matrix[pivot_riga*col+k],module);
a = mul_mod_GPU(s, smem[k], module);
matrix[row_index*col+k] = sub_mod_GPU(matrix[row_index*col+k],a,module);
}
}
}
}
__global__ void kernel_riduzione_cella(int *matrix, int row, int col, int module, int inv, int pivot_colonna, int pivot_riga){
int last_row = row - 1;
int starting_row = pivot_riga + 1;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y + starting_row;
if( idx < pivot_colonna && idy < row && idy > pivot_riga){ //fermo i thread prima di pivot_colonna per impedire di sovrascrivere il dato necessario per s
int div = matrix[idy*col+pivot_colonna];
if( div != 0 ){
int s = mul_mod_GPU(inv, div, module);
int a = mul_mod_GPU(s, matrix[pivot_riga*col+idx], module);
matrix[idy*col+idx] = sub_mod_GPU(matrix[idy*col+idx], a, module);
}
}
}
__global__ void gauss_kernel_celle(int *matrix, int row, int col, int module){
int pivot_riga = 0,r = 0,righe_trovate = 0,i,k;
int s,inv,a;
int flag=0,invarianti=0,flag2=0,tmp;
for(int pivot_colonna = col-1; pivot_colonna >= 0; pivot_colonna-- ){
r = righe_trovate;
while( r < row && matrix[r*col+pivot_colonna] == 0 ){ //m[r][pivot_colonna]
r++;
}
// either we found the first row with a non-zero element at (r, pivot_colonna), or no row has a non-zero element in column pivot_colonna
if( r < row ){ //a non-zero entry was found
if( r != righe_trovate ){
swap_rows_GPU(matrix,row,col,righe_trovate,r); //move the row just found into its correct position
flag = 1;
}
pivot_riga = righe_trovate;
righe_trovate++;
inv = invers_GPU(matrix[pivot_riga*col+pivot_colonna],module); //multiplicative inverse of the element in m[pivot_riga][pivot_colonna]
//cell-reduction kernel
int block_dim = 16;
dim3 threads(block_dim, block_dim, 1);
int block_size = 256;
int numero_righe = row - righe_trovate;
int grid_y = numero_righe/block_dim + 1;
int grid_x = col/block_dim + 1;
dim3 blocks(grid_x, grid_y,1);
kernel_riduzione_cella<<<blocks, threads>>>(matrix, row, col, module, inv, pivot_colonna, pivot_riga);
cudaDeviceSynchronize();
//the whole pivot column (pivot_colonna) below the pivot has to be zeroed out
for(int x = pivot_riga + 1; x < row; x++){
matrix[x*col+pivot_colonna] = 0;
}
}
}
}
__global__ void reset_pivot_col(int *matrix, int row, int col, int pivot_riga, int pivot_colonna, int thread_height, int block_dim){
int start_row = (pivot_riga + 1) + ( (blockIdx.x * (thread_height*block_dim)) + (threadIdx.x * thread_height));
int reached_row = (pivot_riga + 1) + ( (blockIdx.x * (thread_height*block_dim)) + ( (threadIdx.x + 1) * thread_height));
int iteration = thread_height;
if(reached_row > row){
iteration = thread_height - (reached_row - row);
if(iteration > thread_height){
iteration = 0;
}
}
for(int i=0; i<iteration; i++){
matrix[(start_row + i)*col+pivot_colonna] = 0;
}
}
__global__ void swap_rows(int *matrix, int row, int col, int j, int i){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid >= col){
return;
}
int ii = i*col+tid;
int jj = j*col+tid;
int tmp = matrix[ii];
matrix[ii] = matrix[jj];
matrix[jj] = tmp;
}
__global__ void find_pivot(int *matrix, int row, int col, int r, int pivot_colonna){
/*
while( r < row && matrix[r*col+pivot_colonna] == 0 ){
r++;
}
pointer_r = r;
*/
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int thread_row = r+tid;
if(thread_row >= row)
return;
if(matrix[thread_row*col+pivot_colonna] != 0){
atomicMin(&next_pivot_row, thread_row);
}
}
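/*
 * find_pivot parallelizes the sequential scan kept in the comment above: the calling kernel
 * first resets the device-global next_pivot_row to row, then every thread whose row has a
 * non-zero entry in column pivot_colonna proposes its row index through atomicMin, so after
 * the launch next_pivot_row holds the smallest such row from r on, or row if that part of
 * the column is entirely zero.
 */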
__global__ void gauss_kernel_blocco(int *matrix, int row, int col, int module){
int pivot_riga = 0,r = 0,righe_trovate = 0,i,k;
int s,inv,a;
float total_time_for_reduction = 0, total_time_for_reset = 0.0;
double elapsed = 0.0;
clock_t start, stop;
int tick = 0;
int block_dim = 0;
int threads_per_block = 0;
int block_x_axis, block_y_axis = 0;
int *p;
for(int pivot_colonna = col-1; pivot_colonna >= 0; pivot_colonna-- ){
r = righe_trovate;
///////////////////////////FIND PIVOT///////////////////////////////////////////////
block_dim = 256;
int row_to_check = row - righe_trovate;
threads_per_block = ( row_to_check < block_dim ? row_to_check : block_dim);
dim3 t_find(threads_per_block);
if( threads_per_block == block_dim && row_to_check != block_dim){
block_x_axis = (row_to_check / block_dim) + 1;
}else{
block_x_axis = 1;
}
dim3 b_find(block_x_axis);
next_pivot_row = row;
find_pivot<<<b_find, t_find>>>(matrix, row, col, r, pivot_colonna);
cudaDeviceSynchronize();
r = next_pivot_row;
/////////////////////////////////////////////////////////////////////////////////
// either we found the first row with a non-zero element at (r, pivot_colonna), or no row has a non-zero element in column pivot_colonna
if( r < row ){
if( r != righe_trovate ){
////////////////////////SWAP ROWS////////////////////////////////////////////////////////
block_dim = 256;
threads_per_block = ( col < block_dim ? col : block_dim);
dim3 t_swap(threads_per_block);
if( threads_per_block == block_dim && col != block_dim){
block_x_axis = (col / block_dim) + 1;
}else{
block_x_axis = 1;
}
dim3 b_swap(block_x_axis);
//move the row just found into its correct position
swap_rows<<<b_swap, t_swap>>>(matrix, row, col, righe_trovate, r);
cudaDeviceSynchronize();
////////////////////////////////////////////////////////////////////////////////////////
}
pivot_riga = righe_trovate;
righe_trovate++;
inv = invers_GPU(matrix[pivot_riga*col+pivot_colonna], module);
////////////////////////////////////////REDUCTION BY BLOCK////////////////////////////////////
block_dim = 128;
int col_to_reduce = pivot_colonna;
threads_per_block = ( col_to_reduce < block_dim ? col_to_reduce : block_dim);
dim3 threads(threads_per_block);
if( threads_per_block == block_dim && col_to_reduce != block_dim){
block_x_axis = (col_to_reduce / block_dim) + 1;
}else{
block_x_axis = 1;
}
int thread_height = 32;
int row_to_reduce = row - righe_trovate;
block_y_axis = (row_to_reduce / thread_height) + 1;
dim3 blocks(block_x_axis, block_y_axis);
int shared = (block_dim * sizeof(int)) + (thread_height * sizeof(int));
kernel_riduzione_blocco<<<blocks, threads, shared>>>(matrix, row, col, module, pivot_colonna, inv, pivot_riga, thread_height, block_dim);
cudaDeviceSynchronize();
//////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////RESET PIVOT COL////////////////////////////////////////
thread_height = 50;
block_dim = 32;
row_to_reduce = row-pivot_riga;
threads_per_block = (row_to_reduce < thread_height ? 1 : block_dim);
block_x_axis = (threads_per_block == block_dim && row_to_reduce != block_dim) ? (row_to_reduce/(thread_height*block_dim)+1) : 1;
dim3 t(threads_per_block);
dim3 b(block_x_axis);
reset_pivot_col<<<b, t>>>(matrix, row, col, pivot_riga, pivot_colonna, thread_height, block_dim);
cudaDeviceSynchronize();
//////////////////////////////////////////////////////////////////////////////////////
}
}
}
__global__ void gauss_kernel_blocco_base(int *matrix, int row, int col, int module){
int pivot_riga = 0,r = 0,righe_trovate = 0,i,k;
int s,inv,a;
float total_time_for_reduction = 0, total_time_for_reset = 0.0;
double elapsed = 0.0;
clock_t start, stop;
int tick = 0;
int block_dim = 0;
int threads_per_block = 0;
int block_x_axis, block_y_axis = 0;
int *p;
for(int pivot_colonna = col-1; pivot_colonna >= 0; pivot_colonna-- ){
r = righe_trovate;
///////////////////////////FIND PIVOT///////////////////////////////////////////////
while( r < row && matrix[r*col+pivot_colonna] == 0 ){ //m[r][pivot_colonna]
r++;
}
/////////////////////////////////////////////////////////////////////////////////
// either we found the first row with a non-zero element at (r, pivot_colonna), or no row has a non-zero element in column pivot_colonna
if( r < row ){
if( r != righe_trovate ){
////////////////////////SWAP ROWS////////////////////////////////////////////////////////
swap_rows_GPU(matrix,row,col,righe_trovate,r);
////////////////////////////////////////////////////////////////////////////////////////
}
pivot_riga = righe_trovate;
righe_trovate++;
inv = invers_GPU(matrix[pivot_riga*col+pivot_colonna], module);
////////////////////////////////////////REDUCTION BY BLOCK////////////////////////////////////
block_dim = 128;
int col_to_reduce = pivot_colonna;
threads_per_block = ( col_to_reduce < block_dim ? col_to_reduce : block_dim);
dim3 threads(threads_per_block);
if( threads_per_block == block_dim && col_to_reduce != block_dim){
block_x_axis = (col_to_reduce / block_dim) + 1;
}else{
block_x_axis = 1;
}
int thread_height = 256;
int row_to_reduce = row - righe_trovate;
block_y_axis = (row_to_reduce / thread_height) + 1;
dim3 blocks(block_x_axis, block_y_axis);
kernel_riduzione_blocco_base<<<blocks, threads>>>(matrix, row, col, module, pivot_colonna, inv, pivot_riga, thread_height, block_dim);
cudaDeviceSynchronize();
//////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////RESET PIVOT COL////////////////////////////////////////
//the whole pivot column (pivot_colonna) below the pivot has to be zeroed out
for(int x = pivot_riga + 1; x < row; x++){
matrix[x*col+pivot_colonna] = 0;
}
//////////////////////////////////////////////////////////////////////////////////////
}
}
}
__global__ void gauss_kernel_righe(int *matrix, int row, int col, int module){
int pivot_riga = 0,r = 0,righe_trovate = 0,i,k;
int s,inv,a;
int flag=0,invarianti=0,flag2=0,tmp;
int block_dim = 0;
int threads_per_block = 0;
int block_x_axis, block_y_axis = 0;
for(int pivot_colonna = col-1; pivot_colonna >= 0; pivot_colonna-- ){
r = righe_trovate;
/////////////////////////////////////
block_dim = 256;
int row_to_check = row - righe_trovate;
threads_per_block = ( row_to_check < block_dim ? row_to_check : block_dim);
dim3 t_find(threads_per_block);
if( threads_per_block == block_dim && row_to_check != block_dim){
block_x_axis = (row_to_check / block_dim) + 1;
}else{
block_x_axis = 1;
}
dim3 b_find(block_x_axis);
next_pivot_row = row;
find_pivot<<<b_find, t_find>>>(matrix, row, col, r, pivot_colonna);
cudaDeviceSynchronize();
r = next_pivot_row;
///////////////////////////////////
if( r < row ){ //a non-zero entry was found
if( r != righe_trovate ){
block_dim = 256;
threads_per_block = ( col < block_dim ? col : block_dim);
dim3 t_swap(threads_per_block);
if( threads_per_block == block_dim && col != block_dim){
block_x_axis = (col / block_dim) + 1;
}else{
block_x_axis = 1;
}
dim3 b_swap(block_x_axis);
//move the row just found into its correct position
swap_rows<<<b_swap, t_swap>>>(matrix, row, col, righe_trovate, r);
cudaDeviceSynchronize();
}
pivot_riga = righe_trovate;
righe_trovate++;
inv = invers_GPU(matrix[pivot_riga*col+pivot_colonna],module); //multiplicative inverse of the element in m[pivot_riga][pivot_colonna]
int block_dim = 1024;
//row-reduction kernel
int numero_righe = row - righe_trovate;
int t = (numero_righe < block_dim ? numero_righe : block_dim);
int b = 1;
if( t == block_dim && numero_righe != block_dim ){
b = numero_righe / block_dim + 1;
}
dim3 threads(t);
dim3 blocks(b);
int pivot_length = pivot_colonna + 1;
int cell_per_thread = ( t >= pivot_length ) ? 1 : ( pivot_length / t) + 1;
int shared_mem = pivot_length * sizeof(int);
kernel_riduzione_riga<<<blocks, threads, shared_mem>>>(matrix, row, col, module, righe_trovate, pivot_colonna, inv, pivot_riga, cell_per_thread);
cudaDeviceSynchronize();
}
}
}
__global__ void gauss_kernel_righe_base(int *matrix, int row, int col, int module){
int pivot_riga = 0,r = 0,righe_trovate = 0,i,k;
int s,inv,a;
int flag=0,invarianti=0,flag2=0,tmp;
int block_dim = 0;
int threads_per_block = 0;
int block_x_axis, block_y_axis = 0;
for(int pivot_colonna = col-1; pivot_colonna >= 0; pivot_colonna-- ){
r = righe_trovate;
while( r < row && matrix[r*col+pivot_colonna] == 0 ){ //m[r][pivot_colonna]
r++;
}
// either we found the first row with a non-zero element at (r, pivot_colonna), or no row has a non-zero element in column pivot_colonna
if( r < row ){ //a non-zero entry was found
if( r != righe_trovate ){
swap_rows_GPU(matrix,row,col,righe_trovate,r); //move the row just found into its correct position
flag = 1;
}
pivot_riga = righe_trovate;
righe_trovate++;
inv = invers_GPU(matrix[pivot_riga*col+pivot_colonna],module); //multiplicative inverse of the element in m[pivot_riga][pivot_colonna]
int block_dim = 1024;
//row-reduction kernel
int numero_righe = row - righe_trovate;
int t = (numero_righe < block_dim ? numero_righe : block_dim);
int b = 1;
if( t == block_dim && numero_righe != block_dim ){
b = numero_righe / block_dim + 1;
}
dim3 threads(t);
dim3 blocks(b);
int pivot_length = pivot_colonna + 1;
int cell_per_thread = ( t >= pivot_length ) ? 1 : ( pivot_length / t) + 1;
kernel_riduzione_riga_base<<<blocks, threads>>>(matrix, row, col, module, righe_trovate, pivot_colonna, inv, pivot_riga, cell_per_thread);
cudaDeviceSynchronize();
}
}
}
double gauss_GPU(int *m, int row, int col, int module){
int matrix_length = row * col;
int matrix_length_bytes = matrix_length * sizeof(int);
clock_t start, end;
double elapsed = 0.0;
int *m_d;
gpuErrchk(cudaMalloc( (void **) &m_d, matrix_length_bytes));
gpuErrchk(cudaMemcpy(m_d, m, matrix_length_bytes, cudaMemcpyHostToDevice));
start = clock();
gauss_kernel_righe_base<<<1,1>>>(m_d, row, col, module);
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(m, m_d, matrix_length_bytes, cudaMemcpyDeviceToHost));
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
gpuErrchk(cudaFree(m_d));
return elapsed;
}
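/*
 * Illustrative sketch (not part of the original flow): the clock() interval measured in
 * gauss_GPU above also includes the device-to-host copy. Assuming one wanted to time the
 * kernel alone, CUDA events could be used as below; ev_start, ev_stop and kernel_ms are
 * hypothetical names.
 */
#if 0
float kernel_ms = 0.0f;
cudaEvent_t ev_start, ev_stop;
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);
cudaEventRecord(ev_start, 0);
gauss_kernel_righe_base<<<1,1>>>(m_d, row, col, module);
cudaEventRecord(ev_stop, 0);
cudaEventSynchronize(ev_stop);
cudaEventElapsedTime(&kernel_ms, ev_start, ev_stop); //elapsed kernel time in milliseconds
cudaEventDestroy(ev_start);
cudaEventDestroy(ev_stop);
#endif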
int null_rows(int *m, int row, int col){
//counts the number of all-zero rows in matrix m.
int i,j,last,null_rows;
null_rows = 0;
for(i=0; i<row; i++){
last = -1;
for(j=col-1; j>-1; j--){
if(m[i*col+j] != 0 ){
last = j;
break;
}
}
if( last == -1 )
null_rows++;
}
return null_rows;
}
void eliminate_null_rows(int **m, int *row, int col){
//Removes the all-zero rows from matrix m.
//N.B. this procedure drops the trailing null rows of the matrix.
//It MUST be called after Gaussian reduction, which moves every null row to the bottom of the matrix.
//Calling it before Gauss may discard rows that are not null.
int null_row = null_rows(*m,*row,col);
int new_rows = *row - null_row;
if(null_row != 0){
*m = (int *)realloc( *m , (new_rows*col) * sizeof (int));
*row = new_rows;
}
}
void print_matrix_degree(int *m_deg, FILE *output_file){
//prints the degree vector of the matrix.
int i;
fprintf(output_file, "Gradi della matrice = {");
for(i=0; i<max_degree+1; i++)
if( m_deg[i] != 0 ) fprintf(output_file, " %d ",i);
fprintf(output_file, "}\n");
}
int target_degree(int *v){
//Checks whether vector v satisfies the termination condition, i.e. all degrees {1,2,3,...,max_degree} are present.
//Returns 0 if the condition is satisfied, -1 otherwise.
int i,flag;
flag = 0;
for(i=1; i<max_degree+1; i++){
if( v[i] != 1 ){
flag = -1;
break;
}
}
return flag;
}
void execute_standard(int **matrix, int * row, int col, struct map map, int *degree, int **monomi, int numero_variabili, int n_loops, int expansion, FILE *output_file){
clock_t start, end;
double elapsed;
//allocate the array that will hold the degree vectors of each round
int **m_deg_array = (int **)malloc(sizeof(int*));
m_deg_array[0] = (int *)calloc(max_degree+1, sizeof(int));
int n_round = 0;
int *m_deg = m_deg_array[0];
int missing_degree = max_degree;
fprintf(output_file, "Inizio computazione, metodo standard\n");
matrix_degree(*matrix, *row, col, m_deg, monomi, numero_variabili);
int flag, old_v, new_v;
flag = old_v = new_v = 0;
old_v = *row;
//assume a normal expansion
int expansion_degree = max_degree;
int st = 0;
while( flag != 1 ){
n_round++;
fprintf(output_file, "\n -Eseguo moltiplicazione, ");
fflush(stdout);
start = clock();
//find missing degree to multiply matrix
for(int i=max_degree; i>0; i--){
if( m_deg[i] == 0 ){
missing_degree = i;
break;
}
}
moltiplica_matrice(matrix, row, col, map, degree, monomi, numero_variabili, missing_degree);
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "numero righe: %d (%f sec)", *row, elapsed);
fprintf(output_file, "\n -Eseguo Gauss, ");
fflush(stdout);
//start = clock();
//apply Gaussian reduction
elapsed = gauss_GPU(*matrix, *row, col, module);
//drop the null rows of the matrix
eliminate_null_rows(matrix, row, col);
//append the degrees of the current round to the array
//n_round+1 because the degrees computed before the first round are stored as well
m_deg_array = (int **)realloc(m_deg_array, sizeof(int*)*(n_round+1));
m_deg_array[n_round] = (int *)calloc(max_degree+1, sizeof(int));
m_deg = m_deg_array[n_round];
//end = clock();
//elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "numero righe: %d (%f sec)\n", *row, elapsed);
matrix_degree(*matrix,*row, col, m_deg, monomi, numero_variabili);
print_matrix_degree(m_deg, output_file);
new_v = *row;
st = new_v;
if( target_degree(m_deg) == 0 )
flag = 1;
else{
old_v = new_v;
}
for(int i=max_degree; i>0; i--){
if( m_deg[i] == 0 ){
expansion_degree = i;
break;
}
}
}
for (int i = 0; i < n_round+1; i++)
free(m_deg_array[i]);
free(m_deg_array);
}
void print_incognite(int *m, int row, int col, int num_var, int **vet, FILE *output_file){
int grado,last;
for(int r = row - (num_var+1); r<row; r++){
//find the position of the last non-zero element of row r
for( int i=col-1; i>=0; i-- ){
if( m[r*col+i] != 0 ){ //m[r][i] != 0
last = i;
break;
}
}
        //compute the degree of row r
grado = grado_monomio(last,vet,num_var);
        //if the degree of row r is 1 then print the whole matrix row
if( grado == 1 ){
for( int j=0; j<last+1; j++ ){
fprintf(output_file, "%d ", m[r*col+j]); //m[r][j]
}
fprintf(output_file, "\n\n");
}
}
fprintf(output_file, "\n");
}
int main (int argc, char *argv[]){
FILE *input_file = NULL, *output_file = NULL;
    //initialize flag to false
for (int parsed = 1; parsed < argc; parsed++) {
if (parsed < argc && !strcmp(argv[parsed], "--input")) {
parsed++;
input_file = fopen(argv[parsed], "r");
if (!input_file) {
perror("Errore nell'apertura del file di input");
return (-1);
}
}
else if (parsed < argc && !strcmp(argv[parsed], "--output")) {
parsed++;
output_file = fopen(argv[parsed], "w");
if (!output_file) {
perror("Errore nell'apertura del file di output");
return (-1);
}
}
}
if (!input_file)
input_file = stdin;
if (!output_file)
output_file = stdout;
/*
int peak_clk = 1;
cudaError_t err = cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, 0);
if (err != cudaSuccess) {printf("cuda err: %d at line %d\n", (int)err, __LINE__); return 1;}
printf("peak clock rate: %dkHz", peak_clk);
*/
int row, col, numero_variabili, tipo_ordinamento;
int *matrix;
char *variabili;
row = col = numero_variabili = 0;
int (*ord) (void*, const void *, const void *);
int *d_row, **map;
struct map smap;
clock_t start, end;
double elapsed = 0.0;
start = clock();
    //allocate the main matrix, read from input: the modulus, maximum degree and number of variables
allocation(&matrix, &row, &col, &numero_variabili, &variabili, &tipo_ordinamento, &module, &max_degree, input_file);
    int matrix_lentght = row * col; //number of elements of the matrix
if( order(&ord, tipo_ordinamento) != 0 ){
fprintf(stderr, "Ordinamento insesistente!!!\n\nTERMINAZIONE PROGRAMMA");
return 0;
}
int * degree = (int *)calloc(max_degree+1, sizeof(int));
int numero_monomi = col;
int **monomi;
    //create the vector with all the possible monomials having num_var variables and max_degree as maximum degree
monomi = monomial_computation(numero_variabili, max_degree, numero_monomi);
    //sort the monomial vector according to a given ordering; the ordering is interchangeable
qsort_s(monomi, numero_monomi, sizeof(int*), ord, &numero_variabili);
    //matrix initialization (reading of the input data)
if (init_matrix(matrix, row, col, monomi, variabili, numero_variabili, ord, input_file) == -1) {
fprintf(stderr, "Errore di input !!!\n\nTERMINAZIONE PROGRAMMA"); //se l'input è in formato scorrettro abort del programma
return 0;
}
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "\nInizializzazione in %f sec\n", elapsed);
start = clock();
setup_struct_map(&smap, monomi, numero_monomi, numero_variabili, max_degree, ord);
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "\nMappa creata in %f sec, %d x %d \n\n", elapsed, col, col);
    //PROBLEM SOLVING
start = clock();
    //initialization of the vector of polynomial degrees
init_degree_vector(degree, numero_variabili);
int n_loops = 30, expansion = 1;
    //perform multiplication and Gaussian reduction until a solution is found
execute_standard(&matrix, &row, col, smap, degree, monomi, numero_variabili, n_loops, expansion, output_file);
end = clock();
elapsed = ((double)(end - start)) / CLOCKS_PER_SEC;
fprintf(output_file, "\nTarget raggiunto, soluzione trovata in %f sec\n\n", elapsed);
//print_matrix(matrix, row, col, output_file);
print_incognite(matrix, row, col, numero_variabili, monomi, output_file);
for(int i=0; i<row*col; i++){
if(matrix[i] > module){
printf("OVERFLOW\n");
}
}
free(matrix);
free(degree);
cudaDeviceReset();
return 0;
}
|
ec7faf4af14d6f2e555051ccddf997a13d5009ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--blockDim=256 --gridDim=2
#include "common.h"
///////////////////////////////////////
//// Compute forward substring matches
///////////////////////////////////////
__global__ void
mummergpuKernel(MatchCoord* match_coords,
char* queries,
const int* queryAddrs,
const int* queryLengths,
const int numQueries,
const int min_match_len)
{
int qryid = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (qryid >= numQueries) { return; }
XPRINTF("> qryid: %d\n", qryid);
int qlen = queryLengths[qryid];
// start at root for first query character
TextureAddress cur;
cur.data = 0;
int mustmatch = 0;
int qry_match_len = 0;
int qryAddr=queryAddrs[qryid];
MatchCoord * result = match_coords + qryAddr - __umul24(qryid, min_match_len + 1);
queries += qryAddr;
int last = qlen - min_match_len;
for (int qrystart = 0;
qrystart <= last;
qrystart++, result++, queries++)
{
XPRINTF("qry: %s\n", queries + 1);
PixelOfNode node;
TextureAddress prev;
if ((cur.data == 0) || (qry_match_len < 1))
{
// start at root of tree
cur.x = 0; cur.y = 1;
qry_match_len = 1;
mustmatch = 0;
}
char c = queries[qry_match_len];
XPRINTF("In node (%d,%d): starting with %c [%d] => \n", cur.x, cur.y, c, qry_match_len);
int refpos = 0;
while ((c != '\0'))
{
XPRINTF("Next edge to follow: %c (%d)\n", c, qry_match_len);
PixelOfChildren children;
children.data = tex2D(childrentex,cur.x, cur.y);
prev = cur;
switch(c)
{
case 'A': cur=children.children[0]; break;
case 'C': cur=children.children[1]; break;
case 'G': cur=children.children[2]; break;
case 'T': cur=children.children[3]; break;
default: cur.data = 0; break;
};
XPRINTF(" In node: (%d,%d)\n", cur.x, cur.y);
// No edge to follow out of the node
if (cur.data == 0)
{
XPRINTF(" no edge\n");
set_result(prev, result, 0, qry_match_len, min_match_len,
FORWARD);
qry_match_len -= 1;
mustmatch = 0;
goto NEXT_SUBSTRING;
}
{
unsigned short xval = cur.data & 0xFFFF;
unsigned short yval = (cur.data & 0xFFFF0000) >> 16;
node.data = tex2D(nodetex, xval, yval);
}
XPRINTF(" Edge coordinates: %d - %d\n", node.start, node.end);
if (mustmatch)
{
int edgelen = node.end - node.start+1;
if (mustmatch >= edgelen)
{
XPRINTF(" mustmatch(%d) >= edgelen(%d), skipping edge\n", mustmatch, edgelen);
refpos = node.end+1;
qry_match_len += edgelen;
mustmatch -= edgelen;
}
else
{
XPRINTF(" mustmatch(%d) < edgelen(%d), skipping to:%d\n",
mustmatch, edgelen, node.start+mustmatch);
qry_match_len += mustmatch;
refpos = node.start + mustmatch;
mustmatch = 0;
}
}
else
{
// Try to walk the edge, the first char definitely matches
qry_match_len++;
refpos = node.start+1;
}
c = queries[qry_match_len];
while (refpos <= node.end && c != '\0')
{
char r = getRef(refpos);
XPRINTF(" Edge cmp ref: %d %c, qry: %d %c\n", refpos, r, qry_match_len, c);
if (r != c)
{
// mismatch on edge
XPRINTF("mismatch on edge: %d, edge_pos: %d\n", qry_match_len,refpos - (node.start));
goto RECORD_RESULT;
}
qry_match_len++;
refpos++;
c = queries[qry_match_len];
}
}
XPRINTF("end of string\n");
RECORD_RESULT:
set_result(cur, result, refpos - node.start, qry_match_len,
min_match_len, FORWARD);
mustmatch = refpos - node.start;
qry_match_len -= mustmatch + 1;
NEXT_SUBSTRING:
node.data = tex2D(nodetex, prev.x, prev.y);
cur = node.suffix;
XPRINTF(" following suffix link. mustmatch:%d qry_match_len:%d sl:(%d,%d)\n",
mustmatch, qry_match_len, cur.x, cur.y);
do {} while(0);
}
return;
}
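// Added note (not part of the original benchmark): a hypothetical host-side launch that
// matches the --blockDim=256 annotation above, with one thread per query, would look like
//   dim3 block(256);
//   dim3 grid((numQueries + block.x - 1) / block.x);
//   hipLaunchKernelGGL(mummergpuKernel, grid, block, 0, 0,
//                      match_coords, queries, queryAddrs, queryLengths,
//                      numQueries, min_match_len);
// where all host-side variable names are assumptions, not symbols from this file.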
| ec7faf4af14d6f2e555051ccddf997a13d5009ba.cu | //pass
//--blockDim=256 --gridDim=2
#include "common.h"
///////////////////////////////////////
//// Compute forward substring matches
///////////////////////////////////////
__global__ void
mummergpuKernel(MatchCoord* match_coords,
char* queries,
const int* queryAddrs,
const int* queryLengths,
const int numQueries,
const int min_match_len)
{
int qryid = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (qryid >= numQueries) { return; }
XPRINTF("> qryid: %d\n", qryid);
int qlen = queryLengths[qryid];
// start at root for first query character
TextureAddress cur;
cur.data = 0;
int mustmatch = 0;
int qry_match_len = 0;
int qryAddr=queryAddrs[qryid];
MatchCoord * result = match_coords + qryAddr - __umul24(qryid, min_match_len + 1);
queries += qryAddr;
int last = qlen - min_match_len;
for (int qrystart = 0;
qrystart <= last;
qrystart++, result++, queries++)
{
XPRINTF("qry: %s\n", queries + 1);
PixelOfNode node;
TextureAddress prev;
if ((cur.data == 0) || (qry_match_len < 1))
{
// start at root of tree
cur.x = 0; cur.y = 1;
qry_match_len = 1;
mustmatch = 0;
}
char c = queries[qry_match_len];
XPRINTF("In node (%d,%d): starting with %c [%d] => \n", cur.x, cur.y, c, qry_match_len);
int refpos = 0;
while ((c != '\0'))
{
XPRINTF("Next edge to follow: %c (%d)\n", c, qry_match_len);
PixelOfChildren children;
children.data = tex2D(childrentex,cur.x, cur.y);
prev = cur;
switch(c)
{
case 'A': cur=children.children[0]; break;
case 'C': cur=children.children[1]; break;
case 'G': cur=children.children[2]; break;
case 'T': cur=children.children[3]; break;
default: cur.data = 0; break;
};
XPRINTF(" In node: (%d,%d)\n", cur.x, cur.y);
// No edge to follow out of the node
if (cur.data == 0)
{
XPRINTF(" no edge\n");
set_result(prev, result, 0, qry_match_len, min_match_len,
FORWARD);
qry_match_len -= 1;
mustmatch = 0;
goto NEXT_SUBSTRING;
}
{
unsigned short xval = cur.data & 0xFFFF;
unsigned short yval = (cur.data & 0xFFFF0000) >> 16;
node.data = tex2D(nodetex, xval, yval);
}
XPRINTF(" Edge coordinates: %d - %d\n", node.start, node.end);
if (mustmatch)
{
int edgelen = node.end - node.start+1;
if (mustmatch >= edgelen)
{
XPRINTF(" mustmatch(%d) >= edgelen(%d), skipping edge\n", mustmatch, edgelen);
refpos = node.end+1;
qry_match_len += edgelen;
mustmatch -= edgelen;
}
else
{
XPRINTF(" mustmatch(%d) < edgelen(%d), skipping to:%d\n",
mustmatch, edgelen, node.start+mustmatch);
qry_match_len += mustmatch;
refpos = node.start + mustmatch;
mustmatch = 0;
}
}
else
{
// Try to walk the edge, the first char definitely matches
qry_match_len++;
refpos = node.start+1;
}
c = queries[qry_match_len];
while (refpos <= node.end && c != '\0')
{
char r = getRef(refpos);
XPRINTF(" Edge cmp ref: %d %c, qry: %d %c\n", refpos, r, qry_match_len, c);
if (r != c)
{
// mismatch on edge
XPRINTF("mismatch on edge: %d, edge_pos: %d\n", qry_match_len,refpos - (node.start));
goto RECORD_RESULT;
}
qry_match_len++;
refpos++;
c = queries[qry_match_len];
}
}
XPRINTF("end of string\n");
RECORD_RESULT:
set_result(cur, result, refpos - node.start, qry_match_len,
min_match_len, FORWARD);
mustmatch = refpos - node.start;
qry_match_len -= mustmatch + 1;
NEXT_SUBSTRING:
node.data = tex2D(nodetex, prev.x, prev.y);
cur = node.suffix;
XPRINTF(" following suffix link. mustmatch:%d qry_match_len:%d sl:(%d,%d)\n",
mustmatch, qry_match_len, cur.x, cur.y);
do {} while(0);
}
return;
}
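// Added note (not part of the original benchmark): a hypothetical host-side launch that
// matches the --blockDim=256 annotation above, with one thread per query, would look like
//   dim3 block(256);
//   dim3 grid((numQueries + block.x - 1) / block.x);
//   mummergpuKernel<<<grid, block>>>(match_coords, queries, queryAddrs,
//                                    queryLengths, numQueries, min_match_len);
// where all host-side variable names are assumptions, not symbols from this file.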
|
cca5cbdde830d2dfe827444e206cb50745fd7f6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===----- data_sharing.cu - NVPTX OpenMP debug utilities -------- CUDA -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of data sharing environments/
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include <stdio.h>
// Number of threads in the CUDA block.
__device__ static unsigned getNumThreads() { return blockDim.x; }
// Thread ID in the CUDA block
__device__ static unsigned getThreadId() { return threadIdx.x; }
// Warp ID in the CUDA block
__device__ static unsigned getWarpId() { return threadIdx.x / WARPSIZE; }
// The CUDA thread ID of the master thread.
__device__ static unsigned getMasterThreadId() {
unsigned Mask = WARPSIZE - 1;
return (getNumThreads() - 1) & (~Mask);
}
// Find the active threads in the warp - return a mask whose n-th bit is set if
// the n-th thread in the warp is active.
__device__ static unsigned getActiveThreadsMask() {
return __BALLOT_SYNC(0xFFFFFFFF, true);
}
// Return true if this is the first active thread in the warp.
__device__ static bool IsWarpMasterActiveThread() {
unsigned long long Mask = getActiveThreadsMask();
unsigned long long ShNum = WARPSIZE - (getThreadId() % WARPSIZE);
unsigned long long Sh = Mask << ShNum;
// Truncate Sh to the 32 lower bits
return (unsigned)Sh == 0;
}
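// Added worked example (assuming WARPSIZE == 32): if lanes 0-3 are active, Mask == 0xF;
// lane 0 gets ShNum == 32, so the low 32 bits of Mask << 32 are 0 and it is reported as
// the warp master, while lane 1 gets ShNum == 31 and bit 0 of the mask lands at bit 31
// of the truncated value, so it is not.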
// Return true if this is the master thread.
__device__ static bool IsMasterThread() {
return getMasterThreadId() == getThreadId();
}
/// Return the provided size aligned to the size of a pointer.
__device__ static size_t AlignVal(size_t Val) {
const size_t Align = (size_t)sizeof(void *);
if (Val & (Align - 1)) {
Val += Align;
Val &= ~(Align - 1);
}
return Val;
}
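// Added worked example: with sizeof(void *) == 8, AlignVal(13) returns 16, while an
// already aligned value such as 16 is returned unchanged.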
#define DSFLAG 0
#define DSFLAG_INIT 0
#define DSPRINT(_flag, _str, _args...) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x, _args);*/ \
} \
}
#define DSPRINT0(_flag, _str) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x);*/ \
} \
}
// Initialize the shared data structures. This is expected to be called for the
// master thread and warp masters. \param RootS: A pointer to the root of the
// data sharing stack. \param InitialDataSize: The initial size of the data in
// the slot.
EXTERN void
__kmpc_initialize_data_sharing_environment(__kmpc_data_sharing_slot *rootS,
size_t InitialDataSize) {
DSPRINT0(DSFLAG_INIT,
"Entering __kmpc_initialize_data_sharing_environment\n");
unsigned WID = getWarpId();
DSPRINT(DSFLAG_INIT, "Warp ID: %d\n", WID);
omptarget_nvptx_TeamDescr *teamDescr =
&omptarget_nvptx_threadPrivateContext->TeamContext();
__kmpc_data_sharing_slot *RootS = teamDescr->RootS(WID);
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0];
// We don't need to initialize the frame and active threads.
DSPRINT(DSFLAG_INIT, "Initial data size: %08x \n", InitialDataSize);
DSPRINT(DSFLAG_INIT, "Root slot at: %016llx \n", (long long)RootS);
DSPRINT(DSFLAG_INIT, "Root slot data-end at: %016llx \n",
(long long)RootS->DataEnd);
DSPRINT(DSFLAG_INIT, "Root slot next at: %016llx \n", (long long)RootS->Next);
DSPRINT(DSFLAG_INIT, "Shared slot ptr at: %016llx \n",
(long long)DataSharingState.SlotPtr[WID]);
DSPRINT(DSFLAG_INIT, "Shared stack ptr at: %016llx \n",
(long long)DataSharingState.StackPtr[WID]);
DSPRINT0(DSFLAG_INIT, "Exiting __kmpc_initialize_data_sharing_environment\n");
}
EXTERN void *__kmpc_data_sharing_environment_begin(
__kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack,
void **SavedSharedFrame, int32_t *SavedActiveThreads,
size_t SharingDataSize, size_t SharingDefaultDataSize,
int16_t IsOMPRuntimeInitialized) {
DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_begin\n");
  // If the runtime has been elided, use __shared__ memory for master-worker
// data sharing.
if (!IsOMPRuntimeInitialized)
return (void *)&DataSharingState;
DSPRINT(DSFLAG, "Data Size %016llx\n", SharingDataSize);
DSPRINT(DSFLAG, "Default Data Size %016llx\n", SharingDefaultDataSize);
unsigned WID = getWarpId();
unsigned CurActiveThreads = getActiveThreadsMask();
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void *&FrameP = DataSharingState.FramePtr[WID];
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG, "Save current slot/stack values.\n");
// Save the current values.
*SavedSharedSlot = SlotP;
*SavedSharedStack = StackP;
*SavedSharedFrame = FrameP;
*SavedActiveThreads = ActiveT;
DSPRINT(DSFLAG, "Warp ID: %d\n", WID);
DSPRINT(DSFLAG, "Saved slot ptr at: %016llx \n", (long long)SlotP);
DSPRINT(DSFLAG, "Saved stack ptr at: %016llx \n", (long long)StackP);
DSPRINT(DSFLAG, "Saved frame ptr at: %016llx \n", (long long)FrameP);
DSPRINT(DSFLAG, "Active threads: %08x \n", ActiveT);
// Only the warp active master needs to grow the stack.
if (IsWarpMasterActiveThread()) {
// Save the current active threads.
ActiveT = CurActiveThreads;
// Make sure we use aligned sizes to avoid rematerialization of data.
SharingDataSize = AlignVal(SharingDataSize);
// FIXME: The default data size can be assumed to be aligned?
SharingDefaultDataSize = AlignVal(SharingDefaultDataSize);
// Check if we have room for the data in the current slot.
const uintptr_t CurrentStartAddress = (uintptr_t)StackP;
const uintptr_t CurrentEndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequiredEndAddress =
CurrentStartAddress + (uintptr_t)SharingDataSize;
DSPRINT(DSFLAG, "Data Size %016llx\n", SharingDataSize);
DSPRINT(DSFLAG, "Default Data Size %016llx\n", SharingDefaultDataSize);
DSPRINT(DSFLAG, "Current Start Address %016llx\n", CurrentStartAddress);
DSPRINT(DSFLAG, "Current End Address %016llx\n", CurrentEndAddress);
DSPRINT(DSFLAG, "Required End Address %016llx\n", RequiredEndAddress);
DSPRINT(DSFLAG, "Active Threads %08x\n", ActiveT);
// If we require a new slot, allocate it and initialize it (or attempt to
// reuse one). Also, set the shared stack and slot pointers to the new
// place. If we do not need to grow the stack, just adapt the stack and
// frame pointers.
if (CurrentEndAddress < RequiredEndAddress) {
size_t NewSize = (SharingDataSize > SharingDefaultDataSize)
? SharingDataSize
: SharingDefaultDataSize;
__kmpc_data_sharing_slot *NewSlot = 0;
// Attempt to reuse an existing slot.
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) {
uintptr_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd -
(uintptr_t)(&ExistingSlot->Data[0]);
if (ExistingSlotSize >= NewSize) {
DSPRINT(DSFLAG, "Reusing stack slot %016llx\n",
(long long)ExistingSlot);
NewSlot = ExistingSlot;
} else {
DSPRINT(DSFLAG, "Cleaning up -failed reuse - %016llx\n",
(long long)SlotP->Next);
free(ExistingSlot);
}
}
if (!NewSlot) {
NewSlot = (__kmpc_data_sharing_slot *)malloc(
sizeof(__kmpc_data_sharing_slot) + NewSize);
DSPRINT(DSFLAG, "New slot allocated %016llx (data size=%016llx)\n",
(long long)NewSlot, NewSize);
}
NewSlot->Next = 0;
NewSlot->DataEnd = &NewSlot->Data[NewSize];
SlotP->Next = NewSlot;
SlotP = NewSlot;
StackP = &NewSlot->Data[SharingDataSize];
FrameP = &NewSlot->Data[0];
} else {
      // Clean up any old slot that we may still have. The slot producers do
      // not eliminate them because they may be used to return data.
if (SlotP->Next) {
DSPRINT(DSFLAG, "Cleaning up - old not required - %016llx\n",
(long long)SlotP->Next);
free(SlotP->Next);
SlotP->Next = 0;
}
FrameP = StackP;
StackP = (void *)RequiredEndAddress;
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_begin\n");
// All the threads in this warp get the frame they should work with.
return FrameP;
}
EXTERN void __kmpc_data_sharing_environment_end(
__kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack,
void **SavedSharedFrame, int32_t *SavedActiveThreads,
int32_t IsEntryPoint) {
DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_end\n");
unsigned WID = getWarpId();
if (IsEntryPoint) {
if (IsWarpMasterActiveThread()) {
DSPRINT0(DSFLAG, "Doing clean up\n");
// The master thread cleans the saved slot, because this is an environment
// only for the master.
__kmpc_data_sharing_slot *S =
IsMasterThread() ? *SavedSharedSlot : DataSharingState.SlotPtr[WID];
if (S->Next) {
free(S->Next);
S->Next = 0;
}
}
DSPRINT0(DSFLAG, "Exiting Exiting __kmpc_data_sharing_environment_end\n");
return;
}
int32_t CurActive = getActiveThreadsMask();
// Only the warp master can restore the stack and frame information, and only
// if there are no other threads left behind in this environment (i.e. the
// warp diverged and returns in different places). This only works if we
// assume that threads will converge right after the call site that started
// the environment.
if (IsWarpMasterActiveThread()) {
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG, "Before restoring the stack\n");
// Zero the bits in the mask. If it is still different from zero, then we
// have other threads that will return after the current ones.
ActiveT &= ~CurActive;
DSPRINT(DSFLAG, "Active threads: %08x; New mask: %08x\n", CurActive,
ActiveT);
if (!ActiveT) {
// No other active threads? Great, lets restore the stack.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void *&FrameP = DataSharingState.FramePtr[WID];
SlotP = *SavedSharedSlot;
StackP = *SavedSharedStack;
FrameP = *SavedSharedFrame;
ActiveT = *SavedActiveThreads;
DSPRINT(DSFLAG, "Restored slot ptr at: %016llx \n", (long long)SlotP);
DSPRINT(DSFLAG, "Restored stack ptr at: %016llx \n", (long long)StackP);
DSPRINT(DSFLAG, "Restored frame ptr at: %016llx \n", (long long)FrameP);
DSPRINT(DSFLAG, "Active threads: %08x \n", ActiveT);
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_end\n");
return;
}
EXTERN void *
__kmpc_get_data_sharing_environment_frame(int32_t SourceThreadID,
int16_t IsOMPRuntimeInitialized) {
DSPRINT0(DSFLAG, "Entering __kmpc_get_data_sharing_environment_frame\n");
// If the runtime has been elided, use __shared__ memory for master-worker
// data sharing. We're reusing the statically allocated data structure
// that is used for standard data sharing.
if (!IsOMPRuntimeInitialized)
return (void *)&DataSharingState;
// Get the frame used by the requested thread.
unsigned SourceWID = SourceThreadID / WARPSIZE;
DSPRINT(DSFLAG, "Source warp: %d\n", SourceWID);
void *P = DataSharingState.FramePtr[SourceWID];
DSPRINT0(DSFLAG, "Exiting __kmpc_get_data_sharing_environment_frame\n");
return P;
}
////////////////////////////////////////////////////////////////////////////////
// Runtime functions for trunk data sharing scheme.
////////////////////////////////////////////////////////////////////////////////
// Initialize data sharing data structure. This function needs to be called
// once at the beginning of a data sharing context (coincides with the kernel
// initialization).
EXTERN void __kmpc_data_sharing_init_stack() {
// This function initializes the stack pointer with the pointer to the
// statically allocated shared memory slots. The size of a shared memory
// slot is pre-determined to be 256 bytes.
unsigned WID = getWarpId();
omptarget_nvptx_TeamDescr *teamDescr =
&omptarget_nvptx_threadPrivateContext->TeamContext();
__kmpc_data_sharing_slot *RootS = teamDescr->RootS(WID);
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.TailPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0];
// We initialize the list of references to arguments here.
omptarget_nvptx_globalArgs.Init();
}
// Called at the time of the kernel initialization. This is used to initialize
// the list of references to shared variables and to pre-allocate global storage
// for holding the globalized variables.
//
// By default the globalized variables are stored in global memory. If the
// UseSharedMemory is set to true, the runtime will attempt to use shared memory
// as long as the size requested fits the pre-allocated size.
//
// Called by: master, TODO: call by workers
EXTERN void* __kmpc_data_sharing_push_stack(size_t DataSize,
int16_t UseSharedMemory) {
if (IsMasterThread()) {
unsigned WID = getWarpId();
// SlotP will point to either the shared memory slot or an existing
// global memory slot.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
__kmpc_data_sharing_slot *&TailSlotP = DataSharingState.TailPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void *FrameP = 0;
// Check if we have room for the data in the current slot.
const uintptr_t StartAddress = (uintptr_t)StackP;
const uintptr_t EndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequestedEndAddress = StartAddress + (uintptr_t)DataSize;
// If we requested more data than there is room for in the rest
// of the slot then we need to either re-use the next slot, if one exists,
// or create a new slot.
if (EndAddress < RequestedEndAddress) {
size_t NewSize = DataSize;
// The new or reused slot for holding the data being pushed.
__kmpc_data_sharing_slot *NewSlot = 0;
// Check if there is a next slot.
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) {
// Attempt to reuse an existing slot provided the data fits in the slot.
// The leftover data space will not be used.
ptrdiff_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd -
(uintptr_t)(&ExistingSlot->Data[0]);
// Try to add the data in the next available slot. Search for a slot
// with enough space.
while (ExistingSlotSize < NewSize) {
SlotP->Next = ExistingSlot->Next;
SlotP->Next->Prev = ExistingSlot->Prev;
free(ExistingSlot);
ExistingSlot = SlotP->Next;
if (!ExistingSlot)
break;
ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd -
(uintptr_t)(&ExistingSlot->Data[0]);
}
// Check if a slot has been found.
if (ExistingSlotSize >= NewSize) {
NewSlot = ExistingSlot;
NewSlot->PrevSlotStackPtr = StackP;
}
}
if (!NewSlot) {
// Allocate at least the default size.
// TODO: generalize this for workers which need a larger data slot
// i.e. using DS_Worker_Warp_Slot_Size.
if (DS_Slot_Size > DataSize)
NewSize = DS_Slot_Size;
NewSlot = (__kmpc_data_sharing_slot *)malloc(
sizeof(__kmpc_data_sharing_slot) + NewSize);
NewSlot->Next = 0;
NewSlot->Prev = SlotP;
NewSlot->PrevSlotStackPtr = StackP;
NewSlot->DataEnd = &NewSlot->Data[NewSize];
// Newly allocated slots are also tail slots.
TailSlotP = NewSlot;
// Make previous slot point to the newly allocated slot.
SlotP->Next = NewSlot;
}
// The current slot becomes the new slot.
SlotP = NewSlot;
// The stack pointer always points to the next free stack frame.
StackP = &NewSlot->Data[DataSize];
// The frame pointer always points to the beginning of the frame.
FrameP = &NewSlot->Data[0];
} else {
// Add the data chunk to the current slot. The frame pointer is set to
// point to the start of the new frame held in StackP.
FrameP = StackP;
// Reset stack pointer to the requested address.
StackP = (void *)RequestedEndAddress;
}
return FrameP;
}
// TODO: add memory fence here when this function can be called by
// worker threads also. For now, this function is only called by the
// master thread of each team.
// TODO: implement sharing across workers.
return 0;
}
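// Added usage sketch (hypothetical; in practice this sequence is emitted by the compiler).
// A master thread globalizing 32 ints would do roughly:
//   void *frame = __kmpc_data_sharing_push_stack(32 * sizeof(int), /*UseSharedMemory=*/0);
//   int *globalized = (int *)frame;
//   ... parallel region in which workers reference 'globalized' ...
//   __kmpc_data_sharing_pop_stack(frame);
// The names 'frame' and 'globalized' are illustrative assumptions, not part of this runtime.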
// Pop the stack and free any memory which can be reclaimed.
//
// When the pop operation removes the last global memory slot,
// reclaim all outstanding global memory slots since it is
// likely we have reached the end of the kernel.
EXTERN void __kmpc_data_sharing_pop_stack(void *FrameStart) {
if (IsMasterThread()) {
unsigned WID = getWarpId();
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
// If we try to pop the last frame of the current slot we need to
// move to the previous slot if there is one.
const uintptr_t StartAddress = (uintptr_t)FrameStart;
if (StartAddress == (uintptr_t)&SlotP->Data[0]) {
if (SlotP->Prev) {
// The new stack pointer is the end of the data field of the
// previous slot. This will allow the stack pointer to be
// used in the computation of the remaining data space in
// the current slot.
StackP = SlotP->PrevSlotStackPtr;
// Reset SlotP to previous slot.
SlotP = SlotP->Prev;
}
// If this will "pop" the last global memory node then it is likely
// that we are at the end of the data sharing region and we can
// de-allocate any existing global memory slots.
if (!SlotP->Prev) {
__kmpc_data_sharing_slot *Tail = DataSharingState.TailPtr[WID];
while(Tail && Tail->Prev) {
Tail = Tail->Prev;
free(Tail->Next);
Tail->Next=0;
}
}
} else {
// This is not the last frame popped from this slot.
// Reset StackP
StackP = FrameStart;
}
return;
}
// TODO: add memory fence here when this function can be called by
// worker threads also. For now, this function is only called by the
// master thread of each team.
// TODO: implement sharing across workers.
}
// Begin a data sharing context. Maintain a list of references to shared
// variables. This list of references to shared variables will be passed
// to one or more threads.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_begin_sharing_variables(void ***GlobalArgs, size_t nArgs) {
omptarget_nvptx_globalArgs.EnsureSize(nArgs);
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
// End a data sharing context. There is no need to have a list of refs
// to shared variables because the context in which those variables were
// shared has now ended. This should clean-up the list of references only
// without affecting the actual global storage of the variables.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_end_sharing_variables() {
omptarget_nvptx_globalArgs.DeInit();
}
// This function will return a list of references to global variables. This
// is how the workers will get a reference to the globalized variable. The
// members of this list will be passed to the outlined parallel function
// preserving the order.
// Called by all workers.
EXTERN void __kmpc_get_shared_variables(void ***GlobalArgs) {
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
| cca5cbdde830d2dfe827444e206cb50745fd7f6b.cu | //===----- data_sharing.cu - NVPTX OpenMP debug utilities -------- CUDA -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of data sharing environments/
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include <stdio.h>
// Number of threads in the CUDA block.
__device__ static unsigned getNumThreads() { return blockDim.x; }
// Thread ID in the CUDA block
__device__ static unsigned getThreadId() { return threadIdx.x; }
// Warp ID in the CUDA block
__device__ static unsigned getWarpId() { return threadIdx.x / WARPSIZE; }
// The CUDA thread ID of the master thread.
__device__ static unsigned getMasterThreadId() {
unsigned Mask = WARPSIZE - 1;
return (getNumThreads() - 1) & (~Mask);
}
// Find the active threads in the warp - return a mask whose n-th bit is set if
// the n-th thread in the warp is active.
__device__ static unsigned getActiveThreadsMask() {
return __BALLOT_SYNC(0xFFFFFFFF, true);
}
// Return true if this is the first active thread in the warp.
__device__ static bool IsWarpMasterActiveThread() {
unsigned long long Mask = getActiveThreadsMask();
unsigned long long ShNum = WARPSIZE - (getThreadId() % WARPSIZE);
unsigned long long Sh = Mask << ShNum;
// Truncate Sh to the 32 lower bits
return (unsigned)Sh == 0;
}
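// Added worked example (assuming WARPSIZE == 32): with lanes 0-3 active (Mask == 0xF),
// lane 0 sees the low 32 bits of Mask << 32 as 0 and is reported as the warp master;
// lane 1 shifts by 31, so bit 0 of the mask survives the truncation and it is not.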
// Return true if this is the master thread.
__device__ static bool IsMasterThread() {
return getMasterThreadId() == getThreadId();
}
/// Return the provided size aligned to the size of a pointer.
__device__ static size_t AlignVal(size_t Val) {
const size_t Align = (size_t)sizeof(void *);
if (Val & (Align - 1)) {
Val += Align;
Val &= ~(Align - 1);
}
return Val;
}
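// Added worked example: assuming sizeof(void *) == 8, AlignVal(13) rounds up to 16 and
// AlignVal(16) stays 16.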
#define DSFLAG 0
#define DSFLAG_INIT 0
#define DSPRINT(_flag, _str, _args...) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x, _args);*/ \
} \
}
#define DSPRINT0(_flag, _str) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x);*/ \
} \
}
// Initialize the shared data structures. This is expected to be called for the
// master thread and warp masters. \param RootS: A pointer to the root of the
// data sharing stack. \param InitialDataSize: The initial size of the data in
// the slot.
EXTERN void
__kmpc_initialize_data_sharing_environment(__kmpc_data_sharing_slot *rootS,
size_t InitialDataSize) {
DSPRINT0(DSFLAG_INIT,
"Entering __kmpc_initialize_data_sharing_environment\n");
unsigned WID = getWarpId();
DSPRINT(DSFLAG_INIT, "Warp ID: %d\n", WID);
omptarget_nvptx_TeamDescr *teamDescr =
&omptarget_nvptx_threadPrivateContext->TeamContext();
__kmpc_data_sharing_slot *RootS = teamDescr->RootS(WID);
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0];
// We don't need to initialize the frame and active threads.
DSPRINT(DSFLAG_INIT, "Initial data size: %08x \n", InitialDataSize);
DSPRINT(DSFLAG_INIT, "Root slot at: %016llx \n", (long long)RootS);
DSPRINT(DSFLAG_INIT, "Root slot data-end at: %016llx \n",
(long long)RootS->DataEnd);
DSPRINT(DSFLAG_INIT, "Root slot next at: %016llx \n", (long long)RootS->Next);
DSPRINT(DSFLAG_INIT, "Shared slot ptr at: %016llx \n",
(long long)DataSharingState.SlotPtr[WID]);
DSPRINT(DSFLAG_INIT, "Shared stack ptr at: %016llx \n",
(long long)DataSharingState.StackPtr[WID]);
DSPRINT0(DSFLAG_INIT, "Exiting __kmpc_initialize_data_sharing_environment\n");
}
EXTERN void *__kmpc_data_sharing_environment_begin(
__kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack,
void **SavedSharedFrame, int32_t *SavedActiveThreads,
size_t SharingDataSize, size_t SharingDefaultDataSize,
int16_t IsOMPRuntimeInitialized) {
DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_begin\n");
  // If the runtime has been elided, use __shared__ memory for master-worker
// data sharing.
if (!IsOMPRuntimeInitialized)
return (void *)&DataSharingState;
DSPRINT(DSFLAG, "Data Size %016llx\n", SharingDataSize);
DSPRINT(DSFLAG, "Default Data Size %016llx\n", SharingDefaultDataSize);
unsigned WID = getWarpId();
unsigned CurActiveThreads = getActiveThreadsMask();
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void *&FrameP = DataSharingState.FramePtr[WID];
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG, "Save current slot/stack values.\n");
// Save the current values.
*SavedSharedSlot = SlotP;
*SavedSharedStack = StackP;
*SavedSharedFrame = FrameP;
*SavedActiveThreads = ActiveT;
DSPRINT(DSFLAG, "Warp ID: %d\n", WID);
DSPRINT(DSFLAG, "Saved slot ptr at: %016llx \n", (long long)SlotP);
DSPRINT(DSFLAG, "Saved stack ptr at: %016llx \n", (long long)StackP);
DSPRINT(DSFLAG, "Saved frame ptr at: %016llx \n", (long long)FrameP);
DSPRINT(DSFLAG, "Active threads: %08x \n", ActiveT);
// Only the warp active master needs to grow the stack.
if (IsWarpMasterActiveThread()) {
// Save the current active threads.
ActiveT = CurActiveThreads;
// Make sure we use aligned sizes to avoid rematerialization of data.
SharingDataSize = AlignVal(SharingDataSize);
// FIXME: The default data size can be assumed to be aligned?
SharingDefaultDataSize = AlignVal(SharingDefaultDataSize);
// Check if we have room for the data in the current slot.
const uintptr_t CurrentStartAddress = (uintptr_t)StackP;
const uintptr_t CurrentEndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequiredEndAddress =
CurrentStartAddress + (uintptr_t)SharingDataSize;
DSPRINT(DSFLAG, "Data Size %016llx\n", SharingDataSize);
DSPRINT(DSFLAG, "Default Data Size %016llx\n", SharingDefaultDataSize);
DSPRINT(DSFLAG, "Current Start Address %016llx\n", CurrentStartAddress);
DSPRINT(DSFLAG, "Current End Address %016llx\n", CurrentEndAddress);
DSPRINT(DSFLAG, "Required End Address %016llx\n", RequiredEndAddress);
DSPRINT(DSFLAG, "Active Threads %08x\n", ActiveT);
// If we require a new slot, allocate it and initialize it (or attempt to
// reuse one). Also, set the shared stack and slot pointers to the new
// place. If we do not need to grow the stack, just adapt the stack and
// frame pointers.
if (CurrentEndAddress < RequiredEndAddress) {
size_t NewSize = (SharingDataSize > SharingDefaultDataSize)
? SharingDataSize
: SharingDefaultDataSize;
__kmpc_data_sharing_slot *NewSlot = 0;
// Attempt to reuse an existing slot.
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) {
uintptr_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd -
(uintptr_t)(&ExistingSlot->Data[0]);
if (ExistingSlotSize >= NewSize) {
DSPRINT(DSFLAG, "Reusing stack slot %016llx\n",
(long long)ExistingSlot);
NewSlot = ExistingSlot;
} else {
DSPRINT(DSFLAG, "Cleaning up -failed reuse - %016llx\n",
(long long)SlotP->Next);
free(ExistingSlot);
}
}
if (!NewSlot) {
NewSlot = (__kmpc_data_sharing_slot *)malloc(
sizeof(__kmpc_data_sharing_slot) + NewSize);
DSPRINT(DSFLAG, "New slot allocated %016llx (data size=%016llx)\n",
(long long)NewSlot, NewSize);
}
NewSlot->Next = 0;
NewSlot->DataEnd = &NewSlot->Data[NewSize];
SlotP->Next = NewSlot;
SlotP = NewSlot;
StackP = &NewSlot->Data[SharingDataSize];
FrameP = &NewSlot->Data[0];
} else {
      // Clean up any old slot that we may still have. The slot producers do
      // not eliminate them because they may be used to return data.
if (SlotP->Next) {
DSPRINT(DSFLAG, "Cleaning up - old not required - %016llx\n",
(long long)SlotP->Next);
free(SlotP->Next);
SlotP->Next = 0;
}
FrameP = StackP;
StackP = (void *)RequiredEndAddress;
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_begin\n");
// All the threads in this warp get the frame they should work with.
return FrameP;
}
EXTERN void __kmpc_data_sharing_environment_end(
__kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack,
void **SavedSharedFrame, int32_t *SavedActiveThreads,
int32_t IsEntryPoint) {
DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_end\n");
unsigned WID = getWarpId();
if (IsEntryPoint) {
if (IsWarpMasterActiveThread()) {
DSPRINT0(DSFLAG, "Doing clean up\n");
// The master thread cleans the saved slot, because this is an environment
// only for the master.
__kmpc_data_sharing_slot *S =
IsMasterThread() ? *SavedSharedSlot : DataSharingState.SlotPtr[WID];
if (S->Next) {
free(S->Next);
S->Next = 0;
}
}
DSPRINT0(DSFLAG, "Exiting Exiting __kmpc_data_sharing_environment_end\n");
return;
}
int32_t CurActive = getActiveThreadsMask();
// Only the warp master can restore the stack and frame information, and only
// if there are no other threads left behind in this environment (i.e. the
// warp diverged and returns in different places). This only works if we
// assume that threads will converge right after the call site that started
// the environment.
if (IsWarpMasterActiveThread()) {
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG, "Before restoring the stack\n");
// Zero the bits in the mask. If it is still different from zero, then we
// have other threads that will return after the current ones.
ActiveT &= ~CurActive;
DSPRINT(DSFLAG, "Active threads: %08x; New mask: %08x\n", CurActive,
ActiveT);
if (!ActiveT) {
// No other active threads? Great, lets restore the stack.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void *&FrameP = DataSharingState.FramePtr[WID];
SlotP = *SavedSharedSlot;
StackP = *SavedSharedStack;
FrameP = *SavedSharedFrame;
ActiveT = *SavedActiveThreads;
DSPRINT(DSFLAG, "Restored slot ptr at: %016llx \n", (long long)SlotP);
DSPRINT(DSFLAG, "Restored stack ptr at: %016llx \n", (long long)StackP);
DSPRINT(DSFLAG, "Restored frame ptr at: %016llx \n", (long long)FrameP);
DSPRINT(DSFLAG, "Active threads: %08x \n", ActiveT);
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_end\n");
return;
}
EXTERN void *
__kmpc_get_data_sharing_environment_frame(int32_t SourceThreadID,
int16_t IsOMPRuntimeInitialized) {
DSPRINT0(DSFLAG, "Entering __kmpc_get_data_sharing_environment_frame\n");
// If the runtime has been elided, use __shared__ memory for master-worker
// data sharing. We're reusing the statically allocated data structure
// that is used for standard data sharing.
if (!IsOMPRuntimeInitialized)
return (void *)&DataSharingState;
// Get the frame used by the requested thread.
unsigned SourceWID = SourceThreadID / WARPSIZE;
DSPRINT(DSFLAG, "Source warp: %d\n", SourceWID);
void *P = DataSharingState.FramePtr[SourceWID];
DSPRINT0(DSFLAG, "Exiting __kmpc_get_data_sharing_environment_frame\n");
return P;
}
////////////////////////////////////////////////////////////////////////////////
// Runtime functions for trunk data sharing scheme.
////////////////////////////////////////////////////////////////////////////////
// Initialize data sharing data structure. This function needs to be called
// once at the beginning of a data sharing context (coincides with the kernel
// initialization).
EXTERN void __kmpc_data_sharing_init_stack() {
// This function initializes the stack pointer with the pointer to the
// statically allocated shared memory slots. The size of a shared memory
// slot is pre-determined to be 256 bytes.
unsigned WID = getWarpId();
omptarget_nvptx_TeamDescr *teamDescr =
&omptarget_nvptx_threadPrivateContext->TeamContext();
__kmpc_data_sharing_slot *RootS = teamDescr->RootS(WID);
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.TailPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0];
// We initialize the list of references to arguments here.
omptarget_nvptx_globalArgs.Init();
}
// Called at the time of the kernel initialization. This is used to initialize
// the list of references to shared variables and to pre-allocate global storage
// for holding the globalized variables.
//
// By default the globalized variables are stored in global memory. If the
// UseSharedMemory is set to true, the runtime will attempt to use shared memory
// as long as the size requested fits the pre-allocated size.
//
// Called by: master, TODO: call by workers
EXTERN void* __kmpc_data_sharing_push_stack(size_t DataSize,
int16_t UseSharedMemory) {
if (IsMasterThread()) {
unsigned WID = getWarpId();
// SlotP will point to either the shared memory slot or an existing
// global memory slot.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
__kmpc_data_sharing_slot *&TailSlotP = DataSharingState.TailPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void *FrameP = 0;
// Check if we have room for the data in the current slot.
const uintptr_t StartAddress = (uintptr_t)StackP;
const uintptr_t EndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequestedEndAddress = StartAddress + (uintptr_t)DataSize;
// If we requested more data than there is room for in the rest
// of the slot then we need to either re-use the next slot, if one exists,
// or create a new slot.
if (EndAddress < RequestedEndAddress) {
size_t NewSize = DataSize;
// The new or reused slot for holding the data being pushed.
__kmpc_data_sharing_slot *NewSlot = 0;
// Check if there is a next slot.
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) {
// Attempt to reuse an existing slot provided the data fits in the slot.
// The leftover data space will not be used.
ptrdiff_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd -
(uintptr_t)(&ExistingSlot->Data[0]);
// Try to add the data in the next available slot. Search for a slot
// with enough space.
while (ExistingSlotSize < NewSize) {
SlotP->Next = ExistingSlot->Next;
SlotP->Next->Prev = ExistingSlot->Prev;
free(ExistingSlot);
ExistingSlot = SlotP->Next;
if (!ExistingSlot)
break;
ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd -
(uintptr_t)(&ExistingSlot->Data[0]);
}
// Check if a slot has been found.
if (ExistingSlotSize >= NewSize) {
NewSlot = ExistingSlot;
NewSlot->PrevSlotStackPtr = StackP;
}
}
if (!NewSlot) {
// Allocate at least the default size.
// TODO: generalize this for workers which need a larger data slot
// i.e. using DS_Worker_Warp_Slot_Size.
if (DS_Slot_Size > DataSize)
NewSize = DS_Slot_Size;
NewSlot = (__kmpc_data_sharing_slot *)malloc(
sizeof(__kmpc_data_sharing_slot) + NewSize);
NewSlot->Next = 0;
NewSlot->Prev = SlotP;
NewSlot->PrevSlotStackPtr = StackP;
NewSlot->DataEnd = &NewSlot->Data[NewSize];
// Newly allocated slots are also tail slots.
TailSlotP = NewSlot;
// Make previous slot point to the newly allocated slot.
SlotP->Next = NewSlot;
}
// The current slot becomes the new slot.
SlotP = NewSlot;
// The stack pointer always points to the next free stack frame.
StackP = &NewSlot->Data[DataSize];
// The frame pointer always points to the beginning of the frame.
FrameP = &NewSlot->Data[0];
} else {
// Add the data chunk to the current slot. The frame pointer is set to
// point to the start of the new frame held in StackP.
FrameP = StackP;
// Reset stack pointer to the requested address.
StackP = (void *)RequestedEndAddress;
}
return FrameP;
}
// TODO: add memory fence here when this function can be called by
// worker threads also. For now, this function is only called by the
// master thread of each team.
// TODO: implement sharing across workers.
return 0;
}
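// Added usage sketch (hypothetical; in practice emitted by the compiler):
//   void *frame = __kmpc_data_sharing_push_stack(32 * sizeof(int), /*UseSharedMemory=*/0);
//   ... workers access the globalized storage through 'frame' ...
//   __kmpc_data_sharing_pop_stack(frame);
// 'frame' is an illustrative assumption, not a symbol of this runtime.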
// Pop the stack and free any memory which can be reclaimed.
//
// When the pop operation removes the last global memory slot,
// reclaim all outstanding global memory slots since it is
// likely we have reached the end of the kernel.
EXTERN void __kmpc_data_sharing_pop_stack(void *FrameStart) {
if (IsMasterThread()) {
unsigned WID = getWarpId();
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
// If we try to pop the last frame of the current slot we need to
// move to the previous slot if there is one.
const uintptr_t StartAddress = (uintptr_t)FrameStart;
if (StartAddress == (uintptr_t)&SlotP->Data[0]) {
if (SlotP->Prev) {
// The new stack pointer is the end of the data field of the
// previous slot. This will allow the stack pointer to be
// used in the computation of the remaining data space in
// the current slot.
StackP = SlotP->PrevSlotStackPtr;
// Reset SlotP to previous slot.
SlotP = SlotP->Prev;
}
// If this will "pop" the last global memory node then it is likely
// that we are at the end of the data sharing region and we can
// de-allocate any existing global memory slots.
if (!SlotP->Prev) {
__kmpc_data_sharing_slot *Tail = DataSharingState.TailPtr[WID];
while(Tail && Tail->Prev) {
Tail = Tail->Prev;
free(Tail->Next);
Tail->Next=0;
}
}
} else {
// This is not the last frame popped from this slot.
// Reset StackP
StackP = FrameStart;
}
return;
}
// TODO: add memory fence here when this function can be called by
// worker threads also. For now, this function is only called by the
// master thread of each team.
// TODO: implement sharing across workers.
}
// Begin a data sharing context. Maintain a list of references to shared
// variables. This list of references to shared variables will be passed
// to one or more threads.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_begin_sharing_variables(void ***GlobalArgs, size_t nArgs) {
omptarget_nvptx_globalArgs.EnsureSize(nArgs);
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
// End a data sharing context. There is no need to have a list of refs
// to shared variables because the context in which those variables were
// shared has now ended. This should clean-up the list of references only
// without affecting the actual global storage of the variables.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_end_sharing_variables() {
omptarget_nvptx_globalArgs.DeInit();
}
// This function will return a list of references to global variables. This
// is how the workers will get a reference to the globalized variable. The
// members of this list will be passed to the outlined parallel function
// preserving the order.
// Called by all workers.
EXTERN void __kmpc_get_shared_variables(void ***GlobalArgs) {
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
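// Added usage sketch (hypothetical; normally emitted by the compiler):
//   master: __kmpc_begin_sharing_variables(&args, 1); args[0] = (void *)&x;
//   worker: __kmpc_get_shared_variables(&args); /* read args[0] */
//   master: __kmpc_end_sharing_variables();
// 'args' and 'x' are illustrative assumptions only.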
|
81bdd665478f24211e4dfe24bdd85002d2c441d5.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 81bdd665478f24211e4dfe24bdd85002d2c441d5.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
7aca558b6054f38ba5f599ae71f059bff6c58b37.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <hip/hip_runtime.h>
#include "dsc_conv.cuh"
#include "error_util.h"
__global__ void depthwise_conv(float *image, float *tmp, float *depthwiseResult, float *filter, const int input_h, const int input_w, const int depth, const int depthFilterW,
const int depthwiseOutputW, const int padding, const int stride, const int blocksPerChannel)
{
int idxRow = threadIdx.x + blockDim.x * blockIdx.x;
int idxCol = threadIdx.y + blockDim.y * blockIdx.y;
int idxXperChannel = idxRow % (blockDim.x * blocksPerChannel);
int idxYperChannel = idxCol % (blockDim.y * blocksPerChannel);
int idxChannel = ((int)(blockIdx.x) / blocksPerChannel) % depth;
if (idxXperChannel <depthwiseOutputW && idxYperChannel < depthwiseOutputW)
{
for (int k = 0; k < depthFilterW; ++k) {
for (int q = 0; q < depthFilterW; ++q) {
depthwiseResult[idxChannel *depthwiseOutputW*depthwiseOutputW + depthwiseOutputW * idxYperChannel + idxXperChannel]
+= tmp[idxChannel *input_h*input_w + input_w*(k + idxYperChannel* stride) + (q + idxXperChannel*stride)]
* filter[idxChannel * depthFilterW*depthFilterW + k*depthFilterW + q];
}
}
}
//int depthBoundary = idxCol / depthwiseOutputW;
//int height_per_channel = idxCol % depthwiseOutputW;
//int tmpSize = col + padding;
//if (idxRow <depthwiseOutputW && idxCol < depthwiseOutputW * depth)
//{
// for (int k = 0; k < depthFilterW; ++k) {
// for (int q = 0; q < depthFilterW; ++q) {
// //depthwiseResult[depthwiseOutputW * idxCol + idxRow] += image[depthBoundary * (int)powf(col, 2) + col*(k + idxCol*stride - depthBoundary * (depthwiseOutputW*stride))
// // + (q + idxRow*stride)] * filter[depthBoundary * (int)powf(depthFilterW, 2) + k*depthFilterW + q];
// depthwiseResult[depthwiseOutputW * idxCol + idxRow] += tmp[depthBoundary *tmpSize*tmpSize + tmpSize*(k + height_per_channel* stride)+ (q + idxRow*stride)]
// * filter[depthBoundary * depthFilterW*depthFilterW + k*depthFilterW + q];
// }
// }
// //printf("idxRow: %d, idxCol: %d \n %d \n ------ \n", idxRow, idxCol, (int)powf(col,2));
//
// //printf("w : %d \n", idxRow*stride);
//}
}
void depth_f(float *image, float *tmp, float *depthwiseResult, float *filter, const int input_h, const int input_w, const int depth, const int depthFilterW, const int depthwiseOutputW,
const int padding, const int stride, const int conv_blocks_num, dim3 threads_per_block, dim3 num_of_blocks) {
depthwise_conv << < num_of_blocks, threads_per_block >> > (image, tmp, depthwiseResult, filter,
input_h, input_w, depth, depthFilterW, depthwiseOutputW, padding, stride, conv_blocks_num);
hipDeviceSynchronize();
return;
}
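// Added note (assumption, based on the output-size formula commented out in pointwise_conv
// below): for a 32x32 input, a 3x3 depthwise filter, padding 1 and stride 1 the output width
// is (32 - 3 + 2*1)/1 + 1 = 32, so with 16x16 thread blocks the caller would pass
// conv_blocks_num = 2 (blocks per channel along each dimension). The concrete launch
// geometry is chosen by the caller and is not fixed by this file.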
__global__ void pointwise_conv(float *depthwiseResult, float *pointwiseResult, float *pointwiseFilter, const int col, const int depth, const int depthFilterW,
const int depthwiseOutputW, const int outputDepth, const int padding, const int stride, const int blocksPerChannel)
{
//const int depthwiseOutputW = (col - depthFilterW + 2 * padding) / stride + 1;
int idxRow = threadIdx.x + blockDim.x * blockIdx.x;
int idxCol = threadIdx.y + blockDim.y * blockIdx.y;
//int depthBoundary = idxCol / depthwiseOutputW;
int idxXperChannel = idxRow % (blockDim.x * blocksPerChannel);
int idxYperChannel = idxCol % (blockDim.y * blocksPerChannel);
int idxN = ((int)(blockIdx.x) / blocksPerChannel);
if (idxXperChannel < depthwiseOutputW && idxYperChannel < depthwiseOutputW)
{
for (int k = 0; k < depth; ++k)
{
//pointwiseResult[idxCol * depthwiseOutputW + idxRow] = 0.0;
pointwiseResult[idxN *depthwiseOutputW*depthwiseOutputW + idxYperChannel * depthwiseOutputW + idxXperChannel]
+= depthwiseResult[k *depthwiseOutputW*depthwiseOutputW + idxYperChannel * depthwiseOutputW + idxXperChannel]
* pointwiseFilter[k + idxN * depth];
//std::cout << "pointwiseFilter : " << pointwiseFilter[k + depthBoundary * depth] << std::endl;
}
}
}
__global__ void pointwise_conv_bias(float *depthwiseResult, float *pointwiseResult, float *pointwiseFilter, float *bias, const int col, const int depth, const int depthFilterW,
const int depthwiseOutputW, const int outputDepth, const int padding, const int stride, const int blocksPerChannel)
{
int idxRow = threadIdx.x + blockDim.x * blockIdx.x;
int idxCol = threadIdx.y + blockDim.y * blockIdx.y;
int idxXperChannel = idxRow % (blockDim.x * blocksPerChannel);
int idxYperChannel = idxCol % (blockDim.y * blocksPerChannel);
int idxN = ((int)(blockIdx.x) / blocksPerChannel);
if (idxXperChannel < depthwiseOutputW && idxYperChannel < depthwiseOutputW)
{
for (int k = 0; k < depth; ++k)
{
pointwiseResult[idxN * depthwiseOutputW*depthwiseOutputW + idxYperChannel * depthwiseOutputW + idxXperChannel]
+= depthwiseResult[k *depthwiseOutputW*depthwiseOutputW + idxYperChannel * depthwiseOutputW + idxXperChannel]
* pointwiseFilter[k + idxN * depth];
}
pointwiseResult[idxN * depthwiseOutputW*depthwiseOutputW + idxYperChannel * depthwiseOutputW + idxXperChannel] += bias[idxN];
}
else {}
}
void point_f(float *depthwiseResult, float *pointwiseResult, float *pointwiseFilter, const int col, const int depth, const int depthFilterW, const int depthwiseOutputW,
const int outputDepth, const int padding, const int stride, const int conv_blocks_num, dim3 threads_per_block, dim3 num_of_blocks) {
pointwise_conv << < num_of_blocks, threads_per_block >> > (depthwiseResult, pointwiseResult, pointwiseFilter, col, depth, depthFilterW, depthwiseOutputW, outputDepth, padding, stride, conv_blocks_num);
hipDeviceSynchronize();
return;
}
void pointbias_f(float *depthwiseResult, float *pointwiseResult, float *pointwiseFilter, float *bias, const int col, const int depth, const int depthFilterW, const int depthwiseOutputW,
const int outputDepth, const int padding, const int stride, const int conv_blocks_num, dim3 threads_per_block, dim3 num_of_blocks) {
pointwise_conv_bias << < num_of_blocks, threads_per_block >> > (depthwiseResult, pointwiseResult, pointwiseFilter, bias, col, depth, depthFilterW, depthwiseOutputW, outputDepth, padding, stride, conv_blocks_num);
hipDeviceSynchronize();
return;
}
| 7aca558b6054f38ba5f599ae71f059bff6c58b37.cu | #include <math.h>
#include <cuda.h>
#include "dsc_conv.cuh"
#include "error_util.h"
__global__ void depthwise_conv(float *image, float *tmp, float *depthwiseResult, float *filter, const int input_h, const int input_w, const int depth, const int depthFilterW,
const int depthwiseOutputW, const int padding, const int stride, const int blocksPerChannel)
{
int idxRow = threadIdx.x + blockDim.x * blockIdx.x;
int idxCol = threadIdx.y + blockDim.y * blockIdx.y;
int idxXperChannel = idxRow % (blockDim.x * blocksPerChannel);
int idxYperChannel = idxCol % (blockDim.y * blocksPerChannel);
int idxChannel = ((int)(blockIdx.x) / blocksPerChannel) % depth;
if (idxXperChannel <depthwiseOutputW && idxYperChannel < depthwiseOutputW)
{
for (int k = 0; k < depthFilterW; ++k) {
for (int q = 0; q < depthFilterW; ++q) {
depthwiseResult[idxChannel *depthwiseOutputW*depthwiseOutputW + depthwiseOutputW * idxYperChannel + idxXperChannel]
+= tmp[idxChannel *input_h*input_w + input_w*(k + idxYperChannel* stride) + (q + idxXperChannel*stride)]
* filter[idxChannel * depthFilterW*depthFilterW + k*depthFilterW + q];
}
}
}
//int depthBoundary = idxCol / depthwiseOutputW;
//int height_per_channel = idxCol % depthwiseOutputW;
//int tmpSize = col + padding;
//if (idxRow <depthwiseOutputW && idxCol < depthwiseOutputW * depth)
//{
// for (int k = 0; k < depthFilterW; ++k) {
// for (int q = 0; q < depthFilterW; ++q) {
// //depthwiseResult[depthwiseOutputW * idxCol + idxRow] += image[depthBoundary * (int)powf(col, 2) + col*(k + idxCol*stride - depthBoundary * (depthwiseOutputW*stride))
// // + (q + idxRow*stride)] * filter[depthBoundary * (int)powf(depthFilterW, 2) + k*depthFilterW + q];
// depthwiseResult[depthwiseOutputW * idxCol + idxRow] += tmp[depthBoundary *tmpSize*tmpSize + tmpSize*(k + height_per_channel* stride)+ (q + idxRow*stride)]
// * filter[depthBoundary * depthFilterW*depthFilterW + k*depthFilterW + q];
// }
// }
// //printf("idxRow: %d, idxCol: %d \n %d \n ------ \n", idxRow, idxCol, (int)powf(col,2));
//
// //printf("w : %d \n", idxRow*stride);
//}
}
void depth_f(float *image, float *tmp, float *depthwiseResult, float *filter, const int input_h, const int input_w, const int depth, const int depthFilterW, const int depthwiseOutputW,
const int padding, const int stride, const int conv_blocks_num, dim3 threads_per_block, dim3 num_of_blocks) {
depthwise_conv << < num_of_blocks, threads_per_block >> > (image, tmp, depthwiseResult, filter,
input_h, input_w, depth, depthFilterW, depthwiseOutputW, padding, stride, conv_blocks_num);
cudaDeviceSynchronize();
return;
}
__global__ void pointwise_conv(float *depthwiseResult, float *pointwiseResult, float *pointwiseFilter, const int col, const int depth, const int depthFilterW,
const int depthwiseOutputW, const int outputDepth, const int padding, const int stride, const int blocksPerChannel)
{
//const int depthwiseOutputW = (col - depthFilterW + 2 * padding) / stride + 1;
int idxRow = threadIdx.x + blockDim.x * blockIdx.x;
int idxCol = threadIdx.y + blockDim.y * blockIdx.y;
//int depthBoundary = idxCol / depthwiseOutputW;
int idxXperChannel = idxRow % (blockDim.x * blocksPerChannel);
int idxYperChannel = idxCol % (blockDim.y * blocksPerChannel);
int idxN = ((int)(blockIdx.x) / blocksPerChannel);
if (idxXperChannel < depthwiseOutputW && idxYperChannel < depthwiseOutputW)
{
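// NOTE: the output is accumulated in place with +=, so pointwiseResult is assumed
// to have been zero-initialized by the caller before this kernel is launched.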
for (int k = 0; k < depth; ++k)
{
//pointwiseResult[idxCol * depthwiseOutputW + idxRow] = 0.0;
pointwiseResult[idxN *depthwiseOutputW*depthwiseOutputW + idxYperChannel * depthwiseOutputW + idxXperChannel]
+= depthwiseResult[k *depthwiseOutputW*depthwiseOutputW + idxYperChannel * depthwiseOutputW + idxXperChannel]
* pointwiseFilter[k + idxN * depth];
//std::cout << "pointwiseFilter : " << pointwiseFilter[k + depthBoundary * depth] << std::endl;
}
}
}
__global__ void pointwise_conv_bias(float *depthwiseResult, float *pointwiseResult, float *pointwiseFilter, float *bias, const int col, const int depth, const int depthFilterW,
const int depthwiseOutputW, const int outputDepth, const int padding, const int stride, const int blocksPerChannel)
{
int idxRow = threadIdx.x + blockDim.x * blockIdx.x;
int idxCol = threadIdx.y + blockDim.y * blockIdx.y;
int idxXperChannel = idxRow % (blockDim.x * blocksPerChannel);
int idxYperChannel = idxCol % (blockDim.y * blocksPerChannel);
int idxN = ((int)(blockIdx.x) / blocksPerChannel);
if (idxXperChannel < depthwiseOutputW && idxYperChannel < depthwiseOutputW)
{
for (int k = 0; k < depth; ++k)
{
pointwiseResult[idxN * depthwiseOutputW*depthwiseOutputW + idxYperChannel * depthwiseOutputW + idxXperChannel]
+= depthwiseResult[k *depthwiseOutputW*depthwiseOutputW + idxYperChannel * depthwiseOutputW + idxXperChannel]
* pointwiseFilter[k + idxN * depth];
}
pointwiseResult[idxN * depthwiseOutputW*depthwiseOutputW + idxYperChannel * depthwiseOutputW + idxXperChannel] += bias[idxN];
}
else {}
}
void point_f(float *depthwiseResult, float *pointwiseResult, float *pointwiseFilter, const int col, const int depth, const int depthFilterW, const int depthwiseOutputW,
const int outputDepth, const int padding, const int stride, const int conv_blocks_num, dim3 threads_per_block, dim3 num_of_blocks) {
pointwise_conv << < num_of_blocks, threads_per_block >> > (depthwiseResult, pointwiseResult, pointwiseFilter, col, depth, depthFilterW, depthwiseOutputW, outputDepth, padding, stride, conv_blocks_num);
cudaDeviceSynchronize();
return;
}
void pointbias_f(float *depthwiseResult, float *pointwiseResult, float *pointwiseFilter, float *bias, const int col, const int depth, const int depthFilterW, const int depthwiseOutputW,
const int outputDepth, const int padding, const int stride, const int conv_blocks_num, dim3 threads_per_block, dim3 num_of_blocks) {
pointwise_conv_bias << < num_of_blocks, threads_per_block >> > (depthwiseResult, pointwiseResult, pointwiseFilter, bias, col, depth, depthFilterW, depthwiseOutputW, outputDepth, padding, stride, conv_blocks_num);
cudaDeviceSynchronize();
return;
}
|
2841d9dc5401ab5934195627d78fcdcdbe772605.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#define N 512
__global__ void add( int *a, int *b, int *c ) {
c[blockIdx.x] = a[blockIdx.x]+b[blockIdx.x];
}
void random_ints(int *p, int n) {
int i;
for(i=0; i<n; i++) {
p[i]=rand();
}
}
int main( void ) {
int *a, *b, *c, *d; // host copies of a, b, c
int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
int size = N * sizeof( int ); // we need space for 512 // integers
int i;
// allocate device copies of a, b, c
hipMalloc( (void**)&dev_a, size );
hipMalloc( (void**)&dev_b, size );
hipMalloc( (void**)&dev_c, size );
a = (int*)malloc( size );
b = (int*)malloc( size );
c = (int*)malloc( size );
d = (int*)malloc( size );
random_ints( a, N );
random_ints( b, N );
// copy inputs to device
hipMemcpy( dev_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, size, hipMemcpyHostToDevice );
// launch add() kernel with N parallel blocks
hipLaunchKernelGGL(( add), dim3(N), dim3(1) , 0, 0, dev_a, dev_b, dev_c );
// copy device result back to host copy of c
hipMemcpy( c, dev_c, size, hipMemcpyDeviceToHost );
for(i=0; i<N; i++) {
d[i]=a[i]+b[i];
if(d[i]!=c[i]) {
printf("error: expected %d, got %d!\n",c[i], d[i]);
break;
}
}
if(i==N) {
printf("correct!\n");
}
free( a ); free( b ); free( c ); free( d );
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
return 0;
}
| 2841d9dc5401ab5934195627d78fcdcdbe772605.cu | #include <stdio.h>
#include <math.h>
#define N 512
__global__ void add( int *a, int *b, int *c ) {
c[blockIdx.x] = a[blockIdx.x]+b[blockIdx.x];
}
void random_ints(int *p, int n) {
int i;
for(i=0; i<n; i++) {
p[i]=rand();
}
}
int main( void ) {
int *a, *b, *c, *d; // host copies of a, b, c
int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
int size = N * sizeof( int ); // we need space for 512 // integers
int i;
// allocate device copies of a, b, c
cudaMalloc( (void**)&dev_a, size );
cudaMalloc( (void**)&dev_b, size );
cudaMalloc( (void**)&dev_c, size );
a = (int*)malloc( size );
b = (int*)malloc( size );
c = (int*)malloc( size );
d = (int*)malloc( size );
random_ints( a, N );
random_ints( b, N );
// copy inputs to device
cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice );
// launch add() kernel with N parallel blocks
add<<< N, 1 >>>( dev_a, dev_b, dev_c );
// copy device result back to host copy of c
cudaMemcpy( c, dev_c, size, cudaMemcpyDeviceToHost );
for(i=0; i<N; i++) {
d[i]=a[i]+b[i];
if(d[i]!=c[i]) {
printf("error: expected %d, got %d!\n",c[i], d[i]);
break;
}
}
if(i==N) {
printf("correct!\n");
}
free( a ); free( b ); free( c ); free( d );
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
return 0;
}
|
7e16e58d3aa686b6029ca0701d308f60066a4461.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <vector>
#include <chrono>
#include <cfloat>
#include <iomanip>
#include <cmath>
#include <hip/hip_runtime.h>
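// The reduction kernels below all follow the same pattern: each thread accumulates a
// private partial sum over a grid-stride loop and then issues a single atomicAdd; the
// _v2/_v4/_v8/_v16 variants unroll that many contiguous elements per loop iteration
// (and are launched with proportionally fewer blocks). Note the unrolled variants
// assume arrayLength is a multiple of the unroll factor, which holds for the default
// 52428800 but would otherwise read past the end of the array.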
__global__ void atomic_reduction(int *in, int* out, int arrayLength) {
int sum=int(0);
int idx = blockIdx.x*blockDim.x+threadIdx.x;
for(int i= idx;i<arrayLength;i+=blockDim.x*gridDim.x) {
sum+=in[i];
}
atomicAdd(out,sum);
}
__global__ void atomic_reduction_v2(int *in, int* out, int arrayLength) {
int sum=int(0);
int idx = blockIdx.x*blockDim.x+threadIdx.x;
for(int i= idx*2;i<arrayLength;i+=blockDim.x*gridDim.x*2) {
sum+=in[i] + in[i+1];
}
atomicAdd(out,sum);
}
__global__ void atomic_reduction_v4(int *in, int* out, int arrayLength) {
int sum=int(0);
int idx = blockIdx.x*blockDim.x+threadIdx.x;
for(int i= idx*4;i<arrayLength;i+=blockDim.x*gridDim.x*4) {
sum+=in[i] + in[i+1] + in[i+2] + in[i+3];
}
atomicAdd(out,sum);
}
__global__ void atomic_reduction_v8(int *in, int* out, int arrayLength) {
int sum=int(0);
int idx = blockIdx.x*blockDim.x+threadIdx.x;
for(int i= idx*8;i<arrayLength;i+=blockDim.x*gridDim.x*8) {
sum+=in[i] + in[i+1] + in[i+2] + in[i+3] +in[i+4] +in[i+5] +in[i+6] +in[i+7];
}
atomicAdd(out,sum);
}
__global__ void atomic_reduction_v16(int *in, int* out, int arrayLength) {
int sum=int(0);
int idx = blockIdx.x*blockDim.x+threadIdx.x;
for(int i= idx*16;i<arrayLength;i+=blockDim.x*gridDim.x*16) {
sum+=in[i] + in[i+1] + in[i+2] + in[i+3] +in[i+4] +in[i+5] +in[i+6] +in[i+7]
+in[i+8] +in[i+9] +in[i+10] +in[i+11] +in[i+12] +in[i+13] +in[i+14] +in[i+15] ;
}
atomicAdd(out,sum);
}
int main(int argc, char** argv)
{
unsigned int arrayLength = 52428800;
unsigned int threads=256;
if(argc == 3) {
arrayLength=atoi(argv[1]);
threads=atoi(argv[2]);
}
// launch the kernel N iterations
int N = 32;
std::cout << "Array size: " << arrayLength*sizeof(int)/1024.0/1024.0 << " MB"<<std::endl;
std::cout << "Thread block size: " << threads << std::endl;
std::cout << "Repeat the kernel execution " << N << " times" << std::endl;
int* array=(int*)malloc(arrayLength*sizeof(int));
int checksum =0;
for(int i=0;i<arrayLength;i++) {
array[i]=rand()%2;
checksum+=array[i];
}
int *in, *out;
// Declare timers
std::chrono::high_resolution_clock::time_point t1, t2;
long long size=sizeof(int)*arrayLength;
// Get device properties
hipDeviceProp_t props;
hipGetDeviceProperties(&props, 0);
std::cout << "Device name: " << props.name << std::endl;
hipMalloc(&in,size);
hipMalloc(&out,sizeof(int));
hipMemcpy(in,array,arrayLength*sizeof(int),hipMemcpyHostToDevice);
hipDeviceSynchronize();
int blocks=::min((arrayLength+threads-1)/threads,2048u);
// warmup
for(int i=0;i<N;i++) {
hipMemsetAsync(out,0,sizeof(int));
hipLaunchKernelGGL(( atomic_reduction), dim3(dim3(blocks)), dim3(dim3(threads)) , 0, 0, in,out,arrayLength);
}
hipDeviceSynchronize();
// start timing
t1 = std::chrono::high_resolution_clock::now();
for(int i=0;i<N;i++) {
hipMemsetAsync(out,0,sizeof(int));
hipLaunchKernelGGL(( atomic_reduction), dim3(dim3(blocks)), dim3(dim3(threads)) , 0, 0, in,out,arrayLength);
}
hipDeviceSynchronize();
t2 = std::chrono::high_resolution_clock::now();
double times = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
float GB=(float)arrayLength*sizeof(int)*N;
std::cout << "The average performance of reduction is "<< 1.0E-09 * GB/times<<" GBytes/sec"<<std::endl;
int sum;
hipMemcpy(&sum,out,sizeof(int),hipMemcpyDeviceToHost);
if(sum==checksum)
std::cout<<"VERIFICATION: result is CORRECT"<<std::endl<<std::endl;
else
std::cout<<"VERIFICATION: result is INCORRECT!!"<<std::endl<<std::endl;
t1 = std::chrono::high_resolution_clock::now();
for(int i=0;i<N;i++) {
hipMemsetAsync(out,0,sizeof(int));
hipLaunchKernelGGL(( atomic_reduction_v2), dim3(dim3(blocks/2)), dim3(dim3(threads)) , 0, 0, in,out,arrayLength);
}
hipDeviceSynchronize();
t2 = std::chrono::high_resolution_clock::now();
times = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
GB=(float)arrayLength*sizeof(int)*N;
std::cout << "The average performance of reduction is "<< 1.0E-09 * GB/times<<" GBytes/sec"<<std::endl;
hipMemcpy(&sum,out,sizeof(int),hipMemcpyDeviceToHost);
if(sum==checksum)
std::cout<<"VERIFICATION: result is CORRECT"<<std::endl<<std::endl;
else
std::cout<<"VERIFICATION: result is INCORRECT!!"<<std::endl<<std::endl;
t1 = std::chrono::high_resolution_clock::now();
for(int i=0;i<N;i++) {
hipMemsetAsync(out,0,sizeof(int));
hipLaunchKernelGGL(( atomic_reduction_v4), dim3(dim3(blocks/4)), dim3(dim3(threads)) , 0, 0, in,out,arrayLength);
}
hipDeviceSynchronize();
t2 = std::chrono::high_resolution_clock::now();
times = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
GB=(float)arrayLength*sizeof(int)*N;
std::cout << "The average performance of reduction is "<< 1.0E-09 * GB/times<<" GBytes/sec"<<std::endl;
hipMemcpy(&sum,out,sizeof(int),hipMemcpyDeviceToHost);
if(sum==checksum)
std::cout<<"VERIFICATION: result is CORRECT"<<std::endl<<std::endl;
else
std::cout<<"VERIFICATION: result is INCORRECT!!"<<std::endl<<std::endl;
t1 = std::chrono::high_resolution_clock::now();
for(int i=0;i<N;i++) {
hipMemsetAsync(out,0,sizeof(int));
hipLaunchKernelGGL(( atomic_reduction_v8), dim3(dim3(blocks/8)), dim3(dim3(threads)) , 0, 0, in,out,arrayLength);
}
hipDeviceSynchronize();
t2 = std::chrono::high_resolution_clock::now();
times = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
GB=(float)arrayLength*sizeof(int)*N;
std::cout << "The average performance of reduction is "<< 1.0E-09 * GB/times<<" GBytes/sec"<<std::endl;
hipMemcpy(&sum,out,sizeof(int),hipMemcpyDeviceToHost);
if(sum==checksum)
std::cout<<"VERIFICATION: result is CORRECT"<<std::endl<<std::endl;
else
std::cout<<"VERIFICATION: result is INCORRECT!!"<<std::endl<<std::endl;
t1 = std::chrono::high_resolution_clock::now();
for(int i=0;i<N;i++) {
hipMemsetAsync(out,0,sizeof(int));
hipLaunchKernelGGL(( atomic_reduction_v16), dim3(dim3(blocks/16)), dim3(dim3(threads)) , 0, 0, in,out,arrayLength);
}
hipDeviceSynchronize();
t2 = std::chrono::high_resolution_clock::now();
times = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
GB=(float)arrayLength*sizeof(int)*N;
std::cout << "The average performance of reduction is "<< 1.0E-09 * GB/times<<" GBytes/sec"<<std::endl;
hipMemcpy(&sum,out,sizeof(int),hipMemcpyDeviceToHost);
if(sum==checksum)
std::cout<<"VERIFICATION: result is CORRECT"<<std::endl<<std::endl;
else
std::cout<<"VERIFICATION: result is INCORRECT!!"<<std::endl<<std::endl;
hipFree(in);
hipFree(out);
free(array);
return 0;
}
| 7e16e58d3aa686b6029ca0701d308f60066a4461.cu | /*
Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <vector>
#include <chrono>
#include <cfloat>
#include <iomanip>
#include <cmath>
#include <cuda.h>
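// The reduction kernels below all follow the same pattern: each thread accumulates a
// private partial sum over a grid-stride loop and then issues a single atomicAdd; the
// _v2/_v4/_v8/_v16 variants unroll that many contiguous elements per loop iteration
// (and are launched with proportionally fewer blocks). Note the unrolled variants
// assume arrayLength is a multiple of the unroll factor, which holds for the default
// 52428800 but would otherwise read past the end of the array.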
__global__ void atomic_reduction(int *in, int* out, int arrayLength) {
int sum=int(0);
int idx = blockIdx.x*blockDim.x+threadIdx.x;
for(int i= idx;i<arrayLength;i+=blockDim.x*gridDim.x) {
sum+=in[i];
}
atomicAdd(out,sum);
}
__global__ void atomic_reduction_v2(int *in, int* out, int arrayLength) {
int sum=int(0);
int idx = blockIdx.x*blockDim.x+threadIdx.x;
for(int i= idx*2;i<arrayLength;i+=blockDim.x*gridDim.x*2) {
sum+=in[i] + in[i+1];
}
atomicAdd(out,sum);
}
__global__ void atomic_reduction_v4(int *in, int* out, int arrayLength) {
int sum=int(0);
int idx = blockIdx.x*blockDim.x+threadIdx.x;
for(int i= idx*4;i<arrayLength;i+=blockDim.x*gridDim.x*4) {
sum+=in[i] + in[i+1] + in[i+2] + in[i+3];
}
atomicAdd(out,sum);
}
__global__ void atomic_reduction_v8(int *in, int* out, int arrayLength) {
int sum=int(0);
int idx = blockIdx.x*blockDim.x+threadIdx.x;
for(int i= idx*8;i<arrayLength;i+=blockDim.x*gridDim.x*8) {
sum+=in[i] + in[i+1] + in[i+2] + in[i+3] +in[i+4] +in[i+5] +in[i+6] +in[i+7];
}
atomicAdd(out,sum);
}
__global__ void atomic_reduction_v16(int *in, int* out, int arrayLength) {
int sum=int(0);
int idx = blockIdx.x*blockDim.x+threadIdx.x;
for(int i= idx*16;i<arrayLength;i+=blockDim.x*gridDim.x*16) {
sum+=in[i] + in[i+1] + in[i+2] + in[i+3] +in[i+4] +in[i+5] +in[i+6] +in[i+7]
+in[i+8] +in[i+9] +in[i+10] +in[i+11] +in[i+12] +in[i+13] +in[i+14] +in[i+15] ;
}
atomicAdd(out,sum);
}
int main(int argc, char** argv)
{
unsigned int arrayLength = 52428800;
unsigned int threads=256;
if(argc == 3) {
arrayLength=atoi(argv[1]);
threads=atoi(argv[2]);
}
// launch the kernel N iterations
int N = 32;
std::cout << "Array size: " << arrayLength*sizeof(int)/1024.0/1024.0 << " MB"<<std::endl;
std::cout << "Thread block size: " << threads << std::endl;
std::cout << "Repeat the kernel execution " << N << " times" << std::endl;
int* array=(int*)malloc(arrayLength*sizeof(int));
int checksum =0;
for(int i=0;i<arrayLength;i++) {
array[i]=rand()%2;
checksum+=array[i];
}
int *in, *out;
// Declare timers
std::chrono::high_resolution_clock::time_point t1, t2;
long long size=sizeof(int)*arrayLength;
// Get device properties
cudaDeviceProp props;
cudaGetDeviceProperties(&props, 0);
std::cout << "Device name: " << props.name << std::endl;
cudaMalloc(&in,size);
cudaMalloc(&out,sizeof(int));
cudaMemcpy(in,array,arrayLength*sizeof(int),cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
int blocks=std::min((arrayLength+threads-1)/threads,2048u);
// warmup
for(int i=0;i<N;i++) {
cudaMemsetAsync(out,0,sizeof(int));
atomic_reduction<<< dim3(blocks), dim3(threads) >>>(in,out,arrayLength);
}
cudaDeviceSynchronize();
// start timing
t1 = std::chrono::high_resolution_clock::now();
for(int i=0;i<N;i++) {
cudaMemsetAsync(out,0,sizeof(int));
atomic_reduction<<< dim3(blocks), dim3(threads) >>>(in,out,arrayLength);
}
cudaDeviceSynchronize();
t2 = std::chrono::high_resolution_clock::now();
double times = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
float GB=(float)arrayLength*sizeof(int)*N;
std::cout << "The average performance of reduction is "<< 1.0E-09 * GB/times<<" GBytes/sec"<<std::endl;
int sum;
cudaMemcpy(&sum,out,sizeof(int),cudaMemcpyDeviceToHost);
if(sum==checksum)
std::cout<<"VERIFICATION: result is CORRECT"<<std::endl<<std::endl;
else
std::cout<<"VERIFICATION: result is INCORRECT!!"<<std::endl<<std::endl;
t1 = std::chrono::high_resolution_clock::now();
for(int i=0;i<N;i++) {
cudaMemsetAsync(out,0,sizeof(int));
atomic_reduction_v2<<< dim3(blocks/2), dim3(threads) >>>(in,out,arrayLength);
}
cudaDeviceSynchronize();
t2 = std::chrono::high_resolution_clock::now();
times = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
GB=(float)arrayLength*sizeof(int)*N;
std::cout << "The average performance of reduction is "<< 1.0E-09 * GB/times<<" GBytes/sec"<<std::endl;
cudaMemcpy(&sum,out,sizeof(int),cudaMemcpyDeviceToHost);
if(sum==checksum)
std::cout<<"VERIFICATION: result is CORRECT"<<std::endl<<std::endl;
else
std::cout<<"VERIFICATION: result is INCORRECT!!"<<std::endl<<std::endl;
t1 = std::chrono::high_resolution_clock::now();
for(int i=0;i<N;i++) {
cudaMemsetAsync(out,0,sizeof(int));
atomic_reduction_v4<<< dim3(blocks/4), dim3(threads) >>>(in,out,arrayLength);
}
cudaDeviceSynchronize();
t2 = std::chrono::high_resolution_clock::now();
times = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
GB=(float)arrayLength*sizeof(int)*N;
std::cout << "The average performance of reduction is "<< 1.0E-09 * GB/times<<" GBytes/sec"<<std::endl;
cudaMemcpy(&sum,out,sizeof(int),cudaMemcpyDeviceToHost);
if(sum==checksum)
std::cout<<"VERIFICATION: result is CORRECT"<<std::endl<<std::endl;
else
std::cout<<"VERIFICATION: result is INCORRECT!!"<<std::endl<<std::endl;
t1 = std::chrono::high_resolution_clock::now();
for(int i=0;i<N;i++) {
cudaMemsetAsync(out,0,sizeof(int));
atomic_reduction_v8<<< dim3(blocks/8), dim3(threads) >>>(in,out,arrayLength);
}
cudaDeviceSynchronize();
t2 = std::chrono::high_resolution_clock::now();
times = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
GB=(float)arrayLength*sizeof(int)*N;
std::cout << "The average performance of reduction is "<< 1.0E-09 * GB/times<<" GBytes/sec"<<std::endl;
cudaMemcpy(&sum,out,sizeof(int),cudaMemcpyDeviceToHost);
if(sum==checksum)
std::cout<<"VERIFICATION: result is CORRECT"<<std::endl<<std::endl;
else
std::cout<<"VERIFICATION: result is INCORRECT!!"<<std::endl<<std::endl;
t1 = std::chrono::high_resolution_clock::now();
for(int i=0;i<N;i++) {
cudaMemsetAsync(out,0,sizeof(int));
atomic_reduction_v16<<< dim3(blocks/16), dim3(threads) >>>(in,out,arrayLength);
}
cudaDeviceSynchronize();
t2 = std::chrono::high_resolution_clock::now();
times = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
GB=(float)arrayLength*sizeof(int)*N;
std::cout << "The average performance of reduction is "<< 1.0E-09 * GB/times<<" GBytes/sec"<<std::endl;
cudaMemcpy(&sum,out,sizeof(int),cudaMemcpyDeviceToHost);
if(sum==checksum)
std::cout<<"VERIFICATION: result is CORRECT"<<std::endl<<std::endl;
else
std::cout<<"VERIFICATION: result is INCORRECT!!"<<std::endl<<std::endl;
cudaFree(in);
cudaFree(out);
free(array);
return 0;
}
|
097fb8773b438f7d7400cae1d4f3f4e5c1051a10.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
#include "device_launch_parameters.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char* msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
// i think the scene_scale actually represents the distance from the origin to the positive extent of the simulation space
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3* dev_pos;
glm::vec3* dev_vel1;
glm::vec3* dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int* dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int* dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int* dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int* dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3* arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance); // doubled because rule distances would be radii
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; // i think the scene_scale actually represents the distance from the origin to the positive extent of the simulation space
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, 16 * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, 16 * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3* pos, float* vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3* vel, float* vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float* vbodptr_positions, float* vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int idx, const glm::vec3* pos, const glm::vec3* vel) {
glm::vec3 thisPos = pos[idx];
glm::vec3 perceivedCenter(0.f);
glm::vec3 repulsion(0.f);
glm::vec3 perceivedVelocity(0.f);
int neighbors1 = 0;
int neighbors3 = 0;
for (int i = 0; i < N; i++) {
// Rule 1 Cohesion: boids fly towards their local perceived center of mass, which excludes themselves
if (i != idx && glm::distance(thisPos, pos[i]) < rule1Distance) {
perceivedCenter += pos[i];
neighbors1++;
}
// Rule 2 Separation: boids try to stay a distance d away from each other
if (i != idx && glm::distance(thisPos, pos[i]) < rule2Distance) {
repulsion -= (pos[i] - thisPos);
}
// Rule 3 Alignment: boids try to match the speed of surrounding boids
if (i != idx && glm::distance(thisPos, pos[i]) < rule3Distance) {
perceivedVelocity += vel[i];
neighbors3++;
}
}
if (neighbors1 != 0) {
perceivedCenter /= neighbors1; // compute the perceived center of mass by dividing by the number of neighbors
}
if (neighbors3 != 0) {
perceivedVelocity /= neighbors3; // compute the perceived average velocity by dividing by the number of neighbors
}
return ((perceivedCenter - thisPos) * rule1Scale) + (repulsion * rule2Scale) + (perceivedVelocity * rule3Scale);
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its velocity based on the three boids rules.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3* poss,
glm::vec3* vels1, glm::vec3* vels2) {
// Compute a new velocity based on pos and vel1
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
glm::vec3 vel = vels1[idx] + computeVelocityChange(N, idx, poss, vels1);
// Clamp the speed
float speed = glm::length(vel);
if (speed > maxSpeed)
vel = (vel / speed) * maxSpeed;
// Record the new velocity into vel2. Question: why NOT vel1?
vels2[idx] = vel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3* pos, glm::vec3* vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3* poss, int* indices, int* gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
glm::vec3 pos = poss[idx];
glm::ivec3 gridIdx = glm::floor((pos - gridMin) * inverseCellWidth);
gridIndices[idx] = gridIndex3Dto1D(gridIdx.x, gridIdx.y, gridIdx.z, gridResolution);
indices[idx] = idx;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int* intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int* particleGridIndices,
int* gridCellStartIndices, int* gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// at this point particleGridIndices should be sorted
int curr = particleGridIndices[index];
if (index == 0) {
gridCellStartIndices[curr] = index;
return;
}
int prev = particleGridIndices[index - 1];
if (curr != prev) {
gridCellStartIndices[curr] = index;
gridCellEndIndices[prev] = index - 1;
}
// the last value of particleGridIndices will never be assigned
// to be the end index of the last grid in the particleGridIndices
// array. thus it must be set manually.
gridCellEndIndices[particleGridIndices[N - 1]] = N - 1;
}
__device__ glm::vec3 computeVelocityChangeScattered(int N, int gridResolution, int idx, const glm::vec3* pos, const glm::vec3* vel,
glm::ivec3 minIdx3D, glm::ivec3 maxIdx3D, int* gridCellStartIndices, int* gridCellEndIndices, int* particleArrayIndices) {
// Rule 1 Cohesion: boids fly towards their local perceived center of mass, which excludes themselves
glm::vec3 thisPos = pos[idx];
glm::vec3 perceivedCenter(0.f);
glm::vec3 repulsion(0.f);
glm::vec3 perceivedVelocity(0.f);
int neighbors1 = 0;
int neighbors3 = 0;
for (int k = minIdx3D.z; k < maxIdx3D.z; k++) {
for (int j = minIdx3D.y; j < maxIdx3D.y; j++) {
for (int i = minIdx3D.x; i < maxIdx3D.x; i++) {
glm::ivec3 gridIdx3D = glm::ivec3(i, j, k);
int gridIdx1D = gridIndex3Dto1D(gridIdx3D.x, gridIdx3D.y, gridIdx3D.z, gridResolution);
int start = gridCellStartIndices[gridIdx1D];
int end = gridCellEndIndices[gridIdx1D];
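// NOTE: gridCellEndIndices holds an inclusive index (see kernIdentifyCellStartEnd),
// so the bound below (l < end - start) looks off; the intent is presumably l <= end,
// matching the commented-out variants elsewhere in this file.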
for (int l = start; l < end - start; l++) {
int thatIdx = particleArrayIndices[l];
// exclude the boid's own data for velocity calculation
if (thatIdx == idx) continue;
glm::vec3 thatPos = pos[thatIdx];
// Rule 1 Cohesion: boids fly towards their local perceived center of mass
if (glm::distance(thisPos, thatPos) < rule1Distance) {
perceivedCenter += thatPos;
neighbors1++;
}
// Rule 2 Separation: boids try to stay a distance d away from each other
if (glm::distance(thisPos, thatPos) < rule2Distance) {
repulsion -= (thatPos - thisPos);
}
// Rule 3 Alignment: boids try to match the speed of surrounding boids
if (glm::distance(thisPos, thatPos) < rule3Distance) {
perceivedVelocity += vel[thatIdx];
neighbors3++;
}
}
}
}
}
if (neighbors1 != 0) {
perceivedCenter /= neighbors1; // compute the perceived center of mass by dividing by the number of neighbors
}
if (neighbors3 != 0) {
perceivedVelocity /= neighbors3; // compute the perceived average velocity by dividing by the number of neighbors
}
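// NOTE: the cohesion and alignment terms below are multiplied by 0.f, which looks
// like a debugging leftover; as written only the separation term contributes.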
return ((perceivedCenter - thisPos) * rule1Scale * 0.f) + (repulsion * rule2Scale) + (perceivedVelocity * rule3Scale * 0.f);
//for (int i = 0; i < 9; i++) {
// if (neighborCells[i] != -1) {
// int start = gridCellStartIndices[neighborCells[i]];
// int end = gridCellEndIndices[neighborCells[i]] + 1;
// for (int j = start; j < end; j++) {
// int iOther = particleArrayIndices[j];
// if (iOther != iSelf && glm::distance(posSelf, pos[iOther]) < rule1Distance) {
// perceivedCenter += pos[iOther];
// neighbors++;
// }
// }
// }
//}
//if (neighbors != 0) {
// perceivedCenter /= neighbors; // compute the perceived center of mass by dividing by the number of neighbors
//}
//glm::vec3 velSelf = (perceivedCenter - posSelf) * rule1Scale;
//// Rule 2 Separation: boids try to stay a distance d away from each other
//glm::vec3 repulsion(0.f);
//for (int i = 0; i < 9; i++) {
// if (neighborCells[i] != -1) {
// int start = gridCellStartIndices[neighborCells[i]];
// int end = gridCellEndIndices[neighborCells[i]] + 1;
// for (int j = start; j < end; j++) {
// int iOther = particleArrayIndices[j];
// if (iOther != iSelf && glm::distance(posSelf, pos[iOther]) < rule1Distance) {
// repulsion -= (pos[iOther] - posSelf);
// }
// }
// }
//}
//velSelf += (repulsion * rule2Scale);
//// Rule 3 Alignment: boids try to match the speed of surrounding boids
//glm::vec3 perceivedVelocity(0.f);
//neighbors = 0;
//for (int i = 0; i < 9; i++) {
// if (neighborCells[i] != -1) {
// int start = gridCellStartIndices[neighborCells[i]];
// int end = gridCellEndIndices[neighborCells[i]] + 1;
// for (int j = start; j < end; j++) {
// int iOther = particleArrayIndices[j];
// if (iOther != iSelf && glm::distance(posSelf, pos[iOther]) < rule1Distance) {
// perceivedVelocity += vel[iOther];
// neighbors++;
// }
// }
// }
//}
//if (neighbors != 0) {
// perceivedVelocity /= neighbors; // compute the perceived average velocity by dividing by the number of neighbors
//}
//velSelf += (perceivedVelocity * rule3Scale);
//return velSelf;
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int* gridCellStartIndices, int* gridCellEndIndices,
int* particleArrayIndices,
glm::vec3* pos, glm::vec3* vel1, glm::vec3* vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= N) {
return;
}
float radius = imax(rule1Distance, imax(rule2Distance, rule3Distance));
glm::vec3 thisPos = pos[idx];
glm::vec3 gridIdx3D = glm::floor((thisPos - gridMin) * inverseCellWidth);
int gridIdx1D = gridIndex3Dto1D(gridIdx3D.x, gridIdx3D.y, gridIdx3D.z, gridResolution);
glm::ivec3 minIdx3D = glm::clamp(glm::ivec3(glm::floor(gridIdx3D - (radius * inverseCellWidth))), glm::ivec3(0), glm::ivec3(gridResolution));
glm::ivec3 maxIdx3D = glm::clamp(glm::ivec3(glm::floor(gridIdx3D + (radius * inverseCellWidth))), glm::ivec3(0), glm::ivec3(gridResolution));
glm::vec3 vel = vel1[idx] + computeVelocityChangeScattered(N, gridResolution, idx, pos, vel1, minIdx3D, maxIdx3D, gridCellStartIndices, gridCellEndIndices, particleArrayIndices);
// Clamp the speed
float speed = glm::length(vel);
if (speed > maxSpeed)
vel = (vel / speed) * maxSpeed;
// Record the new velocity into vel2. Question: why NOT vel1?
// since this is all happening in parallel, other threads may have read in the revised information
// when they should have read the original information
vel2[idx] = vel;
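// NOTE: the next line immediately overwrites the computed velocity with a grid-cell
// visualization value, presumably left in for debugging.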
vel2[idx] = gridIdx3D / float(gridResolution);
/*
float radius = imax(rule1Distance, imax(rule2Distance, rule3Distance));
glm::vec3 posSelf = pos[index];
glm::vec3 ind3DSelf = glm::floor((posSelf - gridMin) * inverseCellWidth);
glm::ivec3 ind3DMin = glm::clamp(glm::ivec3(glm::floor(ind3DSelf - (radius * inverseCellWidth))), glm::ivec3(0), glm::ivec3(gridResolution));
glm::ivec3 ind3DMax = glm::clamp(glm::ivec3(glm::floor(ind3DSelf + (radius * inverseCellWidth))), glm::ivec3(0), glm::ivec3(gridResolution));
glm::vec3 perceivedCenter(0.f);
int neighborsR1 = 0;
for (int i = ind3DMin.x; i < ind3DMax.x; i++) {
for (int j = ind3DMin.y; j < ind3DMax.y; j++) {
for (int k = ind3DMin.z; k < ind3DMax.z; k++) {
int neighbor1DInd = gridIndex3Dto1D(i, j, k, gridResolution);
int start = gridCellStartIndices[neighbor1DInd];
int end = gridCellEndIndices[neighbor1DInd];
for (int l = start; l <= end; l++) {
// Rule 1 Cohesion: boids fly towards their local perceived center of mass, which excludes themselves
for (int i = 0; i < N; i++) {
if (i != index && glm::distance(posSelf, pos[i]) < rule1Distance) {
perceivedCenter += pos[i];
neighborsR1++;
}
}
if (neighborsR1 != 0) {
perceivedCenter /= neighbors; // compute the perceived center of mass by dividing by the number of neighbors
}
glm::vec3 velSelf = (perceivedCenter - posSelf) * rule1Scale;
// Rule 2 Separation: boids try to stay a distance d away from each other
glm::vec3 repulsion(0.f);
neighbors = 0;
for (int i = 0; i < N; i++) {
if (i != iSelf && glm::distance(posSelf, pos[i]) < rule2Distance) {
repulsion -= (pos[i] - posSelf);
neighbors++;
}
}
velSelf += (repulsion * rule2Scale);
// Rule 3 Alignment: boids try to match the speed of surrounding boids
glm::vec3 perceivedVelocity(0.f);
neighbors = 0;
for (int i = 0; i < N; i++) {
if (i != iSelf && glm::distance(posSelf, pos[i]) < rule3Distance) {
perceivedVelocity += vel[i];
neighbors++;
}
}
if (neighbors != 0) {
perceivedVelocity /= neighbors; // compute the perceived average velocity by dividing by the number of neighbors
}
velSelf += (perceivedVelocity * rule3Scale);
}
}
}
}
*/
//glm::vec3 neighborDet(0.f);
//glm::vec3 posSelf = pos[index];
//glm::vec3 posTemp = glm::fract((pos[index] - gridMin) * inverseCellWidth);
//float3 posSelfF3 = make_float3(posSelf.x, posSelf.y, posSelf.z);
//float3 posTempF3 = make_float3(posTemp.x, posTemp.y, posTemp.z);
//for (int i = 0; i < 3; i++) {
// if (posTemp[i] == 0.5f) {
// neighborDet[i] = 0.f;
// }
// else if (posTemp[i] < 0.5f) {
// neighborDet[i] = -1.f;
// }
// else if (posTemp[i] > 0.5f) {
// neighborDet[i] = 1.f;
// }
//}
//float3 nDetF3 = make_float3(neighborDet.x, neighborDet.y, neighborDet.z);
//glm::vec3 gridMax = gridMin + (gridResolution * cellWidth);
//float3 gridMaxF3 = make_float3(gridMax.x, gridMax.y, gridMax.z);
//int neighborCells[9];
//posTemp = glm::floor(pos[index] - gridMin) * inverseCellWidth;
//neighborCells[8] = gridIndex3Dto1D(posTemp.x, posTemp.y, posTemp.z, gridResolution);
//for (int i = 0; i < 2; i++) {
// for (int j = 0; j < 2; j++) {
// for (int k = 0; k < 2; k++) {
// glm::vec3 neighborShift(neighborDet * glm::vec3(i % 2, j % 2, k % 2));
// glm::vec3 neighborPt = neighborShift * cellWidth + posSelf;
// if (glm::any(glm::lessThan(neighborPt, gridMin)) || glm::any(glm::greaterThan(neighborPt, gridMax))) {
// neighborCells[i * 4 + j * 2 + k] = -1;
// }
// else {
// neighborPt = glm::floor(neighborPt - gridMin) * inverseCellWidth;
// float3 nPtF3 = make_float3(neighborPt.x, neighborPt.y, neighborPt.z);
// neighborCells[i * 4 + j * 2 + k] = gridIndex3Dto1D(neighborPt.x, neighborPt.y, neighborPt.z, gridResolution);
// }
// }
// }
//}
//// Compute a new velocity based on pos and vel1
//glm::vec3 velSelf = vel1[index] + computeVelocityChangeScattered(N, index, pos, vel1, neighborCells, gridCellStartIndices, gridCellEndIndices, particleArrayIndices);
//// Clamp the speed
//float speed = glm::length(velSelf);
//if (speed > maxSpeed)
// velSelf = (velSelf / speed) * maxSpeed;
//// Record the new velocity into vel2. Question: why NOT vel1?
//float3 velSelfF3 = make_float3(velSelf.x, velSelf.y, velSelf.z);
//vel2[index] = velSelf;
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int* gridCellStartIndices, int* gridCellEndIndices,
glm::vec3* pos, glm::vec3* vel1, glm::vec3* vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// use the 3 boid rules to compute the new velocity of every boid
hipLaunchKernelGGL(( kernUpdateVelocityBruteForce), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
// compute the new position based on the computed velocity
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// TODO-1.2 ping-pong the velocity buffers
dev_vel1 = dev_vel2;
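// NOTE: the assignment above makes dev_vel1 and dev_vel2 alias the same device buffer
// (and leaks the original dev_vel1 allocation); swapping the two pointers, or the
// memcpy commented out below, is the usual way to ping-pong.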
// hipMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
// checkCUDAErrorWithLine("memcpy back failed!");
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
int N = numObjects;
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); // inexpensive way of computing the ceiling of the division
// int gridCellCount;
// int gridSideCount;
// float gridCellWidth;
// float gridInverseCellWidth;
// glm::vec3 gridMinimum;
// dev_particleArrayIndices is the sorted array of particle indices by grid index
kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (N, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
// thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
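// NOTE: gridCellStartIndices/gridCellEndIndices are not reset before this step (see
// kernResetIntBuffer above), so cells with no boids this frame keep stale values from
// the previous frame.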
hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, N, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
hipLaunchKernelGGL(( kernUpdateVelNeighborSearchScattered), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, N, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, N, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
dev_vel1 = dev_vel2;
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
}
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
hipFree(dev_particleArrayIndices);
hipFree(dev_particleGridIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
checkCUDAErrorWithLine("hipFree failed!");
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int* dev_intKeys;
int* dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
| 097fb8773b438f7d7400cae1d4f3f4e5c1051a10.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
#include "device_launch_parameters.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char* msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
// i think the scene_scale actually represents the distance from the origin to the positive extent of the simulation space
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3* dev_pos;
glm::vec3* dev_vel1;
glm::vec3* dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int* dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int* dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int* dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int* dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
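// Integer avalanche hash (in the style of Robert Jenkins / Thomas Wang), used below to
// turn (index * time) into a well-mixed per-boid RNG seed.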
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3* arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance); // doubled because rule distances would be radii
  int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; // scene_scale is the distance from the origin to the positive extent of the simulation space
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
  cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
  cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3* pos, float* vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3* vel, float* vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float* vbodptr_positions, float* vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int idx, const glm::vec3* pos, const glm::vec3* vel) {
glm::vec3 thisPos = pos[idx];
glm::vec3 perceivedCenter(0.f);
glm::vec3 repulsion(0.f);
glm::vec3 perceivedVelocity(0.f);
int neighbors1 = 0;
int neighbors3 = 0;
for (int i = 0; i < N; i++) {
// Rule 1 Cohesion: boids fly towards their local perceived center of mass, which excludes themselves
if (i != idx && glm::distance(thisPos, pos[i]) < rule1Distance) {
perceivedCenter += pos[i];
neighbors1++;
}
// Rule 2 Separation: boids try to stay a distance d away from each other
if (i != idx && glm::distance(thisPos, pos[i]) < rule2Distance) {
repulsion -= (pos[i] - thisPos);
}
// Rule 3 Alignment: boids try to match the speed of surrounding boids
if (i != idx && glm::distance(thisPos, pos[i]) < rule3Distance) {
perceivedVelocity += vel[i];
neighbors3++;
}
}
if (neighbors1 != 0) {
perceivedCenter /= neighbors1; // compute the perceived center of mass by dividing by the number of neighbors
}
if (neighbors3 != 0) {
perceivedVelocity /= neighbors3; // compute the perceived average velocity by dividing by the number of neighbors
}
return ((perceivedCenter - thisPos) * rule1Scale) + (repulsion * rule2Scale) + (perceivedVelocity * rule3Scale);
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3* poss,
glm::vec3* vels1, glm::vec3* vels2) {
// Compute a new velocity based on pos and vel1
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
glm::vec3 vel = vels1[idx] + computeVelocityChange(N, idx, poss, vels1);
// Clamp the speed
float speed = glm::length(vel);
if (speed > maxSpeed)
vel = (vel / speed) * maxSpeed;
// Record the new velocity into vel2. Question: why NOT vel1?
vels2[idx] = vel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3* pos, glm::vec3* vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3* poss, int* indices, int* gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
glm::vec3 pos = poss[idx];
glm::ivec3 gridIdx = glm::floor((pos - gridMin) * inverseCellWidth);
gridIndices[idx] = gridIndex3Dto1D(gridIdx.x, gridIdx.y, gridIdx.z, gridResolution);
indices[idx] = idx;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int* intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int* particleGridIndices,
int* gridCellStartIndices, int* gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// at this point particleGridIndices should be sorted
int curr = particleGridIndices[index];
if (index == 0) {
gridCellStartIndices[curr] = index;
return;
}
int prev = particleGridIndices[index - 1];
if (curr != prev) {
gridCellStartIndices[curr] = index;
gridCellEndIndices[prev] = index - 1;
}
// the last value of particleGridIndices will never be assigned
// to be the end index of the last grid in the particleGridIndices
// array. thus it must be set manually.
gridCellEndIndices[particleGridIndices[N - 1]] = N - 1;
}
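// Grid-accelerated variant of computeVelocityChange: instead of scanning all N boids,
// only the boids recorded in the cells between minIdx3D and maxIdx3D are examined, via
// the per-cell start/end tables and the particleArrayIndices indirection.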
__device__ glm::vec3 computeVelocityChangeScattered(int N, int gridResolution, int idx, const glm::vec3* pos, const glm::vec3* vel,
glm::ivec3 minIdx3D, glm::ivec3 maxIdx3D, int* gridCellStartIndices, int* gridCellEndIndices, int* particleArrayIndices) {
// Rule 1 Cohesion: boids fly towards their local perceived center of mass, which excludes themselves
glm::vec3 thisPos = pos[idx];
glm::vec3 perceivedCenter(0.f);
glm::vec3 repulsion(0.f);
glm::vec3 perceivedVelocity(0.f);
int neighbors1 = 0;
int neighbors3 = 0;
for (int k = minIdx3D.z; k < maxIdx3D.z; k++) {
for (int j = minIdx3D.y; j < maxIdx3D.y; j++) {
for (int i = minIdx3D.x; i < maxIdx3D.x; i++) {
glm::ivec3 gridIdx3D = glm::ivec3(i, j, k);
int gridIdx1D = gridIndex3Dto1D(gridIdx3D.x, gridIdx3D.y, gridIdx3D.z, gridResolution);
int start = gridCellStartIndices[gridIdx1D];
int end = gridCellEndIndices[gridIdx1D];
        for (int l = start; l <= end; l++) {
int thatIdx = particleArrayIndices[l];
// exclude the boid's own data for velocity calculation
if (thatIdx == idx) continue;
glm::vec3 thatPos = pos[thatIdx];
// Rule 1 Cohesion: boids fly towards their local perceived center of mass
if (glm::distance(thisPos, thatPos) < rule1Distance) {
perceivedCenter += thatPos;
neighbors1++;
}
// Rule 2 Separation: boids try to stay a distance d away from each other
if (glm::distance(thisPos, thatPos) < rule2Distance) {
repulsion -= (thatPos - thisPos);
}
// Rule 3 Alignment: boids try to match the speed of surrounding boids
if (glm::distance(thisPos, thatPos) < rule3Distance) {
perceivedVelocity += vel[thatIdx];
neighbors3++;
}
}
}
}
}
if (neighbors1 != 0) {
perceivedCenter /= neighbors1; // compute the perceived center of mass by dividing by the number of neighbors
}
if (neighbors3 != 0) {
perceivedVelocity /= neighbors3; // compute the perceived average velocity by dividing by the number of neighbors
}
  return ((perceivedCenter - thisPos) * rule1Scale) + (repulsion * rule2Scale) + (perceivedVelocity * rule3Scale);
//for (int i = 0; i < 9; i++) {
// if (neighborCells[i] != -1) {
// int start = gridCellStartIndices[neighborCells[i]];
// int end = gridCellEndIndices[neighborCells[i]] + 1;
// for (int j = start; j < end; j++) {
// int iOther = particleArrayIndices[j];
// if (iOther != iSelf && glm::distance(posSelf, pos[iOther]) < rule1Distance) {
// perceivedCenter += pos[iOther];
// neighbors++;
// }
// }
// }
//}
//if (neighbors != 0) {
// perceivedCenter /= neighbors; // compute the perceived center of mass by dividing by the number of neighbors
//}
//glm::vec3 velSelf = (perceivedCenter - posSelf) * rule1Scale;
//// Rule 2 Separation: boids try to stay a distance d away from each other
//glm::vec3 repulsion(0.f);
//for (int i = 0; i < 9; i++) {
// if (neighborCells[i] != -1) {
// int start = gridCellStartIndices[neighborCells[i]];
// int end = gridCellEndIndices[neighborCells[i]] + 1;
// for (int j = start; j < end; j++) {
// int iOther = particleArrayIndices[j];
// if (iOther != iSelf && glm::distance(posSelf, pos[iOther]) < rule1Distance) {
// repulsion -= (pos[iOther] - posSelf);
// }
// }
// }
//}
//velSelf += (repulsion * rule2Scale);
//// Rule 3 Alignment: boids try to match the speed of surrounding boids
//glm::vec3 perceivedVelocity(0.f);
//neighbors = 0;
//for (int i = 0; i < 9; i++) {
// if (neighborCells[i] != -1) {
// int start = gridCellStartIndices[neighborCells[i]];
// int end = gridCellEndIndices[neighborCells[i]] + 1;
// for (int j = start; j < end; j++) {
// int iOther = particleArrayIndices[j];
// if (iOther != iSelf && glm::distance(posSelf, pos[iOther]) < rule1Distance) {
// perceivedVelocity += vel[iOther];
// neighbors++;
// }
// }
// }
//}
//if (neighbors != 0) {
// perceivedVelocity /= neighbors; // compute the perceived average velocity by dividing by the number of neighbors
//}
//velSelf += (perceivedVelocity * rule3Scale);
//return velSelf;
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int* gridCellStartIndices, int* gridCellEndIndices,
int* particleArrayIndices,
glm::vec3* pos, glm::vec3* vel1, glm::vec3* vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= N) {
return;
}
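  // The neighborhood radius is the largest of the three rule distances; it is converted
  // below into a range of neighboring grid cells (minIdx3D to maxIdx3D) around this boid's cell.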
float radius = imax(rule1Distance, imax(rule2Distance, rule3Distance));
glm::vec3 thisPos = pos[idx];
glm::vec3 gridIdx3D = glm::floor((thisPos - gridMin) * inverseCellWidth);
int gridIdx1D = gridIndex3Dto1D(gridIdx3D.x, gridIdx3D.y, gridIdx3D.z, gridResolution);
glm::ivec3 minIdx3D = glm::clamp(glm::ivec3(glm::floor(gridIdx3D - (radius * inverseCellWidth))), glm::ivec3(0), glm::ivec3(gridResolution));
glm::ivec3 maxIdx3D = glm::clamp(glm::ivec3(glm::floor(gridIdx3D + (radius * inverseCellWidth))), glm::ivec3(0), glm::ivec3(gridResolution));
glm::vec3 vel = vel1[idx] + computeVelocityChangeScattered(N, gridResolution, idx, pos, vel1, minIdx3D, maxIdx3D, gridCellStartIndices, gridCellEndIndices, particleArrayIndices);
// Clamp the speed
float speed = glm::length(vel);
if (speed > maxSpeed)
vel = (vel / speed) * maxSpeed;
// Record the new velocity into vel2. Question: why NOT vel1?
// since this is all happening in parallel, other threads may have read in the revised information
// when they should have read the original information
  vel2[idx] = vel;
/*
float radius = imax(rule1Distance, imax(rule2Distance, rule3Distance));
glm::vec3 posSelf = pos[index];
glm::vec3 ind3DSelf = glm::floor((posSelf - gridMin) * inverseCellWidth);
glm::ivec3 ind3DMin = glm::clamp(glm::ivec3(glm::floor(ind3DSelf - (radius * inverseCellWidth))), glm::ivec3(0), glm::ivec3(gridResolution));
glm::ivec3 ind3DMax = glm::clamp(glm::ivec3(glm::floor(ind3DSelf + (radius * inverseCellWidth))), glm::ivec3(0), glm::ivec3(gridResolution));
glm::vec3 perceivedCenter(0.f);
int neighborsR1 = 0;
for (int i = ind3DMin.x; i < ind3DMax.x; i++) {
for (int j = ind3DMin.y; j < ind3DMax.y; j++) {
for (int k = ind3DMin.z; k < ind3DMax.z; k++) {
int neighbor1DInd = gridIndex3Dto1D(i, j, k, gridResolution);
int start = gridCellStartIndices[neighbor1DInd];
int end = gridCellEndIndices[neighbor1DInd];
for (int l = start; l <= end; l++) {
// Rule 1 Cohesion: boids fly towards their local perceived center of mass, which excludes themselves
for (int i = 0; i < N; i++) {
if (i != index && glm::distance(posSelf, pos[i]) < rule1Distance) {
perceivedCenter += pos[i];
neighborsR1++;
}
}
if (neighborsR1 != 0) {
perceivedCenter /= neighbors; // compute the perceived center of mass by dividing by the number of neighbors
}
glm::vec3 velSelf = (perceivedCenter - posSelf) * rule1Scale;
// Rule 2 Separation: boids try to stay a distance d away from each other
glm::vec3 repulsion(0.f);
neighbors = 0;
for (int i = 0; i < N; i++) {
if (i != iSelf && glm::distance(posSelf, pos[i]) < rule2Distance) {
repulsion -= (pos[i] - posSelf);
neighbors++;
}
}
velSelf += (repulsion * rule2Scale);
// Rule 3 Alignment: boids try to match the speed of surrounding boids
glm::vec3 perceivedVelocity(0.f);
neighbors = 0;
for (int i = 0; i < N; i++) {
if (i != iSelf && glm::distance(posSelf, pos[i]) < rule3Distance) {
perceivedVelocity += vel[i];
neighbors++;
}
}
if (neighbors != 0) {
perceivedVelocity /= neighbors; // compute the perceived average velocity by dividing by the number of neighbors
}
velSelf += (perceivedVelocity * rule3Scale);
}
}
}
}
*/
//glm::vec3 neighborDet(0.f);
//glm::vec3 posSelf = pos[index];
//glm::vec3 posTemp = glm::fract((pos[index] - gridMin) * inverseCellWidth);
//float3 posSelfF3 = make_float3(posSelf.x, posSelf.y, posSelf.z);
//float3 posTempF3 = make_float3(posTemp.x, posTemp.y, posTemp.z);
//for (int i = 0; i < 3; i++) {
// if (posTemp[i] == 0.5f) {
// neighborDet[i] = 0.f;
// }
// else if (posTemp[i] < 0.5f) {
// neighborDet[i] = -1.f;
// }
// else if (posTemp[i] > 0.5f) {
// neighborDet[i] = 1.f;
// }
//}
//float3 nDetF3 = make_float3(neighborDet.x, neighborDet.y, neighborDet.z);
//glm::vec3 gridMax = gridMin + (gridResolution * cellWidth);
//float3 gridMaxF3 = make_float3(gridMax.x, gridMax.y, gridMax.z);
//int neighborCells[9];
//posTemp = glm::floor(pos[index] - gridMin) * inverseCellWidth;
//neighborCells[8] = gridIndex3Dto1D(posTemp.x, posTemp.y, posTemp.z, gridResolution);
//for (int i = 0; i < 2; i++) {
// for (int j = 0; j < 2; j++) {
// for (int k = 0; k < 2; k++) {
// glm::vec3 neighborShift(neighborDet * glm::vec3(i % 2, j % 2, k % 2));
// glm::vec3 neighborPt = neighborShift * cellWidth + posSelf;
// if (glm::any(glm::lessThan(neighborPt, gridMin)) || glm::any(glm::greaterThan(neighborPt, gridMax))) {
// neighborCells[i * 4 + j * 2 + k] = -1;
// }
// else {
// neighborPt = glm::floor(neighborPt - gridMin) * inverseCellWidth;
// float3 nPtF3 = make_float3(neighborPt.x, neighborPt.y, neighborPt.z);
// neighborCells[i * 4 + j * 2 + k] = gridIndex3Dto1D(neighborPt.x, neighborPt.y, neighborPt.z, gridResolution);
// }
// }
// }
//}
//// Compute a new velocity based on pos and vel1
//glm::vec3 velSelf = vel1[index] + computeVelocityChangeScattered(N, index, pos, vel1, neighborCells, gridCellStartIndices, gridCellEndIndices, particleArrayIndices);
//// Clamp the speed
//float speed = glm::length(velSelf);
//if (speed > maxSpeed)
// velSelf = (velSelf / speed) * maxSpeed;
//// Record the new velocity into vel2. Question: why NOT vel1?
//float3 velSelfF3 = make_float3(velSelf.x, velSelf.y, velSelf.z);
//vel2[index] = velSelf;
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int* gridCellStartIndices, int* gridCellEndIndices,
glm::vec3* pos, glm::vec3* vel1, glm::vec3* vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// use the 3 boid rules to compute the new velocity of every boid
kernUpdateVelocityBruteForce<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
// compute the new position based on the computed velocity
kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// TODO-1.2 ping-pong the velocity buffers
  std::swap(dev_vel1, dev_vel2);
// cudaMemcpy(dev_vel1, dev_vel2, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
// checkCUDAErrorWithLine("memcpy back failed!");
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
int N = numObjects;
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); // inexpensive way of computing the ceiling of the division
// int gridCellCount;
// int gridSideCount;
// float gridCellWidth;
// float gridInverseCellWidth;
// glm::vec3 gridMinimum;
// dev_particleArrayIndices is the sorted array of particle indices by grid index
kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (N, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
// thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
kernIdentifyCellStartEnd<<<fullBlocksPerGrid, blockSize>>>(N, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
kernUpdateVelNeighborSearchScattered<<<fullBlocksPerGrid, blockSize>>>(N, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(N, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
  std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
cudaFree(dev_particleArrayIndices);
cudaFree(dev_particleGridIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
checkCUDAErrorWithLine("cudaFree failed!");
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int* dev_intKeys;
int* dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
}
|
1c92573c254b9270c63aec82dd2a6372bb7bc5e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/channel_backprop_stats_op.h"
namespace caffe2 {
namespace {
// based on "Optimizing Parallel Reduction in CUDA" by Mark Harris
// note - volatile keyword is needed to allow doing a warp reduction without
// synchronization on recent architectures
template <unsigned int blockSize>
__device__ void warpReduce(volatile float* sdata, unsigned int tid) {
// note - the if statements are "free" as they are resolved at compile time
if (blockSize >= 64)
sdata[tid] += sdata[tid + 32];
if (blockSize >= 32)
sdata[tid] += sdata[tid + 16];
if (blockSize >= 16)
sdata[tid] += sdata[tid + 8];
if (blockSize >= 8)
sdata[tid] += sdata[tid + 4];
if (blockSize >= 4)
sdata[tid] += sdata[tid + 2];
if (blockSize >= 2)
sdata[tid] += sdata[tid + 1];
}
template <unsigned int blockSize>
__global__ void ChannelBackpropStatsBlockKernel(
int N,
int C,
int valsPerChannel,
const float* X,
const float* dY,
const float* mean,
const float* invStddev,
float* dBiasBlocks,
float* dScaleBlocks) {
__shared__ float dBiasData[blockSize];
__shared__ float dScaleData[blockSize];
auto tid = threadIdx.x;
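  // Blocks are laid out as (n, c, local block within channel): each (image, channel)
  // slice is reduced by numBlocksPerChannel blocks, and their partial sums are combined
  // in the second kernel below.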
auto numBlocksPerChannel = (valsPerChannel + blockSize - 1) / blockSize;
auto localBlockIndex = blockIdx.x % numBlocksPerChannel;
auto inputIndex = (blockIdx.x / numBlocksPerChannel) * valsPerChannel +
localBlockIndex * blockSize + tid;
auto n = blockIdx.x / numBlocksPerChannel / C;
auto c = (blockIdx.x / numBlocksPerChannel) % C;
dBiasData[tid] = 0;
dScaleData[tid] = 0;
if (localBlockIndex * blockSize + tid < valsPerChannel) {
dBiasData[tid] += dY[inputIndex];
dScaleData[tid] +=
(X[inputIndex] - mean[c]) * invStddev[c] * dY[inputIndex];
}
__syncthreads();
if (blockSize >= 512) {
if (tid < 256) {
dBiasData[tid] += dBiasData[tid + 256];
dScaleData[tid] += dScaleData[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
dBiasData[tid] += dBiasData[tid + 128];
dScaleData[tid] += dScaleData[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
dBiasData[tid] += dBiasData[tid + 64];
dScaleData[tid] += dScaleData[tid + 64];
}
__syncthreads();
}
if (tid < 32) {
warpReduce<blockSize>(dBiasData, tid);
warpReduce<blockSize>(dScaleData, tid);
}
// output block data sorted by C to simplify second reduction
if (tid == 0) {
auto outputIndex = (c * N + n) * numBlocksPerChannel + localBlockIndex;
dBiasBlocks[outputIndex] = dBiasData[0];
dScaleBlocks[outputIndex] = dScaleData[0];
}
}
template <unsigned int blockSize>
__global__ void ChannelBackpropStatsFinalSumsKernel(
int N,
int C,
int numSumsPerChannel,
const float* dBiasScratch,
const float* dScaleScratch,
float* dBias,
float* dScale) {
__shared__ float dBiasData[blockSize];
__shared__ float dScaleData[blockSize];
auto tid = threadIdx.x;
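  // One block per channel: stride over the N * numSumsPerChannel partial sums belonging
  // to this channel, then tree-reduce within the block.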
auto inputIndex = blockIdx.x * N * numSumsPerChannel + tid;
dBiasData[tid] = 0;
dScaleData[tid] = 0;
for (auto i = inputIndex; i < (blockIdx.x + 1) * N * numSumsPerChannel;
i += blockSize) {
dBiasData[tid] += dBiasScratch[i];
dScaleData[tid] += dScaleScratch[i];
}
__syncthreads();
if (blockSize >= 512) {
if (tid < 256) {
dBiasData[tid] += dBiasData[tid + 256];
dScaleData[tid] += dScaleData[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
dBiasData[tid] += dBiasData[tid + 128];
dScaleData[tid] += dScaleData[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
dBiasData[tid] += dBiasData[tid + 64];
dScaleData[tid] += dScaleData[tid + 64];
}
__syncthreads();
}
if (tid < 32) {
warpReduce<blockSize>(dBiasData, tid);
warpReduce<blockSize>(dScaleData, tid);
}
if (tid == 0) {
dBias[blockIdx.x] = dBiasData[0];
dScale[blockIdx.x] = dScaleData[0];
}
}
} // namespace
template <>
bool ChannelBackpropStatsOp<CUDAContext>::RunOnDevice() {
const auto& X = Input(INPUT);
const auto& dY = Input(OUTPUT_GRAD);
const auto& mean = Input(SAVED_MEAN);
const auto& invStddev = Input(SAVED_INV_STDDEV);
CAFFE_ENFORCE(X.ndim() >= 3 && X.ndim() <= 5);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.ndim() > 3 ? X.dim32(3) : 1;
const int D = X.ndim() > 4 ? X.dim32(4) : 1;
auto dScale = Output(SCALE_GRAD);
auto dBias = Output(BIAS_GRAD);
const auto Xarr = X.data<float>();
const auto dYarr = dY.data<float>();
const auto meanArr = mean.data<float>();
const auto invStddevArr = invStddev.data<float>();
dBias->Resize(C);
dScale->Resize(C);
const auto valsPerChannel = H * W * D;
const auto numBlocksPerChannel = CAFFE_GET_BLOCKS(valsPerChannel);
const auto numBlocksTotal = numBlocksPerChannel * N * C;
dBiasScratch_.Resize(numBlocksTotal);
dScaleScratch_.Resize(numBlocksTotal);
hipLaunchKernelGGL(( ChannelBackpropStatsBlockKernel<CAFFE_CUDA_NUM_THREADS>)
, dim3(numBlocksTotal), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N,
C,
valsPerChannel,
Xarr,
dYarr,
meanArr,
invStddevArr,
dBiasScratch_.mutable_data<float>(),
dScaleScratch_.mutable_data<float>());
hipLaunchKernelGGL(( ChannelBackpropStatsFinalSumsKernel<CAFFE_CUDA_NUM_THREADS>)
, dim3(C), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N,
C,
numBlocksPerChannel,
dBiasScratch_.data<float>(),
dScaleScratch_.data<float>(),
dBias->mutable_data<float>(),
dScale->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(
ChannelBackpropStats,
ChannelBackpropStatsOp<CUDAContext>);
} // namespace caffe2
| 1c92573c254b9270c63aec82dd2a6372bb7bc5e3.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/channel_backprop_stats_op.h"
namespace caffe2 {
namespace {
// based on "Optimizing Parallel Reduction in CUDA" by Mark Harris
// note - volatile keyword is needed to allow doing a warp reduction without
// synchronization on recent architectures
template <unsigned int blockSize>
__device__ void warpReduce(volatile float* sdata, unsigned int tid) {
// note - the if statements are "free" as they are resolved at compile time
if (blockSize >= 64)
sdata[tid] += sdata[tid + 32];
if (blockSize >= 32)
sdata[tid] += sdata[tid + 16];
if (blockSize >= 16)
sdata[tid] += sdata[tid + 8];
if (blockSize >= 8)
sdata[tid] += sdata[tid + 4];
if (blockSize >= 4)
sdata[tid] += sdata[tid + 2];
if (blockSize >= 2)
sdata[tid] += sdata[tid + 1];
}
template <unsigned int blockSize>
__global__ void ChannelBackpropStatsBlockKernel(
int N,
int C,
int valsPerChannel,
const float* X,
const float* dY,
const float* mean,
const float* invStddev,
float* dBiasBlocks,
float* dScaleBlocks) {
__shared__ float dBiasData[blockSize];
__shared__ float dScaleData[blockSize];
auto tid = threadIdx.x;
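  // Blocks are laid out as (n, c, local block within channel): each (image, channel)
  // slice is reduced by numBlocksPerChannel blocks, and their partial sums are combined
  // in the second kernel below.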
auto numBlocksPerChannel = (valsPerChannel + blockSize - 1) / blockSize;
auto localBlockIndex = blockIdx.x % numBlocksPerChannel;
auto inputIndex = (blockIdx.x / numBlocksPerChannel) * valsPerChannel +
localBlockIndex * blockSize + tid;
auto n = blockIdx.x / numBlocksPerChannel / C;
auto c = (blockIdx.x / numBlocksPerChannel) % C;
dBiasData[tid] = 0;
dScaleData[tid] = 0;
if (localBlockIndex * blockSize + tid < valsPerChannel) {
dBiasData[tid] += dY[inputIndex];
dScaleData[tid] +=
(X[inputIndex] - mean[c]) * invStddev[c] * dY[inputIndex];
}
__syncthreads();
if (blockSize >= 512) {
if (tid < 256) {
dBiasData[tid] += dBiasData[tid + 256];
dScaleData[tid] += dScaleData[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
dBiasData[tid] += dBiasData[tid + 128];
dScaleData[tid] += dScaleData[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
dBiasData[tid] += dBiasData[tid + 64];
dScaleData[tid] += dScaleData[tid + 64];
}
__syncthreads();
}
if (tid < 32) {
warpReduce<blockSize>(dBiasData, tid);
warpReduce<blockSize>(dScaleData, tid);
}
// output block data sorted by C to simplify second reduction
if (tid == 0) {
auto outputIndex = (c * N + n) * numBlocksPerChannel + localBlockIndex;
dBiasBlocks[outputIndex] = dBiasData[0];
dScaleBlocks[outputIndex] = dScaleData[0];
}
}
template <unsigned int blockSize>
__global__ void ChannelBackpropStatsFinalSumsKernel(
int N,
int C,
int numSumsPerChannel,
const float* dBiasScratch,
const float* dScaleScratch,
float* dBias,
float* dScale) {
__shared__ float dBiasData[blockSize];
__shared__ float dScaleData[blockSize];
auto tid = threadIdx.x;
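  // One block per channel: stride over the N * numSumsPerChannel partial sums belonging
  // to this channel, then tree-reduce within the block.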
auto inputIndex = blockIdx.x * N * numSumsPerChannel + tid;
dBiasData[tid] = 0;
dScaleData[tid] = 0;
for (auto i = inputIndex; i < (blockIdx.x + 1) * N * numSumsPerChannel;
i += blockSize) {
dBiasData[tid] += dBiasScratch[i];
dScaleData[tid] += dScaleScratch[i];
}
__syncthreads();
if (blockSize >= 512) {
if (tid < 256) {
dBiasData[tid] += dBiasData[tid + 256];
dScaleData[tid] += dScaleData[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
dBiasData[tid] += dBiasData[tid + 128];
dScaleData[tid] += dScaleData[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
dBiasData[tid] += dBiasData[tid + 64];
dScaleData[tid] += dScaleData[tid + 64];
}
__syncthreads();
}
if (tid < 32) {
warpReduce<blockSize>(dBiasData, tid);
warpReduce<blockSize>(dScaleData, tid);
}
if (tid == 0) {
dBias[blockIdx.x] = dBiasData[0];
dScale[blockIdx.x] = dScaleData[0];
}
}
} // namespace
template <>
bool ChannelBackpropStatsOp<CUDAContext>::RunOnDevice() {
const auto& X = Input(INPUT);
const auto& dY = Input(OUTPUT_GRAD);
const auto& mean = Input(SAVED_MEAN);
const auto& invStddev = Input(SAVED_INV_STDDEV);
CAFFE_ENFORCE(X.ndim() >= 3 && X.ndim() <= 5);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.ndim() > 3 ? X.dim32(3) : 1;
const int D = X.ndim() > 4 ? X.dim32(4) : 1;
auto dScale = Output(SCALE_GRAD);
auto dBias = Output(BIAS_GRAD);
const auto Xarr = X.data<float>();
const auto dYarr = dY.data<float>();
const auto meanArr = mean.data<float>();
const auto invStddevArr = invStddev.data<float>();
dBias->Resize(C);
dScale->Resize(C);
const auto valsPerChannel = H * W * D;
const auto numBlocksPerChannel = CAFFE_GET_BLOCKS(valsPerChannel);
const auto numBlocksTotal = numBlocksPerChannel * N * C;
dBiasScratch_.Resize(numBlocksTotal);
dScaleScratch_.Resize(numBlocksTotal);
ChannelBackpropStatsBlockKernel<CAFFE_CUDA_NUM_THREADS>
<<<numBlocksTotal, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N,
C,
valsPerChannel,
Xarr,
dYarr,
meanArr,
invStddevArr,
dBiasScratch_.mutable_data<float>(),
dScaleScratch_.mutable_data<float>());
ChannelBackpropStatsFinalSumsKernel<CAFFE_CUDA_NUM_THREADS>
<<<C, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N,
C,
numBlocksPerChannel,
dBiasScratch_.data<float>(),
dScaleScratch_.data<float>(),
dBias->mutable_data<float>(),
dScale->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(
ChannelBackpropStats,
ChannelBackpropStatsOp<CUDAContext>);
} // namespace caffe2
|
563be69ff296e04a26baef2be5c85bfa7dcb48fc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
#include <string.h>
__global__ void fun(int *z){
int x = 0;
(int)(&x) & -7;
*z = (int)(&x) & -7;
printf("%d\n", (int)(&x) & -7);
}
int main(void)
{
int z;
int *dev_z;
hipMalloc((void**)&dev_z, sizeof(int));
hipLaunchKernelGGL(( fun), dim3(1),dim3(1), 0, 0, dev_z);
hipMemcpy(&z, dev_z, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_z);
return 0;
}
//gcc:4;clang:4;nvcc:0;Unspecified value or behavior.C11 sec. 5.1.2.2.3:1;
| 563be69ff296e04a26baef2be5c85bfa7dcb48fc.cu | #include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
#include <string.h>
__global__ void fun(int *z){
int x = 0;
(int)(&x) & -7;
*z = (int)(&x) & -7;
printf("%d\n", (int)(&x) & -7);
}
int main(void)
{
int z;
int *dev_z;
cudaMalloc((void**)&dev_z, sizeof(int));
fun<<<1,1>>>(dev_z);
cudaMemcpy(&z, dev_z, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_z);
return 0;
}
//Compiles successfully; gcc:4;clang:4;nvcc:0;Unspecified value or behavior.C11 sec. 5.1.2.2.3:1;
|
5a01e0ab2b696397ac0ec997943defa00f790efa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <boost/graph/buffer_concepts.hpp>
//#include <pcl/gpu/features/device/eigen.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
enum
{
kx = 7,
ky = 7,
STEP = 1
};
__global__ void
computeNmapKernelEigen (int rows, int cols, const PtrStep<float> vmap, PtrStep<float> nmap)
{
int u = threadIdx.x + blockIdx.x * blockDim.x;
int v = threadIdx.y + blockIdx.y * blockDim.y;
if (u >= cols || v >= rows)
return;
nmap.ptr (v)[u] = numeric_limits<float>::quiet_NaN ();
if (isnan (vmap.ptr (v)[u]))
return;
int ty = min (v - ky / 2 + ky, rows - 1);
int tx = min (u - kx / 2 + kx, cols - 1);
float3 centroid = make_float3 (0.f, 0.f, 0.f);
int counter = 0;
for (int cy = max (v - ky / 2, 0); cy < ty; cy += STEP)
for (int cx = max (u - kx / 2, 0); cx < tx; cx += STEP)
{
float v_x = vmap.ptr (cy)[cx];
if (!isnan (v_x))
{
centroid.x += v_x;
centroid.y += vmap.ptr (cy + rows)[cx];
centroid.z += vmap.ptr (cy + 2 * rows)[cx];
++counter;
}
}
if (counter < kx * ky / 2)
return;
centroid *= 1.f / counter;
float cov[] = {0, 0, 0, 0, 0, 0};
for (int cy = max (v - ky / 2, 0); cy < ty; cy += STEP)
for (int cx = max (u - kx / 2, 0); cx < tx; cx += STEP)
{
float3 v;
v.x = vmap.ptr (cy)[cx];
if (isnan (v.x))
continue;
v.y = vmap.ptr (cy + rows)[cx];
v.z = vmap.ptr (cy + 2 * rows)[cx];
float3 d = v - centroid;
cov[0] += d.x * d.x; //cov (0, 0)
cov[1] += d.x * d.y; //cov (0, 1)
cov[2] += d.x * d.z; //cov (0, 2)
cov[3] += d.y * d.y; //cov (1, 1)
cov[4] += d.y * d.z; //cov (1, 2)
cov[5] += d.z * d.z; //cov (2, 2)
}
typedef Eigen33::Mat33 Mat33;
Eigen33 eigen33 (cov);
Mat33 tmp;
Mat33 vec_tmp;
Mat33 evecs;
float3 evals;
eigen33.compute (tmp, vec_tmp, evecs, evals);
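          // evecs[0] is the eigenvector of the smallest eigenvalue of the local
          // covariance matrix, i.e. the estimated surface normal direction.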
float3 n = normalized (evecs[0]);
u = threadIdx.x + blockIdx.x * blockDim.x;
v = threadIdx.y + blockIdx.y * blockDim.y;
nmap.ptr (v )[u] = n.x;
nmap.ptr (v + rows)[u] = n.y;
nmap.ptr (v + 2 * rows)[u] = n.z;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
computeNormalsEigen (const MapArr& vmap, MapArr& nmap)
{
int cols = vmap.cols ();
int rows = vmap.rows () / 3;
nmap.create (vmap.rows (), vmap.cols ());
dim3 block (32, 8);
dim3 grid (1, 1, 1);
grid.x = divUp (cols, block.x);
grid.y = divUp (rows, block.y);
hipLaunchKernelGGL(( computeNmapKernelEigen), dim3(grid), dim3(block), 0, 0, rows, cols, vmap, nmap);
cudaSafeCall (hipGetLastError ());
cudaSafeCall (hipDeviceSynchronize ());
}
}
}
}
| 5a01e0ab2b696397ac0ec997943defa00f790efa.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <boost/graph/buffer_concepts.hpp>
//#include <pcl/gpu/features/device/eigen.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
enum
{
kx = 7,
ky = 7,
STEP = 1
};
__global__ void
computeNmapKernelEigen (int rows, int cols, const PtrStep<float> vmap, PtrStep<float> nmap)
{
int u = threadIdx.x + blockIdx.x * blockDim.x;
int v = threadIdx.y + blockIdx.y * blockDim.y;
if (u >= cols || v >= rows)
return;
nmap.ptr (v)[u] = numeric_limits<float>::quiet_NaN ();
if (isnan (vmap.ptr (v)[u]))
return;
int ty = min (v - ky / 2 + ky, rows - 1);
int tx = min (u - kx / 2 + kx, cols - 1);
float3 centroid = make_float3 (0.f, 0.f, 0.f);
int counter = 0;
for (int cy = max (v - ky / 2, 0); cy < ty; cy += STEP)
for (int cx = max (u - kx / 2, 0); cx < tx; cx += STEP)
{
float v_x = vmap.ptr (cy)[cx];
if (!isnan (v_x))
{
centroid.x += v_x;
centroid.y += vmap.ptr (cy + rows)[cx];
centroid.z += vmap.ptr (cy + 2 * rows)[cx];
++counter;
}
}
if (counter < kx * ky / 2)
return;
centroid *= 1.f / counter;
float cov[] = {0, 0, 0, 0, 0, 0};
for (int cy = max (v - ky / 2, 0); cy < ty; cy += STEP)
for (int cx = max (u - kx / 2, 0); cx < tx; cx += STEP)
{
float3 v;
v.x = vmap.ptr (cy)[cx];
if (isnan (v.x))
continue;
v.y = vmap.ptr (cy + rows)[cx];
v.z = vmap.ptr (cy + 2 * rows)[cx];
float3 d = v - centroid;
cov[0] += d.x * d.x; //cov (0, 0)
cov[1] += d.x * d.y; //cov (0, 1)
cov[2] += d.x * d.z; //cov (0, 2)
cov[3] += d.y * d.y; //cov (1, 1)
cov[4] += d.y * d.z; //cov (1, 2)
cov[5] += d.z * d.z; //cov (2, 2)
}
typedef Eigen33::Mat33 Mat33;
Eigen33 eigen33 (cov);
Mat33 tmp;
Mat33 vec_tmp;
Mat33 evecs;
float3 evals;
eigen33.compute (tmp, vec_tmp, evecs, evals);
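          // evecs[0] is the eigenvector of the smallest eigenvalue of the local
          // covariance matrix, i.e. the estimated surface normal direction.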
float3 n = normalized (evecs[0]);
u = threadIdx.x + blockIdx.x * blockDim.x;
v = threadIdx.y + blockIdx.y * blockDim.y;
nmap.ptr (v )[u] = n.x;
nmap.ptr (v + rows)[u] = n.y;
nmap.ptr (v + 2 * rows)[u] = n.z;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
computeNormalsEigen (const MapArr& vmap, MapArr& nmap)
{
int cols = vmap.cols ();
int rows = vmap.rows () / 3;
nmap.create (vmap.rows (), vmap.cols ());
dim3 block (32, 8);
dim3 grid (1, 1, 1);
grid.x = divUp (cols, block.x);
grid.y = divUp (rows, block.y);
computeNmapKernelEigen<<<grid, block>>>(rows, cols, vmap, nmap);
cudaSafeCall (cudaGetLastError ());
cudaSafeCall (cudaDeviceSynchronize ());
}
}
}
}
|
5774412f51aeb69e93bd418046944b96ccf164fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/distance_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SquaredL2DistanceKernel(
const int N, const int D, const T* X, const T* Y, T* distance) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float dist = 0.0;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
T diff = X[i * D + j] - Y[i * D + j];
dist += diff * diff;
}
float total_dist = BlockReduce(temp_storage).Sum(dist);
__syncthreads();
if (threadIdx.x == 0) {
distance[i] = total_dist / 2.0;
}
}
}
} // namespace
template<>
bool SquaredL2DistanceOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto* distance = Output(0);
DCHECK_EQ(X.ndim(), Y.ndim());
for (int i = 0; i < X.ndim(); ++i) {
DCHECK_EQ(X.dim32(i), Y.dim32(i));
}
int N = X.ndim() > 0 ? X.dim32(0) : 1;
int D = X.size() / N;
distance->Resize(vector<TIndex>(size_t(1), N));
hipLaunchKernelGGL(( SquaredL2DistanceKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, D, X.data<float>(), Y.data<float>(), distance->mutable_data<float>());
return true;
}
namespace {
template <typename T>
__global__ void
StripedScaleKernel(const int N, const int D, const T* alpha, const T* x, T* y) {
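  // Scales each length-D row i of x by alpha[i]; used below to multiply (X - Y) by the
  // per-row distance gradient dDistance.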
CUDA_1D_KERNEL_LOOP(i, N * D) {
int k = i / D;
y[i] = x[i] * alpha[k];
}
}
}
template <>
bool SquaredL2DistanceGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dDistance = Input(2);
auto* dX = Output(0);
auto* dY = Output(1);
int N = X.ndim() > 0 ? X.dim32(0) : 1;
int D = N > 0 ? X.size() / N : 0;
CAFFE_ENFORCE(X.ndim() == Y.ndim());
for (int i = 0; i < X.ndim(); ++i) {
CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i));
}
CAFFE_ENFORCE(dDistance.ndim() == 1);
CAFFE_ENFORCE(dDistance.dim32(0) == N);
dX->ResizeLike(X);
dY->ResizeLike(Y);
math::Sub<float, CUDAContext>(
X.size(),
X.data<float>(),
Y.data<float>(),
dX->mutable_data<float>(),
&context_);
hipLaunchKernelGGL(( StripedScaleKernel<float>),
dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
dDistance.data<float>(),
dX->data<float>(),
dX->mutable_data<float>());
// The gradient of the other side is basically the negative.
math::Scale<float, CUDAContext>(
X.size(), -1, dX->data<float>(), dY->mutable_data<float>(), &context_);
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(SquaredL2Distance,
SquaredL2DistanceOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SquaredL2DistanceGradient,
SquaredL2DistanceGradientOp<float, CUDAContext>);
} // namespace
} // namespace caffe2
| 5774412f51aeb69e93bd418046944b96ccf164fc.cu | #include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/distance_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SquaredL2DistanceKernel(
const int N, const int D, const T* X, const T* Y, T* distance) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float dist = 0.0;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
T diff = X[i * D + j] - Y[i * D + j];
dist += diff * diff;
}
float total_dist = BlockReduce(temp_storage).Sum(dist);
__syncthreads();
if (threadIdx.x == 0) {
distance[i] = total_dist / 2.0;
}
}
}
} // namespace
template<>
bool SquaredL2DistanceOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto* distance = Output(0);
DCHECK_EQ(X.ndim(), Y.ndim());
for (int i = 0; i < X.ndim(); ++i) {
DCHECK_EQ(X.dim32(i), Y.dim32(i));
}
int N = X.ndim() > 0 ? X.dim32(0) : 1;
int D = X.size() / N;
distance->Resize(vector<TIndex>(size_t(1), N));
SquaredL2DistanceKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, D, X.data<float>(), Y.data<float>(), distance->mutable_data<float>());
return true;
}
namespace {
template <typename T>
__global__ void
StripedScaleKernel(const int N, const int D, const T* alpha, const T* x, T* y) {
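  // Scales each length-D row i of x by alpha[i]; used below to multiply (X - Y) by the
  // per-row distance gradient dDistance.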
CUDA_1D_KERNEL_LOOP(i, N * D) {
int k = i / D;
y[i] = x[i] * alpha[k];
}
}
}
template <>
bool SquaredL2DistanceGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dDistance = Input(2);
auto* dX = Output(0);
auto* dY = Output(1);
int N = X.ndim() > 0 ? X.dim32(0) : 1;
int D = N > 0 ? X.size() / N : 0;
CAFFE_ENFORCE(X.ndim() == Y.ndim());
for (int i = 0; i < X.ndim(); ++i) {
CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i));
}
CAFFE_ENFORCE(dDistance.ndim() == 1);
CAFFE_ENFORCE(dDistance.dim32(0) == N);
dX->ResizeLike(X);
dY->ResizeLike(Y);
math::Sub<float, CUDAContext>(
X.size(),
X.data<float>(),
Y.data<float>(),
dX->mutable_data<float>(),
&context_);
StripedScaleKernel<float><<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
dDistance.data<float>(),
dX->data<float>(),
dX->mutable_data<float>());
// The gradient of the other side is basically the negative.
math::Scale<float, CUDAContext>(
X.size(), -1, dX->data<float>(), dY->mutable_data<float>(), &context_);
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(SquaredL2Distance,
SquaredL2DistanceOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SquaredL2DistanceGradient,
SquaredL2DistanceGradientOp<float, CUDAContext>);
} // namespace
} // namespace caffe2
|
4a5c15f52fa6495253730ac5a668f6d89baafdf2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <helper_cuda.h>
#include <helper_timer.h>
#include "commonDefs.hpp"
#include "commonKernels.hpp"
#define VERIFY_GPU_CORRECTNESS 0
size_t maxSampleSizeInMb = 64;
int numKernelRuns = 100;
int verboseResults = 0;
const char *memAllocTypeStr[MEMALLOC_TYPE_COUNT] = {
"Managed_Memory_With_Hints",
"Managed_Memory_With_Hints_FullyAsync",
"Managed_Memory_NoHints",
"Zero_Copy",
"Memcpy_HostMalloc_DeviceCudaMalloc",
"MemcpyAsync_HostMalloc_DeviceCudaMalloc",
"Memcpy_HostCudaHostAlloc_DeviceCudaMalloc",
"MemcpyAsync_HostCudaHostAlloc_DeviceCudaMalloc"};
const char *memAllocTypeShortStr[MEMALLOC_TYPE_COUNT] = {
"UMhint", // Managed Memory With Hints
"UMhntAs", // Managed Memory With_Hints Async
"UMeasy", // Managed_Memory with No Hints
"0Copy", // Zero Copy
"MemCopy", // USE HOST PAGEABLE AND DEVICE_MEMORY
"CpAsync", // USE HOST PAGEABLE AND DEVICE_MEMORY ASYNC
"CpHpglk", // USE HOST PAGELOCKED AND DEVICE MEMORY
"CpPglAs" // USE HOST PAGELOCKED AND DEVICE MEMORY ASYNC
};
static float RandFloat(float low, float high) {
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
void fillMatrixWithRandomValues(float *matrix, unsigned int matrixDim) {
unsigned int i, j;
for (i = 0; i < matrixDim; ++i) {
for (j = 0; j < matrixDim; ++j) {
matrix[j + i * matrixDim] = RandFloat(0.0f, 10.0f);
}
}
}
#if VERIFY_GPU_CORRECTNESS
void verifyMatrixMultiplyCorrectness(float *C, float *A, float *B,
unsigned int matrixDim) {
unsigned int i, j, k, numErrors = 0;
for (i = 0; i < matrixDim; ++i) {
for (j = 0; j < matrixDim; ++j) {
float result = 0.0f;
for (k = 0; k < matrixDim; ++k) {
result += A[k + i * matrixDim] * B[j + k * matrixDim];
}
if (fabs(C[j + i * matrixDim] - result) > 0.001 * matrixDim) {
printf("At [%u, %u]: Expected %f, Found %f\n", i, j, result,
C[j + i * matrixDim]);
++numErrors;
}
}
}
if (numErrors != 0) {
    printf("%d value mismatches occurred\n", numErrors);
    fflush(stdout);
    exit(EXIT_FAILURE);  // exit since value mismatches occurred
}
}
#endif
void copyMatrix(float *dstMatrix, float *srcMatrix, unsigned int matrixDim) {
size_t size = matrixDim * matrixDim * sizeof(float);
memcpy(dstMatrix, srcMatrix, size);
}
void verifyMatrixData(float *expectedData, float *observedData,
unsigned int matrixDim) {
unsigned int i, j, numErrors = 0;
for (i = 0; i < matrixDim; ++i) {
for (j = 0; j < matrixDim; ++j) {
if (expectedData[j + i * matrixDim] != observedData[j + i * matrixDim]) {
++numErrors;
if (verboseResults) {
printf("At [%u, %u]: Expected %f, Found %f\n", i, j,
expectedData[j + i * matrixDim],
observedData[j + i * matrixDim]);
}
}
}
}
if (numErrors != 0) {
    printf("%d value mismatches occurred\n", numErrors);
    fflush(stdout);
    exit(EXIT_FAILURE);  // exit since value mismatches occurred
}
}
#define BLOCK_SIZE 32
__global__ void matrixMultiplyKernel(float *C, float *A, float *B,
unsigned int matrixDim) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
unsigned int wA = matrixDim;
unsigned int wB = matrixDim;
// Index of the first sub-matrix of A processed by the block
int aBegin = matrixDim * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
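// Note: matrixMultiplyKernel performs no bounds checks on its tile loads, so it
// effectively assumes matrixDim is a multiple of BLOCK_SIZE (32). The benchmark below
// keeps this true since matrix dimensions start at 32 and grow by a power-of-two
// multiplier.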
void runMatrixMultiplyKernel(unsigned int matrixDim, int allocType,
unsigned int numLoops, double *gpuLaunchCallsTimes,
double *gpuTransferToCallsTimes,
double *gpuTransferFromCallsTimes,
double *gpuLaunchAndTransferCallsTimes,
double *gpuLaunchTransferSyncTimes,
double *cpuAccessTimes, double *overallTimes,
int device_id) {
float *dptrA = NULL, *hptrA = NULL;
float *dptrB = NULL, *hptrB = NULL;
float *dptrC = NULL, *hptrC = NULL;
float *randValuesX = NULL, *randValuesY = NULL;
float *randValuesVerifyXmulY = NULL, *randValuesVerifyYmulX = NULL;
bool copyRequired = false, hintsRequired = false;
bool someTransferOpRequired;
bool isAsync = false;
hipStream_t streamToRunOn;
unsigned int *latch;
size_t size = matrixDim * matrixDim * sizeof(float);
dim3 threads(32, 32);
dim3 grid(matrixDim / threads.x, matrixDim / threads.y);
StopWatchInterface *gpuLaunchCallsTimer = 0, *gpuTransferCallsTimer = 0;
StopWatchInterface *gpuSyncTimer = 0, *cpuAccessTimer = 0;
sdkCreateTimer(&gpuLaunchCallsTimer);
sdkCreateTimer(&gpuTransferCallsTimer);
sdkCreateTimer(&gpuSyncTimer);
sdkCreateTimer(&cpuAccessTimer);
unsigned int i;
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, device_id));
checkCudaErrors(hipStreamCreate(&streamToRunOn));
randValuesX = (float *)malloc(size);
if (!randValuesX) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
randValuesY = (float *)malloc(size);
if (!randValuesY) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
randValuesVerifyXmulY = (float *)malloc(size);
if (!randValuesVerifyXmulY) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
randValuesVerifyYmulX = (float *)malloc(size);
if (!randValuesVerifyYmulX) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
checkCudaErrors(hipMalloc(&dptrA, size));
checkCudaErrors(hipMalloc(&dptrB, size));
checkCudaErrors(hipMalloc(&dptrC, size));
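  // Reference pass: multiply the random matrices once in both orders (X*Y and Y*X) so
  // that the timed iterations below can verify their results against these products.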
fillMatrixWithRandomValues(randValuesX, matrixDim);
fillMatrixWithRandomValues(randValuesY, matrixDim);
checkCudaErrors(
hipMemcpyAsync(dptrA, randValuesX, size, hipMemcpyHostToDevice));
checkCudaErrors(
hipMemcpyAsync(dptrB, randValuesY, size, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( matrixMultiplyKernel), dim3(grid), dim3(threads), 0, 0, dptrC, dptrA, dptrB, matrixDim);
checkCudaErrors(hipMemcpyAsync(randValuesVerifyXmulY, dptrC, size,
hipMemcpyDeviceToHost));
checkCudaErrors(hipStreamSynchronize(NULL));
hipLaunchKernelGGL(( matrixMultiplyKernel), dim3(grid), dim3(threads), 0, 0, dptrC, dptrB, dptrA, matrixDim);
checkCudaErrors(hipMemcpyAsync(randValuesVerifyYmulX, dptrC, size,
hipMemcpyDeviceToHost));
checkCudaErrors(hipStreamSynchronize(NULL));
#if VERIFY_GPU_CORRECTNESS
verifyMatrixMultiplyCorrectness(randValuesVerifyXmulY, randValuesX,
randValuesY, matrixDim);
verifyMatrixMultiplyCorrectness(randValuesVerifyYmulX, randValuesY,
randValuesX, matrixDim);
#endif
checkCudaErrors(hipFree(dptrA));
checkCudaErrors(hipFree(dptrB));
checkCudaErrors(hipFree(dptrC));
checkCudaErrors(hipHostMalloc(&latch, sizeof(unsigned int)));
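  // The latch lives in pinned host memory so the device-side spin kernel
  // (spinWhileLessThanOne, presumably declared in commonKernels.hpp) can observe host
  // writes; in the Managed_Memory_With_Hints_FullyAsync mode the host releases it by
  // writing 1 only after all of the iteration's work has been enqueued.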
switch (allocType) {
case USE_HOST_PAGEABLE_AND_DEVICE_MEMORY:
case USE_HOST_PAGEABLE_AND_DEVICE_MEMORY_ASYNC:
hptrA = (float *)malloc(size);
if (!hptrA) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
hptrB = (float *)malloc(size);
if (!hptrB) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
hptrC = (float *)malloc(size);
if (!hptrC) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
checkCudaErrors(hipMalloc(&dptrA, size));
checkCudaErrors(hipMalloc(&dptrB, size));
checkCudaErrors(hipMalloc(&dptrC, size));
copyRequired = true;
break;
case USE_HOST_PAGELOCKED_AND_DEVICE_MEMORY:
case USE_HOST_PAGELOCKED_AND_DEVICE_MEMORY_ASYNC:
checkCudaErrors(hipHostMalloc(&hptrA, size));
checkCudaErrors(hipHostMalloc(&hptrB, size));
checkCudaErrors(hipHostMalloc(&hptrC, size));
checkCudaErrors(hipMalloc(&dptrA, size));
checkCudaErrors(hipMalloc(&dptrB, size));
checkCudaErrors(hipMalloc(&dptrC, size));
copyRequired = true;
break;
case USE_ZERO_COPY:
checkCudaErrors(hipHostMalloc(&hptrA, size));
checkCudaErrors(hipHostMalloc(&hptrB, size));
checkCudaErrors(hipHostMalloc(&hptrC, size));
checkCudaErrors(hipHostGetDevicePointer(&dptrA, hptrA, 0));
checkCudaErrors(hipHostGetDevicePointer(&dptrB, hptrB, 0));
checkCudaErrors(hipHostGetDevicePointer(&dptrC, hptrC, 0));
break;
case USE_MANAGED_MEMORY:
checkCudaErrors(hipMallocManaged(&dptrA, size));
checkCudaErrors(hipMallocManaged(&dptrB, size));
checkCudaErrors(hipMallocManaged(&dptrC, size));
hptrA = dptrA;
hptrB = dptrB;
hptrC = dptrC;
break;
case USE_MANAGED_MEMORY_WITH_HINTS:
case USE_MANAGED_MEMORY_WITH_HINTS_ASYNC:
if (deviceProp.concurrentManagedAccess) {
checkCudaErrors(hipMallocManaged(&dptrA, size));
checkCudaErrors(hipMallocManaged(&dptrB, size));
checkCudaErrors(hipMallocManaged(&dptrC, size));
checkCudaErrors(hipMemPrefetchAsync(dptrA, size, hipCpuDeviceId));
checkCudaErrors(hipMemPrefetchAsync(dptrB, size, hipCpuDeviceId));
checkCudaErrors(hipMemPrefetchAsync(dptrC, size, hipCpuDeviceId));
} else {
checkCudaErrors(hipMallocManaged(&dptrA, size, hipMemAttachHost));
checkCudaErrors(hipMallocManaged(&dptrB, size, hipMemAttachHost));
checkCudaErrors(hipMallocManaged(&dptrC, size, hipMemAttachHost));
}
hptrA = dptrA;
hptrB = dptrB;
hptrC = dptrC;
hintsRequired = true;
break;
default:
exit(EXIT_FAILURE); // exit with error
}
if (allocType == USE_HOST_PAGEABLE_AND_DEVICE_MEMORY_ASYNC ||
allocType == USE_HOST_PAGELOCKED_AND_DEVICE_MEMORY_ASYNC ||
allocType == USE_MANAGED_MEMORY_WITH_HINTS_ASYNC) {
isAsync = true;
}
someTransferOpRequired = copyRequired || hintsRequired;
// fill buffers with 0 to avoid any first access page-fault overheads.
memset(hptrA, 0, size);
memset(hptrB, 0, size);
memset(hptrC, 0, size);
for (i = 0; i < numLoops; i++) {
cpuAccessTimes[i] = 0.0;
gpuLaunchCallsTimes[i] = 0.0;
gpuTransferToCallsTimes[i] = 0.0;
gpuTransferFromCallsTimes[i] = 0.0;
sdkStartTimer(&cpuAccessTimer);
{
      copyMatrix(hptrA, ((i & 0x1) == 0) ? randValuesX : randValuesY, matrixDim);
      copyMatrix(hptrB, ((i & 0x1) == 0) ? randValuesY : randValuesX, matrixDim);
}
sdkStopTimer(&cpuAccessTimer);
cpuAccessTimes[i] += sdkGetAverageTimerValue(&cpuAccessTimer);
sdkResetTimer(&cpuAccessTimer);
if (isAsync && hintsRequired) {
*latch = 0;
// Prevent any work on stream from starting until all work is pushed
hipLaunchKernelGGL(( spinWhileLessThanOne), dim3(1), dim3(1), 0, streamToRunOn, latch);
}
if (someTransferOpRequired) {
sdkStartTimer(&gpuTransferCallsTimer);
if (copyRequired) {
if (isAsync) {
checkCudaErrors(hipMemcpyAsync(
dptrA, hptrA, size, hipMemcpyHostToDevice, streamToRunOn));
checkCudaErrors(hipMemcpyAsync(
dptrB, hptrB, size, hipMemcpyHostToDevice, streamToRunOn));
} else {
checkCudaErrors(
hipMemcpy(dptrA, hptrA, size, hipMemcpyHostToDevice));
checkCudaErrors(
hipMemcpy(dptrB, hptrB, size, hipMemcpyHostToDevice));
}
}
if (hintsRequired) {
if (deviceProp.concurrentManagedAccess) {
checkCudaErrors(
hipMemPrefetchAsync(dptrA, size, device_id, streamToRunOn));
checkCudaErrors(
hipMemPrefetchAsync(dptrB, size, device_id, streamToRunOn));
checkCudaErrors(
hipMemPrefetchAsync(dptrC, size, device_id, streamToRunOn));
} else {
checkCudaErrors(hipStreamAttachMemAsync(streamToRunOn, dptrA, 0,
hipMemAttachGlobal));
checkCudaErrors(hipStreamAttachMemAsync(streamToRunOn, dptrB, 0,
hipMemAttachGlobal));
checkCudaErrors(hipStreamAttachMemAsync(streamToRunOn, dptrC, 0,
hipMemAttachGlobal));
}
if (!isAsync) {
checkCudaErrors(hipStreamSynchronize(streamToRunOn));
}
}
sdkStopTimer(&gpuTransferCallsTimer);
gpuTransferToCallsTimes[i] +=
sdkGetAverageTimerValue(&gpuTransferCallsTimer);
sdkResetTimer(&gpuTransferCallsTimer);
}
sdkStartTimer(&gpuLaunchCallsTimer);
{
hipLaunchKernelGGL(( matrixMultiplyKernel), dim3(grid), dim3(threads), 0, streamToRunOn,
dptrC, dptrA, dptrB, matrixDim);
if (!isAsync) {
checkCudaErrors(hipStreamSynchronize(streamToRunOn));
}
}
sdkStopTimer(&gpuLaunchCallsTimer);
gpuLaunchCallsTimes[i] += sdkGetAverageTimerValue(&gpuLaunchCallsTimer);
sdkResetTimer(&gpuLaunchCallsTimer);
if (someTransferOpRequired) {
sdkStartTimer(&gpuTransferCallsTimer);
if (hintsRequired) {
if (deviceProp.concurrentManagedAccess) {
checkCudaErrors(hipMemPrefetchAsync(dptrA, size, hipCpuDeviceId));
checkCudaErrors(hipMemPrefetchAsync(dptrB, size, hipCpuDeviceId));
checkCudaErrors(hipMemPrefetchAsync(dptrC, size, hipCpuDeviceId));
} else {
checkCudaErrors(hipStreamAttachMemAsync(streamToRunOn, dptrA, 0,
hipMemAttachHost));
checkCudaErrors(hipStreamAttachMemAsync(streamToRunOn, dptrB, 0,
hipMemAttachHost));
checkCudaErrors(hipStreamAttachMemAsync(streamToRunOn, dptrC, 0,
hipMemAttachHost));
}
if (!isAsync) {
checkCudaErrors(hipStreamSynchronize(streamToRunOn));
}
}
if (copyRequired) {
if (isAsync) {
checkCudaErrors(hipMemcpyAsync(
hptrC, dptrC, size, hipMemcpyDeviceToHost, streamToRunOn));
} else {
checkCudaErrors(
hipMemcpy(hptrC, dptrC, size, hipMemcpyDeviceToHost));
}
}
sdkStopTimer(&gpuTransferCallsTimer);
gpuTransferFromCallsTimes[i] +=
sdkGetAverageTimerValue(&gpuTransferCallsTimer);
sdkResetTimer(&gpuTransferCallsTimer);
}
gpuLaunchAndTransferCallsTimes[i] = gpuLaunchCallsTimes[i] +
gpuTransferToCallsTimes[i] +
gpuTransferFromCallsTimes[i];
gpuLaunchTransferSyncTimes[i] = gpuLaunchAndTransferCallsTimes[i];
if (isAsync) {
sdkStartTimer(&gpuSyncTimer);
{
if (hintsRequired) {
*latch = 1;
}
checkCudaErrors(hipStreamSynchronize(streamToRunOn));
}
sdkStopTimer(&gpuSyncTimer);
gpuLaunchTransferSyncTimes[i] += sdkGetAverageTimerValue(&gpuSyncTimer);
sdkResetTimer(&gpuSyncTimer);
}
sdkStartTimer(&cpuAccessTimer);
{
verifyMatrixData(
          ((i & 0x1) == 0) ? randValuesVerifyXmulY : randValuesVerifyYmulX, hptrC,
matrixDim);
}
sdkStopTimer(&cpuAccessTimer);
cpuAccessTimes[i] += sdkGetAverageTimerValue(&cpuAccessTimer);
sdkResetTimer(&cpuAccessTimer);
overallTimes[i] = cpuAccessTimes[i] + gpuLaunchTransferSyncTimes[i];
}
switch (allocType) {
case USE_HOST_PAGEABLE_AND_DEVICE_MEMORY:
case USE_HOST_PAGEABLE_AND_DEVICE_MEMORY_ASYNC:
free(hptrA);
free(hptrB);
free(hptrC);
checkCudaErrors(hipFree(dptrA));
checkCudaErrors(hipFree(dptrB));
checkCudaErrors(hipFree(dptrC));
break;
case USE_HOST_PAGELOCKED_AND_DEVICE_MEMORY:
case USE_HOST_PAGELOCKED_AND_DEVICE_MEMORY_ASYNC:
checkCudaErrors(hipHostFree(hptrA));
checkCudaErrors(hipHostFree(hptrB));
checkCudaErrors(hipHostFree(hptrC));
checkCudaErrors(hipFree(dptrA));
checkCudaErrors(hipFree(dptrB));
checkCudaErrors(hipFree(dptrC));
break;
case USE_ZERO_COPY:
checkCudaErrors(hipHostFree(hptrA));
checkCudaErrors(hipHostFree(hptrB));
checkCudaErrors(hipHostFree(hptrC));
break;
case USE_MANAGED_MEMORY:
case USE_MANAGED_MEMORY_WITH_HINTS:
case USE_MANAGED_MEMORY_WITH_HINTS_ASYNC:
checkCudaErrors(hipFree(dptrA));
checkCudaErrors(hipFree(dptrB));
checkCudaErrors(hipFree(dptrC));
break;
default:
exit(EXIT_FAILURE); // exit due to error
}
checkCudaErrors(hipStreamDestroy(streamToRunOn));
checkCudaErrors(hipHostFree(latch));
free(randValuesX);
free(randValuesY);
free(randValuesVerifyXmulY);
free(randValuesVerifyYmulX);
sdkDeleteTimer(&gpuLaunchCallsTimer);
sdkDeleteTimer(&gpuTransferCallsTimer);
sdkDeleteTimer(&gpuSyncTimer);
sdkDeleteTimer(&cpuAccessTimer);
}
void matrixMultiplyPerfRunner(bool reportAsBandwidth,
bool print_launch_transfer_results,
bool print_std_deviation, int device_id) {
int i;
unsigned int minMatrixDim = 32;
unsigned int multiplierDim = 2;
unsigned int matrixDim;
unsigned int minSize = minMatrixDim * minMatrixDim * sizeof(float);
unsigned int maxSize =
(maxSampleSizeInMb * ONE_MB) /
4; // 3 buffers are used, but dividing by 4 (power of 2)
unsigned int multiplier = multiplierDim * multiplierDim;
unsigned int numSizesToTest;
struct testResults *results;
struct resultsData *gpuLaunchCallsTimes;
struct resultsData *gpuTransferToCallsTimes;
struct resultsData *gpuTransferFromCallsTimes;
struct resultsData *gpuLaunchAndTransferCallsTimes;
struct resultsData *gpuLaunchTransferSyncTimes;
struct resultsData *cpuAccessTimes;
struct resultsData *overallTimes;
unsigned long *sizesToTest;
unsigned int j;
numSizesToTest = findNumSizesToTest(minSize, maxSize, multiplier);
createAndInitTestResults(&results, "matrixMultiplyPerf", numKernelRuns,
numSizesToTest);
sizesToTest = getPtrSizesToTest(results);
createResultDataAndAddToTestResults(&gpuLaunchCallsTimes, results,
"GPU Kernel Launch Call Time", false,
reportAsBandwidth);
createResultDataAndAddToTestResults(&gpuTransferToCallsTimes, results,
"CPU to GPU Transfer Calls Time", false,
reportAsBandwidth);
createResultDataAndAddToTestResults(&gpuTransferFromCallsTimes, results,
"GPU to CPU Transfer Calls Time", false,
reportAsBandwidth);
createResultDataAndAddToTestResults(&gpuLaunchAndTransferCallsTimes, results,
"GPU Launch and Transfer Calls Time",
false, reportAsBandwidth);
createResultDataAndAddToTestResults(&gpuLaunchTransferSyncTimes, results,
"GPU Launch Transfer and Sync Time",
false, reportAsBandwidth);
createResultDataAndAddToTestResults(
&cpuAccessTimes, results, "CPU Access Time", false, reportAsBandwidth);
createResultDataAndAddToTestResults(&overallTimes, results, "Overall Time",
false, reportAsBandwidth);
printf("Running ");
for (matrixDim = minMatrixDim, j = 0;
matrixDim * matrixDim <= maxSize / sizeof(float);
matrixDim *= multiplierDim, ++j) {
sizesToTest[j] = matrixDim * matrixDim * sizeof(float);
for (i = MEMALLOC_TYPE_START; i <= MEMALLOC_TYPE_END; i++) {
printf(".");
fflush(stdout);
runMatrixMultiplyKernel(
matrixDim, i, numKernelRuns,
getPtrRunTimesInMs(gpuLaunchCallsTimes, i, j),
getPtrRunTimesInMs(gpuTransferToCallsTimes, i, j),
getPtrRunTimesInMs(gpuTransferFromCallsTimes, i, j),
getPtrRunTimesInMs(gpuLaunchAndTransferCallsTimes, i, j),
getPtrRunTimesInMs(gpuLaunchTransferSyncTimes, i, j),
getPtrRunTimesInMs(cpuAccessTimes, i, j),
getPtrRunTimesInMs(overallTimes, i, j), device_id);
}
}
printf("\n");
printResults(results, print_launch_transfer_results, print_std_deviation);
freeTestResultsAndAllResultsData(results);
}
static void usage() {
printf(
"./cudaMemoryTypesPerf [-device=<device_id>] [-reportAsBandwidth] "
"[-print-launch-transfer-results] [-print-std-deviation] [-verbose]\n");
printf("Options:\n");
printf(
"-reportAsBandwidth: By default time taken is printed, this "
"option allows to instead print bandwidth.\n");
printf(
"-print-launch-transfer-results: By default overall results are printed, "
"this option allows to print data transfers and kernel time as well.\n");
printf(
"-print-std-deviation: Prints std deviation of the results.\n");
printf(
"-kernel-iterations=<num>: Number of times the kernel tests should "
"be run[default is 100 iterations].\n");
printf(
"-device=<device_id>: Allows to pass GPU Device ID on which "
"the tests will be run.\n");
printf("-verbose: Prints highly verbose output.\n");
}
int main(int argc, char **argv) {
bool reportAsBandwidth = false;
bool print_launch_transfer_results = false;
bool print_std_deviation = false;
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "h")) {
usage();
printf("&&&& %s WAIVED\n", argv[0]);
exit(EXIT_WAIVED);
}
if (checkCmdLineFlag(argc, (const char **)argv, "reportAsBandwidth")) {
reportAsBandwidth = true;
}
if (checkCmdLineFlag(argc, (const char **)argv,
"print-launch-transfer-results")) {
print_launch_transfer_results = true;
}
if (checkCmdLineFlag(argc, (const char **)argv, "print-std-deviation")) {
print_std_deviation = true;
}
if (checkCmdLineFlag(argc, (const char **)argv, "kernel-iterations")) {
numKernelRuns =
getCmdLineArgumentInt(argc, (const char **)argv, "kernel-iterations");
}
if (checkCmdLineFlag(argc, (const char **)argv, "verbose")) {
verboseResults = 1;
}
int device_id = findCudaDevice(argc, (const char **)argv);
matrixMultiplyPerfRunner(reportAsBandwidth, print_launch_transfer_results,
print_std_deviation, device_id);
printf(
"\nNOTE: The CUDA Samples are not meant for performance measurements. "
"Results may vary when GPU Boost is enabled.\n");
exit(EXIT_SUCCESS);
}
| 4a5c15f52fa6495253730ac5a668f6d89baafdf2.cu | /* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <helper_cuda.h>
#include <helper_timer.h>
#include "commonDefs.hpp"
#include "commonKernels.hpp"
#define VERIFY_GPU_CORRECTNESS 0
size_t maxSampleSizeInMb = 64;
int numKernelRuns = 100;
int verboseResults = 0;
const char *memAllocTypeStr[MEMALLOC_TYPE_COUNT] = {
"Managed_Memory_With_Hints",
"Managed_Memory_With_Hints_FullyAsync",
"Managed_Memory_NoHints",
"Zero_Copy",
"Memcpy_HostMalloc_DeviceCudaMalloc",
"MemcpyAsync_HostMalloc_DeviceCudaMalloc",
"Memcpy_HostCudaHostAlloc_DeviceCudaMalloc",
"MemcpyAsync_HostCudaHostAlloc_DeviceCudaMalloc"};
const char *memAllocTypeShortStr[MEMALLOC_TYPE_COUNT] = {
"UMhint", // Managed Memory With Hints
"UMhntAs", // Managed Memory With_Hints Async
"UMeasy", // Managed_Memory with No Hints
"0Copy", // Zero Copy
"MemCopy", // USE HOST PAGEABLE AND DEVICE_MEMORY
"CpAsync", // USE HOST PAGEABLE AND DEVICE_MEMORY ASYNC
"CpHpglk", // USE HOST PAGELOCKED AND DEVICE MEMORY
"CpPglAs" // USE HOST PAGELOCKED AND DEVICE MEMORY ASYNC
};
static float RandFloat(float low, float high) {
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
void fillMatrixWithRandomValues(float *matrix, unsigned int matrixDim) {
unsigned int i, j;
for (i = 0; i < matrixDim; ++i) {
for (j = 0; j < matrixDim; ++j) {
matrix[j + i * matrixDim] = RandFloat(0.0f, 10.0f);
}
}
}
#if VERIFY_GPU_CORRECTNESS
void verifyMatrixMultiplyCorrectness(float *C, float *A, float *B,
unsigned int matrixDim) {
unsigned int i, j, k, numErrors = 0;
for (i = 0; i < matrixDim; ++i) {
for (j = 0; j < matrixDim; ++j) {
float result = 0.0f;
for (k = 0; k < matrixDim; ++k) {
result += A[k + i * matrixDim] * B[j + k * matrixDim];
}
if (fabs(C[j + i * matrixDim] - result) > 0.001 * matrixDim) {
printf("At [%u, %u]: Expected %f, Found %f\n", i, j, result,
C[j + i * matrixDim]);
++numErrors;
}
}
}
if (numErrors != 0) {
    printf("%d value mismatches occurred\n", numErrors);
    fflush(stdout);
    exit(EXIT_FAILURE);  // exit since value mismatches occurred
}
}
#endif
void copyMatrix(float *dstMatrix, float *srcMatrix, unsigned int matrixDim) {
size_t size = matrixDim * matrixDim * sizeof(float);
memcpy(dstMatrix, srcMatrix, size);
}
void verifyMatrixData(float *expectedData, float *observedData,
unsigned int matrixDim) {
unsigned int i, j, numErrors = 0;
for (i = 0; i < matrixDim; ++i) {
for (j = 0; j < matrixDim; ++j) {
if (expectedData[j + i * matrixDim] != observedData[j + i * matrixDim]) {
++numErrors;
if (verboseResults) {
printf("At [%u, %u]: Expected %f, Found %f\n", i, j,
expectedData[j + i * matrixDim],
observedData[j + i * matrixDim]);
}
}
}
}
if (numErrors != 0) {
    printf("%d value mismatches occurred\n", numErrors);
    fflush(stdout);
    exit(EXIT_FAILURE);  // exit since value mismatches occurred
}
}
#define BLOCK_SIZE 32
__global__ void matrixMultiplyKernel(float *C, float *A, float *B,
unsigned int matrixDim) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
unsigned int wA = matrixDim;
unsigned int wB = matrixDim;
// Index of the first sub-matrix of A processed by the block
int aBegin = matrixDim * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
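// Note: matrixMultiplyKernel performs no bounds checks on its tile loads, so it
// effectively assumes matrixDim is a multiple of BLOCK_SIZE (32). The benchmark below
// keeps this true since matrix dimensions start at 32 and grow by a power-of-two
// multiplier.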
void runMatrixMultiplyKernel(unsigned int matrixDim, int allocType,
unsigned int numLoops, double *gpuLaunchCallsTimes,
double *gpuTransferToCallsTimes,
double *gpuTransferFromCallsTimes,
double *gpuLaunchAndTransferCallsTimes,
double *gpuLaunchTransferSyncTimes,
double *cpuAccessTimes, double *overallTimes,
int device_id) {
float *dptrA = NULL, *hptrA = NULL;
float *dptrB = NULL, *hptrB = NULL;
float *dptrC = NULL, *hptrC = NULL;
float *randValuesX = NULL, *randValuesY = NULL;
float *randValuesVerifyXmulY = NULL, *randValuesVerifyYmulX = NULL;
bool copyRequired = false, hintsRequired = false;
bool someTransferOpRequired;
bool isAsync = false;
cudaStream_t streamToRunOn;
unsigned int *latch;
size_t size = matrixDim * matrixDim * sizeof(float);
dim3 threads(32, 32);
dim3 grid(matrixDim / threads.x, matrixDim / threads.y);
StopWatchInterface *gpuLaunchCallsTimer = 0, *gpuTransferCallsTimer = 0;
StopWatchInterface *gpuSyncTimer = 0, *cpuAccessTimer = 0;
sdkCreateTimer(&gpuLaunchCallsTimer);
sdkCreateTimer(&gpuTransferCallsTimer);
sdkCreateTimer(&gpuSyncTimer);
sdkCreateTimer(&cpuAccessTimer);
unsigned int i;
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, device_id));
checkCudaErrors(cudaStreamCreate(&streamToRunOn));
randValuesX = (float *)malloc(size);
if (!randValuesX) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
randValuesY = (float *)malloc(size);
if (!randValuesY) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
randValuesVerifyXmulY = (float *)malloc(size);
if (!randValuesVerifyXmulY) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
randValuesVerifyYmulX = (float *)malloc(size);
if (!randValuesVerifyYmulX) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
checkCudaErrors(cudaMalloc(&dptrA, size));
checkCudaErrors(cudaMalloc(&dptrB, size));
checkCudaErrors(cudaMalloc(&dptrC, size));
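  // Reference pass: multiply the random matrices once in both orders (X*Y and Y*X) so
  // that the timed iterations below can verify their results against these products.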
fillMatrixWithRandomValues(randValuesX, matrixDim);
fillMatrixWithRandomValues(randValuesY, matrixDim);
checkCudaErrors(
cudaMemcpyAsync(dptrA, randValuesX, size, cudaMemcpyHostToDevice));
checkCudaErrors(
cudaMemcpyAsync(dptrB, randValuesY, size, cudaMemcpyHostToDevice));
matrixMultiplyKernel<<<grid, threads>>>(dptrC, dptrA, dptrB, matrixDim);
checkCudaErrors(cudaMemcpyAsync(randValuesVerifyXmulY, dptrC, size,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaStreamSynchronize(NULL));
matrixMultiplyKernel<<<grid, threads>>>(dptrC, dptrB, dptrA, matrixDim);
checkCudaErrors(cudaMemcpyAsync(randValuesVerifyYmulX, dptrC, size,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaStreamSynchronize(NULL));
#if VERIFY_GPU_CORRECTNESS
verifyMatrixMultiplyCorrectness(randValuesVerifyXmulY, randValuesX,
randValuesY, matrixDim);
verifyMatrixMultiplyCorrectness(randValuesVerifyYmulX, randValuesY,
randValuesX, matrixDim);
#endif
checkCudaErrors(cudaFree(dptrA));
checkCudaErrors(cudaFree(dptrB));
checkCudaErrors(cudaFree(dptrC));
checkCudaErrors(cudaMallocHost(&latch, sizeof(unsigned int)));
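  // The latch lives in pinned host memory so the device-side spin kernel
  // (spinWhileLessThanOne, presumably declared in commonKernels.hpp) can observe host
  // writes; in the Managed_Memory_With_Hints_FullyAsync mode the host releases it by
  // writing 1 only after all of the iteration's work has been enqueued.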
switch (allocType) {
case USE_HOST_PAGEABLE_AND_DEVICE_MEMORY:
case USE_HOST_PAGEABLE_AND_DEVICE_MEMORY_ASYNC:
hptrA = (float *)malloc(size);
if (!hptrA) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
hptrB = (float *)malloc(size);
if (!hptrB) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
hptrC = (float *)malloc(size);
if (!hptrC) {
exit(EXIT_FAILURE); // exit since memory allocation error
}
checkCudaErrors(cudaMalloc(&dptrA, size));
checkCudaErrors(cudaMalloc(&dptrB, size));
checkCudaErrors(cudaMalloc(&dptrC, size));
copyRequired = true;
break;
case USE_HOST_PAGELOCKED_AND_DEVICE_MEMORY:
case USE_HOST_PAGELOCKED_AND_DEVICE_MEMORY_ASYNC:
checkCudaErrors(cudaMallocHost(&hptrA, size));
checkCudaErrors(cudaMallocHost(&hptrB, size));
checkCudaErrors(cudaMallocHost(&hptrC, size));
checkCudaErrors(cudaMalloc(&dptrA, size));
checkCudaErrors(cudaMalloc(&dptrB, size));
checkCudaErrors(cudaMalloc(&dptrC, size));
copyRequired = true;
break;
case USE_ZERO_COPY:
checkCudaErrors(cudaMallocHost(&hptrA, size));
checkCudaErrors(cudaMallocHost(&hptrB, size));
checkCudaErrors(cudaMallocHost(&hptrC, size));
checkCudaErrors(cudaHostGetDevicePointer(&dptrA, hptrA, 0));
checkCudaErrors(cudaHostGetDevicePointer(&dptrB, hptrB, 0));
checkCudaErrors(cudaHostGetDevicePointer(&dptrC, hptrC, 0));
break;
case USE_MANAGED_MEMORY:
checkCudaErrors(cudaMallocManaged(&dptrA, size));
checkCudaErrors(cudaMallocManaged(&dptrB, size));
checkCudaErrors(cudaMallocManaged(&dptrC, size));
hptrA = dptrA;
hptrB = dptrB;
hptrC = dptrC;
break;
case USE_MANAGED_MEMORY_WITH_HINTS:
case USE_MANAGED_MEMORY_WITH_HINTS_ASYNC:
if (deviceProp.concurrentManagedAccess) {
checkCudaErrors(cudaMallocManaged(&dptrA, size));
checkCudaErrors(cudaMallocManaged(&dptrB, size));
checkCudaErrors(cudaMallocManaged(&dptrC, size));
checkCudaErrors(cudaMemPrefetchAsync(dptrA, size, cudaCpuDeviceId));
checkCudaErrors(cudaMemPrefetchAsync(dptrB, size, cudaCpuDeviceId));
checkCudaErrors(cudaMemPrefetchAsync(dptrC, size, cudaCpuDeviceId));
} else {
checkCudaErrors(cudaMallocManaged(&dptrA, size, cudaMemAttachHost));
checkCudaErrors(cudaMallocManaged(&dptrB, size, cudaMemAttachHost));
checkCudaErrors(cudaMallocManaged(&dptrC, size, cudaMemAttachHost));
}
hptrA = dptrA;
hptrB = dptrB;
hptrC = dptrC;
hintsRequired = true;
break;
default:
exit(EXIT_FAILURE); // exit with error
}
if (allocType == USE_HOST_PAGEABLE_AND_DEVICE_MEMORY_ASYNC ||
allocType == USE_HOST_PAGELOCKED_AND_DEVICE_MEMORY_ASYNC ||
allocType == USE_MANAGED_MEMORY_WITH_HINTS_ASYNC) {
isAsync = true;
}
someTransferOpRequired = copyRequired || hintsRequired;
// fill buffers with 0 to avoid any first access page-fault overheads.
memset(hptrA, 0, size);
memset(hptrB, 0, size);
memset(hptrC, 0, size);
for (i = 0; i < numLoops; i++) {
cpuAccessTimes[i] = 0.0;
gpuLaunchCallsTimes[i] = 0.0;
gpuTransferToCallsTimes[i] = 0.0;
gpuTransferFromCallsTimes[i] = 0.0;
sdkStartTimer(&cpuAccessTimer);
{
      copyMatrix(hptrA, ((i & 0x1) == 0) ? randValuesX : randValuesY, matrixDim);
      copyMatrix(hptrB, ((i & 0x1) == 0) ? randValuesY : randValuesX, matrixDim);
}
sdkStopTimer(&cpuAccessTimer);
cpuAccessTimes[i] += sdkGetAverageTimerValue(&cpuAccessTimer);
sdkResetTimer(&cpuAccessTimer);
if (isAsync && hintsRequired) {
*latch = 0;
// Prevent any work on stream from starting until all work is pushed
spinWhileLessThanOne<<<1, 1, 0, streamToRunOn>>>(latch);
}
if (someTransferOpRequired) {
sdkStartTimer(&gpuTransferCallsTimer);
if (copyRequired) {
if (isAsync) {
checkCudaErrors(cudaMemcpyAsync(
dptrA, hptrA, size, cudaMemcpyHostToDevice, streamToRunOn));
checkCudaErrors(cudaMemcpyAsync(
dptrB, hptrB, size, cudaMemcpyHostToDevice, streamToRunOn));
} else {
checkCudaErrors(
cudaMemcpy(dptrA, hptrA, size, cudaMemcpyHostToDevice));
checkCudaErrors(
cudaMemcpy(dptrB, hptrB, size, cudaMemcpyHostToDevice));
}
}
if (hintsRequired) {
if (deviceProp.concurrentManagedAccess) {
checkCudaErrors(
cudaMemPrefetchAsync(dptrA, size, device_id, streamToRunOn));
checkCudaErrors(
cudaMemPrefetchAsync(dptrB, size, device_id, streamToRunOn));
checkCudaErrors(
cudaMemPrefetchAsync(dptrC, size, device_id, streamToRunOn));
} else {
checkCudaErrors(cudaStreamAttachMemAsync(streamToRunOn, dptrA, 0,
cudaMemAttachGlobal));
checkCudaErrors(cudaStreamAttachMemAsync(streamToRunOn, dptrB, 0,
cudaMemAttachGlobal));
checkCudaErrors(cudaStreamAttachMemAsync(streamToRunOn, dptrC, 0,
cudaMemAttachGlobal));
}
if (!isAsync) {
checkCudaErrors(cudaStreamSynchronize(streamToRunOn));
}
}
sdkStopTimer(&gpuTransferCallsTimer);
gpuTransferToCallsTimes[i] +=
sdkGetAverageTimerValue(&gpuTransferCallsTimer);
sdkResetTimer(&gpuTransferCallsTimer);
}
sdkStartTimer(&gpuLaunchCallsTimer);
{
matrixMultiplyKernel<<<grid, threads, 0, streamToRunOn>>>(
dptrC, dptrA, dptrB, matrixDim);
if (!isAsync) {
checkCudaErrors(cudaStreamSynchronize(streamToRunOn));
}
}
sdkStopTimer(&gpuLaunchCallsTimer);
gpuLaunchCallsTimes[i] += sdkGetAverageTimerValue(&gpuLaunchCallsTimer);
sdkResetTimer(&gpuLaunchCallsTimer);
if (someTransferOpRequired) {
sdkStartTimer(&gpuTransferCallsTimer);
if (hintsRequired) {
if (deviceProp.concurrentManagedAccess) {
checkCudaErrors(cudaMemPrefetchAsync(dptrA, size, cudaCpuDeviceId));
checkCudaErrors(cudaMemPrefetchAsync(dptrB, size, cudaCpuDeviceId));
checkCudaErrors(cudaMemPrefetchAsync(dptrC, size, cudaCpuDeviceId));
} else {
checkCudaErrors(cudaStreamAttachMemAsync(streamToRunOn, dptrA, 0,
cudaMemAttachHost));
checkCudaErrors(cudaStreamAttachMemAsync(streamToRunOn, dptrB, 0,
cudaMemAttachHost));
checkCudaErrors(cudaStreamAttachMemAsync(streamToRunOn, dptrC, 0,
cudaMemAttachHost));
}
if (!isAsync) {
checkCudaErrors(cudaStreamSynchronize(streamToRunOn));
}
}
if (copyRequired) {
if (isAsync) {
checkCudaErrors(cudaMemcpyAsync(
hptrC, dptrC, size, cudaMemcpyDeviceToHost, streamToRunOn));
} else {
checkCudaErrors(
cudaMemcpy(hptrC, dptrC, size, cudaMemcpyDeviceToHost));
}
}
sdkStopTimer(&gpuTransferCallsTimer);
gpuTransferFromCallsTimes[i] +=
sdkGetAverageTimerValue(&gpuTransferCallsTimer);
sdkResetTimer(&gpuTransferCallsTimer);
}
gpuLaunchAndTransferCallsTimes[i] = gpuLaunchCallsTimes[i] +
gpuTransferToCallsTimes[i] +
gpuTransferFromCallsTimes[i];
gpuLaunchTransferSyncTimes[i] = gpuLaunchAndTransferCallsTimes[i];
if (isAsync) {
sdkStartTimer(&gpuSyncTimer);
{
if (hintsRequired) {
*latch = 1;
}
checkCudaErrors(cudaStreamSynchronize(streamToRunOn));
}
sdkStopTimer(&gpuSyncTimer);
gpuLaunchTransferSyncTimes[i] += sdkGetAverageTimerValue(&gpuSyncTimer);
sdkResetTimer(&gpuSyncTimer);
}
sdkStartTimer(&cpuAccessTimer);
{
verifyMatrixData(
          ((i & 0x1) == 0) ? randValuesVerifyXmulY : randValuesVerifyYmulX, hptrC,
matrixDim);
}
sdkStopTimer(&cpuAccessTimer);
cpuAccessTimes[i] += sdkGetAverageTimerValue(&cpuAccessTimer);
sdkResetTimer(&cpuAccessTimer);
overallTimes[i] = cpuAccessTimes[i] + gpuLaunchTransferSyncTimes[i];
}
switch (allocType) {
case USE_HOST_PAGEABLE_AND_DEVICE_MEMORY:
case USE_HOST_PAGEABLE_AND_DEVICE_MEMORY_ASYNC:
free(hptrA);
free(hptrB);
free(hptrC);
checkCudaErrors(cudaFree(dptrA));
checkCudaErrors(cudaFree(dptrB));
checkCudaErrors(cudaFree(dptrC));
break;
case USE_HOST_PAGELOCKED_AND_DEVICE_MEMORY:
case USE_HOST_PAGELOCKED_AND_DEVICE_MEMORY_ASYNC:
checkCudaErrors(cudaFreeHost(hptrA));
checkCudaErrors(cudaFreeHost(hptrB));
checkCudaErrors(cudaFreeHost(hptrC));
checkCudaErrors(cudaFree(dptrA));
checkCudaErrors(cudaFree(dptrB));
checkCudaErrors(cudaFree(dptrC));
break;
case USE_ZERO_COPY:
checkCudaErrors(cudaFreeHost(hptrA));
checkCudaErrors(cudaFreeHost(hptrB));
checkCudaErrors(cudaFreeHost(hptrC));
break;
case USE_MANAGED_MEMORY:
case USE_MANAGED_MEMORY_WITH_HINTS:
case USE_MANAGED_MEMORY_WITH_HINTS_ASYNC:
checkCudaErrors(cudaFree(dptrA));
checkCudaErrors(cudaFree(dptrB));
checkCudaErrors(cudaFree(dptrC));
break;
default:
exit(EXIT_FAILURE); // exit due to error
}
checkCudaErrors(cudaStreamDestroy(streamToRunOn));
checkCudaErrors(cudaFreeHost(latch));
free(randValuesX);
free(randValuesY);
free(randValuesVerifyXmulY);
free(randValuesVerifyYmulX);
sdkDeleteTimer(&gpuLaunchCallsTimer);
sdkDeleteTimer(&gpuTransferCallsTimer);
sdkDeleteTimer(&gpuSyncTimer);
sdkDeleteTimer(&cpuAccessTimer);
}
void matrixMultiplyPerfRunner(bool reportAsBandwidth,
bool print_launch_transfer_results,
bool print_std_deviation, int device_id) {
int i;
unsigned int minMatrixDim = 32;
unsigned int multiplierDim = 2;
unsigned int matrixDim;
unsigned int minSize = minMatrixDim * minMatrixDim * sizeof(float);
unsigned int maxSize =
(maxSampleSizeInMb * ONE_MB) /
4; // 3 buffers are used, but dividing by 4 (power of 2)
unsigned int multiplier = multiplierDim * multiplierDim;
unsigned int numSizesToTest;
struct testResults *results;
struct resultsData *gpuLaunchCallsTimes;
struct resultsData *gpuTransferToCallsTimes;
struct resultsData *gpuTransferFromCallsTimes;
struct resultsData *gpuLaunchAndTransferCallsTimes;
struct resultsData *gpuLaunchTransferSyncTimes;
struct resultsData *cpuAccessTimes;
struct resultsData *overallTimes;
unsigned long *sizesToTest;
unsigned int j;
numSizesToTest = findNumSizesToTest(minSize, maxSize, multiplier);
createAndInitTestResults(&results, "matrixMultiplyPerf", numKernelRuns,
numSizesToTest);
sizesToTest = getPtrSizesToTest(results);
createResultDataAndAddToTestResults(&gpuLaunchCallsTimes, results,
"GPU Kernel Launch Call Time", false,
reportAsBandwidth);
createResultDataAndAddToTestResults(&gpuTransferToCallsTimes, results,
"CPU to GPU Transfer Calls Time", false,
reportAsBandwidth);
createResultDataAndAddToTestResults(&gpuTransferFromCallsTimes, results,
"GPU to CPU Transfer Calls Time", false,
reportAsBandwidth);
createResultDataAndAddToTestResults(&gpuLaunchAndTransferCallsTimes, results,
"GPU Launch and Transfer Calls Time",
false, reportAsBandwidth);
createResultDataAndAddToTestResults(&gpuLaunchTransferSyncTimes, results,
"GPU Launch Transfer and Sync Time",
false, reportAsBandwidth);
createResultDataAndAddToTestResults(
&cpuAccessTimes, results, "CPU Access Time", false, reportAsBandwidth);
createResultDataAndAddToTestResults(&overallTimes, results, "Overall Time",
false, reportAsBandwidth);
printf("Running ");
for (matrixDim = minMatrixDim, j = 0;
matrixDim * matrixDim <= maxSize / sizeof(float);
matrixDim *= multiplierDim, ++j) {
sizesToTest[j] = matrixDim * matrixDim * sizeof(float);
for (i = MEMALLOC_TYPE_START; i <= MEMALLOC_TYPE_END; i++) {
printf(".");
fflush(stdout);
runMatrixMultiplyKernel(
matrixDim, i, numKernelRuns,
getPtrRunTimesInMs(gpuLaunchCallsTimes, i, j),
getPtrRunTimesInMs(gpuTransferToCallsTimes, i, j),
getPtrRunTimesInMs(gpuTransferFromCallsTimes, i, j),
getPtrRunTimesInMs(gpuLaunchAndTransferCallsTimes, i, j),
getPtrRunTimesInMs(gpuLaunchTransferSyncTimes, i, j),
getPtrRunTimesInMs(cpuAccessTimes, i, j),
getPtrRunTimesInMs(overallTimes, i, j), device_id);
}
}
printf("\n");
printResults(results, print_launch_transfer_results, print_std_deviation);
freeTestResultsAndAllResultsData(results);
}
static void usage() {
printf(
"./cudaMemoryTypesPerf [-device=<device_id>] [-reportAsBandwidth] "
"[-print-launch-transfer-results] [-print-std-deviation] [-verbose]\n");
printf("Options:\n");
printf(
"-reportAsBandwidth: By default time taken is printed, this "
"option allows to instead print bandwidth.\n");
printf(
"-print-launch-transfer-results: By default overall results are printed, "
"this option allows to print data transfers and kernel time as well.\n");
printf(
"-print-std-deviation: Prints std deviation of the results.\n");
printf(
"-kernel-iterations=<num>: Number of times the kernel tests should "
"be run[default is 100 iterations].\n");
printf(
"-device=<device_id>: Allows to pass GPU Device ID on which "
"the tests will be run.\n");
printf("-verbose: Prints highly verbose output.\n");
}
int main(int argc, char **argv) {
bool reportAsBandwidth = false;
bool print_launch_transfer_results = false;
bool print_std_deviation = false;
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "h")) {
usage();
printf("&&&& %s WAIVED\n", argv[0]);
exit(EXIT_WAIVED);
}
if (checkCmdLineFlag(argc, (const char **)argv, "reportAsBandwidth")) {
reportAsBandwidth = true;
}
if (checkCmdLineFlag(argc, (const char **)argv,
"print-launch-transfer-results")) {
print_launch_transfer_results = true;
}
if (checkCmdLineFlag(argc, (const char **)argv, "print-std-deviation")) {
print_std_deviation = true;
}
if (checkCmdLineFlag(argc, (const char **)argv, "kernel-iterations")) {
numKernelRuns =
getCmdLineArgumentInt(argc, (const char **)argv, "kernel-iterations");
}
if (checkCmdLineFlag(argc, (const char **)argv, "verbose")) {
verboseResults = 1;
}
int device_id = findCudaDevice(argc, (const char **)argv);
matrixMultiplyPerfRunner(reportAsBandwidth, print_launch_transfer_results,
print_std_deviation, device_id);
printf(
"\nNOTE: The CUDA Samples are not meant for performance measurements. "
"Results may vary when GPU Boost is enabled.\n");
exit(EXIT_SUCCESS);
}
|
bbef71fffbd181867631cf387a888b56a7fe608f.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
using namespace std;
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <math.h>
#include "cs_dbg.h"
#include "cs_helper.h"
#include "cs_matrix.h"
#include "cs_decode_parser.h"
#include "cs_quantize.h"
#define CUDA_DBG
#define BUF_SIZE_INT 10
#define BUF_SIZE_INT1 8
int *dp1, *dp2, *dp3 ;
int *dp11, *dp22, *dp33 ;
int buf1[ BUF_SIZE_INT ] ;
float dbuf[ BUF_SIZE_INT ] ;
float fres, *fp1 ;
float hd1[] = { 0.2, 0.4, 0.6 } ;
float hd2[] = { 1.2, 1.4, 1.6 } ;
struct CS_EncParams CS_EncParams ; // 1
struct RawVidInfo RawVidInfo ; // 2
struct VidRegion VidRegion ; // 3
struct SensingMatrixWH SensingMatrixWH ; // 4
struct UniformQuantizer UniformQuantizer ; // 5
struct QuantMeasurementsBasic QuantMeasurementsBasic ; // 6
int msr_idx[10000 ] ;
int
main( int ac, char *av[] )
{
int *d_int, i, type, k ;
int first = 1 ;
setbuf( stdout, NULL ) ;
setbuf( stderr, NULL ) ;
dbg_init ( 1024 * 1024 ) ;
QuantMeasurementsBasic.h_msr_idxp = msr_idx ;
hipMalloc ( &d_int, 10000 * sizeof ( int )) ;
if ( ac != 2 )
{
printf("usage : %s csvid_file\n", av[0] ) ;
exit( 2 ) ;
}
if (! cs_decode_parser_init( av[1], 10000 ))
{
printf("%s : failed init\n", __func__ ) ;
exit( 3 ) ;
}
k = 10000 ;
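	/* Read up to 10000 typed elements from the input file, dispatching on the type tag
	   returned by get_next_type(). Type 6 (QuantMeasurementsBasic) additionally copies
	   the measurement indices to the device and runs h_do_unquan_adj_index on them. */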
while ( k-- )
{
i = get_next_type ( &type ) ;
if ( i == 0 )
{
printf("%s : failed type \n") ;
exit( 3 ) ;
} else if ( i < 0 )
{
printf("%s : eof\n", __func__ ) ;
exit( 0 ) ;
}
printf("TYPE %d --------------------------------------------------\n", type ) ;
switch ( type ) {
case 1 :
if ( !get_next_element ( type, ( void *)&CS_EncParams ))
exit( 3 ) ;
p_element( type, "from 1", ( void *)&CS_EncParams ) ;
break ;
case 2 :
if ( !get_next_element ( type, ( void *)&RawVidInfo ))
exit ( 2 ) ;
p_element( type, "from 2", ( void *)&RawVidInfo ) ;
break ;
case 3 :
if ( !get_next_element ( type, ( void *)&VidRegion ))
exit ( 2 ) ;
p_element( type, "from 3", ( void *)&VidRegion ) ;
break ;
case 4 :
if ( !get_next_element ( type, ( void *)&SensingMatrixWH ))
exit ( 2 ) ;
p_element( type, "from 4", ( void *)&SensingMatrixWH ) ;
break ;
case 5 :
if ( !get_next_element ( type, ( void *)&UniformQuantizer ))
exit ( 2 ) ;
p_element( type, "from 5", ( void *)&UniformQuantizer ) ;
break ;
case 6 :
if ( !get_next_element ( type, ( void *)&QuantMeasurementsBasic ))
exit ( 2 ) ;
p_element( type, "from 6", ( void *)&QuantMeasurementsBasic ) ;
put_d_data_i ( d_int, msr_idx, sizeof ( int ) * QuantMeasurementsBasic.lenb ) ;
h_do_unquan_adj_index ( d_int, QuantMeasurementsBasic.lenb, QuantMeasurementsBasic.noclip,
QuantMeasurementsBasic.nbin/2 -1,
QuantMeasurementsBasic.nbin ) ;
if ( first )
{
first = 0 ;
cs_decode_parser_reinit ( 12000 ) ;
}
break ;
default :
printf("%s :: wrong default type %d \n", type ) ;
exit( 4 ) ;
}
}
}
| bbef71fffbd181867631cf387a888b56a7fe608f.cu |
#include <iostream>
using namespace std;
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <math.h>
#include "cs_dbg.h"
#include "cs_helper.h"
#include "cs_matrix.h"
#include "cs_decode_parser.h"
#include "cs_quantize.h"
#define CUDA_DBG
#define BUF_SIZE_INT 10
#define BUF_SIZE_INT1 8
int *dp1, *dp2, *dp3 ;
int *dp11, *dp22, *dp33 ;
int buf1[ BUF_SIZE_INT ] ;
float dbuf[ BUF_SIZE_INT ] ;
float fres, *fp1 ;
float hd1[] = { 0.2, 0.4, 0.6 } ;
float hd2[] = { 1.2, 1.4, 1.6 } ;
struct CS_EncParams CS_EncParams ; // 1
struct RawVidInfo RawVidInfo ; // 2
struct VidRegion VidRegion ; // 3
struct SensingMatrixWH SensingMatrixWH ; // 4
struct UniformQuantizer UniformQuantizer ; // 5
struct QuantMeasurementsBasic QuantMeasurementsBasic ; // 6
int msr_idx[10000 ] ;
int
main( int ac, char *av[] )
{
int *d_int, i, type, k ;
int first = 1 ;
setbuf( stdout, NULL ) ;
setbuf( stderr, NULL ) ;
dbg_init ( 1024 * 1024 ) ;
QuantMeasurementsBasic.h_msr_idxp = msr_idx ;
cudaMalloc ( &d_int, 10000 * sizeof ( int )) ;
if ( ac != 2 )
{
printf("usage : %s csvid_file\n", av[0] ) ;
exit( 2 ) ;
}
if (! cs_decode_parser_init( av[1], 10000 ))
{
printf("%s : failed init\n", __func__ ) ;
exit( 3 ) ;
}
k = 10000 ;
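	/* Read up to 10000 typed elements from the input file, dispatching on the type tag
	   returned by get_next_type(). Type 6 (QuantMeasurementsBasic) additionally copies
	   the measurement indices to the device and runs h_do_unquan_adj_index on them. */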
while ( k-- )
{
i = get_next_type ( &type ) ;
if ( i == 0 )
{
printf("%s : failed type \n") ;
exit( 3 ) ;
} else if ( i < 0 )
{
printf("%s : eof\n", __func__ ) ;
exit( 0 ) ;
}
printf("TYPE %d --------------------------------------------------\n", type ) ;
switch ( type ) {
case 1 :
if ( !get_next_element ( type, ( void *)&CS_EncParams ))
exit( 3 ) ;
p_element( type, "from 1", ( void *)&CS_EncParams ) ;
break ;
case 2 :
if ( !get_next_element ( type, ( void *)&RawVidInfo ))
exit ( 2 ) ;
p_element( type, "from 2", ( void *)&RawVidInfo ) ;
break ;
case 3 :
if ( !get_next_element ( type, ( void *)&VidRegion ))
exit ( 2 ) ;
p_element( type, "from 3", ( void *)&VidRegion ) ;
break ;
case 4 :
if ( !get_next_element ( type, ( void *)&SensingMatrixWH ))
exit ( 2 ) ;
p_element( type, "from 4", ( void *)&SensingMatrixWH ) ;
break ;
case 5 :
if ( !get_next_element ( type, ( void *)&UniformQuantizer ))
exit ( 2 ) ;
p_element( type, "from 5", ( void *)&UniformQuantizer ) ;
break ;
case 6 :
if ( !get_next_element ( type, ( void *)&QuantMeasurementsBasic ))
exit ( 2 ) ;
p_element( type, "from 6", ( void *)&QuantMeasurementsBasic ) ;
put_d_data_i ( d_int, msr_idx, sizeof ( int ) * QuantMeasurementsBasic.lenb ) ;
h_do_unquan_adj_index ( d_int, QuantMeasurementsBasic.lenb, QuantMeasurementsBasic.noclip,
QuantMeasurementsBasic.nbin/2 -1,
QuantMeasurementsBasic.nbin ) ;
if ( first )
{
first = 0 ;
cs_decode_parser_reinit ( 12000 ) ;
}
break ;
default :
printf("%s :: wrong default type %d \n", type ) ;
exit( 4 ) ;
}
}
}
|
c2627ee57f02aa35459800a6590a23c51f8fd922.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "WarpSelectImpl_hip.cuh"
namespace faiss { namespace gpu {
WARP_SELECT_IMPL(float, true, 128, 3);
WARP_SELECT_IMPL(float, false, 128, 3);
} } // namespace
| c2627ee57f02aa35459800a6590a23c51f8fd922.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "WarpSelectImpl.cuh"
namespace faiss { namespace gpu {
WARP_SELECT_IMPL(float, true, 128, 3);
WARP_SELECT_IMPL(float, false, 128, 3);
} } // namespace
|
3a44931172f4b50a0d4f47eec8eefe44d85bcdc8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// nvcc -std=c++11 -arch=sm_61 -shared ani_op.cc.cu kernel_cpu.o kernel.cu -o ani.so ${TF_CFLAGS[@]} ${TF_LFLAGS[@]} -I ~/Code/cub-1.8.0/ -Xcompiler -fPIC -O3 -D GOOGLE_CUDA=1 -I /usr/local/ --expt-relaxed-constexpr -ltensorflow_framework
#ifdef GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "tensorflow/core/util/cuda_kernel_helper.h"
using namespace tensorflow;
#include "functor_op.h"
#include "parameters.h"
#include "kernel_hip.cuh"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using GPUDevice = Eigen::GpuDevice;
// template<>
// void AniFunctor<GPUDevice>::operator()(
// const GPUDevice& d,
// const float *Xs,
// const float *Ys,
// const float *Zs,
// const int *atomic_nums,
// const int *mol_offsets,
// const int *mol_atom_count,
// const int num_mols, // actually equal to blockDim.x
// const int *scatter_idxs, // LOCAL WITHIN THE ATOM TYPE
// float *X_feat_out_H,
// float *X_feat_out_C,
// float *X_feat_out_N,
// float *X_feat_out_O,
// const int *acs) {
// gpuErrchk(hipMemsetAsync(X_feat_out_H, 0, acs[0]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
// gpuErrchk(hipMemsetAsync(X_feat_out_C, 0, acs[1]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
// gpuErrchk(hipMemsetAsync(X_feat_out_N, 0, acs[2]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
// gpuErrchk(hipMemsetAsync(X_feat_out_O, 0, acs[3]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
// if(num_mols > 0) {
// // gpu kernel's can't be launched with a zero blockdim
// hipLaunchKernelGGL(( featurize), dim3(num_mols), dim3(32), 0, d.stream(),
// Xs, Ys, Zs, atomic_nums, mol_offsets, mol_atom_count, num_mols, scatter_idxs,
// X_feat_out_H, X_feat_out_C, X_feat_out_N, X_feat_out_O);
// gpuErrchk(hipPeekAtLastError());
// }
// };
// template struct AniFunctor<GPUDevice>;
template<typename NumericType>
struct AniFunctor<GPUDevice, NumericType> {
void operator()(
const GPUDevice& d,
const float *Xs,
const float *Ys,
const float *Zs,
const int *atomic_nums,
const int *mol_offsets,
const int *mol_atom_count,
const int num_mols, // actually equal to blockDim.x
const int *scatter_idxs, // LOCAL WITHIN THE ATOM TYPE
float *X_feat_out_H,
float *X_feat_out_C,
float *X_feat_out_N,
float *X_feat_out_O,
const int *acs) {
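    // Zero the per-atom-type output feature buffers before the kernel scatters into
    // them. The byte counts are written with sizeof(int), which equals sizeof(float)
    // on CUDA platforms, so the sizes are still correct for these float buffers.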
gpuErrchk(hipMemsetAsync(X_feat_out_H, 0, acs[0]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
gpuErrchk(hipMemsetAsync(X_feat_out_C, 0, acs[1]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
gpuErrchk(hipMemsetAsync(X_feat_out_N, 0, acs[2]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
gpuErrchk(hipMemsetAsync(X_feat_out_O, 0, acs[3]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
if(num_mols > 0) {
// gpu kernel's can't be launched with a zero blockdim
hipLaunchKernelGGL(( featurize), dim3(num_mols), dim3(32), 0, d.stream(),
Xs, Ys, Zs, atomic_nums, mol_offsets, mol_atom_count, num_mols, scatter_idxs,
X_feat_out_H, X_feat_out_C, X_feat_out_N, X_feat_out_O);
gpuErrchk(hipPeekAtLastError());
}
}
};
// instantiation
template struct AniFunctor<GPUDevice, float>;
#endif | 3a44931172f4b50a0d4f47eec8eefe44d85bcdc8.cu | // nvcc -std=c++11 -arch=sm_61 -shared ani_op.cc.cu kernel_cpu.o kernel.cu -o ani.so ${TF_CFLAGS[@]} ${TF_LFLAGS[@]} -I ~/Code/cub-1.8.0/ -Xcompiler -fPIC -O3 -D GOOGLE_CUDA=1 -I /usr/local/ --expt-relaxed-constexpr -ltensorflow_framework
#ifdef GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "tensorflow/core/util/cuda_kernel_helper.h"
using namespace tensorflow;
#include "functor_op.h"
#include "parameters.h"
#include "kernel.cuh"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using GPUDevice = Eigen::GpuDevice;
// template<>
// void AniFunctor<GPUDevice>::operator()(
// const GPUDevice& d,
// const float *Xs,
// const float *Ys,
// const float *Zs,
// const int *atomic_nums,
// const int *mol_offsets,
// const int *mol_atom_count,
// const int num_mols, // actually equal to blockDim.x
// const int *scatter_idxs, // LOCAL WITHIN THE ATOM TYPE
// float *X_feat_out_H,
// float *X_feat_out_C,
// float *X_feat_out_N,
// float *X_feat_out_O,
// const int *acs) {
// gpuErrchk(cudaMemsetAsync(X_feat_out_H, 0, acs[0]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
// gpuErrchk(cudaMemsetAsync(X_feat_out_C, 0, acs[1]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
// gpuErrchk(cudaMemsetAsync(X_feat_out_N, 0, acs[2]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
// gpuErrchk(cudaMemsetAsync(X_feat_out_O, 0, acs[3]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
// if(num_mols > 0) {
// // gpu kernel's can't be launched with a zero blockdim
// featurize<<<num_mols, 32, 0, d.stream()>>>(
// Xs, Ys, Zs, atomic_nums, mol_offsets, mol_atom_count, num_mols, scatter_idxs,
// X_feat_out_H, X_feat_out_C, X_feat_out_N, X_feat_out_O);
// gpuErrchk(cudaPeekAtLastError());
// }
// };
// template struct AniFunctor<GPUDevice>;
template<typename NumericType>
struct AniFunctor<GPUDevice, NumericType> {
void operator()(
const GPUDevice& d,
const float *Xs,
const float *Ys,
const float *Zs,
const int *atomic_nums,
const int *mol_offsets,
const int *mol_atom_count,
const int num_mols, // actually equal to blockDim.x
const int *scatter_idxs, // LOCAL WITHIN THE ATOM TYPE
float *X_feat_out_H,
float *X_feat_out_C,
float *X_feat_out_N,
float *X_feat_out_O,
const int *acs) {
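    // Zero the per-atom-type output feature buffers before the kernel scatters into
    // them. The byte counts are written with sizeof(int), which equals sizeof(float)
    // on CUDA platforms, so the sizes are still correct for these float buffers.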
gpuErrchk(cudaMemsetAsync(X_feat_out_H, 0, acs[0]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
gpuErrchk(cudaMemsetAsync(X_feat_out_C, 0, acs[1]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
gpuErrchk(cudaMemsetAsync(X_feat_out_N, 0, acs[2]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
gpuErrchk(cudaMemsetAsync(X_feat_out_O, 0, acs[3]*TOTAL_FEATURE_SIZE*sizeof(int), d.stream()));
if(num_mols > 0) {
// gpu kernel's can't be launched with a zero blockdim
featurize<<<num_mols, 32, 0, d.stream()>>>(
Xs, Ys, Zs, atomic_nums, mol_offsets, mol_atom_count, num_mols, scatter_idxs,
X_feat_out_H, X_feat_out_C, X_feat_out_N, X_feat_out_O);
gpuErrchk(cudaPeekAtLastError());
}
}
};
// instantiation
template struct AniFunctor<GPUDevice, float>;
#endif |
e402f456ae132d26eb0466fc7df7a0229ac8af0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "benchmark_gpu_float_rrr.hpp"
#include "benchmark_cpu_float_rrr.hpp"
#include "numeric.hpp"
#include "time.hpp"
#include "cuda_runtime.hpp"
#include <hiprand/hiprand.h>
#include <mma.h>
#include <iostream>
/* a*b=c
a[m x p]
b[p x n]
c[m x n]
all row-major
*/
__global__ void mm(float *_c, const float *_a, const float *_b, const int m, const int n, const int p)
{
#define a(_i, _j) _a[(_i)*p + (_j)]
#define b(_i, _j) _b[(_i)*n + (_j)]
#define c(_i, _j) _c[(_i)*n + (_j)]
for (int i = blockIdx.y * blockDim.y + threadIdx.y; i < m; i += blockDim.y * gridDim.y)
{
for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < n; j += blockDim.x * gridDim.x)
{
float acc = 0;
for (int k = 0; k < p; ++k)
{
acc += a(i, k) * b(k, j);
}
c(i, j) = acc;
}
}
#undef a
#undef b
#undef c
}
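// The grid-stride loops above let any launch geometry cover the full m x n output;
// the block/grid sizes picked in GPUFloatRRR::sample() only affect how work is split
// (its gridDim.x is derived from m and gridDim.y from n, which looks transposed versus
// the kernel's indexing, but the striding keeps the result correct).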
GPUFloatRRR::GPUFloatRRR()
{
CUDA_RUNTIME(hipStreamCreate(&stream_));
CUDA_RUNTIME(hipEventCreate(&start_));
CUDA_RUNTIME(hipEventCreate(&stop_));
}
GPUFloatRRR::~GPUFloatRRR()
{
CUDA_RUNTIME(hipStreamDestroy(stream_));
CUDA_RUNTIME(hipEventDestroy(start_));
CUDA_RUNTIME(hipEventDestroy(stop_));
}
bool GPUFloatRRR::check()
{
bool success = true;
// compute expected
std::vector<float> _ce(m_ * n_);
CPURRR::mm(_ce.data(), a_, b_, m_, n_, k_);
#define ca(i, j) (c_[(i)*n_ + (j)])
#define ce(i, j) (_ce[(i)*n_ + (j)])
for (int i = 0; i < m_; ++i)
{
for (int j = 0; j < n_; ++j)
{
if (!almost_equal(ca(i, j), ce(i, j), 1e-2))
{
std::cerr << "ERR at " << i << " " << j << " "
<< "ce=" << ce(i, j) << " ca=" << ca(i, j) << std::endl;
success = false;
}
}
}
#undef ca
#undef ce
// send ca back to GPU
CUDA_RUNTIME(hipMemPrefetchAsync(c_, sizeof(*c_) * m_ * n_, 0, 0));
CUDA_RUNTIME(hipDeviceSynchronize());
return success;
}
void GPUFloatRRR::initialize(const Spec &spec)
{
m_ = spec.m;
n_ = spec.n;
k_ = spec.k;
// generate random numbers on CPU
CUDA_RUNTIME(hipMallocManaged(&a_, sizeof(*a_) * m_ * k_));
CUDA_RUNTIME(hipMallocManaged(&b_, sizeof(*b_) * k_ * n_));
fill(a_, m_ * k_);
fill(b_, k_ * n_);
// send to GPU
CUDA_RUNTIME(hipMemPrefetchAsync(a_, sizeof(*a_) * m_ * k_, 0, 0));
CUDA_RUNTIME(hipMemPrefetchAsync(b_, sizeof(*b_) * k_ * n_, 0, 0));
CUDA_RUNTIME(hipDeviceSynchronize());
// GPU output
CUDA_RUNTIME(hipMallocManaged(&c_, sizeof(*c_) * m_ * n_));
}
void GPUFloatRRR::finalize()
{
CUDA_RUNTIME(hipFree(a_));
CUDA_RUNTIME(hipFree(b_));
CUDA_RUNTIME(hipFree(c_));
}
double GPUFloatRRR::sample()
{
dim3 bd(32, 8, 1);
dim3 gd((m_ + bd.y - 1) / bd.y, (n_ + bd.x - 1) / bd.x, 1);
hipEventRecord(start_, stream_);
hipLaunchKernelGGL(( mm), dim3(gd), dim3(bd), 0, stream_, c_, a_, b_, m_, n_, k_);
CUDA_RUNTIME(hipEventRecord(stop_, stream_));
CUDA_RUNTIME(hipGetLastError());
CUDA_RUNTIME(hipEventSynchronize(stop_));
float millis;
CUDA_RUNTIME(hipEventElapsedTime(&millis, start_, stop_));
return millis / 1e3;
}
| e402f456ae132d26eb0466fc7df7a0229ac8af0d.cu | #include "benchmark_gpu_float_rrr.hpp"
#include "benchmark_cpu_float_rrr.hpp"
#include "numeric.hpp"
#include "time.hpp"
#include "cuda_runtime.hpp"
#include <curand.h>
#include <mma.h>
#include <iostream>
/* a*b=c
a[m x p]
b[p x n]
c[m x n]
all row-major
*/
__global__ void mm(float *_c, const float *_a, const float *_b, const int m, const int n, const int p)
{
#define a(_i, _j) _a[(_i)*p + (_j)]
#define b(_i, _j) _b[(_i)*n + (_j)]
#define c(_i, _j) _c[(_i)*n + (_j)]
for (int i = blockIdx.y * blockDim.y + threadIdx.y; i < m; i += blockDim.y * gridDim.y)
{
for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < n; j += blockDim.x * gridDim.x)
{
float acc = 0;
for (int k = 0; k < p; ++k)
{
acc += a(i, k) * b(k, j);
}
c(i, j) = acc;
}
}
#undef a
#undef b
#undef c
}
GPUFloatRRR::GPUFloatRRR()
{
CUDA_RUNTIME(cudaStreamCreate(&stream_));
CUDA_RUNTIME(cudaEventCreate(&start_));
CUDA_RUNTIME(cudaEventCreate(&stop_));
}
GPUFloatRRR::~GPUFloatRRR()
{
CUDA_RUNTIME(cudaStreamDestroy(stream_));
CUDA_RUNTIME(cudaEventDestroy(start_));
CUDA_RUNTIME(cudaEventDestroy(stop_));
}
bool GPUFloatRRR::check()
{
bool success = true;
// compute expected
std::vector<float> _ce(m_ * n_);
CPURRR::mm(_ce.data(), a_, b_, m_, n_, k_);
#define ca(i, j) (c_[(i)*n_ + (j)])
#define ce(i, j) (_ce[(i)*n_ + (j)])
for (int i = 0; i < m_; ++i)
{
for (int j = 0; j < n_; ++j)
{
if (!almost_equal(ca(i, j), ce(i, j), 1e-2))
{
std::cerr << "ERR at " << i << " " << j << " "
<< "ce=" << ce(i, j) << " ca=" << ca(i, j) << std::endl;
success = false;
}
}
}
#undef ca
#undef ce
// send ca back to GPU
CUDA_RUNTIME(cudaMemPrefetchAsync(c_, sizeof(*c_) * m_ * n_, 0, 0));
CUDA_RUNTIME(cudaDeviceSynchronize());
return success;
}
void GPUFloatRRR::initialize(const Spec &spec)
{
m_ = spec.m;
n_ = spec.n;
k_ = spec.k;
// generate random numbers on CPU
CUDA_RUNTIME(cudaMallocManaged(&a_, sizeof(*a_) * m_ * k_));
CUDA_RUNTIME(cudaMallocManaged(&b_, sizeof(*b_) * k_ * n_));
fill(a_, m_ * k_);
fill(b_, k_ * n_);
// send to GPU
CUDA_RUNTIME(cudaMemPrefetchAsync(a_, sizeof(*a_) * m_ * k_, 0, 0));
CUDA_RUNTIME(cudaMemPrefetchAsync(b_, sizeof(*b_) * k_ * n_, 0, 0));
CUDA_RUNTIME(cudaDeviceSynchronize());
// GPU output
CUDA_RUNTIME(cudaMallocManaged(&c_, sizeof(*c_) * m_ * n_));
}
void GPUFloatRRR::finalize()
{
CUDA_RUNTIME(cudaFree(a_));
CUDA_RUNTIME(cudaFree(b_));
CUDA_RUNTIME(cudaFree(c_));
}
double GPUFloatRRR::sample()
{
dim3 bd(32, 8, 1);
dim3 gd((m_ + bd.y - 1) / bd.y, (n_ + bd.x - 1) / bd.x, 1);
cudaEventRecord(start_, stream_);
mm<<<gd, bd, 0, stream_>>>(c_, a_, b_, m_, n_, k_);
CUDA_RUNTIME(cudaEventRecord(stop_, stream_));
CUDA_RUNTIME(cudaGetLastError());
CUDA_RUNTIME(cudaEventSynchronize(stop_));
float millis;
CUDA_RUNTIME(cudaEventElapsedTime(&millis, start_, stop_));
return millis / 1e3;
}
|
8348f4eef4dabf7f7193984d0d91d48492981516.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
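// Illustrative sketch only -- a hypothetical CPU reference (these names do not
// appear in the assignment starter code) that reproduces the worked example in
// the comment above: min/max, the 3-bin histogram [4 7 3], and its prefix sum.
// Note that "cdf : [4 11 14]" in the example is the inclusive scan; the
// exclusive scan of the same histogram is [0 4 11], which is the form the
// Blelloch-scan kernel further below produces. Assumes lumRange > 0.
#include <vector>
#include <algorithm>
static inline void reference_histogram_cdf(const std::vector<float> &lum,
int numBins,
std::vector<unsigned int> &hist,
std::vector<unsigned int> &cdf) {
float lumMin = *std::min_element(lum.begin(), lum.end());
float lumMax = *std::max_element(lum.begin(), lum.end());
float lumRange = lumMax - lumMin;
hist.assign(numBins, 0);
for (float v : lum) {
int bin = std::min(numBins - 1,
static_cast<int>((v - lumMin) / lumRange * numBins));
++hist[bin];
}
cdf.assign(numBins, 0); // exclusive prefix sum of the histogram
for (int i = 1; i < numBins; ++i)
cdf[i] = cdf[i - 1] + hist[i - 1];
}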
__global__ void get_logLum_max_min(const float* const d_logLuminance,
float* logLumArr,
const int min_or_max,
const size_t size) {
// looks correct
extern __shared__ float sdata[];
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int thread_id = threadIdx.x;
if (idx >= size) {
return;
}
sdata[thread_id] = d_logLuminance[idx];
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (thread_id < s && (idx + s) < size && (thread_id + s) < blockDim.x) {
float a = sdata[thread_id];
float b = sdata[thread_id + s];
if (min_or_max == 1 && a < b) {
sdata[thread_id] = b;
} else if (min_or_max == 0 && a > b) {
sdata[thread_id] = b;
}
}
__syncthreads();
}
if (thread_id == 0) {
logLumArr[blockIdx.x] = sdata[0];
}
}
void helper_compute_min_max(const float* const d_logLuminance,
const int min_or_max,
const size_t numRows,
const size_t numCols,
float &result) {
// looks correct
size_t size = numRows * numCols;
const int reduce_blockSize = 512;
int reduce_gridSize;
float* d_in;
float* d_out;
checkCudaErrors(hipMalloc(&d_in, sizeof(float) * size));
checkCudaErrors(hipMemcpy(d_in, d_logLuminance, sizeof(float) * size, hipMemcpyDeviceToDevice));
while(true) {
reduce_gridSize = (size + reduce_blockSize - 1) / reduce_blockSize;
// Allocate memory for logLum
checkCudaErrors(hipMalloc(&d_out, sizeof(float) * reduce_gridSize));
// Find the minimum and maximum across the image (reduce)
hipLaunchKernelGGL(( get_logLum_max_min), dim3(reduce_gridSize), dim3(reduce_blockSize), reduce_blockSize * sizeof(float), 0, d_in,
d_out,
min_or_max,
size);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Free memory
checkCudaErrors(hipFree(d_in));
if (reduce_gridSize == 1) {
break;
}
d_in = d_out;
size = reduce_gridSize;
}
checkCudaErrors(hipMemcpy(&result, d_out, sizeof(float), hipMemcpyDeviceToHost));
// Free memory
checkCudaErrors(hipFree(d_out));
}
__global__ void compute_histogram(const float* const d_logLuminance,
float min_logLum,
float max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins,
unsigned int* logLum_hist) {
// Looks correct
const int2 thread_2D_pos = make_int2(blockDim.x * blockIdx.x + threadIdx.x,
blockDim.y * blockIdx.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) {
return;
}
float luminance = d_logLuminance[thread_1D_pos];
float logLumRange = max_logLum - min_logLum;
unsigned int bin = static_cast<unsigned int>((luminance - min_logLum) / logLumRange * numBins);
if (bin > static_cast<unsigned int>(numBins - 1)) {
bin = static_cast<unsigned int>(numBins - 1);
}
atomicAdd(&(logLum_hist[bin]), 1);
}
/*__global__ void compute_cumulative_hist_naive(unsigned int* logLum_hist,*/
/* unsigned int* const d_cdf,*/
/* const size_t numBins) {*/
/* const int idx = blockDim.x * blockIdx.x + threadIdx.x;*/
/* if (idx >= numBins) {*/
/* return;*/
/* }*/
/**/
/* extern __shared__ unsigned int tmp[];*/
/* int pout = 0, pin = 1;*/
/**/
/* tmp[pout * numBins + idx] = (idx > 0)? logLum_hist[idx - 1] : 0;*/
/* __syncthreads();*/
/**/
/* for (int offset = 1; offset < numBins; offset *= 2) {*/
/* pout = 1 - pout;*/
/* pin = 1 - pout;*/
/* if (idx >= offset) {*/
/* tmp[pout * numBins + idx] += tmp[pin * numBins + idx - offset];*/
/* } else {*/
/* tmp[pout * numBins + idx] += tmp[pin * numBins + idx];*/
/* }*/
/* __syncthreads();*/
/* }*/
/**/
/* d_cdf[idx] = tmp[pout * numBins + idx]; */
/*}*/
__global__ void compute_cumulative_hist(unsigned int* logLum_hist,
unsigned int* const d_cdf,
const size_t numBins) {
// Perform exclusive scan (Blelloch scan)
// Assume numBins is a multiple of two
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (2 * idx >= numBins || (2 * idx + 1) >= numBins) {
return;
}
extern __shared__ unsigned int tmp[];
tmp[2 * idx] = logLum_hist[2 * idx];
tmp[2 * idx + 1] = logLum_hist[2 * idx + 1];
int offset = 1;
// Perform upsweep
for (int d = numBins >> 1; d > 0; d >>= 1) {
__syncthreads();
if (idx < d) {
int index1 = offset * (2 * idx + 1) - 1;
int index2 = offset * (2 * idx + 2) - 1;
tmp[index2] += tmp[index1];
}
offset *= 2;
}
// Perform downsweep
if (idx == 0) {
tmp[numBins - 1] = 0;
}
for (int d = 1; d < numBins; d *= 2) {
offset >>= 1;
__syncthreads();
if (idx < d) {
int index1 = offset * (2 * idx + 1) - 1;
int index2 = offset * (2 * idx + 2) - 1;
unsigned int t = tmp[index1];
tmp[index1] = tmp[index2];
tmp[index2] += t;
}
}
__syncthreads();
d_cdf[2 * idx] = tmp[2 * idx];
d_cdf[2 * idx + 1] = tmp[2 * idx + 1];
}
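// Worked example (for clarity): scanning the header's histogram [4 7 3],
// padded to a power of two as [4 7 3 0]:
// up-sweep (reduce): [4 7 3 0] -> [4 11 3 3] -> [4 11 3 14]
// clear last element: [4 11 3 0]
// down-sweep: [4 11 3 0] -> [4 0 3 11] -> [0 4 11 14]
// i.e. the exclusive prefix sum of the histogram.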
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
// Set block size and grid size
const dim3 hist_blockSize(32, 32, 1);
const dim3 hist_gridSize((numCols + 32 - 1)/32, (numRows + 32 - 1) / 32, 1);
const dim3 scan_blockSize(256, 1, 1);
const dim3 scan_gridSize((numBins/2 + 256 - 1)/256, 1, 1);
// Compute minimum and maximum
helper_compute_min_max(d_logLuminance, 1, numRows, numCols, max_logLum);
helper_compute_min_max(d_logLuminance, 0, numRows, numCols, min_logLum);
// Allocate memory for histogram
unsigned int* logLum_hist;
checkCudaErrors(hipMalloc(&logLum_hist, sizeof(unsigned int) * numBins));
checkCudaErrors(hipMemset(logLum_hist, 0, sizeof(unsigned int) * numBins));
// Build a histogram (atomicAdd)
hipLaunchKernelGGL(( compute_histogram), dim3(hist_gridSize), dim3(hist_blockSize), 0, 0, d_logLuminance,
min_logLum,
max_logLum,
numRows,
numCols,
numBins,
logLum_hist);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Cumulative add (scan)
hipLaunchKernelGGL(( compute_cumulative_hist), dim3(scan_blockSize), dim3(scan_gridSize), sizeof(unsigned int) * numBins, 0, logLum_hist,
d_cdf,
numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Free memory
checkCudaErrors(hipFree(logLum_hist));
}
| 8348f4eef4dabf7f7193984d0d91d48492981516.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
__global__ void get_logLum_max_min(const float* const d_logLuminance,
float* logLumArr,
const int min_or_max,
const size_t size) {
// looks correct
extern __shared__ float sdata[];
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int thread_id = threadIdx.x;
if (idx >= size) {
return;
}
sdata[thread_id] = d_logLuminance[idx];
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (thread_id < s && (idx + s) < size && (thread_id + s) < blockDim.x) {
float a = sdata[thread_id];
float b = sdata[thread_id + s];
if (min_or_max == 1 && a < b) {
sdata[thread_id] = b;
} else if (min_or_max == 0 && a > b) {
sdata[thread_id] = b;
}
}
__syncthreads();
}
if (thread_id == 0) {
logLumArr[blockIdx.x] = sdata[0];
}
}
void helper_compute_min_max(const float* const d_logLuminance,
const int min_or_max,
const size_t numRows,
const size_t numCols,
float &result) {
// looks correct
size_t size = numRows * numCols;
const int reduce_blockSize = 512;
int reduce_gridSize;
float* d_in;
float* d_out;
checkCudaErrors(cudaMalloc(&d_in, sizeof(float) * size));
checkCudaErrors(cudaMemcpy(d_in, d_logLuminance, sizeof(float) * size, cudaMemcpyDeviceToDevice));
while(true) {
reduce_gridSize = (size + reduce_blockSize - 1) / reduce_blockSize;
// Allocate memory for logLum
checkCudaErrors(cudaMalloc(&d_out, sizeof(float) * reduce_gridSize));
// Find the minimum and maximum across the image (reduce)
get_logLum_max_min<<<reduce_gridSize, reduce_blockSize, reduce_blockSize * sizeof(float)>>>(d_in,
d_out,
min_or_max,
size);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Free memory
checkCudaErrors(cudaFree(d_in));
if (reduce_gridSize == 1) {
break;
}
d_in = d_out;
size = reduce_gridSize;
}
checkCudaErrors(cudaMemcpy(&result, d_out, sizeof(float), cudaMemcpyDeviceToHost));
// Free memory
checkCudaErrors(cudaFree(d_out));
}
__global__ void compute_histogram(const float* const d_logLuminance,
float min_logLum,
float max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins,
unsigned int* logLum_hist) {
// Looks correct
const int2 thread_2D_pos = make_int2(blockDim.x * blockIdx.x + threadIdx.x,
blockDim.y * blockIdx.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) {
return;
}
float luminance = d_logLuminance[thread_1D_pos];
float logLumRange = max_logLum - min_logLum;
unsigned int bin = static_cast<unsigned int>((luminance - min_logLum) / logLumRange * numBins);
if (bin > static_cast<unsigned int>(numBins - 1)) {
bin = static_cast<unsigned int>(numBins - 1);
}
atomicAdd(&(logLum_hist[bin]), 1);
}
/*__global__ void compute_cumulative_hist_naive(unsigned int* logLum_hist,*/
/* unsigned int* const d_cdf,*/
/* const size_t numBins) {*/
/* const int idx = blockDim.x * blockIdx.x + threadIdx.x;*/
/* if (idx >= numBins) {*/
/* return;*/
/* }*/
/**/
/* extern __shared__ unsigned int tmp[];*/
/* int pout = 0, pin = 1;*/
/**/
/* tmp[pout * numBins + idx] = (idx > 0)? logLum_hist[idx - 1] : 0;*/
/* __syncthreads();*/
/**/
/* for (int offset = 1; offset < numBins; offset *= 2) {*/
/* pout = 1 - pout;*/
/* pin = 1 - pout;*/
/* if (idx >= offset) {*/
/* tmp[pout * numBins + idx] += tmp[pin * numBins + idx - offset];*/
/* } else {*/
/* tmp[pout * numBins + idx] += tmp[pin * numBins + idx];*/
/* }*/
/* __syncthreads();*/
/* }*/
/**/
/* d_cdf[idx] = tmp[pout * numBins + idx]; */
/*}*/
__global__ void compute_cumulative_hist(unsigned int* logLum_hist,
unsigned int* const d_cdf,
const size_t numBins) {
// Perform exclusive scan (Blelloch scan)
// Assume numBins is a multiple of two
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (2 * idx >= numBins || (2 * idx + 1) >= numBins) {
return;
}
extern __shared__ unsigned int tmp[];
tmp[2 * idx] = logLum_hist[2 * idx];
tmp[2 * idx + 1] = logLum_hist[2 * idx + 1];
int offset = 1;
// Perform upsweep
for (int d = numBins >> 1; d > 0; d >>= 1) {
__syncthreads();
if (idx < d) {
int index1 = offset * (2 * idx + 1) - 1;
int index2 = offset * (2 * idx + 2) - 1;
tmp[index2] += tmp[index1];
}
offset *= 2;
}
// Perform downsweep
if (idx == 0) {
tmp[numBins - 1] = 0;
}
for (int d = 1; d < numBins; d *= 2) {
offset >>= 1;
__syncthreads();
if (idx < d) {
int index1 = offset * (2 * idx + 1) - 1;
int index2 = offset * (2 * idx + 2) - 1;
unsigned int t = tmp[index1];
tmp[index1] = tmp[index2];
tmp[index2] += t;
}
}
__syncthreads();
d_cdf[2 * idx] = tmp[2 * idx];
d_cdf[2 * idx + 1] = tmp[2 * idx + 1];
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
// Set block size and grid size
const dim3 hist_blockSize(32, 32, 1);
const dim3 hist_gridSize((numCols + 32 - 1)/32, (numRows + 32 - 1) / 32, 1);
const dim3 scan_blockSize(256, 1, 1);
const dim3 scan_gridSize((numBins/2 + 256 - 1)/256, 1, 1);
// Compute minimum and maximum
helper_compute_min_max(d_logLuminance, 1, numRows, numCols, max_logLum);
helper_compute_min_max(d_logLuminance, 0, numRows, numCols, min_logLum);
// Allocate memory for histogram
unsigned int* logLum_hist;
checkCudaErrors(cudaMalloc(&logLum_hist, sizeof(unsigned int) * numBins));
checkCudaErrors(cudaMemset(logLum_hist, 0, sizeof(unsigned int) * numBins));
// Build a histogram (atomicAdd)
compute_histogram<<<hist_gridSize, hist_blockSize>>>(d_logLuminance,
min_logLum,
max_logLum,
numRows,
numCols,
numBins,
logLum_hist);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Cumulative add (scan)
compute_cumulative_hist<<<scan_blockSize, scan_gridSize, sizeof(unsigned int) * numBins>>>(logLum_hist,
d_cdf,
numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Free memory
checkCudaErrors(cudaFree(logLum_hist));
}
|
2d00f4f9ea588bc288bd1618e3e5cba77b27bd08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <cmath>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/lstm_layer.hpp"
namespace caffe {
template <typename Dtype>
__device__ Dtype sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
template <typename Dtype>
__device__ Dtype tanh(const Dtype x) {
return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1);
}
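// Standard identity (noted for clarity): tanh(x) = 2*sigmoid(2x) - 1, since
// 2 / (1 + e^(-2x)) - 1 = (1 - e^(-2x)) / (1 + e^(-2x))
// = (e^x - e^(-x)) / (e^x + e^(-x)) = tanh(x)
// (multiply numerator and denominator by e^x in the last step).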
template <typename Dtype>
__global__ void LSTMActsForward(const int nthreads, const int dim,
const Dtype* X, Dtype* X_acts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int x_dim = 4 * dim;
const int d = index % x_dim;
if (d < 3 * dim) {
X_acts[index] = sigmoid(X[index]);
} else {
X_acts[index] = tanh(X[index]);
}
}
}
template <typename Dtype>
__global__ void LSTMUnitForward(const int nthreads, const int dim,
const Dtype* C_prev, const Dtype* X, const Dtype* cont,
Dtype* C, Dtype* H) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int d = index % dim;
const Dtype* X_offset = X + 4 * dim * n;
const Dtype i = X_offset[d];
const Dtype f = X_offset[1 * dim + d];
const Dtype o = X_offset[2 * dim + d];
const Dtype g = X_offset[3 * dim + d];
const Dtype c_prev = C_prev[index];
const Dtype c = cont[n] * f * c_prev + i * g;
C[index] = c;
const Dtype tanh_c = tanh(c);
H[index] = o * tanh_c;
}
}
template <typename Dtype>
void LSTMUnitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int count = top[1]->count();
const Dtype* C_prev = bottom[0]->gpu_data();
const Dtype* X = bottom[1]->gpu_data();
const Dtype* cont = bottom[2]->gpu_data();
Dtype* X_acts = X_acts_.mutable_gpu_data();
Dtype* C = top[0]->mutable_gpu_data();
Dtype* H = top[1]->mutable_gpu_data();
const int X_count = bottom[1]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LSTMActsForward<Dtype>), dim3(CAFFE_GET_BLOCKS(X_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
X_count, hidden_dim_, X, X_acts);
CUDA_POST_KERNEL_CHECK;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LSTMUnitForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, hidden_dim_, C_prev, X_acts, cont, C, H);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void LSTMUnitBackward(const int nthreads, const int dim,
const Dtype* C_prev, const Dtype* X, const Dtype* C, const Dtype* H,
const Dtype* cont, const Dtype* C_diff, const Dtype* H_diff,
Dtype* C_prev_diff, Dtype* X_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int d = index % dim;
const Dtype* X_offset = X + 4 * dim * n;
const Dtype i = X_offset[d];
const Dtype f = X_offset[1 * dim + d];
const Dtype o = X_offset[2 * dim + d];
const Dtype g = X_offset[3 * dim + d];
const Dtype c_prev = C_prev[index];
const Dtype c = C[index];
const Dtype tanh_c = tanh(c);
Dtype* c_prev_diff = C_prev_diff + index;
Dtype* X_diff_offset = X_diff + 4 * dim * n;
Dtype* i_diff = X_diff_offset + d;
Dtype* f_diff = X_diff_offset + 1 * dim + d;
Dtype* o_diff = X_diff_offset + 2 * dim + d;
Dtype* g_diff = X_diff_offset + 3 * dim + d;
const Dtype c_term_diff =
C_diff[index] + H_diff[index] * o * (1 - tanh_c * tanh_c);
const Dtype cont_n = cont[n];
*c_prev_diff = cont_n * c_term_diff * f;
*i_diff = c_term_diff * g;
*f_diff = cont_n * c_term_diff * c_prev;
*o_diff = H_diff[index] * tanh_c;
*g_diff = c_term_diff * i;
}
}
template <typename Dtype>
__global__ void LSTMActsBackward(const int nthreads, const int dim,
const Dtype* X_acts, const Dtype* X_acts_diff, Dtype* X_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int x_dim = 4 * dim;
const int d = index % x_dim;
const Dtype X_act = X_acts[index];
if (d < 3 * dim) {
X_diff[index] = X_acts_diff[index] * X_act * (Dtype(1) - X_act);
} else {
X_diff[index] = X_acts_diff[index] * (Dtype(1) - X_act * X_act);
}
}
}
template <typename Dtype>
void LSTMUnitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
CHECK(!propagate_down[2]) << "Cannot backpropagate to sequence indicators.";
if (!propagate_down[0] && !propagate_down[1]) { return; }
const int count = top[1]->count();
const Dtype* C_prev = bottom[0]->gpu_data();
const Dtype* X_acts = X_acts_.gpu_data();
const Dtype* cont = bottom[2]->gpu_data();
const Dtype* C = top[0]->gpu_data();
const Dtype* H = top[1]->gpu_data();
const Dtype* C_diff = top[0]->gpu_diff();
const Dtype* H_diff = top[1]->gpu_diff();
Dtype* C_prev_diff = bottom[0]->mutable_gpu_diff();
Dtype* X_acts_diff = X_acts_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((LSTMUnitBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, hidden_dim_,
C_prev, X_acts, C, H, cont, C_diff, H_diff, C_prev_diff, X_acts_diff);
CUDA_POST_KERNEL_CHECK;
const int X_count = bottom[1]->count();
Dtype* X_diff = bottom[1]->mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((LSTMActsBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(X_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
X_count, hidden_dim_, X_acts, X_acts_diff, X_diff);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(LSTMUnitLayer);
} // namespace caffe
| 2d00f4f9ea588bc288bd1618e3e5cba77b27bd08.cu | /*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <cmath>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/lstm_layer.hpp"
namespace caffe {
template <typename Dtype>
__device__ Dtype sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
template <typename Dtype>
__device__ Dtype tanh(const Dtype x) {
return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1);
}
template <typename Dtype>
__global__ void LSTMActsForward(const int nthreads, const int dim,
const Dtype* X, Dtype* X_acts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int x_dim = 4 * dim;
const int d = index % x_dim;
if (d < 3 * dim) {
X_acts[index] = sigmoid(X[index]);
} else {
X_acts[index] = tanh(X[index]);
}
}
}
template <typename Dtype>
__global__ void LSTMUnitForward(const int nthreads, const int dim,
const Dtype* C_prev, const Dtype* X, const Dtype* cont,
Dtype* C, Dtype* H) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int d = index % dim;
const Dtype* X_offset = X + 4 * dim * n;
const Dtype i = X_offset[d];
const Dtype f = X_offset[1 * dim + d];
const Dtype o = X_offset[2 * dim + d];
const Dtype g = X_offset[3 * dim + d];
const Dtype c_prev = C_prev[index];
const Dtype c = cont[n] * f * c_prev + i * g;
C[index] = c;
const Dtype tanh_c = tanh(c);
H[index] = o * tanh_c;
}
}
template <typename Dtype>
void LSTMUnitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int count = top[1]->count();
const Dtype* C_prev = bottom[0]->gpu_data();
const Dtype* X = bottom[1]->gpu_data();
const Dtype* cont = bottom[2]->gpu_data();
Dtype* X_acts = X_acts_.mutable_gpu_data();
Dtype* C = top[0]->mutable_gpu_data();
Dtype* H = top[1]->mutable_gpu_data();
const int X_count = bottom[1]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
LSTMActsForward<Dtype><<<CAFFE_GET_BLOCKS(X_count), CAFFE_CUDA_NUM_THREADS>>>(
X_count, hidden_dim_, X, X_acts);
CUDA_POST_KERNEL_CHECK;
// NOLINT_NEXT_LINE(whitespace/operators)
LSTMUnitForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, hidden_dim_, C_prev, X_acts, cont, C, H);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void LSTMUnitBackward(const int nthreads, const int dim,
const Dtype* C_prev, const Dtype* X, const Dtype* C, const Dtype* H,
const Dtype* cont, const Dtype* C_diff, const Dtype* H_diff,
Dtype* C_prev_diff, Dtype* X_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const int d = index % dim;
const Dtype* X_offset = X + 4 * dim * n;
const Dtype i = X_offset[d];
const Dtype f = X_offset[1 * dim + d];
const Dtype o = X_offset[2 * dim + d];
const Dtype g = X_offset[3 * dim + d];
const Dtype c_prev = C_prev[index];
const Dtype c = C[index];
const Dtype tanh_c = tanh(c);
Dtype* c_prev_diff = C_prev_diff + index;
Dtype* X_diff_offset = X_diff + 4 * dim * n;
Dtype* i_diff = X_diff_offset + d;
Dtype* f_diff = X_diff_offset + 1 * dim + d;
Dtype* o_diff = X_diff_offset + 2 * dim + d;
Dtype* g_diff = X_diff_offset + 3 * dim + d;
const Dtype c_term_diff =
C_diff[index] + H_diff[index] * o * (1 - tanh_c * tanh_c);
const Dtype cont_n = cont[n];
*c_prev_diff = cont_n * c_term_diff * f;
*i_diff = c_term_diff * g;
*f_diff = cont_n * c_term_diff * c_prev;
*o_diff = H_diff[index] * tanh_c;
*g_diff = c_term_diff * i;
}
}
template <typename Dtype>
__global__ void LSTMActsBackward(const int nthreads, const int dim,
const Dtype* X_acts, const Dtype* X_acts_diff, Dtype* X_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int x_dim = 4 * dim;
const int d = index % x_dim;
const Dtype X_act = X_acts[index];
if (d < 3 * dim) {
X_diff[index] = X_acts_diff[index] * X_act * (Dtype(1) - X_act);
} else {
X_diff[index] = X_acts_diff[index] * (Dtype(1) - X_act * X_act);
}
}
}
template <typename Dtype>
void LSTMUnitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
CHECK(!propagate_down[2]) << "Cannot backpropagate to sequence indicators.";
if (!propagate_down[0] && !propagate_down[1]) { return; }
const int count = top[1]->count();
const Dtype* C_prev = bottom[0]->gpu_data();
const Dtype* X_acts = X_acts_.gpu_data();
const Dtype* cont = bottom[2]->gpu_data();
const Dtype* C = top[0]->gpu_data();
const Dtype* H = top[1]->gpu_data();
const Dtype* C_diff = top[0]->gpu_diff();
const Dtype* H_diff = top[1]->gpu_diff();
Dtype* C_prev_diff = bottom[0]->mutable_gpu_diff();
Dtype* X_acts_diff = X_acts_.mutable_gpu_diff();
LSTMUnitBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, hidden_dim_,
C_prev, X_acts, C, H, cont, C_diff, H_diff, C_prev_diff, X_acts_diff);
CUDA_POST_KERNEL_CHECK;
const int X_count = bottom[1]->count();
Dtype* X_diff = bottom[1]->mutable_gpu_diff();
LSTMActsBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(X_count), CAFFE_CUDA_NUM_THREADS>>>(
X_count, hidden_dim_, X_acts, X_acts_diff, X_diff);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(LSTMUnitLayer);
} // namespace caffe
|
f2be807e55192b2c4be3eef70f8fc94f4a8561c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
__global__ void kernel(int *array){
int index = blockIdx.x*blockDim.x + threadIdx.x;
array[index] = index;
}
int main(){
int num_elements = 256;
int num_bytes = num_elements*sizeof(int);
int *device_array = 0;
int *host_array = 0;
host_array = (int *)malloc(num_bytes);
hipMalloc((void **) &device_array, num_bytes);
int block_size = 128;
int grid_size = num_elements/block_size;
hipLaunchKernelGGL((kernel), dim3(grid_size), dim3(block_size), 0, 0, device_array);
hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost);
int i;
for(i=0;i<num_elements; ++i)
printf("%d\n", host_array[i]);
free(host_array);
hipFree(device_array);
return 0;
} | f2be807e55192b2c4be3eef70f8fc94f4a8561c2.cu | #include <stdlib.h>
#include <stdio.h>
__global__ void kernel(int *array){
int index = blockIdx.x*blockDim.x + threadIdx.x;
array[index] = index;
}
int main(){
int num_elements = 256;
int num_bytes = num_elements*sizeof(int);
int *device_array = 0;
int *host_array = 0;
host_array = (int *)malloc(num_bytes);
cudaMalloc((void **) &device_array, num_bytes);
int block_size = 128;
int grid_size = num_elements/block_size;
kernel<<<grid_size, block_size>>>(device_array);
cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
int i;
for(i=0;i<num_elements; ++i)
printf("%d\n", host_array[i]);
free(host_array);
cudaFree(device_array);
return 0;
} |
ace0fcacb79aac0d2a9f1910f29f09595bd72026.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
__global__ void add_strided_float(int n, int xOffset,int yOffset,float *dx, float *dy,int incx,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0)
result[i] = dy[i] + dx[i];
}
} | ace0fcacb79aac0d2a9f1910f29f09595bd72026.cu | #include "includes.h"
extern "C"
__global__ void add_strided_float(int n, int xOffset,int yOffset,float *dx, float *dy,int incx,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0)
result[i] = dy[i] + dx[i];
}
} |
af5c5f0b42556ac874ded918ddf1edeac394bfae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <algorithm>
#define FILTER_WIDTH 3
__constant__ int dc_xFilter[FILTER_WIDTH * FILTER_WIDTH];
__constant__ int dc_yFilter[FILTER_WIDTH * FILTER_WIDTH];
#define CHECK(call){\
const hipError_t error = call;\
if (error != hipSuccess){\
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\
fprintf(stderr, "code: %d, reason: %s\n", error, hipGetErrorString(error));\
exit(EXIT_FAILURE);\
}\
}
struct GpuTimer{
hipEvent_t start;
hipEvent_t stop;
GpuTimer(){
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer(){
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start(){
hipEventRecord(start, 0);
hipEventSynchronize(start);
}
void Stop(){
hipEventRecord(stop, 0);
}
float Eplapsed(){
float eplapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&eplapsed, start, stop);
return eplapsed;
}
};
void readRGBPnm (char *fileName, int &width, int &height, uchar3 *&pixels){
FILE *f = fopen(fileName, "r");
if (f == NULL){
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
char type[3];
fscanf(f, "%s", type);
// Check the type of input img
if (strcmp(type, "P3") != 0){
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
fscanf(f, "%i", &width);
fscanf(f, "%i", &height);
int maxVal;
fscanf(f, "%i", &maxVal);
// Assume 1 byte per value
if (maxVal > 255){
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
pixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
for (int i = 0; i< width * height; i++){
fscanf(f, "%hhu%hhu%hhu", &pixels[i].x, &pixels[i].y, &pixels[i].z);
}
fclose(f);
}
void writeRGBPnm (const uchar3 *pixels, int width, int height, char *fileName){
FILE *f = fopen(fileName, "w");
if (f == NULL){
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
fprintf(f, "P3\n%i\n%i\n255\n", width, height);
for (int i = 0; i < width * height; i++){
fprintf(f, "%hhu\n%hhu\n%hhu\n", pixels[i].x, pixels[i].y, pixels[i].z);
}
fclose(f);
}
void writeGrayScalePnm (int *pixels, int width, int height, char *fileName){
FILE *f = fopen(fileName, "w");
if (f == NULL){
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
fprintf(f, "P2\n%i\n%i\n255\n", width, height);
for (int i = 0; i < width * height; i++){
fprintf(f, "%hhu\n", pixels[i]);
}
fclose(f);
}
void writeMatrixTxt (int *pixels, int width, int height, char *fileName){
FILE *f = fopen(fileName, "w");
if (f == NULL){
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){
fprintf(f, "%d ", pixels[i * width + j]);
}
fprintf(f, "\n");
}
fclose(f);
}
void initSobelFilter(int *filter, bool horizontal){
int filterWidth = FILTER_WIDTH;
int val = 0;
int margin = filterWidth / 2;
for (int filterR = 0; filterR < filterWidth; filterR++){
for (int filterC = 0; filterC < filterWidth; filterC++){
if (horizontal == true){
if (filterC < margin){
val = 1;
}
else if (filterC == margin){
val = 0;
}
else{
val = -1;
}
if (filterR == margin){
val *= 2;
}
}
else{
if (filterR < margin){
val = 1;
}
else if (filterR == margin){
val = 0;
}
else{
val = -1;
}
if (filterC == margin){
val *= 2;
}
}
filter[filterR * filterWidth + filterC] = val;
}
}
}
void convertRgb2Gray (const uchar3 *in, int n, int *out){
for (int i = 0; i < n; i++){
out[i] = 0.299f * in[i].x + 0.587f * in[i].y + 0.114f * in[i].z;
}
}
void getPixelsImportance (int *in, int width, int height, int *xFilter, int *yFilter, int filterWidth, int *out){
int margin = filterWidth / 2;
for (int col = 0; col < width; col++){
for (int row = 0; row < height; row++){
int curIdx = row * width + col;
float xSum = 0, ySum = 0;
for (int filterRow = -margin; filterRow <= margin; filterRow++){
for (int filterCol = -margin; filterCol <= margin; filterCol++){
int filterIdx = (filterRow + margin) * filterWidth + filterCol + margin;
int dx = min(width - 1, max(0, col + filterCol));
int dy = min(height - 1, max(0, row + filterRow));
int idx = dy * width + dx;
xSum += in[idx] * xFilter[filterIdx];
ySum += in[idx] * yFilter[filterIdx];
}
}
out[curIdx] = abs(xSum) + abs(ySum);
}
}
}
void getLeastImportantPixels (int *in, int width, int height, int *out){
int lastRow = (height - 1) * width;
memcpy(out + lastRow, in + lastRow, width * sizeof(int));
for (int row = height - 2; row >= 0; row--){
int below = row + 1;
for (int col = 0; col < width; col++ ){
int idx = row * width + col;
int leftCol = max(0, col - 1);
int rightCol = min(width - 1, col + 1);
int belowIdx = below * width + col;
int leftBelowIdx = below * width + leftCol;
int rightBelowIdx = below * width + rightCol;
out[idx] = min(out[belowIdx], min(out[leftBelowIdx], out[rightBelowIdx])) + in[idx];
}
}
}
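// Note: the loop above fills the classic seam-carving cost table bottom-up,
// cost(r, c) = importance(r, c)
// + min(cost(r+1, c-1), cost(r+1, c), cost(r+1, c+1)),
// with column indices clamped to [0, width-1] at the borders; the last row is
// copied unchanged. getSeamAt below walks this table top-down to recover one
// minimal vertical seam.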
void getSeamAt (int *in, int width, int height, int *out, int col){
out[0] = col;
for (int row = 1; row < height; row++){
int col = out[row - 1];
int idx = row * width + col;
int leftCol = max(0, col - 1);
int rightCol = min(width - 1, col + 1);
int leftIdx = row * width + leftCol;
int rightIdx = row * width + rightCol;
if (in[leftIdx] < in[idx]){
if (in[leftIdx] < in[rightIdx])
out[row] = leftCol;
else
out[row] = rightCol;
}
else{
if (in[idx] < in[rightIdx])
out[row] = col;
else
out[row] = rightCol;
}
}
}
void getLeastImportantSeam (int *in, int width, int height, int *out){
int minCol = 0;
for (int i = 0; i < width; i++){
if (in[i] < in[minCol])
minCol = i;
}
// printf("min col %d-%d\n", minCol, in[minCol]);
getSeamAt(in, width, height, out, minCol);
}
void removeSeam (const uchar3 *in, int width, int height, uchar3 *out, int *seam){
int newWidth = width - 1;
for (int row = 0; row < height; row++){
int col = seam[row];
memcpy(out + row * newWidth, in + row * width, col * sizeof(uchar3));
int nextIdxOut = row * newWidth + col;
int nextIdxIn = row * width + col + 1;
memcpy(out + nextIdxOut, in + nextIdxIn, (newWidth - col) * sizeof(uchar3));
}
}
void seamCarvingHost(const uchar3 *in, int width, int height, uchar3 *out, int *xFilter, int *yFilter, int filterWidth){
// convert image to grayscale
int *grayScalePixels = (int *)malloc(width * height * sizeof(int));
convertRgb2Gray(in, width * height, grayScalePixels);
// edge detection
int *pixelsImportance = (int *)malloc(width * height * sizeof(int));
getPixelsImportance(grayScalePixels, width, height, xFilter, yFilter, filterWidth, pixelsImportance);
// find the least important seam
int *leastPixelsImportance = (int *)malloc(width * height * sizeof(int));
getLeastImportantPixels(pixelsImportance, width, height, leastPixelsImportance);
int *leastImportantSeam = (int *)malloc(height * sizeof(int));
getLeastImportantSeam(leastPixelsImportance, width, height, leastImportantSeam);
// remove the least important seam
removeSeam(in, width, height, out, leastImportantSeam);
// free memories
free(grayScalePixels);
free(pixelsImportance);
free(leastPixelsImportance);
free(leastImportantSeam);
}
__global__ void convertRgb2GrayKernel(uchar3 *in, int width, int height, int *out){
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < width && row < height){
int idx = row * width + col;
out[idx] = 0.299f * in[idx].x + 0.587f * in[idx].y + 0.114f * in[idx].z;
}
}
__global__ void getPixelsImportanceKernel (int *in, int width, int height, int filterWidth, int *out){
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if (col < width && row < height){
int margin = filterWidth / 2;
int curIdx = row * width + col;
float xSum = 0, ySum = 0;
for (int filterRow = -margin; filterRow <= margin; filterRow++){
for (int filterCol = -margin; filterCol <= margin; filterCol++){
int filterIdx = (filterRow + margin) * filterWidth + filterCol + margin;
int dx = min(width - 1, max(0, col + filterCol));
int dy = min(height - 1, max(0, row + filterRow));
int idx = dy * width + dx;
xSum += in[idx] * dc_xFilter[filterIdx];
ySum += in[idx] * dc_yFilter[filterIdx];
}
}
out[curIdx] = abs(xSum) + abs(ySum);
}
}
__global__ void upTriangle (int *in, int width, int height, int yStart, int yStop, int baseWith, int *out){
int xStart = baseWith * blockIdx.x * blockDim.x + threadIdx.x * baseWith;
int xStop = xStart + baseWith - 1;
for (int y = yStart; y >= yStop; y--){
for (int x = xStart; x <= xStop; x++){
if (x < width){
int idx = y * width + x;
int below = (y + 1) * width + x;
int left = (y + 1) * width + max(0, x - 1);
int right = (y + 1) * width + min(width - 1, x + 1);
out[idx] = in[idx] + min(out[below], min(out[left], out[right]));
}
}
xStart += 1;
xStop -= 1;
}
}
__global__ void downTriangle (int *in, int width, int height, int yStart, int yStop, int baseWith, int *out){
int xStop = baseWith * (threadIdx.x + blockDim.x * blockIdx.x);
int xStart = xStop - 1;
for (int y = yStart; y >= yStop; y--){
for (int x = xStart; x <= xStop; x++){
if (x >= 0 && x < width){
int idx = y * width + x;
int below = (y + 1) * width + x;
int left = (y + 1) * width + max(0, x - 1);
int right = (y + 1) * width + min(width - 1, x + 1);
out[idx] = in[idx] + min(out[below], min(out[left], out[right]));
}
}
xStart -= 1;
xStop += 1;
}
}
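// Note: upTriangle and downTriangle together sweep one horizontal strip of the
// cost table. In upTriangle each thread owns a base of `baseWith` columns and
// shrinks it by one cell on each side per row as it moves upward, so every
// cell it writes depends only on cells it has already produced (or on the
// finished row below the strip) and no inter-thread synchronization is needed.
// downTriangle is then launched to fill the complementary inverted triangles
// between those bases from the values the up pass left behind.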
__device__ int bCount = 0;
volatile __device__ int bCount1 = 0;
__global__ void resetCount(){
if (threadIdx.x == 0){
bCount = 0;
bCount1 = 0;
}
}
__global__ void getMinColSeam (int *in, int width, int *out){
extern __shared__ int s_mem[];
__shared__ int bi;
if (threadIdx.x == 0){
bi = atomicAdd(&bCount, 1);
}
__syncthreads();
int i = blockDim.x * bi * 2 + threadIdx.x;
if (i < width)
s_mem[threadIdx.x] = i;
if (i + blockDim.x < width)
s_mem[threadIdx.x + blockDim.x] = i + blockDim.x;
__syncthreads();
for (int stride = blockDim.x; stride >= 1; stride /= 2)
{
if (threadIdx.x < stride)
{
if (i + stride < width){
if (in[s_mem[threadIdx.x]] > in[s_mem[threadIdx.x + stride]]){
s_mem[threadIdx.x] = s_mem[threadIdx.x + stride];
}
}
}
__syncthreads();
}
if (threadIdx.x == 0){
out[bi] = s_mem[0];
if (bi > 0){
while(bCount1 < bi) {}
if (in[out[bi]] < in[out[0]])
out[0] = out[bi];
}
bCount1 += 1;
}
}
void seamCarvingDevice(const uchar3 *in, int width, int height, uchar3 *out, int *xFilter, int *yFilter, int filterWidth, dim3 blockSize, int baseWith){
// prepare some values
int lastRowIdx = (height - 1) * width;
int stripHeight = baseWith % 2 == 0 ? baseWith / 2 + 1 : (baseWith + 1) / 2 + 1;
int gridSizeTriangle = (width - 1) / (blockSize.x * baseWith) + 1;
int minColGridSize = (width - 1) / (2 * blockSize.x) + 1;
size_t dataSize = width * height * sizeof(uchar3);
size_t rowSize = width * sizeof(int);
size_t grayScaleSize = width * height * sizeof(int);
dim3 gridSize((width - 1) / blockSize.x + 1, (height - 1) / blockSize.y + 1);
// allocate device memories
uchar3 *d_in;
int *d_grayScalePixels, *d_pixelsImportance, *d_leastImportantPixels, *d_minCol;
CHECK(hipMalloc(&d_in, dataSize));
CHECK(hipMalloc(&d_grayScalePixels, grayScaleSize));
CHECK(hipMalloc(&d_pixelsImportance, grayScaleSize));
CHECK(hipMalloc(&d_leastImportantPixels, grayScaleSize));
CHECK(hipMalloc(&d_minCol, minColGridSize * sizeof(int)));
// allocate host memories
int *leastPixelsImportance = (int *)malloc(grayScaleSize);
int *leastImportantSeam = (int *)malloc(height * sizeof(int));
int *minCol = (int *)malloc(minColGridSize * sizeof(int));
// copy data to device memories
CHECK(hipMemcpy(d_in, in, dataSize, hipMemcpyHostToDevice));
// convert image to grayscale
hipLaunchKernelGGL(( convertRgb2GrayKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_in, width, height, d_grayScalePixels);
CHECK(hipGetLastError());
// edge detection
hipLaunchKernelGGL(( getPixelsImportanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_grayScalePixels, width, height, filterWidth, d_pixelsImportance);
CHECK(hipGetLastError());
// find the least important pixels
CHECK(hipMemcpy(d_leastImportantPixels + lastRowIdx, d_pixelsImportance + lastRowIdx, rowSize, hipMemcpyDeviceToDevice));
for (int y = height - 2; y >= 0; y -= stripHeight){
int yStart = y;
int yStop = max(0, yStart - stripHeight + 1);
hipLaunchKernelGGL(( upTriangle), dim3(gridSizeTriangle), dim3(blockSize.x), 0, 0, d_pixelsImportance, width, height, yStart, yStop, baseWith, d_leastImportantPixels);
yStart = max(0, yStart - 1);
yStop = max(0, yStart - stripHeight + 1);
hipLaunchKernelGGL(( downTriangle), dim3(gridSizeTriangle + 1), dim3(blockSize.x), 0, 0, d_pixelsImportance, width, height, yStart, yStop, baseWith, d_leastImportantPixels);
}
CHECK(hipMemcpy(leastPixelsImportance, d_leastImportantPixels, grayScaleSize, hipMemcpyDeviceToHost));
// find the least important seam
hipLaunchKernelGGL(( resetCount), dim3(1), dim3(1), 0, 0, );
hipLaunchKernelGGL(( getMinColSeam), dim3(minColGridSize), dim3(blockSize.x), blockSize.x * 2 * sizeof(int), 0, d_leastImportantPixels, width, d_minCol);
int mc;
CHECK(hipMemcpy(&mc, d_minCol, sizeof(int), hipMemcpyDeviceToHost));
getSeamAt(leastPixelsImportance, width, height, leastImportantSeam, mc);
// remove the least important seam
removeSeam(in, width, height, out, leastImportantSeam);
// free memories
CHECK(hipFree(d_in));
CHECK(hipFree(d_grayScalePixels));
CHECK(hipFree(d_pixelsImportance));
CHECK(hipFree(d_leastImportantPixels));
CHECK(hipFree(d_minCol));
free(leastPixelsImportance);
free(leastImportantSeam);
free(minCol);
}
void seamCarving(const uchar3 *in, int width, int height, uchar3 *out, int newWidth, int *xFilter, int *yFilter, int filterWidth, bool usingDevice=false, dim3 blockSize=dim3(1, 1), int baseWith = 0){
if (usingDevice == false){
printf("\nSeam carving by host\n");
}
else{
printf("\nSeam carving by device\n");
// copy x filter, y filter on host to dc_x filter, dc_y filter on device
size_t filterSize = filterWidth * filterWidth * sizeof(int);
CHECK(hipMemcpyToSymbol(dc_xFilter, xFilter, filterSize));
CHECK(hipMemcpyToSymbol(dc_yFilter, yFilter, filterSize));
}
GpuTimer timer;
timer.Start();
// allocate host memories
uchar3 *src = (uchar3 *)malloc(width * height * sizeof(uchar3));
uchar3 *dst = (uchar3 *)malloc(width * height * sizeof(uchar3));
// store the pointer for freeing
uchar3 *originalSrc = src;
uchar3 *originalDst = dst;
// copy input data to src pointer
memcpy(src, in, width * height * sizeof(uchar3));
// do the seam carving by decrease width by 1 until newWidth
for (int w = width; w > newWidth; w--){
// resize the dst pointer with current width - 1;
dst = (uchar3 *)realloc(dst, (w-1) * height * sizeof(uchar3));
// seamCarving the picture
if (usingDevice == false){
seamCarvingHost(src, w, height, dst, xFilter, yFilter, filterWidth);
}
else{
seamCarvingDevice(src, w, height, dst, xFilter, yFilter, filterWidth, blockSize, baseWith);
}
// swap src and dst
uchar3 * temp = src;
src = dst;
dst = temp;
}
// copy the output data to the out pointer
memcpy(out, src, newWidth * height * sizeof(uchar3));
// free memories
free(originalDst);
free(originalSrc);
timer.Stop();
printf("Time: %.3f ms\n", timer.Eplapsed());
}
float computeError (uchar3 *a1, uchar3* a2, int n){
float err = 0;
for (int i = 0; i < n; i++){
err += abs((int)a1[i].x - (int)a2[i].x);
err += abs((int)a1[i].y - (int)a2[i].y);
err += abs((int)a1[i].z - (int)a2[i].z);
}
err /= (n * 3);
return err;
}
void printError (uchar3 *a1, uchar3 *a2, int width, int height){
float err = computeError(a1, a2, width * height);
printf("Error: %f\n", err);
}
void printDeviceInfo(int codeVer){
hipDeviceProp_t devProv;
CHECK(hipGetDeviceProperties(&devProv, 0));
printf("Version of code: %d\n", codeVer);
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %lu bytes\n", devProv.totalGlobalMem);
printf("CMEM: %lu bytes\n", devProv.totalConstMem);
printf("L2 cache: %i bytes\n", devProv.l2CacheSize);
printf("SMEM / one SM: %lu bytes\n", devProv.sharedMemPerMultiprocessor);
printf("****************************\n");
}
char *concatStr(const char *s1, const char *s2){
char *result = (char *)malloc(strlen(s1) + strlen(s2) + 1);
strcpy(result, s1);
strcat(result, s2);
return result;
}
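// Expected invocation, inferred from the argument parsing below (the program name is
// only a placeholder):
//   ./seam_carving <input.pnm> <seamCount> <baseWith> [<blockSizeX> <blockSizeY>]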
int main (int argc, char **argv){
if (argc != 4 && argc != 6){
printf("The number of arguments is invalid\n");
return EXIT_FAILURE;
}
int seamCount = atoi(argv[2]);
int baseWith = atoi(argv[3]);
// Read input image file
int width, height;
uchar3 *inPixels;
readRGBPnm(argv[1], width, height, inPixels);
printf("\nImage size (width * height): %i x %i\n", width, height);
int newWidth = width - seamCount;
if (newWidth <= 0){
printf("The count of removed seams must be smaller than the width of the image");
return EXIT_FAILURE;
}
printf("\nNew image size (width * height): %i x %i\n", newWidth, height);
// print device info
int codeVer = 1;
printDeviceInfo(codeVer);
// init out pointer
uchar3 *correctOutPixels = (uchar3 *)malloc(newWidth * height * sizeof(uchar3));
uchar3 *outPixels = (uchar3 *)malloc(newWidth * height * sizeof(uchar3));
// Set up x sobel filter and y sobel filter
int filterWidth = FILTER_WIDTH;
int *xFilter = (int *)malloc(filterWidth * filterWidth * sizeof(int));
int *yFilter = (int *)malloc(filterWidth * filterWidth * sizeof(int));
initSobelFilter(xFilter, true);
initSobelFilter(yFilter, false);
// Seam carving not using device
seamCarving(inPixels, width, height, correctOutPixels, newWidth, xFilter, yFilter, filterWidth);
// get input block size
dim3 blockSize(32, 32); //default
if (argc == 6){
blockSize.x = atoi(argv[4]);
blockSize.y = atoi(argv[5]);
}
// Seam carving using device
seamCarving(inPixels, width, height, outPixels, newWidth, xFilter, yFilter, filterWidth, true, blockSize, baseWith);
printError(correctOutPixels, outPixels, newWidth, height);
// Write results to files
char *outFileNameBase = strtok(argv[1], "."); //get rid of extension
writeRGBPnm(correctOutPixels, newWidth, height, concatStr(outFileNameBase, "_host.pnm"));
writeRGBPnm(outPixels, newWidth, height, concatStr(outFileNameBase, "_device.pnm"));
// Free memories
free(inPixels);
free(xFilter);
free(yFilter);
free(correctOutPixels);
free(outPixels);
}
| af5c5f0b42556ac874ded918ddf1edeac394bfae.cu | #include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <algorithm>
#define FILTER_WIDTH 3
__constant__ int dc_xFilter[FILTER_WIDTH * FILTER_WIDTH];
__constant__ int dc_yFilter[FILTER_WIDTH * FILTER_WIDTH];
#define CHECK(call){\
const cudaError_t error = call;\
if (error != cudaSuccess){\
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\
fprintf(stderr, "code: %d, reason: %s\n", error, cudaGetErrorString(error));\
exit(EXIT_FAILURE);\
}\
}
struct GpuTimer{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer(){
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer(){
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start(){
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
void Stop(){
cudaEventRecord(stop, 0);
}
float Eplapsed(){
float eplapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&eplapsed, start, stop);
return eplapsed;
}
};
void readRGBPnm (char *fileName, int &width, int &height, uchar3 *&pixels){
FILE *f = fopen(fileName, "r");
if (f == NULL){
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
char type[3];
fscanf(f, "%s", type);
// Check the type of input img
if (strcmp(type, "P3") != 0){
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
fscanf(f, "%i", &width);
fscanf(f, "%i", &height);
int maxVal;
fscanf(f, "%i", &maxVal);
// Assume 1 byte per value
if (maxVal > 255){
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
pixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
for (int i = 0; i< width * height; i++){
fscanf(f, "%hhu%hhu%hhu", &pixels[i].x, &pixels[i].y, &pixels[i].z);
}
fclose(f);
}
void writeRGBPnm (const uchar3 *pixels, int width, int height, char *fileName){
FILE *f = fopen(fileName, "w");
if (f == NULL){
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
fprintf(f, "P3\n%i\n%i\n255\n", width, height);
for (int i = 0; i < width * height; i++){
fprintf(f, "%hhu\n%hhu\n%hhu\n", pixels[i].x, pixels[i].y, pixels[i].z);
}
fclose(f);
}
void writeGrayScalePnm (int *pixels, int width, int height, char *fileName){
FILE *f = fopen(fileName, "w");
if (f == NULL){
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
fprintf(f, "P2\n%i\n%i\n255\n", width, height);
for (int i = 0; i < width * height; i++){
fprintf(f, "%hhu\n", pixels[i]);
}
fclose(f);
}
void writeMatrixTxt (int *pixels, int width, int height, char *fileName){
FILE *f = fopen(fileName, "w");
if (f == NULL){
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){
fprintf(f, "%d ", pixels[i * width + j]);
}
fprintf(f, "\n");
}
fclose(f);
}
void initSobelFilter(int *filter, bool horizontal){
int filterWidth = FILTER_WIDTH;
int val = 0;
int margin = filterWidth / 2;
for (int filterR = 0; filterR < filterWidth; filterR++){
for (int filterC = 0; filterC < filterWidth; filterC++){
if (horizontal == true){
if (filterC < margin){
val = 1;
}
else if (filterC == margin){
val = 0;
}
else{
val = -1;
}
if (filterR == margin){
val *= 2;
}
}
else{
if (filterR < margin){
val = 1;
}
else if (filterR == margin){
val = 0;
}
else{
val = -1;
}
if (filterC == margin){
val *= 2;
}
}
filter[filterR * filterWidth + filterC] = val;
}
}
}
void convertRgb2Gray (const uchar3 *in, int n, int *out){
for (int i = 0; i < n; i++){
out[i] = 0.299f * in[i].x + 0.587f * in[i].y + 0.114f * in[i].z;
}
}
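// Pixel importance (energy): Sobel responses in x and y with replicated borders,
// combined as |Gx| + |Gy|.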
void getPixelsImportance (int *in, int width, int height, int *xFilter, int *yFilter, int filterWidth, int *out){
int margin = filterWidth / 2;
for (int col = 0; col < width; col++){
for (int row = 0; row < height; row++){
int curIdx = row * width + col;
float xSum = 0, ySum = 0;
for (int filterRow = -margin; filterRow <= margin; filterRow++){
for (int filterCol = -margin; filterCol <= margin; filterCol++){
int filterIdx = (filterRow + margin) * filterWidth + filterCol + margin;
int dx = min(width - 1, max(0, col + filterCol));
int dy = min(height - 1, max(0, row + filterRow));
int idx = dy * width + dx;
xSum += in[idx] * xFilter[filterIdx];
ySum += in[idx] * yFilter[filterIdx];
}
}
out[curIdx] = abs(xSum) + abs(ySum);
}
}
}
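// Bottom-up dynamic programming: out(row, col) = in(row, col) plus the minimum of the
// three cumulative values directly below, so the top row ends up holding the total
// energy of the cheapest seam starting at each column.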
void getLeastImportantPixels (int *in, int width, int height, int *out){
int lastRow = (height - 1) * width;
memcpy(out + lastRow, in + lastRow, width * sizeof(int));
for (int row = height - 2; row >= 0; row--){
int below = row + 1;
for (int col = 0; col < width; col++ ){
int idx = row * width + col;
int leftCol = max(0, col - 1);
int rightCol = min(width - 1, col + 1);
int belowIdx = below * width + col;
int leftBelowIdx = below * width + leftCol;
int rightBelowIdx = below * width + rightCol;
out[idx] = min(out[belowIdx], min(out[leftBelowIdx], out[rightBelowIdx])) + in[idx];
}
}
}
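// Traces one seam downwards from the given top-row column, greedily stepping to
// whichever of the three neighbouring columns in the next row has the smallest
// cumulative energy.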
void getSeamAt (int *in, int width, int height, int *out, int col){
out[0] = col;
for (int row = 1; row < height; row++){
int col = out[row - 1];
int idx = row * width + col;
int leftCol = max(0, col - 1);
int rightCol = min(width - 1, col + 1);
int leftIdx = row * width + leftCol;
int rightIdx = row * width + rightCol;
if (in[leftIdx] < in[idx]){
if (in[leftIdx] < in[rightIdx])
out[row] = leftCol;
else
out[row] = rightCol;
}
else{
if (in[idx] < in[rightIdx])
out[row] = col;
else
out[row] = rightCol;
}
}
}
void getLeastImportantSeam (int *in, int width, int height, int *out){
int minCol = 0;
for (int i = 0; i < width; i++){
if (in[i] < in[minCol])
minCol = i;
}
// printf("min col %d-%d\n", minCol, in[minCol]);
getSeamAt(in, width, height, out, minCol);
}
void removeSeam (const uchar3 *in, int width, int height, uchar3 *out, int *seam){
int newWidth = width - 1;
for (int row = 0; row < height; row++){
int col = seam[row];
memcpy(out + row * newWidth, in + row * width, col * sizeof(uchar3));
int nextIdxOut = row * newWidth + col;
int nextIdxIn = row * width + col + 1;
memcpy(out + nextIdxOut, in + nextIdxIn, (newWidth - col) * sizeof(uchar3));
}
}
void seamCarvingHost(const uchar3 *in, int width, int height, uchar3 *out, int *xFilter, int *yFilter, int filterWidth){
// convert image to grayscale
int *grayScalePixels = (int *)malloc(width * height * sizeof(int));
convertRgb2Gray(in, width * height, grayScalePixels);
// edge detection
int *pixelsImportance = (int *)malloc(width * height * sizeof(int));
getPixelsImportance(grayScalePixels, width, height, xFilter, yFilter, filterWidth, pixelsImportance);
// find the least important seam
int *leastPixelsImportance = (int *)malloc(width * height * sizeof(int));
getLeastImportantPixels(pixelsImportance, width, height, leastPixelsImportance);
int *leastImportantSeam = (int *)malloc(height * sizeof(int));
getLeastImportantSeam(leastPixelsImportance, width, height, leastImportantSeam);
// remove the least important seam
removeSeam(in, width, height, out, leastImportantSeam);
// free memories
free(grayScalePixels);
free(pixelsImportance);
free(leastPixelsImportance);
free(leastImportantSeam);
}
__global__ void convertRgb2GrayKernel(uchar3 *in, int width, int height, int *out){
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < width && row < height){
int idx = row * width + col;
out[idx] = 0.299f * in[idx].x + 0.587f * in[idx].y + 0.114f * in[idx].z;
}
}
__global__ void getPixelsImportanceKernel (int *in, int width, int height, int filterWidth, int *out){
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if (col < width && row < height){
int margin = filterWidth / 2;
int curIdx = row * width + col;
float xSum = 0, ySum = 0;
for (int filterRow = -margin; filterRow <= margin; filterRow++){
for (int filterCol = -margin; filterCol <= margin; filterCol++){
int filterIdx = (filterRow + margin) * filterWidth + filterCol + margin;
int dx = min(width - 1, max(0, col + filterCol));
int dy = min(height - 1, max(0, row + filterRow));
int idx = dy * width + dx;
xSum += in[idx] * dc_xFilter[filterIdx];
ySum += in[idx] * dc_yFilter[filterIdx];
}
}
out[curIdx] = abs(xSum) + abs(ySum);
}
}
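// The bottom-up DP pass is parallelized over strips of rows using triangles so that no
// inter-block synchronization is needed: each thread sweeps an upward-shrinking triangle
// of base width `baseWith`, so every cell it updates only depends on values already
// computed in the same sweep or in the previous strip.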
__global__ void upTriangle (int *in, int width, int height, int yStart, int yStop, int baseWith, int *out){
int xStart = baseWith * blockIdx.x * blockDim.x + threadIdx.x * baseWith;
int xStop = xStart + baseWith - 1;
for (int y = yStart; y >= yStop; y--){
for (int x = xStart; x <= xStop; x++){
if (x < width){
int idx = y * width + x;
int below = (y + 1) * width + x;
int left = (y + 1) * width + max(0, x - 1);
int right = (y + 1) * width + min(width - 1, x + 1);
out[idx] = in[idx] + min(out[below], min(out[left], out[right]));
}
}
xStart += 1;
xStop -= 1;
}
}
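// Complementary pass: each thread fills the inverted triangle between two adjacent
// upTriangle regions, completing the strip of rows without inter-block synchronization.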
__global__ void downTriangle (int *in, int width, int height, int yStart, int yStop, int baseWith, int *out){
int xStop = baseWith * (threadIdx.x + blockDim.x * blockIdx.x);
int xStart = xStop - 1;
for (int y = yStart; y >= yStop; y--){
for (int x = xStart; x <= xStop; x++){
if (x >= 0 && x < width){
int idx = y * width + x;
int below = (y + 1) * width + x;
int left = (y + 1) * width + max(0, x - 1);
int right = (y + 1) * width + min(width - 1, x + 1);
out[idx] = in[idx] + min(out[below], min(out[left], out[right]));
}
}
xStart -= 1;
xStop += 1;
}
}
__device__ int bCount = 0;
volatile __device__ int bCount1 = 0;
__global__ void resetCount(){
if (threadIdx.x == 0){
bCount = 0;
bCount1 = 0;
}
}
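// Finds the column of the top row with the smallest cumulative energy: each block
// reduces a 2*blockDim.x-wide chunk in shared memory, then blocks are serialized
// through bCount1 so that out[0] ends up holding the global minimum column.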
__global__ void getMinColSeam (int *in, int width, int *out){
extern __shared__ int s_mem[];
__shared__ int bi;
if (threadIdx.x == 0){
bi = atomicAdd(&bCount, 1);
}
__syncthreads();
int i = blockDim.x * bi * 2 + threadIdx.x;
if (i < width)
s_mem[threadIdx.x] = i;
if (i + blockDim.x < width)
s_mem[threadIdx.x + blockDim.x] = i + blockDim.x;
__syncthreads();
for (int stride = blockDim.x; stride >= 1; stride /= 2)
{
if (threadIdx.x < stride)
{
if (i + stride < width){
if (in[s_mem[threadIdx.x]] > in[s_mem[threadIdx.x + stride]]){
s_mem[threadIdx.x] = s_mem[threadIdx.x + stride];
}
}
}
__syncthreads();
}
if (threadIdx.x == 0){
out[bi] = s_mem[0];
if (bi > 0){
while(bCount1 < bi) {}
if (in[out[bi]] < in[out[0]])
out[0] = out[bi];
}
bCount1 += 1;
}
}
void seamCarvingDevice(const uchar3 *in, int width, int height, uchar3 *out, int *xFilter, int *yFilter, int filterWidth, dim3 blockSize, int baseWith){
// prepare some values
int lastRowIdx = (height - 1) * width;
int stripHeight = baseWith % 2 == 0 ? baseWith / 2 + 1 : (baseWith + 1) / 2 + 1;
int gridSizeTriangle = (width - 1) / (blockSize.x * baseWith) + 1;
int minColGridSize = (width - 1) / (2 * blockSize.x) + 1;
size_t dataSize = width * height * sizeof(uchar3);
size_t rowSize = width * sizeof(int);
size_t grayScaleSize = width * height * sizeof(int);
dim3 gridSize((width - 1) / blockSize.x + 1, (height - 1) / blockSize.y + 1);
// allocate device memories
uchar3 *d_in;
int *d_grayScalePixels, *d_pixelsImportance, *d_leastImportantPixels, *d_minCol;
CHECK(cudaMalloc(&d_in, dataSize));
CHECK(cudaMalloc(&d_grayScalePixels, grayScaleSize));
CHECK(cudaMalloc(&d_pixelsImportance, grayScaleSize));
CHECK(cudaMalloc(&d_leastImportantPixels, grayScaleSize));
CHECK(cudaMalloc(&d_minCol, minColGridSize * sizeof(int)));
// allocate host memories
int *leastPixelsImportance = (int *)malloc(grayScaleSize);
int *leastImportantSeam = (int *)malloc(height * sizeof(int));
int *minCol = (int *)malloc(minColGridSize * sizeof(int));
// copy data to device memories
CHECK(cudaMemcpy(d_in, in, dataSize, cudaMemcpyHostToDevice));
// convert image to grayscale
convertRgb2GrayKernel<<<gridSize, blockSize>>>(d_in, width, height, d_grayScalePixels);
CHECK(cudaGetLastError());
// edge detection
getPixelsImportanceKernel<<<gridSize, blockSize>>>(d_grayScalePixels, width, height, filterWidth, d_pixelsImportance);
CHECK(cudaGetLastError());
// find the least important pixels
CHECK(cudaMemcpy(d_leastImportantPixels + lastRowIdx, d_pixelsImportance + lastRowIdx, rowSize, cudaMemcpyDeviceToDevice));
for (int y = height - 2; y >= 0; y -= stripHeight){
int yStart = y;
int yStop = max(0, yStart - stripHeight + 1);
upTriangle<<<gridSizeTriangle, blockSize.x>>>(d_pixelsImportance, width, height, yStart, yStop, baseWith, d_leastImportantPixels);
yStart = max(0, yStart - 1);
yStop = max(0, yStart - stripHeight + 1);
downTriangle<<<gridSizeTriangle + 1, blockSize.x>>>(d_pixelsImportance, width, height, yStart, yStop, baseWith, d_leastImportantPixels);
}
CHECK(cudaMemcpy(leastPixelsImportance, d_leastImportantPixels, grayScaleSize, cudaMemcpyDeviceToHost));
// find the least important seam
resetCount<<<1, 1>>>();
getMinColSeam<<<minColGridSize, blockSize.x, blockSize.x * 2 * sizeof(int)>>>(d_leastImportantPixels, width, d_minCol);
int mc;
CHECK(cudaMemcpy(&mc, d_minCol, sizeof(int), cudaMemcpyDeviceToHost));
getSeamAt(leastPixelsImportance, width, height, leastImportantSeam, mc);
// remove the least important seam
removeSeam(in, width, height, out, leastImportantSeam);
// free memories
CHECK(cudaFree(d_in));
CHECK(cudaFree(d_grayScalePixels));
CHECK(cudaFree(d_pixelsImportance));
CHECK(cudaFree(d_leastImportantPixels));
CHECK(cudaFree(d_minCol));
free(leastPixelsImportance);
free(leastImportantSeam);
free(minCol);
}
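// Driver: removes (width - newWidth) vertical seams one at a time, alternating the
// src/dst buffers after each pass, and dispatches to the host or device implementation.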
void seamCarving(const uchar3 *in, int width, int height, uchar3 *out, int newWidth, int *xFilter, int *yFilter, int filterWidth, bool usingDevice=false, dim3 blockSize=dim3(1, 1), int baseWith = 0){
if (usingDevice == false){
printf("\nSeam carving by host\n");
}
else{
printf("\nSeam carving by device\n");
// copy x filter, y filter on host to dc_x filter, dc_y filter on device
size_t filterSize = filterWidth * filterWidth * sizeof(int);
CHECK(cudaMemcpyToSymbol(dc_xFilter, xFilter, filterSize));
CHECK(cudaMemcpyToSymbol(dc_yFilter, yFilter, filterSize));
}
GpuTimer timer;
timer.Start();
// allocate host memories
uchar3 *src = (uchar3 *)malloc(width * height * sizeof(uchar3));
uchar3 *dst = (uchar3 *)malloc(width * height * sizeof(uchar3));
// copy input data to src pointer
memcpy(src, in, width * height * sizeof(uchar3));
// do the seam carving, decreasing the width by 1 each pass until it reaches newWidth
for (int w = width; w > newWidth; w--){
// resize the dst pointer with current width - 1;
dst = (uchar3 *)realloc(dst, (w-1) * height * sizeof(uchar3));
// seamCarving the picture
if (usingDevice == false){
seamCarvingHost(src, w, height, dst, xFilter, yFilter, filterWidth);
}
else{
seamCarvingDevice(src, w, height, dst, xFilter, yFilter, filterWidth, blockSize, baseWith);
}
// swap src and dst
uchar3 * temp = src;
src = dst;
dst = temp;
}
// copy the output data to the out pointer
memcpy(out, src, newWidth * height * sizeof(uchar3));
// free memories (use the current pointers: realloc may have moved the buffers)
free(dst);
free(src);
timer.Stop();
printf("Time: %.3f ms\n", timer.Eplapsed());
}
float computeError (uchar3 *a1, uchar3* a2, int n){
float err = 0;
for (int i = 0; i < n; i++){
err += abs((int)a1[i].x - (int)a2[i].x);
err += abs((int)a1[i].y - (int)a2[i].y);
err += abs((int)a1[i].z - (int)a2[i].z);
}
err /= (n * 3);
return err;
}
void printError (uchar3 *a1, uchar3 *a2, int width, int height){
float err = computeError(a1, a2, width * height);
printf("Error: %f\n", err);
}
void printDeviceInfo(int codeVer){
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("Vesrion of code: %d\n", codeVer);
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %lu bytes\n", devProv.totalGlobalMem);
printf("CMEM: %lu bytes\n", devProv.totalConstMem);
printf("L2 cache: %i bytes\n", devProv.l2CacheSize);
printf("SMEM / one SM: %lu bytes\n", devProv.sharedMemPerMultiprocessor);
printf("****************************\n");
}
char *concatStr(const char *s1, const char *s2){
char *result = (char *)malloc(strlen(s1) + strlen(s2) + 1);
strcpy(result, s1);
strcat(result, s2);
return result;
}
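// Expected invocation, inferred from the argument parsing below (the program name is
// only a placeholder):
//   ./seam_carving <input.pnm> <seamCount> <baseWith> [<blockSizeX> <blockSizeY>]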
int main (int argc, char **argv){
if (argc != 4 && argc != 6){
printf("The number of arguments is invalid\n");
return EXIT_FAILURE;
}
int seamCount = atoi(argv[2]);
int baseWith = atoi(argv[3]);
// Read input image file
int width, height;
uchar3 *inPixels;
readRGBPnm(argv[1], width, height, inPixels);
printf("\nImage size (width * height): %i x %i\n", width, height);
int newWidth = width - seamCount;
if (newWidth <= 0){
printf("The count of removed seams must be smaller than the width of the image");
return EXIT_FAILURE;
}
printf("\nNew image size (width * height): %i x %i\n", newWidth, height);
// print device info
int codeVer = 1;
printDeviceInfo(codeVer);
// init out pointer
uchar3 *correctOutPixels = (uchar3 *)malloc(newWidth * height * sizeof(uchar3));
uchar3 *outPixels = (uchar3 *)malloc(newWidth * height * sizeof(uchar3));
// Set up x sobel filter and y sobel filter
int filterWidth = FILTER_WIDTH;
int *xFilter = (int *)malloc(filterWidth * filterWidth * sizeof(int));
int *yFilter = (int *)malloc(filterWidth * filterWidth * sizeof(int));
initSobelFilter(xFilter, true);
initSobelFilter(yFilter, false);
// Seam carving not using device
seamCarving(inPixels, width, height, correctOutPixels, newWidth, xFilter, yFilter, filterWidth);
// get input block size
dim3 blockSize(32, 32); //default
if (argc == 6){
blockSize.x = atoi(argv[4]);
blockSize.y = atoi(argv[5]);
}
// Seam carving using device
seamCarving(inPixels, width, height, outPixels, newWidth, xFilter, yFilter, filterWidth, true, blockSize, baseWith);
printError(correctOutPixels, outPixels, newWidth, height);
// Write results to files
char *outFileNameBase = strtok(argv[1], "."); //get rid of extension
writeRGBPnm(correctOutPixels, newWidth, height, concatStr(outFileNameBase, "_host.pnm"));
writeRGBPnm(outPixels, newWidth, height, concatStr(outFileNameBase, "_device.pnm"));
// Free memories
free(inPixels);
free(xFilter);
free(yFilter);
free(correctOutPixels);
free(outPixels);
}
|
2911792a5037480a20a58cc576ef1a87eb789d0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2018 ETH Zürich
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "view_correction/cuda_tv_inpainting_functions.cuh"
#include <hipcub/hipcub.hpp>
#include <glog/logging.h>
#include "view_correction/cuda_util.h"
#include "view_correction/helper_math.h"
namespace view_correction {
constexpr float kSqrt2 = 1.4142135623731f;
constexpr float kHuberEpsilon = 0.01f;
constexpr float kCellChangeThreshold = 10e-6f;
constexpr float kDualIntToFloat = 2.f / 32767; // std::numeric_limits<int16_t>::max();
// One iteration is one dual (D) and one primal (P) step. Theoretically one
// could make this more fine-grained to for example also do DPD and PDP, but it
// would complicate the code unnecessarily.
constexpr int kIterationsPerKernelCall = 4;
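// Initializes the primal/dual variables from the input depth map: the dual variables are
// zeroed, u and u_bar start from the (scaled) input, tv_flag marks pixels to inpaint
// (input depth == 0), tv_dual_flag marks pixels in or next to that region, and a
// per-block activity byte is written into block_coordinates for later compaction.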
__global__ void TVInpaintingInitializeVariablesKernel(
int grid_dim_x,
bool kUseSingleKernel,
float depth_input_scaling_factor,
hipTextureObject_t depth_map_input,
CUDABuffer_<bool> tv_flag,
CUDABuffer_<bool> tv_dual_flag,
CUDABuffer_<int16_t> tv_dual_x,
CUDABuffer_<int16_t> tv_dual_y,
CUDABuffer_<float> tv_u,
CUDABuffer_<float> tv_u_bar,
CUDABuffer_<uint16_t> block_coordinates) {
const int width = tv_u.width();
const int height = tv_u.height();
unsigned int x;
unsigned int y;
if (kUseSingleKernel) {
const int kBlockOutputSizeX = 32 - 2 * kIterationsPerKernelCall;
const int kBlockOutputSizeY = 32 - 2 * kIterationsPerKernelCall;
x = blockIdx.x * kBlockOutputSizeX + threadIdx.x - kIterationsPerKernelCall;
y = blockIdx.y * kBlockOutputSizeY + threadIdx.y - kIterationsPerKernelCall;
} else {
x = blockIdx.x * blockDim.x + threadIdx.x;
y = blockIdx.y * blockDim.y + threadIdx.y;
}
const bool kOutput =
threadIdx.x >= kIterationsPerKernelCall &&
threadIdx.y >= kIterationsPerKernelCall &&
threadIdx.x < 32 - kIterationsPerKernelCall &&
threadIdx.y < 32 - kIterationsPerKernelCall &&
x < width &&
y < height;
bool thread_is_active = false;
if (kOutput) {
tv_dual_x(y, x) = 0;
tv_dual_y(y, x) = 0;
const float depth_input = depth_input_scaling_factor * tex2D<float>(depth_map_input, x, y);
tv_flag(y, x) = (depth_input == 0);
thread_is_active =
(depth_input == 0 ||
(x > 0 && tex2D<float>(depth_map_input, x - 1, y) == 0) ||
(y > 0 && tex2D<float>(depth_map_input, x, y - 1) == 0) ||
(x < width - 1 && tex2D<float>(depth_map_input, x + 1, y) == 0) ||
(y < height - 1 && tex2D<float>(depth_map_input, x, y + 1) == 0));
tv_dual_flag(y, x) = thread_is_active;
tv_u(y, x) = depth_input;
tv_u_bar(y, x) = depth_input;
}
typedef hipcub::BlockReduce<
int, 32, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, 32> BlockReduceInt;
__shared__ typename BlockReduceInt::TempStorage int_storage;
int num_active_threads = BlockReduceInt(int_storage).Reduce(thread_is_active ? 1 : 0, hipcub::Sum());
if (threadIdx.x == 0 && threadIdx.y == 0) {
reinterpret_cast<uint8_t*>(block_coordinates.address())[blockIdx.x + blockIdx.y * grid_dim_x] = (num_active_threads > 0) ? 1 : 0;
}
}
// set up all block activities (primal and dual) for all blocks
// that overlap with the inpainting area
__global__ void InitBlockActivationsFromInpaintRegionKernel(
hipTextureObject_t d_f,
const int width,
const int height,
CUDABuffer_<unsigned char> d_block_activities)
{
if (threadIdx.x == 0 && threadIdx.y == 0) {
bool blockIsEmpty = true;
// for all threads in the block
for (int j=0; j<blockDim.y; ++j) {
for (int i=0; i<blockDim.x; ++i) {
const int x = blockIdx.x * blockDim.x + i;
const int y = blockIdx.y * blockDim.y + j;
if (x < width && y < height && tex2D<float>(d_f, x, y) == 0) {
blockIsEmpty = false;
break;
}
}
if (!blockIsEmpty) break;
}
if (blockIsEmpty) d_block_activities(blockIdx.y, blockIdx.x) = 0;
else d_block_activities(blockIdx.y, blockIdx.x) = 3;
} // if (threadIdx.x == 0 && threadIdx.y == 0)
}
// checks the convergence of individual blocks and keeps track of the block boundary
// in order to steer deactivation and reactivation of block updates
__device__ void UpdateBlockActivations(
const float local_value,
const float prev_value,
const float cell_change_threshold,
const unsigned char activity_flag,
volatile float *sdata,
CUDABuffer_<unsigned char> d_block_activities) {
const float diff = local_value != 0 ?
fabs(local_value-prev_value)/fabs(local_value) :
(prev_value != 0 ? fabs(local_value-prev_value)/fabs(prev_value) : 0);
sdata[threadIdx.x + blockDim.x*threadIdx.y] = diff; // save value to shared memory
__syncthreads();
// reduction code to compute column sums of shared memory in parallel
float sum = 0;
float lsum, rsum, tsum = 0, bsum = 0;
if (threadIdx.y == 0) {
for (int j=0; j<blockDim.y; ++j) {
const float value = sdata[threadIdx.x + blockDim.x*j];
if (j == 0) tsum += value;
if (j == blockDim.y-1) bsum += value;
sum += value;
}
if (threadIdx.x == 0) lsum = sum;
if (threadIdx.x == blockDim.x-1) rsum = sum;
sdata[threadIdx.x] = sum;
}
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0) {
// compute final sum for the whole warp
sum = 0;
for (int j=0; j<blockDim.x; ++j) sum += sdata[j];
// unset activity flag if converged (i.e. change was very small)
if (sum < cell_change_threshold*blockDim.x*blockDim.y) {
d_block_activities(blockIdx.y, blockIdx.x) &= ~activity_flag; // unset flag
}
} // if (threadIdx.x == 0 && threadIdx.y == 0)
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0) {
// reactivate neighboring blocks if necessary
if (lsum >= cell_change_threshold*blockDim.y && blockIdx.x > 0)
d_block_activities(blockIdx.y, blockIdx.x-1) |= activity_flag;
if (rsum >= cell_change_threshold*blockDim.y && blockIdx.x < gridDim.x-1)
d_block_activities(blockIdx.y, blockIdx.x+1) |= activity_flag;
if (tsum >= cell_change_threshold*blockDim.x && blockIdx.y > 0)
d_block_activities(blockIdx.y-1, blockIdx.x) |= activity_flag;
if (bsum >= cell_change_threshold*blockDim.x && blockIdx.y < gridDim.y-1)
d_block_activities(blockIdx.y+1, blockIdx.x) |= activity_flag;
} // if (threadIdx.x == 0 && threadIdx.y == 0)
}
// performs primal update and extrapolation step:
// u^{k+1} = u^k + tau* div(p^{k+1})
// \bar{u}^{k+1} = 2*u^{k+1} - u^k
template<bool check_convergence, bool block_adaptive>
__global__ void TVInpaintingPrimalStepKernel(
const float cell_change_threshold,
CUDABuffer_<bool> d_tv_flag,
CUDABuffer_<int16_t> d_dualTVX,
CUDABuffer_<int16_t> d_dualTVY,
CUDABuffer_<float> d_u,
CUDABuffer_<float> d_u_bar,
CUDABuffer_<float> d_m,
CUDABuffer_<unsigned char> d_block_activities) {
if (block_adaptive) {
// check block activity
if (d_block_activities(blockIdx.y, blockIdx.x) == 0) return;
}
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
// this will accumulate the update step for the primal variable
float update = 0;
// this will accumulate all row entries of the linear operator for the preconditioned step width
float rowSum = 0;
float u = 0;
// only update within the inpainting region (f == 0)
if (x < d_u.width() && y < d_u.height() && d_tv_flag(y, x)) {
// compute divergence update of dualTV - Neumann boundary conditions,
// keep track of row sum for preconditioning
update += kDualIntToFloat * (d_dualTVX(y, x) + d_dualTVY(y, x));
rowSum += 2;
if (x > 0) {
update -= kDualIntToFloat * d_dualTVX(y, x - 1);
rowSum++;
}
if (y > 0) {
update -= kDualIntToFloat * d_dualTVY(y - 1, x);
rowSum++;
}
constexpr float kPrimalStepWidth = 1.f;
const float tau = kPrimalStepWidth / rowSum;
u = d_u(y, x);
update = u + tau * update;
// primal proximal point extrapolation
constexpr float kGamma = 0.1f;
update += kGamma * (update - u);
d_u(y, x) = update;
d_u_bar(y, x) = 2 * update - u;
if (check_convergence) {
d_m(y, x) = fabs((update - u) / u);
}
}
if (block_adaptive) {
extern __shared__ float sdata[];
UpdateBlockActivations(update, u, cell_change_threshold, 1,
sdata, d_block_activities);
}
}
// performs dual update step
// p^{k+1} = \Pi_{|p|<=g} [ p^k + \sigma * \nabla \bar{u}^k ]
// p^{k+1} = \Pi_{|p|<=g} [ (p^k + \sigma * \nabla \bar{u}^k) / (1+\sigma*huberEpsilon) ]
template<bool use_weighting, bool block_adaptive>
__global__ void TVInpaintingDualStepKernel(
const float huber_epsilon,
const float cell_change_threshold,
CUDABuffer_<bool> d_tv_dual_flag,
CUDABuffer_<float> d_u,
hipTextureObject_t d_tvWeight,
CUDABuffer_<int16_t> d_dualTVX,
CUDABuffer_<int16_t> d_dualTVY,
CUDABuffer_<unsigned char> d_block_activities) {
if (block_adaptive) {
// check block activity
if (d_block_activities(blockIdx.y, blockIdx.x) == 0) return;
}
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float resultX = 0, resultY = 0;
float dualTVX = 0, dualTVY = 0;
if (x < d_u.width() && y < d_u.height() && d_tv_dual_flag(y, x)) {
// update using the gradient of u
constexpr float kDualStepWidth = 1.f;
const float huberFactor = 1.0f / (1.0f + kDualStepWidth * 0.5f * huber_epsilon);
const float u = d_u(y, x);
dualTVX = kDualIntToFloat * d_dualTVX(y, x);
dualTVY = kDualIntToFloat * d_dualTVY(y, x);
resultX =
huberFactor * (dualTVX + kDualStepWidth * 0.5f *
( (x < d_u.width() - 1) ? (d_u(y, x + 1) - u) : 0 ));
resultY =
huberFactor * (dualTVY + kDualStepWidth * 0.5f *
( (y < d_u.height() - 1) ? (d_u(y + 1, x) - u) : 0 ));
// project onto the g-unit ball
float denom;
if (use_weighting) {
// Optimization: remove 1 / weight and turn division by weight below into multiplication.
const float weight = /*1.f /*/ (1.f + tex2D<uchar>(d_tvWeight, x, y) * kSqrt2 / 255.f);
// const float weight = 1.f / (__expf(tex2D<uchar>(d_tvWeight, x, y) * kSqrt2 / 255.f)*5);
denom = max(1.0f, hypotf(resultX, resultY) * weight);
} else {
denom = max(1.0f, hypotf(resultX, resultY));
}
resultX /= denom;
resultY /= denom;
// dual proximal point extrapolation
constexpr float kGamma = 0.1f;
resultX += kGamma*(resultX-dualTVX);
resultY += kGamma*(resultY-dualTVY);
// write result back into global memory
d_dualTVX(y, x) = resultX * 1.f / kDualIntToFloat;
d_dualTVY(y, x) = resultY * 1.f / kDualIntToFloat;
}
if (block_adaptive) {
extern __shared__ float sdata[];
UpdateBlockActivations(hypotf(resultX, resultY),
hypotf(dualTVX, dualTVY), cell_change_threshold, 2,
sdata, d_block_activities);
}
}
// This kernel does not produce output for the first kIterationsPerKernelCall
// rows and columns and for the last kIterationsPerKernelCall rows and columns.
template<int block_size_x, int block_size_y, bool use_weighting, bool check_convergence>
__global__ void TVInpaintingDualAndPrimalStepsKernel(
const float huber_epsilon,
CUDABuffer_<uint16_t> block_coordinates,
CUDABuffer_<bool> d_tv_flag,
CUDABuffer_<bool> d_tv_dual_flag,
hipTextureObject_t d_tvWeight,
CUDABuffer_<int16_t> d_dualTVX,
CUDABuffer_<int16_t> d_dualTVY,
CUDABuffer_<float> d_u,
CUDABuffer_<float> d_u_bar,
CUDABuffer_<float> d_m) {
const int x = max(0, min(d_u.width() - 1, block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x - kIterationsPerKernelCall));
const int y = max(0, min(d_u.height() - 1, block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y - kIterationsPerKernelCall));
const bool kDualFlag = d_tv_dual_flag(y, x);
const bool kPrimalFlag = d_tv_flag(y, x);
const bool kOutput =
threadIdx.x >= kIterationsPerKernelCall &&
threadIdx.y >= kIterationsPerKernelCall &&
threadIdx.x < block_size_x - kIterationsPerKernelCall &&
threadIdx.y < block_size_y - kIterationsPerKernelCall &&
block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x - kIterationsPerKernelCall < d_u.width() &&
block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y - kIterationsPerKernelCall < d_u.height();
typedef hipcub::BlockReduce<
float, 32, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, 32> BlockReduceFloat;
__shared__ typename BlockReduceFloat::TempStorage float_storage;
// Load inputs into private or shared memory.
__shared__ float u_bar_shared[block_size_x * block_size_y];
__shared__ float dual_x_shared[block_size_x * block_size_y];
__shared__ float dual_y_shared[block_size_x * block_size_y];
int shared_mem_index = threadIdx.x + block_size_x * threadIdx.y;
float u_bar = d_u_bar(y, x);
float dualTVX = kDualIntToFloat * d_dualTVX(y, x);
float dualTVY = kDualIntToFloat * d_dualTVY(y, x);
float u = d_u(y, x);
const float weight = /*1.f /*/ (1.f + tex2D<uchar>(d_tvWeight, x, y) * kSqrt2 / 255.f);
// const float weight = 1.f / (__expf(tex2D<uchar>(d_tvWeight, x, y) * kSqrt2 / 255.f)*5);
u_bar_shared[shared_mem_index] = u_bar;
dual_x_shared[shared_mem_index] = dualTVX;
dual_y_shared[shared_mem_index] = dualTVY;
// Wait for shared memory to be loaded.
__syncthreads();
#pragma unroll
for (int i = 0; i < kIterationsPerKernelCall; ++ i) {
// Dual step.
if (kDualFlag) {
// update using the gradient of u
constexpr float kDualStepWidth = 1.f;
const float huberFactor = 1.0f / (1.0f + kDualStepWidth * 0.5f * huber_epsilon);
float resultX =
huberFactor * (dualTVX + kDualStepWidth * 0.5f *
( (x < d_u_bar.width() - 1 && threadIdx.x < block_size_x - 1) ? (u_bar_shared[shared_mem_index + 1] - u_bar) : 0));
float resultY =
huberFactor * (dualTVY + kDualStepWidth * 0.5f *
( (y < d_u_bar.height() - 1 && threadIdx.y < block_size_y - 1) ? (u_bar_shared[shared_mem_index + block_size_x] - u_bar) : 0));
// project onto the g-unit ball
float denom;
if (use_weighting) {
denom = max(1.0f, hypotf(resultX, resultY) * weight);
} else {
denom = max(1.0f, hypotf(resultX, resultY));
}
resultX /= denom;
resultY /= denom;
// dual proximal point extrapolation
constexpr float kGamma = 0.1f;
resultX += kGamma * (resultX - dualTVX);
resultY += kGamma * (resultY - dualTVY);
// write result back
dualTVX = resultX;
dualTVY = resultY;
dual_x_shared[shared_mem_index] = dualTVX;
dual_y_shared[shared_mem_index] = dualTVY;
}
__syncthreads();
// Primal step.
float max_change = 0;
if (kPrimalFlag) {
// compute divergence update of dualTV - Neumann boundary conditions,
// keep track of row sum for preconditioning
float update = dualTVX + dualTVY;
float rowSum = 2;
if (x > 0 && threadIdx.x > 0) {
update -= dual_x_shared[shared_mem_index - 1];
rowSum++;
}
if (y > 0 && threadIdx.y > 0) {
update -= dual_y_shared[shared_mem_index - block_size_x];
rowSum++;
}
constexpr float kPrimalStepWidth = 1.f;
const float tau = kPrimalStepWidth / rowSum;
update = u + tau * update;
// primal proximal point extrapolation
constexpr float kGamma = 0.1f;
update += kGamma * (update - u);
// write result back
u_bar = 2 * update - u;
if (check_convergence && i == kIterationsPerKernelCall - 1 && kOutput) {
max_change = fabs((update - u) / u);
}
u = update;
u_bar_shared[shared_mem_index] = u_bar;
}
if (check_convergence) {
float max_change_reduced = BlockReduceFloat(float_storage).Reduce(max_change, hipcub::Max());
if (threadIdx.x == 0 && threadIdx.y == 0) {
d_m(0, blockIdx.x) = max_change_reduced;
}
}
__syncthreads();
}
// write outputs back into global memory
if (kOutput) {
if (kPrimalFlag) {
d_u(y, x) = u;
d_u_bar(y, x) = u_bar;
}
if (kDualFlag) {
d_dualTVX(y, x) = dualTVX * 1.f / kDualIntToFloat;
d_dualTVY(y, x) = dualTVY * 1.f / kDualIntToFloat;
}
}
}
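// Host driver for TV depth-map inpainting: initializes the variables, compacts the list
// of active blocks, then runs primal-dual iterations (several per launch in the
// single-kernel path), checking convergence periodically against
// max_change_rate_threshold. Returns the number of iterations performed.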
int InpaintAdaptiveDepthMapCUDA(
hipStream_t stream,
int max_num_iterations,
float max_change_rate_threshold,
float depth_input_scaling_factor,
bool block_adaptive,
bool use_tv_weights,
hipTextureObject_t gradient_magnitude_div_sqrt2,
hipTextureObject_t depth_map_input,
CUDABuffer<bool>* tv_flag,
CUDABuffer<bool>* tv_dual_flag,
CUDABuffer<int16_t>* tv_dual_x,
CUDABuffer<int16_t>* tv_dual_y,
CUDABuffer<float>* tv_u_bar,
CUDABuffer<float>* tv_max_change,
CUDABuffer<float>* depth_map_output,
CUDABuffer<uint16_t>* block_coordinates,
CUDABuffer<unsigned char>* block_activities) {
const int width = depth_map_output->width();
const int height = depth_map_output->height();
const int kBlockWidth = block_adaptive ? 16 : 32;
const int kBlockHeight = block_adaptive ? 16 : 32;
dim3 grid_dim(cuda_util::GetBlockCount(width, kBlockWidth),
cuda_util::GetBlockCount(height, kBlockHeight));
const dim3 block_dim(kBlockWidth, kBlockHeight);
const int sm_size = block_adaptive ? kBlockWidth*kBlockHeight*sizeof(float) : 0;
CUDABuffer<float>* tv_u = depth_map_output;
constexpr bool kUseSingleKernel = true;
const int kBlockOutputSizeX = kBlockWidth - 2 * kIterationsPerKernelCall;
const int kBlockOutputSizeY = kBlockHeight - 2 * kIterationsPerKernelCall;
grid_dim = dim3(cuda_util::GetBlockCount(width, kBlockOutputSizeX),
cuda_util::GetBlockCount(height, kBlockOutputSizeY));
// Initialize variables.
hipLaunchKernelGGL(( TVInpaintingInitializeVariablesKernel), dim3(grid_dim), dim3(block_dim), 0, stream,
grid_dim.x, kUseSingleKernel, depth_input_scaling_factor, depth_map_input, tv_flag->ToCUDA(), tv_dual_flag->ToCUDA(), tv_dual_x->ToCUDA(),
tv_dual_y->ToCUDA(), tv_u->ToCUDA(), tv_u_bar->ToCUDA(), block_coordinates->ToCUDA());
CHECK_CUDA_NO_ERROR();
if (block_adaptive) {
hipLaunchKernelGGL(( InitBlockActivationsFromInpaintRegionKernel), dim3(grid_dim), dim3(block_dim), 0, stream,
depth_map_input,
width, height,
block_activities->ToCUDA());
CHECK_CUDA_NO_ERROR();
}
uint8_t* block_activity = new uint8_t[grid_dim.x * grid_dim.y];
block_coordinates->DownloadPartAsync(0, grid_dim.x * grid_dim.y * sizeof(uint8_t), stream, reinterpret_cast<uint16_t*>(block_activity));
hipStreamSynchronize(stream);
int active_block_count = 0;
uint16_t* block_coordinates_cpu = new uint16_t[2 * grid_dim.x * grid_dim.y];
for (size_t y = 0; y < grid_dim.y; ++ y) {
for (size_t x = 0; x < grid_dim.x; ++ x) {
if (block_activity[x + y * grid_dim.x] > 0) {
block_coordinates_cpu[2 * active_block_count + 0] = x * (kUseSingleKernel ? kBlockOutputSizeX : kBlockWidth);
block_coordinates_cpu[2 * active_block_count + 1] = y * (kUseSingleKernel ? kBlockOutputSizeY : kBlockHeight);
++ active_block_count;
}
}
}
delete[] block_activity;
if (active_block_count == 0) {
delete[] block_coordinates_cpu;
return 0;
}
block_coordinates->UploadPartAsync(0, 2 * active_block_count * sizeof(uint16_t), stream, block_coordinates_cpu);
float* max_change = new float[grid_dim.x * grid_dim.y];
// Run optimization iterations.
int i = 0;
int last_convergence_check_iteration = -180;
for (i = 0; i < max_num_iterations; i += (kUseSingleKernel ? kIterationsPerKernelCall : 1)) {
const bool check_convergence = (i - last_convergence_check_iteration >= 200);
if (kUseSingleKernel) {
dim3 grid_dim_single_kernel(active_block_count);
CHECK_EQ(kBlockWidth, 32);
CHECK_EQ(kBlockHeight, 32);
if (check_convergence) {
hipLaunchKernelGGL(( TVInpaintingDualAndPrimalStepsKernel<32, 32, true, true>), dim3(grid_dim_single_kernel), dim3(block_dim), sm_size, stream,
kHuberEpsilon,
block_coordinates->ToCUDA(),
tv_flag->ToCUDA(),
tv_dual_flag->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x->ToCUDA(),
tv_dual_y->ToCUDA(),
tv_u->ToCUDA(),
tv_u_bar->ToCUDA(),
tv_max_change->ToCUDA());
} else {
hipLaunchKernelGGL(( TVInpaintingDualAndPrimalStepsKernel<32, 32, true, false>), dim3(grid_dim_single_kernel), dim3(block_dim), sm_size, stream,
kHuberEpsilon,
block_coordinates->ToCUDA(),
tv_flag->ToCUDA(),
tv_dual_flag->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x->ToCUDA(),
tv_dual_y->ToCUDA(),
tv_u->ToCUDA(),
tv_u_bar->ToCUDA(),
tv_max_change->ToCUDA());
}
} else {
if (block_adaptive) {
if (use_tv_weights) {
hipLaunchKernelGGL(( TVInpaintingDualStepKernel<true,true>), dim3(grid_dim), dim3(block_dim), sm_size, stream,
kHuberEpsilon, kCellChangeThreshold,
tv_dual_flag->ToCUDA(),
tv_u_bar->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
block_activities->ToCUDA());
} else {
hipLaunchKernelGGL(( TVInpaintingDualStepKernel<false,true>), dim3(grid_dim), dim3(block_dim), sm_size, stream,
kHuberEpsilon, kCellChangeThreshold,
tv_dual_flag->ToCUDA(),
tv_u_bar->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
block_activities->ToCUDA());
}
} else {
if (use_tv_weights) {
hipLaunchKernelGGL(( TVInpaintingDualStepKernel<true,false>), dim3(grid_dim), dim3(block_dim), sm_size, stream,
kHuberEpsilon, kCellChangeThreshold,
tv_dual_flag->ToCUDA(),
tv_u_bar->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
block_activities->ToCUDA());
} else {
hipLaunchKernelGGL(( TVInpaintingDualStepKernel<false,false>), dim3(grid_dim), dim3(block_dim), sm_size, stream,
kHuberEpsilon, kCellChangeThreshold,
tv_dual_flag->ToCUDA(),
tv_u_bar->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
block_activities->ToCUDA());
}
} // if (block_adaptive)
if (check_convergence) {
if (block_adaptive) {
hipLaunchKernelGGL(( TVInpaintingPrimalStepKernel<true,true>), dim3(grid_dim), dim3(block_dim), sm_size, stream,
kCellChangeThreshold,
tv_flag->ToCUDA(), tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
tv_u->ToCUDA(), tv_u_bar->ToCUDA(), tv_max_change->ToCUDA(),
block_activities->ToCUDA());
} else {
hipLaunchKernelGGL(( TVInpaintingPrimalStepKernel<true,false>), dim3(grid_dim), dim3(block_dim), sm_size, stream,
kCellChangeThreshold,
tv_flag->ToCUDA(), tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
tv_u->ToCUDA(), tv_u_bar->ToCUDA(), tv_max_change->ToCUDA(),
block_activities->ToCUDA());
}
} else {
if (block_adaptive) {
hipLaunchKernelGGL(( TVInpaintingPrimalStepKernel<false,true>), dim3(grid_dim), dim3(block_dim), sm_size, stream,
kCellChangeThreshold,
tv_flag->ToCUDA(), tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
tv_u->ToCUDA(), tv_u_bar->ToCUDA(), CUDABuffer_<float>(),
block_activities->ToCUDA());
} else {
hipLaunchKernelGGL(( TVInpaintingPrimalStepKernel<false,false>), dim3(grid_dim), dim3(block_dim), sm_size, stream,
kCellChangeThreshold,
tv_flag->ToCUDA(), tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
tv_u->ToCUDA(), tv_u_bar->ToCUDA(), CUDABuffer_<float>(),
block_activities->ToCUDA());
}
} // if (check_convergence)
} // if (kUseSingleKernel)
if (check_convergence) {
tv_max_change->DownloadPartAsync(0, active_block_count * sizeof(float), stream, max_change);
hipStreamSynchronize(stream);
int new_active_block_count = 0;
for (int j = 0, end = active_block_count; j < end; j ++) {
if (max_change[j] > max_change_rate_threshold) {
// block_coordinates_cpu[2 * new_active_block_count + 0] = block_coordinates_cpu[2 * j + 0];
// block_coordinates_cpu[2 * new_active_block_count + 1] = block_coordinates_cpu[2 * j + 1];
++ new_active_block_count;
}
}
//LOG(INFO) << "[" << i << "] Active blocks: " << active_block_count << " -> " << new_active_block_count;
//LOG(INFO) << "max_change_rate: " << max_change_rate << " / " << max_change_rate_threshold;
if (new_active_block_count == 0) {
break;
}
//active_block_count = new_active_block_count;
//block_coordinates->UploadPartAsync(0, 2 * active_block_count * sizeof(uint16_t), stream, block_coordinates_cpu);
last_convergence_check_iteration = i;
} // if (check_convergence)
} // for (i = 0; i < max_num_iterations; ++i)
delete[] max_change;
delete[] block_coordinates_cpu;
CHECK_CUDA_NO_ERROR();
if (i < max_num_iterations) {
LOG(INFO) << "TV converged after iteration: " << i;
} else {
LOG(WARNING) << "TV used maximum iteration count: " << i;
}
return i;
}
int InpaintDepthMapCUDA(
hipStream_t stream,
InpaintingMode inpainting_mode,
bool use_tv_weights,
int max_num_iterations,
float max_change_rate_threshold,
float depth_input_scaling_factor,
hipTextureObject_t gradient_magnitude_div_sqrt2,
hipTextureObject_t depth_map_input,
CUDABuffer<bool>* tv_flag,
CUDABuffer<bool>* tv_dual_flag,
CUDABuffer<int16_t>* tv_dual_x,
CUDABuffer<int16_t>* tv_dual_y,
CUDABuffer<float>* tv_u_bar,
CUDABuffer<float>* tv_max_change,
CUDABuffer<float>* depth_map_output,
CUDABuffer<uint16_t>* block_coordinates,
CUDABuffer<unsigned char>* block_activities) {
switch(inpainting_mode) {
case kIMClassic:
return InpaintAdaptiveDepthMapCUDA(
stream,
max_num_iterations, max_change_rate_threshold, depth_input_scaling_factor,
false, use_tv_weights,
gradient_magnitude_div_sqrt2, depth_map_input,
tv_flag, tv_dual_flag, tv_dual_x, tv_dual_y, tv_u_bar, tv_max_change,
depth_map_output, block_coordinates, block_activities);
case kIMAdaptive:
return InpaintAdaptiveDepthMapCUDA(
stream,
max_num_iterations, max_change_rate_threshold, depth_input_scaling_factor,
true, use_tv_weights,
gradient_magnitude_div_sqrt2, depth_map_input,
tv_flag, tv_dual_flag, tv_dual_x, tv_dual_y, tv_u_bar, tv_max_change,
depth_map_output, block_coordinates, block_activities);
default:
return InpaintAdaptiveDepthMapCUDA(
stream,
max_num_iterations, max_change_rate_threshold, depth_input_scaling_factor,
false, use_tv_weights,
gradient_magnitude_div_sqrt2, depth_map_input,
tv_flag, tv_dual_flag, tv_dual_x, tv_dual_y, tv_u_bar, tv_max_change,
depth_map_output, block_coordinates, block_activities);
} // switch(inpainting_mode)
}
__global__ void TVInpaintingInitializeVariablesKernel(
int grid_dim_x,
CUDABuffer_<uchar4> input,
CUDABuffer_<bool> tv_flag,
CUDABuffer_<bool> tv_dual_flag,
CUDABuffer_<float> tv_dual_x_r,
CUDABuffer_<float> tv_dual_x_g,
CUDABuffer_<float> tv_dual_x_b,
CUDABuffer_<float> tv_dual_y_r,
CUDABuffer_<float> tv_dual_y_g,
CUDABuffer_<float> tv_dual_y_b,
CUDABuffer_<float> tv_u_r,
CUDABuffer_<float> tv_u_g,
CUDABuffer_<float> tv_u_b,
CUDABuffer_<float> tv_u_bar_r,
CUDABuffer_<float> tv_u_bar_g,
CUDABuffer_<float> tv_u_bar_b,
CUDABuffer_<uint16_t> block_coordinates) {
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
const int width = tv_u_r.width();
const int height = tv_u_r.height();
bool thread_is_active = false;
if (x < width && y < height) {
tv_dual_x_r(y, x) = 0.f;
tv_dual_x_g(y, x) = 0.f;
tv_dual_x_b(y, x) = 0.f;
tv_dual_y_r(y, x) = 0.f;
tv_dual_y_g(y, x) = 0.f;
tv_dual_y_b(y, x) = 0.f;
const uchar4 f_input = input(y, x);
tv_flag(y, x) = (f_input.w == 0);
thread_is_active =
(f_input.w == 0 ||
(x > 0 && input(y, x - 1).w == 0) ||
(y > 0 && input(y - 1, x).w == 0) ||
(x < input.width() - 1 && input(y, x + 1).w == 0) ||
(y < input.height() - 1 && input(y + 1, x).w == 0));
tv_dual_flag(y, x) = thread_is_active;
const float3 f_input_float = make_float3(
(1.f / 255.f) * f_input.x,
(1.f / 255.f) * f_input.y,
(1.f / 255.f) * f_input.z);
tv_u_r(y, x) = f_input_float.x;
tv_u_g(y, x) = f_input_float.y;
tv_u_b(y, x) = f_input_float.z;
tv_u_bar_r(y, x) = f_input_float.x;
tv_u_bar_g(y, x) = f_input_float.y;
tv_u_bar_b(y, x) = f_input_float.z;
}
typedef hipcub::BlockReduce<
int, 32, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, 32> BlockReduceInt;
__shared__ typename BlockReduceInt::TempStorage int_storage;
int num_active_threads = BlockReduceInt(int_storage).Reduce(thread_is_active ? 1 : 0, hipcub::Sum());
if (threadIdx.x == 0 && threadIdx.y == 0) {
reinterpret_cast<uint8_t*>(block_coordinates.address())[blockIdx.x + blockIdx.y * grid_dim_x] = (num_active_threads > 0) ? 1 : 0;
}
}
// performs primal update and extrapolation step:
// u^{k+1} = u^k + tau* div(p^{k+1})
// \bar{u}^{k+1} = 2*u^{k+1} - u^k
template<bool check_convergence>
__global__ void TVInpaintingPrimalStepKernel(
CUDABuffer_<uint16_t> block_coordinates,
CUDABuffer_<bool> tv_flag,
CUDABuffer_<float> d_dualTVX_r,
CUDABuffer_<float> d_dualTVX_g,
CUDABuffer_<float> d_dualTVX_b,
CUDABuffer_<float> d_dualTVY_r,
CUDABuffer_<float> d_dualTVY_g,
CUDABuffer_<float> d_dualTVY_b,
CUDABuffer_<float> d_u_r,
CUDABuffer_<float> d_u_g,
CUDABuffer_<float> d_u_b,
CUDABuffer_<float> d_u_bar_r,
CUDABuffer_<float> d_u_bar_g,
CUDABuffer_<float> d_u_bar_b,
CUDABuffer_<float> d_m) {
const int x = block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x;
const int y = block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y;
typedef hipcub::BlockReduce<
float, 32, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, 32> BlockReduceFloat;
__shared__ typename BlockReduceFloat::TempStorage float_storage;
// only update within the inpainting region (f == 0)
float max_change = 0;
if (x < d_u_r.width() && y < d_u_r.height() && tv_flag(y, x)) {
// this will accumulate the update step for the primal variable
float3 update = make_float3(0, 0, 0);
// this will accumulate all row entries of the linear operator for the preconditioned step width
float rowSum = 0;
// compute divergence update of dualTV - Neumann boundary conditions,
// keep track of row sum for preconditioning
update.x += d_dualTVX_r(y, x) + d_dualTVY_r(y, x);
update.y += d_dualTVX_g(y, x) + d_dualTVY_g(y, x);
update.z += d_dualTVX_b(y, x) + d_dualTVY_b(y, x);
rowSum += 2;
if (x > 0) {
update.x -= d_dualTVX_r(y, x - 1);
update.y -= d_dualTVX_g(y, x - 1);
update.z -= d_dualTVX_b(y, x - 1);
rowSum++;
}
if (y > 0) {
update.x -= d_dualTVY_r(y - 1, x);
update.y -= d_dualTVY_g(y - 1, x);
update.z -= d_dualTVY_b(y - 1, x);
rowSum++;
}
constexpr float kPrimalStepWidth = 1.f;
const float tau = kPrimalStepWidth / rowSum;
const float3 u = make_float3(d_u_r(y, x), d_u_g(y, x), d_u_b(y, x));
update = u + tau * update;
d_u_r(y, x) = update.x;
d_u_g(y, x) = update.y;
d_u_b(y, x) = update.z;
float3 u_bar = 2 * update - u;
d_u_bar_r(y, x) = u_bar.x;
d_u_bar_g(y, x) = u_bar.y;
d_u_bar_b(y, x) = u_bar.z;
if (check_convergence) {
max_change = max(max(fabs((update.x - u.x) / u.x),
fabs((update.y - u.y) / u.y)),
fabs((update.z - u.z) / u.z));
}
}
if (check_convergence) {
float max_change_reduced = BlockReduceFloat(float_storage).Reduce(max_change, hipcub::Max());
if (threadIdx.x == 0 && threadIdx.y == 0) {
d_m(0, blockIdx.x) = max_change_reduced;
}
}
}
// performs dual update step
// p^{k+1} = \Pi_{|p|<=g} [ p^k + \sigma * \nabla \bar{u}^k ]
template<bool use_weighting>
__global__ void TVInpaintingDualStepKernel(
CUDABuffer_<uint16_t> block_coordinates,
CUDABuffer_<bool> tv_dual_flag,
CUDABuffer_<float> d_u_r,
CUDABuffer_<float> d_u_g,
CUDABuffer_<float> d_u_b,
hipTextureObject_t d_tvWeight,
CUDABuffer_<float> d_dualTVX_r,
CUDABuffer_<float> d_dualTVX_g,
CUDABuffer_<float> d_dualTVX_b,
CUDABuffer_<float> d_dualTVY_r,
CUDABuffer_<float> d_dualTVY_g,
CUDABuffer_<float> d_dualTVY_b) {
const int x = block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x;
const int y = block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y;
if (x < d_u_r.width() && y < d_u_r.height() && tv_dual_flag(y, x)) {
const float dualStepWidth = 1.0f;
const float HUBER_EPS = 0.01f;
const float huberFactor = 1.0f / (1.0f + dualStepWidth * 0.5f * HUBER_EPS);
// update using the gradient of u
const float3 u = make_float3(d_u_r(y, x), d_u_g(y, x), d_u_b(y, x));
constexpr float kDualStepWidth = 1.f;
float3 u_plusx_minus_u = make_float3(0, 0, 0);
if (x < d_u_r.width() - 1) {
u_plusx_minus_u = make_float3(d_u_r(y, x + 1), d_u_g(y, x + 1), d_u_b(y, x + 1)) - u;
}
const float3 dualTVX = make_float3(d_dualTVX_r(y, x), d_dualTVX_g(y, x), d_dualTVX_b(y, x));
float3 u_plusy_minus_u = make_float3(0, 0, 0);
if (y < d_u_r.height() - 1) {
u_plusy_minus_u = make_float3(d_u_r(y + 1, x), d_u_g(y + 1, x), d_u_b(y + 1, x)) - u;
}
const float3 dualTVY = make_float3(d_dualTVY_r(y, x), d_dualTVY_g(y, x), d_dualTVY_b(y, x));
float3 resultX =
huberFactor * (dualTVX + kDualStepWidth * 0.5f * u_plusx_minus_u);
float3 resultY =
huberFactor * (dualTVY + kDualStepWidth * 0.5f * u_plusy_minus_u);
// project onto the g-unit ball
float3 denom;
if (use_weighting) {
// Optimization: remove 1 / weight and turn division by weight below into multiplication.
const float weight = /*1.f /*/ (1.f + tex2D<uchar>(d_tvWeight, x, y) * kSqrt2 / 255.f);
denom.x = max(1.0f, hypotf(resultX.x, resultY.x) * weight);
denom.y = max(1.0f, hypotf(resultX.y, resultY.y) * weight);
denom.z = max(1.0f, hypotf(resultX.z, resultY.z) * weight);
} else {
denom.x = max(1.0f, hypotf(resultX.x, resultY.x));
denom.y = max(1.0f, hypotf(resultX.y, resultY.y));
denom.z = max(1.0f, hypotf(resultX.z, resultY.z));
}
resultX /= denom;
resultY /= denom;
// write result back into global memory
d_dualTVX_r(y, x) = resultX.x;
d_dualTVX_g(y, x) = resultX.y;
d_dualTVX_b(y, x) = resultX.z;
d_dualTVY_r(y, x) = resultY.x;
d_dualTVY_g(y, x) = resultY.y;
d_dualTVY_b(y, x) = resultY.z;
}
}
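// Same primal-dual TV scheme applied to an RGBA color image: pixels with alpha == 0 are
// inpainted per channel. Returns the number of iterations performed.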
int InpaintImageCUDA(
hipStream_t stream,
int max_num_iterations,
float max_change_rate_threshold,
hipTextureObject_t gradient_magnitude_div_sqrt2,
const CUDABuffer<uchar4>& input,
CUDABuffer<bool>* tv_flag,
CUDABuffer<bool>* tv_dual_flag,
CUDABuffer<float>* tv_dual_x_r,
CUDABuffer<float>* tv_dual_x_g,
CUDABuffer<float>* tv_dual_x_b,
CUDABuffer<float>* tv_dual_y_r,
CUDABuffer<float>* tv_dual_y_g,
CUDABuffer<float>* tv_dual_y_b,
CUDABuffer<float>* tv_u_bar_r,
CUDABuffer<float>* tv_u_bar_g,
CUDABuffer<float>* tv_u_bar_b,
CUDABuffer<float>* tv_max_change,
CUDABuffer<float>* output_r,
CUDABuffer<float>* output_g,
CUDABuffer<float>* output_b,
CUDABuffer<uint16_t>* block_coordinates) {
const int width = output_r->width();
const int height = output_r->height();
constexpr int kBlockWidth = 32;
constexpr int kBlockHeight = 32;
dim3 grid_dim(cuda_util::GetBlockCount(width, kBlockWidth),
cuda_util::GetBlockCount(height, kBlockHeight));
const dim3 block_dim(kBlockWidth, kBlockHeight);
CUDABuffer<float>* tv_u_r = output_r;
CUDABuffer<float>* tv_u_g = output_g;
CUDABuffer<float>* tv_u_b = output_b;
// Initialize variables.
hipLaunchKernelGGL(( TVInpaintingInitializeVariablesKernel), dim3(grid_dim), dim3(block_dim), 0, stream,
grid_dim.x,
input.ToCUDA(),
tv_flag->ToCUDA(),
tv_dual_flag->ToCUDA(),
tv_dual_x_r->ToCUDA(),
tv_dual_x_g->ToCUDA(),
tv_dual_x_b->ToCUDA(),
tv_dual_y_r->ToCUDA(),
tv_dual_y_g->ToCUDA(),
tv_dual_y_b->ToCUDA(),
tv_u_r->ToCUDA(),
tv_u_g->ToCUDA(),
tv_u_b->ToCUDA(),
tv_u_bar_r->ToCUDA(),
tv_u_bar_g->ToCUDA(),
tv_u_bar_b->ToCUDA(),
block_coordinates->ToCUDA());
CHECK_CUDA_NO_ERROR();
uint8_t* block_activity = new uint8_t[grid_dim.x * grid_dim.y];
block_coordinates->DownloadPartAsync(0, grid_dim.x * grid_dim.y * sizeof(uint8_t), stream, reinterpret_cast<uint16_t*>(block_activity));
hipStreamSynchronize(stream);
int active_block_count = 0;
uint16_t* block_coordinates_cpu = new uint16_t[2 * grid_dim.x * grid_dim.y];
for (size_t y = 0; y < grid_dim.y; ++ y) {
for (size_t x = 0; x < grid_dim.x; ++ x) {
if (block_activity[x + y * grid_dim.x] > 0) {
block_coordinates_cpu[2 * active_block_count + 0] = x * kBlockWidth;
block_coordinates_cpu[2 * active_block_count + 1] = y * kBlockHeight;
++ active_block_count;
}
}
}
delete[] block_activity;
if (active_block_count == 0) {
return 0;
}
block_coordinates->UploadPartAsync(0, 2 * active_block_count * sizeof(uint16_t), stream, block_coordinates_cpu);
float* max_change = new float[grid_dim.x * grid_dim.y];
// Run optimization iterations.
int i = 0;
int last_convergence_check_iteration = -180;
for (i = 0; i < max_num_iterations; i += 1) {
// TODO: HACK: Minimum iteration count is necessary since it exits too early in some cases
const bool check_convergence = (i - last_convergence_check_iteration >= 200) /*&& (i >= 500)*/;
dim3 grid_dim_active(active_block_count);
hipLaunchKernelGGL(( TVInpaintingDualStepKernel<true>), dim3(grid_dim_active), dim3(block_dim), 0, stream,
block_coordinates->ToCUDA(),
tv_dual_flag->ToCUDA(),
tv_u_bar_r->ToCUDA(),
tv_u_bar_g->ToCUDA(),
tv_u_bar_b->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x_r->ToCUDA(),
tv_dual_x_g->ToCUDA(),
tv_dual_x_b->ToCUDA(),
tv_dual_y_r->ToCUDA(),
tv_dual_y_g->ToCUDA(),
tv_dual_y_b->ToCUDA());
if (check_convergence) {
hipLaunchKernelGGL(( TVInpaintingPrimalStepKernel<true>), dim3(grid_dim_active), dim3(block_dim), 0, stream,
block_coordinates->ToCUDA(),
tv_flag->ToCUDA(),
tv_dual_x_r->ToCUDA(),
tv_dual_x_g->ToCUDA(),
tv_dual_x_b->ToCUDA(),
tv_dual_y_r->ToCUDA(),
tv_dual_y_g->ToCUDA(),
tv_dual_y_b->ToCUDA(),
tv_u_r->ToCUDA(),
tv_u_g->ToCUDA(),
tv_u_b->ToCUDA(),
tv_u_bar_r->ToCUDA(),
tv_u_bar_g->ToCUDA(),
tv_u_bar_b->ToCUDA(),
tv_max_change->ToCUDA());
} else {
hipLaunchKernelGGL(( TVInpaintingPrimalStepKernel<false>), dim3(grid_dim_active), dim3(block_dim), 0, stream,
block_coordinates->ToCUDA(),
tv_flag->ToCUDA(),
tv_dual_x_r->ToCUDA(),
tv_dual_x_g->ToCUDA(),
tv_dual_x_b->ToCUDA(),
tv_dual_y_r->ToCUDA(),
tv_dual_y_g->ToCUDA(),
tv_dual_y_b->ToCUDA(),
tv_u_r->ToCUDA(),
tv_u_g->ToCUDA(),
tv_u_b->ToCUDA(),
tv_u_bar_r->ToCUDA(),
tv_u_bar_g->ToCUDA(),
tv_u_bar_b->ToCUDA(),
CUDABuffer_<float>());
}
if (check_convergence) {
tv_max_change->DownloadPartAsync(0, active_block_count * sizeof(float), stream, max_change);
hipStreamSynchronize(stream);
int new_active_block_count = 0;
for (int j = 0, end = active_block_count; j < end; j ++) {
if (max_change[j] > max_change_rate_threshold) {
// block_coordinates_cpu[2 * new_active_block_count + 0] = block_coordinates_cpu[2 * j + 0];
// block_coordinates_cpu[2 * new_active_block_count + 1] = block_coordinates_cpu[2 * j + 1];
++ new_active_block_count;
}
}
//LOG(INFO) << "[" << i << "] Active blocks: " << active_block_count << " -> " << new_active_block_count;
//LOG(INFO) << "max_change_rate: " << max_change_rate << " / " << max_change_rate_threshold;
if (new_active_block_count == 0) {
break;
}
//active_block_count = new_active_block_count;
//block_coordinates->UploadPartAsync(0, 2 * active_block_count * sizeof(uint16_t), stream, block_coordinates_cpu);
last_convergence_check_iteration = i;
} // if (check_convergence)
}
delete[] max_change;
delete[] block_coordinates_cpu;
CHECK_CUDA_NO_ERROR();
if (i < max_num_iterations) {
LOG(INFO) << "Color TV converged after iteration: " << i;
} else {
LOG(WARNING) << "Color TV used maximum iteration count: " << i;
}
return i;
}
} // namespace view_correction
| 2911792a5037480a20a58cc576ef1a87eb789d0a.cu | // Copyright 2018 ETH Zürich
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "view_correction/cuda_tv_inpainting_functions.cuh"
#include <cub/cub.cuh>
#include <glog/logging.h>
#include "view_correction/cuda_util.h"
#include "view_correction/helper_math.h"
namespace view_correction {
constexpr float kSqrt2 = 1.4142135623731f;
constexpr float kHuberEpsilon = 0.01f;
constexpr float kCellChangeThreshold = 10e-6f;
constexpr float kDualIntToFloat = 2.f / 32767; // std::numeric_limits<int16_t>::max();
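// Explanatory note (not part of the original code): the dual variables are stored as int16_t
// and rescaled with kDualIntToFloat, i.e. the int16 range [-32767, 32767] maps to roughly
// [-2, 2] in float. The encode/decode round-trip used by the kernels below when writing and
// reading d_dualTVX / d_dualTVY is
//   int16_t q  = p * (1.f / kDualIntToFloat);   // store
//   float   p2 = kDualIntToFloat * q;           // load, |p - p2| <= kDualIntToFloat ~= 6.1e-5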
// One iteration is one dual (D) and one primal (P) step. Theoretically one
// could make this more fine-grained to for example also do DPD and PDP, but it
// would complicate the code unnecessarily.
constexpr int kIterationsPerKernelCall = 4;
__global__ void TVInpaintingInitializeVariablesKernel(
int grid_dim_x,
bool kUseSingleKernel,
float depth_input_scaling_factor,
cudaTextureObject_t depth_map_input,
CUDABuffer_<bool> tv_flag,
CUDABuffer_<bool> tv_dual_flag,
CUDABuffer_<int16_t> tv_dual_x,
CUDABuffer_<int16_t> tv_dual_y,
CUDABuffer_<float> tv_u,
CUDABuffer_<float> tv_u_bar,
CUDABuffer_<uint16_t> block_coordinates) {
const int width = tv_u.width();
const int height = tv_u.height();
unsigned int x;
unsigned int y;
if (kUseSingleKernel) {
const int kBlockOutputSizeX = 32 - 2 * kIterationsPerKernelCall;
const int kBlockOutputSizeY = 32 - 2 * kIterationsPerKernelCall;
x = blockIdx.x * kBlockOutputSizeX + threadIdx.x - kIterationsPerKernelCall;
y = blockIdx.y * kBlockOutputSizeY + threadIdx.y - kIterationsPerKernelCall;
} else {
x = blockIdx.x * blockDim.x + threadIdx.x;
y = blockIdx.y * blockDim.y + threadIdx.y;
}
const bool kOutput =
threadIdx.x >= kIterationsPerKernelCall &&
threadIdx.y >= kIterationsPerKernelCall &&
threadIdx.x < 32 - kIterationsPerKernelCall &&
threadIdx.y < 32 - kIterationsPerKernelCall &&
x < width &&
y < height;
bool thread_is_active = false;
if (kOutput) {
tv_dual_x(y, x) = 0;
tv_dual_y(y, x) = 0;
const float depth_input = depth_input_scaling_factor * tex2D<float>(depth_map_input, x, y);
tv_flag(y, x) = (depth_input == 0);
thread_is_active =
(depth_input == 0 ||
(x > 0 && tex2D<float>(depth_map_input, x - 1, y) == 0) ||
(y > 0 && tex2D<float>(depth_map_input, x, y - 1) == 0) ||
(x < width - 1 && tex2D<float>(depth_map_input, x + 1, y) == 0) ||
(y < height - 1 && tex2D<float>(depth_map_input, x, y + 1) == 0));
tv_dual_flag(y, x) = thread_is_active;
tv_u(y, x) = depth_input;
tv_u_bar(y, x) = depth_input;
}
typedef cub::BlockReduce<
int, 32, cub::BLOCK_REDUCE_WARP_REDUCTIONS, 32> BlockReduceInt;
__shared__ typename BlockReduceInt::TempStorage int_storage;
int num_active_threads = BlockReduceInt(int_storage).Reduce(thread_is_active ? 1 : 0, cub::Sum());
if (threadIdx.x == 0 && threadIdx.y == 0) {
reinterpret_cast<uint8_t*>(block_coordinates.address())[blockIdx.x + blockIdx.y * grid_dim_x] = (num_active_threads > 0) ? 1 : 0;
}
}
// set up all block activities (primal and dual) for all blocks
// that overlap with the inpainting area
__global__ void InitBlockActivationsFromInpaintRegionKernel(
cudaTextureObject_t d_f,
const int width,
const int height,
CUDABuffer_<unsigned char> d_block_activities)
{
if (threadIdx.x == 0 && threadIdx.y == 0) {
bool blockIsEmpty = true;
// for all threads in the block
for (int j=0; j<blockDim.y; ++j) {
for (int i=0; i<blockDim.x; ++i) {
const int x = blockIdx.x * blockDim.x + i;
const int y = blockIdx.y * blockDim.y + j;
if (x < width && y < height && tex2D<float>(d_f, x, y) == 0) {
blockIsEmpty = false;
break;
}
}
if (!blockIsEmpty) break;
}
if (blockIsEmpty) d_block_activities(blockIdx.y, blockIdx.x) = 0;
else d_block_activities(blockIdx.y, blockIdx.x) = 3;
} // if (threadIdx.x == 0 && threadIdx.y == 0)
}
// checks the convergence of individual blocks and keeps track of the block boundary
// in order to steer deactivation and reactivation of block updates
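// Explanatory note (not part of the original code): d_block_activities holds a small bit mask
// per block: bit 0 (value 1) marks the primal update as active, bit 1 (value 2) the dual update.
// InitBlockActivationsFromInpaintRegionKernel above initializes it to 3 (both active) for blocks
// overlapping the inpainting region and to 0 otherwise; the helper below clears or re-sets the
// corresponding bit depending on the measured change in the block and its neighbours.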
__device__ void UpdateBlockActivations(
const float local_value,
const float prev_value,
const float cell_change_threshold,
const unsigned char activity_flag,
volatile float *sdata,
CUDABuffer_<unsigned char> d_block_activities) {
const float diff = local_value != 0 ?
fabs(local_value-prev_value)/fabs(local_value) :
(prev_value != 0 ? fabs(local_value-prev_value)/fabs(prev_value) : 0);
sdata[threadIdx.x + blockDim.x*threadIdx.y] = diff; // save value to shared memory
__syncthreads();
// reduction code to compute column sums of shared memory in parallel
float sum = 0;
float lsum, rsum, tsum = 0, bsum = 0;
if (threadIdx.y == 0) {
for (int j=0; j<blockDim.y; ++j) {
const float value = sdata[threadIdx.x + blockDim.x*j];
if (j == 0) tsum += value;
if (j == blockDim.y-1) bsum += value;
sum += value;
}
if (threadIdx.x == 0) lsum = sum;
if (threadIdx.x == blockDim.x-1) rsum = sum;
sdata[threadIdx.x] = sum;
}
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0) {
// compute final sum for the whole warp
sum = 0;
for (int j=0; j<blockDim.x; ++j) sum += sdata[j];
// unset activity flag if converged (i.e. change was very small)
if (sum < cell_change_threshold*blockDim.x*blockDim.y) {
d_block_activities(blockIdx.y, blockIdx.x) &= ~activity_flag; // unset flag
}
} // if (threadIdx.x == 0 && threadIdx.y == 0)
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0) {
// reactivate neighboring blocks if necessary
if (lsum >= cell_change_threshold*blockDim.y && blockIdx.x > 0)
d_block_activities(blockIdx.y, blockIdx.x-1) |= activity_flag;
if (rsum >= cell_change_threshold*blockDim.y && blockIdx.x < gridDim.x-1)
d_block_activities(blockIdx.y, blockIdx.x+1) |= activity_flag;
if (tsum >= cell_change_threshold*blockDim.x && blockIdx.y > 0)
d_block_activities(blockIdx.y-1, blockIdx.x) |= activity_flag;
if (bsum >= cell_change_threshold*blockDim.x && blockIdx.y < gridDim.y-1)
d_block_activities(blockIdx.y+1, blockIdx.x) |= activity_flag;
} // if (threadIdx.x == 0 && threadIdx.y == 0)
}
// performs primal update and extrapolation step:
// u^{k+1} = u^k + tau* div(p^{k+1})
// \bar{u}^{k+1} = 2*u^{k+1} - u^k
template<bool check_convergence, bool block_adaptive>
__global__ void TVInpaintingPrimalStepKernel(
const float cell_change_threshold,
CUDABuffer_<bool> d_tv_flag,
CUDABuffer_<int16_t> d_dualTVX,
CUDABuffer_<int16_t> d_dualTVY,
CUDABuffer_<float> d_u,
CUDABuffer_<float> d_u_bar,
CUDABuffer_<float> d_m,
CUDABuffer_<unsigned char> d_block_activities) {
if (block_adaptive) {
// check block activity
if (d_block_activities(blockIdx.y, blockIdx.x) == 0) return;
}
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
// this will accumulate the update step for the primal variable
float update = 0;
// this will accumulate all row entries of the linear operator for the preconditioned step width
float rowSum = 0;
float u = 0;
// only update within the inpainting region (f == 0)
if (x < d_u.width() && y < d_u.height() && d_tv_flag(y, x)) {
// compute divergence update of dualTV - Neumann boundary conditions,
// keep track of row sum for preconditioning
update += kDualIntToFloat * (d_dualTVX(y, x) + d_dualTVY(y, x));
rowSum += 2;
if (x > 0) {
update -= kDualIntToFloat * d_dualTVX(y, x - 1);
rowSum++;
}
if (y > 0) {
update -= kDualIntToFloat * d_dualTVY(y - 1, x);
rowSum++;
}
constexpr float kPrimalStepWidth = 1.f;
const float tau = kPrimalStepWidth / rowSum;
u = d_u(y, x);
update = u + tau * update;
// primal proximal point extrapolation
constexpr float kGamma = 0.1f;
update += kGamma * (update - u);
d_u(y, x) = update;
d_u_bar(y, x) = 2 * update - u;
if (check_convergence) {
d_m(y, x) = fabs((update - u) / u);
}
}
if (block_adaptive) {
extern __shared__ float sdata[];
UpdateBlockActivations(update, u, cell_change_threshold, 1,
sdata, d_block_activities);
}
}
// performs dual update step
// p^{k+1} = \Pi_{|p|<=g} [ p^k + \sigma * \nabla \bar{u}^k ]
// p^{k+1} = \Pi_{|p|<=g} [ (p^k + \sigma * \nabla \bar{u}^k) / (1+\sigma*huberEpsilon) ]
template<bool use_weighting, bool block_adaptive>
__global__ void TVInpaintingDualStepKernel(
const float huber_epsilon,
const float cell_change_threshold,
CUDABuffer_<bool> d_tv_dual_flag,
CUDABuffer_<float> d_u,
cudaTextureObject_t d_tvWeight,
CUDABuffer_<int16_t> d_dualTVX,
CUDABuffer_<int16_t> d_dualTVY,
CUDABuffer_<unsigned char> d_block_activities) {
if (block_adaptive) {
// check block activity
if (d_block_activities(blockIdx.y, blockIdx.x) == 0) return;
}
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float resultX = 0, resultY = 0;
float dualTVX = 0, dualTVY = 0;
if (x < d_u.width() && y < d_u.height() && d_tv_dual_flag(y, x)) {
// update using the gradient of u
constexpr float kDualStepWidth = 1.f;
const float huberFactor = 1.0f / (1.0f + kDualStepWidth * 0.5f * huber_epsilon);
const float u = d_u(y, x);
dualTVX = kDualIntToFloat * d_dualTVX(y, x);
dualTVY = kDualIntToFloat * d_dualTVY(y, x);
resultX =
huberFactor * (dualTVX + kDualStepWidth * 0.5f *
( (x < d_u.width() - 1) ? (d_u(y, x + 1) - u) : 0 ));
resultY =
huberFactor * (dualTVY + kDualStepWidth * 0.5f *
( (y < d_u.height() - 1) ? (d_u(y + 1, x) - u) : 0 ));
// project onto the g-unit ball
float denom;
if (use_weighting) {
// Optimization: remove 1 / weight and turn division by weight below into multiplication.
const float weight = /*1.f /*/ (1.f + tex2D<uchar>(d_tvWeight, x, y) * kSqrt2 / 255.f);
// const float weight = 1.f / (__expf(tex2D<uchar>(d_tvWeight, x, y) * kSqrt2 / 255.f)*5);
denom = max(1.0f, hypotf(resultX, resultY) * weight);
} else {
denom = max(1.0f, hypotf(resultX, resultY));
}
resultX /= denom;
resultY /= denom;
// dual proximal point extrapolation
constexpr float kGamma = 0.1f;
resultX += kGamma*(resultX-dualTVX);
resultY += kGamma*(resultY-dualTVY);
// write result back into global memory
d_dualTVX(y, x) = resultX * 1.f / kDualIntToFloat;
d_dualTVY(y, x) = resultY * 1.f / kDualIntToFloat;
}
if (block_adaptive) {
extern __shared__ float sdata[];
UpdateBlockActivations(hypotf(resultX, resultY),
hypotf(dualTVX, dualTVY), cell_change_threshold, 2,
sdata, d_block_activities);
}
}
// This kernel does not produce output for the first kIterationsPerKernelCall
// rows and columns and for the last kIterationsPerKernelCall rows and columns.
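// Worked example (explanatory note, not in the original code): with block_size_x = block_size_y = 32
// and kIterationsPerKernelCall = 4, every launch loads a 32x32 tile into shared memory, runs 4
// dual+primal iterations on it, and writes back only the inner 24x24 pixels; the 4-pixel border
// only serves as a halo whose values become stale during the in-tile iterations and is therefore
// written by the overlapping neighbouring blocks instead.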
template<int block_size_x, int block_size_y, bool use_weighting, bool check_convergence>
__global__ void TVInpaintingDualAndPrimalStepsKernel(
const float huber_epsilon,
CUDABuffer_<uint16_t> block_coordinates,
CUDABuffer_<bool> d_tv_flag,
CUDABuffer_<bool> d_tv_dual_flag,
cudaTextureObject_t d_tvWeight,
CUDABuffer_<int16_t> d_dualTVX,
CUDABuffer_<int16_t> d_dualTVY,
CUDABuffer_<float> d_u,
CUDABuffer_<float> d_u_bar,
CUDABuffer_<float> d_m) {
const int x = max(0, min(d_u.width() - 1, block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x - kIterationsPerKernelCall));
const int y = max(0, min(d_u.height() - 1, block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y - kIterationsPerKernelCall));
const bool kDualFlag = d_tv_dual_flag(y, x);
const bool kPrimalFlag = d_tv_flag(y, x);
const bool kOutput =
threadIdx.x >= kIterationsPerKernelCall &&
threadIdx.y >= kIterationsPerKernelCall &&
threadIdx.x < block_size_x - kIterationsPerKernelCall &&
threadIdx.y < block_size_y - kIterationsPerKernelCall &&
block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x - kIterationsPerKernelCall < d_u.width() &&
block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y - kIterationsPerKernelCall < d_u.height();
typedef cub::BlockReduce<
float, 32, cub::BLOCK_REDUCE_WARP_REDUCTIONS, 32> BlockReduceFloat;
__shared__ typename BlockReduceFloat::TempStorage float_storage;
// Load inputs into private or shared memory.
__shared__ float u_bar_shared[block_size_x * block_size_y];
__shared__ float dual_x_shared[block_size_x * block_size_y];
__shared__ float dual_y_shared[block_size_x * block_size_y];
int shared_mem_index = threadIdx.x + block_size_x * threadIdx.y;
float u_bar = d_u_bar(y, x);
float dualTVX = kDualIntToFloat * d_dualTVX(y, x);
float dualTVY = kDualIntToFloat * d_dualTVY(y, x);
float u = d_u(y, x);
const float weight = /*1.f /*/ (1.f + tex2D<uchar>(d_tvWeight, x, y) * kSqrt2 / 255.f);
// const float weight = 1.f / (__expf(tex2D<uchar>(d_tvWeight, x, y) * kSqrt2 / 255.f)*5);
u_bar_shared[shared_mem_index] = u_bar;
dual_x_shared[shared_mem_index] = dualTVX;
dual_y_shared[shared_mem_index] = dualTVY;
// Wait for shared memory to be loaded.
__syncthreads();
#pragma unroll
for (int i = 0; i < kIterationsPerKernelCall; ++ i) {
// Dual step.
if (kDualFlag) {
// update using the gradient of u
constexpr float kDualStepWidth = 1.f;
const float huberFactor = 1.0f / (1.0f + kDualStepWidth * 0.5f * huber_epsilon);
float resultX =
huberFactor * (dualTVX + kDualStepWidth * 0.5f *
( (x < d_u_bar.width() - 1 && threadIdx.x < block_size_x - 1) ? (u_bar_shared[shared_mem_index + 1] - u_bar) : 0));
float resultY =
huberFactor * (dualTVY + kDualStepWidth * 0.5f *
( (y < d_u_bar.height() - 1 && threadIdx.y < block_size_y - 1) ? (u_bar_shared[shared_mem_index + block_size_x] - u_bar) : 0));
// project onto the g-unit ball
float denom;
if (use_weighting) {
denom = max(1.0f, hypotf(resultX, resultY) * weight);
} else {
denom = max(1.0f, hypotf(resultX, resultY));
}
resultX /= denom;
resultY /= denom;
// dual proximal point extrapolation
constexpr float kGamma = 0.1f;
resultX += kGamma * (resultX - dualTVX);
resultY += kGamma * (resultY - dualTVY);
// write result back
dualTVX = resultX;
dualTVY = resultY;
dual_x_shared[shared_mem_index] = dualTVX;
dual_y_shared[shared_mem_index] = dualTVY;
}
__syncthreads();
// Primal step.
float max_change = 0;
if (kPrimalFlag) {
// compute divergence update of dualTV - Neumann boundary conditions,
// keep track of row sum for preconditioning
float update = dualTVX + dualTVY;
float rowSum = 2;
if (x > 0 && threadIdx.x > 0) {
update -= dual_x_shared[shared_mem_index - 1];
rowSum++;
}
if (y > 0 && threadIdx.y > 0) {
update -= dual_y_shared[shared_mem_index - block_size_x];
rowSum++;
}
constexpr float kPrimalStepWidth = 1.f;
const float tau = kPrimalStepWidth / rowSum;
update = u + tau * update;
// primal proximal point extrapolation
constexpr float kGamma = 0.1f;
update += kGamma * (update - u);
// write result back
u_bar = 2 * update - u;
if (check_convergence && i == kIterationsPerKernelCall - 1 && kOutput) {
max_change = fabs((update - u) / u);
}
u = update;
u_bar_shared[shared_mem_index] = u_bar;
}
if (check_convergence) {
float max_change_reduced = BlockReduceFloat(float_storage).Reduce(max_change, cub::Max());
if (threadIdx.x == 0 && threadIdx.y == 0) {
d_m(0, blockIdx.x) = max_change_reduced;
}
}
__syncthreads();
}
// write outputs back into global memory
if (kOutput) {
if (kPrimalFlag) {
d_u(y, x) = u;
d_u_bar(y, x) = u_bar;
}
if (kDualFlag) {
d_dualTVX(y, x) = dualTVX * 1.f / kDualIntToFloat;
d_dualTVY(y, x) = dualTVY * 1.f / kDualIntToFloat;
}
}
}
int InpaintAdaptiveDepthMapCUDA(
cudaStream_t stream,
int max_num_iterations,
float max_change_rate_threshold,
float depth_input_scaling_factor,
bool block_adaptive,
bool use_tv_weights,
cudaTextureObject_t gradient_magnitude_div_sqrt2,
cudaTextureObject_t depth_map_input,
CUDABuffer<bool>* tv_flag,
CUDABuffer<bool>* tv_dual_flag,
CUDABuffer<int16_t>* tv_dual_x,
CUDABuffer<int16_t>* tv_dual_y,
CUDABuffer<float>* tv_u_bar,
CUDABuffer<float>* tv_max_change,
CUDABuffer<float>* depth_map_output,
CUDABuffer<uint16_t>* block_coordinates,
CUDABuffer<unsigned char>* block_activities) {
const int width = depth_map_output->width();
const int height = depth_map_output->height();
const int kBlockWidth = block_adaptive ? 16 : 32;
const int kBlockHeight = block_adaptive ? 16 : 32;
dim3 grid_dim(cuda_util::GetBlockCount(width, kBlockWidth),
cuda_util::GetBlockCount(height, kBlockHeight));
const dim3 block_dim(kBlockWidth, kBlockHeight);
const int sm_size = block_adaptive ? kBlockWidth*kBlockHeight*sizeof(float) : 0;
CUDABuffer<float>* tv_u = depth_map_output;
constexpr bool kUseSingleKernel = true;
const int kBlockOutputSizeX = kBlockWidth - 2 * kIterationsPerKernelCall;
const int kBlockOutputSizeY = kBlockHeight - 2 * kIterationsPerKernelCall;
grid_dim = dim3(cuda_util::GetBlockCount(width, kBlockOutputSizeX),
cuda_util::GetBlockCount(height, kBlockOutputSizeY));
// Initialize variables.
TVInpaintingInitializeVariablesKernel<<<grid_dim, block_dim, 0, stream>>>(
grid_dim.x, kUseSingleKernel, depth_input_scaling_factor, depth_map_input, tv_flag->ToCUDA(), tv_dual_flag->ToCUDA(), tv_dual_x->ToCUDA(),
tv_dual_y->ToCUDA(), tv_u->ToCUDA(), tv_u_bar->ToCUDA(), block_coordinates->ToCUDA());
CHECK_CUDA_NO_ERROR();
if (block_adaptive) {
InitBlockActivationsFromInpaintRegionKernel<<<grid_dim, block_dim, 0, stream>>>(
depth_map_input,
width, height,
block_activities->ToCUDA());
CHECK_CUDA_NO_ERROR();
}
uint8_t* block_activity = new uint8_t[grid_dim.x * grid_dim.y];
block_coordinates->DownloadPartAsync(0, grid_dim.x * grid_dim.y * sizeof(uint8_t), stream, reinterpret_cast<uint16_t*>(block_activity));
cudaStreamSynchronize(stream);
int active_block_count = 0;
uint16_t* block_coordinates_cpu = new uint16_t[2 * grid_dim.x * grid_dim.y];
for (size_t y = 0; y < grid_dim.y; ++ y) {
for (size_t x = 0; x < grid_dim.x; ++ x) {
if (block_activity[x + y * grid_dim.x] > 0) {
block_coordinates_cpu[2 * active_block_count + 0] = x * (kUseSingleKernel ? kBlockOutputSizeX : kBlockWidth);
block_coordinates_cpu[2 * active_block_count + 1] = y * (kUseSingleKernel ? kBlockOutputSizeY : kBlockHeight);
++ active_block_count;
}
}
}
delete[] block_activity;
if (active_block_count == 0) {
delete[] block_coordinates_cpu;
return 0;
}
block_coordinates->UploadPartAsync(0, 2 * active_block_count * sizeof(uint16_t), stream, block_coordinates_cpu);
float* max_change = new float[grid_dim.x * grid_dim.y];
// Run optimization iterations.
int i = 0;
int last_convergence_check_iteration = -180;
for (i = 0; i < max_num_iterations; i += (kUseSingleKernel ? kIterationsPerKernelCall : 1)) {
const bool check_convergence = (i - last_convergence_check_iteration >= 200);
if (kUseSingleKernel) {
dim3 grid_dim_single_kernel(active_block_count);
CHECK_EQ(kBlockWidth, 32);
CHECK_EQ(kBlockHeight, 32);
if (check_convergence) {
TVInpaintingDualAndPrimalStepsKernel<32, 32, true, true><<<grid_dim_single_kernel, block_dim, sm_size, stream>>>(
kHuberEpsilon,
block_coordinates->ToCUDA(),
tv_flag->ToCUDA(),
tv_dual_flag->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x->ToCUDA(),
tv_dual_y->ToCUDA(),
tv_u->ToCUDA(),
tv_u_bar->ToCUDA(),
tv_max_change->ToCUDA());
} else {
TVInpaintingDualAndPrimalStepsKernel<32, 32, true, false><<<grid_dim_single_kernel, block_dim, sm_size, stream>>>(
kHuberEpsilon,
block_coordinates->ToCUDA(),
tv_flag->ToCUDA(),
tv_dual_flag->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x->ToCUDA(),
tv_dual_y->ToCUDA(),
tv_u->ToCUDA(),
tv_u_bar->ToCUDA(),
tv_max_change->ToCUDA());
}
} else {
if (block_adaptive) {
if (use_tv_weights) {
TVInpaintingDualStepKernel<true,true><<<grid_dim, block_dim, sm_size, stream>>>(
kHuberEpsilon, kCellChangeThreshold,
tv_dual_flag->ToCUDA(),
tv_u_bar->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
block_activities->ToCUDA());
} else {
TVInpaintingDualStepKernel<false,true><<<grid_dim, block_dim, sm_size, stream>>>(
kHuberEpsilon, kCellChangeThreshold,
tv_dual_flag->ToCUDA(),
tv_u_bar->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
block_activities->ToCUDA());
}
} else {
if (use_tv_weights) {
TVInpaintingDualStepKernel<true,false><<<grid_dim, block_dim, sm_size, stream>>>(
kHuberEpsilon, kCellChangeThreshold,
tv_dual_flag->ToCUDA(),
tv_u_bar->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
block_activities->ToCUDA());
} else {
TVInpaintingDualStepKernel<false,false><<<grid_dim, block_dim, sm_size, stream>>>(
kHuberEpsilon, kCellChangeThreshold,
tv_dual_flag->ToCUDA(),
tv_u_bar->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
block_activities->ToCUDA());
}
} // if (block_adaptive)
if (check_convergence) {
if (block_adaptive) {
TVInpaintingPrimalStepKernel<true,true><<<grid_dim, block_dim, sm_size, stream>>>(
kCellChangeThreshold,
tv_flag->ToCUDA(), tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
tv_u->ToCUDA(), tv_u_bar->ToCUDA(), tv_max_change->ToCUDA(),
block_activities->ToCUDA());
} else {
TVInpaintingPrimalStepKernel<true,false><<<grid_dim, block_dim, sm_size, stream>>>(
kCellChangeThreshold,
tv_flag->ToCUDA(), tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
tv_u->ToCUDA(), tv_u_bar->ToCUDA(), tv_max_change->ToCUDA(),
block_activities->ToCUDA());
}
} else {
if (block_adaptive) {
TVInpaintingPrimalStepKernel<false,true><<<grid_dim, block_dim, sm_size, stream>>>(
kCellChangeThreshold,
tv_flag->ToCUDA(), tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
tv_u->ToCUDA(), tv_u_bar->ToCUDA(), CUDABuffer_<float>(),
block_activities->ToCUDA());
} else {
TVInpaintingPrimalStepKernel<false,false><<<grid_dim, block_dim, sm_size, stream>>>(
kCellChangeThreshold,
tv_flag->ToCUDA(), tv_dual_x->ToCUDA(), tv_dual_y->ToCUDA(),
tv_u->ToCUDA(), tv_u_bar->ToCUDA(), CUDABuffer_<float>(),
block_activities->ToCUDA());
}
} // if (check_convergence)
} // if (kUseSingleKernel)
if (check_convergence) {
tv_max_change->DownloadPartAsync(0, active_block_count * sizeof(float), stream, max_change);
cudaStreamSynchronize(stream);
int new_active_block_count = 0;
for (int j = 0, end = active_block_count; j < end; j ++) {
if (max_change[j] > max_change_rate_threshold) {
// block_coordinates_cpu[2 * new_active_block_count + 0] = block_coordinates_cpu[2 * j + 0];
// block_coordinates_cpu[2 * new_active_block_count + 1] = block_coordinates_cpu[2 * j + 1];
++ new_active_block_count;
}
}
//LOG(INFO) << "[" << i << "] Active blocks: " << active_block_count << " -> " << new_active_block_count;
//LOG(INFO) << "max_change_rate: " << max_change_rate << " / " << max_change_rate_threshold;
if (new_active_block_count == 0) {
break;
}
//active_block_count = new_active_block_count;
//block_coordinates->UploadPartAsync(0, 2 * active_block_count * sizeof(uint16_t), stream, block_coordinates_cpu);
last_convergence_check_iteration = i;
} // if (check_convergence)
} // for (i = 0; i < max_num_iterations; ++i)
delete[] max_change;
delete[] block_coordinates_cpu;
CHECK_CUDA_NO_ERROR();
if (i < max_num_iterations) {
LOG(INFO) << "TV converged after iteration: " << i;
} else {
LOG(WARNING) << "TV used maximum iteration count: " << i;
}
return i;
}
int InpaintDepthMapCUDA(
cudaStream_t stream,
InpaintingMode inpainting_mode,
bool use_tv_weights,
int max_num_iterations,
float max_change_rate_threshold,
float depth_input_scaling_factor,
cudaTextureObject_t gradient_magnitude_div_sqrt2,
cudaTextureObject_t depth_map_input,
CUDABuffer<bool>* tv_flag,
CUDABuffer<bool>* tv_dual_flag,
CUDABuffer<int16_t>* tv_dual_x,
CUDABuffer<int16_t>* tv_dual_y,
CUDABuffer<float>* tv_u_bar,
CUDABuffer<float>* tv_max_change,
CUDABuffer<float>* depth_map_output,
CUDABuffer<uint16_t>* block_coordinates,
CUDABuffer<unsigned char>* block_activities) {
switch(inpainting_mode) {
case kIMClassic:
return InpaintAdaptiveDepthMapCUDA(
stream,
max_num_iterations, max_change_rate_threshold, depth_input_scaling_factor,
false, use_tv_weights,
gradient_magnitude_div_sqrt2, depth_map_input,
tv_flag, tv_dual_flag, tv_dual_x, tv_dual_y, tv_u_bar, tv_max_change,
depth_map_output, block_coordinates, block_activities);
case kIMAdaptive:
return InpaintAdaptiveDepthMapCUDA(
stream,
max_num_iterations, max_change_rate_threshold, depth_input_scaling_factor,
true, use_tv_weights,
gradient_magnitude_div_sqrt2, depth_map_input,
tv_flag, tv_dual_flag, tv_dual_x, tv_dual_y, tv_u_bar, tv_max_change,
depth_map_output, block_coordinates, block_activities);
default:
return InpaintAdaptiveDepthMapCUDA(
stream,
max_num_iterations, max_change_rate_threshold, depth_input_scaling_factor,
false, use_tv_weights,
gradient_magnitude_div_sqrt2, depth_map_input,
tv_flag, tv_dual_flag, tv_dual_x, tv_dual_y, tv_u_bar, tv_max_change,
depth_map_output, block_coordinates, block_activities);
} // switch(inpainting_mode)
}
__global__ void TVInpaintingInitializeVariablesKernel(
int grid_dim_x,
CUDABuffer_<uchar4> input,
CUDABuffer_<bool> tv_flag,
CUDABuffer_<bool> tv_dual_flag,
CUDABuffer_<float> tv_dual_x_r,
CUDABuffer_<float> tv_dual_x_g,
CUDABuffer_<float> tv_dual_x_b,
CUDABuffer_<float> tv_dual_y_r,
CUDABuffer_<float> tv_dual_y_g,
CUDABuffer_<float> tv_dual_y_b,
CUDABuffer_<float> tv_u_r,
CUDABuffer_<float> tv_u_g,
CUDABuffer_<float> tv_u_b,
CUDABuffer_<float> tv_u_bar_r,
CUDABuffer_<float> tv_u_bar_g,
CUDABuffer_<float> tv_u_bar_b,
CUDABuffer_<uint16_t> block_coordinates) {
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
const int width = tv_u_r.width();
const int height = tv_u_r.height();
bool thread_is_active = false;
if (x < width && y < height) {
tv_dual_x_r(y, x) = 0.f;
tv_dual_x_g(y, x) = 0.f;
tv_dual_x_b(y, x) = 0.f;
tv_dual_y_r(y, x) = 0.f;
tv_dual_y_g(y, x) = 0.f;
tv_dual_y_b(y, x) = 0.f;
const uchar4 f_input = input(y, x);
tv_flag(y, x) = (f_input.w == 0);
thread_is_active =
(f_input.w == 0 ||
(x > 0 && input(y, x - 1).w == 0) ||
(y > 0 && input(y - 1, x).w == 0) ||
(x < input.width() - 1 && input(y, x + 1).w == 0) ||
(y < input.height() - 1 && input(y + 1, x).w == 0));
tv_dual_flag(y, x) = thread_is_active;
const float3 f_input_float = make_float3(
(1.f / 255.f) * f_input.x,
(1.f / 255.f) * f_input.y,
(1.f / 255.f) * f_input.z);
tv_u_r(y, x) = f_input_float.x;
tv_u_g(y, x) = f_input_float.y;
tv_u_b(y, x) = f_input_float.z;
tv_u_bar_r(y, x) = f_input_float.x;
tv_u_bar_g(y, x) = f_input_float.y;
tv_u_bar_b(y, x) = f_input_float.z;
}
typedef cub::BlockReduce<
int, 32, cub::BLOCK_REDUCE_WARP_REDUCTIONS, 32> BlockReduceInt;
__shared__ typename BlockReduceInt::TempStorage int_storage;
int num_active_threads = BlockReduceInt(int_storage).Reduce(thread_is_active ? 1 : 0, cub::Sum());
if (threadIdx.x == 0 && threadIdx.y == 0) {
reinterpret_cast<uint8_t*>(block_coordinates.address())[blockIdx.x + blockIdx.y * grid_dim_x] = (num_active_threads > 0) ? 1 : 0;
}
}
// performs primal update and extrapolation step:
// u^{k+1} = u^k + tau* div(p^{k+1})
// \bar{u}^{k+1} = 2*u^{k+1} - u^k
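// Explanatory note (not part of the original code): the step width below is diagonally
// preconditioned, tau = kPrimalStepWidth / rowSum, where rowSum counts the dual entries
// contributing to the divergence at this pixel: 4 for an interior pixel, 3 along the top or
// left image border, 2 in the top-left corner.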
template<bool check_convergence>
__global__ void TVInpaintingPrimalStepKernel(
CUDABuffer_<uint16_t> block_coordinates,
CUDABuffer_<bool> tv_flag,
CUDABuffer_<float> d_dualTVX_r,
CUDABuffer_<float> d_dualTVX_g,
CUDABuffer_<float> d_dualTVX_b,
CUDABuffer_<float> d_dualTVY_r,
CUDABuffer_<float> d_dualTVY_g,
CUDABuffer_<float> d_dualTVY_b,
CUDABuffer_<float> d_u_r,
CUDABuffer_<float> d_u_g,
CUDABuffer_<float> d_u_b,
CUDABuffer_<float> d_u_bar_r,
CUDABuffer_<float> d_u_bar_g,
CUDABuffer_<float> d_u_bar_b,
CUDABuffer_<float> d_m) {
const int x = block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x;
const int y = block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y;
typedef cub::BlockReduce<
float, 32, cub::BLOCK_REDUCE_WARP_REDUCTIONS, 32> BlockReduceFloat;
__shared__ typename BlockReduceFloat::TempStorage float_storage;
// only update within the inpainting region (f == 0)
float max_change = 0;
if (x < d_u_r.width() && y < d_u_r.height() && tv_flag(y, x)) {
// this will accumulate the update step for the primal variable
float3 update = make_float3(0, 0, 0);
// this will accumulate all row entries of the linear operator for the preconditioned step width
float rowSum = 0;
// compute divergence update of dualTV - Neumann boundary conditions,
// keep track of row sum for preconditioning
update.x += d_dualTVX_r(y, x) + d_dualTVY_r(y, x);
update.y += d_dualTVX_g(y, x) + d_dualTVY_g(y, x);
update.z += d_dualTVX_b(y, x) + d_dualTVY_b(y, x);
rowSum += 2;
if (x > 0) {
update.x -= d_dualTVX_r(y, x - 1);
update.y -= d_dualTVX_g(y, x - 1);
update.z -= d_dualTVX_b(y, x - 1);
rowSum++;
}
if (y > 0) {
update.x -= d_dualTVY_r(y - 1, x);
update.y -= d_dualTVY_g(y - 1, x);
update.z -= d_dualTVY_b(y - 1, x);
rowSum++;
}
constexpr float kPrimalStepWidth = 1.f;
const float tau = kPrimalStepWidth / rowSum;
const float3 u = make_float3(d_u_r(y, x), d_u_g(y, x), d_u_b(y, x));
update = u + tau * update;
d_u_r(y, x) = update.x;
d_u_g(y, x) = update.y;
d_u_b(y, x) = update.z;
float3 u_bar = 2 * update - u;
d_u_bar_r(y, x) = u_bar.x;
d_u_bar_g(y, x) = u_bar.y;
d_u_bar_b(y, x) = u_bar.z;
if (check_convergence) {
max_change = max(max(fabs((update.x - u.x) / u.x),
fabs((update.y - u.y) / u.y)),
fabs((update.z - u.z) / u.z));
}
}
if (check_convergence) {
float max_change_reduced = BlockReduceFloat(float_storage).Reduce(max_change, cub::Max());
if (threadIdx.x == 0 && threadIdx.y == 0) {
d_m(0, blockIdx.x) = max_change_reduced;
}
}
}
// performs dual update step
// p^{k+1} = \Pi_{|p|<=g} [ p^k + \sigma * \nabla \bar{u}^k ]
template<bool use_weighting>
__global__ void TVInpaintingDualStepKernel(
CUDABuffer_<uint16_t> block_coordinates,
CUDABuffer_<bool> tv_dual_flag,
CUDABuffer_<float> d_u_r,
CUDABuffer_<float> d_u_g,
CUDABuffer_<float> d_u_b,
cudaTextureObject_t d_tvWeight,
CUDABuffer_<float> d_dualTVX_r,
CUDABuffer_<float> d_dualTVX_g,
CUDABuffer_<float> d_dualTVX_b,
CUDABuffer_<float> d_dualTVY_r,
CUDABuffer_<float> d_dualTVY_g,
CUDABuffer_<float> d_dualTVY_b) {
const int x = block_coordinates(0, 2 * blockIdx.x + 0) + threadIdx.x;
const int y = block_coordinates(0, 2 * blockIdx.x + 1) + threadIdx.y;
if (x < d_u_r.width() && y < d_u_r.height() && tv_dual_flag(y, x)) {
const float dualStepWidth = 1.0f;
const float HUBER_EPS = 0.01f;
const float huberFactor = 1.0f / (1.0f + dualStepWidth * 0.5f * HUBER_EPS);
// update using the gradient of u
const float3 u = make_float3(d_u_r(y, x), d_u_g(y, x), d_u_b(y, x));
constexpr float kDualStepWidth = 1.f;
float3 u_plusx_minus_u = make_float3(0, 0, 0);
if (x < d_u_r.width() - 1) {
u_plusx_minus_u = make_float3(d_u_r(y, x + 1), d_u_g(y, x + 1), d_u_b(y, x + 1)) - u;
}
const float3 dualTVX = make_float3(d_dualTVX_r(y, x), d_dualTVX_g(y, x), d_dualTVX_b(y, x));
float3 u_plusy_minus_u = make_float3(0, 0, 0);
if (y < d_u_r.height() - 1) {
u_plusy_minus_u = make_float3(d_u_r(y + 1, x), d_u_g(y + 1, x), d_u_b(y + 1, x)) - u;
}
const float3 dualTVY = make_float3(d_dualTVY_r(y, x), d_dualTVY_g(y, x), d_dualTVY_b(y, x));
float3 resultX =
huberFactor * (dualTVX + kDualStepWidth * 0.5f * u_plusx_minus_u);
float3 resultY =
huberFactor * (dualTVY + kDualStepWidth * 0.5f * u_plusy_minus_u);
// project onto the g-unit ball
float3 denom;
if (use_weighting) {
// Optimization: remove 1 / weight and turn division by weight below into multiplication.
const float weight = /*1.f /*/ (1.f + tex2D<uchar>(d_tvWeight, x, y) * kSqrt2 / 255.f);
denom.x = max(1.0f, hypotf(resultX.x, resultY.x) * weight);
denom.y = max(1.0f, hypotf(resultX.y, resultY.y) * weight);
denom.z = max(1.0f, hypotf(resultX.z, resultY.z) * weight);
} else {
denom.x = max(1.0f, hypotf(resultX.x, resultY.x));
denom.y = max(1.0f, hypotf(resultX.y, resultY.y));
denom.z = max(1.0f, hypotf(resultX.z, resultY.z));
}
resultX /= denom;
resultY /= denom;
// write result back into global memory
d_dualTVX_r(y, x) = resultX.x;
d_dualTVX_g(y, x) = resultX.y;
d_dualTVX_b(y, x) = resultX.z;
d_dualTVY_r(y, x) = resultY.x;
d_dualTVY_g(y, x) = resultY.y;
d_dualTVY_b(y, x) = resultY.z;
}
}
int InpaintImageCUDA(
cudaStream_t stream,
int max_num_iterations,
float max_change_rate_threshold,
cudaTextureObject_t gradient_magnitude_div_sqrt2,
const CUDABuffer<uchar4>& input,
CUDABuffer<bool>* tv_flag,
CUDABuffer<bool>* tv_dual_flag,
CUDABuffer<float>* tv_dual_x_r,
CUDABuffer<float>* tv_dual_x_g,
CUDABuffer<float>* tv_dual_x_b,
CUDABuffer<float>* tv_dual_y_r,
CUDABuffer<float>* tv_dual_y_g,
CUDABuffer<float>* tv_dual_y_b,
CUDABuffer<float>* tv_u_bar_r,
CUDABuffer<float>* tv_u_bar_g,
CUDABuffer<float>* tv_u_bar_b,
CUDABuffer<float>* tv_max_change,
CUDABuffer<float>* output_r,
CUDABuffer<float>* output_g,
CUDABuffer<float>* output_b,
CUDABuffer<uint16_t>* block_coordinates) {
const int width = output_r->width();
const int height = output_r->height();
constexpr int kBlockWidth = 32;
constexpr int kBlockHeight = 32;
dim3 grid_dim(cuda_util::GetBlockCount(width, kBlockWidth),
cuda_util::GetBlockCount(height, kBlockHeight));
const dim3 block_dim(kBlockWidth, kBlockHeight);
CUDABuffer<float>* tv_u_r = output_r;
CUDABuffer<float>* tv_u_g = output_g;
CUDABuffer<float>* tv_u_b = output_b;
// Initialize variables.
TVInpaintingInitializeVariablesKernel<<<grid_dim, block_dim, 0, stream>>>(
grid_dim.x,
input.ToCUDA(),
tv_flag->ToCUDA(),
tv_dual_flag->ToCUDA(),
tv_dual_x_r->ToCUDA(),
tv_dual_x_g->ToCUDA(),
tv_dual_x_b->ToCUDA(),
tv_dual_y_r->ToCUDA(),
tv_dual_y_g->ToCUDA(),
tv_dual_y_b->ToCUDA(),
tv_u_r->ToCUDA(),
tv_u_g->ToCUDA(),
tv_u_b->ToCUDA(),
tv_u_bar_r->ToCUDA(),
tv_u_bar_g->ToCUDA(),
tv_u_bar_b->ToCUDA(),
block_coordinates->ToCUDA());
CHECK_CUDA_NO_ERROR();
uint8_t* block_activity = new uint8_t[grid_dim.x * grid_dim.y];
block_coordinates->DownloadPartAsync(0, grid_dim.x * grid_dim.y * sizeof(uint8_t), stream, reinterpret_cast<uint16_t*>(block_activity));
cudaStreamSynchronize(stream);
int active_block_count = 0;
uint16_t* block_coordinates_cpu = new uint16_t[2 * grid_dim.x * grid_dim.y];
for (size_t y = 0; y < grid_dim.y; ++ y) {
for (size_t x = 0; x < grid_dim.x; ++ x) {
if (block_activity[x + y * grid_dim.x] > 0) {
block_coordinates_cpu[2 * active_block_count + 0] = x * kBlockWidth;
block_coordinates_cpu[2 * active_block_count + 1] = y * kBlockHeight;
++ active_block_count;
}
}
}
delete[] block_activity;
  if (active_block_count == 0) {
    delete[] block_coordinates_cpu;  // avoid leaking the staging buffer (matches the adaptive depth-map variant above)
    return 0;
  }
block_coordinates->UploadPartAsync(0, 2 * active_block_count * sizeof(uint16_t), stream, block_coordinates_cpu);
float* max_change = new float[grid_dim.x * grid_dim.y];
// Run optimization iterations.
int i = 0;
int last_convergence_check_iteration = -180;
for (i = 0; i < max_num_iterations; i += 1) {
// TODO: HACK: Minimum iteration count is necessary since it exits too early in some cases
const bool check_convergence = (i - last_convergence_check_iteration >= 200) /*&& (i >= 500)*/;
dim3 grid_dim_active(active_block_count);
TVInpaintingDualStepKernel<true><<<grid_dim_active, block_dim, 0, stream>>>(
block_coordinates->ToCUDA(),
tv_dual_flag->ToCUDA(),
tv_u_bar_r->ToCUDA(),
tv_u_bar_g->ToCUDA(),
tv_u_bar_b->ToCUDA(),
gradient_magnitude_div_sqrt2,
tv_dual_x_r->ToCUDA(),
tv_dual_x_g->ToCUDA(),
tv_dual_x_b->ToCUDA(),
tv_dual_y_r->ToCUDA(),
tv_dual_y_g->ToCUDA(),
tv_dual_y_b->ToCUDA());
if (check_convergence) {
TVInpaintingPrimalStepKernel<true><<<grid_dim_active, block_dim, 0, stream>>>(
block_coordinates->ToCUDA(),
tv_flag->ToCUDA(),
tv_dual_x_r->ToCUDA(),
tv_dual_x_g->ToCUDA(),
tv_dual_x_b->ToCUDA(),
tv_dual_y_r->ToCUDA(),
tv_dual_y_g->ToCUDA(),
tv_dual_y_b->ToCUDA(),
tv_u_r->ToCUDA(),
tv_u_g->ToCUDA(),
tv_u_b->ToCUDA(),
tv_u_bar_r->ToCUDA(),
tv_u_bar_g->ToCUDA(),
tv_u_bar_b->ToCUDA(),
tv_max_change->ToCUDA());
} else {
TVInpaintingPrimalStepKernel<false><<<grid_dim_active, block_dim, 0, stream>>>(
block_coordinates->ToCUDA(),
tv_flag->ToCUDA(),
tv_dual_x_r->ToCUDA(),
tv_dual_x_g->ToCUDA(),
tv_dual_x_b->ToCUDA(),
tv_dual_y_r->ToCUDA(),
tv_dual_y_g->ToCUDA(),
tv_dual_y_b->ToCUDA(),
tv_u_r->ToCUDA(),
tv_u_g->ToCUDA(),
tv_u_b->ToCUDA(),
tv_u_bar_r->ToCUDA(),
tv_u_bar_g->ToCUDA(),
tv_u_bar_b->ToCUDA(),
CUDABuffer_<float>());
}
if (check_convergence) {
tv_max_change->DownloadPartAsync(0, active_block_count * sizeof(float), stream, max_change);
cudaStreamSynchronize(stream);
int new_active_block_count = 0;
for (int j = 0, end = active_block_count; j < end; j ++) {
if (max_change[j] > max_change_rate_threshold) {
// block_coordinates_cpu[2 * new_active_block_count + 0] = block_coordinates_cpu[2 * j + 0];
// block_coordinates_cpu[2 * new_active_block_count + 1] = block_coordinates_cpu[2 * j + 1];
++ new_active_block_count;
}
}
//LOG(INFO) << "[" << i << "] Active blocks: " << active_block_count << " -> " << new_active_block_count;
//LOG(INFO) << "max_change_rate: " << max_change_rate << " / " << max_change_rate_threshold;
if (new_active_block_count == 0) {
break;
}
//active_block_count = new_active_block_count;
//block_coordinates->UploadPartAsync(0, 2 * active_block_count * sizeof(uint16_t), stream, block_coordinates_cpu);
last_convergence_check_iteration = i;
} // if (check_convergence)
}
delete[] max_change;
delete[] block_coordinates_cpu;
CHECK_CUDA_NO_ERROR();
if (i < max_num_iterations) {
LOG(INFO) << "Color TV converged after iteration: " << i;
} else {
LOG(WARNING) << "Color TV used maximum iteration count: " << i;
}
return i;
}
} // namespace view_correction
|
6e72fdb9c1952916248f3ac893033a5914027e55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*
*Computer Engineering Group, Heidelberg University - GPU Computing Exercise 04
*
* Group : TBD
*
* File : kernel.cu
*
* Purpose : Memory Operations Benchmark
*
******************************************************************************/
#include "kernel_hip.cuh"
#include <cassert>
#include <chrono>
#include "errorHandling.h"
#include <iostream>
#include <device_launch_parameters.h>
using std::chrono::nanoseconds;
using std::chrono::high_resolution_clock;
//
// Test Kernel
//
template <typename Callable>
nanoseconds dt(Callable c, size_t n_iters)
{
auto start = high_resolution_clock::now();
c();
auto stop = high_resolution_clock::now();
return (stop - start)/n_iters;
}
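// Example usage of the helper above (illustration only, not part of the original exercise;
// `someKernel` and its arguments are placeholders):
//   auto t = dt([&]() {
//     hipLaunchKernelGGL((someKernel), dim3(gridSize), dim3(blockSize), 0, 0, /* args */);
//     hipDeviceSynchronize();
//   }, 100);
//   std::cout << t.count() << " ns per iteration" << std::endl;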
__global__ void globalMem2SharedMem(float * d_mem, size_t n_elements) {
extern __shared__ float shared_mem[];
size_t n_threads = blockDim.x;
size_t chunk_size = n_elements / n_threads;
size_t offset = threadIdx.x * chunk_size;
// make sure threads behave like they would in a scenario where we actually use the memory
__syncthreads();
for (size_t i = offset; i != offset + chunk_size; ++i)
shared_mem[i] = d_mem[i];
__syncthreads();
}
nanoseconds globalMem2SharedMem_Wrapper(size_t gridSize, size_t blockSize, size_t bytes, size_t n_iter) {
size_t n_elements = bytes / sizeof(float);
float * d_mem = nullptr;
checkCuda(hipMalloc(&d_mem, bytes));
// chunked access only works if it's cleanly divisible
assert(n_elements % (blockSize * gridSize) == 0);
auto start = high_resolution_clock::now();
for (size_t i = 0; i != n_iter; ++i)
hipLaunchKernelGGL(( globalMem2SharedMem) , dim3(gridSize), dim3(blockSize), bytes , 0, d_mem, n_elements);
	hipDeviceSynchronize(); //synchronize after every kernel launch to ensure work gets done
auto stop = high_resolution_clock::now();
quitOnCudaError();
checkCuda(hipFree(d_mem));
return (stop - start) / n_iter;
}
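// Illustration only (not part of the original exercise): converting the returned average time
// per launch into an effective read bandwidth. Each of the gridSize blocks streams the whole
// buffer once in this kernel, so one launch reads roughly gridSize * bytes from global memory:
//   double seconds  = t.count() * 1e-9;                                   // nanoseconds -> seconds
//   double gib_per_s = (gridSize * (double)bytes / seconds) / (1024.0 * 1024.0 * 1024.0);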
__global__ void SharedMem2globalMem(float * d_mem, size_t n_elements) {
extern __shared__ float shared_mem[];
size_t n_threads = blockDim.x;
size_t chunk_size = n_elements / n_threads;
size_t offset = threadIdx.x * chunk_size;
__syncthreads();
	for (size_t i = offset; i != offset + chunk_size; ++i)
		d_mem[i] = shared_mem[i]; // write shared memory back out to global memory, as the kernel name implies (the original copied in the same direction as globalMem2SharedMem)
__syncthreads();
}
nanoseconds sharedMem2globalMem_Wrapper(size_t gridSize, size_t blockSize, size_t bytes, size_t n_iter) {
size_t n_elements = bytes / sizeof(float);
float * d_mem = nullptr;
checkCuda(hipMalloc(&d_mem, n_elements * sizeof(float)));
// chunked access only works if it's cleanly divisible
assert(n_elements % (blockSize * gridSize) == 0);
auto start = high_resolution_clock::now();
for (size_t i = 0; i != n_iter; ++i) {
		hipLaunchKernelGGL(( SharedMem2globalMem) , dim3(gridSize), dim3(blockSize), bytes , 0, d_mem, n_elements);
hipDeviceSynchronize();
}
auto stop = high_resolution_clock::now();
quitOnCudaError();
checkCuda(hipFree(d_mem));
return (stop - start) / n_iter;
}
__global__ void SharedMem2Registers(size_t chunk_size, float volatile* volatile dummy)
{
extern __shared__ float shared_mem[];
// ideally need static size<63 to ensure these are registers, but then this would have to be a template
	// and then we'd have to summon the TMP Cthulhu to iterate...
float registers[n_registers];
size_t offset = threadIdx.x * chunk_size;
__syncthreads();
for(size_t i = 0; i!= chunk_size; ++i) {
registers[i] = shared_mem[offset + i];
}
__syncthreads();
// does not do anything but is supposed to confuse the compiler enough to not optimize away access to registers
// We'll never have that many threads...
	// This is quite the challenge to get right; according to Compiler Explorer the code does not vanish,
	// but I'm not sure why all the kernels take the same time then
if(threadIdx.x == 1025) {
printf("We should never have hit this");
*dummy = registers[chunk_size/2];
}
};
nanoseconds sharedMem2Registers_Wrapper(size_t gridSize, size_t blockSize, size_t bytes, size_t n_iters)
{
size_t n_elements = bytes / sizeof(float);
size_t chunk_size = n_elements / blockSize;
assert(chunk_size < n_registers); // writing outside of the statically allocated register would be no bueno
//Wow so you can't pass a float& as an output variable to the kernel because it converts it to a device pointer
	//which you then can't dereference. That's some next level bullshit that it lets you do it but creates
//wrong code...
float* dummy = nullptr;
hipMalloc(&dummy, sizeof(float));
// create a lambda function to pass to the timer; capture everything by value except the dummy parameter,
// it needs to be a reference
auto time = dt([=, &dummy]() {
hipLaunchKernelGGL(( SharedMem2Registers) , dim3(gridSize), dim3(blockSize), bytes , 0, chunk_size, dummy);
hipDeviceSynchronize();
},n_iters);
quitOnCudaError();
hipFree(dummy);
return time;
}
__global__ void Registers2SharedMem(size_t chunk_size, float volatile* volatile dummy)
{
extern __shared__ float shared_mem[];
// ideally need static size<63 to ensure these are registers, but then this would have to be a template
	// and then we'd have to summon the TMP Cthulhu to iterate...
float registers[n_registers];
size_t offset = threadIdx.x * chunk_size;
__syncthreads();
for(size_t i = 0; i!= chunk_size; ++i)
shared_mem[offset+i] = registers[i];
__syncthreads();
if(threadIdx.x == 1025) {
printf("We should never have hit this");
*dummy = shared_mem[chunk_size/2];
}
}
nanoseconds Registers2SharedMem_Wrapper(size_t gridSize, size_t blockSize, size_t bytes, size_t n_iters)
{
size_t n_elements = bytes / sizeof(float);
size_t chunk_size = n_elements / blockSize;
assert(chunk_size < n_registers); // writing outside of the statically allocated register would be no bueno
float* dummy = nullptr;
hipMalloc(&dummy, sizeof(float));
// create a lambda function to pass to the timer; capture everything by value except the dummy parameter,
// it needs to be a reference
auto time = dt([=, &dummy]() {
hipLaunchKernelGGL(( Registers2SharedMem) , dim3(gridSize), dim3(blockSize), bytes , 0, chunk_size, dummy);
hipDeviceSynchronize();
},n_iters);
quitOnCudaError();
hipFree(dummy);
return time;
}
constexpr c64_t max_clock = std::numeric_limits<c64_t>::max();
__global__ void bankConflictsRead(size_t n_iters, size_t stride, double* results)
{
extern __shared__ float shared_mem[];
size_t const chunk_size = 64;
float volatile registers[chunk_size];
size_t offset = threadIdx.x * chunk_size;
auto start = clock64();
for(size_t _=0; _!=n_iters; ++_)
{
__syncthreads();
		for(size_t idx = 0; idx != chunk_size; ++idx) // loop start fixed: the original began at `offset`, so every thread except thread 0 skipped the read entirely
{
registers[idx] = shared_mem[offset+idx*stride];
}
}
auto stop = clock64();
if(threadIdx.x == 3000)
{
printf("not supposed to happen, just to force compiler to write to registers");
results[0] = registers[0]+registers[63];
}
c64_t result = 0;
if(start>stop)
{
printf("I really don't think this should ever happen...");
result = max_clock-start+stop;
}
else
{
result = stop-start;
}
results[blockIdx.x*blockDim.x+threadIdx.x] = double(result)/n_iters;
}
std::vector<double> bankConflictsRead_Wrapper(size_t gridSize, size_t blockSize, size_t stride, size_t bytes)
{
size_t const n_iters = 1000;
assert(gridSize*blockSize <= bytes/sizeof(float)/64/stride ); //if every thread reads 64 elements, that's all we can do;
double* results_d = nullptr;
size_t result_bytes = gridSize*blockSize * sizeof(double);
hipMalloc(&results_d, result_bytes);
hipLaunchKernelGGL(( bankConflictsRead), dim3(gridSize), dim3(blockSize), bytes, 0, n_iters, stride,results_d);
hipDeviceSynchronize();
quitOnCudaError();
std::vector<double> ret(result_bytes/sizeof(double));
hipMemcpy(ret.data(),results_d,result_bytes, hipMemcpyDeviceToHost);
hipFree(results_d);
return ret;
}
| 6e72fdb9c1952916248f3ac893033a5914027e55.cu | /******************************************************************************
*
*Computer Engineering Group, Heidelberg University - GPU Computing Exercise 04
*
* Group : TBD
*
* File : kernel.cu
*
* Purpose : Memory Operations Benchmark
*
******************************************************************************/
#include "kernel.cuh"
#include <cassert>
#include <chrono>
#include "errorHandling.h"
#include <iostream>
#include <device_launch_parameters.h>
using std::chrono::nanoseconds;
using std::chrono::high_resolution_clock;
//
// Test Kernel
//
template <typename Callable>
nanoseconds dt(Callable c, size_t n_iters)
{
auto start = high_resolution_clock::now();
c();
auto stop = high_resolution_clock::now();
return (stop - start)/n_iters;
}
__global__ void globalMem2SharedMem(float * d_mem, size_t n_elements) {
extern __shared__ float shared_mem[];
size_t n_threads = blockDim.x;
size_t chunk_size = n_elements / n_threads;
size_t offset = threadIdx.x * chunk_size;
// make sure threads behave like they would in a scenario where we actually use the memory
__syncthreads();
for (size_t i = offset; i != offset + chunk_size; ++i)
shared_mem[i] = d_mem[i];
__syncthreads();
}
nanoseconds globalMem2SharedMem_Wrapper(size_t gridSize, size_t blockSize, size_t bytes, size_t n_iter) {
size_t n_elements = bytes / sizeof(float);
float * d_mem = nullptr;
checkCuda(cudaMalloc(&d_mem, bytes));
// chunked access only works if it's cleanly divisible
assert(n_elements % (blockSize * gridSize) == 0);
auto start = high_resolution_clock::now();
for (size_t i = 0; i != n_iter; ++i)
globalMem2SharedMem <<< gridSize, blockSize, bytes >>> (d_mem, n_elements);
	cudaDeviceSynchronize(); //synchronize after every kernel launch to ensure work gets done
auto stop = high_resolution_clock::now();
quitOnCudaError();
checkCuda(cudaFree(d_mem));
return (stop - start) / n_iter;
}
__global__ void SharedMem2globalMem(float * d_mem, size_t n_elements) {
extern __shared__ float shared_mem[];
size_t n_threads = blockDim.x;
size_t chunk_size = n_elements / n_threads;
size_t offset = threadIdx.x * chunk_size;
__syncthreads();
	for (size_t i = offset; i != offset + chunk_size; ++i)
		d_mem[i] = shared_mem[i]; // write shared memory back out to global memory, as the kernel name implies (the original copied in the same direction as globalMem2SharedMem)
__syncthreads();
}
nanoseconds sharedMem2globalMem_Wrapper(size_t gridSize, size_t blockSize, size_t bytes, size_t n_iter) {
size_t n_elements = bytes / sizeof(float);
float * d_mem = nullptr;
checkCuda(cudaMalloc(&d_mem, n_elements * sizeof(float)));
// chunked access only works if it's cleanly divisible
assert(n_elements % (blockSize * gridSize) == 0);
auto start = high_resolution_clock::now();
for (size_t i = 0; i != n_iter; ++i) {
		SharedMem2globalMem <<< gridSize, blockSize, bytes >>> (d_mem, n_elements);
cudaDeviceSynchronize();
}
auto stop = high_resolution_clock::now();
quitOnCudaError();
checkCuda(cudaFree(d_mem));
return (stop - start) / n_iter;
}
__global__ void SharedMem2Registers(size_t chunk_size, float volatile* volatile dummy)
{
extern __shared__ float shared_mem[];
// ideally need static size<63 to ensure these are registers, but then this would have to be a template
	// and then we'd have to summon the TMP Cthulhu to iterate...
float registers[n_registers];
size_t offset = threadIdx.x * chunk_size;
__syncthreads();
for(size_t i = 0; i!= chunk_size; ++i) {
registers[i] = shared_mem[offset + i];
}
__syncthreads();
// does not do anything but is supposed to confuse the compiler enough to not optimize away access to registers
// We'll never have that many threads...
	// This is quite the challenge to get right; according to Compiler Explorer the code does not vanish,
	// but I'm not sure why all the kernels take the same time then
if(threadIdx.x == 1025) {
printf("We should never have hit this");
*dummy = registers[chunk_size/2];
}
};
nanoseconds sharedMem2Registers_Wrapper(size_t gridSize, size_t blockSize, size_t bytes, size_t n_iters)
{
size_t n_elements = bytes / sizeof(float);
size_t chunk_size = n_elements / blockSize;
assert(chunk_size < n_registers); // writing outside of the statically allocated register would be no bueno
//Wow so you can't pass a float& as an output variable to the kernel because it converts it to a device pointer
	//which you then can't dereference. That's some next level bullshit that it lets you do it but creates
//wrong code...
float* dummy = nullptr;
cudaMalloc(&dummy, sizeof(float));
// create a lambda function to pass to the timer; capture everything by value except the dummy parameter,
// it needs to be a reference
auto time = dt([=, &dummy]() {
SharedMem2Registers <<< gridSize, blockSize, bytes >>> (chunk_size, dummy);
cudaDeviceSynchronize();
},n_iters);
quitOnCudaError();
cudaFree(dummy);
return time;
}
__global__ void Registers2SharedMem(size_t chunk_size, float volatile* volatile dummy)
{
extern __shared__ float shared_mem[];
// ideally need static size<63 to ensure these are registers, but then this would have to be a template
	// and then we'd have to summon the TMP Cthulhu to iterate...
float registers[n_registers];
size_t offset = threadIdx.x * chunk_size;
__syncthreads();
for(size_t i = 0; i!= chunk_size; ++i)
shared_mem[offset+i] = registers[i];
__syncthreads();
if(threadIdx.x == 1025) {
printf("We should never have hit this");
*dummy = shared_mem[chunk_size/2];
}
}
nanoseconds Registers2SharedMem_Wrapper(size_t gridSize, size_t blockSize, size_t bytes, size_t n_iters)
{
size_t n_elements = bytes / sizeof(float);
size_t chunk_size = n_elements / blockSize;
assert(chunk_size < n_registers); // writing outside of the statically allocated register would be no bueno
float* dummy = nullptr;
cudaMalloc(&dummy, sizeof(float));
// create a lambda function to pass to the timer; capture everything by value except the dummy parameter,
// it needs to be a reference
auto time = dt([=, &dummy]() {
Registers2SharedMem <<< gridSize, blockSize, bytes >>> (chunk_size, dummy);
cudaDeviceSynchronize();
},n_iters);
quitOnCudaError();
cudaFree(dummy);
return time;
}
constexpr c64_t max_clock = std::numeric_limits<c64_t>::max();
__global__ void bankConflictsRead(size_t n_iters, size_t stride, double* results)
{
extern __shared__ float shared_mem[];
size_t const chunk_size = 64;
float volatile registers[chunk_size];
size_t offset = threadIdx.x * chunk_size;
auto start = clock64();
for(size_t _=0; _!=n_iters; ++_)
{
__syncthreads();
		for(size_t idx = 0; idx != chunk_size; ++idx) // loop start fixed: the original began at `offset`, so every thread except thread 0 skipped the read entirely
{
registers[idx] = shared_mem[offset+idx*stride];
}
}
auto stop = clock64();
if(threadIdx.x == 3000)
{
printf("not supposed to happen, just to force compiler to write to registers");
results[0] = registers[0]+registers[63];
}
c64_t result = 0;
if(start>stop)
{
printf("I really don't think this should ever happen...");
result = max_clock-start+stop;
}
else
{
result = stop-start;
}
results[blockIdx.x*blockDim.x+threadIdx.x] = double(result)/n_iters;
}
std::vector<double> bankConflictsRead_Wrapper(size_t gridSize, size_t blockSize, size_t stride, size_t bytes)
{
size_t const n_iters = 1000;
assert(gridSize*blockSize <= bytes/sizeof(float)/64/stride ); //if every thread reads 64 elements, that's all we can do;
double* results_d = nullptr;
size_t result_bytes = gridSize*blockSize * sizeof(double);
cudaMalloc(&results_d, result_bytes);
bankConflictsRead<<< gridSize, blockSize, bytes>>>(n_iters, stride,results_d);
cudaDeviceSynchronize();
quitOnCudaError();
std::vector<double> ret(result_bytes/sizeof(double));
cudaMemcpy(ret.data(),results_d,result_bytes, cudaMemcpyDeviceToHost);
cudaFree(results_d);
return ret;
}
|
c6903a6ba5ab1bfda5e8543ee26fdb2af6192e8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=============================================================================================
// Name : matrixMultiplicationCommented.cu
// Author : Jose Refojo
// Version : 06-02-2018
// Creation date : 22-09-2010
// Copyright : Copyright belongs to Trinity Centre for High Performance Computing
// Description : This program will multiply two matrices into a third one (AB=C)
// This file will describe matmulGPU, which allocates and transfers the matrices in the global memory of the gpu, and then sets up the kernel and runs it.
// The kernel uses a 2d grid (so it spawns a 2d set of threads), one thread per each element of the matrix C
// Each particular thread multiplies its row in A by its column in B and stores the obtained value in its position in C
//=============================================================================================
extern int block_size_x;
extern int block_size_y;
extern int MATRIX_SIZE_N; // Those are the default values of N and M
extern int MATRIX_SIZE_M;
extern int verbose;
extern int skipCpuTest; // Since the CPU test might take quite a long time, we give an option to skip it
#include "cudaUtils.h"
#include "matrixMultiplicationCommented.h"
#include "stdio.h"
#include "time.h"
#include <getopt.h>
// computeMatMulGPU is the kernel that will run the compute in the GPU: It is run by each and every thread of the grid
// It is run by every thread in a 2d grid (so each thread has an id in the first and second dimensions).
__global__ void computeMatMulGPU (int N,int M,float *A1dGPU,float *B1dGPU,float *C1dGPU) {
int idx=blockIdx.x*blockDim.x+threadIdx.x; // The global id of the thread in the first dimension
int idy=blockIdx.y*blockDim.y+threadIdx.y; // The global id of the thread in the second dimension
int k;
if (idx<N) { // We do this check to make sure that we do not go past the boundaries of the matrix in the first dimension
if (idy<N) { // We do this check to make sure that we do not go past the boundaries of the matrix in the second dimension
C1dGPU[idx+idy*N]=0.0f; // Start at zero, add up from there
for (k=0;k<M;k++) { // Add the product of the row of A with the column of B
C1dGPU[idx+idy*N]+=A1dGPU[k+idy*M]*B1dGPU[idx+k*N];
}
}
}
}
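// Worked example of the indexing above (explanatory note): with N=4 and M=3, the thread with
// idx=1, idy=2 owns C[1+2*4] = C[9], i.e. row 2, column 1 of the NxN result. Its loop accumulates
// A[k+2*3]*B[1+k*4] for k=0,1,2, which is row 2 of A (row-major, M columns per row) times
// column 1 of B (row-major, N columns per row).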
// This function serves as a bridge between the main and the GPU code - we can call it from C or C++ code, and it fires up the CUDA code
void matmulGPU (int N,int M,float *A1d,float *B1d,float *C1d) {
//int i,j;
hipError_t err; // We can use this variable to check the return error of many cuda functions
float *A1dGPU,*B1dGPU,*C1dGPU;
// Allocate and transfer matrix A in the GPU
	// There are two ways we can catch errors with hipMalloc and other cuda functions that aren't kernels: we can request the last error by calling cudaLastErrorCheck, or we can check the returned error code directly, as done here:
err = hipMalloc ((void **) &A1dGPU, sizeof(float)*(N*M));
if( hipSuccess != err) {
printf("(Cuda error %s): %s\n","(matrixMultiplication::hipMalloc A1dGPU)",hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
err = hipMemcpy(A1dGPU, A1d, sizeof(float)*(N*M), hipMemcpyHostToDevice);
if( hipSuccess != err) {
printf("(Cuda error %s): %s\n","(matrixMultiplication::hipMemcpy A1dGPU)",hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
err = hipMalloc ((void **) &B1dGPU, sizeof(float)*(M*N));
if( hipSuccess != err) {
printf("(Cuda error %s): %s\n","(matrixMultiplication::hipMalloc B1dGPU)",hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
err = hipMemcpy(B1dGPU, B1d, sizeof(float)*(M*N), hipMemcpyHostToDevice);
if( hipSuccess != err) {
printf("(Cuda error %s): %s\n","(matrixMultiplication::hipMemcpy B1dGPU)",hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
err = hipMemcpy(B1dGPU, B1d, sizeof(float)*(N*M), hipMemcpyHostToDevice);
if( hipSuccess != err) { // Check for error values
printf("(Cuda error %s): %s\n","(matmulGPU::hipMemcpy B1dGPU)",hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
err = hipMalloc ((void **) &C1dGPU, sizeof(float)*(N*N));
if( hipSuccess != err) {
printf("(Cuda error %s): %s\n","(matrixMultiplication::hipMalloc C1dGPU)",hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
// Set the grid
dim3 dimBlock(block_size_x,block_size_y); // Set the number of threads per block
dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1),(N/dimBlock.y) + (!(N%dimBlock.y)?0:1) ); // Set the number of blocks in the grid (There are at least as many threads as elements in the result matrix, maybe even more)
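	// Worked example of the rounding-up above: with N=1000 and a 16x16 block, N/dimBlock.x = 62 and
	// N%dimBlock.x = 8 != 0, so one extra block is added and dimGrid.x = 63. The 63*16 = 1008 threads
	// cover all 1000 columns; the 8 surplus threads per row fail the idx<N / idy<N checks in the
	// kernel and simply do nothing.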
// Test block and grid
cudaTestBlockInformation (dimBlock); // Check that we have a legal amount of threads (in most cards, no more than 1024)
cudaLastErrorCheck("(Cuda error cudaTestBlockInformation)");
cudaTestGridInformation (dimGrid); // Check that we have a legal amount of blocks
cudaLastErrorCheck("(Cuda error cudaTestGridInformation)");
// Print the size of the grid
printf("Block size test (2d): %dx%d\n",dimBlock.x,dimBlock.y);
printf("Grid size in each dimension: %dx%dx%d\n",dimGrid.x,dimGrid.y,dimGrid.z);
// Call the kernel
hipLaunchKernelGGL(( computeMatMulGPU) , dim3(dimGrid),dim3(dimBlock), 0, 0, N,M,A1dGPU,B1dGPU,C1dGPU);
// Fetch the last error state, just in case something went wrong
cudaLastErrorCheck("(Cuda error in computeMatMulGPU)");
// Copy the result matrix back into the RAM memory
err = hipMemcpy(&(C1d[0]), C1dGPU, sizeof(float)*N*N, hipMemcpyDeviceToHost);
if( hipSuccess != err) {
printf("(Cuda error %s): %s\n","(matmulGPU::hipMemcpy C1dGPU)",hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
// Free the global memory used
hipFree(A1dGPU);
hipFree(B1dGPU);
hipFree(C1dGPU);
}
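// Hypothetical call site (illustration only, not part of the original program): the matrices are
// plain row-major 1D arrays, A being NxM, B being MxN and C being NxN.
//
//   int N = MATRIX_SIZE_N, M = MATRIX_SIZE_M;
//   float *A1d = new float[N*M];
//   float *B1d = new float[M*N];
//   float *C1d = new float[N*N];
//   /* ... fill A1d and B1d ... */
//   matmulGPU(N, M, A1d, B1d, C1d);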
| c6903a6ba5ab1bfda5e8543ee26fdb2af6192e8b.cu | //=============================================================================================
// Name : matrixMultiplicationCommented.cu
// Author : Jose Refojo
// Version : 06-02-2018
// Creation date : 22-09-2010
// Copyright : Copyright belongs to Trinity Centre for High Performance Computing
// Description : This program will multiply two matrices into a third one (AB=C)
// This file will describe matmulGPU, which allocates and transfers the matrices in the global memory of the gpu, and then sets up the kernel and runs it.
// The kernel uses a 2d grid (so it spawns a 2d set of threads), one thread per each element of the matrix C
// Each particular thread multiplies its row in A by its column in B and stores the obtained value in its position in C
//=============================================================================================
extern int block_size_x;
extern int block_size_y;
extern int MATRIX_SIZE_N; // Those are the default values of N and M
extern int MATRIX_SIZE_M;
extern int verbose;
extern int skipCpuTest; // Since the CPU test might take quite a long time, we give an option to skip it
#include "cudaUtils.h"
#include "matrixMultiplicationCommented.h"
#include "stdio.h"
#include "time.h"
#include <getopt.h>
// computeMatMulGPU is the kernel that will run the compute in the GPU: It is run by each and every thread of the grid
// It is run by every thread in a 2d grid (so each thread has an id in the first and second dimensions).
__global__ void computeMatMulGPU (int N,int M,float *A1dGPU,float *B1dGPU,float *C1dGPU) {
int idx=blockIdx.x*blockDim.x+threadIdx.x; // The global id of the thread in the first dimension
int idy=blockIdx.y*blockDim.y+threadIdx.y; // The global id of the thread in the second dimension
int k;
if (idx<N) { // We do this check to make sure that we do not go past the boundaries of the matrix in the first dimension
if (idy<N) { // We do this check to make sure that we do not go past the boundaries of the matrix in the second dimension
C1dGPU[idx+idy*N]=0.0f; // Start at zero, add up from there
for (k=0;k<M;k++) { // Add the product of the row of A with the column of B
C1dGPU[idx+idy*N]+=A1dGPU[k+idy*M]*B1dGPU[idx+k*N];
}
}
}
}
// This function serves as a bridge between the main and the GPU code - we can call it from C or C++ code, and it fires up the CUDA code
void matmulGPU (int N,int M,float *A1d,float *B1d,float *C1d) {
//int i,j;
cudaError_t err; // We can use this variable to check the return error of many cuda functions
float *A1dGPU,*B1dGPU,*C1dGPU;
// Allocate and transfer matrix A in the GPU
	// There are two ways we can catch errors with cudaMalloc and other cuda functions that aren't kernels: we can request the last error by calling cudaLastErrorCheck, or we can check the returned error code directly, as done here:
err = cudaMalloc ((void **) &A1dGPU, sizeof(float)*(N*M));
if( cudaSuccess != err) {
printf("(Cuda error %s): %s\n","(matrixMultiplication::cudaMalloc A1dGPU)",cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
err = cudaMemcpy(A1dGPU, A1d, sizeof(float)*(N*M), cudaMemcpyHostToDevice);
if( cudaSuccess != err) {
printf("(Cuda error %s): %s\n","(matrixMultiplication::cudaMemcpy A1dGPU)",cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
err = cudaMalloc ((void **) &B1dGPU, sizeof(float)*(M*N));
if( cudaSuccess != err) {
printf("(Cuda error %s): %s\n","(matrixMultiplication::cudaMalloc B1dGPU)",cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
err = cudaMemcpy(B1dGPU, B1d, sizeof(float)*(M*N), cudaMemcpyHostToDevice);
if( cudaSuccess != err) {
printf("(Cuda error %s): %s\n","(matrixMultiplication::cudaMemcpy B1dGPU)",cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
err = cudaMemcpy(B1dGPU, B1d, sizeof(float)*(N*M), cudaMemcpyHostToDevice);
if( cudaSuccess != err) { // Check for error values
printf("(Cuda error %s): %s\n","(matmulGPU::cudaMemcpy B1dGPU)",cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
err = cudaMalloc ((void **) &C1dGPU, sizeof(float)*(N*N));
if( cudaSuccess != err) {
printf("(Cuda error %s): %s\n","(matrixMultiplication::cudaMalloc C1dGPU)",cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
// Set the grid
dim3 dimBlock(block_size_x,block_size_y); // Set the number of threads per block
dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1),(N/dimBlock.y) + (!(N%dimBlock.y)?0:1) ); // Set the number of blocks in the grid (There are at least as many threads as elements in the result matrix, maybe even more)
// Test block and grid
cudaTestBlockInformation (dimBlock); // Check that we have a legal amount of threads (in most cards, no more than 1024)
cudaLastErrorCheck("(Cuda error cudaTestBlockInformation)");
cudaTestGridInformation (dimGrid); // Check that we have a legal amount of blocks
cudaLastErrorCheck("(Cuda error cudaTestGridInformation)");
// Print the size of the grid
printf("Block size test (2d): %dx%d\n",dimBlock.x,dimBlock.y);
printf("Grid size in each dimension: %dx%dx%d\n",dimGrid.x,dimGrid.y,dimGrid.z);
// Call the kernel
computeMatMulGPU <<<dimGrid,dimBlock>>> (N,M,A1dGPU,B1dGPU,C1dGPU);
// Fetch the last error state, just in case something went wrong
cudaLastErrorCheck("(Cuda error in computeMatMulGPU)");
// Copy the result matrix back into the RAM memory
err = cudaMemcpy(&(C1d[0]), C1dGPU, sizeof(float)*N*N, cudaMemcpyDeviceToHost);
if( cudaSuccess != err) {
printf("(Cuda error %s): %s\n","(matmulGPU::cudaMemcpy C1dGPU)",cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
// Free the global memory used
cudaFree(A1dGPU);
cudaFree(B1dGPU);
cudaFree(C1dGPU);
}
|
c48f889716295b0333b79561575352637a55c512.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S1_8.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
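// Layout note (explanatory): hipMallocPitch above allocates NEQ rows of num_volumes reals, one row
// per state variable, and pitch_h is the padded row width in bytes chosen by the runtime. State
// variable i of cell n therefore lives at *((real*)((char*)sv + pitch*i) + n), which is exactly the
// expression used by the kernels below.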
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
    // the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.8515588450196,0.00121430213604940,0.786571707358243,0.786296716592743,0.000167844563456543,0.488038682227013,0.00288158860722668,0.999998439423763,1.84438183277425e-08,1.82097276686639e-05,0.999777090775545,1.00745417788989,0.999998681267142,3.66658927008727e-05,0.546505726022791,10.1869983095667,139.379179153826}; for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
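            // Note: RHS_gpu returns the *updated* state in rDY (the time step is applied inside it:
            // explicit Euler for the voltage and the concentrations, exponential relaxation for the
            // gates), so the loop below simply writes rDY back into the pitched state array; the
            // voltage line in between is overwritten by the i == 0 iteration of that loop.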
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.0452238260814,0.000108706022167263,0.000159124775268073,0.000265119966583003,0.284358323096010,0.209481064695957,0.107047904137420,2.86430120215509,0.0202992477937955,1.50873877598206,1081.34868000984,0.000400030959633365,0.453106468021992,0.0170762047823025,0.00184218763805341,2.49511058194542e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
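    // (These are Rush-Larsen-style exponential updates: x_new = x_inf - (x_inf - x_old)*exp(-dt/tau_x).)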
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| c48f889716295b0333b79561575352637a55c512.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S1_8.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
    // the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.8515588450196,0.00121430213604940,0.786571707358243,0.786296716592743,0.000167844563456543,0.488038682227013,0.00288158860722668,0.999998439423763,1.84438183277425e-08,1.82097276686639e-05,0.999777090775545,1.00745417788989,0.999998681267142,3.66658927008727e-05,0.546505726022791,10.1869983095667,139.379179153826}; for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.0452238260814,0.000108706022167263,0.000159124775268073,0.000265119966583003,0.284358323096010,0.209481064695957,0.107047904137420,2.86430120215509,0.0202992477937955,1.50873877598206,1081.34868000984,0.000400030959633365,0.453106468021992,0.0170762047823025,0.00184218763805341,2.49511058194542e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
9fb7aa9cab7eba07efe6e7ef9b7ee0ba9cd7f467.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason:%s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
void initialData(float *ip, const int size)
{
int i;
for(i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++) {
if (abs(hostRef[i] - gpuRef[i]) > epsilon) {
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n, ", hostRef[i], gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n");
return;
}
// grid 1D block 1D: each thread handles one column (ix) and loops over every row (iy)
__global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < nx) {
for (int iy = 0; iy < ny; iy++) {
int idx = iy * nx + ix;
MatC[idx] = MatA[idx] + MatB[idx];
}
}
}
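// Explanatory note on this decomposition: each thread owns one column ix and walks down all ny
// rows, so with the launch used in main (block = (128,1), grid = (nx/128,1)) the 1<<14 columns are
// covered by 128 blocks of 128 threads. At every loop iteration the threads of a warp touch
// consecutive addresses iy*nx + ix (same iy, adjacent ix), so the global-memory accesses stay
// coalesced even though the grid itself is one-dimensional.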
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Useing Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up data size of matrix
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
    // initialize data at host side
double iStart = cpuSecond();
initialData(h_A, nxy);
initialData(h_B, nxy);
double iElaps = cpuSecond() - iStart;
printf("Matrix initialization elapsed %f sec\n", iElaps);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add matrix at host side for result checks
iStart = cpuSecond();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps = cpuSecond() - iStart;
printf("sumMatrixOnHost elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
CHECK(hipMalloc((void **)&d_MatA, nBytes));
CHECK(hipMalloc((void **)&d_MatB, nBytes));
CHECK(hipMalloc((void **)&d_MatC, nBytes));
// transfer data from host to device
CHECK(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
dim3 block(128, 1);
dim3 grid((nx + block.x - 1) / block.x, 1);
iStart = cpuSecond();
hipLaunchKernelGGL(( sumMatrixOnGPU1D), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx, ny);
    CHECK(hipDeviceSynchronize()); // the launch is asynchronous: wait for the kernel before stopping the timer
    iElaps = cpuSecond() - iStart;
printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n",
grid.x, grid.y, block.x, block.y, iElaps);
// check kernel error
CHECK(hipGetLastError());
// copy kernel result back to host side
CHECK(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
CHECK(hipFree(d_MatA));
CHECK(hipFree(d_MatB));
CHECK(hipFree(d_MatC));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
CHECK(hipDeviceReset());
return (0);
} | 9fb7aa9cab7eba07efe6e7ef9b7ee0ba9cd7f467.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason:%s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
void initialData(float *ip, const int size)
{
int i;
for(i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++) {
if (abs(hostRef[i] - gpuRef[i]) > epsilon) {
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n, ", hostRef[i], gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n");
return;
}
// grid 1D block 1D: each thread handles one column (ix) and loops over every row (iy)
__global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < nx) {
for (int iy = 0; iy < ny; iy++) {
int idx = iy * nx + ix;
MatC[idx] = MatA[idx] + MatB[idx];
}
}
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Useing Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up data size of matrix
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
    // initialize data at host side
double iStart = cpuSecond();
initialData(h_A, nxy);
initialData(h_B, nxy);
double iElaps = cpuSecond() - iStart;
printf("Matrix initialization elapsed %f sec\n", iElaps);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add matrix at host side for result checks
iStart = cpuSecond();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps = cpuSecond() - iStart;
printf("sumMatrixOnHost elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
CHECK(cudaMalloc((void **)&d_MatA, nBytes));
CHECK(cudaMalloc((void **)&d_MatB, nBytes));
CHECK(cudaMalloc((void **)&d_MatC, nBytes));
// transfer data from host to device
CHECK(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
dim3 block(128, 1);
dim3 grid((nx + block.x - 1) / block.x, 1);
iStart = cpuSecond();
sumMatrixOnGPU1D<<< grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny);
    CHECK(cudaDeviceSynchronize()); // the launch is asynchronous: wait for the kernel before stopping the timer
    iElaps = cpuSecond() - iStart;
printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n",
grid.x, grid.y, block.x, block.y, iElaps);
// check kernel error
CHECK(cudaGetLastError());
// copy kernel result back to host side
CHECK(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
CHECK(cudaFree(d_MatA));
CHECK(cudaFree(d_MatB));
CHECK(cudaFree(d_MatC));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
CHECK(cudaDeviceReset());
return (0);
} |
924dddc85c36cf3295d2673f835e0d6919be507e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include "guassian.cuh"
#include "common_hip.cuh"
float UcharToFloat(uchar a)
{
return a / 255.f;
}
uchar FloatToUchar(float f)
{
return uchar(clampf(f, 0, 1) * 255.f);
}
void ConvertUcharToFloat3(uchar* a, Float3* b, Int2 size)
{
int s = size.x * size.y;
for (int i = 0; i < s; ++i)
{
b[i].x = UcharToFloat(a[i * 3 + 0]);
b[i].y = UcharToFloat(a[i * 3 + 1]);
b[i].z = UcharToFloat(a[i * 3 + 2]);
}
}
void ConvertFloat3ToUchar(uchar* a, Float3* b, Int2 size)
{
int s = size.x * size.y;
for (int i = 0; i < s; ++i)
{
a[i * 3 + 0] = FloatToUchar(b[i].x);
a[i * 3 + 1] = FloatToUchar(b[i].y);
a[i * 3 + 2] = FloatToUchar(b[i].z);
}
}
enum class FilterType
{
Copy,
Gaussian,
};
void RunFilter(uchar* in, uchar* out, Int2 size, FilterType type)
{
Float3* d_inBuffer = nullptr;
Float3* d_outBuffer = nullptr;
Float3* h_inBuffer = new Float3[size.x * size.y];
Float3* h_outBuffer = new Float3[size.x * size.y];
GpuErrorCheck(hipMalloc((void**)& d_inBuffer, size.x * size.y * sizeof(Float3)));
GpuErrorCheck(hipMalloc((void**)& d_outBuffer, size.x * size.y * sizeof(Float3)));
ConvertUcharToFloat3(in, h_inBuffer, size);
GpuErrorCheck(hipMemcpy(d_inBuffer, h_inBuffer, size.x * size.y * sizeof(Float3), hipMemcpyHostToDevice));
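	// Note on the launch geometry below: the grid is (size.x/8, size.y/8) with 8x8 blocks, so only
	// the largest multiples of 8 in each dimension are covered by threads. That is exact when the
	// input dimensions are multiples of 8 (e.g. a 512x512 image); other sizes would leave a small
	// right/bottom border unprocessed.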
switch (type)
{
case FilterType::Gaussian:
CalculateGaussianKernel(2.0f);
hipLaunchKernelGGL(( GaussianFilter) , dim3(dim3(size.x / 8, size.y / 8, 1)), dim3(dim3(8, 8, 1)) , 0, 0, d_inBuffer, d_outBuffer, size);
break;
case FilterType::Copy:
default:
hipLaunchKernelGGL(( Copy) , dim3(dim3(size.x / 8, size.y / 8, 1)), dim3(dim3(8, 8, 1)) , 0, 0, d_inBuffer, d_outBuffer, size);
}
GpuErrorCheck(hipMemcpy(h_outBuffer, d_outBuffer, size.x * size.y * sizeof(Float3), hipMemcpyDeviceToHost));
ConvertFloat3ToUchar(out, h_outBuffer, size);
GpuErrorCheck(hipFree(d_inBuffer));
GpuErrorCheck(hipFree(d_outBuffer));
	delete[] h_inBuffer;  // allocated with new[], so release with delete[]
	delete[] h_outBuffer;
}
int main()
{
int x, y, n;
const char* filename = "lenna.png";
uchar* in = stbi_load(filename, &x, &y, &n, 0);
uchar* out = new uchar[x * y * n];
RunFilter(in, out, Int2(x, y), FilterType::Copy);
std::string filenameWrite = "result/copy.png";
stbi_write_png(filenameWrite.c_str(), x, y, 3, (void*)out, 0);
RunFilter(in, out, Int2(x, y), FilterType::Gaussian);
filenameWrite = "result/gaussian.png";
stbi_write_png(filenameWrite.c_str(), x, y, 3, (void*)out, 0);
delete[] out;
stbi_image_free(in);
return 0;
} | 924dddc85c36cf3295d2673f835e0d6919be507e.cu |
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include "guassian.cuh"
#include "common.cuh"
float UcharToFloat(uchar a)
{
return a / 255.f;
}
uchar FloatToUchar(float f)
{
return uchar(clampf(f, 0, 1) * 255.f);
}
void ConvertUcharToFloat3(uchar* a, Float3* b, Int2 size)
{
int s = size.x * size.y;
for (int i = 0; i < s; ++i)
{
b[i].x = UcharToFloat(a[i * 3 + 0]);
b[i].y = UcharToFloat(a[i * 3 + 1]);
b[i].z = UcharToFloat(a[i * 3 + 2]);
}
}
void ConvertFloat3ToUchar(uchar* a, Float3* b, Int2 size)
{
int s = size.x * size.y;
for (int i = 0; i < s; ++i)
{
a[i * 3 + 0] = FloatToUchar(b[i].x);
a[i * 3 + 1] = FloatToUchar(b[i].y);
a[i * 3 + 2] = FloatToUchar(b[i].z);
}
}
enum class FilterType
{
Copy,
Gaussian,
};
void RunFilter(uchar* in, uchar* out, Int2 size, FilterType type)
{
Float3* d_inBuffer = nullptr;
Float3* d_outBuffer = nullptr;
Float3* h_inBuffer = new Float3[size.x * size.y];
Float3* h_outBuffer = new Float3[size.x * size.y];
GpuErrorCheck(cudaMalloc((void**)& d_inBuffer, size.x * size.y * sizeof(Float3)));
GpuErrorCheck(cudaMalloc((void**)& d_outBuffer, size.x * size.y * sizeof(Float3)));
ConvertUcharToFloat3(in, h_inBuffer, size);
GpuErrorCheck(cudaMemcpy(d_inBuffer, h_inBuffer, size.x * size.y * sizeof(Float3), cudaMemcpyHostToDevice));
switch (type)
{
case FilterType::Gaussian:
CalculateGaussianKernel(2.0f);
GaussianFilter <<< dim3(size.x / 8, size.y / 8, 1), dim3(8, 8, 1) >>> (d_inBuffer, d_outBuffer, size);
break;
case FilterType::Copy:
default:
Copy <<< dim3(size.x / 8, size.y / 8, 1), dim3(8, 8, 1) >>> (d_inBuffer, d_outBuffer, size);
}
GpuErrorCheck(cudaMemcpy(h_outBuffer, d_outBuffer, size.x * size.y * sizeof(Float3), cudaMemcpyDeviceToHost));
ConvertFloat3ToUchar(out, h_outBuffer, size);
GpuErrorCheck(cudaFree(d_inBuffer));
GpuErrorCheck(cudaFree(d_outBuffer));
	delete[] h_inBuffer;  // allocated with new[], so release with delete[]
	delete[] h_outBuffer;
}
int main()
{
int x, y, n;
const char* filename = "lenna.png";
uchar* in = stbi_load(filename, &x, &y, &n, 0);
uchar* out = new uchar[x * y * n];
RunFilter(in, out, Int2(x, y), FilterType::Copy);
std::string filenameWrite = "result/copy.png";
stbi_write_png(filenameWrite.c_str(), x, y, 3, (void*)out, 0);
RunFilter(in, out, Int2(x, y), FilterType::Gaussian);
filenameWrite = "result/gaussian.png";
stbi_write_png(filenameWrite.c_str(), x, y, 3, (void*)out, 0);
delete[] out;
stbi_image_free(in);
return 0;
} |
b344f7b1fc12537465695327942373072656df96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _ZQ_CUDA_ATTENUATION_3D_CU_
#define _ZQ_CUDA_ATTENUATION_3D_CU_
#include "ZQlibCudaDefines.cuh"
namespace ZQ_CUDA_Attenuation3D
{
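	// Layout note (explanatory): the velocities live on a staggered MAC grid, so mac_u holds
	// (width+1) x height x depth x-faces, mac_v holds width x (height+1) x depth y-faces and mac_w
	// holds width x height x (depth+1) z-faces - exactly the sizes allocated in Attenuation3D below.
	// A face is attenuated only when no adjacent cell is marked occupied, so velocities on solid
	// boundaries are left untouched.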
__global__
void Atten_u_Kernel(float* mac_u, const bool* occupy, const float velAtten, const int width, const int height, const int depth)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = bx*blockDim.x+tx;
int y = by*blockDim.y+ty;
if(x > width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
if(x == 0)
{
if(!occupy[z*height*width+y*width+0])
mac_u[z*height*(width+1)+y*(width+1)+0] *= velAtten;
}
else if(x == width)
{
if(!occupy[z*height*width+y*width+width-1])
mac_u[z*height*(width+1)+y*(width+1)+width] *= velAtten;
}
else
{
if(!occupy[z*height*width+y*width+x-1] && !occupy[z*height*width+y*width+x])
mac_u[z*height*(width+1)+y*(width+1)+x] *= velAtten;
}
}
}
__global__
void Atten_v_Kernel(float* mac_v, const bool* occupy, const float velAtten, const int width, const int height, const int depth)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = bx*blockDim.x+tx;
int y = by*blockDim.y+ty;
if(x >= width || y > height)
return ;
for(int z = 0;z < depth;z++)
{
if(y == 0)
{
if(!occupy[z*height*width+x])
mac_v[z*(height+1)*width+x] *= velAtten;
}
else if(y == height)
{
if(!occupy[z*height*width+(height-1)*width+x])
mac_v[z*(height+1)*width+height*width+x] *= velAtten;
}
else
{
if(!occupy[z*height*width+(y-1)*width+x] && !occupy[z*height*width+y*width+x])
mac_v[z*(height+1)*width+y*width+x] *= velAtten;
}
}
}
__global__
void Atten_w_Kernel(float* mac_w, const bool* occupy, const float velAtten, const int width, const int height, const int depth)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = bx*blockDim.x+tx;
int y = by*blockDim.y+ty;
if(x >= width || y >= height)
return ;
if(!occupy[y*width+x])
mac_w[y*width+x] *= velAtten;
if(!occupy[(depth-1)*height*width+y*width+x])
mac_w[depth*height*width+y*width+x] *= velAtten;
for(int z = 1;z < depth;z++)
{
if(!occupy[(z-1)*height*width+y*width+x] && !occupy[z*height*width+y*width+x])
mac_w[z*height*width+y*width+x] *= velAtten;
}
}
__global__
void Atten_temperature_density_Kernel(float* temperature, float* density, const float tempAtten, const float densityAtten, const int width, const int height, const int depth)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = bx*blockDim.x+tx;
int y = by*blockDim.y+ty;
if(x >= width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
temperature[z*height*width+y*width+x] *= tempAtten;
density[z*height*width+y*width+x] *= densityAtten;
}
}
/*************************************************/
void cu_Attenuation3D(float* mac_u, float* mac_v, float* mac_w, float* temperature, float* density, const bool* occupy,
const float velAtten, const float tempAtten, const float densityAtten, const int width, const int height, const int depth)
{
dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE);
dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 u_gridSize((width+1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height+1+blockSize.y-1)/blockSize.y);
dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
hipLaunchKernelGGL(( Atten_u_Kernel), dim3(u_gridSize),dim3(blockSize), 0, 0, mac_u,occupy,velAtten,width,height,depth);
hipLaunchKernelGGL(( Atten_v_Kernel), dim3(v_gridSize),dim3(blockSize), 0, 0, mac_v,occupy,velAtten,width,height,depth);
hipLaunchKernelGGL(( Atten_w_Kernel), dim3(w_gridSize),dim3(blockSize), 0, 0, mac_w,occupy,velAtten,width,height,depth);
hipLaunchKernelGGL(( Atten_temperature_density_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, temperature,density,tempAtten,densityAtten,width,height,depth);
}
/***********************************************/
extern "C"
float Attenuation3D(float* mac_u, float* mac_v, float* mac_w, float* temperature, float* density, const bool* occupy,
const float velAtten, const float tempAtten, const float densityAtten, const int width, const int height, const int depth)
{
float time = 0;
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
float* mac_u_d = 0;
float* mac_v_d = 0;
float* mac_w_d = 0;
float* temperature_d = 0;
float* density_d = 0;
bool* occupy_d = 0;
checkCudaErrors( hipMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( hipMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( hipMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) );
checkCudaErrors( hipMalloc((void**)&temperature_d,sizeof(float)*width*height*depth) );
checkCudaErrors( hipMalloc((void**)&density_d,sizeof(float)*width*height*depth) );
checkCudaErrors( hipMalloc((void**)&occupy_d,sizeof(bool)*width*height*depth) );
checkCudaErrors( hipMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(temperature_d,temperature,sizeof(float)*width*height*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(density_d,density,sizeof(float)*width*height*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(occupy_d,occupy,sizeof(bool)*width*height*depth,hipMemcpyHostToDevice) );
cu_Attenuation3D(mac_u_d,mac_v_d,mac_w_d,temperature_d,density_d,occupy_d,velAtten,tempAtten,densityAtten,width,height,depth);
checkCudaErrors( hipMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(density,density_d,sizeof(float)*width*height*depth,hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(temperature,temperature_d,sizeof(float)*width*height*depth,hipMemcpyDeviceToHost) );
checkCudaErrors( hipFree(mac_u_d) );
checkCudaErrors( hipFree(mac_v_d) );
checkCudaErrors( hipFree(mac_w_d) );
checkCudaErrors( hipFree(temperature_d) );
checkCudaErrors( hipFree(density_d) );
checkCudaErrors( hipFree(occupy_d) );
mac_u_d = 0;
mac_v_d = 0;
mac_w_d = 0;
temperature_d = 0;
density_d = 0;
occupy_d = 0;
hipEventRecord(stop,0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&time,start,stop);
hipEventDestroy(start);
hipEventDestroy(stop);
return time;
}
}
#endif | b344f7b1fc12537465695327942373072656df96.cu | #ifndef _ZQ_CUDA_ATTENUATION_3D_CU_
#define _ZQ_CUDA_ATTENUATION_3D_CU_
#include "ZQlibCudaDefines.cuh"
namespace ZQ_CUDA_Attenuation3D
{
__global__
void Atten_u_Kernel(float* mac_u, const bool* occupy, const float velAtten, const int width, const int height, const int depth)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = bx*blockDim.x+tx;
int y = by*blockDim.y+ty;
if(x > width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
if(x == 0)
{
if(!occupy[z*height*width+y*width+0])
mac_u[z*height*(width+1)+y*(width+1)+0] *= velAtten;
}
else if(x == width)
{
if(!occupy[z*height*width+y*width+width-1])
mac_u[z*height*(width+1)+y*(width+1)+width] *= velAtten;
}
else
{
if(!occupy[z*height*width+y*width+x-1] && !occupy[z*height*width+y*width+x])
mac_u[z*height*(width+1)+y*(width+1)+x] *= velAtten;
}
}
}
__global__
void Atten_v_Kernel(float* mac_v, const bool* occupy, const float velAtten, const int width, const int height, const int depth)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = bx*blockDim.x+tx;
int y = by*blockDim.y+ty;
if(x >= width || y > height)
return ;
for(int z = 0;z < depth;z++)
{
if(y == 0)
{
if(!occupy[z*height*width+x])
mac_v[z*(height+1)*width+x] *= velAtten;
}
else if(y == height)
{
if(!occupy[z*height*width+(height-1)*width+x])
mac_v[z*(height+1)*width+height*width+x] *= velAtten;
}
else
{
if(!occupy[z*height*width+(y-1)*width+x] && !occupy[z*height*width+y*width+x])
mac_v[z*(height+1)*width+y*width+x] *= velAtten;
}
}
}
__global__
void Atten_w_Kernel(float* mac_w, const bool* occupy, const float velAtten, const int width, const int height, const int depth)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = bx*blockDim.x+tx;
int y = by*blockDim.y+ty;
if(x >= width || y >= height)
return ;
if(!occupy[y*width+x])
mac_w[y*width+x] *= velAtten;
if(!occupy[(depth-1)*height*width+y*width+x])
mac_w[depth*height*width+y*width+x] *= velAtten;
for(int z = 1;z < depth;z++)
{
if(!occupy[(z-1)*height*width+y*width+x] && !occupy[z*height*width+y*width+x])
mac_w[z*height*width+y*width+x] *= velAtten;
}
}
__global__
void Atten_temperature_density_Kernel(float* temperature, float* density, const float tempAtten, const float densityAtten, const int width, const int height, const int depth)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = bx*blockDim.x+tx;
int y = by*blockDim.y+ty;
if(x >= width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
temperature[z*height*width+y*width+x] *= tempAtten;
density[z*height*width+y*width+x] *= densityAtten;
}
}
/*************************************************/
void cu_Attenuation3D(float* mac_u, float* mac_v, float* mac_w, float* temperature, float* density, const bool* occupy,
const float velAtten, const float tempAtten, const float densityAtten, const int width, const int height, const int depth)
{
dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE);
dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 u_gridSize((width+1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height+1+blockSize.y-1)/blockSize.y);
dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
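// staggered MAC grid: mac_u holds (width+1)*height*depth x-faces and mac_v holds width*(height+1)*depth y-faces,
// so the u/v launches cover one extra column/row of threads; mac_w holds width*height*(depth+1) z-faces,
// which the w kernel walks over inside its z loop, so its grid matches the cell-centered one.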
Atten_u_Kernel<<<u_gridSize,blockSize>>>(mac_u,occupy,velAtten,width,height,depth);
Atten_v_Kernel<<<v_gridSize,blockSize>>>(mac_v,occupy,velAtten,width,height,depth);
Atten_w_Kernel<<<w_gridSize,blockSize>>>(mac_w,occupy,velAtten,width,height,depth);
Atten_temperature_density_Kernel<<<gridSize,blockSize>>>(temperature,density,tempAtten,densityAtten,width,height,depth);
}
/***********************************************/
extern "C"
float Attenuation3D(float* mac_u, float* mac_v, float* mac_w, float* temperature, float* density, const bool* occupy,
const float velAtten, const float tempAtten, const float densityAtten, const int width, const int height, const int depth)
{
float time = 0;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
float* mac_u_d = 0;
float* mac_v_d = 0;
float* mac_w_d = 0;
float* temperature_d = 0;
float* density_d = 0;
bool* occupy_d = 0;
checkCudaErrors( cudaMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( cudaMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( cudaMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) );
checkCudaErrors( cudaMalloc((void**)&temperature_d,sizeof(float)*width*height*depth) );
checkCudaErrors( cudaMalloc((void**)&density_d,sizeof(float)*width*height*depth) );
checkCudaErrors( cudaMalloc((void**)&occupy_d,sizeof(bool)*width*height*depth) );
checkCudaErrors( cudaMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(temperature_d,temperature,sizeof(float)*width*height*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(density_d,density,sizeof(float)*width*height*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(occupy_d,occupy,sizeof(bool)*width*height*depth,cudaMemcpyHostToDevice) );
cu_Attenuation3D(mac_u_d,mac_v_d,mac_w_d,temperature_d,density_d,occupy_d,velAtten,tempAtten,densityAtten,width,height,depth);
checkCudaErrors( cudaMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(density,density_d,sizeof(float)*width*height*depth,cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(temperature,temperature_d,sizeof(float)*width*height*depth,cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaFree(mac_u_d) );
checkCudaErrors( cudaFree(mac_v_d) );
checkCudaErrors( cudaFree(mac_w_d) );
checkCudaErrors( cudaFree(temperature_d) );
checkCudaErrors( cudaFree(density_d) );
checkCudaErrors( cudaFree(occupy_d) );
mac_u_d = 0;
mac_v_d = 0;
mac_w_d = 0;
temperature_d = 0;
density_d = 0;
occupy_d = 0;
cudaEventRecord(stop,0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time,start,stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return time;
}
}
#endif |
e9f4ba01a91f7bcc1bdf872ccd8b9f8b4f391501.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<iomanip>
#include<iterator>
#include<cassert>
#include<cuda_runtime.h>
#include<helper_cuda.h>
using std::cout;
using std::endl;
using std::begin;
using std::end;
using std::setprecision;
using std::setw;
#define NBLOCKS 64
#define NTHREADS 256
__global__ static void
timeReduction(float const* input, float *output, clock_t* timer){
// __shared__ float shared[2*blockDim.x];
extern __shared__ float shared[];
int const tid = threadIdx.x;
int const bid = blockIdx.x;
if(tid==0)
timer[bid] = clock();
shared[tid] = input[tid];
shared[tid + blockDim.x] = input[tid + blockDim.x];
//perform reduction to find minimum
for(int d = blockDim.x; d>0; d/=2){
__syncthreads();
if(tid<d){
float f0 = shared[tid];
float f1 = shared[tid+d];
if(f1<f0){
shared[tid] = f1;
}
}
}
if (tid == 0)
output[bid] = shared[0];
__syncthreads();
if(tid == 0)
timer[bid+gridDim.x] = clock();
}
int
main(int argc, char *argv[]){
cout<<"CUDA Clock sample"<<endl;
char const* tmp1 = *argv;
char const** tmp2 = &tmp1;
int dev = findCudaDevice(argc,tmp2);
float *dinput = nullptr;
float *doutput = nullptr;
clock_t *dtimer = nullptr;
float houtput[NBLOCKS];
clock_t timer[NBLOCKS*2];
float input[NTHREADS*2];
for(int i=0;i<NTHREADS*2; i++) // the kernel reads both input[tid] and input[tid+blockDim.x], so initialize all 2*NTHREADS entries
input[i] = static_cast<float>(i);
checkCudaErrors(hipMalloc((void **)&dinput, sizeof(float)*NTHREADS*2));
checkCudaErrors(hipMalloc((void **)&doutput, sizeof(float)*NBLOCKS));
checkCudaErrors(hipMalloc((void **)&dtimer, sizeof(clock_t)*NBLOCKS*2));
checkCudaErrors(hipMemcpy(dinput, input, sizeof(float)*NTHREADS*2, hipMemcpyHostToDevice));
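// the third launch argument supplies the dynamic shared memory backing `extern __shared__ float shared[]`
// in the kernel: 2*NTHREADS floats, i.e. the two input values each thread loads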
hipLaunchKernelGGL(( timeReduction), dim3(NBLOCKS),dim3(NTHREADS),sizeof(float)*2*NTHREADS, 0, dinput,doutput,dtimer);
checkCudaErrors(hipMemcpy(timer, dtimer, sizeof(clock_t)*NBLOCKS*2, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(houtput, doutput, sizeof(float)*NBLOCKS, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(dinput));
checkCudaErrors(hipFree(doutput));
checkCudaErrors(hipFree(dtimer));
cout<<"result:"<<endl;
for(auto p=begin(houtput); p != end(houtput); p++){
cout<<setw(6)<<setprecision(10)<<*p<<" ";
}
cout<<endl;
long double avgElapsedClocks = 0;
for(int i=0; i<NBLOCKS; i++){
avgElapsedClocks+=static_cast<long double>(timer[i+NBLOCKS]-timer[i]);
}
avgElapsedClocks = avgElapsedClocks/NBLOCKS;
cout<<"average clocks/block = "<<avgElapsedClocks<<endl;
exit(EXIT_SUCCESS);
}
| e9f4ba01a91f7bcc1bdf872ccd8b9f8b4f391501.cu | #include<iostream>
#include<iomanip>
#include<iterator>
#include<cassert>
#include<cuda_runtime.h>
#include<helper_cuda.h>
using std::cout;
using std::endl;
using std::begin;
using std::end;
using std::setprecision;
using std::setw;
#define NBLOCKS 64
#define NTHREADS 256
__global__ static void
timeReduction(float const* input, float *output, clock_t* timer){
// __shared__ float shared[2*blockDim.x];
extern __shared__ float shared[];
int const tid = threadIdx.x;
int const bid = blockIdx.x;
if(tid==0)
timer[bid] = clock();
shared[tid] = input[tid];
shared[tid + blockDim.x] = input[tid + blockDim.x];
//perform reduction to find minimum
for(int d = blockDim.x; d>0; d/=2){
__syncthreads();
if(tid<d){
float f0 = shared[tid];
float f1 = shared[tid+d];
if(f1<f0){
shared[tid] = f1;
}
}
}
if (tid == 0)
output[bid] = shared[0];
__syncthreads();
if(tid == 0)
timer[bid+gridDim.x] = clock();
}
int
main(int argc, char *argv[]){
cout<<"CUDA Clock sample"<<endl;
char const* tmp1 = *argv;
char const** tmp2 = &tmp1;
int dev = findCudaDevice(argc,tmp2);
float *dinput = nullptr;
float *doutput = nullptr;
clock_t *dtimer = nullptr;
float houtput[NBLOCKS];
clock_t timer[NBLOCKS*2];
float input[NTHREADS*2];
for(int i=0;i<NTHREADS*2; i++) // the kernel reads both input[tid] and input[tid+blockDim.x], so initialize all 2*NTHREADS entries
input[i] = static_cast<float>(i);
checkCudaErrors(cudaMalloc((void **)&dinput, sizeof(float)*NTHREADS*2));
checkCudaErrors(cudaMalloc((void **)&doutput, sizeof(float)*NBLOCKS));
checkCudaErrors(cudaMalloc((void **)&dtimer, sizeof(clock_t)*NBLOCKS*2));
checkCudaErrors(cudaMemcpy(dinput, input, sizeof(float)*NTHREADS*2, cudaMemcpyHostToDevice));
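// the third launch argument supplies the dynamic shared memory backing `extern __shared__ float shared[]`
// in the kernel: 2*NTHREADS floats, i.e. the two input values each thread loads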
timeReduction<<<NBLOCKS,NTHREADS,sizeof(float)*2*NTHREADS>>>(dinput,doutput,dtimer);
checkCudaErrors(cudaMemcpy(timer, dtimer, sizeof(clock_t)*NBLOCKS*2, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(houtput, doutput, sizeof(float)*NBLOCKS, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(dinput));
checkCudaErrors(cudaFree(doutput));
checkCudaErrors(cudaFree(dtimer));
cout<<"result:"<<endl;
for(auto p=begin(houtput); p != end(houtput); p++){
cout<<setw(6)<<setprecision(10)<<*p<<" ";
}
cout<<endl;
long double avgElapsedClocks = 0;
for(int i=0; i<NBLOCKS; i++){
avgElapsedClocks+=static_cast<long double>(timer[i+NBLOCKS]-timer[i]);
}
avgElapsedClocks = avgElapsedClocks/NBLOCKS;
cout<<"average clocks/block = "<<avgElapsedClocks<<endl;
exit(EXIT_SUCCESS);
}
|
074f5599ce237e4f99c1df9100e23020e5d5f780.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2013
@author Azzam Haidar
@author Tingxing Dong
@precisions normal z -> s d c
*/
#include "common_magma.h"
#include "batched_kernel_param.h"
#include "magma_templates.h"
#define PRECISION_z
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ magmaDoubleComplex shared_data[];
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ double dble_shared_data[];
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void zdotc_kernel_batched(int n, magmaDoubleComplex **x_array, int incx, int offset, magma_int_t *info_array, int gbstep)
{
int tx = threadIdx.x;
magmaDoubleComplex *x = x_array[blockIdx.z]+offset;
double *sdata = dble_shared_data;
magmaDoubleComplex res = MAGMA_Z_ZERO;
if (tx < n) {
res = x[tx*incx];
}
sdata[tx] = MAGMA_Z_REAL(res * MAGMA_Z_CNJG(res));
__syncthreads();
for(int s = blockDim.x/2; s > 32; s >>= 1 ) {
if (tx < s) {
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
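// warp-synchronous tail of the reduction: the last 64 partial sums are folded within one warp,
// relying on the volatile qualifier and implicit lockstep execution; on architectures with
// independent thread scheduling (Volta and later) this classic pattern generally needs __syncwarp()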
if (tx < 32) {
volatile double* smem = sdata;
smem[tx] += smem[tx+32];
smem[tx] += smem[tx+16];
smem[tx] += smem[tx+8];
smem[tx] += smem[tx+4];
smem[tx] += smem[tx+2];
smem[tx] += smem[tx+1];
}
if (tx == 0) {
double xreal = MAGMA_Z_REAL(x[n*incx]);
//MAGMA_Z_SET2REAL(x[n*incx], sqrt(xreal - sdata[0]));
x[n*incx] = MAGMA_Z_MAKE(sqrt(xreal - sdata[0]), 0);
if(x[n*incx] == MAGMA_Z_ZERO){
info_array[blockIdx.z] = offset + gbstep + 1;
}
}
}
void magma_zpotf2_zdotc_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized Zdotc
1) performs zdotc sum = x[0:n-1]*conj(x[0:n-1])
2) updates x[n] = sqrt(x[n]-sum);
*/
if (n > MAX_NTHREADS) {
printf("n = %d > %d is not supported in zpotf2_zdotc\n", (int) n, (int) MAX_NTHREADS);
}
int threadSize;
if (n <= 1024 && n > 512) {
threadSize = 1024;
}
else if (n <= 512 && n > 256 ) {
threadSize = 512;
}
else if (n <= 256 && n > 128) {
threadSize = 256;
}
else if (n <= 128 && n > 64) {
threadSize = 128;
}
else {
threadSize = 64;
}
dim3 grid(1, 1, batchCount);
hipLaunchKernelGGL(( zdotc_kernel_batched), dim3(grid), dim3(threadSize),
threadSize * sizeof(double), queue, n, x_array, incx, offset, info_array, gbstep);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void zdscal_kernel_batched(int n, magmaDoubleComplex **x_array, int incx, int offset, magma_int_t *info_array)
{
// checkinfo to avoid computation of the singular matrix
if(info_array[blockIdx.z] != 0 ) return;
int id = threadIdx.x;
magmaDoubleComplex *x = x_array[blockIdx.z]+offset;
__shared__ magmaDoubleComplex factor;
if (threadIdx.x == 0) {
factor = MAGMA_Z_MAKE(1.0/MAGMA_Z_REAL(x[0]), 0.0);
}
__syncthreads();
if ( id < n && id >0) {
x[id*incx] = x[id*incx] * factor;
//printf("x=%f", x[id*incx]);
}
}
void magma_zpotf2_zdscal_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized Zdscal perform x[1:n-1]/x[0]
*/
dim3 grid(1, 1, batchCount);
dim3 threads(n, 1, 1);
hipLaunchKernelGGL(( zdscal_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, x_array, incx, offset, info_array);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(PRECISION_z) || defined(PRECISION_c)
__global__ void zlacgv_kernel_batched(int n, magmaDoubleComplex **x_array, int incx, int offset)
{
int id = threadIdx.x;
magmaDoubleComplex *x = x_array[blockIdx.z]+offset;
if ( id < n ) {
x[id*incx] = MAGMA_Z_CNJG(x[id*incx]);
}
}
void magma_zlacgv_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, int offset, int batchCount, magma_queue_t queue)
{
/*
Purpose
=======
ZLACGV conjugates a complex vector of length N.
Arguments
=========
N (input) INTEGER
The length of the vector X. N >= 0.
X (input/output) COMPLEX*16 array, dimension
(1+(N-1)*abs(INCX))
On entry, the vector of length N to be conjugated.
On exit, X is overwritten with conjg(X).
INCX (input) INTEGER
The spacing between successive elements of X.
===================================================================== */
dim3 grid(1, 1, batchCount);
dim3 threads(n, 1, 1);
hipLaunchKernelGGL(( zlacgv_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, x_array, incx, offset);
}
#endif // defined(PRECISION_z) || defined(PRECISION_c)
/////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void zpotf2_device(int m, int n,
magmaDoubleComplex *A, int lda,
magmaDoubleComplex alpha,
magmaDoubleComplex beta, magma_int_t *info, int gbstep)
{
/*
Each thread block loads the entire A into shared memory,
factorizes it and copies it back. n must be small enough to fit in shared memory.
n is checked by a macro POTF2_TILE_SIZE before the kernel.
*/
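// In effect this runs the unblocked (lower-triangular) Cholesky recurrence one column `iter` at a time:
//   A(iter,iter) = sqrt( A(iter,iter) - sum_{j<iter} |A(iter,j)|^2 )
//   A(i,iter)    = ( A(i,iter) - sum_{j<iter} A(i,j)*conj(A(iter,j)) ) / A(iter,iter)   for i > iter
// using a shared-memory dot-product reduction, a small gemv-like update and a final scaling per column.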
// checkinfo to avoid computation of the singular matrix
if(*info != 0 ) return;
int tx = threadIdx.x;
magmaDoubleComplex *sdata_A = shared_data;
__shared__ magmaDoubleComplex factor;
__shared__ double sum[POTF2_TILE_SIZE];
// load A into sdata_A
if(tx < m)
{
for(int i=0; i<n; i++)
{
sdata_A[tx + i * m] = A[tx + i * lda];
}
}
__syncthreads();
for(int iter=0; iter<n; iter++)
{
double res = MAGMA_D_ZERO;
magmaDoubleComplex res1 = MAGMA_Z_ZERO;
//1) performs zdotc sum = A[iter, 0:iter-1]*conj(A[iter, 0:iter-1])
//2) updates A[iter,iter] = sqrt(A[iter,iter]-sum);
if(tx<iter)
{
res = MAGMA_Z_REAL (sdata_A[iter + tx * m] * MAGMA_Z_CNJG(sdata_A[iter + tx * m]));
sum[tx] = res;
}
else
{
sum[tx] = 0.0;
}
__syncthreads();
magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum);//tried on K40: if m=32 n=32 the overall zpotf2_device routine time is 60ms n=16 time=25 n=8 time=20ms
//magma_sum_reduce_n(iter, tx, sum); //tried on K40: if m=32 n=32 the time went from 61ms to 70ms when switching to reduce_n. n=16 time=28.
//magma_sum_reduce_inlined(iter, tx, sum); //tried on K40: similar to magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum);
if (tx == 0) {
double xreal = MAGMA_Z_REAL(sdata_A[iter + iter * m]);
sdata_A[iter + iter * m] = MAGMA_Z_MAKE(sqrt(xreal - sum[0]), 0);
if(sdata_A[iter + iter * m] == MAGMA_Z_ZERO){
*info = iter + gbstep + 1;
}
}
__syncthreads();
if(sdata_A[iter + iter * m] == MAGMA_Z_ZERO) return;
__syncthreads();
//zlacgv conjugates a complex vector of length iter. //TODO
#if defined(PRECISION_z) || defined(PRECISION_c)
if(tx < iter)
{
sdata_A[iter + tx * m] = MAGMA_Z_CNJG(sdata_A[iter + tx * m]);
}
__syncthreads();
#endif
// zgemv
// Compute elements iter:n-1 of column iter = A(iter:n,0:iter-1) * A(iter-1,0:iter-1) (row).
if(tx < m && tx > iter)
{
for(int j=0; j < iter; j++)
{
res1 += sdata_A[tx + j * m] * sdata_A[iter + j * m]; // TODO move the zlacgv conj to be done automatically here implicitly.
}
sdata_A [tx + iter * m] = alpha * res1 + sdata_A [tx + iter * m] * beta;
}
__syncthreads();
//zlacgv conjugates a complex vector of length iter.
#if defined(PRECISION_z) || defined(PRECISION_c)
if(tx < iter)
{
sdata_A[iter + tx * m] = MAGMA_Z_CNJG(sdata_A[iter + tx * m]);
}
__syncthreads();
#endif
// zdscal perform A[iter:n-1, iter]/A[iter,iter];
if (tx == 0) {
factor = MAGMA_Z_MAKE(1.0/MAGMA_Z_REAL(sdata_A[iter + iter * m]), 0.0);
}
__syncthreads();
if ( tx < m && tx > iter) {
sdata_A[ tx + iter * m ] *= factor;
}
__syncthreads();
}// end of iter
//copy sdata_A to A
if(tx < m)
{
for(int i=0; i<n; i++)
{
A[tx + i * lda] = sdata_A[tx + i * m];
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void zpotf2_kernel_batched(int m, int n,
magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex alpha,
magmaDoubleComplex beta,
magma_int_t *info_array, int gbstep)
{
/*
Each thread block loads the entire dA_array[blockIdx.z] into shared memory,
factorizes it and copies it back. n must be small enough to fit in shared memory.
n is checked by a macro POTF2_TILE_SIZE before the kernel.
*/
int batchid = blockIdx.z;
zpotf2_device(m, n, dA_array[batchid], lda, alpha, beta, &(info_array[batchid]), gbstep);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void zpotf2_kernel(int m, int n,
magmaDoubleComplex *dA, int lda,
magmaDoubleComplex alpha,
magmaDoubleComplex beta,
magma_int_t *info)
{
zpotf2_device(m, n, dA, lda, alpha, beta, info, 0);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
zpotf2 computes the Cholesky factorization of a real symmetric
positive definite matrix A.
The factorization has the form
A = U**H * U, if UPLO = MagmaUpper, or
A = L * L**H, if UPLO = MagmaLower,
where U is an upper triangular matrix and L is lower triangular.
This is the unblocked version of the algorithm, calling Level 2 BLAS.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is stored.
- = MagmaUpper: Upper triangular
- = MagmaLower: Lower triangular
@param[in]
n INTEGER
The order of the matrix A. N >= 0 and N <= 512.
@param[in,out]
dA COMPLEX_16 array, dimension (LDDA,N)
On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading
n by n upper triangular part of A contains the upper
triangular part of the matrix A, and the strictly lower
triangular part of A is not referenced. If UPLO = MagmaLower, the
leading n by n lower triangular part of A contains the lower
triangular part of the matrix A, and the strictly upper
triangular part of A is not referenced.
\n
On exit, if INFO = 0, the factor U or L from the Cholesky
factorization A = U**H * U or A = L * L**H.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, the leading minor of order k is not
positive definite, and the factorization could not be
completed.
@ingroup magma_zposv_aux
********************************************************************/
extern "C" magma_int_t
magma_zpotf2_tile_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex **dA_array, magma_int_t lda,
magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t arginfo = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
arginfo = -1;
} else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE || n > POTF2_TILE_SIZE) {
arginfo = -2;
} else if (lda < max(1,m)) {
arginfo = -4;
} else if (m < n) {
arginfo = -10;
}
if (uplo == MagmaUpper) {
printf("Upper side is unavailable \n");
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// Quick return if possible
if (m == 0 || n == 0) {
return arginfo;
}
magmaDoubleComplex alpha = MAGMA_Z_NEG_ONE;
magmaDoubleComplex beta = MAGMA_Z_ONE;
dim3 dimGrid(1, 1, batchCount);
dim3 threads(POTF2_TILE_SIZE, 1);
int shared_mem_size = sizeof(magmaDoubleComplex)*m*n; // + sizeof(double)*(POTF2_TILE_SIZE+1);
hipLaunchKernelGGL(( zpotf2_kernel_batched), dim3(dimGrid), dim3(threads), shared_mem_size, queue , m, n, dA_array, lda, alpha, beta, info_array, gbstep);
return arginfo;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C" magma_int_t
magma_zpotf2_tile(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex *dA, magma_int_t lda,
magma_int_t *info)
{
*info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
*info = -1;
} else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE) {
*info = -2;
} else if (lda < max(1,m)) {
*info = -4;
} else if (m < n) {
*info = -10;
}
if (uplo == MagmaUpper) {
printf("Upper side is unavailable \n");
*info = -1;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magmaDoubleComplex alpha = MAGMA_Z_NEG_ONE;
magmaDoubleComplex beta = MAGMA_Z_ONE;
dim3 dimGrid(1);
dim3 threads(POTF2_TILE_SIZE, 1);
int shared_mem_size = sizeof(magmaDoubleComplex)*m*n; // + sizeof(double)*(POTF2_TILE_SIZE+1);
hipLaunchKernelGGL(( zpotf2_kernel), dim3(dimGrid), dim3(threads), shared_mem_size, magma_stream , m, n, dA, lda, alpha, beta, info);
return *info;
}
| 074f5599ce237e4f99c1df9100e23020e5d5f780.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2013
@author Azzam Haidar
@author Tingxing Dong
@precisions normal z -> s d c
*/
#include "common_magma.h"
#include "batched_kernel_param.h"
#include "magma_templates.h"
#define PRECISION_z
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ magmaDoubleComplex shared_data[];
// dynamically allocated shared memory, set to size number of threads when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ double dble_shared_data[];
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void zdotc_kernel_batched(int n, magmaDoubleComplex **x_array, int incx, int offset, magma_int_t *info_array, int gbstep)
{
int tx = threadIdx.x;
magmaDoubleComplex *x = x_array[blockIdx.z]+offset;
double *sdata = dble_shared_data;
magmaDoubleComplex res = MAGMA_Z_ZERO;
if (tx < n) {
res = x[tx*incx];
}
sdata[tx] = MAGMA_Z_REAL(res * MAGMA_Z_CNJG(res));
__syncthreads();
for(int s = blockDim.x/2; s > 32; s >>= 1 ) {
if (tx < s) {
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
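// warp-synchronous tail of the reduction: the last 64 partial sums are folded within one warp,
// relying on the volatile qualifier and implicit lockstep execution; on architectures with
// independent thread scheduling (Volta and later) this classic pattern generally needs __syncwarp()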
if (tx < 32) {
volatile double* smem = sdata;
smem[tx] += smem[tx+32];
smem[tx] += smem[tx+16];
smem[tx] += smem[tx+8];
smem[tx] += smem[tx+4];
smem[tx] += smem[tx+2];
smem[tx] += smem[tx+1];
}
if (tx == 0) {
double xreal = MAGMA_Z_REAL(x[n*incx]);
//MAGMA_Z_SET2REAL(x[n*incx], sqrt(xreal - sdata[0]));
x[n*incx] = MAGMA_Z_MAKE(sqrt(xreal - sdata[0]), 0);
if(x[n*incx] == MAGMA_Z_ZERO){
info_array[blockIdx.z] = offset + gbstep + 1;
}
}
}
void magma_zpotf2_zdotc_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized Zdotc
1) performs zdotc sum = x[0:n-1]*conj(x[0:n-1])
2) updates x[n] = sqrt(x[n]-sum);
*/
if (n > MAX_NTHREADS) {
printf("n = %d > %d is not supported in zpotf2_zdotc\n", (int) n, (int) MAX_NTHREADS);
}
int threadSize;
if (n <= 1024 && n > 512) {
threadSize = 1024;
}
else if (n <= 512 && n > 256 ) {
threadSize = 512;
}
else if (n <= 256 && n > 128) {
threadSize = 256;
}
else if (n <= 128 && n > 64) {
threadSize = 128;
}
else {
threadSize = 64;
}
dim3 grid(1, 1, batchCount);
zdotc_kernel_batched<<< grid, threadSize,
threadSize * sizeof(double), queue>>> (n, x_array, incx, offset, info_array, gbstep);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void zdscal_kernel_batched(int n, magmaDoubleComplex **x_array, int incx, int offset, magma_int_t *info_array)
{
// checkinfo to avoid computation of the singular matrix
if(info_array[blockIdx.z] != 0 ) return;
int id = threadIdx.x;
magmaDoubleComplex *x = x_array[blockIdx.z]+offset;
__shared__ magmaDoubleComplex factor;
if (threadIdx.x == 0) {
factor = MAGMA_Z_MAKE(1.0/MAGMA_Z_REAL(x[0]), 0.0);
}
__syncthreads();
if ( id < n && id >0) {
x[id*incx] = x[id*incx] * factor;
//printf("x=%f", x[id*incx]);
}
}
void magma_zpotf2_zdscal_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue)
{
/*
Specialized Zdscal perform x[1:n-1]/x[0]
*/
dim3 grid(1, 1, batchCount);
dim3 threads(n, 1, 1);
zdscal_kernel_batched<<< grid, threads, 0, queue >>> (n, x_array, incx, offset, info_array);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(PRECISION_z) || defined(PRECISION_c)
__global__ void zlacgv_kernel_batched(int n, magmaDoubleComplex **x_array, int incx, int offset)
{
int id = threadIdx.x;
magmaDoubleComplex *x = x_array[blockIdx.z]+offset;
if ( id < n ) {
x[id*incx] = MAGMA_Z_CNJG(x[id*incx]);
}
}
void magma_zlacgv_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, int offset, int batchCount, magma_queue_t queue)
{
/*
Purpose
=======
ZLACGV conjugates a complex vector of length N.
Arguments
=========
N (input) INTEGER
The length of the vector X. N >= 0.
X (input/output) COMPLEX*16 array, dimension
(1+(N-1)*abs(INCX))
On entry, the vector of length N to be conjugated.
On exit, X is overwritten with conjg(X).
INCX (input) INTEGER
The spacing between successive elements of X.
===================================================================== */
dim3 grid(1, 1, batchCount);
dim3 threads(n, 1, 1);
zlacgv_kernel_batched<<< grid, threads, 0, queue >>> (n, x_array, incx, offset);
}
#endif // defined(PRECISION_z) || defined(PRECISION_c)
/////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void zpotf2_device(int m, int n,
magmaDoubleComplex *A, int lda,
magmaDoubleComplex alpha,
magmaDoubleComplex beta, magma_int_t *info, int gbstep)
{
/*
Each thread block loads the entire A into shared memory,
factorizes it and copies it back. n must be small enough to fit in shared memory.
n is checked by a macro POTF2_TILE_SIZE before the kernel.
*/
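// In effect this runs the unblocked (lower-triangular) Cholesky recurrence one column `iter` at a time:
//   A(iter,iter) = sqrt( A(iter,iter) - sum_{j<iter} |A(iter,j)|^2 )
//   A(i,iter)    = ( A(i,iter) - sum_{j<iter} A(i,j)*conj(A(iter,j)) ) / A(iter,iter)   for i > iter
// using a shared-memory dot-product reduction, a small gemv-like update and a final scaling per column.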
// checkinfo to avoid computation of the singular matrix
if(*info != 0 ) return;
int tx = threadIdx.x;
magmaDoubleComplex *sdata_A = shared_data;
__shared__ magmaDoubleComplex factor;
__shared__ double sum[POTF2_TILE_SIZE];
// load A into sdata_A
if(tx < m)
{
for(int i=0; i<n; i++)
{
sdata_A[tx + i * m] = A[tx + i * lda];
}
}
__syncthreads();
for(int iter=0; iter<n; iter++)
{
double res = MAGMA_D_ZERO;
magmaDoubleComplex res1 = MAGMA_Z_ZERO;
//1) performs zdotc sum = A[iter, 0:iter-1]*conj(A[iter, 0:iter-1])
//2) updates A[iter,iter] = sqrt(A[iter,iter]-sum);
if(tx<iter)
{
res = MAGMA_Z_REAL (sdata_A[iter + tx * m] * MAGMA_Z_CNJG(sdata_A[iter + tx * m]));
sum[tx] = res;
}
else
{
sum[tx] = 0.0;
}
__syncthreads();
magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum);//tried on K40: if m=32 n=32 the overall zpotf2_device routine time is 60ms n=16 time=25 n=8 time=20ms
//magma_sum_reduce_n(iter, tx, sum); //tried on K40: if m=32 n=32 the time went from 61ms to 70ms when switching to reduce_n. n=16 time=28.
//magma_sum_reduce_inlined(iter, tx, sum); //tried on K40: similar to magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum);
if (tx == 0) {
double xreal = MAGMA_Z_REAL(sdata_A[iter + iter * m]);
sdata_A[iter + iter * m] = MAGMA_Z_MAKE(sqrt(xreal - sum[0]), 0);
if(sdata_A[iter + iter * m] == MAGMA_Z_ZERO){
*info = iter + gbstep + 1;
}
}
__syncthreads();
if(sdata_A[iter + iter * m] == MAGMA_Z_ZERO) return;
__syncthreads();
//zlacgv conjugates a complex vector of length iter. //TODO
#if defined(PRECISION_z) || defined(PRECISION_c)
if(tx < iter)
{
sdata_A[iter + tx * m] = MAGMA_Z_CNJG(sdata_A[iter + tx * m]);
}
__syncthreads();
#endif
// zgemv
// Compute elements iter:n-1 of column iter = A(iter:n,0:iter-1) * A(iter-1,0:iter-1) (row).
if(tx < m && tx > iter)
{
for(int j=0; j < iter; j++)
{
res1 += sdata_A[tx + j * m] * sdata_A[iter + j * m]; // TODO move the zlacgv conj to be done automatically here implicitly.
}
sdata_A [tx + iter * m] = alpha * res1 + sdata_A [tx + iter * m] * beta;
}
__syncthreads();
//zlacgv conjugates a complex vector of length iter.
#if defined(PRECISION_z) || defined(PRECISION_c)
if(tx < iter)
{
sdata_A[iter + tx * m] = MAGMA_Z_CNJG(sdata_A[iter + tx * m]);
}
__syncthreads();
#endif
// zdscal perform A[iter:n-1, iter]/A[iter,iter];
if (tx == 0) {
factor = MAGMA_Z_MAKE(1.0/MAGMA_Z_REAL(sdata_A[iter + iter * m]), 0.0);
}
__syncthreads();
if ( tx < m && tx > iter) {
sdata_A[ tx + iter * m ] *= factor;
}
__syncthreads();
}// end of iter
//copy sdata_A to A
if(tx < m)
{
for(int i=0; i<n; i++)
{
A[tx + i * lda] = sdata_A[tx + i * m];
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void zpotf2_kernel_batched(int m, int n,
magmaDoubleComplex **dA_array, int lda,
magmaDoubleComplex alpha,
magmaDoubleComplex beta,
magma_int_t *info_array, int gbstep)
{
/*
Each thread block loads the entire dA_array[blockIdx.z] into shared memory,
factorizes it and copies it back. n must be small enough to fit in shared memory.
n is checked by a macro POTF2_TILE_SIZE before the kernel.
*/
int batchid = blockIdx.z;
zpotf2_device(m, n, dA_array[batchid], lda, alpha, beta, &(info_array[batchid]), gbstep);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void zpotf2_kernel(int m, int n,
magmaDoubleComplex *dA, int lda,
magmaDoubleComplex alpha,
magmaDoubleComplex beta,
magma_int_t *info)
{
zpotf2_device(m, n, dA, lda, alpha, beta, info, 0);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
zpotf2 computes the Cholesky factorization of a real symmetric
positive definite matrix A.
The factorization has the form
A = U**H * U, if UPLO = MagmaUpper, or
A = L * L**H, if UPLO = MagmaLower,
where U is an upper triangular matrix and L is lower triangular.
This is the unblocked version of the algorithm, calling Level 2 BLAS.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
symmetric matrix A is stored.
- = MagmaUpper: Upper triangular
- = MagmaLower: Lower triangular
@param[in]
n INTEGER
The order of the matrix A. N >= 0 and N <= 512.
@param[in,out]
dA COMPLEX_16 array, dimension (LDDA,N)
On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading
n by n upper triangular part of A contains the upper
triangular part of the matrix A, and the strictly lower
triangular part of A is not referenced. If UPLO = MagmaLower, the
leading n by n lower triangular part of A contains the lower
triangular part of the matrix A, and the strictly upper
triangular part of A is not referenced.
\n
On exit, if INFO = 0, the factor U or L from the Cholesky
factorization A = U**H * U or A = L * L**H.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, the leading minor of order k is not
positive definite, and the factorization could not be
completed.
@ingroup magma_zposv_aux
********************************************************************/
extern "C" magma_int_t
magma_zpotf2_tile_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex **dA_array, magma_int_t lda,
magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t arginfo = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
arginfo = -1;
} else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE || n > POTF2_TILE_SIZE) {
arginfo = -2;
} else if (lda < max(1,m)) {
arginfo = -4;
} else if (m < n) {
arginfo = -10;
}
if (uplo == MagmaUpper) {
printf("Upper side is unavailable \n");
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// Quick return if possible
if (m == 0 || n == 0) {
return arginfo;
}
magmaDoubleComplex alpha = MAGMA_Z_NEG_ONE;
magmaDoubleComplex beta = MAGMA_Z_ONE;
dim3 dimGrid(1, 1, batchCount);
dim3 threads(POTF2_TILE_SIZE, 1);
int shared_mem_size = sizeof(magmaDoubleComplex)*m*n; // + sizeof(double)*(POTF2_TILE_SIZE+1);
zpotf2_kernel_batched<<<dimGrid, threads, shared_mem_size, queue >>>(m, n, dA_array, lda, alpha, beta, info_array, gbstep);
return arginfo;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C" magma_int_t
magma_zpotf2_tile(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex *dA, magma_int_t lda,
magma_int_t *info)
{
*info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
*info = -1;
} else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE) {
*info = -2;
} else if (lda < max(1,m)) {
*info = -4;
} else if (m < n) {
*info = -10;
}
if (uplo == MagmaUpper) {
printf("Upper side is unavailable \n");
*info = -1;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magmaDoubleComplex alpha = MAGMA_Z_NEG_ONE;
magmaDoubleComplex beta = MAGMA_Z_ONE;
dim3 dimGrid(1);
dim3 threads(POTF2_TILE_SIZE, 1);
int shared_mem_size = sizeof(magmaDoubleComplex)*m*n; // + sizeof(double)*(POTF2_TILE_SIZE+1);
zpotf2_kernel<<<dimGrid, threads, shared_mem_size, magma_stream >>>(m, n, dA, lda, alpha, beta, info);
return *info;
}
|
da70f0cab902605fdd360393c62a792a74ffee42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/bce_with_logits_loss_impl.cuh"
__device__ __forceinline__ size_t Index(const size_t &index, const size_t &dim) { return dim == 1 ? 0 : index; }
template <typename T>
__global__ void FillWithoutBroadcast(const size_t size, const T *src, T *dst) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
dst[pos] = src[pos];
}
return;
}
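// Broadcast copy: decompose the flat dst index into per-dimension indices against dst_shape,
// then rebuild the matching src offset, with Index() collapsing any dimension where src_shape[i] == 1
// (NumPy-style broadcasting of weight/pos_weight up to the logits shape).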
template <typename T>
__global__ void FillAndBroadcast(const size_t size, const size_t shape_size, const size_t *src_shape,
const size_t *dst_shape, const T *src, T *dst) {
size_t dst_index_array[MAX_LOGITS_DIMENSION];
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
size_t tmp_pos = pos;
size_t pos_size = size / dst_shape[0];
dst_index_array[0] = tmp_pos / pos_size;
for (size_t i = 1; i < shape_size; i++) {
tmp_pos -= dst_index_array[i - 1] * pos_size;
pos_size = pos_size / dst_shape[i];
dst_index_array[i] = tmp_pos / pos_size;
}
size_t src_pos = 0;
size_t src_size = 1;
for (size_t i = 0; i < shape_size; i++) {
src_size *= src_shape[i];
}
for (size_t i = 0; i < shape_size; i++) {
src_size /= src_shape[i];
size_t length_by_index = Index(dst_index_array[i], src_shape[i]) * src_size;
src_pos += length_by_index;
}
dst[pos] = src[src_pos];
}
return;
}
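// Numerically stable BCE-with-logits with per-element pos_weight p (already broadcast into shape_broadcasted):
//   loss = (1 - y)*x + (1 + (p - 1)*y) * log(1 + exp(-x)),
// where log(1 + exp(-x)) is evaluated as m + log(exp(-m) + exp(-x - m)) with m = max(-x, 0)
// so that large |x| does not overflow the exponential.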
template <typename T>
__global__ void BCEWithLogitsLossMain(size_t size, const T *predict, const T *target, const T *shape_broadcasted,
T *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
T max_value = -predict[pos];
max_value = max_value > static_cast<T>(0) ? max_value : static_cast<T>(0);
const T log_weight = (shape_broadcasted[pos] - static_cast<T>(1)) * target[pos] + static_cast<T>(1);
output[pos] = (static_cast<T>(1) - target[pos]) * predict[pos] +
log_weight * (log(exp(-max_value) + exp(-predict[pos] - max_value)) + max_value);
}
return;
}
template <>
__global__ void BCEWithLogitsLossMain(size_t size, const half *predict, const half *target,
const half *shape_broadcasted, half *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
half max_value = -predict[pos];
max_value = max_value > static_cast<half>(0) ? max_value : static_cast<half>(0);
const half log_weight = (shape_broadcasted[pos] - static_cast<half>(1)) * target[pos] + static_cast<half>(1);
output[pos] = (static_cast<half>(1) - target[pos]) * predict[pos] +
log_weight * (hlog(hexp(-max_value) + hexp(-predict[pos] - max_value)) + max_value);
}
return;
}
template <typename T>
__global__ void Mul(size_t size, const T *lhs, T *rhs) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
rhs[pos] *= lhs[pos];
}
return;
}
template <typename T>
void CalBCEWithLogitsLoss(const size_t input_size, const T *predict, const T *target, const size_t *input_shape,
const size_t shape_size, const T *weight, const size_t *weight_shape,
const bool weight_need_broadcast, const T *pos_weight, const size_t *pos_weight_shape,
const bool pos_weight_need_broadcast, T *shape_broadcasted, T *output,
hipStream_t cuda_stream) {
if (pos_weight_need_broadcast) {
hipLaunchKernelGGL(( FillAndBroadcast), dim3(GET_BLOCKS(input_size)), dim3(GET_THREADS), 0, cuda_stream,
input_size, shape_size, pos_weight_shape, input_shape, pos_weight, shape_broadcasted);
} else {
hipLaunchKernelGGL(( FillWithoutBroadcast), dim3(GET_BLOCKS(input_size)), dim3(GET_THREADS), 0, cuda_stream, input_size, pos_weight,
shape_broadcasted);
}
hipLaunchKernelGGL(( BCEWithLogitsLossMain), dim3(GET_BLOCKS(input_size)), dim3(GET_THREADS), 0, cuda_stream, input_size, predict, target,
shape_broadcasted, output);
if (weight_need_broadcast) {
hipLaunchKernelGGL(( FillAndBroadcast), dim3(GET_BLOCKS(input_size)), dim3(GET_THREADS), 0, cuda_stream, input_size, shape_size, weight_shape,
input_shape, weight, shape_broadcasted);
} else {
hipLaunchKernelGGL(( FillWithoutBroadcast), dim3(GET_BLOCKS(input_size)), dim3(GET_THREADS), 0, cuda_stream, input_size, weight,
shape_broadcasted);
}
hipLaunchKernelGGL(( Mul), dim3(GET_BLOCKS(input_size)), dim3(GET_THREADS), 0, cuda_stream, input_size, shape_broadcasted, output);
return;
}
template void CalBCEWithLogitsLoss<half>(const size_t input_size, const half *predict, const half *target,
const size_t *input_shape, const size_t shape_size, const half *weight,
const size_t *weight_shape, const bool weight_need_broadcast,
const half *pos_weight, const size_t *pos_weight_shape,
const bool pos_weight_need_broadcast, half *shape_broadcasted, half *output,
hipStream_t cuda_stream);
template void CalBCEWithLogitsLoss<float>(const size_t input_size, const float *predict, const float *target,
const size_t *input_shape, const size_t shape_size, const float *weight,
const size_t *weight_shape, const bool weight_need_broadcast,
const float *pos_weight, const size_t *pos_weight_shape,
const bool pos_weight_need_broadcast, float *shape_broadcasted, float *output,
hipStream_t cuda_stream);
| da70f0cab902605fdd360393c62a792a74ffee42.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/bce_with_logits_loss_impl.cuh"
__device__ __forceinline__ size_t Index(const size_t &index, const size_t &dim) { return dim == 1 ? 0 : index; }
template <typename T>
__global__ void FillWithoutBroadcast(const size_t size, const T *src, T *dst) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
dst[pos] = src[pos];
}
return;
}
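// Broadcast copy: decompose the flat dst index into per-dimension indices against dst_shape,
// then rebuild the matching src offset, with Index() collapsing any dimension where src_shape[i] == 1
// (NumPy-style broadcasting of weight/pos_weight up to the logits shape).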
template <typename T>
__global__ void FillAndBroadcast(const size_t size, const size_t shape_size, const size_t *src_shape,
const size_t *dst_shape, const T *src, T *dst) {
size_t dst_index_array[MAX_LOGITS_DIMENSION];
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
size_t tmp_pos = pos;
size_t pos_size = size / dst_shape[0];
dst_index_array[0] = tmp_pos / pos_size;
for (size_t i = 1; i < shape_size; i++) {
tmp_pos -= dst_index_array[i - 1] * pos_size;
pos_size = pos_size / dst_shape[i];
dst_index_array[i] = tmp_pos / pos_size;
}
size_t src_pos = 0;
size_t src_size = 1;
for (size_t i = 0; i < shape_size; i++) {
src_size *= src_shape[i];
}
for (size_t i = 0; i < shape_size; i++) {
src_size /= src_shape[i];
size_t length_by_index = Index(dst_index_array[i], src_shape[i]) * src_size;
src_pos += length_by_index;
}
dst[pos] = src[src_pos];
}
return;
}
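// Numerically stable BCE-with-logits with per-element pos_weight p (already broadcast into shape_broadcasted):
//   loss = (1 - y)*x + (1 + (p - 1)*y) * log(1 + exp(-x)),
// where log(1 + exp(-x)) is evaluated as m + log(exp(-m) + exp(-x - m)) with m = max(-x, 0)
// so that large |x| does not overflow the exponential.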
template <typename T>
__global__ void BCEWithLogitsLossMain(size_t size, const T *predict, const T *target, const T *shape_broadcasted,
T *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
T max_value = -predict[pos];
max_value = max_value > static_cast<T>(0) ? max_value : static_cast<T>(0);
const T log_weight = (shape_broadcasted[pos] - static_cast<T>(1)) * target[pos] + static_cast<T>(1);
output[pos] = (static_cast<T>(1) - target[pos]) * predict[pos] +
log_weight * (log(exp(-max_value) + exp(-predict[pos] - max_value)) + max_value);
}
return;
}
template <>
__global__ void BCEWithLogitsLossMain(size_t size, const half *predict, const half *target,
const half *shape_broadcasted, half *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
half max_value = -predict[pos];
max_value = max_value > static_cast<half>(0) ? max_value : static_cast<half>(0);
const half log_weight = (shape_broadcasted[pos] - static_cast<half>(1)) * target[pos] + static_cast<half>(1);
output[pos] = (static_cast<half>(1) - target[pos]) * predict[pos] +
log_weight * (hlog(hexp(-max_value) + hexp(-predict[pos] - max_value)) + max_value);
}
return;
}
template <typename T>
__global__ void Mul(size_t size, const T *lhs, T *rhs) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
rhs[pos] *= lhs[pos];
}
return;
}
template <typename T>
void CalBCEWithLogitsLoss(const size_t input_size, const T *predict, const T *target, const size_t *input_shape,
const size_t shape_size, const T *weight, const size_t *weight_shape,
const bool weight_need_broadcast, const T *pos_weight, const size_t *pos_weight_shape,
const bool pos_weight_need_broadcast, T *shape_broadcasted, T *output,
cudaStream_t cuda_stream) {
if (pos_weight_need_broadcast) {
FillAndBroadcast<<<GET_BLOCKS(input_size), GET_THREADS, 0, cuda_stream>>>(
input_size, shape_size, pos_weight_shape, input_shape, pos_weight, shape_broadcasted);
} else {
FillWithoutBroadcast<<<GET_BLOCKS(input_size), GET_THREADS, 0, cuda_stream>>>(input_size, pos_weight,
shape_broadcasted);
}
BCEWithLogitsLossMain<<<GET_BLOCKS(input_size), GET_THREADS, 0, cuda_stream>>>(input_size, predict, target,
shape_broadcasted, output);
if (weight_need_broadcast) {
FillAndBroadcast<<<GET_BLOCKS(input_size), GET_THREADS, 0, cuda_stream>>>(input_size, shape_size, weight_shape,
input_shape, weight, shape_broadcasted);
} else {
FillWithoutBroadcast<<<GET_BLOCKS(input_size), GET_THREADS, 0, cuda_stream>>>(input_size, weight,
shape_broadcasted);
}
Mul<<<GET_BLOCKS(input_size), GET_THREADS, 0, cuda_stream>>>(input_size, shape_broadcasted, output);
return;
}
template void CalBCEWithLogitsLoss<half>(const size_t input_size, const half *predict, const half *target,
const size_t *input_shape, const size_t shape_size, const half *weight,
const size_t *weight_shape, const bool weight_need_broadcast,
const half *pos_weight, const size_t *pos_weight_shape,
const bool pos_weight_need_broadcast, half *shape_broadcasted, half *output,
cudaStream_t cuda_stream);
template void CalBCEWithLogitsLoss<float>(const size_t input_size, const float *predict, const float *target,
const size_t *input_shape, const size_t shape_size, const float *weight,
const size_t *weight_shape, const bool weight_need_broadcast,
const float *pos_weight, const size_t *pos_weight_shape,
const bool pos_weight_need_broadcast, float *shape_broadcasted, float *output,
cudaStream_t cuda_stream);
|
8a7f3ee4a037b0a710a679609619c23be9014518.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <array>
#include <iostream>
#include "CudaUtils.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include "util/StringConcat.h"
#include <cstdint>
int main() {
thrust::host_vector<int> src(std::vector<int> { 10, 25, 4, -2, 15, 35, 27, 99, 1 });
thrust::host_vector<int> res;
int sum;
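// note: the `[] __device__ (...)` lambda handed to thrust::transform below needs extended
// device-lambda support from the compiler (e.g. nvcc's --extended-lambda flag)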
runWithProfiler([&]() {
thrust::device_vector<int> devSrc = src;
thrust::device_vector <uint8_t> devRes(devSrc.size());
thrust::transform(devSrc.begin(), devSrc.end(), devRes.begin(), [] __device__(auto v) { return __popc(v); });
res = devRes;
sum = thrust::reduce(devRes.begin(), devRes.end(), 0, thrust::plus<int>());
});
// Print the results
for (int col = 0; col < res.size(); ++col) {
std::cout << res[col] << std::endl;
}
std::cout << "Sum is: " << sum << std::endl;
}
| 8a7f3ee4a037b0a710a679609619c23be9014518.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <array>
#include <iostream>
#include "CudaUtils.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include "util/StringConcat.h"
#include <cstdint>
int main() {
thrust::host_vector<int> src(std::vector<int> { 10, 25, 4, -2, 15, 35, 27, 99, 1 });
thrust::host_vector<int> res;
int sum;
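// note: the `[] __device__ (...)` lambda handed to thrust::transform below needs extended
// device-lambda support from the compiler (e.g. nvcc's --extended-lambda flag)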
runWithProfiler([&]() {
thrust::device_vector<int> devSrc = src;
thrust::device_vector <uint8_t> devRes(devSrc.size());
thrust::transform(devSrc.begin(), devSrc.end(), devRes.begin(), [] __device__(auto v) { return __popc(v); });
res = devRes;
sum = thrust::reduce(devRes.begin(), devRes.end(), 0, thrust::plus<int>());
});
// Print the results
for (int col = 0; col < res.size(); ++col) {
std::cout << res[col] << std::endl;
}
std::cout << "Sum is: " << sum << std::endl;
}
|
d571ae94e735690bebd57f3d6b38114a80dea2a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This program calculates the OH stretch IR absorption spectrum
* for coupled water from an MD trajectory. The exciton Hamilt-
* onian is built using the maps developed by Skinner and co-
* workers
*/
#include "calcIR.h"
int main(int argc, char *argv[])
{
// Some help for starting the program. User must supply a single argument
if ( argc != 2 ){
printf("Usage:\n"
"\tInclude as the first argument the name of an input file. No other arguments are allowed.\n");
exit(EXIT_FAILURE);
}
// retrieve and print info about gpu
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop,0);
printf("\nGPU INFO:\n"
"\tDevice name: %s\n"
"\tMemory: %g gb\n",
prop.name, prop.totalGlobalMem/(1.E9));
// *** Variable Declaration *** //
// **************************************************** //
printf("\n>>> Setting default parameters\n");
// Model parameters
char gmxf[MAX_STR_LEN]; strncpy( gmxf, "traj.xtc", MAX_STR_LEN ); // trajectory file
char outf[MAX_STR_LEN]; strncpy( outf, "spec", MAX_STR_LEN ); // name for output files
char cptf[MAX_STR_LEN]; strncpy( cptf, "spec", MAX_STR_LEN ); // name for output files
char model[MAX_STR_LEN];strncpy( model,"e3b3", MAX_STR_LEN ); // water model tip4p, tip4p2005, e3b2, e3b3
int imodel = 0; // integer for water model
int imap = 0; // integer for spectroscopic map used (0 - 2013 Gruenbaum) (1 - 2010 Li)
int ispecies = 0; // integer for species of interest
int ntcfpoints = 150 ; // the number of tcf points for each spectrum
int nsamples = 1 ; // number of samples to average for the total spectrum
float sampleEvery = 10. ; // sample a new configuration every sampleEvery ps. Note the way the program is written,
// ntcfpoints*dt must be less than sampleEvery.
user_real_t omegaStart = 2000; // starting frequency for spectral density
user_real_t omegaStop = 5000; // ending frequency for spectral density
int omegaStep = 5; // resolution for spectral density
int natom_mol = 4; // Atoms per water molecule :: MODEL DEPENDENT
int nchrom_mol = 2; // Chromophores per molecule :: TWO for stretch -- ONE for bend
int nzeros = 25600; // zeros for padding fft
user_real_t dt = 0.010; // dt between frames in xtc file (in ps)
user_real_t beginTime = 0 ; // the beginning time in ps to allow for equilibration, if desired
user_real_t t1 = 0.260; // relaxation time ( in ps )
user_real_t avef = 3415.2; // the approximate average stretch frequency to get rid of high
// frequency oscillations in the time correlation function
char species[MAX_STR_LEN]; strncpy( species, " ", MAX_STR_LEN ); // species HOD/H2O HOD/D2O H2O D2O
// read in model parameters
// START FROM INPUT FILE
ir_init( argv, gmxf, cptf, outf, model, &dt, &ntcfpoints, &nsamples, &sampleEvery, &t1,
&avef, &omegaStart, &omegaStop, &omegaStep, &natom_mol, &nchrom_mol, &nzeros, &beginTime,
species, &imap );
// Print the parameters to stdout
printf("\tSetting xtc file %s\n", gmxf );
printf("\tSetting default file name to %s\n", outf );
printf("\tSetting model to %s\n", model );
printf("\tSetting the number of tcf points to %d\n", ntcfpoints );
printf("\tSetting nsamples to %d\n", nsamples );
printf("\tSetting sampleEvery to %f (ps)\n", sampleEvery );
printf("\tSetting omegaStep to %d\n", omegaStep );
printf("\tSetting natom_mol to %d\n", natom_mol );
printf("\tSetting nchrom_mol to %d\n", nchrom_mol );
printf("\tSetting nzeros to %d\n", nzeros );
printf("\tSetting map to %d\n", imap );
printf("\tSetting species to %s\n", species );
printf("\tSetting omegaStart to %f\n", omegaStart );
printf("\tSetting omegaStop to %f\n", omegaStop );
printf("\tSetting dt to %f\n", dt );
printf("\tSetting t1 to %f (ps)\n", t1 );
printf("\tSetting avef to %f\n", avef );
printf("\tSetting equilibration time to %f (ps)\n", beginTime );
// Useful variables and constants
int natoms, nmol, nchrom; // number of atoms, molecules, chromophores
int currentSample = 0; // current sample
int currentFrame = 0; // current frame
const int ntcfpointsR = ( nzeros + ntcfpoints - 1 ) * 2; // number of points for the real fourier transform
const int nomega = ( omegaStop - omegaStart ) / omegaStep + 1; // number of frequencies for the spectral density
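// with the default parameters above (before ir_init overrides them from the input file):
// ntcfpointsR = (25600 + 150 - 1)*2 = 51498 and nomega = (5000 - 2000)/5 + 1 = 601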
magma_int_t nchrom2; // nchrom squared
float desired_time; // desired time for the current frame
int nframes, est_nframes; // variables for indexing offsets
// Trajectory variables for the CPU
rvec *x; // Position vector
matrix box; // Box vectors
float gmxtime, prec; // Time at current frame, precision of xtf file
int step, xdrinfo; // The current step number
int64_t *frame_offset; // Offset for random frame access from trajectory
float frame_dt; // Time between successive frames
// GPU variables
const int blockSize = 128; // The number of threads to launch per block
rvec *x_d; // positions
user_real_t *mux_d, *muy_d, *muz_d; // transition dipole moments
user_real_t *axx_d, *ayy_d, *azz_d; // polarizability
user_real_t *axy_d, *ayz_d, *azx_d; // polarizability
user_complex_t *cmux0_d, *cmuy0_d, *cmuz0_d; // complex version of the transition dipole moment at t=0
user_complex_t *cmux_d, *cmuy_d, *cmuz_d; // complex version of the transition dipole moment
user_complex_t *caxx0_d, *cayy0_d, *cazz0_d; // complex version of the polarizability at t=0
user_complex_t *caxy0_d, *cayz0_d, *cazx0_d; // complex version of the polarizability at t=0
user_complex_t *caxx_d, *cayy_d, *cazz_d; // complex version of the polarizability
user_complex_t *caxy_d, *cayz_d, *cazx_d; // complex version of the polarizability
user_complex_t *tmpmu_d; // to sum all polarizations
user_real_t *MUX_d, *MUY_d, *MUZ_d; // transition dipole moments in the eigen basis
user_real_t *eproj_d; // the electric field projected along the oh bonds
user_real_t *kappa_d; // the hamiltonian on the GPU
user_real_t *kappa;
// magma variables for ssyevd
user_real_t aux_work[1]; // To get optimal size of lwork
magma_int_t aux_iwork[1], info; // To get optimal liwork, and return info
magma_int_t lwork, liwork; // Leading dim of kappa, sizes of work arrays
magma_int_t *iwork; // Work array
user_real_t *work; // Work array
user_real_t *w ; // Eigenvalues
user_real_t wi ; // Eigenvalues
user_real_t *wA ; // Work array
int SSYEVD_ALLOC_FLAG = 1; // flag whether to allocate ssyevd arrays -- it is turned off after they are allocated
// magma variables for gemv
magma_queue_t queue;
// variables for spectrum calculations
user_real_t *w_d; // Eigenvalues on the GPU
user_real_t *omega, *omega_d; // Frequencies on CPU and GPU
user_real_t *Sw, *Sw_d; // Spectral density on CPU and GPU
user_real_t *tmpSw; // Temporary spectral density
user_real_t *Rw; // inverse participation ratio weighted frequency distribution
user_real_t *Rmw; // molecular inverse participation ratio weighted frequency distribution
user_real_t *Pw; // frequency distribution
user_real_t ipr; // inverse participation ratio
user_real_t mipr; // molecular inverse participation ratio
// variables for TCF
user_complex_t *F_d; // F matrix on GPU
user_complex_t *prop_d; // Propagator matrix on GPU
user_complex_t *ctmpmat_d; // temporary complex matrix for matrix multiplications on gpu
user_complex_t *ckappa_d; // A complex version of kappa
user_complex_t tcfx, tcfy, tcfz; // Time correlation function, polarized, ir
user_complex_t tcf_iiFii, tcf_iiFjj, tcf_ijFij; // Time correlation function, polarized, raman
user_complex_t dcy, tcftmp; // Decay constant and a temporary variable for the tcf
user_complex_t *pdtcf, *pdtcf_d; // padded time correlation functions
user_complex_t *tcf; // Time correlation function IR
user_complex_t *tcfvv; // Time correlation function VV raman
user_complex_t *tcfvh; // Time correlation function VH raman
user_real_t *Ftcf, *Ftcf_d; // Fourier transformed time correlation function
user_real_t *Ftcfvv, *Ftcfvh;
// For fft on gpu
hipfftHandle plan;
// for timing and errors
time_t start=time(NULL), end;
hipError_t Cuerr;
int Merr;
size_t freem, total;
int ALLOCATE_2DGPU_ONCE = 0;
// for file output
FILE *rtcf;
FILE *itcf;
FILE *spec_density;
FILE *freq_dist;
FILE *ipr_freq_dist;
FILE *mipr_freq_dist;
FILE *spec_lineshape;
FILE *vv_lineshape;
FILE *vv_rtcf;
FILE *vv_itcf;
FILE *vh_lineshape;
FILE *vh_rtcf;
FILE *vh_itcf;
char *fname;
fname = (char *) malloc( strlen(outf) + 9 );
user_real_t factor; // conversion factor to give energy and correct intensity from FFT
user_real_t freq;
// **************************************************** //
// *** End Variable Declaration *** //
// *** Begin main routine *** //
// **************************************************** //
// Open trajectory file and get info about the system
XDRFILE *trj = xdrfile_open( gmxf, "r" );
if ( trj == NULL )
{
printf("WARNING: The file %s could not be opened. Is the name correct?\n", gmxf);
exit(EXIT_FAILURE);
}
read_xtc_natoms( (char *)gmxf, &natoms);
nmol = natoms / natom_mol;
nchrom = nmol * nchrom_mol;
nchrom2 = (magma_int_t) nchrom*nchrom;
if ( nchrom < 6000 ) ALLOCATE_2DGPU_ONCE = 1;
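// for fewer than ~6000 chromophores the nchrom x nchrom work matrices fit in GPU memory at once,
// so they are allocated a single time below instead of being allocated and freed every frame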
printf(">>> Will read the trajectory from: %s.\n",gmxf);
printf(">>> Found %d atoms and %d molecules.\n",natoms, nmol);
printf(">>> Found %d chromophores.\n",nchrom);
// *** MEMORY ALLOCATION *** //
// **************************************************** //
// determine the number of blocks to launch on the gpu
// each thread takes care of one chromophore for building the electric field and Hamiltonian
const int numBlocks = (nchrom+blockSize-1)/blockSize;
// Initialize magma math library and queue
magma_init(); magma_queue_create( 0, &queue );
// CPU arrays
x = (rvec*) malloc( natoms * sizeof(x[0] )); if ( x == NULL ) MALLOC_ERR;
tcf = (user_complex_t *) calloc( ntcfpoints , sizeof(user_complex_t)); if ( tcf == NULL ) MALLOC_ERR;
tcfvv = (user_complex_t *) calloc( ntcfpoints , sizeof(user_complex_t)); if ( tcfvv == NULL ) MALLOC_ERR;
tcfvh = (user_complex_t *) calloc( ntcfpoints , sizeof(user_complex_t)); if ( tcfvh == NULL ) MALLOC_ERR;
Ftcf = (user_real_t *) calloc( ntcfpointsR , sizeof(user_real_t)); if ( Ftcf == NULL ) MALLOC_ERR;
Ftcfvv = (user_real_t *) calloc( ntcfpointsR , sizeof(user_real_t)); if ( Ftcfvv == NULL ) MALLOC_ERR;
Ftcfvh = (user_real_t *) calloc( ntcfpointsR , sizeof(user_real_t)); if ( Ftcfvh == NULL ) MALLOC_ERR;
// GPU arrays
Cuerr = hipMalloc( &x_d , natoms *sizeof(x[0])); CHK_ERR;
Cuerr = hipMalloc( &eproj_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &Ftcf_d , ntcfpointsR *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &mux_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &muy_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &muz_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &cmux_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &cmuy_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &cmuz_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &cmux0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &cmuy0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &cmuz0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &tmpmu_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &axx_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &ayy_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &azz_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &axy_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &ayz_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &azx_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &caxx_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &cayy_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &cazz_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &caxy_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &cayz_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &cazx_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &caxx0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &cayy0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &cazz0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &caxy0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &cayz0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &cazx0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
// F_d is persistent so allocate it here
Cuerr = hipMalloc( &F_d , nchrom2 *sizeof(user_complex_t)); CHK_ERR;
// Only allocate temporary non-persistent 2D arrays if the system is small enough
// Otherwise we have to more actively manage memory to avoid
// going over the GPU max memory (4 GB on M1200)
if ( ALLOCATE_2DGPU_ONCE )
{
Cuerr = hipMalloc( &kappa_d , nchrom2 *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &ckappa_d , nchrom2 *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &ctmpmat_d, nchrom2 *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &prop_d , nchrom2 *sizeof(user_complex_t)); CHK_ERR;
}
kappa = (user_real_t *) malloc( nchrom2 * sizeof(user_real_t)); if ( kappa == NULL ) MALLOC_ERR;
// memory for spectral density calculation
// CPU arrays
omega = (user_real_t *) malloc( nomega * sizeof(user_real_t)); if ( omega == NULL ) MALLOC_ERR;
Sw = (user_real_t *) calloc( nomega , sizeof(user_real_t)); if ( Sw == NULL ) MALLOC_ERR;
tmpSw = (user_real_t *) malloc( nomega * sizeof(user_real_t)); if ( tmpSw == NULL ) MALLOC_ERR;
Pw = (user_real_t *) calloc( nomega , sizeof(user_real_t)); if ( Pw == NULL ) MALLOC_ERR;
Rw = (user_real_t *) calloc( nomega , sizeof(user_real_t)); if ( Rw == NULL ) MALLOC_ERR;
Rmw = (user_real_t *) calloc( nomega , sizeof(user_real_t)); if ( Rmw == NULL ) MALLOC_ERR;
// GPU arrays
Cuerr = hipMalloc( &MUX_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &MUY_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &MUZ_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &omega_d , nomega *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &Sw_d , nomega *sizeof(user_real_t)); CHK_ERR;
Cuerr = hipMalloc( &w_d , nchrom *sizeof(user_real_t)); CHK_ERR;
// initialize omega array
for (int i = 0; i < nomega; i++) omega[i] = (user_real_t) (omegaStart + omegaStep*i);
// *** END MEMORY ALLOCATION *** //
// **************************************************** //
// set imodel based on model passed...if 1, reset OM lengths to tip4p lengths
if ( strcmp( model, "tip4p2005" ) == 0 || strcmp( model, "e3b3" ) == 0 ) imodel = 1;
else if ( strcmp( model, "tip4p" ) == 0 || strcmp( model, "e3b2" ) == 0 ) imodel = 0;
else{
printf("WARNING: model: %s is not recognized. Check input file. Aborting...\n", model );
exit(EXIT_FAILURE);
}
// set ispecies based on species passed... 0 H2O, 1 HOD in D2O, 2 HOD in H2O, 3 D2O;
if ( strcmp( species, "H2O" ) == 0 ) ispecies = 0;
else if ( strcmp( species, "HOD/D2O" ) == 0 ) ispecies = 1;
else if ( strcmp( species, "HOD/H2O" ) == 0 ) ispecies = 2;
else if ( strcmp( species, "D2O" ) == 0 ) ispecies = 3;
else{
printf("WARNING: species: %s is not recognized. Check input file. Aborting...\n", species );
exit(EXIT_FAILURE);
}
// index the frames for random access
read_xtc( trj, natoms, &step, &gmxtime, box, x, &prec );
float gmxtime2 = gmxtime;
read_xtc( trj, natoms, &step, &gmxtime, box, x, &prec );
frame_dt = round((gmxtime-gmxtime2)*prec)/(1.*prec);
printf(">>> Frame time offset is: %f (ps)\n", frame_dt );
xdrfile_close(trj);
printf(">>> Now indexing the xtc file to allow random access.\n");
read_xtc_n_frames( gmxf, &nframes, &est_nframes, &frame_offset );
// open xtc file for reading
trj = xdrfile_open( gmxf, "r" );
printf("\n>>> Now calculating the absorption spectrum\n");
printf("----------------------------------------------------------\n");
// **************************************************** //
// *** OUTER LOOP OVER SAMPLES *** //
while( currentSample < nsamples )
{
desired_time = currentSample * sampleEvery + beginTime;
printf("\n Now processing sample %d/%d starting at %.2f ps\n",
currentSample + 1, nsamples, desired_time );
fflush(stdout);
// **************************************************** //
// *** MAIN LOOP OVER TRAJECTORY *** //
while( currentFrame < ntcfpoints )
{
// ---------------------------------------------------- //
// *** Get Info About The System *** //
// read the current frame from the trajectory file and copy to device memory
// this assumes that the trajectory has no gaps and starts at time zero, but should give a warning if something goes wrong
desired_time = currentSample * sampleEvery + beginTime + dt * currentFrame;
int frame = round(desired_time/frame_dt);
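// the frame index assumes evenly spaced frames separated by frame_dt, measured from the first two frames above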
xdrinfo = xdr_seek( trj, frame_offset[ frame ], SEEK_SET ); // set point to beginning of current frame
//printf("%f\n", desired_time);
if ( xdrinfo != exdrOK ){
printf("WARNING:: xdr_seek returned error %d.\n", xdrinfo);
xdrfile_close(trj); exit(EXIT_FAILURE);
}
xdrinfo = read_xtc( trj, natoms, &step, &gmxtime, box, x, &prec ); // read frame from disk
if ( xdrinfo != exdrOK ){
printf("Warning:: read_xtc returned error %d.\n", xdrinfo);
xdrfile_close(trj); exit(EXIT_FAILURE);
}
if ( fabs( desired_time - gmxtime ) > frame_dt*1E-1 ){ // check that we have the frame we want
printf("\nWARNING:: could not find the desired frame at time %f (ps).\n", desired_time );
printf("I am instead at gmxtime: %f.\nIs something wrong with the trajectory?", gmxtime );
exit(EXIT_FAILURE);
}
// copy trajectory to gpu memory
hipMemcpy( x_d, x, natoms*sizeof(x[0]), hipMemcpyHostToDevice );
// allocate space for hamiltonian on the GPU if actively managing GPU memory
if ( !ALLOCATE_2DGPU_ONCE ) Cuerr = hipMalloc( &kappa_d , nchrom2 *sizeof(user_real_t)); CHK_ERR;
// launch kernel to calculate the electric field projection along OH bonds and build the exciton hamiltonian
hipLaunchKernelGGL(( get_eproj_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, x_d, box[0][0], box[1][1], box[2][2], natoms, natom_mol, nchrom, nchrom_mol, nmol, imodel, eproj_d );
hipLaunchKernelGGL(( get_kappa_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, x_d, box[0][0], box[1][1], box[2][2], natoms, natom_mol, nchrom, nchrom_mol, nmol, eproj_d, kappa_d,
mux_d, muy_d, muz_d, axx_d, ayy_d, azz_d, axy_d, ayz_d, azx_d, avef, ispecies, imap);
// *** Done getting System Info *** //
// ---------------------------------------------------- //
// ---------------------------------------------------- //
// *** Diagonalize the Hamiltonian *** //
// Note that kappa only needs to be diagonalized if the exact integration method is requested or the spectral density is calculated
// if the first time, query for optimal workspace dimensions
if ( SSYEVD_ALLOC_FLAG )
{
magma_ssyevd_gpu( MagmaVec, MagmaUpper, (magma_int_t) nchrom, NULL, (magma_int_t) nchrom,
NULL, NULL, (magma_int_t) nchrom, aux_work, -1, aux_iwork, -1, &info );
lwork = (magma_int_t) aux_work[0];
liwork = aux_iwork[0];
// allocate work arrays, eigenvalues and other stuff
w = (user_real_t *) malloc( nchrom * sizeof(user_real_t)); if ( w == NULL ) MALLOC_ERR;
Merr = magma_imalloc_cpu ( &iwork, liwork ); CHK_MERR;
Merr = magma_smalloc_pinned( &wA , nchrom2 ) ; CHK_MERR;
Merr = magma_smalloc_pinned( &work , lwork ); CHK_MERR;
SSYEVD_ALLOC_FLAG = 0; // is allocated here, so we won't need to do it again
// get info about space needed for diagonalization
hipMemGetInfo( &freem, &total );
printf("\n>>> hipMemGetInfo returned\n"
"\tfree: %g gb\n"
"\ttotal: %g gb\n", (float) freem/(1E9), (float) total/(1E9));
printf(">>> %g gb needed by diagonalization routine.\n", (float) (lwork * (float) sizeof(user_real_t)/(1E9)));
}
magma_ssyevd_gpu( MagmaVec, MagmaUpper, (magma_int_t) nchrom, kappa_d, (magma_int_t) nchrom,
w, wA, (magma_int_t) nchrom, work, lwork, iwork, liwork, &info );
if ( info != 0 ){ printf("ERROR: magma_ssyevd_gpu returned info %lld.\n", info ); exit(EXIT_FAILURE);}
// copy eigenvalues to device memory
hipMemcpy( w_d , w , nchrom*sizeof(user_real_t), hipMemcpyHostToDevice );
// *** Done with the Diagonalization *** //
// ---------------------------------------------------- //
// ---------------------------------------------------- //
// *** The Spectral Density *** //
if ( currentFrame == 0 )
{
// project the transition dipole moments onto the eigenbasis
// MU_d = kappa_d**T x mu_d
magma_sgemv( MagmaTrans, (magma_int_t) nchrom, (magma_int_t) nchrom,
1.0, kappa_d, (magma_int_t) nchrom , mux_d, 1, 0.0, MUX_d, 1, queue);
magma_sgemv( MagmaTrans, (magma_int_t) nchrom, (magma_int_t) nchrom,
1.0, kappa_d, (magma_int_t) nchrom, muy_d, 1, 0.0, MUY_d, 1, queue);
magma_sgemv( MagmaTrans, (magma_int_t) nchrom, (magma_int_t) nchrom,
1.0, kappa_d, (magma_int_t) nchrom, muz_d, 1, 0.0, MUZ_d, 1, queue);
// Initialize the temporary array for spectral density
for (int i = 0; i < nomega; i++) tmpSw[i] = 0.0;
// Copy relevant variables to device memory
hipMemcpy( omega_d, omega, nomega*sizeof(user_real_t), hipMemcpyHostToDevice );
hipMemcpy( Sw_d , tmpSw, nomega*sizeof(user_real_t), hipMemcpyHostToDevice );
// calculate the spectral density on the GPU and copy back to the CPU
hipLaunchKernelGGL(( get_spectral_density) , dim3(numBlocks),dim3(blockSize), 0, 0, w_d, MUX_d, MUY_d, MUZ_d, omega_d, Sw_d, nomega, nchrom, t1, avef );
hipMemcpy( tmpSw, Sw_d, nomega*sizeof(user_real_t), hipMemcpyDeviceToHost );
// Copy temporary to persistent to get average spectral density over samples
for (int i = 0; i < nomega; i++ ) Sw[i] += tmpSw[i];
}
// *** Done the Spectral Density *** //
// ---------------------------------------------------- //
// ---------------------------------------------------- //
// *** The Frequency Distb. *** //
// could make this a function...
// copy eigenvectors back to host memory
hipMemcpy( kappa, kappa_d, nchrom2*sizeof(user_real_t), hipMemcpyDeviceToHost );
// loop over eigenstates belonging to the current thread and calculate ipr
for ( int eign = 0; eign < nchrom; eign ++ ){
user_real_t c;
int bin_num;
// determine ipr
ipr = 0.; // initialize ipr
for ( int i = 0; i < nchrom; i ++ ){
// calculate ipr
c = kappa[eign*nchrom + i];
ipr += c*c*c*c;
}
ipr = 1./ipr;
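// the inverse participation ratio estimates the number of chromophores over which this eigenstate is delocalized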
// determine molecular ipr
user_real_t inner_sum, outer_sum;
int chrom;
outer_sum = 0.;
for ( int i = 0; i < nmol; i ++ ){
inner_sum = 0.; //initialize
for ( int j = 0; j < nchrom_mol; j++ ){
chrom = i*nchrom_mol + j;
c = kappa[eign*nchrom + chrom];
inner_sum += c*c;
}
outer_sum += inner_sum * inner_sum;
}
mipr = 1./outer_sum;
// determine frequency distribution
wi = w[eign] + avef; // frequency of current mode
// determine bin number
bin_num = (int) round((wi - omegaStart)/omegaStep);
if ( bin_num < 0 || bin_num >= nomega ){
printf("WARNING: bin_num is: %d for frequency %g. Check bounds of omegaStart and omegaStop. Aborting.\n", bin_num, wi);
exit(EXIT_FAILURE); // abort as advertised instead of writing past the end of the distribution arrays
}
// divide by omegaStep to make probability density
Pw[ bin_num] += 1./(omegaStep*1.);
Rw[ bin_num] += ipr/(omegaStep*1.);
Rmw[bin_num] += mipr/(omegaStep*1.);
}
// *** Done the Frequency Distb. *** //
// ---------------------------------------------------- //
// ---------------------------------------------------- //
// *** Time Correlation Function *** //
// allocate space for complex hamiltonian if actively managing memory
if ( !ALLOCATE_2DGPU_ONCE ) Cuerr = hipMalloc( &ckappa_d , nchrom2 *sizeof(user_complex_t)); CHK_ERR;
// cast variables to complex to calculate time correlation function (which is complex)
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, kappa_d, ckappa_d, nchrom2);
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, mux_d , cmux_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, muy_d , cmuy_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, muz_d , cmuz_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, axx_d , caxx_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, ayy_d , cayy_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, azz_d , cazz_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, axy_d , caxy_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, ayz_d , cayz_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, azx_d , cazx_d , nchrom );
// free float hamiltonian since we won't need it from here and allocate space for the rest
// of the 2D matrix variables that have not yet been allocated if actively managing memory
if ( !ALLOCATE_2DGPU_ONCE )
{
hipFree( kappa_d );
Cuerr = hipMalloc( &ctmpmat_d, nchrom2 *sizeof(user_complex_t)); CHK_ERR;
Cuerr = hipMalloc( &prop_d , nchrom2 *sizeof(user_complex_t)); CHK_ERR;
}
// ---------------------------------------------------- //
// *** Calculate the F matrix *** //
if ( currentFrame == 0 )
{
// initialize the F matrix at t=0 to the unit matrix
hipLaunchKernelGGL(( makeI) , dim3(numBlocks),dim3(blockSize), 0, 0, F_d, nchrom );
// set the transition dipole moment at t=0
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, mux_d , cmux0_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, muy_d , cmuy0_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, muz_d , cmuz0_d , nchrom );
// set the polarizability at t=0
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, axx_d , caxx0_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, ayy_d , cayy0_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, azz_d , cazz0_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, axy_d , caxy0_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, ayz_d , cayz0_d , nchrom );
hipLaunchKernelGGL(( cast_to_complex_GPU) , dim3(numBlocks),dim3(blockSize), 0, 0, azx_d , cazx0_d , nchrom );
}
else
{
// Integrate with exact diagonalization
// build the propagator
hipLaunchKernelGGL(( Pinit) , dim3(numBlocks),dim3(blockSize), 0, 0, prop_d, w_d, nchrom, dt );
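// the two gemm calls below return the propagator to the site basis, prop = kappa * diag(exp(i w dt/hbar)) * kappa**T,
// which is then applied to F to advance it by one time step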
// ctmpmat_d = ckappa_d * prop_d
magma_cgemm( MagmaNoTrans, MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom,
(magma_int_t) nchrom, MAGMA_ONE, ckappa_d, (magma_int_t) nchrom, prop_d,
(magma_int_t) nchrom, MAGMA_ZERO, ctmpmat_d, (magma_int_t) nchrom, queue );
// prop_d = ctmpmat_d * ckappa_d **T
magma_cgemm( MagmaNoTrans, MagmaTrans, (magma_int_t) nchrom, (magma_int_t) nchrom,
(magma_int_t) nchrom, MAGMA_ONE, ctmpmat_d, (magma_int_t) nchrom, ckappa_d,
(magma_int_t) nchrom, MAGMA_ZERO, prop_d, (magma_int_t) nchrom, queue );
// ctmpmat_d = prop_d * F
magma_cgemm( MagmaNoTrans, MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom,
(magma_int_t) nchrom, MAGMA_ONE, prop_d, (magma_int_t) nchrom, F_d,
(magma_int_t) nchrom, MAGMA_ZERO, ctmpmat_d, (magma_int_t) nchrom, queue );
// copy the F matrix back from the temporary variable to F_d
magma_ccopy( (magma_int_t) nchrom2, ctmpmat_d , 1, F_d, 1, queue );
}
// *** Done updating the F matrix *** //
// free 2d matrices if actively managing memory
if ( !ALLOCATE_2DGPU_ONCE )
{
hipFree( ckappa_d );
hipFree( ctmpmat_d );
hipFree( prop_d );
}
// calculate mFm for x y and z components
// tcfx = cmux0_d**T * F_d *cmux_d
// x
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cmux0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcfx = magma_cdotu( (magma_int_t) nchrom, cmux_d, 1, tmpmu_d, 1, queue );
// y
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cmuy0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcfy = magma_cdotu( (magma_int_t) nchrom, cmuy_d, 1, tmpmu_d, 1, queue );
// z
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cmuz0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcfz = magma_cdotu( (magma_int_t) nchrom, cmuz_d, 1, tmpmu_d, 1, queue );
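// the polarized IR response at this delay is the sum over x, y, z of mu(0) . F(t) . mu(t)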
// accumulate the tcf over the samples for the IR spectrum
tcftmp = MAGMA_ADD( tcfx , tcfy );
tcftmp = MAGMA_ADD( tcftmp, tcfz );
tcf[ currentFrame ] = MAGMA_ADD( tcf[currentFrame], tcftmp );
// zero variables
tcf_iiFii = MAGMA_ZERO;
tcf_ijFij = MAGMA_ZERO;
tcf_iiFjj = MAGMA_ZERO;
// Now The Raman Spectrum //
//-------------------------------------------------//
// tcfxx = caxx0_d**T * F_d * caxx_d
// **
// iiFii
// **
// xxFxx
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
caxx0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFii = magma_cdotu( (magma_int_t) nchrom, caxx_d, 1, tmpmu_d, 1, queue );
// yyFyy
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cayy0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFii = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cayy_d, 1, tmpmu_d, 1, queue ), tcf_iiFii );
// zzFzz
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cazz0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFii = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cazz_d, 1, tmpmu_d, 1, queue ), tcf_iiFii );
// **
// ijFij
// **
// xyFxy
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
caxy0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_ijFij = magma_cdotu( (magma_int_t) nchrom, caxy_d, 1, tmpmu_d, 1, queue );
// yzFyz
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cayz0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_ijFij = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cayz_d, 1, tmpmu_d, 1, queue ), tcf_ijFij );
// zxFzx
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cazx0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_ijFij = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cazx_d, 1, tmpmu_d, 1, queue ), tcf_ijFij );
// **
// iiFjj
// **
// xxFyy
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
caxx0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFjj = magma_cdotu( (magma_int_t) nchrom, cayy_d, 1, tmpmu_d, 1, queue );
// xxFzz
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
caxx0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFjj = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cazz_d, 1, tmpmu_d, 1, queue ), tcf_iiFjj );
// yyFxx
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cayy0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFjj = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, caxx_d, 1, tmpmu_d, 1, queue ), tcf_iiFjj);
// yyFzz
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cayy0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFjj = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cazz_d, 1, tmpmu_d, 1, queue ), tcf_iiFjj);
// zzFxx
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cazz0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFjj = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, caxx_d, 1, tmpmu_d, 1, queue ), tcf_iiFjj);
// zzFyy
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cazz0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFjj = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cayy_d, 1, tmpmu_d, 1, queue ), tcf_iiFjj);
// accumulate the tcf over the samples for the VV raman spectrum
tcftmp = MAGMA_ADD( MAGMA_MUL(MAGMA_MAKE(3.,0.), tcf_iiFii), tcf_iiFjj );
tcftmp = MAGMA_ADD( tcftmp, MAGMA_MUL(MAGMA_MAKE(4.,0.), tcf_ijFij ));
tcftmp = MAGMA_DIV( tcftmp, MAGMA_MAKE(15.,0.) );
tcfvv[ currentFrame ] = MAGMA_ADD( tcfvv[currentFrame], tcftmp );
// accumulate the tcf over the samples for the VH raman spectrum
tcftmp = MAGMA_ADD( MAGMA_MUL(MAGMA_MAKE(2.,0.), tcf_iiFii), MAGMA_MUL( MAGMA_MAKE(-1.,0.), tcf_iiFjj ));
tcftmp = MAGMA_ADD( tcftmp, MAGMA_MUL(MAGMA_MAKE(6.,0.), tcf_ijFij ));
tcftmp = MAGMA_DIV( tcftmp, MAGMA_MAKE(30.,0.) );
tcfvh[ currentFrame ] = MAGMA_ADD( tcfvh[currentFrame], tcftmp );
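// the 3:1:4 (VV) and 2:-1:6 (VH) combinations of iiFii, iiFjj and ijFij are the standard isotropic
// rotational averages of the polarizability correlation function for parallel and perpendicular polarizations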
// *** Done with Time Correlation *** //
// ---------------------------------------------------- //
// update progress bar if simulation is big enough, otherwise it really isn't necessary
if ( nchrom > 400 ) printProgress( currentFrame, ntcfpoints-1 );
// done with current frame, move to next
currentFrame += 1;
}
// done with current sample, move to next, and reset currentFrame to 0
currentSample +=1;
currentFrame = 0;
} // end outer loop
printf("\n\n----------------------------------------------------------\n");
printf("Finishing up...\n");
// close xdr file
xdrfile_close(trj);
// *** IR Spectrum *** //
// ---------------------------------------------------- //
// pad the time correlation function with zeros, copy to device memory and perform fft
// fourier transform the time correlation function on the GPU
pdtcf = (user_complex_t *) calloc( ntcfpoints+nzeros, sizeof(user_complex_t));
for ( int i = 0; i < ntcfpoints; i++ )
{
// multiply the tcf by the relaxation term
dcy = MAGMA_MAKE(exp( -1.0 * i * dt / ( 2.0 * t1 ))/(1.*nsamples), 0.0);
tcf[i] = MAGMA_MUL( tcf[i], dcy );
pdtcf[i] = tcf[i];
}
for ( int i = 0; i < nzeros; i++ ) pdtcf[i+ntcfpoints] = MAGMA_ZERO;
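// zero padding adds no information; it only interpolates the transformed lineshape onto a finer frequency grid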
hipMalloc( &pdtcf_d , (ntcfpoints+nzeros)*sizeof(user_complex_t));
hipMemcpy( pdtcf_d, pdtcf, (ntcfpoints+nzeros)*sizeof(user_complex_t), hipMemcpyHostToDevice );
hipfftPlan1d ( &plan, ntcfpoints+nzeros, HIPFFT_C2R, 1);
hipfftExecC2R ( plan, pdtcf_d, Ftcf_d );
hipMemcpy ( Ftcf, Ftcf_d, ntcfpointsR*sizeof(user_real_t), hipMemcpyDeviceToHost );
// *** VV Spectrum *** //
// ---------------------------------------------------- //
for ( int i = 0; i < ntcfpoints; i++ )
{
// multiply the tcf by the relaxation term
dcy = MAGMA_MAKE(exp( -1.0 * i * dt / ( 2.0 * t1 ))/(1.*nsamples), 0.0);
tcfvv[i] = MAGMA_MUL( tcfvv[i], dcy );
pdtcf[i] = tcfvv[i];
}
for ( int i = 0; i < nzeros; i++ ) pdtcf[i+ntcfpoints] = MAGMA_ZERO;
hipMemcpy( pdtcf_d, pdtcf, (ntcfpoints+nzeros)*sizeof(user_complex_t), hipMemcpyHostToDevice );
hipfftExecC2R ( plan, pdtcf_d, Ftcf_d );
hipMemcpy ( Ftcfvv, Ftcf_d, ntcfpointsR*sizeof(user_real_t), hipMemcpyDeviceToHost );
// *** VH Spectrum *** //
// ---------------------------------------------------- //
for ( int i = 0; i < ntcfpoints; i++ )
{
// multiply the tcf by the relaxation term
dcy = MAGMA_MAKE(exp( -1.0 * i * dt / ( 2.0 * t1 ))/(1.*nsamples), 0.0);
tcfvh[i] = MAGMA_MUL( tcfvh[i], dcy );
pdtcf[i] = tcfvh[i];
}
for ( int i = 0; i < nzeros; i++ ) pdtcf[i+ntcfpoints] = MAGMA_ZERO;
hipMemcpy( pdtcf_d, pdtcf, (ntcfpoints+nzeros)*sizeof(user_complex_t), hipMemcpyHostToDevice );
hipfftExecC2R ( plan, pdtcf_d, Ftcf_d );
hipMemcpy ( Ftcfvh, Ftcf_d, ntcfpointsR*sizeof(user_real_t), hipMemcpyDeviceToHost );
hipfftDestroy(plan);
// normalize spectral density by number of samples
for ( int i = 0; i < nomega; i++) Sw[i] = Sw[i] / (user_real_t) nsamples;
// normalize the frequency and ipr weighted frequency distributions
for ( int i = 0; i < nomega; i ++ ) Pw[i] /= nchrom*nsamples*ntcfpoints;
for ( int i = 0; i < nomega; i ++ ) Rw[i] /= nchrom*nsamples*ntcfpoints;
for ( int i = 0; i < nomega; i ++ ) Rw[i] /= Pw[i];
for ( int i = 0; i < nomega; i ++ ) Rmw[i] /= nchrom*nsamples*ntcfpoints;
for ( int i = 0; i < nomega; i ++ ) Rmw[i] /= Pw[i];
// write time correlation function
rtcf = fopen(strcat(strcpy(fname,outf),"_irrtcf.dat"), "w");
itcf = fopen(strcat(strcpy(fname,outf),"_iritcf.dat"), "w");
vv_rtcf = fopen(strcat(strcpy(fname,outf),"_vvrtcf.dat"), "w");
vv_itcf = fopen(strcat(strcpy(fname,outf),"_vvitcf.dat"), "w");
vh_rtcf = fopen(strcat(strcpy(fname,outf),"_vhrtcf.dat"), "w");
vh_itcf = fopen(strcat(strcpy(fname,outf),"_vhitcf.dat"), "w");
for ( int i = 0; i < ntcfpoints; i++ )
{
fprintf( rtcf, "%g %g \n", i*dt, MAGMA_REAL( tcf[i] ) );
fprintf( itcf, "%g %g \n", i*dt, MAGMA_IMAG( tcf[i] ) );
fprintf( vv_rtcf, "%g %g \n", i*dt, MAGMA_REAL( tcfvv[i] ) );
fprintf( vv_itcf, "%g %g \n", i*dt, MAGMA_IMAG( tcfvv[i] ) );
fprintf( vh_rtcf, "%g %g \n", i*dt, MAGMA_REAL( tcfvh[i] ) );
fprintf( vh_itcf, "%g %g \n", i*dt, MAGMA_IMAG( tcfvh[i] ) );
}
fclose( rtcf );
fclose( itcf );
fclose( vv_rtcf );
fclose( vv_itcf );
fclose( vh_rtcf );
fclose( vh_itcf );
// write the spectral density
spec_density = fopen(strcat(strcpy(fname,outf),"_spdn.dat"), "w");
for ( int i = 0; i < nomega; i++) fprintf(spec_density, "%g %g\n", omega[i], Sw[i]);
fclose(spec_density);
// write the frequency distributions
freq_dist = fopen(strcat(strcpy(fname,outf),"_Pw.dat"), "w");
for ( int i = 0; i < nomega; i++) fprintf(freq_dist, "%g %g\n", omega[i], Pw[i]);
fclose(freq_dist);
ipr_freq_dist = fopen(strcat(strcpy(fname,outf),"_Rw.dat"), "w");
for ( int i = 0; i < nomega; i++) fprintf(ipr_freq_dist, "%g %g\n", omega[i], Rw[i]);
fclose(ipr_freq_dist);
mipr_freq_dist = fopen(strcat(strcpy(fname,outf),"_Rmw.dat"), "w");
for ( int i = 0; i < nomega; i++) fprintf(mipr_freq_dist, "%g %g\n", omega[i], Rmw[i]);
fclose(mipr_freq_dist);
// Write the absorption lineshape
// Since the C2R transform is inverse by default, the frequencies have to be negated
// NOTE: to compare with YICUN's code, divide Ftcf by 2
spec_lineshape = fopen(strcat(strcpy(fname,outf),"_irls.dat"),"w");
vv_lineshape = fopen(strcat(strcpy(fname,outf),"_vvls.dat"),"w");
vh_lineshape = fopen(strcat(strcpy(fname,outf),"_vhls.dat"),"w");
factor = 2*PI*HBAR/(dt*(ntcfpoints+nzeros)); // conversion factor to give energy and correct intensity from FFT
for ( int i = (ntcfpoints+nzeros)/2; i < ntcfpoints+nzeros; i++ ) // "negative" FFT frequencies
{
freq = -1*(i-ntcfpoints-nzeros)*factor + avef;
if ( freq <= (user_real_t) omegaStop ) {
fprintf(spec_lineshape, "%g %g\n", freq, Ftcf[i]/(factor*(ntcfpoints+nzeros)));
fprintf(vv_lineshape, "%g %g\n", freq, Ftcfvv[i]/(factor*(ntcfpoints+nzeros)));
fprintf(vh_lineshape, "%g %g\n", freq, Ftcfvh[i]/(factor*(ntcfpoints+nzeros)));
}
}
for ( int i = 0; i < (ntcfpoints+nzeros) / 2 ; i++) // "positive" FFT frequencies
{
freq = -1*i*factor + avef;
if ( freq >= (user_real_t) omegaStart) {
fprintf(spec_lineshape, "%g %g\n", freq, Ftcf[i]/(factor*(ntcfpoints+nzeros)));
fprintf(vv_lineshape, "%g %g\n", freq, Ftcfvv[i]/(factor*(ntcfpoints+nzeros)));
fprintf(vh_lineshape, "%g %g\n", freq, Ftcfvh[i]/(factor*(ntcfpoints+nzeros)));
}
}
fclose(spec_lineshape);
fclose(vv_lineshape);
fclose(vh_lineshape);
// free memory on the CPU and GPU and finalize magma library
magma_queue_destroy( queue );
free(x);
free(Ftcf);
free(Ftcfvv);
free(Ftcfvh);
free(tcf);
free(tcfvv);
free(tcfvh);
free(pdtcf);
free(Rw);
free(Pw);
free(kappa);
free(Rmw);
hipFree(x_d);
hipFree(Ftcf_d);
hipFree(mux_d);
hipFree(muy_d);
hipFree(muz_d);
hipFree(eproj_d);
hipFree(cmux_d);
hipFree(cmuy_d);
hipFree(cmuz_d);
hipFree(cmux0_d);
hipFree(cmuy0_d);
hipFree(cmuz0_d);
hipFree(tmpmu_d);
hipFree(axx_d);
hipFree(ayy_d);
hipFree(azz_d);
hipFree(axy_d);
hipFree(ayz_d);
hipFree(azx_d);
hipFree(caxx_d);
hipFree(cayy_d);
hipFree(cazz_d);
hipFree(caxy_d);
hipFree(cayz_d);
hipFree(cazx_d);
hipFree(caxx0_d);
hipFree(cayy0_d);
hipFree(cazz0_d);
hipFree(caxy0_d);
hipFree(cayz0_d);
hipFree(cazx0_d);
hipFree(F_d);
if ( ALLOCATE_2DGPU_ONCE )
{
hipFree(kappa_d);
hipFree(ckappa_d);
hipFree(ctmpmat_d);
hipFree(prop_d);
}
magma_free(pdtcf_d);
// free memory used for diagonalization
if ( SSYEVD_ALLOC_FLAG == 0 )
{
free(w);
free(iwork);
magma_free_pinned( work );
magma_free_pinned( wA );
}
// free memory used in spectral density calculation
// CPU arrays
free(omega);
free(Sw);
free(tmpSw);
// GPU arrays
hipFree(MUX_d);
hipFree(MUY_d);
hipFree(MUZ_d);
hipFree(omega_d);
hipFree(Sw_d);
hipFree(w_d);
// final call to finalize magma math library
magma_finalize();
end = time(NULL);
printf("\n>>> Done with the calculation in %f seconds.\n", difftime(end,start));
return 0;
}
/**********************************************************
BUILD ELECTRIC FIELD PROJECTION ALONG OH BONDS
GPU FUNCTION
**********************************************************/
__global__
void get_eproj_GPU( rvec *x, float boxx, float boxy, float boxz, int natoms, int natom_mol,
int nchrom, int nchrom_mol, int nmol, int model, user_real_t *eproj )
{
int n, m, i, j, istart, istride;
int chrom;
user_real_t mox[XDR_DIM]; // oxygen position on molecule m
user_real_t mx[XDR_DIM]; // atom position on molecule m
user_real_t nhx[XDR_DIM]; // hydrogen position on molecule n of the current chromophore
user_real_t nox[XDR_DIM]; // oxygen position on molecule n
user_real_t nohx[XDR_DIM]; // the unit vector pointing along the OH bond for the current chromophore
user_real_t mom[XDR_DIM]; // the OM vector on molecule m
user_real_t dr[XDR_DIM]; // the min image vector between two atoms
user_real_t r; // the distance between two atoms
const float cutoff = 0.7831; // the OH cutoff distance (nm)
const float bohr_nm = 18.8973; // convert from bohr to nanometer
user_real_t efield[XDR_DIM]; // the electric field vector
istart = blockIdx.x * blockDim.x + threadIdx.x;
istride = blockDim.x * gridDim.x;
// Loop over the chromophores belonging to the current thread
for ( chrom = istart; chrom < nchrom; chrom += istride )
{
// calculate the molecule hosting the current chromophore
n = chrom / nchrom_mol;
// initialize the electric field vector to zero at this chromophore
efield[0] = 0.;
efield[1] = 0.;
efield[2] = 0.;
// *** GET INFO ABOUT MOLECULE N HOSTING CHROMOPHORE *** //
// N IS OUR REFERENCE MOLECULE //
// get the position of the hydrogen associated with the current stretch
// NOTE: I'm making some assumptions about the ordering of the positions,
// this can be changed if necessary for a more robust program
// Throughout, I assume that the atoms are grouped by molecule and that
// within each molecule the atom order is OW, HW1, HW2, MW (indices 0, 1, 2, 3)
if ( chrom % 2 == 0 ){ //HW1
nhx[0] = x[ n*natom_mol + 1 ][0];
nhx[1] = x[ n*natom_mol + 1 ][1];
nhx[2] = x[ n*natom_mol + 1 ][2];
}
else if ( chrom % 2 == 1 ){ //HW2
nhx[0] = x[ n*natom_mol + 2 ][0];
nhx[1] = x[ n*natom_mol + 2 ][1];
nhx[2] = x[ n*natom_mol + 2 ][2];
}
// The oxygen position
nox[0] = x[ n*natom_mol ][0];
nox[1] = x[ n*natom_mol ][1];
nox[2] = x[ n*natom_mol ][2];
// The oh unit vector
nohx[0] = minImage( nhx[0] - nox[0], boxx );
nohx[1] = minImage( nhx[1] - nox[1], boxy );
nohx[2] = minImage( nhx[2] - nox[2], boxz );
r = mag3(nohx);
nohx[0] /= r;
nohx[1] /= r;
nohx[2] /= r;
// for testing with YICUN -- can change to ROH later...
//nohx[0] /= 0.09572;
//nohx[1] /= 0.09572;
//nohx[2] /= 0.09572;
// *** DONE WITH MOLECULE N *** //
// *** LOOP OVER ALL OTHER MOLECULES *** //
for ( m = 0; m < nmol; m++ ){
// skip the reference molecule
if ( m == n ) continue;
// get oxygen position on current molecule
mox[0] = x[ m*natom_mol ][0];
mox[1] = x[ m*natom_mol ][1];
mox[2] = x[ m*natom_mol ][2];
// find displacement between oxygen on m and hydrogen on n
dr[0] = minImage( mox[0] - nhx[0], boxx );
dr[1] = minImage( mox[1] - nhx[1], boxy );
dr[2] = minImage( mox[2] - nhx[2], boxz );
r = mag3(dr);
// skip if the distance is greater than the cutoff
if ( r > cutoff ) continue;
// loop over all atoms in the current molecule and calculate the electric field
// (excluding the oxygen atoms since they have no charge)
for ( i=1; i < natom_mol; i++ ){
// position of current atom
mx[0] = x[ m*natom_mol + i ][0];
mx[1] = x[ m*natom_mol + i ][1];
mx[2] = x[ m*natom_mol + i ][2];
// Move m site to TIP4P distance if model is E3B3 or TIP4P2005 -- this must be done to use the TIP4P map
if ( i == 3 )
{
if ( model != 0 )
{
// get the OM unit vector
mom[0] = minImage( mx[0] - mox[0], boxx );
mom[1] = minImage( mx[1] - mox[1], boxy );
mom[2] = minImage( mx[2] - mox[2], boxz );
r = mag3(mom);
// TIP4P OM distance is 0.015 nm along the OM bond
mx[0] = mox[0] + 0.0150*mom[0]/r;
mx[1] = mox[1] + 0.0150*mom[1]/r;
mx[2] = mox[2] + 0.0150*mom[2]/r;
}
}
// the minimum image displacement between the reference hydrogen and the current atom
// NOTE: this is converted to bohr so the efield will be in atomic units
dr[0] = minImage( nhx[0] - mx[0], boxx )*bohr_nm;
dr[1] = minImage( nhx[1] - mx[1], boxy )*bohr_nm;
dr[2] = minImage( nhx[2] - mx[2], boxz )*bohr_nm;
r = mag3(dr);
// Add the contribution of the current atom to the electric field
if ( i < 3 ){ // HW1 and HW2
for ( j=0; j < XDR_DIM; j++){
efield[j] += 0.52 * dr[j] / (r*r*r);
}
}
else if ( i == 3 ){ // MW (note the negative sign)
for ( j=0; j < XDR_DIM; j++){
efield[j] -= 1.04 * dr[j] / (r*r*r);
}
}
} // end loop over atoms in molecule m
} // end loop over molecules m
// project the efield along the OH bond to get the relevant value for the map
eproj[chrom] = dot3( efield, nohx );
} // end loop over reference chromophores
}
/**********************************************************
BUILD HAMILTONIAN AND RETURN TRANSITION DIPOLE VECTOR
FOR EACH CHROMOPHORE ON THE GPU
**********************************************************/
__global__
void get_kappa_GPU( rvec *x, float boxx, float boxy, float boxz, int natoms, int natom_mol, int nchrom, int nchrom_mol, int nmol,
user_real_t *eproj, user_real_t *kappa, user_real_t *mux, user_real_t *muy, user_real_t *muz, user_real_t *axx,
user_real_t *ayy, user_real_t *azz, user_real_t *axy, user_real_t *ayz, user_real_t *azx, user_real_t avef, int ispecies,
int imap)
{
int n, m, istart, istride;
int chromn, chromm;
user_real_t mox[XDR_DIM]; // oxygen position on molecule m
user_real_t mhx[XDR_DIM]; // atom position on molecule m
user_real_t nhx[XDR_DIM]; // hydrogen position on molecule n of the current chromophore
user_real_t nox[XDR_DIM]; // oxygen position on molecule n
user_real_t noh[XDR_DIM];
user_real_t moh[XDR_DIM];
user_real_t nmu[XDR_DIM];
user_real_t mmu[XDR_DIM];
user_real_t mmuprime;
user_real_t nmuprime;
user_real_t dr[XDR_DIM]; // the min image vector between two atoms
user_real_t r; // the distance between two atoms
const user_real_t bohr_nm = 18.8973; // convert from bohr to nanometer
const user_real_t cm_hartree = 2.1947463E5; // convert from cm-1 to hartree
user_real_t En, Em; // the electric field projection
user_real_t xn, xm, pn, pm; // the x and p from the map
user_real_t wn, wm; // the energies
// define the maps
user_real_t map_w[3], map_x[2], map_p[2], map_mup[3], map_wi[3];
// 2013 maps from gruenbaum
if ( imap == 0 ){
// H2O and HOD/D2O
if ( ispecies == 0 || ispecies == 1 ){
map_w[0] = 3670.2; map_w[1] = -3541.7; map_w[2] = -152677.0;
map_x[0] = 0.19285; map_x[1] = -1.7261E-5;
map_p[0] = 1.6466; map_p[1] = 5.7692E-4;
}
// D2O and HOD/H2O
if ( ispecies == 2 || ispecies == 3 ){
map_w[0] = 2767.8; map_w[1] = -2630.3; map_w[2] = -102601.0;
map_x[0] = 0.16593; map_x[1] = -2.0632E-5;
map_p[0] = 2.0475; map_p[1] = 8.9108E-4;
}
map_mup[0] = 0.1646; map_mup[1] = 11.39; map_mup[2] = 63.41;
map_wi[0] = -1361.0; map_wi[1] = 27165.0; map_wi[2] = -1.887;
}
// 2010 map from Li and Skinner
else if ( imap == 1 )
{
// H2O and HOD/D2O
if ( ispecies == 0 || ispecies == 1 ){
map_w[0] = 3732.9; map_w[1] = -3519.8; map_w[2] = -153520.0;
map_x[0] = 0.19318; map_x[1] = -1.7248E-5;
map_p[0] = 1.6102; map_p[1] = 5.8697E-4;
}
// D2O and HOD/H2O
if ( ispecies == 2 || ispecies == 3 ){
map_w[0] = 2748.2; map_w[1] = -2572.2; map_w[2] = -102980.0;
map_x[0] = 0.16598; map_x[1] = -2.0752E-5;
map_p[0] = 1.9813; map_p[1] = 9.1419E-4;
}
// note the wi have to be converted from hartree to cm-1 from
// the values in the table of the paper
map_mup[0] = 0.1622; map_mup[1] = 10.381; map_mup[2] = 137.6;
map_wi[0] = -1360.8; map_wi[1] = 27171.0; map_wi[2] = -1.887;
}
istart = blockIdx.x * blockDim.x + threadIdx.x;
istride = blockDim.x * gridDim.x;
// Loop over the chromophores belonging to the current thread and fill in kappa for that row
for ( chromn = istart; chromn < nchrom; chromn += istride )
{
// calculate the molecule hosting the current chromophore
// and get the corresponding electric field at the relevant hydrogen
n = chromn / nchrom_mol;
En = eproj[chromn];
// get parameters from the map
wn = map_w[0] + map_w[1]*En + map_w[2]*En*En;
xn = map_x[0] + map_x[1]*wn;
pn = map_p[0] + map_p[1]*wn;
nmuprime = map_mup[0] + map_mup[1]*En + map_mup[2]*En*En;
// and calculate the location of the transition dipole moment
// SEE get_eproj_GPU for assumptions about ordering of atoms
nox[0] = x[ n*natom_mol ][0];
nox[1] = x[ n*natom_mol ][1];
nox[2] = x[ n*natom_mol ][2];
if ( chromn % 2 == 0 ) //HW1
{
nhx[0] = x[ n*natom_mol + 1 ][0];
nhx[1] = x[ n*natom_mol + 1 ][1];
nhx[2] = x[ n*natom_mol + 1 ][2];
}
else if ( chromn % 2 == 1 ) //HW2
{
nhx[0] = x[ n*natom_mol + 2 ][0];
nhx[1] = x[ n*natom_mol + 2 ][1];
nhx[2] = x[ n*natom_mol + 2 ][2];
}
// The OH unit vector
noh[0] = minImage( nhx[0] - nox[0], boxx );
noh[1] = minImage( nhx[1] - nox[1], boxy );
noh[2] = minImage( nhx[2] - nox[2], boxz );
r = mag3(noh);
noh[0] /= r;
noh[1] /= r;
noh[2] /= r;
// The location of the TDM
nmu[0] = minImage( nox[0] + 0.067 * noh[0], boxx );
nmu[1] = minImage( nox[1] + 0.067 * noh[1], boxy );
nmu[2] = minImage( nox[2] + 0.067 * noh[2], boxz );
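// the transition dipole is placed 0.067 nm (0.67 A) from the oxygen along the OH unit vector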
// and the TDM vector to return
mux[chromn] = noh[0] * nmuprime * xn;
muy[chromn] = noh[1] * nmuprime * xn;
muz[chromn] = noh[2] * nmuprime * xn;
// and the polarizability
axx[chromn] = (4.6 * noh[0] * noh[0] + 1.0) * xn;
ayy[chromn] = (4.6 * noh[1] * noh[1] + 1.0) * xn;
azz[chromn] = (4.6 * noh[2] * noh[2] + 1.0) * xn;
axy[chromn] = 4.6 * noh[0] * noh[1] * xn;
ayz[chromn] = 4.6 * noh[1] * noh[2] * xn;
azx[chromn] = 4.6 * noh[2] * noh[0] * xn;
// Loop over all other chromophores
for ( chromm = 0; chromm < nchrom; chromm ++ )
{
// calculate the molecule hosting the current chromophore
// and get the corresponding electric field at the relevant hydrogen
m = chromm / nchrom_mol;
Em = eproj[chromm];
// also get the relevant x and p from the map
// get parameters from the map
wm = map_w[0] + map_w[1]*Em + map_w[2]*Em*Em;
xm = map_x[0] + map_x[1]*wm;
pm = map_p[0] + map_p[1]*wm;
mmuprime = map_mup[0] + map_mup[1]*Em + map_mup[2]*Em*Em;
// the diagonal energy
if ( chromn == chromm )
{
// Note that this is a flattened 2d array
// subtract the average frequency (avef) to get rid of highly oscillatory parts of the F matrix
kappa[chromn*nchrom + chromm] = wm - avef;
}
// intramolecular coupling
else if ( m == n )
{
// ** --
// if is HOD/H2O or HOD/D2O, no coupling
if ( ispecies == 1 || ispecies == 2 ){
kappa[chromn * nchrom + chromm ] = 0.;
}
// ** --
else{
kappa[chromn*nchrom + chromm] = (map_wi[0] + map_wi[1]*(En + Em))*xn*xm + map_wi[2]*pn*pm;
}
}
// intermolecular coupling
else
{
// ** --
// if is HOD/H2O or HOD/D2O, no coupling
if ( ispecies == 1 || ispecies == 2 ){
kappa[chromn * nchrom + chromm ] = 0.;
}
// ** --
else{
// calculate the distance between dipoles
// they are located 0.67 A from the oxygen along the OH bond
// tdm position on chromophore m
mox[0] = x[ m*natom_mol ][0];
mox[1] = x[ m*natom_mol ][1];
mox[2] = x[ m*natom_mol ][2];
if ( chromm % 2 == 0 ) //HW1
{
mhx[0] = x[ m*natom_mol + 1 ][0];
mhx[1] = x[ m*natom_mol + 1 ][1];
mhx[2] = x[ m*natom_mol + 1 ][2];
}
else if ( chromm % 2 == 1 ) //HW2
{
mhx[0] = x[ m*natom_mol + 2 ][0];
mhx[1] = x[ m*natom_mol + 2 ][1];
mhx[2] = x[ m*natom_mol + 2 ][2];
}
// The OH unit vector
moh[0] = minImage( mhx[0] - mox[0], boxx );
moh[1] = minImage( mhx[1] - mox[1], boxy );
moh[2] = minImage( mhx[2] - mox[2], boxz );
r = mag3(moh);
moh[0] /= r;
moh[1] /= r;
moh[2] /= r;
// The location of the TDM and the dipole derivative
mmu[0] = minImage( mox[0] + 0.067 * moh[0], boxx );
mmu[1] = minImage( mox[1] + 0.067 * moh[1], boxy );
mmu[2] = minImage( mox[2] + 0.067 * moh[2], boxz );
// the distance between TDM on N and on M and convert to unit vector
dr[0] = minImage( nmu[0] - mmu[0], boxx );
dr[1] = minImage( nmu[1] - mmu[1], boxy );
dr[2] = minImage( nmu[2] - mmu[2], boxz );
r = mag3( dr );
dr[0] /= r;
dr[1] /= r;
dr[2] /= r;
r *= bohr_nm; // convert to bohr
// The coupling in the transition dipole approximation in wavenumber
// Note the conversion to wavenumber
kappa[chromn*nchrom + chromm] = ( dot3( noh, moh ) - 3.0 * dot3( noh, dr ) *
dot3( moh, dr ) ) / ( r*r*r ) *
xn*xm*nmuprime*mmuprime*cm_hartree;
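// i.e. kappa_nm = [ u_n.u_m - 3 (u_n.r)(u_m.r) ] / r^3 * x_n * x_m * mu'_n * mu'_m, with r in bohr and the result in cm-1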
}
}// end intermolecular coupling
}// end loop over chromm
}// end loop over reference
}
/**********************************************************
Calculate the Spectral Density
**********************************************************/
__global__
void get_spectral_density( user_real_t *w, user_real_t *MUX, user_real_t *MUY, user_real_t *MUZ, user_real_t *omega, user_real_t *Sw,
int nomega, int nchrom, user_real_t t1, user_real_t avef ){
int istart, istride, i, chromn;
user_real_t wi, dw, MU2, gamma;
// split up each desired frequency to separate thread on GPU
istart = blockIdx.x * blockDim.x + threadIdx.x;
istride = blockDim.x * gridDim.x;
// the linewidth parameter
gamma = HBAR/(t1 * 2.0);
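// gamma is the half width at half maximum of the Lorentzian lineshape arising from the lifetime t1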
// Loop over the frequencies belonging to the current thread
for ( i = istart; i < nomega; i += istride )
{
// get current frequency
wi = omega[i];
// Loop over all chromophores calculating the spectral intensity at the current frequency
for ( chromn = 0; chromn < nchrom; chromn ++ ){
// calculate the TDM squared and get the mode energy
MU2 = MUX[chromn]*MUX[chromn] + MUY[chromn]*MUY[chromn] + MUZ[chromn]*MUZ[chromn];
dw = wi - (w[chromn] + avef) ; // also adjust for avef subtracted from kappa
// add a lorentzian lineshape to the spectral density
Sw[i] += MU2 * gamma / ( dw*dw + gamma*gamma )/PI;
}
}
}
/**********************************************************
HELPER FUNCTIONS FOR GPU CALCULATIONS
CALLABLE FROM CPU AND GPU
**********************************************************/
// The minimum image of a scalar displacement
user_real_t minImage( user_real_t dx, user_real_t boxl )
{
return dx - boxl*round(dx/boxl);
}
// The magnitude of a 3 dimensional vector
user_real_t mag3( user_real_t dx[3] )
{
return sqrt( dot3( dx, dx ) );
}
// The dot product of two 3 dimensional vectors
user_real_t dot3( user_real_t x[3], user_real_t y[3] )
{
return x[0]*y[0] + x[1]*y[1] + x[2]*y[2];
}
// cast the matrix from float to complex -- this may not be the best way to do this, but it is quick to implement
__global__
void cast_to_complex_GPU ( user_real_t *d_d, user_complex_t *z_d, magma_int_t n )
{
int istart, istride, i;
// split up each desired frequency to separate thread on GPU
istart = blockIdx.x * blockDim.x + threadIdx.x;
istride = blockDim.x * gridDim.x;
// convert from float to complex
for ( i = istart; i < n; i += istride )
{
z_d[i] = MAGMA_MAKE( d_d[i], 0.0 );
}
}
// initialize the propagation matrix
__global__
void Pinit ( user_complex_t *prop_d, user_real_t *w_d, int n, user_real_t dt )
{
int istart, istride, i, j;
user_real_t arg;
// each will occur on a separate thread on the gpu
istart = blockIdx.x * blockDim.x + threadIdx.x;
istride = blockDim.x * gridDim.x;
for ( i = istart; i < n; i += istride )
{
// zero matrix
for ( j = 0; j < n; j ++ ) prop_d[ i*n + j] = MAGMA_ZERO;
// P = exp(iwt/hbar)
arg = w_d[i] * dt / HBAR;
prop_d[ i*n + i ] = MAGMA_MAKE( cos(arg), sin(arg) );
}
}
// initialize the F matrix on the gpu to the unit matrix
__global__
void makeI ( user_complex_t *mat, int n )
{
int istart, istride, i, j;
// each will occur on a separate thread on the gpu
istart = blockIdx.x * blockDim.x + threadIdx.x;
istride = blockDim.x * gridDim.x;
// each thread zeros its rows and sets the diagonal element to one
for ( i = istart; i < n; i += istride )
{
for ( j = 0; j < n; j++ ) mat[ i*n + j ] = MAGMA_ZERO;
mat[ i * n + i ] = MAGMA_ONE;
}
}
// parse input file to setup calculation
void ir_init( char *argv[], char gmxf[], char cptf[], char outf[], char model[], user_real_t *dt, int *ntcfpoints,
int *nsamples, float *sampleEvery, user_real_t *t1, user_real_t *avef, user_real_t *omegaStart, user_real_t *omegaStop,
int *omegaStep, int *natom_mol, int *nchrom_mol, int *nzeros, user_real_t *beginTime,
char species[], int *imap )
{
char para[MAX_STR_LEN];
char value[MAX_STR_LEN];
FILE *inpf = fopen(argv[1],"r");
if ( inpf == NULL )
{
printf("ERROR: Could not open %s. The first argument should contain a vaild\nfile name that points to a file containing the simulation parameters.", argv[1]);
exit(EXIT_FAILURE);
}
else printf(">>> Reading parameters from input file %s\n", argv[1]);
// Parse input file
while (fscanf( inpf, "%s%s%*[^\n]", para, value ) != EOF)
{
if ( strcmp(para,"xtcf") == 0 )
{
sscanf( value, "%s", gmxf );
}
else if ( strcmp(para,"outf") == 0 )
{
sscanf( value, "%s", outf );
}
else if ( strcmp(para,"cptf") == 0 )
{
sscanf( value, "%s", cptf );
}
else if ( strcmp(para,"model") == 0 )
{
sscanf( value, "%s", model );
}
else if ( strcmp(para,"ntcfpoints") == 0 )
{
sscanf( value, "%d", (int *) ntcfpoints );
}
else if ( strcmp(para,"nsamples") == 0 )
{
sscanf( value, "%d", (int *) nsamples);
}
else if ( strcmp(para,"sampleEvery") == 0 )
{
sscanf( value, "%f", (float *) sampleEvery );
}
else if ( strcmp(para,"omegaStep") == 0 )
{
sscanf( value, "%d", (int *) omegaStep );
}
else if ( strcmp(para,"natom_mol") == 0 )
{
sscanf( value, "%d", (int *) natom_mol );
}
else if ( strcmp(para,"nchrom_mol") == 0 )
{
sscanf( value, "%d", (int *) nchrom_mol );
}
else if ( strcmp(para,"nzeros") == 0 )
{
sscanf( value, "%d", (int *) nzeros );
}
else if ( strcmp(para,"map") == 0 )
{
sscanf( value, "%d", (int *) imap );
}
else if ( strcmp(para,"species") == 0 )
{
sscanf( value, "%s", species );
}
else if ( strcmp(para,"dt") == 0 )
{
sscanf( value, "%f", dt );
}
else if ( strcmp(para,"t1") == 0 )
{
sscanf( value, "%f", t1 );
}
else if ( strcmp(para,"avef") == 0 )
{
sscanf( value, "%f", avef );
}
else if ( strcmp(para,"beginTime") == 0 )
{
sscanf( value, "%f", beginTime );
}
else if ( strcmp(para,"omegaStart") == 0 )
{
sscanf( value, "%f", (user_real_t *) omegaStart );
}
else if ( strcmp(para,"omegaStop") == 0 )
{
sscanf( value, "%f", (user_real_t *) omegaStop );
}
else
{
printf("WARNING: Parameter %s in input file %s not recognized, ignoring.\n", para, argv[1]);
}
}
fclose(inpf);
printf(">>> Done reading input file and setting parameters\n");
}
// Progress bar to keep updated on tcf
void printProgress( int currentStep, int totalSteps )
{
user_real_t percentage = (user_real_t) currentStep / (user_real_t) totalSteps;
int lpad = (int) (percentage*PWID);
int rpad = PWID - lpad;
fprintf(stderr, "\r [%.*s%*s]%3d%%", lpad, PSTR, rpad, "",(int) (percentage*100));
}
| d571ae94e735690bebd57f3d6b38114a80dea2a0.cu | /* This program calculates the OH stetch IR absorption spectrum
* for coupled water from an MD trajectory. The exciton Hamilt-
* onian is built using the maps developed by Skinner and co-
* workers
*/
#include "calcIR.h"
int main(int argc, char *argv[])
{
// Some help for starting the program. User must supply a single argument
if ( argc != 2 ){
printf("Usage:\n"
"\tInclude as the first argument the name of an input file. No other arguments are allowed.\n");
exit(EXIT_FAILURE);
}
// retrieve and print info about gpu
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,0);
printf("\nGPU INFO:\n"
"\tDevice name: %s\n"
"\tMemory: %g gb\n",
prop.name, prop.totalGlobalMem/(1.E9));
// *** Variable Declaration *** //
// **************************************************** //
printf("\n>>> Setting default parameters\n");
// Model parameters
char gmxf[MAX_STR_LEN]; strncpy( gmxf, "traj.xtc", MAX_STR_LEN ); // trajectory file
char outf[MAX_STR_LEN]; strncpy( outf, "spec", MAX_STR_LEN ); // name for output files
char cptf[MAX_STR_LEN]; strncpy( cptf, "spec", MAX_STR_LEN ); // name for output files
char model[MAX_STR_LEN];strncpy( model,"e3b3", MAX_STR_LEN ); // water model tip4p, tip4p2005, e3b2, e3b3
int imodel = 0; // integer for water model
int imap = 0; // integer for spectroscopic map used (0 - 2013 Gruenbaum) (1 - 2010 Li)
int ispecies = 0; // integer for species of interest
int ntcfpoints = 150 ; // the number of tcf points for each spectrum
int nsamples = 1 ; // number of samples to average for the total spectrum
float sampleEvery = 10. ; // sample a new configuration every sampleEvery ps. Note the way the program is written,
// ntcfpoints*dt must be less than sampleEvery.
user_real_t omegaStart = 2000; // starting frequency for spectral density
user_real_t omegaStop = 5000; // ending frequency for spectral density
int omegaStep = 5; // resolution for spectral density
int natom_mol = 4; // Atoms per water molecule :: MODEL DEPENDENT
int nchrom_mol = 2; // Chromophores per molecule :: TWO for stretch -- ONE for bend
int nzeros = 25600; // zeros for padding fft
user_real_t dt = 0.010; // dt between frames in xtc file (in ps)
user_real_t beginTime = 0 ; // the beginning time in ps to allow for equilibration, if desired
user_real_t t1 = 0.260; // relaxation time ( in ps )
user_real_t avef = 3415.2; // the approximate average stretch frequency to get rid of high
// frequency oscillations in the time correlation function
char species[MAX_STR_LEN]; strncpy( species, " ", MAX_STR_LEN ); // species HOD/H2O HOD/D2O H2O D2O
// read in model parameters
// START FROM INPUT FILE
ir_init( argv, gmxf, cptf, outf, model, &dt, &ntcfpoints, &nsamples, &sampleEvery, &t1,
&avef, &omegaStart, &omegaStop, &omegaStep, &natom_mol, &nchrom_mol, &nzeros, &beginTime,
species, &imap );
// Print the parameters to stdout
printf("\tSetting xtc file %s\n", gmxf );
printf("\tSetting default file name to %s\n", outf );
printf("\tSetting model to %s\n", model );
printf("\tSetting the number of tcf points to %d\n", ntcfpoints );
printf("\tSetting nsamples to %d\n", nsamples );
printf("\tSetting sampleEvery to %f (ps)\n", sampleEvery );
printf("\tSetting omegaStep to %d\n", omegaStep );
printf("\tSetting natom_mol to %d\n", natom_mol );
printf("\tSetting nchrom_mol to %d\n", nchrom_mol );
printf("\tSetting nzeros to %d\n", nzeros );
printf("\tSetting map to %d\n", imap );
printf("\tSetting species to %s\n", species );
printf("\tSetting omegaStart to %f\n", omegaStart );
printf("\tSetting omegaStop to %f\n", omegaStop );
printf("\tSetting dt to %f\n", dt );
printf("\tSetting t1 to %f (ps)\n", t1 );
printf("\tSetting avef to %f\n", avef );
printf("\tSetting equilibration time to %f (ps)\n", beginTime );
// Useful variables and constants
int natoms, nmol, nchrom; // number of atoms, molecules, chromophores
int currentSample = 0; // current sample
int currentFrame = 0; // current frame
const int ntcfpointsR = ( nzeros + ntcfpoints - 1 ) * 2; // number of points for the real fourier transform
const int nomega = ( omegaStop - omegaStart ) / omegaStep + 1; // number of frequencies for the spectral density
magma_int_t nchrom2; // nchrom squared
float desired_time; // desired time for the current frame
int nframes, est_nframes; // variables for indexing offsets
// Trajectory variables for the CPU
rvec *x; // Position vector
matrix box; // Box vectors
float gmxtime, prec; // Time at current frame, precision of xtc file
int step, xdrinfo; // The current step number
int64_t *frame_offset; // Offset for random frame access from trajectory
float frame_dt; // Time between successive frames
// GPU variables
const int blockSize = 128; // The number of threads to launch per block
rvec *x_d; // positions
user_real_t *mux_d, *muy_d, *muz_d; // transition dipole moments
user_real_t *axx_d, *ayy_d, *azz_d; // polarizability
user_real_t *axy_d, *ayz_d, *azx_d; // polarizability
user_complex_t *cmux0_d, *cmuy0_d, *cmuz0_d; // complex version of the transition dipole moment at t=0
user_complex_t *cmux_d, *cmuy_d, *cmuz_d; // complex version of the transition dipole moment
user_complex_t *caxx0_d, *cayy0_d, *cazz0_d; // complex version of the polarizability at t=0
user_complex_t *caxy0_d, *cayz0_d, *cazx0_d; // complex version of the polarizability at t=0
user_complex_t *caxx_d, *cayy_d, *cazz_d; // complex version of the polarizability
user_complex_t *caxy_d, *cayz_d, *cazx_d; // complex version of the polarizability
user_complex_t *tmpmu_d; // to sum all polarizations
user_real_t *MUX_d, *MUY_d, *MUZ_d; // transition dipole moments in the eigen basis
user_real_t *eproj_d; // the electric field projected along the oh bonds
user_real_t *kappa_d; // the hamiltonian on the GPU
user_real_t *kappa;
// magma variables for ssyevd
user_real_t aux_work[1]; // To get optimal size of lwork
magma_int_t aux_iwork[1], info; // To get optimal liwork, and return info
magma_int_t lwork, liwork; // Sizes of the real and integer work arrays
magma_int_t *iwork; // Work array
user_real_t *work; // Work array
user_real_t *w ; // Eigenvalues
user_real_t wi ; // Eigenvalues
user_real_t *wA ; // Work array
int SSYEVD_ALLOC_FLAG = 1; // flag whether to allocate ssyevd arrays -- it is turned off after they are allocated
// magma variables for gemv
magma_queue_t queue;
// variables for spectrum calculations
user_real_t *w_d; // Eigenvalues on the GPU
user_real_t *omega, *omega_d; // Frequencies on CPU and GPU
user_real_t *Sw, *Sw_d; // Spectral density on CPU and GPU
user_real_t *tmpSw; // Temporary spectral density
user_real_t *Rw; // inverse participation ratio weighted frequency distribution
user_real_t *Rmw; // inverse participation ratio weighted frequency distribution
user_real_t *Pw; // frequency distribution
user_real_t ipr; // inverse participation ratio
user_real_t mipr; // molecular inverse participation ratio
// variables for TCF
user_complex_t *F_d; // F matrix on GPU
user_complex_t *prop_d; // Propagator matrix on GPU
user_complex_t *ctmpmat_d; // temporary complex matrix for matrix multiplications on gpu
user_complex_t *ckappa_d; // A complex version of kappa
user_complex_t tcfx, tcfy, tcfz; // Time correlation function, polarized, ir
user_complex_t tcf_iiFii, tcf_iiFjj, tcf_ijFij; // Time correlation function, polarized, raman
user_complex_t dcy, tcftmp; // Decay constant and a temporary variable for the tcf
user_complex_t *pdtcf, *pdtcf_d; // padded time correlation functions
user_complex_t *tcf; // Time correlation function IR
user_complex_t *tcfvv; // Time correlation function VV raman
user_complex_t *tcfvh; // Time correlation function VH raman
user_real_t *Ftcf, *Ftcf_d; // Fourier transformed time correlation function
user_real_t *Ftcfvv, *Ftcfvh;
// For fft on gpu
cufftHandle plan;
// for timing and errors
time_t start=time(NULL), end;
cudaError_t Cuerr;
int Merr;
size_t freem, total;
int ALLOCATE_2DGPU_ONCE = 0;
// for file output
FILE *rtcf;
FILE *itcf;
FILE *spec_density;
FILE *freq_dist;
FILE *ipr_freq_dist;
FILE *mipr_freq_dist;
FILE *spec_lineshape;
FILE *vv_lineshape;
FILE *vv_rtcf;
FILE *vv_itcf;
FILE *vh_lineshape;
FILE *vh_rtcf;
FILE *vh_itcf;
char *fname;
fname = (char *) malloc( strlen(outf) + 12 ); // room for the longest suffix ("_irrtcf.dat") plus the null terminator
user_real_t factor; // conversion factor to give energy and correct intensity from FFT
user_real_t freq;
// **************************************************** //
// *** End Variable Declaration *** //
// *** Begin main routine *** //
// **************************************************** //
// Open trajectory file and get info about the system
XDRFILE *trj = xdrfile_open( gmxf, "r" );
if ( trj == NULL )
{
printf("WARNING: The file %s could not be opened. Is the name correct?\n", gmxf);
exit(EXIT_FAILURE);
}
read_xtc_natoms( (char *)gmxf, &natoms);
nmol = natoms / natom_mol;
nchrom = nmol * nchrom_mol;
nchrom2 = (magma_int_t) nchrom*nchrom;
if ( nchrom < 6000 ) ALLOCATE_2DGPU_ONCE = 1;
printf(">>> Will read the trajectory from: %s.\n",gmxf);
printf(">>> Found %d atoms and %d molecules.\n",natoms, nmol);
printf(">>> Found %d chromophores.\n",nchrom);
// *** MEMORY ALLOCATION *** //
// **************************************************** //
// determine the number of blocks to launch on the gpu
// each thread takes care of one chromophore for building the electric field and Hamiltonian
const int numBlocks = (nchrom+blockSize-1)/blockSize;
// Initialize magma math library and queue
magma_init(); magma_queue_create( 0, &queue );
// CPU arrays
x = (rvec*) malloc( natoms * sizeof(x[0] )); if ( x == NULL ) MALLOC_ERR;
tcf = (user_complex_t *) calloc( ntcfpoints , sizeof(user_complex_t)); if ( tcf == NULL ) MALLOC_ERR;
tcfvv = (user_complex_t *) calloc( ntcfpoints , sizeof(user_complex_t)); if ( tcfvv == NULL ) MALLOC_ERR;
tcfvh = (user_complex_t *) calloc( ntcfpoints , sizeof(user_complex_t)); if ( tcfvh == NULL ) MALLOC_ERR;
Ftcf = (user_real_t *) calloc( ntcfpointsR , sizeof(user_real_t)); if ( Ftcf == NULL ) MALLOC_ERR;
Ftcfvv = (user_real_t *) calloc( ntcfpointsR , sizeof(user_real_t)); if ( Ftcfvv == NULL ) MALLOC_ERR;
Ftcfvh = (user_real_t *) calloc( ntcfpointsR , sizeof(user_real_t)); if ( Ftcfvh == NULL ) MALLOC_ERR;
// GPU arrays
Cuerr = cudaMalloc( &x_d , natoms *sizeof(x[0])); CHK_ERR;
Cuerr = cudaMalloc( &eproj_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &Ftcf_d , ntcfpointsR *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &mux_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &muy_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &muz_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &cmux_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &cmuy_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &cmuz_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &cmux0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &cmuy0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &cmuz0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &tmpmu_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &axx_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &ayy_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &azz_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &axy_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &ayz_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &azx_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &caxx_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &cayy_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &cazz_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &caxy_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &cayz_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &cazx_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &caxx0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &cayy0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &cazz0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &caxy0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &cayz0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &cazx0_d , nchrom *sizeof(user_complex_t)); CHK_ERR;
// F_d is persistent so allocate it here
Cuerr = cudaMalloc( &F_d , nchrom2 *sizeof(user_complex_t)); CHK_ERR;
// Only allocate the temporary, non-persistent 2D arrays up front if the system is small enough
// Otherwise we have to manage memory more actively to avoid
// going over the GPU max memory (4 GB on M1200)
if ( ALLOCATE_2DGPU_ONCE )
{
Cuerr = cudaMalloc( &kappa_d , nchrom2 *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &ckappa_d , nchrom2 *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &ctmpmat_d, nchrom2 *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &prop_d , nchrom2 *sizeof(user_complex_t)); CHK_ERR;
}
kappa = (user_real_t *) malloc( nchrom2 * sizeof(user_real_t)); if ( kappa == NULL ) MALLOC_ERR;
// memory for spectral density calculation
// CPU arrays
omega = (user_real_t *) malloc( nomega * sizeof(user_real_t)); if ( omega == NULL ) MALLOC_ERR;
Sw = (user_real_t *) calloc( nomega , sizeof(user_real_t)); if ( Sw == NULL ) MALLOC_ERR;
tmpSw = (user_real_t *) malloc( nomega * sizeof(user_real_t)); if ( tmpSw == NULL ) MALLOC_ERR;
Pw = (user_real_t *) calloc( nomega , sizeof(user_real_t)); if ( Pw == NULL ) MALLOC_ERR;
Rw = (user_real_t *) calloc( nomega , sizeof(user_real_t)); if ( Rw == NULL ) MALLOC_ERR;
Rmw = (user_real_t *) calloc( nomega , sizeof(user_real_t)); if ( Rmw == NULL ) MALLOC_ERR;
// GPU arrays
Cuerr = cudaMalloc( &MUX_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &MUY_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &MUZ_d , nchrom *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &omega_d , nomega *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &Sw_d , nomega *sizeof(user_real_t)); CHK_ERR;
Cuerr = cudaMalloc( &w_d , nchrom *sizeof(user_real_t)); CHK_ERR;
// initialize omega array
for (int i = 0; i < nomega; i++) omega[i] = (user_real_t) (omegaStart + omegaStep*i);
// *** END MEMORY ALLOCATION *** //
// **************************************************** //
// set imodel based on model passed...if 1, reset OM lengths to tip4p lengths
if ( strcmp( model, "tip4p2005" ) == 0 || strcmp( model, "e3b3" ) == 0 ) imodel = 1;
else if ( strcmp( model, "tip4p" ) == 0 || strcmp( model, "e3b2" ) == 0 ) imodel = 0;
else{
printf("WARNING: model: %s is not recognized. Check input file. Aborting...\n", model );
exit(EXIT_FAILURE);
}
// set ispecies based on species passed... 0 H2O, 1 HOD in D2O, 2 HOD in H2O, 3 D2O;
if ( strcmp( species, "H2O" ) == 0 ) ispecies = 0;
else if ( strcmp( species, "HOD/D2O" ) == 0 ) ispecies = 1;
else if ( strcmp( species, "HOD/H2O" ) == 0 ) ispecies = 2;
else if ( strcmp( species, "D2O" ) == 0 ) ispecies = 3;
else{
printf("WARNING: species: %s is not recognized. Check input file. Aborting...\n", species );
exit(EXIT_FAILURE);
}
// index the frames for random access
read_xtc( trj, natoms, &step, &gmxtime, box, x, &prec );
float gmxtime2 = gmxtime;
read_xtc( trj, natoms, &step, &gmxtime, box, x, &prec );
frame_dt = round((gmxtime-gmxtime2)*prec)/(1.*prec);
printf(">>> Frame time offset is: %f (ps)\n", frame_dt );
xdrfile_close(trj);
printf(">>> Now indexing the xtc file to allow random access.\n");
read_xtc_n_frames( gmxf, &nframes, &est_nframes, &frame_offset );
// open xtc file for reading
trj = xdrfile_open( gmxf, "r" );
printf("\n>>> Now calculating the absorption spectrum\n");
printf("----------------------------------------------------------\n");
// **************************************************** //
// *** OUTER LOOP OVER SAMPLES *** //
while( currentSample < nsamples )
{
desired_time = currentSample * sampleEvery + beginTime;
printf("\n Now processing sample %d/%d starting at %.2f ps\n",
currentSample + 1, nsamples, desired_time );
fflush(stdout);
// **************************************************** //
// *** MAIN LOOP OVER TRAJECTORY *** //
while( currentFrame < ntcfpoints )
{
// ---------------------------------------------------- //
// *** Get Info About The System *** //
// read the current frame from the trajectory file and copy to device memory
// this assumes that the trajectory has no gaps and starts at time zero, but should give a warning if something goes wrong
desired_time = currentSample * sampleEvery + beginTime + dt * currentFrame;
int frame = round(desired_time/frame_dt);
xdrinfo = xdr_seek( trj, frame_offset[ frame ], SEEK_SET ); // set point to beginning of current frame
//printf("%f\n", desired_time);
if ( xdrinfo != exdrOK ){
printf("WARNING:: xdr_seek returned error %d.\n", xdrinfo);
xdrfile_close(trj); exit(EXIT_FAILURE);
}
xdrinfo = read_xtc( trj, natoms, &step, &gmxtime, box, x, &prec ); // read frame from disk
if ( xdrinfo != exdrOK ){
printf("Warning:: read_xtc returned error %d.\n", xdrinfo);
xdrfile_close(trj); exit(EXIT_FAILURE);
}
if ( fabs( desired_time - gmxtime ) > frame_dt*1E-1 ){ // check that we have the frame we want
printf("\nWARNING:: could not find the desired frame at time %f (ps).\n", desired_time );
printf("I am instead at gmxtime: %f.\nIs something wrong with the trajectory?", gmxtime );
exit(EXIT_FAILURE);
}
// copy trajectory to gpu memory
cudaMemcpy( x_d, x, natoms*sizeof(x[0]), cudaMemcpyHostToDevice );
// allocate space for the hamiltonian on the GPU if actively managing GPU memory
if ( !ALLOCATE_2DGPU_ONCE ) Cuerr = cudaMalloc( &kappa_d , nchrom2 *sizeof(user_real_t)); CHK_ERR;
// launch kernel to calculate the electric field projection along OH bonds and build the exciton hamiltonian
get_eproj_GPU <<<numBlocks,blockSize>>> ( x_d, box[0][0], box[1][1], box[2][2], natoms, natom_mol, nchrom, nchrom_mol, nmol, imodel, eproj_d );
get_kappa_GPU <<<numBlocks,blockSize>>> ( x_d, box[0][0], box[1][1], box[2][2], natoms, natom_mol, nchrom, nchrom_mol, nmol, eproj_d, kappa_d,
mux_d, muy_d, muz_d, axx_d, ayy_d, azz_d, axy_d, ayz_d, azx_d, avef, ispecies, imap);
// *** Done getting System Info *** //
// ---------------------------------------------------- //
// ---------------------------------------------------- //
// *** Diagonalize the Hamiltonian *** //
// Note that kappa only needs to be diagonalized if the exact integration method or the spectral density is requested
// if the first time, query for optimal workspace dimensions
if ( SSYEVD_ALLOC_FLAG )
{
magma_ssyevd_gpu( MagmaVec, MagmaUpper, (magma_int_t) nchrom, NULL, (magma_int_t) nchrom,
NULL, NULL, (magma_int_t) nchrom, aux_work, -1, aux_iwork, -1, &info );
lwork = (magma_int_t) aux_work[0];
liwork = aux_iwork[0];
// allocate work arrays, eigenvalues and other stuff
w = (user_real_t *) malloc( nchrom * sizeof(user_real_t)); if ( w == NULL ) MALLOC_ERR;
Merr = magma_imalloc_cpu ( &iwork, liwork ); CHK_MERR;
Merr = magma_smalloc_pinned( &wA , nchrom2 ) ; CHK_MERR;
Merr = magma_smalloc_pinned( &work , lwork ); CHK_MERR;
SSYEVD_ALLOC_FLAG = 0; // is allocated here, so we won't need to do it again
// get info about space needed for diagonalization
cudaMemGetInfo( &freem, &total );
printf("\n>>> cudaMemGetInfo returned\n"
"\tfree: %g gb\n"
"\ttotal: %g gb\n", (float) freem/(1E9), (float) total/(1E9));
printf(">>> %g gb needed by diagonalization routine.\n", (float) (lwork * (float) sizeof(user_real_t)/(1E9)));
}
magma_ssyevd_gpu( MagmaVec, MagmaUpper, (magma_int_t) nchrom, kappa_d, (magma_int_t) nchrom,
w, wA, (magma_int_t) nchrom, work, lwork, iwork, liwork, &info );
if ( info != 0 ){ printf("ERROR: magma_ssyevd_gpu returned info %lld.\n", info ); exit(EXIT_FAILURE);}
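// On return from magma_ssyevd_gpu, kappa_d has been overwritten with the
// orthonormal eigenvectors of the Hamiltonian (stored column-major, LAPACK
// convention) and w holds the corresponding eigenvalues in ascending order.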
// copy eigenvalues to device memory
cudaMemcpy( w_d , w , nchrom*sizeof(user_real_t), cudaMemcpyHostToDevice );
// *** Done with the Diagonalization *** //
// ---------------------------------------------------- //
// ---------------------------------------------------- //
// *** The Spectral Density *** //
if ( currentFrame == 0 )
{
// project the transition dipole moments onto the eigenbasis
// MU_d = kappa_d**T x mu_d
magma_sgemv( MagmaTrans, (magma_int_t) nchrom, (magma_int_t) nchrom,
1.0, kappa_d, (magma_int_t) nchrom , mux_d, 1, 0.0, MUX_d, 1, queue);
magma_sgemv( MagmaTrans, (magma_int_t) nchrom, (magma_int_t) nchrom,
1.0, kappa_d, (magma_int_t) nchrom, muy_d, 1, 0.0, MUY_d, 1, queue);
magma_sgemv( MagmaTrans, (magma_int_t) nchrom, (magma_int_t) nchrom,
1.0, kappa_d, (magma_int_t) nchrom, muz_d, 1, 0.0, MUZ_d, 1, queue);
// Initialize the temporary array for the spectral density
for (int i = 0; i < nomega; i++) tmpSw[i] = 0.0;
// Copy relevant variables to device memory
cudaMemcpy( omega_d, omega, nomega*sizeof(user_real_t), cudaMemcpyHostToDevice );
cudaMemcpy( Sw_d , tmpSw, nomega*sizeof(user_real_t), cudaMemcpyHostToDevice );
// calculate the spectral density on the GPU and copy back to the CPU
get_spectral_density <<<numBlocks,blockSize>>> ( w_d, MUX_d, MUY_d, MUZ_d, omega_d, Sw_d, nomega, nchrom, t1, avef );
cudaMemcpy( tmpSw, Sw_d, nomega*sizeof(user_real_t), cudaMemcpyDeviceToHost );
// Copy temporary to persistent to get the average spectral density over samples
for (int i = 0; i < nomega; i++ ) Sw[i] += tmpSw[i];
}
// *** Done the Spectral Density *** //
// ---------------------------------------------------- //
// ---------------------------------------------------- //
// *** The Frequency Distb. *** //
// could make this a function...
// copy eigenvectors back to host memory
cudaMemcpy( kappa, kappa_d, nchrom2*sizeof(user_real_t), cudaMemcpyDeviceToHost );
// loop over the eigenstates and calculate the inverse participation ratio (ipr) of each
for ( int eign = 0; eign < nchrom; eign ++ ){
user_real_t c;
int bin_num;
// determine ipr
ipr = 0.; // initialize ipr
for ( int i = 0; i < nchrom; i ++ ){
// calculate ipr
c = kappa[eign*nchrom + i];
ipr += c*c*c*c;
}
ipr = 1./ipr;
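// With normalized eigenvectors, 1/sum(c^4) estimates the number of
// chromophores over which this eigenstate is delocalized: it is 1 for a
// state localized on a single chromophore and nchrom for a uniform state.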
// determine molecular ipr
user_real_t inner_sum, outer_sum;
int chrom;
outer_sum = 0.;
for ( int i = 0; i < nmol; i ++ ){
inner_sum = 0.; //initialize
for ( int j = 0; j < nchrom_mol; j++ ){
chrom = i*nchrom_mol + j;
c = kappa[eign*nchrom + chrom];
inner_sum += c*c;
}
outer_sum += inner_sum * inner_sum;
}
mipr = 1./outer_sum;
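// The molecular ipr is the analogous measure counted per molecule rather
// than per chromophore, ranging from 1 (one molecule) up to nmol.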
// determine frequency distribution
wi = w[eign] + avef; // frequency of current mode
// determine bin number
bin_num = (int) round((wi - omegaStart)/omegaStep);
if ( bin_num < 0 || bin_num >= nomega ){
printf("WARNING: bin_num is: %d for frequency %g. Check bounds of omegaStart and omegaStop. Aborting.\n", bin_num, wi);
exit(EXIT_FAILURE); // abort as advertised instead of writing out of bounds below
}
// divide by omegaStep to make probability density
Pw[ bin_num] += 1./(omegaStep*1.);
Rw[ bin_num] += ipr/(omegaStep*1.);
Rmw[bin_num] += mipr/(omegaStep*1.);
}
// *** Done the Frequency Distb. *** //
// ---------------------------------------------------- //
// ---------------------------------------------------- //
// *** Time Correlation Function *** //
// allocate space for complex hamiltonian if actively managing memory
if ( !ALLOCATE_2DGPU_ONCE ) Cuerr = cudaMalloc( &ckappa_d , nchrom2 *sizeof(user_complex_t)); CHK_ERR;
// cast variables to complex to calculate time correlation function (which is complex)
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( kappa_d, ckappa_d, nchrom2);
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( mux_d , cmux_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( muy_d , cmuy_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( muz_d , cmuz_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( axx_d , caxx_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( ayy_d , cayy_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( azz_d , cazz_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( axy_d , caxy_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( ayz_d , cayz_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( azx_d , cazx_d , nchrom );
// free float hamiltonian since we won't need it from here and allocate space for the rest
// of the 2D matrix variables that have not yet been allocated if actively managing memory
if ( !ALLOCATE_2DGPU_ONCE )
{
cudaFree( kappa_d );
Cuerr = cudaMalloc( &ctmpmat_d, nchrom2 *sizeof(user_complex_t)); CHK_ERR;
Cuerr = cudaMalloc( &prop_d , nchrom2 *sizeof(user_complex_t)); CHK_ERR;
}
// ---------------------------------------------------- //
// *** Calculate the F matrix *** //
if ( currentFrame == 0 )
{
// initialize the F matrix at t=0 to the unit matrix
makeI <<<numBlocks,blockSize>>> ( F_d, nchrom );
// set the transition dipole moment at t=0
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( mux_d , cmux0_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( muy_d , cmuy0_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( muz_d , cmuz0_d , nchrom );
// set the polarizability at t=0
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( axx_d , caxx0_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( ayy_d , cayy0_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( azz_d , cazz0_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( axy_d , caxy0_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( ayz_d , cayz0_d , nchrom );
cast_to_complex_GPU <<<numBlocks,blockSize>>> ( azx_d , cazx0_d , nchrom );
}
else
{
// Integrate with exact diagonalization
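// The single-timestep propagator in the site basis is
//     U(dt) = C * exp(i*Lambda*dt/HBAR) * C**T
// where C (ckappa_d) holds the eigenvectors and Lambda (w_d) the eigenvalues
// of this frame's Hamiltonian; the F matrix is then advanced as F <- U * F,
// which is what the three matrix products below implement.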
// build the propagator
Pinit <<<numBlocks,blockSize>>> ( prop_d, w_d, nchrom, dt );
// ctmpmat_d = ckappa_d * prop_d
magma_cgemm( MagmaNoTrans, MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom,
(magma_int_t) nchrom, MAGMA_ONE, ckappa_d, (magma_int_t) nchrom, prop_d,
(magma_int_t) nchrom, MAGMA_ZERO, ctmpmat_d, (magma_int_t) nchrom, queue );
// prop_d = ctmpmat_d * ckappa_d **T
magma_cgemm( MagmaNoTrans, MagmaTrans, (magma_int_t) nchrom, (magma_int_t) nchrom,
(magma_int_t) nchrom, MAGMA_ONE, ctmpmat_d, (magma_int_t) nchrom, ckappa_d,
(magma_int_t) nchrom, MAGMA_ZERO, prop_d, (magma_int_t) nchrom, queue );
// ctmpmat_d = prop_d * F
magma_cgemm( MagmaNoTrans, MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom,
(magma_int_t) nchrom, MAGMA_ONE, prop_d, (magma_int_t) nchrom, F_d,
(magma_int_t) nchrom, MAGMA_ZERO, ctmpmat_d, (magma_int_t) nchrom, queue );
// copy the F matrix back from the temporary variable to F_d
magma_ccopy( (magma_int_t) nchrom2, ctmpmat_d , 1, F_d, 1, queue );
}
// *** Done updating the F matrix *** //
// free 2d matrices if actively managing memory
if ( !ALLOCATE_2DGPU_ONCE )
{
cudaFree( ckappa_d );
cudaFree( ctmpmat_d );
cudaFree( prop_d );
}
// calculate mFm for x y and z components
// tcfx = cmux0_d**T * F_d *cmux_d
// x
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cmux0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcfx = magma_cdotu( (magma_int_t) nchrom, cmux_d, 1, tmpmu_d, 1, queue );
// y
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cmuy0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcfy = magma_cdotu( (magma_int_t) nchrom, cmuy_d, 1, tmpmu_d, 1, queue );
// z
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cmuz0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcfz = magma_cdotu( (magma_int_t) nchrom, cmuz_d, 1, tmpmu_d, 1, queue );
// accumulate the tcf over the samples for the IR spectrum
tcftmp = MAGMA_ADD( tcfx , tcfy );
tcftmp = MAGMA_ADD( tcftmp, tcfz );
tcf[ currentFrame ] = MAGMA_ADD( tcf[currentFrame], tcftmp );
// zero variables
tcf_iiFii = MAGMA_ZERO;
tcf_ijFij = MAGMA_ZERO;
tcf_iiFjj = MAGMA_ZERO;
// Now The Raman Spectrum //
//-------------------------------------------------//
// tcfxx = caxx0_d**T * F_d * caxx_d
// **
// iiFii
// **
// xxFxx
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
caxx0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFii = magma_cdotu( (magma_int_t) nchrom, caxx_d, 1, tmpmu_d, 1, queue );
// yyFyy
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cayy0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFii = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cayy_d, 1, tmpmu_d, 1, queue ), tcf_iiFii );
// zzFzz
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cazz0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFii = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cazz_d, 1, tmpmu_d, 1, queue ), tcf_iiFii );
// **
// ijFij
// **
// xyFxy
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
caxy0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_ijFij = magma_cdotu( (magma_int_t) nchrom, caxy_d, 1, tmpmu_d, 1, queue );
// yzFyz
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cayz0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_ijFij = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cayz_d, 1, tmpmu_d, 1, queue ), tcf_ijFij );
// zxFzx
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cazx0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_ijFij = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cazx_d, 1, tmpmu_d, 1, queue ), tcf_ijFij );
// **
// iiFjj
// **
// xxFyy
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
caxx0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFjj = magma_cdotu( (magma_int_t) nchrom, cayy_d, 1, tmpmu_d, 1, queue );
// xxFzz
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
caxx0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFjj = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cazz_d, 1, tmpmu_d, 1, queue ), tcf_iiFjj );
// yyFxx
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cayy0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFjj = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, caxx_d, 1, tmpmu_d, 1, queue ), tcf_iiFjj);
// yyFzz
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cayy0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFjj = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cazz_d, 1, tmpmu_d, 1, queue ), tcf_iiFjj);
// zzFxx
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cazz0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFjj = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, caxx_d, 1, tmpmu_d, 1, queue ), tcf_iiFjj);
// zzFyy
magma_cgemv( MagmaNoTrans, (magma_int_t) nchrom, (magma_int_t) nchrom, MAGMA_ONE, F_d, (magma_int_t) nchrom,
cazz0_d, 1, MAGMA_ZERO, tmpmu_d, 1, queue);
tcf_iiFjj = MAGMA_ADD( magma_cdotu( (magma_int_t) nchrom, cayy_d, 1, tmpmu_d, 1, queue ), tcf_iiFjj);
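// Rotationally average the polarizability correlation functions: for an
// isotropic ensemble the polarized and depolarized Raman responses are
//     VV = ( 3*iiFii + iiFjj + 4*ijFij ) / 15
//     VH = ( 2*iiFii - iiFjj + 6*ijFij ) / 30
// which is what the two accumulations below compute.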
// accumulate the tcf over the samples for the VV raman spectrum
tcftmp = MAGMA_ADD( MAGMA_MUL(MAGMA_MAKE(3.,0.), tcf_iiFii), tcf_iiFjj );
tcftmp = MAGMA_ADD( tcftmp, MAGMA_MUL(MAGMA_MAKE(4.,0.), tcf_ijFij ));
tcftmp = MAGMA_DIV( tcftmp, MAGMA_MAKE(15.,0.) );
tcfvv[ currentFrame ] = MAGMA_ADD( tcfvv[currentFrame], tcftmp );
// accumulate the tcf over the samples for the VH raman spectrum
tcftmp = MAGMA_ADD( MAGMA_MUL(MAGMA_MAKE(2.,0.), tcf_iiFii), MAGMA_MUL( MAGMA_MAKE(-1.,0.), tcf_iiFjj ));
tcftmp = MAGMA_ADD( tcftmp, MAGMA_MUL(MAGMA_MAKE(6.,0.), tcf_ijFij ));
tcftmp = MAGMA_DIV( tcftmp, MAGMA_MAKE(30.,0.) );
tcfvh[ currentFrame ] = MAGMA_ADD( tcfvh[currentFrame], tcftmp );
// *** Done with Time Correlation *** //
// ---------------------------------------------------- //
// update progress bar if simulation is big enough, otherwise it really isn't necessary
if ( nchrom > 400 ) printProgress( currentFrame, ntcfpoints-1 );
// done with current frame, move to next
currentFrame += 1;
}
// done with current sample, move to next, and reset currentFrame to 0
currentSample +=1;
currentFrame = 0;
} // end outer loop
printf("\n\n----------------------------------------------------------\n");
printf("Finishing up...\n");
// close xdr file
xdrfile_close(trj);
// *** IR Spectrum *** //
// ---------------------------------------------------- //
// pad the time correlation function with zeros, copy to device memory and perform fft
// fourier transform the time correlation function on the GPU
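// Each tcf point is damped by exp(-t/(2*T1)) to account for vibrational
// lifetime (population) relaxation -- the factor of two reflects that T1
// decay contributes half its rate to the coherence decay -- and divided by
// nsamples to average over the samples accumulated above.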
pdtcf = (user_complex_t *) calloc( ntcfpoints+nzeros, sizeof(user_complex_t));
for ( int i = 0; i < ntcfpoints; i++ )
{
// multiply the tcf by the relaxation term
dcy = MAGMA_MAKE(exp( -1.0 * i * dt / ( 2.0 * t1 ))/(1.*nsamples), 0.0);
tcf[i] = MAGMA_MUL( tcf[i], dcy );
pdtcf[i] = tcf[i];
}
for ( int i = 0; i < nzeros; i++ ) pdtcf[i+ntcfpoints] = MAGMA_ZERO;
cudaMalloc( &pdtcf_d , (ntcfpoints+nzeros)*sizeof(user_complex_t));
cudaMemcpy( pdtcf_d, pdtcf, (ntcfpoints+nzeros)*sizeof(user_complex_t), cudaMemcpyHostToDevice );
cufftPlan1d ( &plan, ntcfpoints+nzeros, CUFFT_C2R, 1);
cufftExecC2R ( plan, pdtcf_d, Ftcf_d );
cudaMemcpy ( Ftcf, Ftcf_d, ntcfpointsR*sizeof(user_real_t), cudaMemcpyDeviceToHost );
// *** VV Spectrum *** //
// ---------------------------------------------------- //
for ( int i = 0; i < ntcfpoints; i++ )
{
// multiply the tcf by the relaxation term
dcy = MAGMA_MAKE(exp( -1.0 * i * dt / ( 2.0 * t1 ))/(1.*nsamples), 0.0);
tcfvv[i] = MAGMA_MUL( tcfvv[i], dcy );
pdtcf[i] = tcfvv[i];
}
for ( int i = 0; i < nzeros; i++ ) pdtcf[i+ntcfpoints] = MAGMA_ZERO;
cudaMemcpy( pdtcf_d, pdtcf, (ntcfpoints+nzeros)*sizeof(user_complex_t), cudaMemcpyHostToDevice );
cufftExecC2R ( plan, pdtcf_d, Ftcf_d );
cudaMemcpy ( Ftcfvv, Ftcf_d, ntcfpointsR*sizeof(user_real_t), cudaMemcpyDeviceToHost );
// *** VH Spectrum *** //
// ---------------------------------------------------- //
for ( int i = 0; i < ntcfpoints; i++ )
{
// multiply the tcf by the relaxation term
dcy = MAGMA_MAKE(exp( -1.0 * i * dt / ( 2.0 * t1 ))/(1.*nsamples), 0.0);
tcfvh[i] = MAGMA_MUL( tcfvh[i], dcy );
pdtcf[i] = tcfvh[i];
}
for ( int i = 0; i < nzeros; i++ ) pdtcf[i+ntcfpoints] = MAGMA_ZERO;
cudaMemcpy( pdtcf_d, pdtcf, (ntcfpoints+nzeros)*sizeof(user_complex_t), cudaMemcpyHostToDevice );
cufftExecC2R ( plan, pdtcf_d, Ftcf_d );
cudaMemcpy ( Ftcfvh, Ftcf_d, ntcfpointsR*sizeof(user_real_t), cudaMemcpyDeviceToHost );
cufftDestroy(plan);
// normalize spectral density by number of samples
for ( int i = 0; i < nomega; i++) Sw[i] = Sw[i] / (user_real_t) nsamples;
// normalize the frequency and ipr weighted frequency distributions
for ( int i = 0; i < nomega; i ++ ) Pw[i] /= nchrom*nsamples*ntcfpoints;
for ( int i = 0; i < nomega; i ++ ) Rw[i] /= nchrom*nsamples*ntcfpoints;
for ( int i = 0; i < nomega; i ++ ) Rw[i] /= Pw[i];
for ( int i = 0; i < nomega; i ++ ) Rmw[i] /= nchrom*nsamples*ntcfpoints;
for ( int i = 0; i < nomega; i ++ ) Rmw[i] /= Pw[i];
// write time correlation function
rtcf = fopen(strcat(strcpy(fname,outf),"_irrtcf.dat"), "w");
itcf = fopen(strcat(strcpy(fname,outf),"_iritcf.dat"), "w");
vv_rtcf = fopen(strcat(strcpy(fname,outf),"_vvrtcf.dat"), "w");
vv_itcf = fopen(strcat(strcpy(fname,outf),"_vvitcf.dat"), "w");
vh_rtcf = fopen(strcat(strcpy(fname,outf),"_vhrtcf.dat"), "w");
vh_itcf = fopen(strcat(strcpy(fname,outf),"_vhitcf.dat"), "w");
for ( int i = 0; i < ntcfpoints; i++ )
{
fprintf( rtcf, "%g %g \n", i*dt, MAGMA_REAL( tcf[i] ) );
fprintf( itcf, "%g %g \n", i*dt, MAGMA_IMAG( tcf[i] ) );
fprintf( vv_rtcf, "%g %g \n", i*dt, MAGMA_REAL( tcfvv[i] ) );
fprintf( vv_itcf, "%g %g \n", i*dt, MAGMA_IMAG( tcfvv[i] ) );
fprintf( vh_rtcf, "%g %g \n", i*dt, MAGMA_REAL( tcfvh[i] ) );
fprintf( vh_itcf, "%g %g \n", i*dt, MAGMA_IMAG( tcfvh[i] ) );
}
fclose( rtcf );
fclose( itcf );
fclose( vv_rtcf );
fclose( vv_itcf );
fclose( vh_rtcf );
fclose( vh_itcf );
// write the spectral density
spec_density = fopen(strcat(strcpy(fname,outf),"_spdn.dat"), "w");
for ( int i = 0; i < nomega; i++) fprintf(spec_density, "%g %g\n", omega[i], Sw[i]);
fclose(spec_density);
// write the frequency distributions
freq_dist = fopen(strcat(strcpy(fname,outf),"_Pw.dat"), "w");
for ( int i = 0; i < nomega; i++) fprintf(freq_dist, "%g %g\n", omega[i], Pw[i]);
fclose(freq_dist);
ipr_freq_dist = fopen(strcat(strcpy(fname,outf),"_Rw.dat"), "w");
for ( int i = 0; i < nomega; i++) fprintf(ipr_freq_dist, "%g %g\n", omega[i], Rw[i]);
fclose(ipr_freq_dist);
mipr_freq_dist = fopen(strcat(strcpy(fname,outf),"_Rmw.dat"), "w");
for ( int i = 0; i < nomega; i++) fprintf(mipr_freq_dist, "%g %g\n", omega[i], Rmw[i]);
fclose(mipr_freq_dist);
// Write the absorption lineshape
// Since the C2R transform is inverse by default, the frequencies have to be negated
// NOTE: to compare with YICUN's code, divide Ftcf by 2
spec_lineshape = fopen(strcat(strcpy(fname,outf),"_irls.dat"),"w");
vv_lineshape = fopen(strcat(strcpy(fname,outf),"_vvls.dat"),"w");
vh_lineshape = fopen(strcat(strcpy(fname,outf),"_vhls.dat"),"w");
factor = 2*PI*HBAR/(dt*(ntcfpoints+nzeros)); // conversion factor to give energy and correct intensity from FFT
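// For an N = ntcfpoints+nzeros point transform with time step dt, the FFT bin
// spacing in angular frequency is 2*PI/(N*dt); multiplying by HBAR (assumed
// here to be given in cm-1*ps, as defined elsewhere in this file) gives the
// bin spacing in cm-1, and avef is added back since it was subtracted from
// the Hamiltonian diagonal.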
for ( int i = (ntcfpoints+nzeros)/2; i < ntcfpoints+nzeros; i++ ) // "negative" FFT frequencies
{
freq = -1*(i-ntcfpoints-nzeros)*factor + avef;
if ( freq <= (user_real_t) omegaStop ) {
fprintf(spec_lineshape, "%g %g\n", freq, Ftcf[i]/(factor*(ntcfpoints+nzeros)));
fprintf(vv_lineshape, "%g %g\n", freq, Ftcfvv[i]/(factor*(ntcfpoints+nzeros)));
fprintf(vh_lineshape, "%g %g\n", freq, Ftcfvh[i]/(factor*(ntcfpoints+nzeros)));
}
}
for ( int i = 0; i < (ntcfpoints+nzeros) / 2 ; i++) // "positive" FFT frequencies
{
freq = -1*i*factor + avef;
if ( freq >= (user_real_t) omegaStart) {
fprintf(spec_lineshape, "%g %g\n", freq, Ftcf[i]/(factor*(ntcfpoints+nzeros)));
fprintf(vv_lineshape, "%g %g\n", freq, Ftcfvv[i]/(factor*(ntcfpoints+nzeros)));
fprintf(vh_lineshape, "%g %g\n", freq, Ftcfvh[i]/(factor*(ntcfpoints+nzeros)));
}
}
fclose(spec_lineshape);
fclose(vv_lineshape);
fclose(vh_lineshape);
// free memory on the CPU and GPU and finalize magma library
magma_queue_destroy( queue );
free(x);
free(Ftcf);
free(Ftcfvv);
free(Ftcfvh);
free(tcf);
free(tcfvv);
free(tcfvh);
free(pdtcf);
free(Rw);
free(Pw);
free(kappa);
free(Rmw);
cudaFree(x_d);
cudaFree(Ftcf_d);
cudaFree(mux_d);
cudaFree(muy_d);
cudaFree(muz_d);
cudaFree(eproj_d);
cudaFree(cmux_d);
cudaFree(cmuy_d);
cudaFree(cmuz_d);
cudaFree(cmux0_d);
cudaFree(cmuy0_d);
cudaFree(cmuz0_d);
cudaFree(tmpmu_d);
cudaFree(axx_d);
cudaFree(ayy_d);
cudaFree(azz_d);
cudaFree(axy_d);
cudaFree(ayz_d);
cudaFree(azx_d);
cudaFree(caxx_d);
cudaFree(cayy_d);
cudaFree(cazz_d);
cudaFree(caxy_d);
cudaFree(cayz_d);
cudaFree(cazx_d);
cudaFree(caxx0_d);
cudaFree(cayy0_d);
cudaFree(cazz0_d);
cudaFree(caxy0_d);
cudaFree(cayz0_d);
cudaFree(cazx0_d);
cudaFree(F_d);
if ( ALLOCATE_2DGPU_ONCE )
{
cudaFree(kappa_d);
cudaFree(ckappa_d);
cudaFree(ctmpmat_d);
cudaFree(prop_d);
}
magma_free(pdtcf_d);
// free memory used for diagonalization
if ( SSYEVD_ALLOC_FLAG == 0 )
{
free(w);
free(iwork);
magma_free_pinned( work );
magma_free_pinned( wA );
}
// free memory used in spectral density calculation
// CPU arrays
free(omega);
free(Sw);
free(tmpSw);
// GPU arrays
cudaFree(MUX_d);
cudaFree(MUY_d);
cudaFree(MUZ_d);
cudaFree(omega_d);
cudaFree(Sw_d);
cudaFree(w_d);
// final call to finalize magma math library
magma_finalize();
end = time(NULL);
printf("\n>>> Done with the calculation in %f seconds.\n", difftime(end,start));
return 0;
}
/**********************************************************
BUILD ELECTRIC FIELD PROJECTION ALONG OH BONDS
GPU FUNCTION
**********************************************************/
__global__
void get_eproj_GPU( rvec *x, float boxx, float boxy, float boxz, int natoms, int natom_mol,
int nchrom, int nchrom_mol, int nmol, int model, user_real_t *eproj )
{
int n, m, i, j, istart, istride;
int chrom;
user_real_t mox[XDR_DIM]; // oxygen position on molecule m
user_real_t mx[XDR_DIM]; // atom position on molecule m
user_real_t nhx[XDR_DIM]; // hydrogen position on molecule n of the current chromophore
user_real_t nox[XDR_DIM]; // oxygen position on molecule n
user_real_t nohx[XDR_DIM]; // the unit vector pointing along the OH bond for the current chromophore
user_real_t mom[XDR_DIM]; // the OM vector on molecule m
user_real_t dr[XDR_DIM]; // the min image vector between two atoms
user_real_t r; // the distance between two atoms
const float cutoff = 0.7831; // the oh cutoff distance
const float bohr_nm = 18.8973; // convert from bohr to nanometer
user_real_t efield[XDR_DIM]; // the electric field vector
istart = blockIdx.x * blockDim.x + threadIdx.x;
istride = blockDim.x * gridDim.x;
// Loop over the chromophores belonging to the current thread
for ( chrom = istart; chrom < nchrom; chrom += istride )
{
// calculate the molecule hosting the current chromophore
n = chrom / nchrom_mol;
// initialize the electric field vector to zero at this chromophore
efield[0] = 0.;
efield[1] = 0.;
efield[2] = 0.;
// *** GET INFO ABOUT MOLECULE N HOSTING CHROMOPHORE *** //
// N IS OUR REFERENCE MOLECULE //
// get the position of the hydrogen associated with the current stretch
// NOTE: I'm making some assumptions about the ordering of the positions,
// this can be changed if necessary for a more robust program
// Throughout, I assume that the atoms are grouped into molecules and that
// within each molecule the atoms are ordered OW, HW1, HW2, MW (offsets 0, 1, 2, 3)
if ( chrom % 2 == 0 ){ //HW1
nhx[0] = x[ n*natom_mol + 1 ][0];
nhx[1] = x[ n*natom_mol + 1 ][1];
nhx[2] = x[ n*natom_mol + 1 ][2];
}
else if ( chrom % 2 == 1 ){ //HW2
nhx[0] = x[ n*natom_mol + 2 ][0];
nhx[1] = x[ n*natom_mol + 2 ][1];
nhx[2] = x[ n*natom_mol + 2 ][2];
}
// The oxygen position
nox[0] = x[ n*natom_mol ][0];
nox[1] = x[ n*natom_mol ][1];
nox[2] = x[ n*natom_mol ][2];
// The oh unit vector
nohx[0] = minImage( nhx[0] - nox[0], boxx );
nohx[1] = minImage( nhx[1] - nox[1], boxy );
nohx[2] = minImage( nhx[2] - nox[2], boxz );
r = mag3(nohx);
nohx[0] /= r;
nohx[1] /= r;
nohx[2] /= r;
// for testing with YICUN -- can change to ROH later...
//nohx[0] /= 0.09572;
//nohx[1] /= 0.09572;
//nohx[2] /= 0.09572;
// *** DONE WITH MOLECULE N *** //
// *** LOOP OVER ALL OTHER MOLECULES *** //
for ( m = 0; m < nmol; m++ ){
// skip the reference molecule
if ( m == n ) continue;
// get oxygen position on current molecule
mox[0] = x[ m*natom_mol ][0];
mox[1] = x[ m*natom_mol ][1];
mox[2] = x[ m*natom_mol ][2];
// find displacement between oxygen on m and hydrogen on n
dr[0] = minImage( mox[0] - nhx[0], boxx );
dr[1] = minImage( mox[1] - nhx[1], boxy );
dr[2] = minImage( mox[2] - nhx[2], boxz );
r = mag3(dr);
// skip if the distance is greater than the cutoff
if ( r > cutoff ) continue;
// loop over all atoms in the current molecule and calculate the electric field
// (excluding the oxygen atoms since they have no charge)
for ( i=1; i < natom_mol; i++ ){
// position of current atom
mx[0] = x[ m*natom_mol + i ][0];
mx[1] = x[ m*natom_mol + i ][1];
mx[2] = x[ m*natom_mol + i ][2];
// Move m site to TIP4P distance if model is E3B3 or TIP4P2005 -- this must be done to use the TIP4P map
if ( i == 3 )
{
if ( model != 0 )
{
// get the OM unit vector
mom[0] = minImage( mx[0] - mox[0], boxx );
mom[1] = minImage( mx[1] - mox[1], boxy );
mom[2] = minImage( mx[2] - mox[2], boxz );
r = mag3(mom);
// TIP4P OM distance is 0.015 nm along the OM bond
mx[0] = mox[0] + 0.0150*mom[0]/r;
mx[1] = mox[1] + 0.0150*mom[1]/r;
mx[2] = mox[2] + 0.0150*mom[2]/r;
}
}
// the minimum image displacement between the reference hydrogen and the current atom
// NOTE: this converted to bohr so the efield will be in au
dr[0] = minImage( nhx[0] - mx[0], boxx )*bohr_nm;
dr[1] = minImage( nhx[1] - mx[1], boxy )*bohr_nm;
dr[2] = minImage( nhx[2] - mx[2], boxz )*bohr_nm;
r = mag3(dr);
// Add the contribution of the current atom to the electric field
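// The charges below are the TIP4P-family partial charges (+0.52 e on each
// hydrogen, -1.04 e on the M site); each contributes a point-charge field
// q*dr/r^3 in atomic units since dr was converted to bohr above.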
if ( i < 3 ){ // HW1 and HW2
for ( j=0; j < XDR_DIM; j++){
efield[j] += 0.52 * dr[j] / (r*r*r);
}
}
else if ( i == 3 ){ // MW (note the negative sign)
for ( j=0; j < XDR_DIM; j++){
efield[j] -= 1.04 * dr[j] / (r*r*r);
}
}
} // end loop over atoms in molecule m
} // end loop over molecules m
// project the efield along the OH bond to get the relevant value for the map
eproj[chrom] = dot3( efield, nohx );
} // end loop over reference chromophores
}
/**********************************************************
BUILD HAMILTONIAN AND RETURN TRANSITION DIPOLE VECTOR
FOR EACH CHROMOPHORE ON THE GPU
**********************************************************/
__global__
void get_kappa_GPU( rvec *x, float boxx, float boxy, float boxz, int natoms, int natom_mol, int nchrom, int nchrom_mol, int nmol,
user_real_t *eproj, user_real_t *kappa, user_real_t *mux, user_real_t *muy, user_real_t *muz, user_real_t *axx,
user_real_t *ayy, user_real_t *azz, user_real_t *axy, user_real_t *ayz, user_real_t *azx, user_real_t avef, int ispecies,
int imap)
{
int n, m, istart, istride;
int chromn, chromm;
user_real_t mox[XDR_DIM]; // oxygen position on molecule m
user_real_t mhx[XDR_DIM]; // atom position on molecule m
user_real_t nhx[XDR_DIM]; // hydrogen position on molecule n of the current chromophore
user_real_t nox[XDR_DIM]; // oxygen position on molecule n
user_real_t noh[XDR_DIM];
user_real_t moh[XDR_DIM];
user_real_t nmu[XDR_DIM];
user_real_t mmu[XDR_DIM];
user_real_t mmuprime;
user_real_t nmuprime;
user_real_t dr[XDR_DIM]; // the min image vector between two atoms
user_real_t r; // the distance between two atoms
const user_real_t bohr_nm = 18.8973; // convert from bohr to nanometer
const user_real_t cm_hartree = 2.1947463E5; // convert from cm-1 to hartree
user_real_t En, Em; // the electric field projection
user_real_t xn, xm, pn, pm; // the x and p from the map
user_real_t wn, wm; // the energies
// define the maps
user_real_t map_w[3], map_x[2], map_p[2], map_mup[3], map_wi[3];
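// The empirical maps used below give, for each chromophore, the transition
// frequency w = w0 + w1*E + w2*E^2, the position matrix element
// x = x0 + x1*w, the momentum matrix element p = p0 + p1*w, the dipole
// derivative mu' = mup0 + mup1*E + mup2*E^2, and the intramolecular coupling
// (wi0 + wi1*(En+Em))*xn*xm + wi2*pn*pm, where E is the electric field
// projected along the OH bond.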
// 2013 maps from gruenbaum
if ( imap == 0 ){
// H2O and HOD/D2O
if ( ispecies == 0 || ispecies == 1 ){
map_w[0] = 3670.2; map_w[1] = -3541.7; map_w[2] = -152677.0;
map_x[0] = 0.19285; map_x[1] = -1.7261E-5;
map_p[0] = 1.6466; map_p[1] = 5.7692E-4;
}
// D2O and HOD/H2O
if ( ispecies == 2 || ispecies == 3 ){
map_w[0] = 2767.8; map_w[1] = -2630.3; map_w[2] = -102601.0;
map_x[0] = 0.16593; map_x[1] = -2.0632E-5;
map_p[0] = 2.0475; map_p[1] = 8.9108E-4;
}
map_mup[0] = 0.1646; map_mup[1] = 11.39; map_mup[2] = 63.41;
map_wi[0] = -1361.0; map_wi[1] = 27165.0; map_wi[2] = -1.887;
}
// 2010 map from Li and Skinner
else if ( imap == 1 )
{
// H2O and HOD/D2O
if ( ispecies == 0 || ispecies == 1 ){
map_w[0] = 3732.9; map_w[1] = -3519.8; map_w[2] = -153520.0;
map_x[0] = 0.19318; map_x[1] = -1.7248E-5;
map_p[0] = 1.6102; map_p[1] = 5.8697E-4;
}
// D2O and HOD/H2O
if ( ispecies == 2 || ispecies == 3 ){
map_w[0] = 2748.2; map_w[1] = -2572.2; map_w[2] = -102980.0;
map_x[0] = 0.16598; map_x[1] = -2.0752E-5;
map_p[0] = 1.9813; map_p[1] = 9.1419E-4;
}
// note the wi have to be converted from hartree to cm from
// the values in the table of the paper
map_mup[0] = 0.1622; map_mup[1] = 10.381; map_mup[2] = 137.6;
map_wi[0] = -1360.8; map_wi[1] = 27171.0; map_wi[2] = -1.887;
}
istart = blockIdx.x * blockDim.x + threadIdx.x;
istride = blockDim.x * gridDim.x;
// Loop over the chromophores belonging to the current thread and fill in kappa for that row
for ( chromn = istart; chromn < nchrom; chromn += istride )
{
// calculate the molecule hosting the current chromophore
// and get the corresponding electric field at the relevant hydrogen
n = chromn / nchrom_mol;
En = eproj[chromn];
// get parameters from the map
wn = map_w[0] + map_w[1]*En + map_w[2]*En*En;
xn = map_x[0] + map_x[1]*wn;
pn = map_p[0] + map_p[1]*wn;
nmuprime = map_mup[0] + map_mup[1]*En + map_mup[2]*En*En;
// and calculate the location of the transition dipole moment
// SEE get_eproj_GPU for assumptions about the ordering of atoms
nox[0] = x[ n*natom_mol ][0];
nox[1] = x[ n*natom_mol ][1];
nox[2] = x[ n*natom_mol ][2];
if ( chromn % 2 == 0 ) //HW1
{
nhx[0] = x[ n*natom_mol + 1 ][0];
nhx[1] = x[ n*natom_mol + 1 ][1];
nhx[2] = x[ n*natom_mol + 1 ][2];
}
else if ( chromn % 2 == 1 ) //HW2
{
nhx[0] = x[ n*natom_mol + 2 ][0];
nhx[1] = x[ n*natom_mol + 2 ][1];
nhx[2] = x[ n*natom_mol + 2 ][2];
}
// The OH unit vector
noh[0] = minImage( nhx[0] - nox[0], boxx );
noh[1] = minImage( nhx[1] - nox[1], boxy );
noh[2] = minImage( nhx[2] - nox[2], boxz );
r = mag3(noh);
noh[0] /= r;
noh[1] /= r;
noh[2] /= r;
// The location of the TDM
nmu[0] = minImage( nox[0] + 0.067 * noh[0], boxx );
nmu[1] = minImage( nox[1] + 0.067 * noh[1], boxy );
nmu[2] = minImage( nox[2] + 0.067 * noh[2], boxz );
// and the TDM vector to return
mux[chromn] = noh[0] * nmuprime * xn;
muy[chromn] = noh[1] * nmuprime * xn;
muz[chromn] = noh[2] * nmuprime * xn;
// and the polarizability
axx[chromn] = (4.6 * noh[0] * noh[0] + 1.0) * xn;
ayy[chromn] = (4.6 * noh[1] * noh[1] + 1.0) * xn;
azz[chromn] = (4.6 * noh[2] * noh[2] + 1.0) * xn;
axy[chromn] = 4.6 * noh[0] * noh[1] * xn;
ayz[chromn] = 4.6 * noh[1] * noh[2] * xn;
azx[chromn] = 4.6 * noh[2] * noh[0] * xn;
// Loop over all other chromophores
for ( chromm = 0; chromm < nchrom; chromm ++ )
{
// calculate the molecule hosting the current chromophore
// and get the corresponding electric field at the relevant hydrogen
m = chromm / nchrom_mol;
Em = eproj[chromm];
// also get the relevant x and p from the map
// get parameters from the map
wm = map_w[0] + map_w[1]*Em + map_w[2]*Em*Em;
xm = map_x[0] + map_x[1]*wm;
pm = map_p[0] + map_p[1]*wm;
mmuprime = map_mup[0] + map_mup[1]*Em + map_mup[2]*Em*Em;
// the diagonal energy
if ( chromn == chromm )
{
// Note that this is a flattened 2d array
// subtract high frequency energies to get rid of highly oscillatory parts of the F matrix
kappa[chromn*nchrom + chromm] = wm - avef;
}
// intramolecular coupling
else if ( m == n )
{
// ** --
// if is HOD/H2O or HOD/D2O, no coupling
if ( ispecies == 1 || ispecies == 2 ){
kappa[chromn * nchrom + chromm ] = 0.;
}
// ** --
else{
kappa[chromn*nchrom + chromm] = (map_wi[0] + map_wi[1]*(En + Em))*xn*xm + map_wi[2]*pn*pm;
}
}
// intermolecular coupling
else
{
// ** --
// if is HOD/H2O or HOD/D2O, no coupling
if ( ispecies == 1 || ispecies == 2 ){
kappa[chromn * nchrom + chromm ] = 0.;
}
// ** --
else{
// calculate the distance between dipoles
// they are located 0.67 A from the oxygen along the OH bond
// tdm position on chromophore m
mox[0] = x[ m*natom_mol ][0];
mox[1] = x[ m*natom_mol ][1];
mox[2] = x[ m*natom_mol ][2];
if ( chromm % 2 == 0 ) //HW1
{
mhx[0] = x[ m*natom_mol + 1 ][0];
mhx[1] = x[ m*natom_mol + 1 ][1];
mhx[2] = x[ m*natom_mol + 1 ][2];
}
else if ( chromm % 2 == 1 ) //HW2
{
mhx[0] = x[ m*natom_mol + 2 ][0];
mhx[1] = x[ m*natom_mol + 2 ][1];
mhx[2] = x[ m*natom_mol + 2 ][2];
}
// The OH unit vector
moh[0] = minImage( mhx[0] - mox[0], boxx );
moh[1] = minImage( mhx[1] - mox[1], boxy );
moh[2] = minImage( mhx[2] - mox[2], boxz );
r = mag3(moh);
moh[0] /= r;
moh[1] /= r;
moh[2] /= r;
// The location of the TDM and the dipole derivative
mmu[0] = minImage( mox[0] + 0.067 * moh[0], boxx );
mmu[1] = minImage( mox[1] + 0.067 * moh[1], boxy );
mmu[2] = minImage( mox[2] + 0.067 * moh[2], boxz );
// the distance between TDM on N and on M and convert to unit vector
dr[0] = minImage( nmu[0] - mmu[0], boxx );
dr[1] = minImage( nmu[1] - mmu[1], boxy );
dr[2] = minImage( nmu[2] - mmu[2], boxz );
r = mag3( dr );
dr[0] /= r;
dr[1] /= r;
dr[2] /= r;
r *= bohr_nm; // convert to bohr
// The coupling in the transition dipole approximation in wavenumber
// Note the conversion to wavenumber
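// kappa_nm = mu'_n * mu'_m * x_n * x_m * [ u_n.u_m - 3*(u_n.r)(u_m.r) ] / r^3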
kappa[chromn*nchrom + chromm] = ( dot3( noh, moh ) - 3.0 * dot3( noh, dr ) *
dot3( moh, dr ) ) / ( r*r*r ) *
xn*xm*nmuprime*mmuprime*cm_hartree;
}
}// end intramolecular coupling
}// end loop over chromm
}// end loop over reference
}
/**********************************************************
Calculate the Spectral Density
**********************************************************/
__global__
void get_spectral_density( user_real_t *w, user_real_t *MUX, user_real_t *MUY, user_real_t *MUZ, user_real_t *omega, user_real_t *Sw,
int nomega, int nchrom, user_real_t t1, user_real_t avef ){
int istart, istride, i, chromn;
user_real_t wi, dw, MU2, gamma;
// split up each desired frequency to separate thread on GPU
istart = blockIdx.x * blockDim.x + threadIdx.x;
istride = blockDim.x * gridDim.x;
// the linewidth parameter
gamma = HBAR/(t1 * 2.0);
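// gamma = HBAR/(2*T1) is the Lorentzian half-width from lifetime broadening;
// each mode n then contributes |mu_n|^2 * (gamma/PI) / ((w - w_n)^2 + gamma^2)
// to the spectral density in the loop below.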
// Loop over the chromophores belonging to the current thread and fill in kappa for that row
for ( i = istart; i < nomega; i += istride )
{
// get current frequency
wi = omega[i];
// Loop over all chromophores, calculating the spectral intensity at the current frequency
for ( chromn = 0; chromn < nchrom; chromn ++ ){
// calculate the TDM squared and get the mode energy
MU2 = MUX[chromn]*MUX[chromn] + MUY[chromn]*MUY[chromn] + MUZ[chromn]*MUZ[chromn];
dw = wi - (w[chromn] + avef) ; // also adjust for avef subtracted from kappa
// add a lorentzian lineshape to the spectral density
Sw[i] += MU2 * gamma / ( dw*dw + gamma*gamma )/PI;
}
}
}
/**********************************************************
HELPER FUNCTIONS FOR GPU CALCULATIONS
CALLABLE FROM CPU AND GPU
**********************************************************/
// The minimum image of a scalar displacement
user_real_t minImage( user_real_t dx, user_real_t boxl )
{
return dx - boxl*round(dx/boxl);
}
// The magnitude of a 3 dimensional vector
user_real_t mag3( user_real_t dx[3] )
{
return sqrt( dot3( dx, dx ) );
}
// The dot product of a 3 dimensional vector
user_real_t dot3( user_real_t x[3], user_real_t y[3] )
{
return x[0]*y[0] + x[1]*y[1] + x[2]*y[2];
}
// cast the matrix from float to complex -- this may not be the best way to do this, but it is quick to implement
__global__
void cast_to_complex_GPU ( user_real_t *d_d, user_complex_t *z_d, magma_int_t n )
{
int istart, istride, i;
// split up each desired frequency to separate thread on GPU
istart = blockIdx.x * blockDim.x + threadIdx.x;
istride = blockDim.x * gridDim.x;
// convert from float to complex
for ( i = istart; i < n; i += istride )
{
z_d[i] = MAGMA_MAKE( d_d[i], 0.0 );
}
}
// initialize the propagation matrix
__global__
void Pinit ( user_complex_t *prop_d, user_real_t *w_d, int n, user_real_t dt )
{
int istart, istride, i, j;
user_real_t arg;
// each will occur on a separate thread on the gpu
istart = blockIdx.x * blockDim.x + threadIdx.x;
istride = blockDim.x * gridDim.x;
for ( i = istart; i < n; i += istride )
{
// zero matrix
for ( j = 0; j < n; j ++ ) prop_d[ i*n + j] = MAGMA_ZERO;
// P = exp(iwt/hbar)
arg = w_d[i] * dt / HBAR;
prop_d[ i*n + i ] = MAGMA_MAKE( cos(arg), sin(arg) );
}
}
// initialize the F matrix on the gpu to the unit matrix
__global__
void makeI ( user_complex_t *mat, int n )
{
int istart, istride, i, j;
// each will occur on a separate thread on the gpu
istart = blockIdx.x * blockDim.x + threadIdx.x;
istride = blockDim.x * gridDim.x;
// set each row of the matrix to the corresponding row of the identity
for ( i = istart; i < n; i += istride )
{
for ( j = 0; j < n; j++ ) mat[ i*n + j ] = MAGMA_ZERO;
mat[ i * n + i ] = MAGMA_ONE;
}
}
// parse input file to setup calculation
void ir_init( char *argv[], char gmxf[], char cptf[], char outf[], char model[], user_real_t *dt, int *ntcfpoints,
int *nsamples, float *sampleEvery, user_real_t *t1, user_real_t *avef, user_real_t *omegaStart, user_real_t *omegaStop,
int *omegaStep, int *natom_mol, int *nchrom_mol, int *nzeros, user_real_t *beginTime,
char species[], int *imap )
{
char para[MAX_STR_LEN];
char value[MAX_STR_LEN];
FILE *inpf = fopen(argv[1],"r");
if ( inpf == NULL )
{
printf("ERROR: Could not open %s. The first argument should contain a valid\nfile name that points to a file containing the simulation parameters.", argv[1]);
exit(EXIT_FAILURE);
}
else printf(">>> Reading parameters from input file %s\n", argv[1]);
// Parse input file
while (fscanf( inpf, "%s%s%*[^\n]", para, value ) != EOF)
{
if ( strcmp(para,"xtcf") == 0 )
{
sscanf( value, "%s", gmxf );
}
else if ( strcmp(para,"outf") == 0 )
{
sscanf( value, "%s", outf );
}
else if ( strcmp(para,"cptf") == 0 )
{
sscanf( value, "%s", cptf );
}
else if ( strcmp(para,"model") == 0 )
{
sscanf( value, "%s", model );
}
else if ( strcmp(para,"ntcfpoints") == 0 )
{
sscanf( value, "%d", (int *) ntcfpoints );
}
else if ( strcmp(para,"nsamples") == 0 )
{
sscanf( value, "%d", (int *) nsamples);
}
else if ( strcmp(para,"sampleEvery") == 0 )
{
sscanf( value, "%f", (float *) sampleEvery );
}
else if ( strcmp(para,"omegaStep") == 0 )
{
sscanf( value, "%d", (int *) omegaStep );
}
else if ( strcmp(para,"natom_mol") == 0 )
{
sscanf( value, "%d", (int *) natom_mol );
}
else if ( strcmp(para,"nchrom_mol") == 0 )
{
sscanf( value, "%d", (int *) nchrom_mol );
}
else if ( strcmp(para,"nzeros") == 0 )
{
sscanf( value, "%d", (int *) nzeros );
}
else if ( strcmp(para,"map") == 0 )
{
sscanf( value, "%d", (int *) imap );
}
else if ( strcmp(para,"species") == 0 )
{
sscanf( value, "%s", species );
}
else if ( strcmp(para,"dt") == 0 )
{
sscanf( value, "%f", dt );
}
else if ( strcmp(para,"t1") == 0 )
{
sscanf( value, "%f", t1 );
}
else if ( strcmp(para,"avef") == 0 )
{
sscanf( value, "%f", avef );
}
else if ( strcmp(para,"beginTime") == 0 )
{
sscanf( value, "%f", beginTime );
}
else if ( strcmp(para,"omegaStart") == 0 )
{
sscanf( value, "%f", (user_real_t *) omegaStart );
}
else if ( strcmp(para,"omegaStop") == 0 )
{
sscanf( value, "%f", (user_real_t *) omegaStop );
}
else
{
printf("WARNING: Parameter %s in input file %s not recognized, ignoring.\n", para, argv[1]);
}
}
fclose(inpf);
printf(">>> Done reading input file and setting parameters\n");
}
// Progress bar to keep updated on tcf
void printProgress( int currentStep, int totalSteps )
{
user_real_t percentage = (user_real_t) currentStep / (user_real_t) totalSteps;
int lpad = (int) (percentage*PWID);
int rpad = PWID - lpad;
fprintf(stderr, "\r [%.*s%*s]%3d%%", lpad, PSTR, rpad, "",(int) (percentage*100));
}
|
c0a1ad54932f8d42e709f038ceeff16cf02dd0a2.hip | // !!! This is a file automatically generated by hipify!!!
/* ===========================================================================
Project: Volume Transform Library
Description: Performs volume transform operations
Copyright (C) 2014 Lucas Sherman
Lucas Sherman, email: [email protected]
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
=========================================================================== */
// Include Header
#include "Conv.h"
// Include Dependencies
#include "VoxVolt/Impl/VolumeBlocker.h"
#include "VoxLib/Error/CudaError.h"
#include "VoxLib/Core/Logging.h"
#include <chrono>
// Sinc function
#include "boost/math/special_functions/sinc.hpp"
// CUDA API headers
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define KERNEL_BLOCK_W 16
#define KERNEL_BLOCK_H 16
#define KERNEL_BLOCK_SIZE (KERNEL_BLOCK_W * KERNEL_BLOCK_H)
#define MAX_KERNEL_SIZE 16
namespace vox {
namespace volt {
namespace {
namespace filescope {
__constant__ float gd_kernel[MAX_KERNEL_SIZE*MAX_KERNEL_SIZE*MAX_KERNEL_SIZE]; ///< Kernel matrix
surface<void, 3> gd_volumeTexOut; ///< Surface for convolved volume data output
// ----------------------------------------------------------------------------
// Textures for each of the supported volume data types
// ----------------------------------------------------------------------------
#define VOX_TEXTURE(T) texture<##T,3,hipReadModeNormalizedFloat> gd_volumeTexIn_##T
VOX_TEXTURE(Int8);
VOX_TEXTURE(UInt8);
VOX_TEXTURE(Int16);
VOX_TEXTURE(UInt16);
#undef VOX_TEXTURE
// ----------------------------------------------------------------------------
// Boiler plate for simplifying the texture sampling calls across data types
// ----------------------------------------------------------------------------
template<typename T> VOX_DEVICE float fetchSample(float x, float y, float z) { return 0.f; }
#define TEMPLATE(T) template<> VOX_DEVICE float fetchSample<##T>(float x, float y, float z) \
{ return tex3D(gd_volumeTexIn_##T, x, y, z); }
TEMPLATE(Int8)
TEMPLATE(UInt8)
TEMPLATE(UInt16)
TEMPLATE(Int16)
#undef TEMPLATE
// ----------------------------------------------------------------------------
// Convolution kernel for a non-separable filter
// ----------------------------------------------------------------------------
template<typename T>
__global__ void convKernel(Vector3u apron, hipExtent blockSize, Vector2f range)
{
extern __shared__ float cache[];
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= blockSize.width || y >= blockSize.height) return;
for (int z = 0; z < blockSize.depth; z++)
{
float sum = 0.0f;
float * filter = gd_kernel;
// Compute point convolution
int mbegin = x-apron[0]; int mend = x+apron[0];
int nbegin = y-apron[1]; int nend = y+apron[1];
int obegin = z-apron[2]; int oend = z+apron[2];
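// gd_kernel is walked linearly, one tap per sample; this assumes the
// Image3D kernel data is stored x-fastest, matching the (m, n, o) loop
// order below. Out-of-range samples are clamped to the block edge by the
// texture addressing mode set in bindBuffers.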
for (int o = obegin; o <= oend; o++)
for (int n = nbegin; n <= nend; n++)
for (int m = mbegin; m <= mend; m++)
{
sum += *filter * fetchSample<T>(m, n, o); filter++;
}
surf3Dwrite<T>((T)(range[0] + range[1]*sum), gd_volumeTexOut, x*sizeof(T), y, z);
}
}
// ----------------------------------------------------------------------------
// Convolution kernel for separable filters
// ----------------------------------------------------------------------------
template<typename T>
__global__ void convSepKernel()
{
}
// ----------------------------------------------------------------------------
// Binds the buffers in a volume blocker to the convolution kernel handles
// ----------------------------------------------------------------------------
#define VOX_SETUP_TEX(T) \
case Volume::Type_##T: { \
filescope::gd_volumeTexIn_##T.normalized = false; \
filescope::gd_volumeTexIn_##T.filterMode = hipFilterModePoint; \
filescope::gd_volumeTexIn_##T.addressMode[0] = hipAddressModeClamp; \
filescope::gd_volumeTexIn_##T.addressMode[1] = hipAddressModeClamp; \
filescope::gd_volumeTexIn_##T.addressMode[2] = hipAddressModeClamp; \
VOX_CUDA_CHECK(hipBindTextureToArray(filescope::gd_volumeTexIn_##T, \
blocker.arrayIn(), blocker.formatIn())); \
break; }
void bindBuffers(VolumeBlocker & blocker)
{
auto volume = blocker.volume();
// Bind the volume data buffers
auto type = volume->type();
switch (type)
{
VOX_SETUP_TEX(UInt8);
VOX_SETUP_TEX(UInt16);
VOX_SETUP_TEX(Int8);
VOX_SETUP_TEX(Int16);
default: throw Error(__FILE__, __LINE__, VOLT_LOG_CAT,
format("Unsupported volume data type (%1%)", Volume::typeToString(type)),
Error_NotImplemented);
}
VOX_CUDA_CHECK(hipBindSurfaceToArray(gd_volumeTexOut, blocker.arrayOut()));
}
// ----------------------------------------------------------------------------
// Normalizes a vector so the distribution sums to 1
// ----------------------------------------------------------------------------
void normalize(std::vector<float> & vector)
{
float sum = 0;
BOOST_FOREACH (auto & elem, vector)
sum += elem;
BOOST_FOREACH (auto & elem, vector)
elem /= sum;
}
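// Example: the vector {1, 2, 1} becomes {0.25, 0.5, 0.25}.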
} // namespace filescope
} // namespace anonymous
// ----------------------------------------------------------------------------
// Executes the convolution kernel on the input volume data
// ----------------------------------------------------------------------------
std::shared_ptr<Volume> Conv::execute(std::shared_ptr<Volume> volume, Image3D<float> kernel, Volume::Type type)
{
// Verify the kernel is of odd dimensions
if (!(kernel.width() % 2 && kernel.height() % 2 && kernel.depth() % 2))
throw Error(__FILE__, __LINE__, VOLT_LOG_CAT,
"Kernel size must be odd", Error_Range);
// Check the kernel dimensions against the library limit
if (kernel.width() > MAX_KERNEL_SIZE ||
kernel.height() > MAX_KERNEL_SIZE ||
kernel.depth() > MAX_KERNEL_SIZE)
throw Error(__FILE__, __LINE__, VOLT_LOG_CAT, "Kernel size exceeds library limit", Error_Range);
Vector3u apron = (kernel.dims() - Vector3u(1)) / 2;
// Copy the kernel into device memory
VOX_CUDA_CHECK(hipMemcpyToSymbol(filescope::gd_kernel,
kernel.data(), sizeof(float)*kernel.dims().fold(mul)));
// Create the start event for performance timing
hipEvent_t start, stop;
VOX_CUDA_CHECK(hipEventCreate(&start));
VOX_CUDA_CHECK(hipEventRecord(start,0));
// Initialize the volume block loader/scheduler
auto outType = (type == Volume::Type_End) ? volume->type() : type;
VolumeBlocker blocker(volume, apron, outType);
// Setup the execution configuration
auto blockSize = blocker.blockSize();
dim3 threads(KERNEL_BLOCK_W, KERNEL_BLOCK_H);
dim3 blocks(
(blockSize.width + threads.x - 1) / threads.x,
(blockSize.height + threads.y - 1) / threads.y
);
unsigned int shared = 1024;
// Execute the blocked volume convolution
blocker.begin();
filescope::bindBuffers(blocker); // Bind the data buffers
while (!blocker.atEnd())
{
auto blockIndex = blocker.loadNext();
// Execute the convolution kernel call
switch (volume->type())
{
case Volume::Type_Int8: hipLaunchKernelGGL(( filescope::convKernel<Int8>) , dim3(blocks),dim3(threads),shared, 0, apron, blockSize,
Vector2f(std::numeric_limits<Int8>::min(), std::numeric_limits<Int8>::max())); break;
case Volume::Type_UInt8: hipLaunchKernelGGL(( filescope::convKernel<UInt8>) , dim3(blocks),dim3(threads),shared, 0, apron, blockSize,
Vector2f(std::numeric_limits<UInt8>::min(), std::numeric_limits<UInt8>::max())); break;
case Volume::Type_UInt16:hipLaunchKernelGGL(( filescope::convKernel<UInt16>) , dim3(blocks),dim3(threads),shared, 0, apron, blockSize,
Vector2f(std::numeric_limits<UInt16>::min(), std::numeric_limits<UInt16>::max())); break;
case Volume::Type_Int16: hipLaunchKernelGGL(( filescope::convKernel<Int16>) , dim3(blocks),dim3(threads),shared, 0, apron, blockSize,
Vector2f(0, std::numeric_limits<Int16>::max())); break;
default: throw Error(__FILE__, __LINE__, VOLT_LOG_CAT, "Unsupported volume data type", Error_NotImplemented);
}
VOX_CUDA_CHECK(hipDeviceSynchronize());
blocker.readNext();
}
auto result = blocker.finish();
//Create the stop event for performance timing
VOX_CUDA_CHECK(hipEventCreate(&stop));
VOX_CUDA_CHECK(hipEventRecord(stop,0));
VOX_CUDA_CHECK(hipEventSynchronize(stop));
// Compute the time elapsed during GPU execution
float elapsedTime;
VOX_CUDA_CHECK(hipEventElapsedTime(&elapsedTime, start, stop));
VOX_LOG_INFO(VOLT_LOG_CAT, format("Convolution completed in %1% ms", elapsedTime));
return result;
}
// ----------------------------------------------------------------------------
// Resamples the volume data using lanczos filtering
// ----------------------------------------------------------------------------
std::shared_ptr<Volume> Conv::lanczos(std::shared_ptr<Volume> volume, Vector4u newSize, Volume::Type type)
{
static const unsigned int a = 3;
// Compute the lanczos resampling window
auto extent = volume->extent();
Vector4f step; // resample step distance
Vector3u fsize; // filter width
for (int i = 0; i < 3; i++)
{
step[i] = newSize[i] / extent[i];
fsize[i] = (a / step[i]);
}
// Create the start event for performance timing
hipEvent_t start, stop;
VOX_CUDA_CHECK(hipEventCreate(&start));
VOX_CUDA_CHECK(hipEventRecord(start,0));
//Create the stop event for performance timing
VOX_CUDA_CHECK(hipEventCreate(&stop));
VOX_CUDA_CHECK(hipEventRecord(stop,0));
VOX_CUDA_CHECK(hipEventSynchronize(stop));
// Compute the time elapsed during GPU execution
float elapsedTime;
VOX_CUDA_CHECK(hipEventElapsedTime(&elapsedTime, start, stop));
VOX_LOG_INFO(VOLT_LOG_CAT, format("Lanczos completed in %1% ms", elapsedTime));
return volume;
}
// ----------------------------------------------------------------------------
// Constructs a laplace kernel
// ----------------------------------------------------------------------------
void Conv::makeLaplaceKernel(Image3D<float> & kernel)
{
kernel.resize(3, 3, 3);
auto data = kernel.buffer().get();
float vals[] = {
0, 0, 0,
0, 1, 0,
0, 0, 0,
0, 1, 0,
1, -6, 1,
0, 1, 0,
0, 0, 0,
0, 1, 0,
0, 0, 0,
};
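// The stencil above is the standard 7-point discrete Laplacian:
// the six face neighbours are weighted +1 around a centre weight of -6.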
memcpy(data, vals, kernel.size()*sizeof(float));
}
// ----------------------------------------------------------------------------
// Constructs a gaussian kernel of the given size
// ----------------------------------------------------------------------------
void Conv::makeGaussianKernel(std::vector<float> & out, float variance, unsigned int size)
{
unsigned int width = size ? size : ceil(6.f*variance);
if (!(width % 2)) width++;
out.resize(width);
float var2 = variance * variance;
float K = 1 / (sqrt(2 * M_PI * var2));
unsigned int o = (width-1) / 2;
if (width%2)
{
for (unsigned int i = 0; i <= o; i++)
{
float val = K * expf(-(float)(i*i) / (2.f * var2));
out[o+i] = val;
out[o-i] = val;
}
}
else
{
for (unsigned int i = 0; i <= o; i++)
{
float x = i + 0.5f;
float val = K * exp(-x*x / (2 * var2));
out[o+i+1] = val;
out[o-i] = val;
}
}
filescope::normalize(out);
}
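// Example: variance = 1 and size = 0 give a width of 7, and after
// normalization the taps are approximately
// { 0.0044, 0.0540, 0.2420, 0.3990, 0.2420, 0.0540, 0.0044 }.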
// ----------------------------------------------------------------------------
// Constructs a mean kernel of the given size
// ----------------------------------------------------------------------------
void Conv::makeMeanKernel(std::vector<float> & out, unsigned int size)
{
out.resize(size, 1.0f / (float)size);
}
// ----------------------------------------------------------------------------
// Constructs a hamming kernel of the given size
// ----------------------------------------------------------------------------
void Conv::makeHammingKernel(std::vector<float> & out, float freq, float a, float b, unsigned int size)
{
auto width = size ? size : 0;
out.resize(width);
}
} // namespace volt
} // namespace vox | c0a1ad54932f8d42e709f038ceeff16cf02dd0a2.cu | /* ===========================================================================
Project: Volume Transform Library
Description: Performs volume transform operations
Copyright (C) 2014 Lucas Sherman
Lucas Sherman, email: [email protected]
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
=========================================================================== */
// Include Header
#include "Conv.h"
// Include Dependencies
#include "VoxVolt/Impl/VolumeBlocker.h"
#include "VoxLib/Error/CudaError.h"
#include "VoxLib/Core/Logging.h"
#include <chrono>
// Sinc function
#include "boost/math/special_functions/sinc.hpp"
// CUDA API headers
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define KERNEL_BLOCK_W 16
#define KERNEL_BLOCK_H 16
#define KERNEL_BLOCK_SIZE (KERNEL_BLOCK_W * KERNEL_BLOCK_H)
#define MAX_KERNEL_SIZE 16
namespace vox {
namespace volt {
namespace {
namespace filescope {
__constant__ float gd_kernel[MAX_KERNEL_SIZE*MAX_KERNEL_SIZE*MAX_KERNEL_SIZE]; ///< Kernel matrix
surface<void, 3> gd_volumeTexOut; ///< Surface for convolved volume data output
// ----------------------------------------------------------------------------
// Textures for each of the supported volume data types
// ----------------------------------------------------------------------------
#define VOX_TEXTURE(T) texture<##T,3,cudaReadModeNormalizedFloat> gd_volumeTexIn_##T
VOX_TEXTURE(Int8);
VOX_TEXTURE(UInt8);
VOX_TEXTURE(Int16);
VOX_TEXTURE(UInt16);
#undef VOX_TEXTURE
// ----------------------------------------------------------------------------
// Boiler plate for simplifying the texture sampling calls across data types
// ----------------------------------------------------------------------------
template<typename T> VOX_DEVICE float fetchSample(float x, float y, float z) { return 0.f; }
#define TEMPLATE(T) template<> VOX_DEVICE float fetchSample<##T>(float x, float y, float z) \
{ return tex3D(gd_volumeTexIn_##T, x, y, z); }
TEMPLATE(Int8)
TEMPLATE(UInt8)
TEMPLATE(UInt16)
TEMPLATE(Int16)
#undef TEMPLATE
// ----------------------------------------------------------------------------
// Convolution kernel for a non-separable convolution kernel
// ----------------------------------------------------------------------------
template<typename T>
__global__ void convKernel(Vector3u apron, cudaExtent blockSize, Vector2f range)
{
extern __shared__ float cache[];
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= blockSize.width || y >= blockSize.height) return;
for (int z = 0; z < blockSize.depth; z++)
{
float sum = 0.0f;
float * filter = gd_kernel;
// Compute point convolution
int mbegin = x-apron[0]; int mend = x+apron[0];
int nbegin = y-apron[1]; int nend = y+apron[1];
int obegin = z-apron[2]; int oend = z+apron[2];
for (int o = obegin; o <= oend; o++)
for (int n = nbegin; n <= nend; n++)
for (int m = mbegin; m <= mend; m++)
{
sum += *filter * fetchSample<T>(m, n, o); filter++;
}
surf3Dwrite<T>((T)(range[0] + range[1]*sum), gd_volumeTexOut, x*sizeof(T), y, z);
}
}
// ----------------------------------------------------------------------------
// Convolution kernel for separable filters
// ----------------------------------------------------------------------------
template<typename T>
__global__ void convSepKernel()
{
}
// ----------------------------------------------------------------------------
// Binds the buffers in a volume blocker to the convolution kernel handles
// ----------------------------------------------------------------------------
#define VOX_SETUP_TEX(T) \
case Volume::Type_##T: { \
filescope::gd_volumeTexIn_##T.normalized = false; \
filescope::gd_volumeTexIn_##T.filterMode = cudaFilterModePoint; \
filescope::gd_volumeTexIn_##T.addressMode[0] = cudaAddressModeClamp; \
filescope::gd_volumeTexIn_##T.addressMode[1] = cudaAddressModeClamp; \
filescope::gd_volumeTexIn_##T.addressMode[2] = cudaAddressModeClamp; \
VOX_CUDA_CHECK(cudaBindTextureToArray(filescope::gd_volumeTexIn_##T, \
blocker.arrayIn(), blocker.formatIn())); \
break; }
void bindBuffers(VolumeBlocker & blocker)
{
auto volume = blocker.volume();
// Bind the volume data buffers
auto type = volume->type();
switch (type)
{
VOX_SETUP_TEX(UInt8);
VOX_SETUP_TEX(UInt16);
VOX_SETUP_TEX(Int8);
VOX_SETUP_TEX(Int16);
default: throw Error(__FILE__, __LINE__, VOLT_LOG_CAT,
format("Unsupported volume data type (%1%)", Volume::typeToString(type)),
Error_NotImplemented);
}
VOX_CUDA_CHECK(cudaBindSurfaceToArray(gd_volumeTexOut, blocker.arrayOut()));
}
// ----------------------------------------------------------------------------
// Normalizes a vector so the distribution sums to 1
// ----------------------------------------------------------------------------
void normalize(std::vector<float> & vector)
{
float sum = 0;
BOOST_FOREACH (auto & elem, vector)
sum += elem;
BOOST_FOREACH (auto & elem, vector)
elem /= sum;
}
} // namespace filescope
} // namespace anonymous
// ----------------------------------------------------------------------------
// Executes the convolution kernel on the input volume data
// ----------------------------------------------------------------------------
std::shared_ptr<Volume> Conv::execute(std::shared_ptr<Volume> volume, Image3D<float> kernel, Volume::Type type)
{
// Verify the kernel is of odd dimensions
if (!(kernel.width() % 2 && kernel.height() % 2 && kernel.depth() % 2))
throw Error(__FILE__, __LINE__, VOLT_LOG_CAT,
"Kernel size must be odd", Error_Range);
// Check the kernel dimensions against the library limit
if (kernel.width() > MAX_KERNEL_SIZE ||
kernel.height() > MAX_KERNEL_SIZE ||
kernel.depth() > MAX_KERNEL_SIZE)
throw Error(__FILE__, __LINE__, VOLT_LOG_CAT, "Kernel size exceeds library limit", Error_Range);
Vector3u apron = (kernel.dims() - Vector3u(1)) / 2;
// Copy the kernel into device memory
VOX_CUDA_CHECK(cudaMemcpyToSymbol(filescope::gd_kernel,
kernel.data(), sizeof(float)*kernel.dims().fold(mul)));
// Create the start event for performance timing
cudaEvent_t start, stop;
VOX_CUDA_CHECK(cudaEventCreate(&start));
VOX_CUDA_CHECK(cudaEventRecord(start,0));
// Initialize the volume block loader/scheduler
auto outType = (type == Volume::Type_End) ? volume->type() : type;
VolumeBlocker blocker(volume, apron, outType);
// Setup the execution configuration
auto blockSize = blocker.blockSize();
dim3 threads(KERNEL_BLOCK_W, KERNEL_BLOCK_H);
dim3 blocks(
(blockSize.width + threads.x - 1) / threads.x,
(blockSize.height + threads.y - 1) / threads.y
);
unsigned int shared = 1024;
// Execute the blocked volume convolution
blocker.begin();
filescope::bindBuffers(blocker); // Bind the data buffers
while (!blocker.atEnd())
{
auto blockIndex = blocker.loadNext();
// Execute the convolution kernel call
switch (volume->type())
{
case Volume::Type_Int8: filescope::convKernel<Int8> <<<blocks,threads,shared>>> (apron, blockSize,
Vector2f(std::numeric_limits<Int8>::min(), std::numeric_limits<Int8>::max())); break;
case Volume::Type_UInt8: filescope::convKernel<UInt8> <<<blocks,threads,shared>>> (apron, blockSize,
Vector2f(std::numeric_limits<UInt8>::min(), std::numeric_limits<UInt8>::max())); break;
case Volume::Type_UInt16: filescope::convKernel<UInt16> <<<blocks,threads,shared>>> (apron, blockSize,
Vector2f(std::numeric_limits<UInt16>::min(), std::numeric_limits<UInt16>::max())); break;
case Volume::Type_Int16: filescope::convKernel<Int16> <<<blocks,threads,shared>>> (apron, blockSize,
Vector2f(0, std::numeric_limits<Int16>::max())); break;
default: throw Error(__FILE__, __LINE__, VOLT_LOG_CAT, "Unsupported volume data type", Error_NotImplemented);
}
VOX_CUDA_CHECK(cudaDeviceSynchronize());
blocker.readNext();
}
auto result = blocker.finish();
//Create the stop event for performance timing
VOX_CUDA_CHECK(cudaEventCreate(&stop));
VOX_CUDA_CHECK(cudaEventRecord(stop,0));
VOX_CUDA_CHECK(cudaEventSynchronize(stop));
// Compute the time elapsed during GPU execution
float elapsedTime;
VOX_CUDA_CHECK(cudaEventElapsedTime(&elapsedTime, start, stop));
VOX_LOG_INFO(VOLT_LOG_CAT, format("Convolution completed in %1% ms", elapsedTime));
return result;
}
// ----------------------------------------------------------------------------
// Resamples the volume data using lanczos filtering
// ----------------------------------------------------------------------------
std::shared_ptr<Volume> Conv::lanczos(std::shared_ptr<Volume> volume, Vector4u newSize, Volume::Type type)
{
static const unsigned int a = 3;
// Compute the lanczos resampling window
auto extent = volume->extent();
Vector4f step; // resample step distance
Vector3u fsize; // filter width
for (int i = 0; i < 3; i++)
{
step[i] = newSize[i] / extent[i];
fsize[i] = (a / step[i]);
}
// Create the start event for performance timing
cudaEvent_t start, stop;
VOX_CUDA_CHECK(cudaEventCreate(&start));
VOX_CUDA_CHECK(cudaEventRecord(start,0));
//Create the stop event for performance timing
VOX_CUDA_CHECK(cudaEventCreate(&stop));
VOX_CUDA_CHECK(cudaEventRecord(stop,0));
VOX_CUDA_CHECK(cudaEventSynchronize(stop));
// Compute the time elapsed during GPU execution
float elapsedTime;
VOX_CUDA_CHECK(cudaEventElapsedTime(&elapsedTime, start, stop));
VOX_LOG_INFO(VOLT_LOG_CAT, format("Lanczos completed in %1% ms", elapsedTime));
return volume;
}
// ----------------------------------------------------------------------------
// Constructs a laplace kernel
// ----------------------------------------------------------------------------
void Conv::makeLaplaceKernel(Image3D<float> & kernel)
{
kernel.resize(3, 3, 3);
auto data = kernel.buffer().get();
float vals[] = {
0, 0, 0,
0, 1, 0,
0, 0, 0,
0, 1, 0,
1, -6, 1,
0, 1, 0,
0, 0, 0,
0, 1, 0,
0, 0, 0,
};
memcpy(data, vals, kernel.size()*sizeof(float));
}
// ----------------------------------------------------------------------------
// Constructs a gaussian kernel of the given size
// ----------------------------------------------------------------------------
void Conv::makeGaussianKernel(std::vector<float> & out, float variance, unsigned int size)
{
unsigned int width = size ? size : ceil(6.f*variance);
if (!(width % 2)) width++;
out.resize(width);
float var2 = variance * variance;
float K = 1 / (sqrt(2 * M_PI * var2));
unsigned int o = (width-1) / 2;
if (width%2)
{
for (unsigned int i = 0; i <= o; i++)
{
float val = K * expf(-(float)(i*i) / (2.f * var2));
out[o+i] = val;
out[o-i] = val;
}
}
else
{
for (unsigned int i = 0; i <= o; i++)
{
float x = i + 0.5f;
float val = K * exp(-x*x / (2 * var2));
out[o+i+1] = val;
out[o-i] = val;
}
}
filescope::normalize(out);
}
// ----------------------------------------------------------------------------
// Constructs a mean kernel of the given size
// ----------------------------------------------------------------------------
void Conv::makeMeanKernel(std::vector<float> & out, unsigned int size)
{
out.resize(size, 1.0f / (float)size);
}
// ----------------------------------------------------------------------------
// Constructs a hamming kernel of the given size
// ----------------------------------------------------------------------------
void Conv::makeHammingKernel(std::vector<float> & out, float freq, float a, float b, unsigned int size)
{
auto width = size ? size : 0;
out.resize(width);
}
} // namespace volt
} // namespace vox |
02cdbf22247899e46e2aa9abc95381eee86c3bd2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Kernel configuration
////////////////////////////////////////////////////////////////////////////////
/*
#define KERNEL_RADIUS 8
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__device__ __constant__ float d_Kernel[KERNEL_W];
// Assuming ROW_TILE_W, KERNEL_RADIUS_ALIGNED and dataW
// are multiples of maximum coalescable read/write size,
// all global memory operations are coalesced in convolutionRowGPU()
#define ROW_TILE_W 128
#define KERNEL_RADIUS_ALIGNED 16
// Assuming COLUMN_TILE_W and dataW are multiples
// of maximum coalescable read/write size, all global memory operations
// are coalesced in convolutionColumnGPU()
#define COLUMN_TILE_W 16
#define COLUMN_TILE_H 48
*/
#define KERNEL_RADIUS 1
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__device__ __constant__ float d_Kernel[KERNEL_W];
// Assuming ROW_TILE_W, KERNEL_RADIUS_ALIGNED and dataW
// are multiples of maximum coalescable read/write size,
// all global memory operations are coalesced in convolutionRowGPU()
#define ROW_TILE_W 128
#define KERNEL_RADIUS_ALIGNED 16
// Assuming COLUMN_TILE_W and dataW are multiples
// of maximum coalescable read/write size, all global memory operations
// are coalesced in convolutionColumnGPU()
#define COLUMN_TILE_W 16
#define COLUMN_TILE_H 48
////////////////////////////////////////////////////////////////////////////////
// Loop unrolling templates, needed for best performance
////////////////////////////////////////////////////////////////////////////////
template<int i> __device__ float convolutionRow(float *data){
return
data[KERNEL_RADIUS - i] * d_Kernel[i]
+ convolutionRow<i - 1>(data);
}
template<> __device__ float convolutionRow<-1>(float *data){
return 0;
}
template<int i> __device__ float convolutionColumn(float *data){
return
data[(KERNEL_RADIUS - i) * COLUMN_TILE_W] * d_Kernel[i]
+ convolutionColumn<i - 1>(data);
}
template<> __device__ float convolutionColumn<-1>(float *data){
return 0;
}
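// Example: with KERNEL_RADIUS == 1, convolutionRow<2>(data + smemPos)
// unrolls at compile time into
// data[smemPos - 1] * d_Kernel[2]
// + data[smemPos ] * d_Kernel[1]
// + data[smemPos + 1] * d_Kernel[0];
// convolutionColumn expands the same way with a COLUMN_TILE_W stride
// between taps.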
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowGPU(
float *d_Result,
float *d_Data,
int dataW,
int dataH
){
//Data cache
__shared__ float data[KERNEL_RADIUS + ROW_TILE_W + KERNEL_RADIUS];
//Current tile and apron limits, relative to row start
const int tileStart = blockIdx.x*ROW_TILE_W;
const int tileEnd = tileStart + ROW_TILE_W - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, dataW - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataW - 1);
//Row start index in d_Data[]
const int rowStart = blockIdx.y*dataW;
//Aligned apron start. Assuming dataW and ROW_TILE_W are multiples
//of half-warp size, rowStart + apronStartAligned is also a
//multiple of half-warp size, thus having proper alignment
//for coalesced d_Data[] read.
const int apronStartAligned = tileStart - KERNEL_RADIUS_ALIGNED;
const int loadPos = apronStartAligned + threadIdx.x;
//Set the entire data cache contents
//Load global memory values, if indices are within the image borders,
//or initialize with zeroes otherwise
if(loadPos >= apronStart){
const int smemPos = loadPos - apronStart;
data[smemPos] =
((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ?
d_Data[rowStart + loadPos] : 0;
}
//Ensure the completeness of the loading stage
//because the results emitted by each thread depend on data
//loaded by other threads
__syncthreads();
const int writePos = tileStart + threadIdx.x;
//Assuming dataW and ROW_TILE_W are multiples of half-warp size,
//rowStart + tileStart is also a multiple of half-warp size,
//thus having proper alignment for coalesced d_Result[] write.
if(writePos <= tileEndClamped){
const int smemPos = writePos - apronStart;
float sum = 0;
sum = convolutionRow<2 * KERNEL_RADIUS>(data + smemPos);
//for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
// sum += data[smemPos + k] * d_Kernel[KERNEL_RADIUS - k];
d_Result[rowStart + writePos] = sum;
}
}
__global__ void iXiYRowGPU(
float *d_Result,
float *d_Ix,
float *d_Iy,
int dataW,
int dataH
){
//Data cache
__shared__ float data[KERNEL_RADIUS + ROW_TILE_W + KERNEL_RADIUS];
//Current tile and apron limits, relative to row start
const int tileStart = blockIdx.x*ROW_TILE_W;
const int tileEnd = tileStart + ROW_TILE_W - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, dataW - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataW - 1);
//Row start index in d_Data[]
const int rowStart = blockIdx.y*dataW;
//Aligned apron start. Assuming dataW and ROW_TILE_W are multiples
//of half-warp size, rowStart + apronStartAligned is also a
//multiple of half-warp size, thus having proper alignment
//for coalesced d_Data[] read.
const int apronStartAligned = tileStart - KERNEL_RADIUS_ALIGNED;
const int loadPos = apronStartAligned + threadIdx.x;
//Set the entire data cache contents
//Load global memory values, if indices are within the image borders,
//or initialize with zeroes otherwise
if(loadPos >= apronStart){
const int smemPos = loadPos - apronStart;
data[smemPos] =
((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ?
d_Ix[rowStart + loadPos] : 0;
}
//Ensure the completeness of the loading stage of Ix
//so that no thread risks trying to access a distant
//memory position to fetch some Iy value
__syncthreads();
if(loadPos >= apronStart){
const int smemPos = loadPos - apronStart;
data[smemPos] *=
((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ?
d_Iy[rowStart + loadPos] : 0;
}
//Ensure the completeness of the loading stage
//because the results emitted by each thread depend on data
//loaded by other threads
__syncthreads();
const int writePos = tileStart + threadIdx.x;
//Assuming dataW and ROW_TILE_W are multiples of half-warp size,
//rowStart + tileStart is also a multiple of half-warp size,
//thus having proper alignment for coalesced d_Result[] write.
if(writePos <= tileEndClamped){
const int smemPos = writePos - apronStart;
float sum = 0;
sum = convolutionRow<2 * KERNEL_RADIUS>(data + smemPos);
//for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
// sum += data[smemPos + k] * d_Kernel[KERNEL_RADIUS - k];
d_Result[rowStart + writePos] = sum;
}
}
__global__ void convolutionSquaredRowGPU(
float *d_Result,
float *d_Data,
int dataW,
int dataH
){
//Data cache
__shared__ float data[KERNEL_RADIUS + ROW_TILE_W + KERNEL_RADIUS];
//Current tile and apron limits, relative to row start
const int tileStart = blockIdx.x*ROW_TILE_W;
const int tileEnd = tileStart + ROW_TILE_W - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, dataW - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataW - 1);
//Row start index in d_Data[]
const int rowStart = blockIdx.y*dataW;
//Aligned apron start. Assuming dataW and ROW_TILE_W are multiples
//of half-warp size, rowStart + apronStartAligned is also a
//multiple of half-warp size, thus having proper alignment
//for coalesced d_Data[] read.
const int apronStartAligned = tileStart - KERNEL_RADIUS_ALIGNED;
const int loadPos = apronStartAligned + threadIdx.x;
//Set the entire data cache contents
//Load global memory values, if indices are within the image borders,
//or initialize with zeroes otherwise
if(loadPos >= apronStart){
const int smemPos = loadPos - apronStart;
data[smemPos] =
((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ?
d_Data[rowStart + loadPos] : 0;
data[smemPos] *= data[smemPos];
}
//Ensure the completeness of the loading stage
//because the results emitted by each thread depend on data
//loaded by other threads
__syncthreads();
const int writePos = tileStart + threadIdx.x;
//Assuming dataW and ROW_TILE_W are multiples of half-warp size,
//rowStart + tileStart is also a multiple of half-warp size,
//thus having proper alignment for coalesced d_Result[] write.
if(writePos <= tileEndClamped){
const int smemPos = writePos - apronStart;
float sum = 0;
sum = convolutionRow<2 * KERNEL_RADIUS>(data + smemPos);
//for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
// sum += data[smemPos + k] * d_Kernel[KERNEL_RADIUS - k];
d_Result[rowStart + writePos] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnGPU(
float *d_Result,
float *d_Data,
int dataW,
int dataH,
int smemStride,
int gmemStride
){
//Data cache
__shared__ float data[COLUMN_TILE_W * (KERNEL_RADIUS + COLUMN_TILE_H + KERNEL_RADIUS)];
//Current tile and apron limits, in rows
const int tileStart = blockIdx.y*COLUMN_TILE_H;
const int tileEnd = tileStart + COLUMN_TILE_H - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, dataH - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataH - 1);
//Current column index
const int columnStart = (blockIdx.x*COLUMN_TILE_W) + threadIdx.x;
//Shared and global memory indices for current column
int smemPos = (threadIdx.y*COLUMN_TILE_W) + threadIdx.x;
int gmemPos = ((apronStart + threadIdx.y)*dataW) + columnStart;
//Cycle through the entire data cache
//Load global memory values, if indices are within the image borders,
//or initialize with zero otherwise
for(int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y){
data[smemPos] =
((y >= apronStartClamped) && (y <= apronEndClamped)) ?
d_Data[gmemPos] : 0;
smemPos += smemStride;
gmemPos += gmemStride;
}
//Ensure the completeness of the loading stage
//because the results emitted by each thread depend on data
//loaded by other threads
__syncthreads();
//Shared and global memory indices for current column
smemPos = ((threadIdx.y + KERNEL_RADIUS)*COLUMN_TILE_W) + threadIdx.x;
gmemPos = ((tileStart + threadIdx.y)*dataW) + columnStart;
//Cycle through the tile body, clamped by image borders
//Calculate and output the results
for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y){
float sum = 0;
sum = convolutionColumn<2 * KERNEL_RADIUS>(data + smemPos);
//for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
// sum +=
// data[smemPos + IMUL(k, COLUMN_TILE_W)] *
// d_Kernel[KERNEL_RADIUS - k];
d_Result[gmemPos] = sum;
smemPos += smemStride;
gmemPos += gmemStride;
}
}
| 02cdbf22247899e46e2aa9abc95381eee86c3bd2.cu | ////////////////////////////////////////////////////////////////////////////////
// Kernel configuration
////////////////////////////////////////////////////////////////////////////////
/*
#define KERNEL_RADIUS 8
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__device__ __constant__ float d_Kernel[KERNEL_W];
// Assuming ROW_TILE_W, KERNEL_RADIUS_ALIGNED and dataW
// are multiples of maximum coalescable read/write size,
// all global memory operations are coalesced in convolutionRowGPU()
#define ROW_TILE_W 128
#define KERNEL_RADIUS_ALIGNED 16
// Assuming COLUMN_TILE_W and dataW are multiples
// of maximum coalescable read/write size, all global memory operations
// are coalesced in convolutionColumnGPU()
#define COLUMN_TILE_W 16
#define COLUMN_TILE_H 48
*/
#define KERNEL_RADIUS 1
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__device__ __constant__ float d_Kernel[KERNEL_W];
// Assuming ROW_TILE_W, KERNEL_RADIUS_ALIGNED and dataW
// are multiples of maximum coalescable read/write size,
// all global memory operations are coalesced in convolutionRowGPU()
#define ROW_TILE_W 128
#define KERNEL_RADIUS_ALIGNED 16
// Assuming COLUMN_TILE_W and dataW are multiples
// of maximum coalescable read/write size, all global memory operations
// are coalesced in convolutionColumnGPU()
#define COLUMN_TILE_W 16
#define COLUMN_TILE_H 48
////////////////////////////////////////////////////////////////////////////////
// Loop unrolling templates, needed for best performance
////////////////////////////////////////////////////////////////////////////////
template<int i> __device__ float convolutionRow(float *data){
return
data[KERNEL_RADIUS - i] * d_Kernel[i]
+ convolutionRow<i - 1>(data);
}
template<> __device__ float convolutionRow<-1>(float *data){
return 0;
}
template<int i> __device__ float convolutionColumn(float *data){
return
data[(KERNEL_RADIUS - i) * COLUMN_TILE_W] * d_Kernel[i]
+ convolutionColumn<i - 1>(data);
}
template<> __device__ float convolutionColumn<-1>(float *data){
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowGPU(
float *d_Result,
float *d_Data,
int dataW,
int dataH
){
//Data cache
__shared__ float data[KERNEL_RADIUS + ROW_TILE_W + KERNEL_RADIUS];
//Current tile and apron limits, relative to row start
const int tileStart = blockIdx.x*ROW_TILE_W;
const int tileEnd = tileStart + ROW_TILE_W - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, dataW - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataW - 1);
//Row start index in d_Data[]
const int rowStart = blockIdx.y*dataW;
//Aligned apron start. Assuming dataW and ROW_TILE_W are multiples
//of half-warp size, rowStart + apronStartAligned is also a
//multiple of half-warp size, thus having proper alignment
//for coalesced d_Data[] read.
const int apronStartAligned = tileStart - KERNEL_RADIUS_ALIGNED;
const int loadPos = apronStartAligned + threadIdx.x;
//Set the entire data cache contents
//Load global memory values, if indices are within the image borders,
//or initialize with zeroes otherwise
if(loadPos >= apronStart){
const int smemPos = loadPos - apronStart;
data[smemPos] =
((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ?
d_Data[rowStart + loadPos] : 0;
}
//Ensure the completeness of the loading stage
//because the results emitted by each thread depend on data
//loaded by other threads
__syncthreads();
const int writePos = tileStart + threadIdx.x;
//Assuming dataW and ROW_TILE_W are multiples of half-warp size,
//rowStart + tileStart is also a multiple of half-warp size,
//thus having proper alignment for coalesced d_Result[] write.
if(writePos <= tileEndClamped){
const int smemPos = writePos - apronStart;
float sum = 0;
sum = convolutionRow<2 * KERNEL_RADIUS>(data + smemPos);
//for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
// sum += data[smemPos + k] * d_Kernel[KERNEL_RADIUS - k];
d_Result[rowStart + writePos] = sum;
}
}
__global__ void iXiYRowGPU(
float *d_Result,
float *d_Ix,
float *d_Iy,
int dataW,
int dataH
){
//Data cache
__shared__ float data[KERNEL_RADIUS + ROW_TILE_W + KERNEL_RADIUS];
//Current tile and apron limits, relative to row start
const int tileStart = blockIdx.x*ROW_TILE_W;
const int tileEnd = tileStart + ROW_TILE_W - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, dataW - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataW - 1);
//Row start index in d_Data[]
const int rowStart = blockIdx.y*dataW;
//Aligned apron start. Assuming dataW and ROW_TILE_W are multiples
//of half-warp size, rowStart + apronStartAligned is also a
//multiple of half-warp size, thus having proper alignment
//for coalesced d_Data[] read.
const int apronStartAligned = tileStart - KERNEL_RADIUS_ALIGNED;
const int loadPos = apronStartAligned + threadIdx.x;
//Set the entire data cache contents
//Load global memory values, if indices are within the image borders,
//or initialize with zeroes otherwise
if(loadPos >= apronStart){
const int smemPos = loadPos - apronStart;
data[smemPos] =
((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ?
d_Ix[rowStart + loadPos] : 0;
}
//Ensure the completeness of the loading stage of Ix
//so that no thread risks trying to access a distant
//memory position to fetch some Iy value
__syncthreads();
if(loadPos >= apronStart){
const int smemPos = loadPos - apronStart;
data[smemPos] *=
((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ?
d_Iy[rowStart + loadPos] : 0;
}
//Ensure the completeness of the loading stage
//because the results emitted by each thread depend on data
//loaded by other threads
__syncthreads();
const int writePos = tileStart + threadIdx.x;
//Assuming dataW and ROW_TILE_W are multiples of half-warp size,
//rowStart + tileStart is also a multiple of half-warp size,
//thus having proper alignment for coalesced d_Result[] write.
if(writePos <= tileEndClamped){
const int smemPos = writePos - apronStart;
float sum = 0;
sum = convolutionRow<2 * KERNEL_RADIUS>(data + smemPos);
//for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
// sum += data[smemPos + k] * d_Kernel[KERNEL_RADIUS - k];
d_Result[rowStart + writePos] = sum;
}
}
__global__ void convolutionSquaredRowGPU(
float *d_Result,
float *d_Data,
int dataW,
int dataH
){
//Data cache
__shared__ float data[KERNEL_RADIUS + ROW_TILE_W + KERNEL_RADIUS];
//Current tile and apron limits, relative to row start
const int tileStart = blockIdx.x*ROW_TILE_W;
const int tileEnd = tileStart + ROW_TILE_W - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, dataW - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataW - 1);
//Row start index in d_Data[]
const int rowStart = blockIdx.y*dataW;
//Aligned apron start. Assuming dataW and ROW_TILE_W are multiples
//of half-warp size, rowStart + apronStartAligned is also a
//multiple of half-warp size, thus having proper alignment
//for coalesced d_Data[] read.
const int apronStartAligned = tileStart - KERNEL_RADIUS_ALIGNED;
const int loadPos = apronStartAligned + threadIdx.x;
//Set the entire data cache contents
//Load global memory values, if indices are within the image borders,
//or initialize with zeroes otherwise
if(loadPos >= apronStart){
const int smemPos = loadPos - apronStart;
data[smemPos] =
((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ?
d_Data[rowStart + loadPos] : 0;
data[smemPos] *= data[smemPos];
}
//Ensure the completeness of the loading stage
//because the results emitted by each thread depend on data
//loaded by other threads
__syncthreads();
const int writePos = tileStart + threadIdx.x;
//Assuming dataW and ROW_TILE_W are multiples of half-warp size,
//rowStart + tileStart is also a multiple of half-warp size,
//thus having proper alignment for coalesced d_Result[] write.
if(writePos <= tileEndClamped){
const int smemPos = writePos - apronStart;
float sum = 0;
sum = convolutionRow<2 * KERNEL_RADIUS>(data + smemPos);
//for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
// sum += data[smemPos + k] * d_Kernel[KERNEL_RADIUS - k];
d_Result[rowStart + writePos] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnGPU(
float *d_Result,
float *d_Data,
int dataW,
int dataH,
int smemStride,
int gmemStride
){
//Data cache
__shared__ float data[COLUMN_TILE_W * (KERNEL_RADIUS + COLUMN_TILE_H + KERNEL_RADIUS)];
//Current tile and apron limits, in rows
const int tileStart = blockIdx.y*COLUMN_TILE_H;
const int tileEnd = tileStart + COLUMN_TILE_H - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, dataH - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataH - 1);
//Current column index
const int columnStart = (blockIdx.x*COLUMN_TILE_W) + threadIdx.x;
//Shared and global memory indices for current column
int smemPos = (threadIdx.y*COLUMN_TILE_W) + threadIdx.x;
int gmemPos = ((apronStart + threadIdx.y)*dataW) + columnStart;
//Cycle through the entire data cache
//Load global memory values, if indices are within the image borders,
//or initialize with zero otherwise
for(int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y){
data[smemPos] =
((y >= apronStartClamped) && (y <= apronEndClamped)) ?
d_Data[gmemPos] : 0;
smemPos += smemStride;
gmemPos += gmemStride;
}
//Ensure the completeness of the loading stage
//because the results emitted by each thread depend on data
//loaded by other threads
__syncthreads();
//Shared and global memory indices for current column
smemPos = ((threadIdx.y + KERNEL_RADIUS)*COLUMN_TILE_W) + threadIdx.x;
gmemPos = ((tileStart + threadIdx.y)*dataW) + columnStart;
//Cycle through the tile body, clamped by image borders
//Calculate and output the results
for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y){
float sum = 0;
sum = convolutionColumn<2 * KERNEL_RADIUS>(data + smemPos);
//for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
// sum +=
// data[smemPos + IMUL(k, COLUMN_TILE_W)] *
// d_Kernel[KERNEL_RADIUS - k];
d_Result[gmemPos] = sum;
smemPos += smemStride;
gmemPos += gmemStride;
}
}
|
e9a007f2f482b524e9d362f072af061d35b8c561.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THH/THHTensorMath.h>
#include <THH/THHGeneral.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHApply.cuh>
#include <c10/macros/Macros.h>
#include <ATen/WrapDimUtils.h>
// Compute the offsets into the given tensors for a linear index. For the 't2'
// tensor, dimension 'dim' is skipped. The tensors are assumed to have the same
// size (with the exception of 't2' in dimension 'dim').
// This version uses a static number of dimensions.
template <typename IndexType, typename Real, int Dims>
struct IndexToScatterGatherOffsets {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<int64_t, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t1, IndexType* t1Offset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<int64_t, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
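// Worked example: for index.sizes = {2, 3, 4} and linearId = 7 the loop
// above peels off curDimIndex = 3 (d = 2), then 1 (d = 1), then 0 (d = 0),
// i.e. element (0, 1, 3), and accumulates the matching strided offsets into
// each tensor while skipping dimension 'dim' for t2.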
template <typename IndexType, typename Real, int Dims>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void THCudaTensor_gatherKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<int64_t, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset,
src, &srcOffset);
int64_t indexValue = index.data[indexOffset];
CUDA_KERNEL_ASSERT(indexValue >= 0 && indexValue < src.sizes[dim]);
srcOffset += indexValue * src.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
#include <THH/generic/THHTensorScatterGather.hip>
#include <THH/THHGenerateAllTypes.h>
#include <THH/generic/THHTensorScatterGather.hip>
#include <THH/THHGenerateBoolType.h>
| e9a007f2f482b524e9d362f072af061d35b8c561.cu | #include <THC/THCTensorMath.h>
#include <THC/THCGeneral.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCApply.cuh>
#include <c10/macros/Macros.h>
#include <ATen/WrapDimUtils.h>
// Compute the offsets into the given tensors for a linear index. For the 't2'
// tensor, dimension 'dim' is skipped. The tensors are assumed to have the same
// size (with the exception of 't2' in dimension 'dim').
// This version uses a static number of dimensions.
template <typename IndexType, typename Real, int Dims>
struct IndexToScatterGatherOffsets {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<int64_t, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t1, IndexType* t1Offset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<int64_t, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
template <typename IndexType, typename Real, int Dims>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void THCudaTensor_gatherKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<int64_t, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = blockIdx.x * blockDim.x + threadIdx.x;
linearId < totalElements;
linearId += gridDim.x * blockDim.x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset,
src, &srcOffset);
int64_t indexValue = index.data[indexOffset];
CUDA_KERNEL_ASSERT(indexValue >= 0 && indexValue < src.sizes[dim]);
srcOffset += indexValue * src.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
#include <THC/generic/THCTensorScatterGather.cu>
#include <THC/THCGenerateAllTypes.h>
#include <THC/generic/THCTensorScatterGather.cu>
#include <THC/THCGenerateBoolType.h>
|
03cbe3c5b75815684f5cc7d1c51a0d644a020ea1.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cstdlib>
#include <cstdio>
#include <cmath>
using namespace std;
#define N 1024
#define TAILLE_BLOC_X 16
#define TAILLE_BLOC_Y 16
/*====================*/
/* KERNEL DECLARATION */
/*====================*/
__global__ void matmulKernel (float *d_A,
float *d_B,
float *d_C,
int n)
{
unsigned int j = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int i = blockDim.y * blockIdx.y + threadIdx.y;
if (i<n && j<n)
{
float temp=0;
for (int k=0; k<n; k++)
temp = temp + d_A[i*n+k] * d_B[k*n+j];
d_C[i*n+j] = temp;
}
}
/*=================*/
/* CPU DECLARATION */
/*=================*/
void matmulCPU (float *c_A,
float *c_B,
float *c_C,
int n)
{
int i,j,k;
float s;
for(i=0; i<n ; i++){
for(j=0; j<n ; j++){
s=0;
for(k=0; k<n ; k++)
s+=c_A[i*n+k]*c_B[k*n+j];
c_C[i*n+j]=s;
}
}
}
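/*=====================*/
/* HELPER DECLARATION */
/*=====================*/
/* fillMatriceFloat is called from main() below but is not defined anywhere in
this file; the definition here is an assumed sketch (uniform random values
in [0,1)) added only so the example is self-contained. */
void fillMatriceFloat (float *M, int n)
{
for(int i=0; i<n*n ; i++)
M[i]=(float)rand()/(float)RAND_MAX;
}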
/*==================*/
/* MAIN DECLARATION */
/*==================*/
int main()
{
float *A, *B, *C, *C_ref_CPU, *d_A, *d_B, *d_C;
int taille_totale = N*N*sizeof(float),i,j;
/* CPU allocation */
A=(float*)malloc(taille_totale);
B=(float*)malloc(taille_totale);
C=(float*)malloc(taille_totale);
C_ref_CPU=(float*)malloc(taille_totale);
fillMatriceFloat(A,N);
fillMatriceFloat(B,N);
/* GPU allocation */
hipMalloc((void **) &d_A, taille_totale);
hipMalloc((void **) &d_B, taille_totale);
hipMalloc((void **) &d_C, taille_totale);
/* CPU -> GPU transfers */
hipMemcpy(d_A, A, taille_totale, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, taille_totale, hipMemcpyHostToDevice);
/* Kernel launch */
dim3 threadsParBloc(TAILLE_BLOC_X, TAILLE_BLOC_Y);
dim3 tailleGrille(ceil(N/(float) TAILLE_BLOC_X), ceil(N/(float) TAILLE_BLOC_Y));
hipLaunchKernelGGL(( matmulKernel), dim3(tailleGrille), dim3(threadsParBloc), 0, 0, d_A, d_B, d_C, N);
/* GPU -> CPU transfer */
hipMemcpy(C, d_C, taille_totale, hipMemcpyDeviceToHost);
/* Verification */
matmulCPU(A, B, C_ref_CPU, N);
for(i=0; i<N; i++)
for(j=0; j<N; j++)
if (fabsf(C[i*N+j]-C_ref_CPU[i*N+j]) > 0.001)
printf("%4d %4d h %le d %le\n",i,j,C_ref_CPU[i*N+j],C[i*N+j]);
/* Free GPU and CPU memory */
hipFree(d_A); hipFree(d_B); hipFree(d_C);
free(A); free(B); free(C); free(C_ref_CPU);
return 0;
}
| 03cbe3c5b75815684f5cc7d1c51a0d644a020ea1.cu | #include <cuda.h>
#include <cstdlib>
#include <cstdio>
#include <cmath>
using namespace std;
#define N 1024
#define TAILLE_BLOC_X 16
#define TAILLE_BLOC_Y 16
/*====================*/
/* KERNEL DECLARATION */
/*====================*/
__global__ void matmulKernel (float *d_A,
float *d_B,
float *d_C,
int n)
{
unsigned int j = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int i = blockDim.y * blockIdx.y + threadIdx.y;
if (i<n && j<n)
{
float temp=0;
for (int k=0; k<n; k++)
temp = temp + d_A[i*n+k] * d_B[k*n+j];
d_C[i*n+j] = temp;
}
}
/*=================*/
/* CPU DECLARATION */
/*=================*/
void matmulCPU (float *c_A,
float *c_B,
float *c_C,
int n)
{
int i,j,k;
float s;
for(i=0; i<n ; i++){
for(j=0; j<n ; j++){
s=0;
for(k=0; k<n ; k++)
s+=c_A[i*n+k]*c_B[k*n+j];
c_C[i*n+j]=s;
}
}
}
/*==================*/
/* MAIN DECLARATION */
/*==================*/
int main()
{
float *A, *B, *C, *C_ref_CPU, *d_A, *d_B, *d_C;
int taille_totale = N*N*sizeof(float),i,j;
/* CPU allocation */
A=(float*)malloc(taille_totale);
B=(float*)malloc(taille_totale);
C=(float*)malloc(taille_totale);
C_ref_CPU=(float*)malloc(taille_totale);
fillMatriceFloat(A,N);
fillMatriceFloat(B,N);
/* GPU allocation */
cudaMalloc((void **) &d_A, taille_totale);
cudaMalloc((void **) &d_B, taille_totale);
cudaMalloc((void **) &d_C, taille_totale);
/* CPU -> GPU transfers */
cudaMemcpy(d_A, A, taille_totale, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, taille_totale, cudaMemcpyHostToDevice);
/* Kernel launch */
dim3 threadsParBloc(TAILLE_BLOC_X, TAILLE_BLOC_Y);
dim3 tailleGrille(ceil(N/(float) TAILLE_BLOC_X), ceil(N/(float) TAILLE_BLOC_Y));
matmulKernel<<<tailleGrille, threadsParBloc>>>(d_A, d_B, d_C, N);
/* GPU -> CPU transfer */
cudaMemcpy(C, d_C, taille_totale, cudaMemcpyDeviceToHost);
/* Verification */
matmulCPU(A, B, C_ref_CPU, N);
for(i=0; i<N; i++)
for(j=0; j<N; j++)
if (fabsf(C[i*N+j]-C_ref_CPU[i*N+j]) > 0.001)
printf("%4d %4d h %le d %le\n",i,j,C_ref_CPU[i*N+j],C[i*N+j]);
/* Free GPU and CPU memory */
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
free(A); free(B); free(C); free(C_ref_CPU);
return 0;
}
|
eede8e61248e123cac96f3deb95bc55348c39171.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
int
n = 512;
float
a, beta_old, beta = 0.0f,
sqydot, ydot, thr = 1e-5f,
*x, *dx, *y, *dy;
#define I (i+1)
#define J (j+1)
#define BLOCKSIZE 256 // Recommended blocksize from the CUDA documentation.
// y = A * x_old on GPU
__global__ void
cudaDy(float * dx, float * dy, int n)
{
int i = blockIdx.x*BLOCKSIZE+threadIdx.x;
float ytemp = 0.0f;
for (int j = 0; j<n; j++)
{
ytemp += dx[j]/(0.5f*(I+J-1)*(I+J-2)+I);
}
dy[i]=ytemp;
}
// x = y / sqrt(y dot y) on GPU. 2-norm precalculated on CPU.
__global__ void
cudaDx(float* dx, float* dy, float sqydot)
{
int i = blockIdx.x*BLOCKSIZE+threadIdx.x;
dx[i] = dy[i]/sqydot;
}
int
main ( int argc, char **argv )
{
if ( argc > 1 )
n = (1 << strtol ( argv[1], NULL, 10 ));
x = (float *) malloc ( n*sizeof(float) );
y = (float *) malloc ( n*sizeof(float) );
memset ( x, 0, n*sizeof(float) );
x[0] = 1.0f;
// Allocate similar arrays on device.
hipMalloc(&dx, n*sizeof(float));
hipMalloc(&dy, n*sizeof(float));
// Copy initial contents of x one time.
hipMemcpy(dx, x, n*sizeof(float), hipMemcpyHostToDevice);
// Set size of thread block
dim3 threadBlock (BLOCKSIZE);
// Set number of thread blocks
dim3 gridBlock (n/BLOCKSIZE);
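// Note: the kernels have no bounds check, so n is assumed to be a multiple of
// BLOCKSIZE; with the default n = 512 this gives 2 blocks of 256 threads,
// i.e. one thread per entry of y (and of x).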
do
{
// Calculate y vector
hipLaunchKernelGGL(( cudaDy) , dim3(gridBlock), dim3(threadBlock) , 0, 0, dx, dy, n);
// Copy result to host
hipMemcpy(y, dy, n*sizeof(float), hipMemcpyDeviceToHost);
// Calculate new beta and y dot product on host.
beta_old = beta;
beta = 0.0f;
ydot = 0.0f;
for ( int j=0; j<n; j++ )
{
beta += y[j] * x[j];
ydot += y[j] * y[j];
}
if ( fabs(beta_old-beta) < thr )
break;
// Precalculate square root on host and send to x vector calculation.
sqydot = sqrt(ydot);
hipLaunchKernelGGL(( cudaDx) , dim3(gridBlock), dim3(threadBlock) , 0, 0, dx, dy, sqydot);
// Copy result to host.
hipMemcpy(x, dx, n*sizeof(float), hipMemcpyDeviceToHost);
} while ( 1 );
printf ( "%e\n", beta );
free ( x ), free ( y );
hipFree(dx), hipFree(dy);
}
| eede8e61248e123cac96f3deb95bc55348c39171.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
int
n = 512;
float
a, beta_old, beta = 0.0f,
sqydot, ydot, thr = 1e-5f,
*x, *dx, *y, *dy;
#define I (i+1)
#define J (j+1)
#define BLOCKSIZE 256 // Recommended blocksize from the CUDA documentation.
// y = A * x_old on GPU
__global__ void
cudaDy(float * dx, float * dy, int n)
{
int i = blockIdx.x*BLOCKSIZE+threadIdx.x;
float ytemp = 0.0f;
for (int j = 0; j<n; j++)
{
ytemp += dx[j]/(0.5f*(I+J-1)*(I+J-2)+I);
}
dy[i]=ytemp;
}
// x = y / sqrt(y dot y) on GPU. 2-norm precalculated on CPU.
__global__ void
cudaDx(float* dx, float* dy, float sqydot)
{
int i = blockIdx.x*BLOCKSIZE+threadIdx.x;
dx[i] = dy[i]/sqydot;
}
int
main ( int argc, char **argv )
{
if ( argc > 1 )
n = (1 << strtol ( argv[1], NULL, 10 ));
x = (float *) malloc ( n*sizeof(float) );
y = (float *) malloc ( n*sizeof(float) );
memset ( x, 0, n*sizeof(float) );
x[0] = 1.0f;
// Allocate similar arrays on device.
cudaMalloc(&dx, n*sizeof(float));
cudaMalloc(&dy, n*sizeof(float));
// Copy initial contents of x one time.
cudaMemcpy(dx, x, n*sizeof(float), cudaMemcpyHostToDevice);
// Set size of thread block
dim3 threadBlock (BLOCKSIZE);
// Set number of thread blocks
dim3 gridBlock (n/BLOCKSIZE);
do
{
// Calculate y vector
cudaDy <<< gridBlock, threadBlock >>> (dx, dy, n);
// Copy result to host
cudaMemcpy(y, dy, n*sizeof(float), cudaMemcpyDeviceToHost);
// Calculate new beta and y dot product on host.
beta_old = beta;
beta = 0.0f;
ydot = 0.0f;
for ( int j=0; j<n; j++ )
{
beta += y[j] * x[j];
ydot += y[j] * y[j];
}
if ( fabs(beta_old-beta) < thr )
break;
// Precalculate square root on host and send to x vector calculation.
sqydot = sqrt(ydot);
cudaDx <<< gridBlock, threadBlock >>> (dx, dy, sqydot);
// Copy result to host.
cudaMemcpy(x, dx, n*sizeof(float), cudaMemcpyDeviceToHost);
} while ( 1 );
printf ( "%e\n", beta );
free ( x ), free ( y );
cudaFree(dx), cudaFree(dy);
}
|
d14d415527f8546563202f99068ca722ba7802e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "Pixel.h"
#include <stdio.h>
#include <limits>
#include <chrono>
#include <stdlib.h>
#include <iostream>
#define BLOCK_SIZE 1024
// TODO I'm not sure if the DistanceFunction.EuclideanDistanceSquared can be used within this cuda code
__device__ int CudaEuclideanDistanceSquared(int x1, int y1, int x2, int y2) {
return (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1);
}
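// Example: CudaEuclideanDistanceSquared(0, 0, 3, 4) == 25. The square root is
// never taken because only the relative ordering of distances matters when
// picking the closest seed.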
__global__ void VoronoiGlobalSearch(Pixel* cudaGrid, Pixel* cudaSeeds, int gridHeight, int gridWidth, int numSeeds) {
int pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < (gridHeight * gridWidth)) {
if (cudaGrid[pos].seed) {
return;
}
unsigned int minDistance = INT_MAX;
Pixel closestSeed;
for (int seedPos = 0; seedPos < numSeeds; seedPos++) {
int distance = CudaEuclideanDistanceSquared(cudaGrid[pos].row, cudaGrid[pos].col, cudaSeeds[seedPos].row, cudaSeeds[seedPos].col);
if (distance <= minDistance) {
minDistance = distance;
closestSeed = cudaSeeds[seedPos];
}
}
// set grid position closest seed
cudaGrid[pos].color = closestSeed.color;
}
}
__global__ void VoronoiLocalSearch(Pixel* cudaGrid, Pixel* cudaSeeds, int gridHeight, int gridWidth, int searchRadius, int numSeeds) {
// compute grid position
int pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos >= (gridHeight * gridWidth)) {
return; // can't access this memory
}
if (cudaGrid[pos].seed) {
return;
}
// compute boundaries for search box
int startRow = (int)cudaGrid[pos].row - searchRadius;
if (startRow < 0) {
startRow = 0;
}
int endRow = (int)cudaGrid[pos].row + searchRadius;
if (endRow >= gridHeight) {
endRow = gridHeight - 1;
}
int startCol = (int)cudaGrid[pos].col - searchRadius;
if (startCol < 0) {
startCol = 0;
}
int endCol = (int)cudaGrid[pos].col + searchRadius;
if (endCol >= gridWidth) {
endCol = gridWidth - 1;
}
unsigned int minDistance = INT_MAX;
bool success = false;
Pixel closestSeed;
// iterate through local search space and find closest seed
for (int boxRow = startRow; boxRow <= endRow; boxRow++) {
for (int boxCol = startCol; boxCol <= endCol; boxCol++) {
int boxPos = (boxRow * gridWidth) + boxCol;
if (cudaGrid[boxPos].seed) {
int dist = CudaEuclideanDistanceSquared(cudaGrid[pos].row, cudaGrid[pos].col, cudaGrid[boxPos].row, cudaGrid[boxPos].col);
if (dist <= minDistance) {
minDistance = dist;
success = true;
closestSeed = cudaGrid[boxPos];
}
}
}
}
if (success) {
cudaGrid[pos].color = closestSeed.color;
return;
}
// local search failed - fallback to global strategy (same as the other search)
// Note - never actually seen the local search fail, but it is a possibility
minDistance = INT_MAX;
for (int seedPos = 0; seedPos < numSeeds; seedPos++) {
int distance = CudaEuclideanDistanceSquared(cudaGrid[pos].row, cudaGrid[pos].col, cudaSeeds[seedPos].row, cudaSeeds[seedPos].col);
if (distance <= minDistance) {
minDistance = distance;
closestSeed = cudaSeeds[seedPos];
}
}
// set grid position closest seed
cudaGrid[pos].color = closestSeed.color;
}
extern "C" void CudaComputeVoronoi(Pixel * grid, Pixel * seeds, int gridHeight, int gridWidth, int numSeeds, int searchRadius) {
hipError_t result;
// cuda related data
Pixel* cudaGrid;
Pixel* cudaSeeds;
int gridSize = gridWidth * gridHeight;
// select our 0 GPU to run on
result = hipSetDevice(0);
if (result != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
exit(1);
}
// allocate data for our grid, seeds, and colors
result = hipMalloc((void**)&cudaGrid, sizeof(Pixel) * gridSize);
if (result != hipSuccess) {
fprintf(stderr, "hipMalloc failed for grid allocation.");
exit(1);
}
result = hipMalloc((void**)&cudaSeeds, sizeof(Pixel) * numSeeds);
if (result != hipSuccess) {
fprintf(stderr, "hipMalloc failed for seed allocation.");
exit(1);
}
// copy over our grid
result = hipMemcpy(cudaGrid, grid, sizeof(Pixel) * gridSize, hipMemcpyHostToDevice);
if (result != hipSuccess) {
fprintf(stderr, "hipMemcpy failed while downloading grid data to device.");
exit(1);
}
// copy over our seeds
result = hipMemcpy(cudaSeeds, seeds, sizeof(Pixel) * numSeeds, hipMemcpyHostToDevice);
if (result != hipSuccess) {
fprintf(stderr, "hipMemcpy failed while downloading seed data to device.");
exit(1);
}
// set execution configuration on GPU
dim3 dimblock(BLOCK_SIZE);
dim3 dimgrid(ceil((float)gridSize / BLOCK_SIZE));
// compute voronoi
if (searchRadius > 0) {
VoronoiLocalSearch << <dimgrid, dimblock >> > (cudaGrid, cudaSeeds, gridHeight, gridWidth, searchRadius, numSeeds);
}
else {
VoronoiGlobalSearch << <dimgrid, dimblock >> > (cudaGrid, cudaSeeds, gridHeight, gridWidth, numSeeds);
}
// check to see if there were any errors
result = hipGetLastError();
if (result != hipSuccess) {
fprintf(stderr, "voronoi launch failed: %s\n", hipGetErrorString(result));
exit(1);
}
else {
std::cout << "kernel has been launched" << std::endl;
}
// copy over our grid data
result = hipMemcpy(grid, cudaGrid, sizeof(Pixel) * gridSize, hipMemcpyDeviceToHost);
if (result != hipSuccess) {
fprintf(stderr, "hipMemcpy failed while uploading grid data from the device.");
exit(1);
}
else {
std::cout << "finished copying data over" << std::endl;
}
// release grid memory allocation
result = hipFree(cudaGrid);
if (result != hipSuccess) {
fprintf(stderr, "hipFree failed while freeing cuda_grid!");
exit(1);
}
// release seed memory allocation
result = hipFree(cudaSeeds);
if (result != hipSuccess) {
fprintf(stderr, "hipFree failed while freeing cuda_seeds!");
exit(1);
}
}
| d14d415527f8546563202f99068ca722ba7802e0.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "Pixel.h"
#include <stdio.h>
#include <limits>
#include <chrono>
#include <stdlib.h>
#include <iostream>
#define BLOCK_SIZE 1024
// TODO I'm not sure if the DistanceFunction.EuclideanDistanceSquared can be used within this cuda code
__device__ int CudaEuclideanDistanceSquared(int x1, int y1, int x2, int y2) {
return (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1);
}
__global__ void VoronoiGlobalSearch(Pixel* cudaGrid, Pixel* cudaSeeds, int gridHeight, int gridWidth, int numSeeds) {
int pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < (gridHeight * gridWidth)) {
if (cudaGrid[pos].seed) {
return;
}
unsigned int minDistance = INT_MAX;
Pixel closestSeed;
for (int seedPos = 0; seedPos < numSeeds; seedPos++) {
int distance = CudaEuclideanDistanceSquared(cudaGrid[pos].row, cudaGrid[pos].col, cudaSeeds[seedPos].row, cudaSeeds[seedPos].col);
if (distance <= minDistance) {
minDistance = distance;
closestSeed = cudaSeeds[seedPos];
}
}
// set grid position closest seed
cudaGrid[pos].color = closestSeed.color;
}
}
__global__ void VoronoiLocalSearch(Pixel* cudaGrid, Pixel* cudaSeeds, int gridHeight, int gridWidth, int searchRadius, int numSeeds) {
// compute grid position
int pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos >= (gridHeight * gridWidth)) {
return; // can't access this memory
}
if (cudaGrid[pos].seed) {
return;
}
// compute boundaries for search box
int startRow = (int)cudaGrid[pos].row - searchRadius;
if (startRow < 0) {
startRow = 0;
}
int endRow = (int)cudaGrid[pos].row + searchRadius;
if (endRow >= gridHeight) {
endRow = gridHeight - 1;
}
int startCol = (int)cudaGrid[pos].col - searchRadius;
if (startCol < 0) {
startCol = 0;
}
int endCol = (int)cudaGrid[pos].col + searchRadius;
if (endCol >= gridWidth) {
endCol = gridWidth - 1;
}
unsigned int minDistance = INT_MAX;
bool success = false;
Pixel closestSeed;
// iterate through local search space and find closest seed
for (int boxRow = startRow; boxRow <= endRow; boxRow++) {
for (int boxCol = startCol; boxCol <= endCol; boxCol++) {
int boxPos = (boxRow * gridWidth) + boxCol;
if (cudaGrid[boxPos].seed) {
int dist = CudaEuclideanDistanceSquared(cudaGrid[pos].row, cudaGrid[pos].col, cudaGrid[boxPos].row, cudaGrid[boxPos].col);
if (dist <= minDistance) {
minDistance = dist;
success = true;
closestSeed = cudaGrid[boxPos];
}
}
}
}
if (success) {
cudaGrid[pos].color = closestSeed.color;
return;
}
// local search failed - fallback to global strategy (same as the other search)
// Note - never actually seen the local search fail, but it is a possibility
minDistance = INT_MAX;
for (int seedPos = 0; seedPos < numSeeds; seedPos++) {
int distance = CudaEuclideanDistanceSquared(cudaGrid[pos].row, cudaGrid[pos].col, cudaSeeds[seedPos].row, cudaSeeds[seedPos].col);
if (distance <= minDistance) {
minDistance = distance;
closestSeed = cudaSeeds[seedPos];
}
}
// set grid position closest seed
cudaGrid[pos].color = closestSeed.color;
}
extern "C" void CudaComputeVoronoi(Pixel * grid, Pixel * seeds, int gridHeight, int gridWidth, int numSeeds, int searchRadius) {
cudaError_t result;
// cuda related data
Pixel* cudaGrid;
Pixel* cudaSeeds;
int gridSize = gridWidth * gridHeight;
// select our 0 GPU to run on
result = cudaSetDevice(0);
if (result != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
exit(1);
}
// allocate data for our grid, seeds, and colors
result = cudaMalloc((void**)&cudaGrid, sizeof(Pixel) * gridSize);
if (result != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed for grid allocation.");
exit(1);
}
result = cudaMalloc((void**)&cudaSeeds, sizeof(Pixel) * numSeeds);
if (result != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed for seed allocation.");
exit(1);
}
// copy over our grid
result = cudaMemcpy(cudaGrid, grid, sizeof(Pixel) * gridSize, cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed while downloading grid data to device.");
exit(1);
}
// copy over our seeds
result = cudaMemcpy(cudaSeeds, seeds, sizeof(Pixel) * numSeeds, cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed while downloading seed data to device.");
exit(1);
}
// set execution configuration on GPU
dim3 dimblock(BLOCK_SIZE);
dim3 dimgrid(ceil((float)gridSize / BLOCK_SIZE));
// compute voronoi
if (searchRadius > 0) {
VoronoiLocalSearch << <dimgrid, dimblock >> > (cudaGrid, cudaSeeds, gridHeight, gridWidth, searchRadius, numSeeds);
}
else {
VoronoiGlobalSearch << <dimgrid, dimblock >> > (cudaGrid, cudaSeeds, gridHeight, gridWidth, numSeeds);
}
// check to see if there were any errors
result = cudaGetLastError();
if (result != cudaSuccess) {
fprintf(stderr, "voronoi launch failed: %s\n", cudaGetErrorString(result));
exit(1);
}
else {
std::cout << "kernel has been launched" << std::endl;
}
// copy over our grid data
result = cudaMemcpy(grid, cudaGrid, sizeof(Pixel) * gridSize, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed while uploading grid data from the device.");
exit(1);
}
else {
std::cout << "finished copying data over" << std::endl;
}
// release grid memory allocation
result = cudaFree(cudaGrid);
if (result != cudaSuccess) {
fprintf(stderr, "cudaFree failed while freeing cuda_grid!");
exit(1);
}
// release seed memory allocation
result = cudaFree(cudaSeeds);
if (result != cudaSuccess) {
fprintf(stderr, "cudaFree failed while freeing cuda_seeds!");
exit(1);
}
}
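// For reference only: "Pixel.h" is not part of this file. Judging from the accesses
// above (.row, .col, .seed, .color), the struct presumably looks roughly like the
// sketch below; the exact field types and the color representation are assumptions.
// struct Pixel {
//     unsigned int row;   // y position in the grid
//     unsigned int col;   // x position in the grid
//     bool seed;          // true if this pixel is one of the Voronoi seeds
//     unsigned int color; // value copied from the closest seed
// };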
|
20595be04df29a4d3ec26e9c1a0352c563c07cf2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include "timer.h"
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(hipGetLastError());
*/
static void checkCudaCall(hipError_t result) {
if (result != hipSuccess) {
cerr << "cuda error: " << hipGetErrorString(result) << endl;
exit(1);
}
}
__global__ void encryptKernel(int n, char* deviceDataIn, char* deviceDataOut, int key) {
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= (unsigned)n) return; // guard: the grid is rounded up to whole blocks, so the last block can run past n
int temp = ((int)deviceDataIn[index]) + key;
if (temp > 127) { temp = temp % 127 - 1; }
deviceDataOut[index] = temp;
}
__global__ void decryptKernel(int n, char* deviceDataIn, char* deviceDataOut, int key) {
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= (unsigned)n) return; // guard: the grid is rounded up to whole blocks, so the last block can run past n
int temp = ((int)deviceDataIn[index]) - key;
if (temp < 0) { temp = temp % 127 + 128; }
deviceDataOut[index] = temp;
}
int fileSize() {
int size;
ifstream file ("original.data", ios::in|ios::binary|ios::ate);
if (file.is_open())
{
size = file.tellg();
file.close();
}
else {
cout << "Unable to open file";
size = -1;
}
return size;
}
int readData(char *fileName, char *data) {
streampos size;
ifstream file (fileName, ios::in|ios::binary|ios::ate);
if (file.is_open())
{
size = file.tellg();
file.seekg (0, ios::beg);
file.read (data, size);
file.close();
cout << "The entire file content is in memory." << endl;
}
else cout << "Unable to open file" << endl;
return 0;
}
int writeData(int size, char *fileName, char *data) {
ofstream file (fileName, ios::out|ios::binary|ios::trunc);
if (file.is_open())
{
file.write (data, size);
file.close();
cout << "The entire file content was written to file." << endl;
return 0;
}
else cout << "Unable to open file";
return -1;
}
int EncryptSeq (int n, char* data_in, char* data_out, int key)
{
int i;
timer sequentialTime = timer("Sequential encryption");
sequentialTime.start();
for (i=0; i<n; i++) {
int temporary = (int)data_in[i] + key;
if (temporary > 127) { temporary = (temporary % 127) - 1; }
data_out[i] = temporary;
}
sequentialTime.stop();
cout << fixed << setprecision(6);
cout << "Encryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl;
return 0;
}
int DecryptSeq (int n, char* data_in, char* data_out, int key)
{
int i;
timer sequentialTime = timer("Sequential decryption");
sequentialTime.start();
for (i=0; i<n; i++) {
int temporary = (int)data_in[i] - key;
if (temporary < 0) {temporary = (temporary % 127) + 128; }
data_out[i] = temporary;
}
sequentialTime.stop();
cout << fixed << setprecision(6);
cout << "Decryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl;
return 0;
}
int EncryptCuda (int n, char* data_in, char* data_out,int key) {
int threadBlockSize = 512;
// allocate the vectors on the GPU
char* deviceDataIn = NULL;
checkCudaCall(hipMalloc((void **) &deviceDataIn, n * sizeof(char)));
if (deviceDataIn == NULL) {
cout << "could not allocate memory!" << endl;
return -1;
}
char* deviceDataOut = NULL;
checkCudaCall(hipMalloc((void **) &deviceDataOut, n * sizeof(char)));
if (deviceDataOut == NULL) {
checkCudaCall(hipFree(deviceDataIn));
cout << "could not allocate memory!" << endl;
return -1;
}
timer kernelTime1 = timer("kernelTime");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(hipMemcpy(deviceDataIn, data_in, n*sizeof(char), hipMemcpyHostToDevice));
memoryTime.stop();
// execute kernel
kernelTime1.start();
hipLaunchKernelGGL(( encryptKernel), dim3(ceil((double)n/threadBlockSize)), dim3(threadBlockSize), 0, 0, n,deviceDataIn, deviceDataOut,key);
hipDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(hipGetLastError());
// copy result back
memoryTime.start();
checkCudaCall(hipMemcpy(data_out, deviceDataOut, n * sizeof(char), hipMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(hipFree(deviceDataIn));
checkCudaCall(hipFree(deviceDataOut));
cout << fixed << setprecision(6);
cout << "Encrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl;
cout << "Encrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl;
return 0;
}
int DecryptCuda (int n, char* data_in, char* data_out, int key) {
int threadBlockSize = 512;
// allocate the vectors on the GPU
char* deviceDataIn = NULL;
checkCudaCall(hipMalloc((void **) &deviceDataIn, n * sizeof(char)));
if (deviceDataIn == NULL) {
cout << "could not allocate memory!" << endl;
return -1;
}
char* deviceDataOut = NULL;
checkCudaCall(hipMalloc((void **) &deviceDataOut, n * sizeof(char)));
if (deviceDataOut == NULL) {
checkCudaCall(hipFree(deviceDataIn));
cout << "could not allocate memory!" << endl;
return -1;
}
timer kernelTime1 = timer("kernelTime");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(hipMemcpy(deviceDataIn, data_in, n*sizeof(char), hipMemcpyHostToDevice));
memoryTime.stop();
// execute kernel
kernelTime1.start();
hipLaunchKernelGGL(( decryptKernel), dim3(ceil((double)n/threadBlockSize)), dim3(threadBlockSize), 0, 0, n,deviceDataIn, deviceDataOut,key);
hipDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(hipGetLastError());
// copy result back
memoryTime.start();
checkCudaCall(hipMemcpy(data_out, deviceDataOut, n * sizeof(char), hipMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(hipFree(deviceDataIn));
checkCudaCall(hipFree(deviceDataOut));
cout << fixed << setprecision(6);
cout << "Decrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl;
cout << "Decrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl;
return 0;
}
int main(int argc, char* argv[]) {
int key = 444;
key = key % 127;
int n;
n = fileSize();
if (n == -1) {
cout << "File not found! Exiting ... " << endl;
exit(0);
}
char* data_in = new char[n];
char* data_out = new char[n];
readData("original.data", data_in);
cout << "Encrypting a file of " << n << " characters." << endl;
EncryptSeq(n, data_in, data_out, key);
writeData(n, "sequential.data", data_out);
EncryptCuda(n, data_in, data_out, key);
writeData(n, "cuda.data", data_out);
readData("cuda.data", data_in);
cout << "Decrypting a file of " << n << " characters" << endl;
DecryptSeq(n, data_in, data_out, key);
writeData(n, "sequential_decrypted.data", data_out);
DecryptCuda(n, data_in, data_out, key);
writeData(n, "recovered.data", data_out);
delete[] data_in;
delete[] data_out;
return 0;
}
| 20595be04df29a4d3ec26e9c1a0352c563c07cf2.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include "timer.h"
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(cudaGetLastError());
*/
static void checkCudaCall(cudaError_t result) {
if (result != cudaSuccess) {
cerr << "cuda error: " << cudaGetErrorString(result) << endl;
exit(1);
}
}
__global__ void encryptKernel(int n, char* deviceDataIn, char* deviceDataOut, int key) {
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= (unsigned)n) return; // guard: the grid is rounded up to whole blocks, so the last block can run past n
int temp = ((int)deviceDataIn[index]) + key;
if (temp > 127) { temp = temp % 127 - 1; }
deviceDataOut[index] = temp;
}
__global__ void decryptKernel(int n, char* deviceDataIn, char* deviceDataOut, int key) {
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= (unsigned)n) return; // guard: the grid is rounded up to whole blocks, so the last block can run past n
int temp = ((int)deviceDataIn[index]) - key;
if (temp < 0) { temp = temp % 127 + 128; }
deviceDataOut[index] = temp;
}
int fileSize() {
int size;
ifstream file ("original.data", ios::in|ios::binary|ios::ate);
if (file.is_open())
{
size = file.tellg();
file.close();
}
else {
cout << "Unable to open file";
size = -1;
}
return size;
}
int readData(char *fileName, char *data) {
streampos size;
ifstream file (fileName, ios::in|ios::binary|ios::ate);
if (file.is_open())
{
size = file.tellg();
file.seekg (0, ios::beg);
file.read (data, size);
file.close();
cout << "The entire file content is in memory." << endl;
}
else cout << "Unable to open file" << endl;
return 0;
}
int writeData(int size, char *fileName, char *data) {
ofstream file (fileName, ios::out|ios::binary|ios::trunc);
if (file.is_open())
{
file.write (data, size);
file.close();
cout << "The entire file content was written to file." << endl;
return 0;
}
else cout << "Unable to open file";
return -1;
}
int EncryptSeq (int n, char* data_in, char* data_out, int key)
{
int i;
timer sequentialTime = timer("Sequential encryption");
sequentialTime.start();
for (i=0; i<n; i++) {
int temporary = (int)data_in[i] + key;
if (temporary > 127) { temporary = (temporary % 127) - 1; }
data_out[i] = temporary;
}
sequentialTime.stop();
cout << fixed << setprecision(6);
cout << "Encryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl;
return 0;
}
int DecryptSeq (int n, char* data_in, char* data_out, int key)
{
int i;
timer sequentialTime = timer("Sequential decryption");
sequentialTime.start();
for (i=0; i<n; i++) {
int temporary = (int)data_in[i] - key;
if (temporary < 0) {temporary = (temporary % 127) + 128; }
data_out[i] = temporary;
}
sequentialTime.stop();
cout << fixed << setprecision(6);
cout << "Decryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl;
return 0;
}
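// For reference, a worked example of the wrap-around arithmetic above with the
// program's key of 444 % 127 = 63:
// encrypt 'z' (122): 122 + 63 = 185 > 127 -> 185 % 127 - 1 = 57
// decrypt 57: 57 - 63 = -6 < 0 -> -6 % 127 + 128 = 122 ('z' again)
// Note that the decrypt branch relies on C's % keeping the sign of the dividend,
// so -6 % 127 evaluates to -6 here.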
int EncryptCuda (int n, char* data_in, char* data_out,int key) {
int threadBlockSize = 512;
// allocate the vectors on the GPU
char* deviceDataIn = NULL;
checkCudaCall(cudaMalloc((void **) &deviceDataIn, n * sizeof(char)));
if (deviceDataIn == NULL) {
cout << "could not allocate memory!" << endl;
return -1;
}
char* deviceDataOut = NULL;
checkCudaCall(cudaMalloc((void **) &deviceDataOut, n * sizeof(char)));
if (deviceDataOut == NULL) {
checkCudaCall(cudaFree(deviceDataIn));
cout << "could not allocate memory!" << endl;
return -1;
}
timer kernelTime1 = timer("kernelTime");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(cudaMemcpy(deviceDataIn, data_in, n*sizeof(char), cudaMemcpyHostToDevice));
memoryTime.stop();
// execute kernel
kernelTime1.start();
encryptKernel<<<ceil((double)n/threadBlockSize), threadBlockSize>>>(n,deviceDataIn, deviceDataOut,key);
cudaDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(cudaGetLastError());
// copy result back
memoryTime.start();
checkCudaCall(cudaMemcpy(data_out, deviceDataOut, n * sizeof(char), cudaMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(cudaFree(deviceDataIn));
checkCudaCall(cudaFree(deviceDataOut));
cout << fixed << setprecision(6);
cout << "Encrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl;
cout << "Encrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl;
return 0;
}
int DecryptCuda (int n, char* data_in, char* data_out, int key) {
int threadBlockSize = 512;
// allocate the vectors on the GPU
char* deviceDataIn = NULL;
checkCudaCall(cudaMalloc((void **) &deviceDataIn, n * sizeof(char)));
if (deviceDataIn == NULL) {
cout << "could not allocate memory!" << endl;
return -1;
}
char* deviceDataOut = NULL;
checkCudaCall(cudaMalloc((void **) &deviceDataOut, n * sizeof(char)));
if (deviceDataOut == NULL) {
checkCudaCall(cudaFree(deviceDataIn));
cout << "could not allocate memory!" << endl;
return -1;
}
timer kernelTime1 = timer("kernelTime");
timer memoryTime = timer("memoryTime");
// copy the original vectors to the GPU
memoryTime.start();
checkCudaCall(cudaMemcpy(deviceDataIn, data_in, n*sizeof(char), cudaMemcpyHostToDevice));
memoryTime.stop();
// execute kernel
kernelTime1.start();
decryptKernel<<<ceil((double)n/threadBlockSize), threadBlockSize>>>(n,deviceDataIn, deviceDataOut,key);
cudaDeviceSynchronize();
kernelTime1.stop();
// check whether the kernel invocation was successful
checkCudaCall(cudaGetLastError());
// copy result back
memoryTime.start();
checkCudaCall(cudaMemcpy(data_out, deviceDataOut, n * sizeof(char), cudaMemcpyDeviceToHost));
memoryTime.stop();
checkCudaCall(cudaFree(deviceDataIn));
checkCudaCall(cudaFree(deviceDataOut));
cout << fixed << setprecision(6);
cout << "Decrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl;
cout << "Decrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl;
return 0;
}
int main(int argc, char* argv[]) {
int key = 444;
key = key % 127;
int n;
n = fileSize();
if (n == -1) {
cout << "File not found! Exiting ... " << endl;
exit(0);
}
char* data_in = new char[n];
char* data_out = new char[n];
readData("original.data", data_in);
cout << "Encrypting a file of " << n << " characters." << endl;
EncryptSeq(n, data_in, data_out, key);
writeData(n, "sequential.data", data_out);
EncryptCuda(n, data_in, data_out, key);
writeData(n, "cuda.data", data_out);
readData("cuda.data", data_in);
cout << "Decrypting a file of " << n << " characters" << endl;
DecryptSeq(n, data_in, data_out, key);
writeData(n, "sequential_decrypted.data", data_out);
DecryptCuda(n, data_in, data_out, key);
writeData(n, "recovered.data", data_out);
delete[] data_in;
delete[] data_out;
return 0;
}
|
e94fc9dbeecb51513996bff8a973dd6897e6d863.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include "containers.h"
using namespace std;
void BatchResource::init(hipStream_t) {
hipMalloc((void**) &overflows, sizeof(bool*) * N_CELL_PARALLEL);
hipMalloc((void**) &capacities, sizeof(CAPACITY_TYPE) * N_CELL_PARALLEL);
hipMalloc((void**) &module_specs, sizeof(ModuleSpec*) * N_CELL_PARALLEL);
hipMalloc((void**) &sdf_offsets, sizeof(unsigned int) * N_CELL_PARALLEL);
hipMalloc((void**) &s_timestamp_offsets, sizeof(unsigned int) * N_CELL_PARALLEL);
hipMalloc((void**) &s_delay_info_offsets, sizeof(unsigned int) * N_CELL_PARALLEL);
hipMalloc((void**) &s_value_offsets, sizeof(unsigned int) * N_CELL_PARALLEL);
hipMalloc((void**) &s_length_offsets, sizeof(unsigned int) * N_CELL_PARALLEL);
hipMalloc((void**) &sdf_num_rows, sizeof(unsigned int) * N_CELL_PARALLEL);
hipMalloc((void**) &input_data_schedule, sizeof(InputData) * N_CELL_PARALLEL * MAX_NUM_MODULE_INPUT);
hipMalloc((void**) &output_data_schedule, sizeof(Data) * N_CELL_PARALLEL * MAX_NUM_MODULE_OUTPUT);
}
void BatchResource::set(const ResourceBuffer& resource_buffer, hipStream_t stream) {
assert(resource_buffer.input_data_schedule.size() <= N_CELL_PARALLEL * MAX_NUM_MODULE_ARGS);
assert(resource_buffer.output_data_schedule.size() <= N_CELL_PARALLEL * MAX_NUM_MODULE_ARGS);
num_modules = resource_buffer.size;
auto direction = hipMemcpyHostToDevice;
hipMemcpyAsync(overflows, resource_buffer.overflows.data(), sizeof(bool*) * num_modules, direction, stream);
hipMemcpyAsync(capacities, resource_buffer.capacities.data(), sizeof(CAPACITY_TYPE) * num_modules, direction, stream);
hipMemcpyAsync(module_specs, resource_buffer.module_specs.data(), sizeof(ModuleSpec*) * num_modules, direction, stream);
hipMemcpyAsync(sdf_offsets, resource_buffer.sdf_offsets.data(), sizeof(unsigned int) * num_modules, direction, stream);
hipMemcpyAsync(s_timestamp_offsets, resource_buffer.s_timestamp_offsets.data(), sizeof(unsigned int) * num_modules, direction, stream);
hipMemcpyAsync(s_delay_info_offsets, resource_buffer.s_delay_info_offsets.data(), sizeof(unsigned int) * num_modules, direction, stream);
hipMemcpyAsync(s_value_offsets, resource_buffer.s_value_offsets.data(), sizeof(unsigned int) * num_modules, direction, stream);
hipMemcpyAsync(s_length_offsets, resource_buffer.s_length_offsets.data(), sizeof(unsigned int) * num_modules, direction, stream);
hipMemcpyAsync(sdf_num_rows, resource_buffer.sdf_num_rows.data(), sizeof(unsigned int) * num_modules, direction, stream);
hipMemcpyAsync(input_data_schedule, resource_buffer.input_data_schedule.data(), sizeof(InputData) * resource_buffer.input_data_schedule.size(), direction, stream);
hipMemcpyAsync(output_data_schedule, resource_buffer.output_data_schedule.data(), sizeof(Data) * resource_buffer.output_data_schedule.size(), direction, stream);
}
void BatchResource::free() const {
hipFree(overflows);
hipFree(capacities);
hipFree(module_specs);
hipFree(sdf_offsets);
hipFree(s_timestamp_offsets); hipFree(s_delay_info_offsets); hipFree(s_value_offsets); hipFree(s_length_offsets);
hipFree(sdf_num_rows);
hipFree(input_data_schedule); hipFree(output_data_schedule);
}
ResourceBuffer::ResourceBuffer() {
overflows.reserve(N_CELL_PARALLEL);
capacities.reserve(N_CELL_PARALLEL);
module_specs.reserve(N_CELL_PARALLEL);
sdf_offsets.reserve(N_CELL_PARALLEL);
s_timestamp_offsets.reserve(N_CELL_PARALLEL); s_delay_info_offsets.reserve(N_CELL_PARALLEL); s_value_offsets.reserve(N_CELL_PARALLEL); s_length_offsets.reserve(N_CELL_PARALLEL);
sdf_num_rows.reserve(N_CELL_PARALLEL);
input_data_schedule.reserve(N_CELL_PARALLEL * MAX_NUM_MODULE_INPUT);
output_data_schedule.reserve(N_CELL_PARALLEL * MAX_NUM_MODULE_OUTPUT);
}
void ResourceBuffer::finish_module() {
size++;
input_data_schedule.resize(size * MAX_NUM_MODULE_INPUT);
output_data_schedule.resize(size * MAX_NUM_MODULE_OUTPUT);
}
void ResourceBuffer::clear() {
overflows.clear();
capacities.clear();
module_specs.clear();
sdf_offsets.clear(); s_timestamp_offsets.clear(); s_delay_info_offsets.clear(); s_value_offsets.clear(); s_length_offsets.clear();
sdf_num_rows.clear();
input_data_schedule.clear();
output_data_schedule.clear();
size = 0;
}
| e94fc9dbeecb51513996bff8a973dd6897e6d863.cu | #include <cassert>
#include "containers.h"
using namespace std;
void BatchResource::init(cudaStream_t) {
cudaMalloc((void**) &overflows, sizeof(bool*) * N_CELL_PARALLEL);
cudaMalloc((void**) &capacities, sizeof(CAPACITY_TYPE) * N_CELL_PARALLEL);
cudaMalloc((void**) &module_specs, sizeof(ModuleSpec*) * N_CELL_PARALLEL);
cudaMalloc((void**) &sdf_offsets, sizeof(unsigned int) * N_CELL_PARALLEL);
cudaMalloc((void**) &s_timestamp_offsets, sizeof(unsigned int) * N_CELL_PARALLEL);
cudaMalloc((void**) &s_delay_info_offsets, sizeof(unsigned int) * N_CELL_PARALLEL);
cudaMalloc((void**) &s_value_offsets, sizeof(unsigned int) * N_CELL_PARALLEL);
cudaMalloc((void**) &s_length_offsets, sizeof(unsigned int) * N_CELL_PARALLEL);
cudaMalloc((void**) &sdf_num_rows, sizeof(unsigned int) * N_CELL_PARALLEL);
cudaMalloc((void**) &input_data_schedule, sizeof(InputData) * N_CELL_PARALLEL * MAX_NUM_MODULE_INPUT);
cudaMalloc((void**) &output_data_schedule, sizeof(Data) * N_CELL_PARALLEL * MAX_NUM_MODULE_OUTPUT);
}
void BatchResource::set(const ResourceBuffer& resource_buffer, cudaStream_t stream) {
assert(resource_buffer.input_data_schedule.size() <= N_CELL_PARALLEL * MAX_NUM_MODULE_ARGS);
assert(resource_buffer.output_data_schedule.size() <= N_CELL_PARALLEL * MAX_NUM_MODULE_ARGS);
num_modules = resource_buffer.size;
auto direction = cudaMemcpyHostToDevice;
cudaMemcpyAsync(overflows, resource_buffer.overflows.data(), sizeof(bool*) * num_modules, direction, stream);
cudaMemcpyAsync(capacities, resource_buffer.capacities.data(), sizeof(CAPACITY_TYPE) * num_modules, direction, stream);
cudaMemcpyAsync(module_specs, resource_buffer.module_specs.data(), sizeof(ModuleSpec*) * num_modules, direction, stream);
cudaMemcpyAsync(sdf_offsets, resource_buffer.sdf_offsets.data(), sizeof(unsigned int) * num_modules, direction, stream);
cudaMemcpyAsync(s_timestamp_offsets, resource_buffer.s_timestamp_offsets.data(), sizeof(unsigned int) * num_modules, direction, stream);
cudaMemcpyAsync(s_delay_info_offsets, resource_buffer.s_delay_info_offsets.data(), sizeof(unsigned int) * num_modules, direction, stream);
cudaMemcpyAsync(s_value_offsets, resource_buffer.s_value_offsets.data(), sizeof(unsigned int) * num_modules, direction, stream);
cudaMemcpyAsync(s_length_offsets, resource_buffer.s_length_offsets.data(), sizeof(unsigned int) * num_modules, direction, stream);
cudaMemcpyAsync(sdf_num_rows, resource_buffer.sdf_num_rows.data(), sizeof(unsigned int) * num_modules, direction, stream);
cudaMemcpyAsync(input_data_schedule, resource_buffer.input_data_schedule.data(), sizeof(InputData) * resource_buffer.input_data_schedule.size(), direction, stream);
cudaMemcpyAsync(output_data_schedule, resource_buffer.output_data_schedule.data(), sizeof(Data) * resource_buffer.output_data_schedule.size(), direction, stream);
}
void BatchResource::free() const {
cudaFree(overflows);
cudaFree(capacities);
cudaFree(module_specs);
cudaFree(sdf_offsets);
cudaFree(s_timestamp_offsets); cudaFree(s_delay_info_offsets); cudaFree(s_value_offsets); cudaFree(s_length_offsets);
cudaFree(sdf_num_rows);
cudaFree(input_data_schedule); cudaFree(output_data_schedule);
}
ResourceBuffer::ResourceBuffer() {
overflows.reserve(N_CELL_PARALLEL);
capacities.reserve(N_CELL_PARALLEL);
module_specs.reserve(N_CELL_PARALLEL);
sdf_offsets.reserve(N_CELL_PARALLEL);
s_timestamp_offsets.reserve(N_CELL_PARALLEL); s_delay_info_offsets.reserve(N_CELL_PARALLEL); s_value_offsets.reserve(N_CELL_PARALLEL); s_length_offsets.reserve(N_CELL_PARALLEL);
sdf_num_rows.reserve(N_CELL_PARALLEL);
input_data_schedule.reserve(N_CELL_PARALLEL * MAX_NUM_MODULE_INPUT);
output_data_schedule.reserve(N_CELL_PARALLEL * MAX_NUM_MODULE_OUTPUT);
}
void ResourceBuffer::finish_module() {
size++;
input_data_schedule.resize(size * MAX_NUM_MODULE_INPUT);
output_data_schedule.resize(size * MAX_NUM_MODULE_OUTPUT);
}
void ResourceBuffer::clear() {
overflows.clear();
capacities.clear();
module_specs.clear();
sdf_offsets.clear(); s_timestamp_offsets.clear(); s_delay_info_offsets.clear(); s_value_offsets.clear(); s_length_offsets.clear();
sdf_num_rows.clear();
input_data_schedule.clear();
output_data_schedule.clear();
size = 0;
}
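// For reference: the simulator code that drives these helpers is not shown, so the
// call sequence sketched below is only inferred from the methods above; the stream
// handling and the kernel-launch step are assumptions.
// BatchResource batch;
// cudaStream_t stream; cudaStreamCreate(&stream);
// batch.init(stream);     // one-time device allocations sized for N_CELL_PARALLEL modules
// ResourceBuffer buf;     // host-side staging, refilled for every batch
// /* ...fill buf, calling buf.finish_module() once per scheduled module... */
// batch.set(buf, stream); // asynchronous host-to-device copies of the current batch
// /* ...launch the simulation kernels that consume the device-side arrays... */
// batch.free();           // release the device allocations when simulation ends
// buf.clear();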
|
a638a12a0312e4a35ead1884c95a301d04b01eb1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <stdlib.h>
#include <fstream>
#include <string.h>
#include <vector>
#include <stdio.h>
#include <float.h>
#include <sstream>
#include <algorithm>
using namespace std;
// helper method which takes a line, parses each comma-separated number as a float
// and appends it to the given vector
void stringToIntParser (string &temp, vector <float> &temp2)
{
stringstream str(temp);
while (str.good() )
{
string substr;
getline(str,substr,',');
temp2.push_back(atof(substr.c_str()));
}
}
// helper method to check if a line has # character of not
bool hasHash (string s){
for ( int i= 0; i < s.length(); i++) {
if (s.at(i) == '#')
return true;
}
return false;
}
// the kernel function which does the calculation for 3 dimensions
__global__ void caclulation3D(float *a, float *b, float *c, int width, int height, int depth, float vK) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < (width*height*depth)) {
int x = index % width; // current column -- x direction width
int y = (index / width) % height; // current row y direction height
int z = index / (width*height); // current depth in z direction
int left = max (x-1, 0);
int right = min (x+1, width-1);
int top = max (0, y-1);
int bottom = min(y+1, height-1);
int front = max (z -1, 0);
int back = min (z+1, depth - 1);
if(c[index] != FLT_MIN) {
a[index] = b[index] + vK * ( b[front*width*height + y*width + x] + b[back*width*height + y*width + x]
+b[z*width*height + top*width + x] + b[z*width*height + bottom*width + x]
+ b[z*width*height + y*width + left] + b[z*width*height + y*width + right] - 6*b[index]);
}
}
}
// the kernel function which does the calculation
__global__ void caclulation2D(float *a, float *b, float *c, int width, int height, float vK) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < width*height) {
int y = index / width; // current row y direction height
int x = index % width; // current column -- x direction width
int left = max (x-1, 0);
int right = min (x+1, width-1);
int top = max (0, y-1);
int bottom= min(y+1, height-1);
//printf(" the index are index = %d, (x,y) = (%d, %d) left =%d, right = %d, top = %d , bottom = %d \n",
//index, x, y, left, right, top, bottom);
if(c[index] != FLT_MIN) {
a[index] = b[index] + vK *(b[x+top*width] + b[x+bottom*width] + b[left+y*width] + b[right + y*width] - 4*b[index]);
}
}
}
// initializing the cpu memory for the 2D grid points
void initialize2D(float *array1, float * array2, int width, int height, float fixed, std::vector<float> v) {
for(int y = 0; y < height; y++) {
for(int x = 0; x<width; x++) {
array1[x+ width*y] = fixed;
array2[x+ width*y] = fixed;
}
}
int hx, hy, wx, wy;
float heat;
int idx = 0;
while(idx < v.size()) {
hx = (int) v[idx], hy = (int) v[idx+1];
wx = (int) v[idx+2], wy = (int) v[idx+3];
heat = v[idx+4];
idx += 5;
for(int y = hy; y < hy + wy; y++) {
for (int x = hx; x < hx + wx; x++) {
array1[x + width*y] = heat;
array2[x+width*y] = FLT_MIN;
}
}
}
}
// initializing the cpu array for 3D grid points
void initialize3D(float *array1, float * array2, int width, int height, int depth, float fixed, std::vector<float> v) {
for(int z = 0; z < depth; z++) {
for (int y = 0; y < height; y++) {
for(int x = 0; x < width; x++) {
array1[z*width*height + y*width + x] = fixed;
array2[z*width*height + y*width + x] = fixed;
}
}
}
int hx, hy, hz, wx, wy, wz;
float heat;
int idx = 0;
while(idx < v.size()) {
hx = (int) v[idx], hy = (int) v[idx+1], hz = (int) v[idx+2];
wx = (int) v[idx+3], wy = (int) v[idx+4], wz =(int) v[idx+5];
heat = v[idx+6];
idx += 7;
for(int z = hz; z <hz+wz; z++) {
for(int y = hy; y < hy +wy; y++) {
for (int x = hx; x < hx + wx; x++) {
array1[z*width*height + y*width + x] = heat;
array2[z*width*height + y*width + x] = FLT_MIN;
}
}
}
}
}
// helper method which writes the final result into a csv file
int writeoutput(float *array, char const *s, int width, int height, int depth, int dimension) {
FILE *f = fopen(s, "w+");
if(f == NULL) return -1;
if(dimension == 3) {
for(int z = 0; z < depth; z++) {
for(int y = 0; y < height; y++) {
for(int x = 0; x < width; x++) {
if(x != width-1)
fprintf(f, "%f, ", array[z*width*height + y*width + x]);
else
fprintf(f, "%f\n", array[z*width*height + y*width + x]); // print a new line after each row
}
}
fprintf(f, "\n"); // printing a blank line
}
} else {
for(int y = 0; y < height; y++) {
for(int x = 0; x < width; x++) {
if(x != width-1)
fprintf(f, "%f, ", array[y*width + x]);
else
fprintf(f, "%f\n", array[y*width + x]);
}
}
}
fclose(f);
return 0;
}
// main starts here
int main (int argc, char** argv ) {
// initializing the local variable
ifstream configfile(argv[1]);
string line;
vector <string> stringarray; //string vector to store all the lines from the input file
int dimension; // dimension 2D or 3D
vector<float>heightWeidthDepth; //height (row), width (col) and depth of the grid
float startTemp; // default starting temperature for the grid
vector<float>heaterLocation;
int timeStep; // total time step
float valueK; //the k value which is constant
// start reading all the lines from the file
while (getline(configfile, line)) {
if (line.empty() || hasHash(line)) {}
else
stringarray.push_back(line);
}
// the first value is the dimension; convert it into an integer
dimension = atoi(stringarray[0].c_str());
// the second value is constant k
valueK = atof(stringarray[1].c_str());
// the third value is the number of time steps as an integer
timeStep = atoi(stringarray[2].c_str());
// the height and width and depth which is in one line
stringToIntParser(stringarray[3], heightWeidthDepth);
// the 4th value is the default starting temperature
startTemp = atof(stringarray[4].c_str());
// the rest of the values are heater locations,
// of which there can be zero or more
for ( int i = 5; i < stringarray.size(); ++i)
{
stringToIntParser(stringarray[i], heaterLocation);
}
int height = (int) heightWeidthDepth[1]; // y axis
int width = (int) heightWeidthDepth[0]; // x axis
int depth = 0;
int size;
if(dimension == 3) {depth = (int) heightWeidthDepth[2];}
if(dimension == 3)
size = height*width*depth*sizeof(float); // total number of points
else
size = height*width*sizeof(float);
// declare the cpu array
float *array1 = (float*)malloc(size);
float *array2 = (float*)malloc(size);
// declare the gpu variable
float *gArray1, *gArray2, *gArray3;
hipMalloc((void **) &gArray1, size);
hipMalloc((void **) &gArray2, size);
hipMalloc((void **) &gArray3, size);
// initialize the cpu array
if(dimension == 2)
initialize2D(array1, array2, width, height, startTemp, heaterLocation);
else
initialize3D(array1, array2, width, height, depth, startTemp, heaterLocation);
// copy all data to the device from cpu
hipMemcpy(gArray1, array1, size, hipMemcpyHostToDevice);
hipMemcpy(gArray2, array1, size, hipMemcpyHostToDevice);
hipMemcpy(gArray3, array2, size, hipMemcpyHostToDevice);
// call the kernel, ping-ponging the two device buffers: after odd-numbered time steps
// (1, 3, 5, ...) gArray1 holds the newest values, after even-numbered steps gArray2 does
int points = (depth == 0)? height*width:height*width*depth;
int numOfThreads = (points < 512) ? 512: 1024;
for(int i = 0; i < timeStep; i++) {
if(i%2 == 0) {
if(dimension == 2)
hipLaunchKernelGGL(( caclulation2D), dim3((points/numOfThreads) + 1) , dim3(numOfThreads) , 0, 0, gArray1, gArray2, gArray3, width, height, valueK);
else
hipLaunchKernelGGL(( caclulation3D) , dim3((points/numOfThreads) + 1) , dim3(numOfThreads), 0, 0, gArray1, gArray2, gArray3, width, height, depth, valueK);
} else {
if(dimension == 2)
hipLaunchKernelGGL(( caclulation2D), dim3((points/numOfThreads) + 1) , dim3(numOfThreads) , 0, 0, gArray2, gArray1, gArray3, width,height, valueK);
else
hipLaunchKernelGGL(( caclulation3D) , dim3((points/numOfThreads) + 1) , dim3(numOfThreads), 0, 0, gArray2, gArray1, gArray3, width, height, depth, valueK);
}
hipDeviceSynchronize();
}
// read from gpu to cpu based on timestep
if(timeStep % 2 == 0)
hipMemcpy(array1, gArray2, size, hipMemcpyDeviceToHost);
else
hipMemcpy(array1, gArray1, size, hipMemcpyDeviceToHost);
// write output to a .csv file
writeoutput(array1, "heatOutput.csv", width, height, depth, dimension);
// free all the allocated memory both in gpu and cpu
hipFree(gArray1), hipFree(gArray2), hipFree(gArray3);
free(array1), free(array2);
return 0;
}
| a638a12a0312e4a35ead1884c95a301d04b01eb1.cu | #include <cstdlib>
#include <iostream>
#include <iomanip>
#include <stdlib.h>
#include <fstream>
#include <string.h>
#include <vector>
#include <stdio.h>
#include <float.h>
#include <sstream>
#include <algorithm>
using namespace std;
// helper method which takes a line, parses each comma-separated number as a float
// and appends it to the given vector
void stringToIntParser (string &temp, vector <float> &temp2)
{
stringstream str(temp);
while (str.good() )
{
string substr;
getline(str,substr,',');
temp2.push_back(atof(substr.c_str()));
}
}
// helper method to check if a line has # character of not
bool hasHash (string s){
for ( int i= 0; i < s.length(); i++) {
if (s.at(i) == '#')
return true;
}
return false;
}
// the kernel function which does the calculation for 3 dimensions
__global__ void caclulation3D(float *a, float *b, float *c, int width, int height, int depth, float vK) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < (width*height*depth)) {
int x = index % width; // current column -- x direction width
int y = (index / width) % height; // current row y direction height
int z = index / (width*height); // current depth in z direction
int left = max (x-1, 0);
int right = min (x+1, width-1);
int top = max (0, y-1);
int bottom = min(y+1, height-1);
int front = max (z -1, 0);
int back = min (z+1, depth - 1);
if(c[index] != FLT_MIN) {
a[index] = b[index] + vK * ( b[front*width*height + y*width + x] + b[back*width*height + y*width + x]
+b[z*width*height + top*width + x] + b[z*width*height + bottom*width + x]
+ b[z*width*height + y*width + left] + b[z*width*height + y*width + right] - 6*b[index]);
}
}
}
// the kernel function which does the calculation
__global__ void caclulation2D(float *a, float *b, float *c, int width, int height, float vK) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < width*height) {
int y = index / width; // current row y direction height
int x = index % width; // current column -- x direction width
int left = max (x-1, 0);
int right = min (x+1, width-1);
int top = max (0, y-1);
int bottom= min(y+1, height-1);
//printf(" the index are index = %d, (x,y) = (%d, %d) left =%d, right = %d, top = %d , bottom = %d \n",
//index, x, y, left, right, top, bottom);
if(c[index] != FLT_MIN) {
a[index] = b[index] + vK *(b[x+top*width] + b[x+bottom*width] + b[left+y*width] + b[right + y*width] - 4*b[index]);
}
}
}
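// For reference: both kernels above perform one explicit update step of the discrete
// heat equation,
// u_new[p] = u[p] + vK * ( sum of u over the face neighbours of p - 2*D*u[p] ),
// with D = 2 (4 neighbours) in 2D and D = 3 (6 neighbours) in 3D. Neighbour indices
// are clamped at the boundaries, and cells whose entry in the third array equals
// FLT_MIN (the heater cells) are left unchanged.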
// initializing the cpu memory for the 2D grid points
void initialize2D(float *array1, float * array2, int width, int height, float fixed, std::vector<float> v) {
for(int y = 0; y < height; y++) {
for(int x = 0; x<width; x++) {
array1[x+ width*y] = fixed;
array2[x+ width*y] = fixed;
}
}
int hx, hy, wx, wy;
float heat;
int idx = 0;
while(idx < v.size()) {
hx = (int) v[idx], hy = (int) v[idx+1];
wx = (int) v[idx+2], wy = (int) v[idx+3];
heat = v[idx+4];
idx += 5;
for(int y = hy; y < hy + wy; y++) {
for (int x = hx; x < hx + wx; x++) {
array1[x + width*y] = heat;
array2[x+width*y] = FLT_MIN;
}
}
}
}
// initializing the cpu array for 3D grid points
void initialize3D(float *array1, float * array2, int width, int height, int depth, float fixed, std::vector<float> v) {
for(int z = 0; z < depth; z++) {
for (int y = 0; y < height; y++) {
for(int x = 0; x < width; x++) {
array1[z*width*height + y*width + x] = fixed;
array2[z*width*height + y*width + x] = fixed;
}
}
}
int hx, hy, hz, wx, wy, wz;
float heat;
int idx = 0;
while(idx < v.size()) {
hx = (int) v[idx], hy = (int) v[idx+1], hz = (int) v[idx+2];
wx = (int) v[idx+3], wy = (int) v[idx+4], wz =(int) v[idx+5];
heat = v[idx+6];
idx += 7;
for(int z = hz; z <hz+wz; z++) {
for(int y = hy; y < hy +wy; y++) {
for (int x = hx; x < hx + wx; x++) {
array1[z*width*height + y*width + x] = heat;
array2[z*width*height + y*width + x] = FLT_MIN;
}
}
}
}
}
// helper method which writes the final result into a csv file
int writeoutput(float *array, char const *s, int width, int height, int depth, int dimension) {
FILE *f = fopen(s, "w+");
if(f == NULL) return -1;
if(dimension == 3) {
for(int z = 0; z < depth; z++) {
for(int y = 0; y < height; y++) {
for(int x = 0; x < width; x++) {
if(x != width-1)
fprintf(f, "%f, ", array[z*width*height + y*width + x]);
else
fprintf(f, "%f\n", array[z*width*height + y*width + x]); // print a new line after each row
}
}
fprintf(f, "\n"); // printing a blank line
}
} else {
for(int y = 0; y < height; y++) {
for(int x = 0; x < width; x++) {
if(x != width-1)
fprintf(f, "%f, ", array[y*width + x]);
else
fprintf(f, "%f\n", array[y*width + x]);
}
}
}
fclose(f);
return 0;
}
// main starts here
int main (int argc, char** argv ) {
// initializing the local variable
ifstream configfile(argv[1]);
string line;
vector <string> stringarray; //string vector to store all the lines from the input file
int dimension; // dimension 2D or 3D
vector<float>heightWeidthDepth; //height (row), width (col) and depth of the grid
float startTemp; // default starting temperature for the grid
vector<float>heaterLocation;
int timeStep; // total time step
float valueK; //the k value which is constant
// start reading all the lines from the file
while (getline(configfile, line)) {
if (line.empty() || hasHash(line)) {}
else
stringarray.push_back(line);
}
// the first value is the dimension; convert it into an integer
dimension = atoi(stringarray[0].c_str());
// the second value is constant k
valueK = atof(stringarray[1].c_str());
// the third value is the number of time steps as an integer
timeStep = atoi(stringarray[2].c_str());
// the height and width and depth which is in one line
stringToIntParser(stringarray[3], heightWeidthDepth);
// the 4th value is the default starting temperature
startTemp = atof(stringarray[4].c_str());
// the rest of the values are heater locations,
// of which there can be zero or more
for ( int i = 5; i < stringarray.size(); ++i)
{
stringToIntParser(stringarray[i], heaterLocation);
}
int height = (int) heightWeidthDepth[1]; // y axis
int width = (int) heightWeidthDepth[0]; // x axis
int depth = 0;
int size;
if(dimension == 3) {depth = (int) heightWeidthDepth[2];}
if(dimension == 3)
size = height*width*depth*sizeof(float); // total number of points
else
size = height*width*sizeof(float);
// declare the cpu array
float *array1 = (float*)malloc(size);
float *array2 = (float*)malloc(size);
// declare the gpu variable
float *gArray1, *gArray2, *gArray3;
cudaMalloc((void **) &gArray1, size);
cudaMalloc((void **) &gArray2, size);
cudaMalloc((void **) &gArray3, size);
// initialize the cpu array
if(dimension == 2)
initialize2D(array1, array2, width, height, startTemp, heaterLocation);
else
initialize3D(array1, array2, width, height, depth, startTemp, heaterLocation);
// copy all data to the device from cpu
cudaMemcpy(gArray1, array1, size, cudaMemcpyHostToDevice);
cudaMemcpy(gArray2, array1, size, cudaMemcpyHostToDevice);
cudaMemcpy(gArray3, array2, size, cudaMemcpyHostToDevice);
// call the kernel, ping-ponging the two device buffers: after odd-numbered time steps
// (1, 3, 5, ...) gArray1 holds the newest values, after even-numbered steps gArray2 does
int points = (depth == 0)? height*width:height*width*depth;
int numOfThreads = (points < 512) ? 512: 1024;
for(int i = 0; i < timeStep; i++) {
if(i%2 == 0) {
if(dimension == 2)
caclulation2D<<<(points/numOfThreads) + 1 , numOfThreads >>>(gArray1, gArray2, gArray3, width, height, valueK);
else
caclulation3D <<< (points/numOfThreads) + 1 , numOfThreads>>> (gArray1, gArray2, gArray3, width, height, depth, valueK);
} else {
if(dimension == 2)
caclulation2D<<<(points/numOfThreads) + 1 , numOfThreads >>>(gArray2, gArray1, gArray3, width,height, valueK);
else
caclulation3D <<< (points/numOfThreads) + 1 , numOfThreads>>> (gArray2, gArray1, gArray3, width, height, depth, valueK);
}
cudaDeviceSynchronize();
}
// read from gpu to cpu based on timestep
if(timeStep % 2 == 0)
cudaMemcpy(array1, gArray2, size, cudaMemcpyDeviceToHost);
else
cudaMemcpy(array1, gArray1, size, cudaMemcpyDeviceToHost);
// write output to a .csv file
writeoutput(array1, "heatOutput.csv", width, height, depth, dimension);
// free all the allocated memory both in gpu and cpu
cudaFree(gArray1), cudaFree(gArray2), cudaFree(gArray3);
free(array1), free(array2);
return 0;
}
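// For reference: an example of the configuration file (passed as argv[1]) that main()
// above expects; the numbers here are made up. Lines containing '#' are skipped. For
// a 2D run each heater line is "x, y, width, height, temperature"; for 3D it is
// "x, y, z, wx, wy, wz, temperature".
// # dimension
// 2
// # k constant
// 0.15
// # number of time steps
// 100
// # width, height
// 50, 40
// # default starting temperature
// 10.0
// # heater: x, y, width, height, temperature
// 5, 5, 4, 4, 200.0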
|
4a0d4dadbc2ff2359d4332c496238df350533864.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "histogram_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *I = NULL;
hipMalloc(&I, XSIZE*YSIZE);
float *minI = NULL;
hipMalloc(&minI, XSIZE*YSIZE);
float *maxI = NULL;
hipMalloc(&maxI, XSIZE*YSIZE);
float *mask = NULL;
hipMalloc(&mask, XSIZE*YSIZE);
int nbins = 1;
int c = 2;
int h = YSIZE;
int w = XSIZE;
float *hist = NULL;
hipMalloc(&hist, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
histogram_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, I,minI,maxI,mask,nbins,c,h,w,hist);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
histogram_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, I,minI,maxI,mask,nbins,c,h,w,hist);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
histogram_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, I,minI,maxI,mask,nbins,c,h,w,hist);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4a0d4dadbc2ff2359d4332c496238df350533864.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "histogram_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *I = NULL;
cudaMalloc(&I, XSIZE*YSIZE);
float *minI = NULL;
cudaMalloc(&minI, XSIZE*YSIZE);
float *maxI = NULL;
cudaMalloc(&maxI, XSIZE*YSIZE);
float *mask = NULL;
cudaMalloc(&mask, XSIZE*YSIZE);
int nbins = 1;
int c = 2;
int h = YSIZE;
int w = XSIZE;
float *hist = NULL;
cudaMalloc(&hist, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
histogram_kernel<<<gridBlock,threadBlock>>>(I,minI,maxI,mask,nbins,c,h,w,hist);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
histogram_kernel<<<gridBlock,threadBlock>>>(I,minI,maxI,mask,nbins,c,h,w,hist);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
histogram_kernel<<<gridBlock,threadBlock>>>(I,minI,maxI,mask,nbins,c,h,w,hist);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
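// For reference: argv[1] selects how many of the 7 predefined matrix sizes to sweep
// (passing "7" covers all of them); the binary name below is only a placeholder.
// ./histogram_bench 7
// Each configuration prints one line of the form
// [elapsed_microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]
// where the time covers 1000 back-to-back kernel launches after a 10-launch warm-up.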
d7f8f0feba3040786581945eb1df91190a9d1651.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_2_b;
int xdim0_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_2_b;
int ydim0_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_2_b;
int xdim1_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_2_b;
int ydim1_update_halo_kernel4_plus_2_b_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel4_plus_2_b * (y) + \
xdim0_update_halo_kernel4_plus_2_b * ydim0_update_halo_kernel4_plus_2_b * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel4_plus_2_b * (y) + \
xdim1_update_halo_kernel4_plus_2_b * ydim1_update_halo_kernel4_plus_2_b * \
(z))
// user function
__device__
inline void
update_halo_kernel4_plus_2_b(double *vol_flux_y, double *mass_flux_y,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Y] == 1)
vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(-2, 0, 0)];
if (fields[FIELD_MASS_FLUX_Y] == 1)
mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(-2, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_2_b(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_2_b +
idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_2_b *
ydim0_update_halo_kernel4_plus_2_b;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_2_b +
idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_2_b *
ydim1_update_halo_kernel4_plus_2_b;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_2_b(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel4_plus_2_b(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 123))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(123, "update_halo_kernel4_plus_2_b");
OPS_kernels[123].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_2_b_h ||
ydim0 != ydim0_update_halo_kernel4_plus_2_b_h ||
xdim1 != xdim1_update_halo_kernel4_plus_2_b_h ||
ydim1 != ydim1_update_halo_kernel4_plus_2_b_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel4_plus_2_b, &xdim0, sizeof(int));
xdim0_update_halo_kernel4_plus_2_b_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel4_plus_2_b, &ydim0, sizeof(int));
ydim0_update_halo_kernel4_plus_2_b_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel4_plus_2_b, &xdim1, sizeof(int));
xdim1_update_halo_kernel4_plus_2_b_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel4_plus_2_b, &ydim1, sizeof(int));
ydim1_update_halo_kernel4_plus_2_b_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[123].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_2_b), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[123].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[123].mpi_time += t2 - t1;
OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| d7f8f0feba3040786581945eb1df91190a9d1651.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_2_b;
int xdim0_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_2_b;
int ydim0_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_2_b;
int xdim1_update_halo_kernel4_plus_2_b_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_2_b;
int ydim1_update_halo_kernel4_plus_2_b_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel4_plus_2_b * (y) + \
xdim0_update_halo_kernel4_plus_2_b * ydim0_update_halo_kernel4_plus_2_b * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel4_plus_2_b * (y) + \
xdim1_update_halo_kernel4_plus_2_b * ydim1_update_halo_kernel4_plus_2_b * \
(z))
// user function
__device__
inline void
update_halo_kernel4_plus_2_b(double *vol_flux_y, double *mass_flux_y,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Y] == 1)
vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(-2, 0, 0)];
if (fields[FIELD_MASS_FLUX_Y] == 1)
mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(-2, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_2_b(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_2_b +
idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_2_b *
ydim0_update_halo_kernel4_plus_2_b;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_2_b +
idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_2_b *
ydim1_update_halo_kernel4_plus_2_b;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_2_b(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel4_plus_2_b(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 123))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(123, "update_halo_kernel4_plus_2_b");
OPS_kernels[123].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_2_b_h ||
ydim0 != ydim0_update_halo_kernel4_plus_2_b_h ||
xdim1 != xdim1_update_halo_kernel4_plus_2_b_h ||
ydim1 != ydim1_update_halo_kernel4_plus_2_b_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel4_plus_2_b, &xdim0, sizeof(int));
xdim0_update_halo_kernel4_plus_2_b_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel4_plus_2_b, &ydim0, sizeof(int));
ydim0_update_halo_kernel4_plus_2_b_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel4_plus_2_b, &xdim1, sizeof(int));
xdim1_update_halo_kernel4_plus_2_b_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel4_plus_2_b, &ydim1, sizeof(int));
ydim1_update_halo_kernel4_plus_2_b_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[123].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel4_plus_2_b<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[123].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[123].mpi_time += t2 - t1;
OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
f5b9d3ed456b00a8c068217eb1c63ec49e053430.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include "cta_config.h"
#include "../common/cuda_check.h"
extern __global__ void Histogram(float* input, int* output, int length);
extern __global__ void reduceAll(int* input, int* output, int num_parts);
void RandFloatArray(float* ptr, int length) {
for (int i = 0; i < length; ++i) {
float val = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
ptr[i] = val;
}
return;
}
void AssertArrayEqual(int* ptr1, int* ptr2, int length) {
int num_diff = 0;
for (int i = 0; i < length; ++i) {
if (ptr1[i] != ptr2[i]) {
printf("Index %d: %d vs. %d\n", i, ptr1[i], ptr2[i]);
num_diff++;
}
}
assert(num_diff < 10);
return;
}
int main(int argc, char** argv) {
if (argc < 2) {
printf("Usage: ./histogram <num of elements>");
return -1;
}
int length = atoi(argv[1]);
printf("Running the histogram on %d elements\n", length);
float* host_input = (float*) malloc(length * sizeof(float));
int* host_output = (int*) malloc(NUM_BINS * sizeof(int));
RandFloatArray(host_input, length);
for (int i = 0; i < NUM_BINS; ++i) {
host_output[i] = 0;
}
for (int i = 0; i < length; ++i) {
float val = host_input[i] * 255.0;
int bin_id = (int)(val);
CLAMP(bin_id, 0, NUM_BINS);
host_output[bin_id] += 1;
}
printf("Completed ground truth computation!\n");
float* device_input;
int* device_part_output;
int* device_output;
CUDA_CHECK(hipMalloc((void**) &device_input, length * sizeof(float)));
CUDA_CHECK(hipMalloc((void**) &device_part_output,
NUM_BLOCKS * NUM_BINS * sizeof(int)));
CUDA_CHECK(hipMalloc((void**) &device_output, NUM_BINS * sizeof(int)));
int* results = (int*) malloc(NUM_BINS * sizeof(int));
CUDA_CHECK(hipMemcpy(device_input, host_input, length * sizeof(float),
hipMemcpyHostToDevice));
#ifdef MEASURE_POWER
while (true) {
#endif
hipLaunchKernelGGL(( Histogram), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0,
device_input, device_part_output, length);
hipDeviceSynchronize();
hipLaunchKernelGGL(( reduceAll), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0,
device_part_output, device_output, NUM_BLOCKS);
hipDeviceSynchronize();
#ifdef MEASURE_POWER
}
#endif
printf("Completed GPU computation!\n");
CUDA_CHECK(hipMemcpy(results, device_output,
NUM_BINS * sizeof(int), hipMemcpyDeviceToHost));
AssertArrayEqual(host_output, results, NUM_BINS);
printf("Correctness Check: Accepted!\n");
free(host_input);
free(host_output);
free(results);
CUDA_CHECK(hipFree(device_input));
CUDA_CHECK(hipFree(device_part_output));
CUDA_CHECK(hipFree(device_output));
return 0;
}
| f5b9d3ed456b00a8c068217eb1c63ec49e053430.cu | #include <stdlib.h>
#include <stdio.h>
#include <string>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include "cta_config.h"
#include "../common/cuda_check.h"
extern __global__ void Histogram(float* input, int* output, int length);
extern __global__ void reduceAll(int* input, int* output, int num_parts);
void RandFloatArray(float* ptr, int length) {
for (int i = 0; i < length; ++i) {
float val = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
ptr[i] = val;
}
return;
}
void AssertArrayEqual(int* ptr1, int* ptr2, int length) {
int num_diff = 0;
for (int i = 0; i < length; ++i) {
if (ptr1[i] != ptr2[i]) {
printf("Index %d: %d vs. %d\n", i, ptr1[i], ptr2[i]);
num_diff++;
}
}
assert(num_diff < 10);
return;
}
int main(int argc, char** argv) {
if (argc < 2) {
printf("Usage: ./histogram <num of elements>");
return -1;
}
int length = atoi(argv[1]);
printf("Running the histogram on %d elements\n", length);
float* host_input = (float*) malloc(length * sizeof(float));
int* host_output = (int*) malloc(NUM_BINS * sizeof(int));
RandFloatArray(host_input, length);
for (int i = 0; i < NUM_BINS; ++i) {
host_output[i] = 0;
}
for (int i = 0; i < length; ++i) {
float val = host_input[i] * 255.0;
int bin_id = (int)(val);
CLAMP(bin_id, 0, NUM_BINS);
host_output[bin_id] += 1;
}
printf("Completed ground truth computation!\n");
float* device_input;
int* device_part_output;
int* device_output;
CUDA_CHECK(cudaMalloc((void**) &device_input, length * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**) &device_part_output,
NUM_BLOCKS * NUM_BINS * sizeof(int)));
CUDA_CHECK(cudaMalloc((void**) &device_output, NUM_BINS * sizeof(int)));
int* results = (int*) malloc(NUM_BINS * sizeof(int));
CUDA_CHECK(cudaMemcpy(device_input, host_input, length * sizeof(float),
cudaMemcpyHostToDevice));
#ifdef MEASURE_POWER
while (true) {
#endif
Histogram<<<NUM_BLOCKS, NUM_THREADS>>>(
device_input, device_part_output, length);
cudaDeviceSynchronize();
reduceAll<<<NUM_BLOCKS, NUM_THREADS>>>(
device_part_output, device_output, NUM_BLOCKS);
cudaDeviceSynchronize();
#ifdef MEASURE_POWER
}
#endif
printf("Completed GPU computation!\n");
CUDA_CHECK(cudaMemcpy(results, device_output,
NUM_BINS * sizeof(int), cudaMemcpyDeviceToHost));
AssertArrayEqual(host_output, results, NUM_BINS);
printf("Correctness Check: Accepted!\n");
free(host_input);
free(host_output);
free(results);
CUDA_CHECK(cudaFree(device_input));
CUDA_CHECK(cudaFree(device_part_output));
CUDA_CHECK(cudaFree(device_output));
return 0;
}
|
6ec22dbe9a114e50708d8e7764923139d8ae9c79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../cuda_by_example/common/cpu_bitmap.h"
#include <cmath>
#define DIM 800
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
};
__device__ int julia( int x, int y, float scale ) {
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel( unsigned char *ptr ) {
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
// now calculate the value at that position
int juliaValue = julia( x, y, 0.7 );
ptr[offset*4 + 0] = 255 * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
};
int main( void ) {
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
hipMalloc( (void**)&dev_bitmap, bitmap.image_size() );
data.dev_bitmap = dev_bitmap;
dim3 grid(DIM,DIM);
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(1), 0, 0, dev_bitmap );
hipMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost );
hipFree( dev_bitmap );
bitmap.display_and_exit();
}
| 6ec22dbe9a114e50708d8e7764923139d8ae9c79.cu | #include "../../cuda_by_example/common/cpu_bitmap.h"
#include <cmath>
#define DIM 800
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
};
__device__ int julia( int x, int y, float scale ) {
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel( unsigned char *ptr ) {
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
// now calculate the value at that position
int juliaValue = julia( x, y, 0.7 );
ptr[offset*4 + 0] = 255 * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
};
int main( void ) {
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
cudaMalloc( (void**)&dev_bitmap, bitmap.image_size() );
data.dev_bitmap = dev_bitmap;
dim3 grid(DIM,DIM);
kernel<<<grid,1>>>( dev_bitmap );
cudaMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost );
cudaFree( dev_bitmap );
bitmap.display_and_exit();
}
|
55a75d935a9ed76385256eef99ab1ce27c0c8b9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlarfgx-v2.cu, normal z -> c, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "commonblas_c.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define COMPLEX
/******************************************************************************/
__global__
void magma_clarfgx_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx,
magmaFloatComplex *dtau, float *dxnorm,
magmaFloatComplex *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaFloatComplex scale;
__shared__ float xnorm;
magmaFloatComplex dxi;
if ( j < n-1 )
dxi = dx[j];
if ( i == 0 ) {
xnorm = *dxnorm;
#ifdef REAL
float alpha = *dx0;
float alphai = MAGMA_C_ZERO;
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 1 )
#else
magmaFloatComplex alpha = *dx0;
float alphar = MAGMA_C_REAL(alpha), alphai = MAGMA_C_IMAG(alpha);
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 0 )
#endif
{
*dtau = MAGMA_C_ZERO;
*dA = *dx0;
}
else {
#ifdef REAL
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j == 0) {
*dtau = (beta - alpha) / beta;
            //*dx0 = 1.; // cannot be done here because of a race condition: all thread blocks still need to read dx0 as alpha
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j == 0) {
*dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
            //*dx0 = MAGMA_C_MAKE( 1., 0.); // cannot be done here because of a race condition: all thread blocks still need to read dx0 as alpha
*dA = MAGMA_C_MAKE(beta, 0.);
}
alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha);
#endif
}
}
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_C_MUL(dxi, scale);
if (j < it) {
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_C_MAKE(0., 0.);
}
}
/***************************************************************************//**
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
    with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
    The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*******************************************************************************/
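/*
    Editor's note (added for clarity; not part of the original MAGMA source):
    magma_clarfgx_gpu_kernel above follows the standard LAPACK larfg recipe.
    With alpha = dx0[0] and beta = -sign(Re(alpha)) * dxnorm[0] it computes
        tau   = (beta - alpha) / beta,
        dx[j] = dx[j] / (alpha - beta),   j = 0 .. n-2,
    so that v = [1, dx]^T and H = I - tau*v*v^H maps [dx0, dx]^T to
    [beta, 0, ..., 0]^T; beta is written to dA and tau to dtau.
    Worked real example (n = 3): dx0 = 3, dx = [4, 0], dxnorm[0] = 5 gives
    beta = -5, tau = 1.6, v = [1, 0.5, 0], and H*[3, 4, 0]^T = [-5, 0, 0]^T.
*/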
extern "C" void
magma_clarfgx_gpu(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dA, magma_int_t iter,
magma_queue_t queue )
{
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_clarfgx_gpu_kernel)
, dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
n, dx0, dx, dtau, dxnorm, dA, iter);
}
/***************************************************************************//**
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
    with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
    The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*******************************************************************************/
extern "C" void
magma_clarfgtx_gpu(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dA, magma_int_t iter,
magmaFloatComplex_ptr V, magma_int_t ldv,
magmaFloatComplex_ptr T, magma_int_t ldt,
magmaFloatComplex_ptr dwork,
magma_queue_t queue )
{
/* Generate the elementary reflector H(iter) */
magma_clarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter, queue);
if (iter == 0) {
magmaFloatComplex tt = MAGMA_C_ONE;
magmablas_clacpy( MagmaFull, 1, 1, dtau, 1, T+iter+iter*ldt, 1, queue );
magma_csetmatrix( 1, 1, &tt, 1, dx0, 1, queue );
}
else {
/* Compute the iter-th column of T */
hipLaunchKernelGGL(( magma_cgemv_kernel3)
, dim3(iter), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
n, V, ldv, dx0, dwork, dtau );
hipLaunchKernelGGL(( magma_ctrmv_kernel2)
, dim3(iter), dim3(iter), 0, queue->cuda_stream() ,
T, ldt, dwork, T+iter*ldt, dtau );
}
}
| 55a75d935a9ed76385256eef99ab1ce27c0c8b9c.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlarfgx-v2.cu, normal z -> c, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "commonblas_c.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define COMPLEX
/******************************************************************************/
__global__
void magma_clarfgx_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx,
magmaFloatComplex *dtau, float *dxnorm,
magmaFloatComplex *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaFloatComplex scale;
__shared__ float xnorm;
magmaFloatComplex dxi;
if ( j < n-1 )
dxi = dx[j];
if ( i == 0 ) {
xnorm = *dxnorm;
#ifdef REAL
float alpha = *dx0;
float alphai = MAGMA_C_ZERO;
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 1 )
#else
magmaFloatComplex alpha = *dx0;
float alphar = MAGMA_C_REAL(alpha), alphai = MAGMA_C_IMAG(alpha);
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 0 )
#endif
{
*dtau = MAGMA_C_ZERO;
*dA = *dx0;
}
else {
#ifdef REAL
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j == 0) {
*dtau = (beta - alpha) / beta;
            //*dx0 = 1.; // cannot be done here because of a race condition: all thread blocks still need to read dx0 as alpha
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j == 0) {
*dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
//*dx0 = MAGMA_C_MAKE( 1., 0.); //cannot be done here because raise condition all threadblock need to read it for alpha
*dA = MAGMA_C_MAKE(beta, 0.);
}
alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha);
#endif
}
}
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_C_MUL(dxi, scale);
if (j < it) {
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_C_MAKE(0., 0.);
}
}
/***************************************************************************//**
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
    The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*******************************************************************************/
extern "C" void
magma_clarfgx_gpu(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dA, magma_int_t iter,
magma_queue_t queue )
{
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
dim3 threads( BLOCK_SIZE );
magma_clarfgx_gpu_kernel
<<< blocks, threads, 0, queue->cuda_stream() >>>
( n, dx0, dx, dtau, dxnorm, dA, iter);
}
/***************************************************************************//**
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
    The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*******************************************************************************/
extern "C" void
magma_clarfgtx_gpu(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dA, magma_int_t iter,
magmaFloatComplex_ptr V, magma_int_t ldv,
magmaFloatComplex_ptr T, magma_int_t ldt,
magmaFloatComplex_ptr dwork,
magma_queue_t queue )
{
/* Generate the elementary reflector H(iter) */
magma_clarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter, queue);
if (iter == 0) {
magmaFloatComplex tt = MAGMA_C_ONE;
magmablas_clacpy( MagmaFull, 1, 1, dtau, 1, T+iter+iter*ldt, 1, queue );
magma_csetmatrix( 1, 1, &tt, 1, dx0, 1, queue );
}
else {
/* Compute the iter-th column of T */
magma_cgemv_kernel3
<<< iter, BLOCK_SIZE, 0, queue->cuda_stream() >>>
( n, V, ldv, dx0, dwork, dtau );
magma_ctrmv_kernel2
<<< iter, iter, 0, queue->cuda_stream() >>>
( T, ldt, dwork, T+iter*ldt, dtau );
}
}
|
00eeb6bb09e4ea6a9ca6368ca7b59965f7e72a40.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <float.h>
void __global__ kernel_isnan(float* array_device, int* rowArray, int rowArrayLength, int* colArray, int colArrayLength, int totalCols, int totalRows, float* results)
{
int n = blockIdx.x * blockDim.x + threadIdx.x;
int m = blockIdx.y * blockDim.y + threadIdx.y;
if (n < rowArrayLength && m < colArrayLength)
{
int arrayInd = n*totalCols + m;
int resultsInd = n*colArrayLength + m;
if (isnan(array_device[arrayInd]) || isinf(array_device[arrayInd])) // I think this is all we need to do
results[resultsInd] = 1;
else
results[resultsInd] = 0;
}
}
void __global__ kernel_nan2num(float* array_device, int* rowArray, int rowArrayLength, int* colArray, int colArrayLength, int totalCols, int totalRows, bool inPlace, float* results)
{
int n = blockIdx.x * blockDim.x + threadIdx.x;
int m = blockIdx.y * blockDim.y + threadIdx.y;
if (n < rowArrayLength && m < colArrayLength)
{
int arrayInd = n*totalCols + m;
int resultsInd = n*colArrayLength + m;
if (inPlace)
{
if (isnan(array_device[arrayInd])) // I think this is all we need to do
array_device[arrayInd] = 0;
else if (isinf(array_device[arrayInd]))
                        // isinf() is true for both +inf and -inf, so check the sign to decide whether to clamp to +FLT_MAX or -FLT_MAX
if (array_device[arrayInd] > 0)
array_device[arrayInd] = FLT_MAX;
else
array_device[arrayInd] = -FLT_MAX;
}
else
{
if (isnan(array_device[arrayInd])) // I think this is all we need to do
results[resultsInd] = 0;
else if (isinf(array_device[arrayInd]))
if (array_device[arrayInd] > 0)
results[resultsInd] = FLT_MAX;
else
results[resultsInd] = -FLT_MAX; // note that FLT_MIN is the smallest float, ie E-38, NOT -E38
else
results[resultsInd] = array_device[arrayInd]; // otherwise just copy the value
}
}
__syncthreads();
}
| 00eeb6bb09e4ea6a9ca6368ca7b59965f7e72a40.cu | #include <stdio.h>
#include <float.h>
void __global__ kernel_isnan(float* array_device, int* rowArray, int rowArrayLength, int* colArray, int colArrayLength, int totalCols, int totalRows, float* results)
{
int n = blockIdx.x * blockDim.x + threadIdx.x;
int m = blockIdx.y * blockDim.y + threadIdx.y;
if (n < rowArrayLength && m < colArrayLength)
{
int arrayInd = n*totalCols + m;
int resultsInd = n*colArrayLength + m;
if (isnan(array_device[arrayInd]) || isinf(array_device[arrayInd])) // I think this is all we need to do
results[resultsInd] = 1;
else
results[resultsInd] = 0;
}
}
void __global__ kernel_nan2num(float* array_device, int* rowArray, int rowArrayLength, int* colArray, int colArrayLength, int totalCols, int totalRows, bool inPlace, float* results)
{
int n = blockIdx.x * blockDim.x + threadIdx.x;
int m = blockIdx.y * blockDim.y + threadIdx.y;
if (n < rowArrayLength && m < colArrayLength)
{
int arrayInd = n*totalCols + m;
int resultsInd = n*colArrayLength + m;
if (inPlace)
{
if (isnan(array_device[arrayInd])) // I think this is all we need to do
array_device[arrayInd] = 0;
else if (isinf(array_device[arrayInd]))
                        // isinf() is true for both +inf and -inf, so check the sign to decide whether to clamp to +FLT_MAX or -FLT_MAX
if (array_device[arrayInd] > 0)
array_device[arrayInd] = FLT_MAX;
else
array_device[arrayInd] = -FLT_MAX;
}
else
{
if (isnan(array_device[arrayInd])) // I think this is all we need to do
results[resultsInd] = 0;
else if (isinf(array_device[arrayInd]))
if (array_device[arrayInd] > 0)
results[resultsInd] = FLT_MAX;
else
results[resultsInd] = -FLT_MAX; // note that FLT_MIN is the smallest float, ie E-38, NOT -E38
else
results[resultsInd] = array_device[arrayInd]; // otherwise just copy the value
}
}
__syncthreads();
}
|
a6d48909fc4462d11a39528497f756cd2fba2875.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef RESAMPLE_BILINEAR
#define RESAMPLE_BILINEAR
#if __CUDACC_VER_MAJOR__ >= 9
#include <hip/hip_fp16.h>
#endif
#include "PrGPU/KernelSupport/KernelCore.h" //includes KernelWrapper.h
#include "PrGPU/KernelSupport/KernelMemory.h"
#if GF_DEVICE_TARGET_DEVICE
GF_KERNEL_FUNCTION(kFrameDiff,
((GF_PTR(float4))(inImg))
((GF_PTR(float4))(nextImg))
((GF_PTR(float4))(destImg)),
((int)(inPitch))
((int)(destPitch))
((int)(in16f))
((unsigned int)(outWidth))
((unsigned int)(outHeight)),
((uint2)(inXY)(KERNEL_XY)))
{
float4 color, nextColor, dest;
if (inXY.x >= outWidth || inXY.y >= outHeight) return;
color = ReadFloat4(inImg, inXY.y * inPitch + inXY.x, !!in16f);
nextColor = ReadFloat4(nextImg, inXY.y * inPitch + inXY.x, !!in16f);
dest.x = (nextColor.x - color.x);
dest.y = (nextColor.y - color.y);
dest.z = (nextColor.z - color.z);
dest.w = color.w;
WriteFloat4(dest, destImg, inXY.y * destPitch + inXY.x, !!in16f);
}
#endif
#if __NVCC__
void FrameDiff_CUDA(
float *inBuf,
float *nextBuf,
float *destBuf,
int inPitch,
int destPitch,
int is16f,
unsigned int width,
unsigned int height)
{
dim3 blockDim(16, 16, 1);
dim3 gridDim((width + blockDim.x - 1) / blockDim.x, (height + blockDim.y - 1) / blockDim.y, 1);
kFrameDiff << < gridDim, blockDim, 0 >> > ((float4*)inBuf, (float4*)nextBuf, (float4*)destBuf, inPitch, destPitch, is16f, width, height);
hipDeviceSynchronize();
}
#endif //GF_DEVICE_TARGET_HOST
#endif //SDK_CROSS_DISSOLVE
| a6d48909fc4462d11a39528497f756cd2fba2875.cu |
#ifndef RESAMPLE_BILINEAR
#define RESAMPLE_BILINEAR
#if __CUDACC_VER_MAJOR__ >= 9
#include <cuda_fp16.h>
#endif
#include "PrGPU/KernelSupport/KernelCore.h" //includes KernelWrapper.h
#include "PrGPU/KernelSupport/KernelMemory.h"
#if GF_DEVICE_TARGET_DEVICE
GF_KERNEL_FUNCTION(kFrameDiff,
((GF_PTR(float4))(inImg))
((GF_PTR(float4))(nextImg))
((GF_PTR(float4))(destImg)),
((int)(inPitch))
((int)(destPitch))
((int)(in16f))
((unsigned int)(outWidth))
((unsigned int)(outHeight)),
((uint2)(inXY)(KERNEL_XY)))
{
float4 color, nextColor, dest;
if (inXY.x >= outWidth || inXY.y >= outHeight) return;
color = ReadFloat4(inImg, inXY.y * inPitch + inXY.x, !!in16f);
nextColor = ReadFloat4(nextImg, inXY.y * inPitch + inXY.x, !!in16f);
dest.x = (nextColor.x - color.x);
dest.y = (nextColor.y - color.y);
dest.z = (nextColor.z - color.z);
dest.w = color.w;
WriteFloat4(dest, destImg, inXY.y * destPitch + inXY.x, !!in16f);
}
#endif
#if __NVCC__
void FrameDiff_CUDA(
float *inBuf,
float *nextBuf,
float *destBuf,
int inPitch,
int destPitch,
int is16f,
unsigned int width,
unsigned int height)
{
dim3 blockDim(16, 16, 1);
dim3 gridDim((width + blockDim.x - 1) / blockDim.x, (height + blockDim.y - 1) / blockDim.y, 1);
kFrameDiff << < gridDim, blockDim, 0 >> > ((float4*)inBuf, (float4*)nextBuf, (float4*)destBuf, inPitch, destPitch, is16f, width, height);
cudaDeviceSynchronize();
}
#endif //GF_DEVICE_TARGET_HOST
#endif //SDK_CROSS_DISSOLVE
|
gemm.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudakernel/gemm/gemm.h"
#include "cudakernel/math/math.h"
#include "cudakernel/common/common.h"
#include <hip/hip_fp16.h>
#include <float.h>
#include "kernel_type.h"
#include "conv_common.h"
#define TIMES 4
static std::vector<kernel_info_t> g_kvec;
static bool is_g_kvec_set = false;
#define FAKE_CONV_PARAM \
const int in_hw = 1; const int out_hw = 1; \
const int flt_hw = 1; const int splitk = 1; \
const int in_height = 1; const int in_width = 1; \
const int batch = M; const int num_grp = 1; \
const int num_chl_per_grp = 0; const int num_chl_per_grp_pad = K_pad; \
const int flt_height = 1; const int flt_width = 1; \
const int num_flt_per_grp = 0; const int num_flt_per_grp_pad = N_pad; \
const int out_height = 1; const int out_width = 1; \
const int stride_height = 1; const int stride_width = 1; \
const int pad_height = 0; const int pad_width = 0; \
const int hole_height = 1; const int hole_width = 1;
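// Editor's note (added for clarity; not part of the original ppl.nn source):
// FAKE_CONV_PARAM maps the GEMM onto the 2SPK "F1" convolution kernels by
// describing the problem as a 1x1 convolution with unit spatial extents:
// batch = M, num_chl_per_grp_pad = K_pad, num_flt_per_grp_pad = N_pad.
// With every spatial dimension equal to 1 the implicit-GEMM convolution
// degenerates to C[M, N_pad] = A[M, K_pad] * B[N_pad, K_pad]^T, i.e. a plain
// matrix multiply against the row-major (N, K) weight layout this file expects.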
#define GEMM_FUNC_PARAM \
input0_tmp, \
(int4*)weight, \
final_out, \
kLoopNum, \
in_lut, 0, \
flt_lut, 0, \
in_hw, out_hw, \
flt_hw, splitk, \
in_height, in_width, \
batch, num_grp, \
num_chl_per_grp, num_chl_per_grp_pad, \
flt_height, flt_width, \
num_flt_per_grp, num_flt_per_grp_pad, \
out_height, out_width, \
stride_height, stride_width, \
pad_height, pad_width, \
hole_height, hole_width, \
has_bias, (int4*)bias, \
fuse_param.has_activation, clip_min, \
fuse_param.has_clip, clip_max, \
fuse_param.has_prelu, (const void *) fuse_param.prelu, \
fuse_param.has_elt, (const int4 *) fuse_param.pre_data, \
fuse_param.has_elt_activation, elt_clip_min, \
fuse_param.has_elt_clip, elt_clip_max, \
fuse_param.has_elt_prelu, (const void *) fuse_param.elt_prelu, \
(__half)fuse_param.leaky, (__half)fuse_param.elt_leaky, \
fuse_param.has_concat, concat_offset_v8, \
concat_stride_v8
void init_f1_kvec(std::vector<kernel_info_t> &g_kvec, ppl::common::datatype_t type)
{
if ( type == ppl::common::DATATYPE_FLOAT32 )
{
printf("fp32 unsupported in %s\n", __FUNCTION__);
}
else if ( type == ppl::common::DATATYPE_FLOAT16 )
{
Initialize2spkConvF1KernelContainer(g_kvec);
}
else
{ printf("type unsupported\n"); }
is_g_kvec_set = true;
}
uint64_t PPLGemmCUDAGetBufSize(
const ppl::nn::TensorShape* input_shape,
int transA)
{
auto type = input_shape->GetDataType();
int type_size = ppl::common::GetSizeOfDataType(type);
if(transA){
int pad_size = GetPadSize(type); // ldg 128 bytes
int K = input_shape->GetDim(0);
int M = input_shape->GetDim(1);
int K_pad = Align(K, pad_size);
return M * K_pad * type_size;
}
return 0;
}
unsigned int PPLCUDAGemmGetBiasSize(
const ppl::common::datatype_t type,
const int N,
const bool is_scalar)
{
if(!is_scalar) return 0;
int pad_size = GetPadSize(type); // ldg 128 bytes
int N_pad = Align(N, pad_size);
int type_size = ppl::common::GetSizeOfDataType(type);
return N_pad * type_size;
}
//block size: (32,32,1)
template<typename T>
__global__ void matrix_transpose(
T *output,
T *input,
float scale,
const int in_row,
const int in_col)
{
unsigned int in_x = blockIdx.x*32 + threadIdx.x;
unsigned int in_y = blockIdx.y*32 + threadIdx.y;
unsigned int out_x = blockIdx.y*32 + threadIdx.x;
unsigned int out_y = blockIdx.x*32 + threadIdx.y;
bool in_range = (in_x <= in_col) && (in_y <= in_row);
bool out_range = (out_x <= in_row) && (out_y <= in_col);
__shared__ T smem[32][33];
T value = in_range ? input[in_y*in_col + in_x] : (T)0;
smem[threadIdx.x][threadIdx.y] = value;
__syncthreads();
value = smem[threadIdx.y][threadIdx.x];
float fp_value = (float)value * scale;
if(out_range) output[out_y*in_row + out_x] = (__half)fp_value;
}
template<typename T>
__global__ void scale(T *input, float scale, unsigned int size){
unsigned int off = blockIdx.x*512 + threadIdx.x;
bool in_range = off <= size;
T value = in_range ? input[off] : (T)0;
float fp_value = (float)value;
fp_value = scale * fp_value;
if (in_range) input[off] = (T)fp_value;
}
ppl::common::RetCode PPLCUDAGemmModifyWeights(
const hipStream_t &stream,
ppl::nn::TensorShape* weight_shape,
void* weight,
void* tmp_weight, //if need transpose
const ppl::nn::common::GemmParam *param)
{
int transB = param->transB;
float alpha = param->alpha;
auto type = weight_shape->GetDataType();
int pad_size = GetPadSize(type);
const int dim0 = weight_shape->GetDim(0);//assume padded
const int dim1 = weight_shape->GetDim(1);
if (!transB) {
#define TRANSWEIGHT(Type) \
hipLaunchKernelGGL(( matrix_transpose<Type>), dim3(grid), dim3(block), 0, stream, \
(Type*)tmp_weight, (Type*)weight, alpha, dim0, dim1); \
hipMemcpyAsync((Type*)weight, (Type*)tmp_weight, dim0*dim1*sizeof(Type), \
hipMemcpyDeviceToDevice, stream);
dim3 grid(DivUp(dim1, 32), DivUp(dim0, 32), 1);
dim3 block(32, 32, 1);
weight_shape->SetDim(0, dim1);
weight_shape->SetDim(1, dim0);
switch(type){
case ppl::common::DATATYPE_FLOAT32 : {
TRANSWEIGHT(float)
break;
}
case ppl::common::DATATYPE_FLOAT16 : {
TRANSWEIGHT(__half)
break;
}
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef TRANSWEIGHT
} else if (alpha != 1.f){
int grid_size = DivUp(dim0*dim1, 512);
switch(type){
case ppl::common::DATATYPE_FLOAT32 : {
hipLaunchKernelGGL(( scale<float>), dim3(grid_size), dim3(512), 0, stream, (float*)weight, alpha, dim0*dim1);
break;
}
case ppl::common::DATATYPE_FLOAT16 : {
hipLaunchKernelGGL(( scale<__half>), dim3(grid_size), dim3(512), 0, stream, (__half*)weight, alpha, dim0*dim1);
break;
}
default:
return ppl::common::RC_UNSUPPORTED;
}
}
return ppl::common::RC_SUCCESS;
}
ppl::common::RetCode PPLCUDAGemmModifyBias(
const hipStream_t &stream,
const ppl::nn::TensorShape* bias_shape,
void* bias,
const ppl::nn::common::GemmParam *param)
{
if (param->bias_term) {
auto type = bias_shape->GetDataType();
int pad_size = GetPadSize(type);
float beta = param->beta;
int N = bias_shape->GetDim(0);
int N_pad = Align(N, pad_size);
if (type == ppl::common::DATATYPE_FLOAT32) {
if (bias_shape->IsScalar()) return ppl::common::RC_UNSUPPORTED;
if (beta != 0.f && beta != 1.f){
int grid_size = DivUp(N_pad, 512);
hipLaunchKernelGGL(( scale<float>), dim3(grid_size), dim3(512), 0, stream, (float*)bias, beta, N_pad);
}
} else if (type == ppl::common::DATATYPE_FLOAT16) {
if (bias_shape->IsScalar()) return ppl::common::RC_UNSUPPORTED;
if (beta != 0.f && beta != 1.f){
int grid_size = DivUp(N_pad, 512);
hipLaunchKernelGGL(( scale<__half>), dim3(grid_size), dim3(512), 0, stream, (__half*)bias, beta, N_pad);
}
} else{
return ppl::common::RC_UNSUPPORTED;
}
}
return ppl::common::RC_SUCCESS;
}
int PPLCUDAGemmSelectKernel(
const hipStream_t &stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* weight_shape,
const void* weight,
const void* bias,
const ppl::nn::TensorShape* output_shape,
void* output,
const ppl::nn::common::GemmParam ¶m,
void* temp_buffer,
const fuse_param_t &fuse_param)
{
auto type = weight_shape->GetDataType();
if (!is_g_kvec_set) init_f1_kvec(g_kvec, type);
int pad_size = GetPadSize(type);
int transA = param.transA;
int transB = param.transB;
int N_pad = transB ? weight_shape->GetDim(0) : weight_shape->GetDim(1);
int K_pad = transB ? weight_shape->GetDim(1) : weight_shape->GetDim(0);
int M = transA ? input_shape->GetDim(1) : input_shape->GetDim(0);
int concat_offset_v8 = fuse_param.concat_offset / pad_size;
int concat_stride_v8 = fuse_param.concat_stride / pad_size;
int4 *final_out = fuse_param.has_concat ? (int4*)fuse_param.post_concat : (int4*)output;
// fuse configs
__half2 clip_min = __float2half2_rn(fuse_param.clip_min);
__half2 clip_max = __float2half2_rn(fuse_param.clip_max);
__half2 elt_clip_min = __float2half2_rn(fuse_param.elt_clip_min);
__half2 elt_clip_max = __float2half2_rn(fuse_param.elt_clip_max);
bool has_bias = param.bias_term;//beta != 0.f;
float minTime = FLT_MAX;
int best_kid = -1;
float elapsed;
hipEvent_t begin, end;
hipEventCreate(&begin);
hipEventCreate(&end);
//transpose
int4 *input0_tmp = (int4*)input;
if (transA == 1) { // input is shape of (K, M), we need K as the 1st inner dim
dim3 grid(DivUp(K_pad, 32), DivUp(M, 32), 1);
dim3 block(32, 32, 1);
if (type == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( matrix_transpose<float>), dim3(grid), dim3(block), 0, stream,
(float*)temp_buffer, (float*)input, 1.f, K_pad, M);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( matrix_transpose<__half>), dim3(grid), dim3(block), 0, stream,
(__half*)temp_buffer, (__half*)input, 1.f, K_pad, M);
} else {
return ppl::common::RC_UNSUPPORTED;
}
input0_tmp = (int4*)temp_buffer;
}
for (unsigned int kid = 0; kid < g_kvec.size(); kid++) {
int tile_m_per_cta = g_kvec[kid].tile_m_per_cta;
int tile_n_per_cta = g_kvec[kid].tile_n_per_cta;
int tile_k_per_cta = g_kvec[kid].tile_k_per_cta;
int cta_size_in_thd = g_kvec[kid].cta_size_in_thd;
dim3 block_size, grid_size;
block_size.x = cta_size_in_thd;
block_size.y = 1;
block_size.z = 1;
grid_size.x = DivUp(N_pad, tile_n_per_cta);
grid_size.y = DivUp(M, tile_m_per_cta);
grid_size.z = 1;//num_grp * splitk;
hipEventRecord(begin, stream);
for (int i = 0; i < TIMES; i++) {
if (g_kvec[kid].ktype == CONV_2SPK_F1) {
FAKE_CONV_PARAM
int kLoopNum = DivUp(K_pad, tile_k_per_cta);
lut_t in_lut, flt_lut;
                hipLaunchKernelGGL((g_kvec[kid].lut_kptr), dim3(grid_size), dim3(block_size), 0, stream, GEMM_FUNC_PARAM);
}
else {
printf("Error: kernel type error in %s\n", __FUNCTION__);
}
}
hipEventRecord(end, stream);
hipEventSynchronize(end);
hipEventElapsedTime(&elapsed, begin, end);
if (elapsed < minTime){
best_kid = kid;
minTime = elapsed;
}
}
hipEventDestroy(begin);
hipEventDestroy(end);
return best_kid;
}
template<typename T>
ppl::common::RetCode PPLCUDAGemvForwardImp(
const hipStream_t &stream,
const int M,
const int N,
const int K,
const void* input,
const void* weight,
const void* bias,
void* output,
const ppl::nn::common::GemmParam ¶m,
void* temp_buffer,
const fuse_param_t &fuse_param);
ppl::common::RetCode PPLCUDAGemmForwardImp(
const hipStream_t &stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* weight_shape,
const void* weight,
const void* bias,
const ppl::nn::TensorShape* output_shape,
void* output,
const ppl::nn::common::GemmParam ¶m,
void* temp_buffer,
const fuse_param_t &fuse_param,
const int kid)
{
auto type = weight_shape->GetDataType();
if ( !is_g_kvec_set ) init_f1_kvec(g_kvec, type);
int pad_size = GetPadSize(type);
int transA = param.transA;
int transB = param.transB;
if(!param.transB) return ppl::common::RC_UNSUPPORTED;
int N = transB ? weight_shape->GetDim(0) : weight_shape->GetDim(1);
int K = transB ? weight_shape->GetDim(1) : weight_shape->GetDim(0);
int N_pad = Align(N, pad_size);
int K_pad = Align(K, pad_size);
int M = transA ? input_shape->GetDim(1) : input_shape->GetDim(0);
int concat_offset_v8 = fuse_param.concat_offset / pad_size;
int concat_stride_v8 = fuse_param.concat_stride / pad_size;
int4 *final_out = fuse_param.has_concat ? (int4*)fuse_param.post_concat : (int4*)output;
// fuse configs
__half2 clip_min = __float2half2_rn(fuse_param.clip_min);
__half2 clip_max = __float2half2_rn(fuse_param.clip_max);
__half2 elt_clip_min = __float2half2_rn(fuse_param.elt_clip_min);
__half2 elt_clip_max = __float2half2_rn(fuse_param.elt_clip_max);
ppl::common::RetCode status = ppl::common::RC_SUCCESS;
if(M == 1){
status = PPLCUDAGemvForwardImp<__half>(stream,
M, N, K,
input, weight, bias,
(void*)final_out,
param, temp_buffer, fuse_param);
return status;
}
// kernel configs
int tile_m_per_cta = g_kvec[kid].tile_m_per_cta;
int tile_n_per_cta = g_kvec[kid].tile_n_per_cta;
int tile_k_per_cta = g_kvec[kid].tile_k_per_cta;
int cta_size_in_thd = g_kvec[kid].cta_size_in_thd;
dim3 block_size, grid_size;
block_size.x = cta_size_in_thd;
block_size.y = 1;
block_size.z = 1;
grid_size.x = DivUp(N_pad, tile_n_per_cta);
grid_size.y = DivUp(M, tile_m_per_cta);
grid_size.z = 1;//num_grp * splitk;
int kLoopNum = DivUp(K_pad, tile_k_per_cta);
lut_t in_lut, flt_lut;
bool has_bias = param.bias_term;//beta != 0.f;
int4 *input0_tmp = (int4*)input;
if (transA == 1) {
dim3 grid(DivUp(K_pad, 32), DivUp(M, 32), 1);
dim3 block(32, 32, 1);
if (type == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( matrix_transpose<float>), dim3(grid), dim3(block), 0, stream,
(float*)temp_buffer, (float*)input, 1.f, K_pad, M);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( matrix_transpose<__half>), dim3(grid), dim3(block), 0, stream,
(__half*)temp_buffer, (__half*)input, 1.f, K_pad, M);
} else {
return ppl::common::RC_UNSUPPORTED;
}
input0_tmp = (int4*)temp_buffer;
}
FAKE_CONV_PARAM
    hipLaunchKernelGGL((g_kvec[kid].lut_kptr), dim3(grid_size), dim3(block_size), 0, stream, GEMM_FUNC_PARAM);
return status;
}
template <typename T>
__device__ __inline__ void fma_v4(const int4 a, const int4 b, int4 &c);
template <>
__device__ __inline__ void fma_v4<__half>(const int4 a, const int4 b, int4 &c){
#if __CUDA_ARCH__ >= 600
((__half2*)&c)[0] = __hfma2(((__half2*)&a)[0], ((__half2*)&b)[0], ((__half2*)&c)[0]);
((__half2*)&c)[1] = __hfma2(((__half2*)&a)[1], ((__half2*)&b)[1], ((__half2*)&c)[1]);
((__half2*)&c)[2] = __hfma2(((__half2*)&a)[2], ((__half2*)&b)[2], ((__half2*)&c)[2]);
((__half2*)&c)[3] = __hfma2(((__half2*)&a)[3], ((__half2*)&b)[3], ((__half2*)&c)[3]);
#else
#endif
}
template <>
__device__ __inline__ void fma_v4<float>(const int4 a, const int4 b, int4 &c){
((float*)&c)[0] = ((float*)&a)[0] * ((float*)&b)[0] + ((float*)&c)[0];
((float*)&c)[1] = ((float*)&a)[1] * ((float*)&b)[1] + ((float*)&c)[1];
((float*)&c)[2] = ((float*)&a)[2] * ((float*)&b)[2] + ((float*)&c)[2];
((float*)&c)[3] = ((float*)&a)[3] * ((float*)&b)[3] + ((float*)&c)[3];
}
template <typename T>
__device__ __inline__ int4 add_v4(const int4 a, const int4 b);
template <>
__device__ __inline__ int4 add_v4<__half>(const int4 a, const int4 b){
int4 res = {0,0,0,0};
#if __CUDA_ARCH__ >= 600
((__half2*)&res)[0] = __hadd2(((__half2*)&a)[0], ((__half2*)&b)[0]);
((__half2*)&res)[1] = __hadd2(((__half2*)&a)[1], ((__half2*)&b)[1]);
((__half2*)&res)[2] = __hadd2(((__half2*)&a)[2], ((__half2*)&b)[2]);
((__half2*)&res)[3] = __hadd2(((__half2*)&a)[3], ((__half2*)&b)[3]);
#else
#endif
return res;
}
template <>
__device__ __inline__ int4 add_v4<float>(const int4 a, const int4 b){
int4 res = {0,0,0,0};
((float*)&res)[0] = ((float*)&a)[0] + ((float*)&b)[0];
((float*)&res)[1] = ((float*)&a)[1] + ((float*)&b)[1];
((float*)&res)[2] = ((float*)&a)[2] + ((float*)&b)[2];
((float*)&res)[3] = ((float*)&a)[3] + ((float*)&b)[3];
return res;
}
template <typename T>
__inline__ __device__ T reduce_v4(int4 data){
T res = (T)0;
for(int i = 0; i < sizeof(int4)/sizeof(T); i++){
res = Math<T,T,T>::add(res, ((T*)&data)[i]);
    }
    return res;
}
template <typename T>
__device__ __inline__ void activation(const int activation, int4 &v){
T *t_v = (T*)&v;
constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
if(activation ==1){
for(int i = 0; i < T_NUMS_PER_INT4; i++)
t_v[i] = Math<T,T,T>::ge(t_v[i], (T)0)?
t_v[i] : (T)0;
} else{
for(int i = 0; i < T_NUMS_PER_INT4; i++){
T tmp = expf(t_v[i]);
t_v[i] = tmp * __frcp_rn(tmp + (T)1);
}
}
}
template <>
__device__ __inline__ void activation<__half>(const int activation, int4 &v){
#if __CUDA_ARCH__ >= 600
__half2 *h2_v = (__half2*)&v;
int *int_v = (int*)&v;
if(activation ==1){
for(int i = 0; i < 4; i++)
int_v[i] = __vmaxs2(int_v[i], 0);
} else{
__half2 one = {(__half)1.f, (__half)1.f};
for(int i = 0; i < 4; i++){
__half2 tmp = h2exp(h2_v[i]);
h2_v[i] = __hmul2(tmp, h2rcp(__hadd2(one, tmp)));// __h2div(tmp, __hadd2(one, tmp));
}
}
#else
#endif
}
template<typename T>
__device__ __inline__ void clip(int4 &v, float clip_min, float clip_max){
T *t_v = (T*)&v;
constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
for(int i = 0; i < T_NUMS_PER_INT4; i++){
t_v[i] = Math<T,T,T>::ge(t_v[i], (T)clip_min)?
t_v[i] : (T)clip_min;
t_v[i] = Math<T,T,T>::le(t_v[i], (T)clip_max)?
t_v[i] : (T)clip_max;
}
}
//matrix: NxK
// N: pad int4
// K: pad int4
// layout and fuse pattern consistent with gemm
//BLK_TILE_N: min:8
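// Editor's note (added for clarity; not part of the original ppl.nn source):
// blocking scheme of the kernel below, in int4 units (sizeof(int4)/sizeof(T)
// elements per load): threadIdx.x strides over K and accumulates THD_TILE_N
// partial dot products per thread in registers, threadIdx.y selects which
// THD_TILE_N_V4 int4 slots of the output the thread owns, so one block covers
// BLK_TILE_N output elements. Partials are reduced along x through shared
// memory (for block widths >= 64) and warp shuffles, bias/activation/clip
// fusion is applied, and results are stored one int4 per output slot.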
template<typename T, int BLK_TILE_N, int THD_TILE_N_V4, int BLK_SIZE>
__global__ void gemv(void *output,
const void *vec,
const void *matrix,
const void *bias,
const int padK,
const int padN,
const fuse_param_t fuse_param)
{
// blk conofig
// one int4 per thd along K
constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
constexpr int BLK_TILE_N_V4 = BLK_TILE_N / T_NUMS_PER_INT4;
constexpr int THD_TILE_N = THD_TILE_N_V4 * T_NUMS_PER_INT4;
constexpr int BLK_SIZE_Y = BLK_TILE_N_V4 / THD_TILE_N_V4;
constexpr int BLK_SIZE_X = BLK_SIZE / BLK_SIZE_Y;
constexpr int BLK_TILE_K = BLK_SIZE_X;
int pad_k_v4 = padK / T_NUMS_PER_INT4;
int pad_n_v4 = padN / T_NUMS_PER_INT4;
int n_id = blockIdx.x*BLK_TILE_N + threadIdx.y*T_NUMS_PER_INT4;
int64_t b_base_v4 = (int64_t)n_id*pad_k_v4;
int4 *matrix_base_v4 = (int4*)matrix + b_base_v4;
int4 reg_c[THD_TILE_N];
int4 reg_b[THD_TILE_N];
bool in_n_range[THD_TILE_N_V4];
int4 reg_a;
int4 zero = {0,0,0,0};
T c[THD_TILE_N] = { T(0) };
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++) c[i] = (T)0;
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++){
reg_c[i] = zero;
}
#pragma unroll
for(int i = 0; i < THD_TILE_N_V4; i++){
in_n_range[i] = blockIdx.x*BLK_TILE_N_V4 + threadIdx.y + i*BLK_SIZE_Y < pad_n_v4;
}
// ld global VxM
#pragma unroll
for(int k = 0; k < DivUp(pad_k_v4,BLK_TILE_K); k++){
int64_t off = k*BLK_TILE_K + threadIdx.x;
bool in_range = off < pad_k_v4;
reg_a = in_range? ((int4*)vec)[off] : zero;
#pragma unroll
for(int i = 0; i < THD_TILE_N_V4; i++){
#pragma unroll
for(int j = 0; j < T_NUMS_PER_INT4; j++){
reg_b[i*T_NUMS_PER_INT4 + j] = in_n_range[i] && in_range ?
matrix_base_v4[(i*T_NUMS_PER_INT4*BLK_SIZE_Y+j)*pad_k_v4 + off]
: zero;
fma_v4<T>(reg_a, reg_b[i*T_NUMS_PER_INT4 + j],
reg_c[i*T_NUMS_PER_INT4 + j]);
}
}
}
// int4 reduce to half
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++){
#pragma unroll
for(int n = 0; n < T_NUMS_PER_INT4; n++){
c[i] = Math<T,T,T>::add( ((T*)reg_c)[i*T_NUMS_PER_INT4 + n],
c[i]);
}
}
__shared__ T smem[BLK_SIZE_X*BLK_TILE_N];
int reduce_off = (threadIdx.y*THD_TILE_N)*BLK_SIZE_X + threadIdx.x;
constexpr int REDUCE_SIZE = BLK_SIZE_X;
if(REDUCE_SIZE >= 64){
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++){
smem[reduce_off + i*BLK_SIZE_X] = c[i];
}
__syncthreads();
}
//reduce
if(REDUCE_SIZE >= 1024){
if(threadIdx.x < 512)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[512 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
if(REDUCE_SIZE >= 512){
if(threadIdx.x < 256)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[256 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
if(REDUCE_SIZE >= 256){
if(threadIdx.x < 128)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[128 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
if(REDUCE_SIZE >= 128){
if(threadIdx.x < 64)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[64 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
unsigned FULL_MASK = __activemask();
if (REDUCE_SIZE >= 64) {
if(threadIdx.x < 32){
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[reduce_off + i*BLK_SIZE_X + 32]);
}
}
if(threadIdx.x < 32){
if (REDUCE_SIZE >= 32) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 16));
}
if (REDUCE_SIZE >= 16) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 8));
}
if (REDUCE_SIZE >= 8) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 4));
}
if (REDUCE_SIZE >= 4) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 2));
}
if (REDUCE_SIZE >= 2) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 1));
}
}
// shared shuffle
int4 *smem_v4 = (int4*)smem;
if (threadIdx.x == 0) {
#pragma unroll
for(int i = 0; i < THD_TILE_N_V4; i++){
smem_v4[i*BLK_SIZE_Y + threadIdx.y] = ((int4*)c)[i];
}
}
__syncthreads();
int tid = threadIdx.y*BLK_SIZE_X + threadIdx.x;
for(int thd_off = tid; thd_off < BLK_TILE_N_V4; thd_off += BLK_SIZE){
int out_off = blockIdx.x*BLK_TILE_N_V4 + thd_off;
bool in_output_range = out_off < pad_n_v4;
if(in_output_range){
int4 bias_data = bias!=NULL? ((int4*)bias)[out_off] : zero;
//TODO add bias
int4 out = add_v4<T>(smem_v4[thd_off], bias_data);
// fuse
if(fuse_param.has_activation) activation<T>(fuse_param.has_activation, out);
if(fuse_param.has_clip) clip<T>(out, fuse_param.clip_min, fuse_param.clip_max);
int concatV4_off = 0;
if(fuse_param.has_concat){
int concat_offset_v4 = fuse_param.concat_offset / T_NUMS_PER_INT4;
int concat_stride_v4 = fuse_param.concat_stride / T_NUMS_PER_INT4;
concatV4_off = concat_offset_v4 + blockIdx.y*concat_stride_v4;
out_off += concatV4_off;
}
((int4*)output)[out_off] = out;
}
}
}
template<typename T>
ppl::common::RetCode PPLCUDAGemvForwardImp(
const hipStream_t &stream,
const int M,
const int N,
const int K,
const void* input,
const void* weight,
const void* bias,
void* output,
const ppl::nn::common::GemmParam ¶m,
void* temp_buffer,
const fuse_param_t &fuse_param)
{
if(!param.transB) return ppl::common::RC_UNSUPPORTED;
constexpr int ELEM_NUM_PR_LD = sizeof(int4)/sizeof(T);
constexpr int expect_blocks = 64;
//constexpr int MAX_BLK_SIZE = 256;
//constexpr int MAX_THD_TILE_N_V4 = 4;
int n_v4 = N / ELEM_NUM_PR_LD;
int blk_tile_n_v4 = DivUp(n_v4, expect_blocks/M);
#define LAUNCH_KERNEL(){ \
constexpr int BLK_TILE_N = BLK_SIZE_Y * THD_TILE_N_V4 * ELEM_NUM_PR_LD; \
constexpr int BLK_SIZE = BLK_SIZE_Y * BLK_SIZE_X; \
dim3 grid; \
grid.x = DivUp(N, BLK_TILE_N); \
grid.y = 1; grid.z = 1; \
dim3 threads = dim3(BLK_SIZE_X, BLK_SIZE_Y,1); \
hipLaunchKernelGGL(( gemv<T, BLK_TILE_N, THD_TILE_N_V4, BLK_SIZE>), dim3(grid), dim3(threads), 0, stream, \
output, input, weight, bias, K, N, fuse_param); \
}
#define CONFIG_KERNEL(_blk_tile_n_v4){ \
if(BLK_SIZE_X <= 64 && blk_tile_n_v4 >= 16){ \
constexpr int THD_TILE_N_V4 = 4; \
constexpr int BLK_SIZE_Y = 4; \
LAUNCH_KERNEL(); \
} else if(blk_tile_n_v4 >= 8){ \
constexpr int THD_TILE_N_V4 = 4; \
constexpr int BLK_SIZE_Y = 2; \
LAUNCH_KERNEL(); \
} else if(blk_tile_n_v4 >= 4){ \
constexpr int THD_TILE_N_V4 = 2; \
constexpr int BLK_SIZE_Y = 2; \
LAUNCH_KERNEL(); \
} else if(blk_tile_n_v4 >= 2){ \
constexpr int THD_TILE_N_V4 = 2; \
constexpr int BLK_SIZE_Y = 1; \
LAUNCH_KERNEL(); \
} else{ \
constexpr int THD_TILE_N_V4 = 1; \
constexpr int BLK_SIZE_Y = 1; \
LAUNCH_KERNEL(); \
} \
}
if (K >= 512){
constexpr int BLK_SIZE_X = 64;
CONFIG_KERNEL(blk_tile_n_v4);
}
else{
constexpr int BLK_SIZE_X = 32;
CONFIG_KERNEL(blk_tile_n_v4);
}
return ppl::common::RC_SUCCESS;
}
| gemm.cuh | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cublas_v2.h>
#include <raft/linalg/cublas_wrappers.h>
#include <cuda_utils.cuh>
#include "cutlass_wrappers.cuh"
namespace MLCommon {
namespace LinAlg {
/**
* @brief the gemm function for the cases with detailed epilogue customization
* It computes the following equation: D = alpha . opA(A) * opB(B) + beta . C
* @tparam IType input data-type (for A and B matrices)
* @tparam AccType accumulation data-type
* @tparam OType output data-type (for C and D matrices)
* @tparam OutputTile_ output tile size for the thread block
* @tparam AccumulatorsPerThread_ number of accumulators per thread
* @tparam MainLoopFunctor_ custom functor to be used in the main loop
* @tparam Index_ the type of index
* @tparam GemmConfig_ the config for the GEMM
* @tparam EpilogueFunctor_ custom epilogue functor
* @tparam GemmEpilogueTraits_ epilogue traits class to build the epilogue
* @tparam GemmEpilogue_ custom epilogue
* @tparam Lambda lambda to initialize any custom params inside EpilogueFunctor_
* @tparam FinalLambda Final device lambda to be applied in epilogue
* @param transA cublas transpose op for A
* @param transB cublas transpose op for B
* @param m number of rows of A and C/D
* @param n number of columns of B and C/D
* @param k number of cols of A and rows of B
* @param alpha scalar
* @param A input matrix
* @param lda leading dim for A
* @param B input matrix
* @param ldb leading dim for B
* @param beta scalar
* @param C input matrix
* @param ldc leading dim for C and D
* @param D output matrix
* @param op lambda function to initialize any custom params inside
* EpilogueFunctor_
* @param fin_op the final lambda to be run inside the Epilogue. This can help
* in customizing a given EpilogueFunctor, without having to go through the task
* of creating another Functor!
* @param stream cuda stream where to launch work
*/
template <
typename IType, typename AccType, typename OType, typename OutputTile_,
typename AccumulatorsPerThread_ = cutlass::Shape<8, 8, 8>,
typename MainLoopFunctor_ = cutlass::gemm::ThreadMultiplyAdd<
AccumulatorsPerThread_, cutlass::Shape<1, 4, 8>, IType, IType, AccType>,
typename Index_ = int,
typename GemmConfig_ =
CustomGemmConfig<IType, AccType, OType, OutputTile_, AccumulatorsPerThread_,
MainLoopFunctor_>,
typename EpilogueFunctor_ = LinearScaling<OType>,
typename GemmEpilogueTraits_ = cutlass::gemm::SimplifiedGemmEpilogueTraits<
GemmConfig_, EpilogueFunctor_, Index_>,
typename GemmEpilogue_ = CustomGemmEpilogue<GemmEpilogueTraits_>,
typename Lambda, typename FinalLambda>
void gemm(cublasOperation_t transA, cublasOperation_t transB, Index_ m,
Index_ n, Index_ k, OType alpha, IType const *A, Index_ lda,
IType const *B, Index_ ldb, OType beta, OType const *C, Index_ ldc,
OType *D, Lambda op, FinalLambda fin_op, cudaStream_t stream) {
baseGemm<IType, AccType, OType, OutputTile_, AccumulatorsPerThread_,
MainLoopFunctor_, Index_, GemmConfig_, EpilogueFunctor_,
GemmEpilogueTraits_, GemmEpilogue_>(transA, transB, m, n, k, alpha,
A, lda, B, ldb, beta, C, ldc, D,
op, fin_op, stream);
}
/**
* @brief the gemm function for the case where no or simple customization is
* needed
* It computes the following equation: D = alpha . opA(A) * opB(B) + beta . C
* @tparam IType input data-type (for A and B matrices)
* @tparam AccType accumulation data-type
* @tparam OType output data-type (for C and D matrices)
* @tparam OutputTile_ output tile size for the thread block
* @tparam AccumulatorsPerThread_ number of accumulators per thread
* @tparam Index_ index type
* @tparam EpilogueFunctor_ custom epilogue functor
* @param transA cublas transpose op for A
* @param transB cublas transpose op for B
* @param m number of rows of A and C/D
* @param n number of columns of B and C/D
* @param k number of cols of A and rows of B
* @param alpha scalar
* @param A input matrix
* @param lda leading dim for A
* @param B input matrix
* @param ldb leading dim for B
* @param beta scalar
* @param C input matrix
* @param ldc leading dim for C and D
* @param D output matrix
* @param stream cuda stream where to launch work
* @{
*/
template <
typename IType, typename AccType, typename OType, typename OutputTile_,
typename AccumulatorsPerThread_ = cutlass::Shape<8, 8, 8>,
typename MainLoopFunctor_ = cutlass::gemm::ThreadMultiplyAdd<
AccumulatorsPerThread_, cutlass::Shape<1, 4, 8>, IType, IType, AccType>,
typename Index_ = int,
typename EpilogueFunctor_ = cutlass::gemm::LinearScaling<OType>>
void gemm(cublasOperation_t transA, cublasOperation_t transB, Index_ m,
Index_ n, Index_ k, OType alpha, IType const *A, Index_ lda,
IType const *B, Index_ ldb, OType beta, OType const *C, Index_ ldc,
OType *D, cudaStream_t stream) {
typedef CustomGemmConfig<IType, AccType, OType, OutputTile_,
AccumulatorsPerThread_, MainLoopFunctor_>
GemmConfig_;
gemm<IType, AccType, OType, OutputTile_, AccumulatorsPerThread_,
MainLoopFunctor_, Index_, GemmConfig_, EpilogueFunctor_>(
transA, transB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc, D,
[](typename EpilogueFunctor_::Params &p) { return 0; },
0, // missing final lambda here
stream);
}
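/*
 * Minimal usage sketch for the wrapper above (illustrative only): D = A * B on
 * column-major device buffers. The output tile shape cutlass::Shape<8, 128, 128>,
 * the helper name and the buffer names are assumptions made for the example,
 * not part of the original API.
 */
template <typename math_t>
void gemm_cutlass_example(const math_t *d_A, const math_t *d_B, math_t *d_D,
                          int m, int n, int k, cudaStream_t stream) {
  // A is m x k, B is k x n, D is m x n; beta == 0 so the C operand does not
  // contribute and D is simply passed for it.
  gemm<math_t, math_t, math_t, cutlass::Shape<8, 128, 128>>(
    CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
    math_t(1),   // alpha
    d_A, m,      // A and lda (column major)
    d_B, k,      // B and ldb (column major)
    math_t(0),   // beta
    d_D, m,      // C operand and ldc
    d_D,         // D output
    stream);
}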
/**
* @brief the wrapper of cublas gemm function
* It computes the following equation: D = alpha . opA(A) * opB(B) + beta . C
* @tparam math_t the type of input/output matrices
* @param a input matrix
* @param n_rows_a number of rows of A
* @param n_cols_a number of columns of A
* @param b input matrix
* @param c output matrix
* @param n_rows_c number of rows of C
* @param n_cols_c number of columns of C
* @param trans_a cublas transpose op for A
* @param trans_b cublas transpose op for B
* @param alpha scalar
* @param beta scalar
* @param cublas_h cublas handle
* @param stream cuda stream
*/
template <typename math_t>
void gemm(const math_t *a, int n_rows_a, int n_cols_a, const math_t *b,
math_t *c, int n_rows_c, int n_cols_c, cublasOperation_t trans_a,
cublasOperation_t trans_b, math_t alpha, math_t beta,
cublasHandle_t cublas_h, cudaStream_t stream) {
int m = n_rows_c;
int n = n_cols_c;
int k = trans_a == CUBLAS_OP_T ? n_rows_a : n_cols_a;
int lda = trans_a == CUBLAS_OP_T ? k : m;
int ldb = trans_b == CUBLAS_OP_T ? n : k;
int ldc = m;
CUBLAS_CHECK(raft::linalg::cublasgemm(cublas_h, trans_a, trans_b, m, n, k,
&alpha, a, lda, b, ldb, &beta, c, ldc,
stream));
}
template <typename math_t>
void gemm(const math_t *a, int n_rows_a, int n_cols_a, const math_t *b,
math_t *c, int n_rows_c, int n_cols_c, cublasOperation_t trans_a,
cublasOperation_t trans_b, cublasHandle_t cublas_h,
cudaStream_t stream) {
math_t alpha = math_t(1);
math_t beta = math_t(0);
gemm(a, n_rows_a, n_cols_a, b, c, n_rows_c, n_cols_c, trans_a, trans_b, alpha,
beta, cublas_h, stream);
}
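/*
 * Minimal usage sketch for the cuBLAS-backed overloads above (illustrative
 * only): C = A * B with no transposition, assuming the caller already owns the
 * cublas handle, the stream and the device buffers. The helper name and the
 * d_A/d_B/d_C names are placeholders introduced for the example.
 */
template <typename math_t>
void gemm_cublas_example(const math_t *d_A, const math_t *d_B, math_t *d_C,
                         int m, int n, int k, cublasHandle_t cublas_h,
                         cudaStream_t stream) {
  // A is m x k, B is k x n, C is m x n, all column major.
  gemm(d_A, m, k, d_B, d_C, m, n, CUBLAS_OP_N, CUBLAS_OP_N, cublas_h, stream);
}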
/**
* @brief A wrapper for CUBLS GEMM function designed for handling all possible
* combinations of operand layouts.
* It computes the following equation: Z = alpha . X * Y + beta . Z
* @tparam T Data type of input/output matrices (float/double)
* @param handle cublas handle
* @param z output matrix of size M rows x N columns
* @param x input matrix of size M rows x K columns
* @param y input matrix of size K rows x N columns
* @param _M number of rows of X and Z
* @param _N number of rows of Y and columns of Z
* @param _K number of columns of X and rows of Y
* @param isZColMajor Storage layout of Z. true = col major, false = row major
* @param isXColMajor Storage layout of X. true = col major, false = row major
* @param isYColMajor Storage layout of Y. true = col major, false = row major
* @param stream cuda stream
* @param alpha scalar
* @param beta scalar
*/
template <typename T>
void gemm(cublasHandle_t handle, T *z, T *x, T *y, int _M, int _N, int _K,
bool isZColMajor, bool isXColMajor, bool isYColMajor,
cudaStream_t stream, T alpha = T(1.0), T beta = T(0.0)) {
cublasOperation_t trans_a, trans_b;
T *a, *b, *c;
int lda, ldb, ldc;
int M, N, K;
// This function performs c = a * b. Based on the required output layout,
// either a = x, b = y or a = y, b = x. In either case c = z.
if (isZColMajor == true) {
// Result c is required in column major layout. Thus we perform,
// z = x * y
// Using BLAS call c = a * b. Therefore a = x, b = y and c = z
a = x;
    // If x is in row major layout, cublas needs to transpose x first,
    // therefore trans_a needs to be CUBLAS_OP_T. If x is in column major
    // layout, trans_a needs to be CUBLAS_OP_N.
trans_a = isXColMajor == true ? CUBLAS_OP_N : CUBLAS_OP_T;
// Set leading dimension appropriately
lda = isXColMajor == true ? _M : _K;
b = y;
    // If y is in row major layout, cublas needs to transpose y first,
    // therefore trans_b needs to be CUBLAS_OP_T. If y is in column major
    // layout, trans_b needs to be CUBLAS_OP_N.
trans_b = isYColMajor == true ? CUBLAS_OP_N : CUBLAS_OP_T;
ldb = isYColMajor == true ? _K : _N;
c = z;
ldc = _M;
M = _M;
N = _N;
K = _K;
} else {
    // Result c is required in row major layout. Thus we pick
// a = y, b = x and c = a * b = y * x
// cublas produces output matrix only in column major layout. To get output
// matrix on row major layout, we need to produce transpose of output
// in column major layout. Therefore we perform,
// tr(z) = tr(y) * tr(x)
// we model this using cublas call for c = a * b
// therefore a = tr(y), b = tr(x) and c = tr(z)
a = y;
    // If y is in row major layout, it can be interpreted as tr(y) on column
    // major layout. Therefore we can pass trans_a as CUBLAS_OP_N. If y is in
    // column major layout, cublas needs to transpose y first, therefore
    // trans_a needs to be CUBLAS_OP_T.
trans_a = isYColMajor == true ? CUBLAS_OP_T : CUBLAS_OP_N;
// Set leading dimension appropriately
lda = isYColMajor == true ? _K : _N;
b = x;
// If x is in row major layout, it can be interpreted as tr(x) on column
// major layout. Therefore we can pass trans_b as CUBLAS_OP_N. If x is in
    // column major layout, cublas needs to transpose x first, therefore
// trans_b needs to be CUBLAS_OP_T
trans_b = isXColMajor == true ? CUBLAS_OP_T : CUBLAS_OP_N;
// Set leading dimension appropriately
ldb = isXColMajor == true ? _M : _K;
c = z;
ldc = _N;
M = _N;
N = _M;
K = _K;
}
// Actual cuBLAS call
CUBLAS_CHECK(raft::linalg::cublasgemm(handle, trans_a, trans_b, M, N, K,
&alpha, a, lda, b, ldb, &beta, c, ldc,
stream));
}
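/*
 * Minimal sketch of the row-major path described above (illustrative only):
 * with Z, X and Y all stored row major the wrapper internally computes
 * tr(Z) = tr(Y) * tr(X), so the caller only passes the logical dimensions.
 * The helper and buffer names are assumptions made for the example.
 */
template <typename T>
void gemm_row_major_example(cublasHandle_t handle, T *d_Z, T *d_X, T *d_Y,
                            int M, int N, int K, cudaStream_t stream) {
  // Z (M x N) = X (M x K) * Y (K x N), every operand row major.
  gemm(handle, d_Z, d_X, d_Y, M, N, K,
       false /* Z row major */, false /* X row major */, false /* Y row major */,
       stream);
}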
} // end namespace LinAlg
} // end namespace MLCommon
|
gemm.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudakernel/gemm/gemm.h"
#include "cudakernel/math/math.h"
#include "cudakernel/common/common.h"
#include <hip/hip_fp16.h>
#include <float.h>
#include "kernel_type.h"
#include "conv_common.h"
#define TIMES 4
static std::vector<kernel_info_t> g_kvec;
static bool is_g_kvec_set = false;
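// The GEMM below reuses the 2spk "F1" convolution kernels (1x1 filter, unit
// spatial dims): FAKE_CONV_PARAM maps batch <- M, padded channels <- K_pad,
// padded filters <- N_pad, and collapses all spatial/stride/hole terms to 1
// (paddings to 0).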
#define FAKE_CONV_PARAM \
const int in_hw = 1; const int out_hw = 1; \
const int flt_hw = 1; const int splitk = 1; \
const int in_height = 1; const int in_width = 1; \
const int batch = M; const int num_grp = 1; \
const int num_chl_per_grp = 0; const int num_chl_per_grp_pad = K_pad; \
const int flt_height = 1; const int flt_width = 1; \
const int num_flt_per_grp = 0; const int num_flt_per_grp_pad = N_pad; \
const int out_height = 1; const int out_width = 1; \
const int stride_height = 1; const int stride_width = 1; \
const int pad_height = 0; const int pad_width = 0; \
const int hole_height = 1; const int hole_width = 1;
#define GEMM_FUNC_PARAM \
input0_tmp, \
(int4*)weight, \
final_out, \
kLoopNum, \
in_lut, 0, \
flt_lut, 0, \
in_hw, out_hw, \
flt_hw, splitk, \
in_height, in_width, \
batch, num_grp, \
num_chl_per_grp, num_chl_per_grp_pad, \
flt_height, flt_width, \
num_flt_per_grp, num_flt_per_grp_pad, \
out_height, out_width, \
stride_height, stride_width, \
pad_height, pad_width, \
hole_height, hole_width, \
has_bias, (int4*)bias, \
fuse_param.has_activation, clip_min, \
fuse_param.has_clip, clip_max, \
fuse_param.has_prelu, (const void *) fuse_param.prelu, \
fuse_param.has_elt, (const int4 *) fuse_param.pre_data, \
fuse_param.has_elt_activation, elt_clip_min, \
fuse_param.has_elt_clip, elt_clip_max, \
fuse_param.has_elt_prelu, (const void *) fuse_param.elt_prelu, \
(__half)fuse_param.leaky, (__half)fuse_param.elt_leaky, \
fuse_param.has_concat, concat_offset_v8, \
concat_stride_v8
void init_f1_kvec(std::vector<kernel_info_t> &g_kvec, ppl::common::datatype_t type)
{
if ( type == ppl::common::DATATYPE_FLOAT32 )
{
printf("fp32 unsupported in %s\n", __FUNCTION__);
}
else if ( type == ppl::common::DATATYPE_FLOAT16 )
{
Initialize2spkConvF1KernelContainer(g_kvec);
}
else
{ printf("type unsupported\n"); }
is_g_kvec_set = true;
}
uint64_t PPLGemmCUDAGetBufSize(
const ppl::nn::TensorShape* input_shape,
int transA)
{
auto type = input_shape->GetDataType();
int type_size = ppl::common::GetSizeOfDataType(type);
if(transA){
int pad_size = GetPadSize(type); // ldg 128 bytes
int K = input_shape->GetDim(0);
int M = input_shape->GetDim(1);
int K_pad = Align(K, pad_size);
return M * K_pad * type_size;
}
return 0;
}
unsigned int PPLCUDAGemmGetBiasSize(
const ppl::common::datatype_t type,
const int N,
const bool is_scalar)
{
if(!is_scalar) return 0;
int pad_size = GetPadSize(type); // ldg 128 bytes
int N_pad = Align(N, pad_size);
int type_size = ppl::common::GetSizeOfDataType(type);
return N_pad * type_size;
}
//block size: (32,32,1)
template<typename T>
__global__ void matrix_transpose(
T *output,
T *input,
float scale,
const int in_row,
const int in_col)
{
unsigned int in_x = blockIdx.x*32 + threadIdx.x;
unsigned int in_y = blockIdx.y*32 + threadIdx.y;
unsigned int out_x = blockIdx.y*32 + threadIdx.x;
unsigned int out_y = blockIdx.x*32 + threadIdx.y;
    bool in_range = (in_x < in_col) && (in_y < in_row);
    bool out_range = (out_x < in_row) && (out_y < in_col);
__shared__ T smem[32][33];
T value = in_range ? input[in_y*in_col + in_x] : (T)0;
smem[threadIdx.x][threadIdx.y] = value;
__syncthreads();
value = smem[threadIdx.y][threadIdx.x];
float fp_value = (float)value * scale;
    if(out_range) output[out_y*in_row + out_x] = (T)fp_value;
}
template<typename T>
__global__ void scale(T *input, float scale, unsigned int size){
unsigned int off = blockIdx.x*512 + threadIdx.x;
    bool in_range = off < size;
T value = in_range ? input[off] : (T)0;
float fp_value = (float)value;
fp_value = scale * fp_value;
if (in_range) input[off] = (T)fp_value;
}
ppl::common::RetCode PPLCUDAGemmModifyWeights(
const hipStream_t &stream,
ppl::nn::TensorShape* weight_shape,
void* weight,
void* tmp_weight, //if need transpose
const ppl::nn::common::GemmParam *param)
{
int transB = param->transB;
float alpha = param->alpha;
auto type = weight_shape->GetDataType();
int pad_size = GetPadSize(type);
const int dim0 = weight_shape->GetDim(0);//assume padded
const int dim1 = weight_shape->GetDim(1);
if (!transB) {
#define TRANSWEIGHT(Type) \
hipLaunchKernelGGL(( matrix_transpose<Type>), dim3(grid), dim3(block), 0, stream, \
(Type*)tmp_weight, (Type*)weight, alpha, dim0, dim1); \
hipMemcpyAsync((Type*)weight, (Type*)tmp_weight, dim0*dim1*sizeof(Type), \
hipMemcpyDeviceToDevice, stream);
dim3 grid(DivUp(dim1, 32), DivUp(dim0, 32), 1);
dim3 block(32, 32, 1);
weight_shape->SetDim(0, dim1);
weight_shape->SetDim(1, dim0);
switch(type){
case ppl::common::DATATYPE_FLOAT32 : {
TRANSWEIGHT(float)
break;
}
case ppl::common::DATATYPE_FLOAT16 : {
TRANSWEIGHT(__half)
break;
}
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef TRANSWEIGHT
} else if (alpha != 1.f){
int grid_size = DivUp(dim0*dim1, 512);
switch(type){
case ppl::common::DATATYPE_FLOAT32 : {
hipLaunchKernelGGL(( scale<float>), dim3(grid_size), dim3(512), 0, stream, (float*)weight, alpha, dim0*dim1);
break;
}
case ppl::common::DATATYPE_FLOAT16 : {
hipLaunchKernelGGL(( scale<__half>), dim3(grid_size), dim3(512), 0, stream, (__half*)weight, alpha, dim0*dim1);
break;
}
default:
return ppl::common::RC_UNSUPPORTED;
}
}
return ppl::common::RC_SUCCESS;
}
ppl::common::RetCode PPLCUDAGemmModifyBias(
const hipStream_t &stream,
const ppl::nn::TensorShape* bias_shape,
void* bias,
const ppl::nn::common::GemmParam *param)
{
if (param->bias_term) {
auto type = bias_shape->GetDataType();
int pad_size = GetPadSize(type);
float beta = param->beta;
int N = bias_shape->GetDim(0);
int N_pad = Align(N, pad_size);
if (type == ppl::common::DATATYPE_FLOAT32) {
if (bias_shape->IsScalar()) return ppl::common::RC_UNSUPPORTED;
if (beta != 0.f && beta != 1.f){
int grid_size = DivUp(N_pad, 512);
hipLaunchKernelGGL(( scale<float>), dim3(grid_size), dim3(512), 0, stream, (float*)bias, beta, N_pad);
}
} else if (type == ppl::common::DATATYPE_FLOAT16) {
if (bias_shape->IsScalar()) return ppl::common::RC_UNSUPPORTED;
if (beta != 0.f && beta != 1.f){
int grid_size = DivUp(N_pad, 512);
hipLaunchKernelGGL(( scale<__half>), dim3(grid_size), dim3(512), 0, stream, (__half*)bias, beta, N_pad);
}
} else{
return ppl::common::RC_UNSUPPORTED;
}
}
return ppl::common::RC_SUCCESS;
}
int PPLCUDAGemmSelectKernel(
const hipStream_t &stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* weight_shape,
const void* weight,
const void* bias,
const ppl::nn::TensorShape* output_shape,
void* output,
const ppl::nn::common::GemmParam ¶m,
void* temp_buffer,
const fuse_param_t &fuse_param)
{
auto type = weight_shape->GetDataType();
if (!is_g_kvec_set) init_f1_kvec(g_kvec, type);
int pad_size = GetPadSize(type);
int transA = param.transA;
int transB = param.transB;
int N_pad = transB ? weight_shape->GetDim(0) : weight_shape->GetDim(1);
int K_pad = transB ? weight_shape->GetDim(1) : weight_shape->GetDim(0);
int M = transA ? input_shape->GetDim(1) : input_shape->GetDim(0);
int concat_offset_v8 = fuse_param.concat_offset / pad_size;
int concat_stride_v8 = fuse_param.concat_stride / pad_size;
int4 *final_out = fuse_param.has_concat ? (int4*)fuse_param.post_concat : (int4*)output;
// fuse configs
__half2 clip_min = __float2half2_rn(fuse_param.clip_min);
__half2 clip_max = __float2half2_rn(fuse_param.clip_max);
__half2 elt_clip_min = __float2half2_rn(fuse_param.elt_clip_min);
__half2 elt_clip_max = __float2half2_rn(fuse_param.elt_clip_max);
bool has_bias = param.bias_term;//beta != 0.f;
float minTime = FLT_MAX;
int best_kid = -1;
float elapsed;
hipEvent_t begin, end;
hipEventCreate(&begin);
hipEventCreate(&end);
//transpose
int4 *input0_tmp = (int4*)input;
if (transA == 1) { // input is shape of (K, M), we need K as the 1st inner dim
dim3 grid(DivUp(K_pad, 32), DivUp(M, 32), 1);
dim3 block(32, 32, 1);
if (type == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( matrix_transpose<float>), dim3(grid), dim3(block), 0, stream,
(float*)temp_buffer, (float*)input, 1.f, K_pad, M);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( matrix_transpose<__half>), dim3(grid), dim3(block), 0, stream,
(__half*)temp_buffer, (__half*)input, 1.f, K_pad, M);
} else {
return ppl::common::RC_UNSUPPORTED;
}
input0_tmp = (int4*)temp_buffer;
}
for (unsigned int kid = 0; kid < g_kvec.size(); kid++) {
int tile_m_per_cta = g_kvec[kid].tile_m_per_cta;
int tile_n_per_cta = g_kvec[kid].tile_n_per_cta;
int tile_k_per_cta = g_kvec[kid].tile_k_per_cta;
int cta_size_in_thd = g_kvec[kid].cta_size_in_thd;
dim3 block_size, grid_size;
block_size.x = cta_size_in_thd;
block_size.y = 1;
block_size.z = 1;
grid_size.x = DivUp(N_pad, tile_n_per_cta);
grid_size.y = DivUp(M, tile_m_per_cta);
grid_size.z = 1;//num_grp * splitk;
hipEventRecord(begin, stream);
for (int i = 0; i < TIMES; i++) {
if (g_kvec[kid].ktype == CONV_2SPK_F1) {
FAKE_CONV_PARAM
int kLoopNum = DivUp(K_pad, tile_k_per_cta);
lut_t in_lut, flt_lut;
                hipLaunchKernelGGL(g_kvec[kid].lut_kptr, dim3(grid_size), dim3(block_size), 0, stream, GEMM_FUNC_PARAM);
}
else {
printf("Error: kernel type error in %s\n", __FUNCTION__);
}
}
hipEventRecord(end, stream);
hipEventSynchronize(end);
hipEventElapsedTime(&elapsed, begin, end);
if (elapsed < minTime){
best_kid = kid;
minTime = elapsed;
}
}
hipEventDestroy(begin);
hipEventDestroy(end);
return best_kid;
}
template<typename T>
ppl::common::RetCode PPLCUDAGemvForwardImp(
const hipStream_t &stream,
const int M,
const int N,
const int K,
const void* input,
const void* weight,
const void* bias,
void* output,
const ppl::nn::common::GemmParam ¶m,
void* temp_buffer,
const fuse_param_t &fuse_param);
ppl::common::RetCode PPLCUDAGemmForwardImp(
const hipStream_t &stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* weight_shape,
const void* weight,
const void* bias,
const ppl::nn::TensorShape* output_shape,
void* output,
const ppl::nn::common::GemmParam ¶m,
void* temp_buffer,
const fuse_param_t &fuse_param,
const int kid)
{
auto type = weight_shape->GetDataType();
if ( !is_g_kvec_set ) init_f1_kvec(g_kvec, type);
int pad_size = GetPadSize(type);
int transA = param.transA;
int transB = param.transB;
if(!param.transB) return ppl::common::RC_UNSUPPORTED;
int N = transB ? weight_shape->GetDim(0) : weight_shape->GetDim(1);
int K = transB ? weight_shape->GetDim(1) : weight_shape->GetDim(0);
int N_pad = Align(N, pad_size);
int K_pad = Align(K, pad_size);
int M = transA ? input_shape->GetDim(1) : input_shape->GetDim(0);
int concat_offset_v8 = fuse_param.concat_offset / pad_size;
int concat_stride_v8 = fuse_param.concat_stride / pad_size;
int4 *final_out = fuse_param.has_concat ? (int4*)fuse_param.post_concat : (int4*)output;
// fuse configs
__half2 clip_min = __float2half2_rn(fuse_param.clip_min);
__half2 clip_max = __float2half2_rn(fuse_param.clip_max);
__half2 elt_clip_min = __float2half2_rn(fuse_param.elt_clip_min);
__half2 elt_clip_max = __float2half2_rn(fuse_param.elt_clip_max);
ppl::common::RetCode status = ppl::common::RC_SUCCESS;
if(M == 1){
status = PPLCUDAGemvForwardImp<__half>(stream,
M, N, K,
input, weight, bias,
(void*)final_out,
param, temp_buffer, fuse_param);
return status;
}
// kernel configs
int tile_m_per_cta = g_kvec[kid].tile_m_per_cta;
int tile_n_per_cta = g_kvec[kid].tile_n_per_cta;
int tile_k_per_cta = g_kvec[kid].tile_k_per_cta;
int cta_size_in_thd = g_kvec[kid].cta_size_in_thd;
dim3 block_size, grid_size;
block_size.x = cta_size_in_thd;
block_size.y = 1;
block_size.z = 1;
grid_size.x = DivUp(N_pad, tile_n_per_cta);
grid_size.y = DivUp(M, tile_m_per_cta);
grid_size.z = 1;//num_grp * splitk;
int kLoopNum = DivUp(K_pad, tile_k_per_cta);
lut_t in_lut, flt_lut;
bool has_bias = param.bias_term;//beta != 0.f;
int4 *input0_tmp = (int4*)input;
if (transA == 1) {
dim3 grid(DivUp(K_pad, 32), DivUp(M, 32), 1);
dim3 block(32, 32, 1);
if (type == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( matrix_transpose<float>), dim3(grid), dim3(block), 0, stream,
(float*)temp_buffer, (float*)input, 1.f, K_pad, M);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( matrix_transpose<__half>), dim3(grid), dim3(block), 0, stream,
(__half*)temp_buffer, (__half*)input, 1.f, K_pad, M);
} else {
return ppl::common::RC_UNSUPPORTED;
}
input0_tmp = (int4*)temp_buffer;
}
FAKE_CONV_PARAM
    hipLaunchKernelGGL(g_kvec[kid].lut_kptr, dim3(grid_size), dim3(block_size), 0, stream, GEMM_FUNC_PARAM);
return status;
}
template <typename T>
__device__ __inline__ void fma_v4(const int4 a, const int4 b, int4 &c);
template <>
__device__ __inline__ void fma_v4<__half>(const int4 a, const int4 b, int4 &c){
#if __CUDA_ARCH__ >= 600
((__half2*)&c)[0] = __hfma2(((__half2*)&a)[0], ((__half2*)&b)[0], ((__half2*)&c)[0]);
((__half2*)&c)[1] = __hfma2(((__half2*)&a)[1], ((__half2*)&b)[1], ((__half2*)&c)[1]);
((__half2*)&c)[2] = __hfma2(((__half2*)&a)[2], ((__half2*)&b)[2], ((__half2*)&c)[2]);
((__half2*)&c)[3] = __hfma2(((__half2*)&a)[3], ((__half2*)&b)[3], ((__half2*)&c)[3]);
#else
#endif
}
template <>
__device__ __inline__ void fma_v4<float>(const int4 a, const int4 b, int4 &c){
((float*)&c)[0] = ((float*)&a)[0] * ((float*)&b)[0] + ((float*)&c)[0];
((float*)&c)[1] = ((float*)&a)[1] * ((float*)&b)[1] + ((float*)&c)[1];
((float*)&c)[2] = ((float*)&a)[2] * ((float*)&b)[2] + ((float*)&c)[2];
((float*)&c)[3] = ((float*)&a)[3] * ((float*)&b)[3] + ((float*)&c)[3];
}
template <typename T>
__device__ __inline__ int4 add_v4(const int4 a, const int4 b);
template <>
__device__ __inline__ int4 add_v4<__half>(const int4 a, const int4 b){
int4 res = {0,0,0,0};
#if __CUDA_ARCH__ >= 600
((__half2*)&res)[0] = __hadd2(((__half2*)&a)[0], ((__half2*)&b)[0]);
((__half2*)&res)[1] = __hadd2(((__half2*)&a)[1], ((__half2*)&b)[1]);
((__half2*)&res)[2] = __hadd2(((__half2*)&a)[2], ((__half2*)&b)[2]);
((__half2*)&res)[3] = __hadd2(((__half2*)&a)[3], ((__half2*)&b)[3]);
#else
#endif
return res;
}
template <>
__device__ __inline__ int4 add_v4<float>(const int4 a, const int4 b){
int4 res = {0,0,0,0};
((float*)&res)[0] = ((float*)&a)[0] + ((float*)&b)[0];
((float*)&res)[1] = ((float*)&a)[1] + ((float*)&b)[1];
((float*)&res)[2] = ((float*)&a)[2] + ((float*)&b)[2];
((float*)&res)[3] = ((float*)&a)[3] + ((float*)&b)[3];
return res;
}
template <typename T>
__inline__ __device__ T reduce_v4(int4 data){
T res = (T)0;
for(int i = 0; i < sizeof(int4)/sizeof(T); i++){
res = Math<T,T,T>::add(res, ((T*)&data)[i]);
}
    return res;
}
template <typename T>
__device__ __inline__ void activation(const int activation, int4 &v){
T *t_v = (T*)&v;
constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
if(activation ==1){
for(int i = 0; i < T_NUMS_PER_INT4; i++)
t_v[i] = Math<T,T,T>::ge(t_v[i], (T)0)?
t_v[i] : (T)0;
} else{
for(int i = 0; i < T_NUMS_PER_INT4; i++){
T tmp = expf(t_v[i]);
t_v[i] = tmp * __frcp_rn(tmp + (T)1);
}
}
}
template <>
__device__ __inline__ void activation<__half>(const int activation, int4 &v){
#if __CUDA_ARCH__ >= 600
__half2 *h2_v = (__half2*)&v;
int *int_v = (int*)&v;
if(activation ==1){
for(int i = 0; i < 4; i++)
int_v[i] = __vmaxs2(int_v[i], 0);
} else{
__half2 one = {(__half)1.f, (__half)1.f};
for(int i = 0; i < 4; i++){
__half2 tmp = h2exp(h2_v[i]);
h2_v[i] = __hmul2(tmp, h2rcp(__hadd2(one, tmp)));// __h2div(tmp, __hadd2(one, tmp));
}
}
#else
#endif
}
template<typename T>
__device__ __inline__ void clip(int4 &v, float clip_min, float clip_max){
T *t_v = (T*)&v;
constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
for(int i = 0; i < T_NUMS_PER_INT4; i++){
t_v[i] = Math<T,T,T>::ge(t_v[i], (T)clip_min)?
t_v[i] : (T)clip_min;
t_v[i] = Math<T,T,T>::le(t_v[i], (T)clip_max)?
t_v[i] : (T)clip_max;
}
}
//matrix: NxK
// N: pad int4
// K: pad int4
// layout and fuse pattern consistent with gemm
//BLK_TILE_N: min:8
template<typename T, int BLK_TILE_N, int THD_TILE_N_V4, int BLK_SIZE>
__global__ void gemv(void *output,
const void *vec,
const void *matrix,
const void *bias,
const int padK,
const int padN,
const fuse_param_t fuse_param)
{
    // blk config
// one int4 per thd along K
constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
constexpr int BLK_TILE_N_V4 = BLK_TILE_N / T_NUMS_PER_INT4;
constexpr int THD_TILE_N = THD_TILE_N_V4 * T_NUMS_PER_INT4;
constexpr int BLK_SIZE_Y = BLK_TILE_N_V4 / THD_TILE_N_V4;
constexpr int BLK_SIZE_X = BLK_SIZE / BLK_SIZE_Y;
constexpr int BLK_TILE_K = BLK_SIZE_X;
int pad_k_v4 = padK / T_NUMS_PER_INT4;
int pad_n_v4 = padN / T_NUMS_PER_INT4;
int n_id = blockIdx.x*BLK_TILE_N + threadIdx.y*T_NUMS_PER_INT4;
int64_t b_base_v4 = (int64_t)n_id*pad_k_v4;
int4 *matrix_base_v4 = (int4*)matrix + b_base_v4;
int4 reg_c[THD_TILE_N];
int4 reg_b[THD_TILE_N];
bool in_n_range[THD_TILE_N_V4];
int4 reg_a;
int4 zero = {0,0,0,0};
T c[THD_TILE_N] = { T(0) };
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++) c[i] = (T)0;
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++){
reg_c[i] = zero;
}
#pragma unroll
for(int i = 0; i < THD_TILE_N_V4; i++){
in_n_range[i] = blockIdx.x*BLK_TILE_N_V4 + threadIdx.y + i*BLK_SIZE_Y < pad_n_v4;
}
// ld global VxM
#pragma unroll
for(int k = 0; k < DivUp(pad_k_v4,BLK_TILE_K); k++){
int64_t off = k*BLK_TILE_K + threadIdx.x;
bool in_range = off < pad_k_v4;
reg_a = in_range? ((int4*)vec)[off] : zero;
#pragma unroll
for(int i = 0; i < THD_TILE_N_V4; i++){
#pragma unroll
for(int j = 0; j < T_NUMS_PER_INT4; j++){
reg_b[i*T_NUMS_PER_INT4 + j] = in_n_range[i] && in_range ?
matrix_base_v4[(i*T_NUMS_PER_INT4*BLK_SIZE_Y+j)*pad_k_v4 + off]
: zero;
fma_v4<T>(reg_a, reg_b[i*T_NUMS_PER_INT4 + j],
reg_c[i*T_NUMS_PER_INT4 + j]);
}
}
}
// int4 reduce to half
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++){
#pragma unroll
for(int n = 0; n < T_NUMS_PER_INT4; n++){
c[i] = Math<T,T,T>::add( ((T*)reg_c)[i*T_NUMS_PER_INT4 + n],
c[i]);
}
}
__shared__ T smem[BLK_SIZE_X*BLK_TILE_N];
int reduce_off = (threadIdx.y*THD_TILE_N)*BLK_SIZE_X + threadIdx.x;
constexpr int REDUCE_SIZE = BLK_SIZE_X;
if(REDUCE_SIZE >= 64){
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++){
smem[reduce_off + i*BLK_SIZE_X] = c[i];
}
__syncthreads();
}
//reduce
if(REDUCE_SIZE >= 1024){
if(threadIdx.x < 512)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[512 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
if(REDUCE_SIZE >= 512){
if(threadIdx.x < 256)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[256 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
if(REDUCE_SIZE >= 256){
if(threadIdx.x < 128)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[128 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
if(REDUCE_SIZE >= 128){
if(threadIdx.x < 64)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[64 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
unsigned FULL_MASK = __activemask();
if (REDUCE_SIZE >= 64) {
if(threadIdx.x < 32){
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[reduce_off + i*BLK_SIZE_X + 32]);
}
}
if(threadIdx.x < 32){
if (REDUCE_SIZE >= 32) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 16));
}
if (REDUCE_SIZE >= 16) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 8));
}
if (REDUCE_SIZE >= 8) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 4));
}
if (REDUCE_SIZE >= 4) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 2));
}
if (REDUCE_SIZE >= 2) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 1));
}
}
// shared shuffle
int4 *smem_v4 = (int4*)smem;
if (threadIdx.x == 0) {
#pragma unroll
for(int i = 0; i < THD_TILE_N_V4; i++){
smem_v4[i*BLK_SIZE_Y + threadIdx.y] = ((int4*)c)[i];
}
}
__syncthreads();
int tid = threadIdx.y*BLK_SIZE_X + threadIdx.x;
for(int thd_off = tid; thd_off < BLK_TILE_N_V4; thd_off += BLK_SIZE){
int out_off = blockIdx.x*BLK_TILE_N_V4 + thd_off;
bool in_output_range = out_off < pad_n_v4;
if(in_output_range){
int4 bias_data = bias!=NULL? ((int4*)bias)[out_off] : zero;
//TODO add bias
int4 out = add_v4<T>(smem_v4[thd_off], bias_data);
// fuse
if(fuse_param.has_activation) activation<T>(fuse_param.has_activation, out);
if(fuse_param.has_clip) clip<T>(out, fuse_param.clip_min, fuse_param.clip_max);
int concatV4_off = 0;
if(fuse_param.has_concat){
int concat_offset_v4 = fuse_param.concat_offset / T_NUMS_PER_INT4;
int concat_stride_v4 = fuse_param.concat_stride / T_NUMS_PER_INT4;
concatV4_off = concat_offset_v4 + blockIdx.y*concat_stride_v4;
out_off += concatV4_off;
}
((int4*)output)[out_off] = out;
}
}
}
template<typename T>
ppl::common::RetCode PPLCUDAGemvForwardImp(
const hipStream_t &stream,
const int M,
const int N,
const int K,
const void* input,
const void* weight,
const void* bias,
void* output,
const ppl::nn::common::GemmParam ¶m,
void* temp_buffer,
const fuse_param_t &fuse_param)
{
if(!param.transB) return ppl::common::RC_UNSUPPORTED;
constexpr int ELEM_NUM_PR_LD = sizeof(int4)/sizeof(T);
constexpr int expect_blocks = 64;
//constexpr int MAX_BLK_SIZE = 256;
//constexpr int MAX_THD_TILE_N_V4 = 4;
int n_v4 = N / ELEM_NUM_PR_LD;
int blk_tile_n_v4 = DivUp(n_v4, expect_blocks/M);
#define LAUNCH_KERNEL(){ \
constexpr int BLK_TILE_N = BLK_SIZE_Y * THD_TILE_N_V4 * ELEM_NUM_PR_LD; \
constexpr int BLK_SIZE = BLK_SIZE_Y * BLK_SIZE_X; \
dim3 grid; \
grid.x = DivUp(N, BLK_TILE_N); \
grid.y = 1; grid.z = 1; \
dim3 threads = dim3(BLK_SIZE_X, BLK_SIZE_Y,1); \
hipLaunchKernelGGL(( gemv<T, BLK_TILE_N, THD_TILE_N_V4, BLK_SIZE>), dim3(grid), dim3(threads), 0, stream, \
output, input, weight, bias, K, N, fuse_param); \
}
#define CONFIG_KERNEL(_blk_tile_n_v4){ \
if(BLK_SIZE_X <= 64 && blk_tile_n_v4 >= 16){ \
constexpr int THD_TILE_N_V4 = 4; \
constexpr int BLK_SIZE_Y = 4; \
LAUNCH_KERNEL(); \
} else if(blk_tile_n_v4 >= 8){ \
constexpr int THD_TILE_N_V4 = 4; \
constexpr int BLK_SIZE_Y = 2; \
LAUNCH_KERNEL(); \
} else if(blk_tile_n_v4 >= 4){ \
constexpr int THD_TILE_N_V4 = 2; \
constexpr int BLK_SIZE_Y = 2; \
LAUNCH_KERNEL(); \
} else if(blk_tile_n_v4 >= 2){ \
constexpr int THD_TILE_N_V4 = 2; \
constexpr int BLK_SIZE_Y = 1; \
LAUNCH_KERNEL(); \
} else{ \
constexpr int THD_TILE_N_V4 = 1; \
constexpr int BLK_SIZE_Y = 1; \
LAUNCH_KERNEL(); \
} \
}
if (K >= 512){
constexpr int BLK_SIZE_X = 64;
CONFIG_KERNEL(blk_tile_n_v4);
}
else{
constexpr int BLK_SIZE_X = 32;
CONFIG_KERNEL(blk_tile_n_v4);
}
return ppl::common::RC_SUCCESS;
}
| gemm.cu | #include "cudakernel/gemm/gemm.h"
#include "cudakernel/math/math.h"
#include "cudakernel/common/common.h"
#include <cuda_fp16.h>
#include <float.h>
#include "kernel_type.h"
#include "conv_common.h"
#define TIMES 4
static std::vector<kernel_info_t> g_kvec;
static bool is_g_kvec_set = false;
#define FAKE_CONV_PARAM \
const int in_hw = 1; const int out_hw = 1; \
const int flt_hw = 1; const int splitk = 1; \
const int in_height = 1; const int in_width = 1; \
const int batch = M; const int num_grp = 1; \
const int num_chl_per_grp = 0; const int num_chl_per_grp_pad = K_pad; \
const int flt_height = 1; const int flt_width = 1; \
const int num_flt_per_grp = 0; const int num_flt_per_grp_pad = N_pad; \
const int out_height = 1; const int out_width = 1; \
const int stride_height = 1; const int stride_width = 1; \
const int pad_height = 0; const int pad_width = 0; \
const int hole_height = 1; const int hole_width = 1;
#define GEMM_FUNC_PARAM \
input0_tmp, \
(int4*)weight, \
final_out, \
kLoopNum, \
in_lut, 0, \
flt_lut, 0, \
in_hw, out_hw, \
flt_hw, splitk, \
in_height, in_width, \
batch, num_grp, \
num_chl_per_grp, num_chl_per_grp_pad, \
flt_height, flt_width, \
num_flt_per_grp, num_flt_per_grp_pad, \
out_height, out_width, \
stride_height, stride_width, \
pad_height, pad_width, \
hole_height, hole_width, \
has_bias, (int4*)bias, \
fuse_param.has_activation, clip_min, \
fuse_param.has_clip, clip_max, \
fuse_param.has_prelu, (const void *) fuse_param.prelu, \
fuse_param.has_elt, (const int4 *) fuse_param.pre_data, \
fuse_param.has_elt_activation, elt_clip_min, \
fuse_param.has_elt_clip, elt_clip_max, \
fuse_param.has_elt_prelu, (const void *) fuse_param.elt_prelu, \
(__half)fuse_param.leaky, (__half)fuse_param.elt_leaky, \
fuse_param.has_concat, concat_offset_v8, \
concat_stride_v8
void init_f1_kvec(std::vector<kernel_info_t> &g_kvec, ppl::common::datatype_t type)
{
if ( type == ppl::common::DATATYPE_FLOAT32 )
{
printf("fp32 unsupported in %s\n", __FUNCTION__);
}
else if ( type == ppl::common::DATATYPE_FLOAT16 )
{
Initialize2spkConvF1KernelContainer(g_kvec);
}
else
{ printf("type unsupported\n"); }
is_g_kvec_set = true;
}
uint64_t PPLGemmCUDAGetBufSize(
const ppl::nn::TensorShape* input_shape,
int transA)
{
auto type = input_shape->GetDataType();
int type_size = ppl::common::GetSizeOfDataType(type);
if(transA){
int pad_size = GetPadSize(type); // ldg 128 bytes
int K = input_shape->GetDim(0);
int M = input_shape->GetDim(1);
int K_pad = Align(K, pad_size);
return M * K_pad * type_size;
}
return 0;
}
unsigned int PPLCUDAGemmGetBiasSize(
const ppl::common::datatype_t type,
const int N,
const bool is_scalar)
{
if(!is_scalar) return 0;
int pad_size = GetPadSize(type); // ldg 128 bytes
int N_pad = Align(N, pad_size);
int type_size = ppl::common::GetSizeOfDataType(type);
return N_pad * type_size;
}
//block size: (32,32,1)
template<typename T>
__global__ void matrix_transpose(
T *output,
T *input,
float scale,
const int in_row,
const int in_col)
{
unsigned int in_x = blockIdx.x*32 + threadIdx.x;
unsigned int in_y = blockIdx.y*32 + threadIdx.y;
unsigned int out_x = blockIdx.y*32 + threadIdx.x;
unsigned int out_y = blockIdx.x*32 + threadIdx.y;
    bool in_range = (in_x < in_col) && (in_y < in_row);
    bool out_range = (out_x < in_row) && (out_y < in_col);
__shared__ T smem[32][33];
T value = in_range ? input[in_y*in_col + in_x] : (T)0;
smem[threadIdx.x][threadIdx.y] = value;
__syncthreads();
value = smem[threadIdx.y][threadIdx.x];
float fp_value = (float)value * scale;
    if(out_range) output[out_y*in_row + out_x] = (T)fp_value;
}
template<typename T>
__global__ void scale(T *input, float scale, unsigned int size){
unsigned int off = blockIdx.x*512 + threadIdx.x;
    bool in_range = off < size;
T value = in_range ? input[off] : (T)0;
float fp_value = (float)value;
fp_value = scale * fp_value;
if (in_range) input[off] = (T)fp_value;
}
ppl::common::RetCode PPLCUDAGemmModifyWeights(
const cudaStream_t &stream,
ppl::nn::TensorShape* weight_shape,
void* weight,
void* tmp_weight, //if need transpose
const ppl::nn::common::GemmParam *param)
{
int transB = param->transB;
float alpha = param->alpha;
auto type = weight_shape->GetDataType();
int pad_size = GetPadSize(type);
const int dim0 = weight_shape->GetDim(0);//assume padded
const int dim1 = weight_shape->GetDim(1);
if (!transB) {
#define TRANSWEIGHT(Type) \
matrix_transpose<Type><<<grid, block, 0, stream>>> \
((Type*)tmp_weight, (Type*)weight, alpha, dim0, dim1); \
cudaMemcpyAsync((Type*)weight, (Type*)tmp_weight, dim0*dim1*sizeof(Type), \
cudaMemcpyDeviceToDevice, stream);
dim3 grid(DivUp(dim1, 32), DivUp(dim0, 32), 1);
dim3 block(32, 32, 1);
weight_shape->SetDim(0, dim1);
weight_shape->SetDim(1, dim0);
switch(type){
case ppl::common::DATATYPE_FLOAT32 : {
TRANSWEIGHT(float)
break;
}
case ppl::common::DATATYPE_FLOAT16 : {
TRANSWEIGHT(__half)
break;
}
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef TRANSWEIGHT
} else if (alpha != 1.f){
int grid_size = DivUp(dim0*dim1, 512);
switch(type){
case ppl::common::DATATYPE_FLOAT32 : {
scale<float><<<grid_size, 512, 0, stream>>>((float*)weight, alpha, dim0*dim1);
break;
}
case ppl::common::DATATYPE_FLOAT16 : {
scale<__half><<<grid_size, 512, 0, stream>>>((__half*)weight, alpha, dim0*dim1);
break;
}
default:
return ppl::common::RC_UNSUPPORTED;
}
}
return ppl::common::RC_SUCCESS;
}
ppl::common::RetCode PPLCUDAGemmModifyBias(
const cudaStream_t &stream,
const ppl::nn::TensorShape* bias_shape,
void* bias,
const ppl::nn::common::GemmParam *param)
{
if (param->bias_term) {
auto type = bias_shape->GetDataType();
int pad_size = GetPadSize(type);
float beta = param->beta;
int N = bias_shape->GetDim(0);
int N_pad = Align(N, pad_size);
if (type == ppl::common::DATATYPE_FLOAT32) {
if (bias_shape->IsScalar()) return ppl::common::RC_UNSUPPORTED;
if (beta != 0.f && beta != 1.f){
int grid_size = DivUp(N_pad, 512);
scale<float><<<grid_size, 512, 0, stream>>>((float*)bias, beta, N_pad);
}
} else if (type == ppl::common::DATATYPE_FLOAT16) {
if (bias_shape->IsScalar()) return ppl::common::RC_UNSUPPORTED;
if (beta != 0.f && beta != 1.f){
int grid_size = DivUp(N_pad, 512);
scale<__half><<<grid_size, 512, 0, stream>>>((__half*)bias, beta, N_pad);
}
} else{
return ppl::common::RC_UNSUPPORTED;
}
}
return ppl::common::RC_SUCCESS;
}
int PPLCUDAGemmSelectKernel(
const cudaStream_t &stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* weight_shape,
const void* weight,
const void* bias,
const ppl::nn::TensorShape* output_shape,
void* output,
const ppl::nn::common::GemmParam ¶m,
void* temp_buffer,
const fuse_param_t &fuse_param)
{
auto type = weight_shape->GetDataType();
if (!is_g_kvec_set) init_f1_kvec(g_kvec, type);
int pad_size = GetPadSize(type);
int transA = param.transA;
int transB = param.transB;
int N_pad = transB ? weight_shape->GetDim(0) : weight_shape->GetDim(1);
int K_pad = transB ? weight_shape->GetDim(1) : weight_shape->GetDim(0);
int M = transA ? input_shape->GetDim(1) : input_shape->GetDim(0);
int concat_offset_v8 = fuse_param.concat_offset / pad_size;
int concat_stride_v8 = fuse_param.concat_stride / pad_size;
int4 *final_out = fuse_param.has_concat ? (int4*)fuse_param.post_concat : (int4*)output;
// fuse configs
__half2 clip_min = __float2half2_rn(fuse_param.clip_min);
__half2 clip_max = __float2half2_rn(fuse_param.clip_max);
__half2 elt_clip_min = __float2half2_rn(fuse_param.elt_clip_min);
__half2 elt_clip_max = __float2half2_rn(fuse_param.elt_clip_max);
bool has_bias = param.bias_term;//beta != 0.f;
float minTime = FLT_MAX;
int best_kid = -1;
float elapsed;
cudaEvent_t begin, end;
cudaEventCreate(&begin);
cudaEventCreate(&end);
//transpose
int4 *input0_tmp = (int4*)input;
if (transA == 1) { // input is shape of (K, M), we need K as the 1st inner dim
dim3 grid(DivUp(K_pad, 32), DivUp(M, 32), 1);
dim3 block(32, 32, 1);
if (type == ppl::common::DATATYPE_FLOAT32) {
matrix_transpose<float><<<grid, block, 0, stream>>>
((float*)temp_buffer, (float*)input, 1.f, K_pad, M);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
matrix_transpose<__half><<<grid, block, 0, stream>>>
((__half*)temp_buffer, (__half*)input, 1.f, K_pad, M);
} else {
return ppl::common::RC_UNSUPPORTED;
}
input0_tmp = (int4*)temp_buffer;
}
for (unsigned int kid = 0; kid < g_kvec.size(); kid++) {
int tile_m_per_cta = g_kvec[kid].tile_m_per_cta;
int tile_n_per_cta = g_kvec[kid].tile_n_per_cta;
int tile_k_per_cta = g_kvec[kid].tile_k_per_cta;
int cta_size_in_thd = g_kvec[kid].cta_size_in_thd;
dim3 block_size, grid_size;
block_size.x = cta_size_in_thd;
block_size.y = 1;
block_size.z = 1;
grid_size.x = DivUp(N_pad, tile_n_per_cta);
grid_size.y = DivUp(M, tile_m_per_cta);
grid_size.z = 1;//num_grp * splitk;
cudaEventRecord(begin, stream);
for (int i = 0; i < TIMES; i++) {
if (g_kvec[kid].ktype == CONV_2SPK_F1) {
FAKE_CONV_PARAM
int kLoopNum = DivUp(K_pad, tile_k_per_cta);
lut_t in_lut, flt_lut;
(g_kvec[kid].lut_kptr)<<<grid_size, block_size, 0, stream>>>(GEMM_FUNC_PARAM);
}
else {
printf("Error: kernel type error in %s\n", __FUNCTION__);
}
}
cudaEventRecord(end, stream);
cudaEventSynchronize(end);
cudaEventElapsedTime(&elapsed, begin, end);
if (elapsed < minTime){
best_kid = kid;
minTime = elapsed;
}
}
cudaEventDestroy(begin);
cudaEventDestroy(end);
return best_kid;
}
template<typename T>
ppl::common::RetCode PPLCUDAGemvForwardImp(
const cudaStream_t &stream,
const int M,
const int N,
const int K,
const void* input,
const void* weight,
const void* bias,
void* output,
const ppl::nn::common::GemmParam ¶m,
void* temp_buffer,
const fuse_param_t &fuse_param);
ppl::common::RetCode PPLCUDAGemmForwardImp(
const cudaStream_t &stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* weight_shape,
const void* weight,
const void* bias,
const ppl::nn::TensorShape* output_shape,
void* output,
const ppl::nn::common::GemmParam ¶m,
void* temp_buffer,
const fuse_param_t &fuse_param,
const int kid)
{
auto type = weight_shape->GetDataType();
if ( !is_g_kvec_set ) init_f1_kvec(g_kvec, type);
int pad_size = GetPadSize(type);
int transA = param.transA;
int transB = param.transB;
if(!param.transB) return ppl::common::RC_UNSUPPORTED;
int N = transB ? weight_shape->GetDim(0) : weight_shape->GetDim(1);
int K = transB ? weight_shape->GetDim(1) : weight_shape->GetDim(0);
int N_pad = Align(N, pad_size);
int K_pad = Align(K, pad_size);
int M = transA ? input_shape->GetDim(1) : input_shape->GetDim(0);
int concat_offset_v8 = fuse_param.concat_offset / pad_size;
int concat_stride_v8 = fuse_param.concat_stride / pad_size;
int4 *final_out = fuse_param.has_concat ? (int4*)fuse_param.post_concat : (int4*)output;
// fuse configs
__half2 clip_min = __float2half2_rn(fuse_param.clip_min);
__half2 clip_max = __float2half2_rn(fuse_param.clip_max);
__half2 elt_clip_min = __float2half2_rn(fuse_param.elt_clip_min);
__half2 elt_clip_max = __float2half2_rn(fuse_param.elt_clip_max);
ppl::common::RetCode status = ppl::common::RC_SUCCESS;
if(M == 1){
status = PPLCUDAGemvForwardImp<__half>(stream,
M, N, K,
input, weight, bias,
(void*)final_out,
param, temp_buffer, fuse_param);
return status;
}
// kernel configs
int tile_m_per_cta = g_kvec[kid].tile_m_per_cta;
int tile_n_per_cta = g_kvec[kid].tile_n_per_cta;
int tile_k_per_cta = g_kvec[kid].tile_k_per_cta;
int cta_size_in_thd = g_kvec[kid].cta_size_in_thd;
dim3 block_size, grid_size;
block_size.x = cta_size_in_thd;
block_size.y = 1;
block_size.z = 1;
grid_size.x = DivUp(N_pad, tile_n_per_cta);
grid_size.y = DivUp(M, tile_m_per_cta);
grid_size.z = 1;//num_grp * splitk;
int kLoopNum = DivUp(K_pad, tile_k_per_cta);
lut_t in_lut, flt_lut;
bool has_bias = param.bias_term;//beta != 0.f;
int4 *input0_tmp = (int4*)input;
if (transA == 1) {
dim3 grid(DivUp(K_pad, 32), DivUp(M, 32), 1);
dim3 block(32, 32, 1);
if (type == ppl::common::DATATYPE_FLOAT32) {
matrix_transpose<float><<<grid, block, 0, stream>>>
((float*)temp_buffer, (float*)input, 1.f, K_pad, M);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
matrix_transpose<__half><<<grid, block, 0, stream>>>
((__half*)temp_buffer, (__half*)input, 1.f, K_pad, M);
} else {
return ppl::common::RC_UNSUPPORTED;
}
input0_tmp = (int4*)temp_buffer;
}
FAKE_CONV_PARAM
(g_kvec[kid].lut_kptr)<<<grid_size, block_size, 0, stream>>>(GEMM_FUNC_PARAM);
return status;
}
template <typename T>
__device__ __inline__ void fma_v4(const int4 a, const int4 b, int4 &c);
template <>
__device__ __inline__ void fma_v4<__half>(const int4 a, const int4 b, int4 &c){
#if __CUDA_ARCH__ >= 600
((__half2*)&c)[0] = __hfma2(((__half2*)&a)[0], ((__half2*)&b)[0], ((__half2*)&c)[0]);
((__half2*)&c)[1] = __hfma2(((__half2*)&a)[1], ((__half2*)&b)[1], ((__half2*)&c)[1]);
((__half2*)&c)[2] = __hfma2(((__half2*)&a)[2], ((__half2*)&b)[2], ((__half2*)&c)[2]);
((__half2*)&c)[3] = __hfma2(((__half2*)&a)[3], ((__half2*)&b)[3], ((__half2*)&c)[3]);
#else
#endif
}
template <>
__device__ __inline__ void fma_v4<float>(const int4 a, const int4 b, int4 &c){
((float*)&c)[0] = ((float*)&a)[0] * ((float*)&b)[0] + ((float*)&c)[0];
((float*)&c)[1] = ((float*)&a)[1] * ((float*)&b)[1] + ((float*)&c)[1];
((float*)&c)[2] = ((float*)&a)[2] * ((float*)&b)[2] + ((float*)&c)[2];
((float*)&c)[3] = ((float*)&a)[3] * ((float*)&b)[3] + ((float*)&c)[3];
}
template <typename T>
__device__ __inline__ int4 add_v4(const int4 a, const int4 b);
template <>
__device__ __inline__ int4 add_v4<__half>(const int4 a, const int4 b){
int4 res = {0,0,0,0};
#if __CUDA_ARCH__ >= 600
((__half2*)&res)[0] = __hadd2(((__half2*)&a)[0], ((__half2*)&b)[0]);
((__half2*)&res)[1] = __hadd2(((__half2*)&a)[1], ((__half2*)&b)[1]);
((__half2*)&res)[2] = __hadd2(((__half2*)&a)[2], ((__half2*)&b)[2]);
((__half2*)&res)[3] = __hadd2(((__half2*)&a)[3], ((__half2*)&b)[3]);
#else
#endif
return res;
}
template <>
__device__ __inline__ int4 add_v4<float>(const int4 a, const int4 b){
int4 res = {0,0,0,0};
((float*)&res)[0] = ((float*)&a)[0] + ((float*)&b)[0];
((float*)&res)[1] = ((float*)&a)[1] + ((float*)&b)[1];
((float*)&res)[2] = ((float*)&a)[2] + ((float*)&b)[2];
((float*)&res)[3] = ((float*)&a)[3] + ((float*)&b)[3];
return res;
}
template <typename T>
__inline__ __device__ T reduce_v4(int4 data){
T res = (T)0;
for(int i = 0; i < sizeof(int4)/sizeof(T); i++){
res = Math<T,T,T>::add(res, ((T*)&data)[i]);
}
    return res;
}
template <typename T>
__device__ __inline__ void activation(const int activation, int4 &v){
T *t_v = (T*)&v;
constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
if(activation ==1){
for(int i = 0; i < T_NUMS_PER_INT4; i++)
t_v[i] = Math<T,T,T>::ge(t_v[i], (T)0)?
t_v[i] : (T)0;
} else{
for(int i = 0; i < T_NUMS_PER_INT4; i++){
T tmp = expf(t_v[i]);
t_v[i] = tmp * __frcp_rn(tmp + (T)1);
}
}
}
template <>
__device__ __inline__ void activation<__half>(const int activation, int4 &v){
#if __CUDA_ARCH__ >= 600
__half2 *h2_v = (__half2*)&v;
int *int_v = (int*)&v;
if(activation ==1){
for(int i = 0; i < 4; i++)
int_v[i] = __vmaxs2(int_v[i], 0);
} else{
__half2 one = {(__half)1.f, (__half)1.f};
for(int i = 0; i < 4; i++){
__half2 tmp = h2exp(h2_v[i]);
h2_v[i] = __hmul2(tmp, h2rcp(__hadd2(one, tmp)));// __h2div(tmp, __hadd2(one, tmp));
}
}
#else
#endif
}
template<typename T>
__device__ __inline__ void clip(int4 &v, float clip_min, float clip_max){
T *t_v = (T*)&v;
constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
for(int i = 0; i < T_NUMS_PER_INT4; i++){
t_v[i] = Math<T,T,T>::ge(t_v[i], (T)clip_min)?
t_v[i] : (T)clip_min;
t_v[i] = Math<T,T,T>::le(t_v[i], (T)clip_max)?
t_v[i] : (T)clip_max;
}
}
//matrix: NxK
// N: pad int4
// K: pad int4
// layout and fuse pattern consistent with gemm
//BLK_TILE_N: min:8
template<typename T, int BLK_TILE_N, int THD_TILE_N_V4, int BLK_SIZE>
__global__ void gemv(void *output,
const void *vec,
const void *matrix,
const void *bias,
const int padK,
const int padN,
const fuse_param_t fuse_param)
{
    // blk config
// one int4 per thd along K
constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
constexpr int BLK_TILE_N_V4 = BLK_TILE_N / T_NUMS_PER_INT4;
constexpr int THD_TILE_N = THD_TILE_N_V4 * T_NUMS_PER_INT4;
constexpr int BLK_SIZE_Y = BLK_TILE_N_V4 / THD_TILE_N_V4;
constexpr int BLK_SIZE_X = BLK_SIZE / BLK_SIZE_Y;
constexpr int BLK_TILE_K = BLK_SIZE_X;
int pad_k_v4 = padK / T_NUMS_PER_INT4;
int pad_n_v4 = padN / T_NUMS_PER_INT4;
int n_id = blockIdx.x*BLK_TILE_N + threadIdx.y*T_NUMS_PER_INT4;
int64_t b_base_v4 = (int64_t)n_id*pad_k_v4;
int4 *matrix_base_v4 = (int4*)matrix + b_base_v4;
int4 reg_c[THD_TILE_N];
int4 reg_b[THD_TILE_N];
bool in_n_range[THD_TILE_N_V4];
int4 reg_a;
int4 zero = {0,0,0,0};
T c[THD_TILE_N] = { T(0) };
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++) c[i] = (T)0;
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++){
reg_c[i] = zero;
}
#pragma unroll
for(int i = 0; i < THD_TILE_N_V4; i++){
in_n_range[i] = blockIdx.x*BLK_TILE_N_V4 + threadIdx.y + i*BLK_SIZE_Y < pad_n_v4;
}
// ld global VxM
#pragma unroll
for(int k = 0; k < DivUp(pad_k_v4,BLK_TILE_K); k++){
int64_t off = k*BLK_TILE_K + threadIdx.x;
bool in_range = off < pad_k_v4;
reg_a = in_range? ((int4*)vec)[off] : zero;
#pragma unroll
for(int i = 0; i < THD_TILE_N_V4; i++){
#pragma unroll
for(int j = 0; j < T_NUMS_PER_INT4; j++){
reg_b[i*T_NUMS_PER_INT4 + j] = in_n_range[i] && in_range ?
matrix_base_v4[(i*T_NUMS_PER_INT4*BLK_SIZE_Y+j)*pad_k_v4 + off]
: zero;
fma_v4<T>(reg_a, reg_b[i*T_NUMS_PER_INT4 + j],
reg_c[i*T_NUMS_PER_INT4 + j]);
}
}
}
// int4 reduce to half
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++){
#pragma unroll
for(int n = 0; n < T_NUMS_PER_INT4; n++){
c[i] = Math<T,T,T>::add( ((T*)reg_c)[i*T_NUMS_PER_INT4 + n],
c[i]);
}
}
__shared__ T smem[BLK_SIZE_X*BLK_TILE_N];
int reduce_off = (threadIdx.y*THD_TILE_N)*BLK_SIZE_X + threadIdx.x;
constexpr int REDUCE_SIZE = BLK_SIZE_X;
if(REDUCE_SIZE >= 64){
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++){
smem[reduce_off + i*BLK_SIZE_X] = c[i];
}
__syncthreads();
}
//reduce
if(REDUCE_SIZE >= 1024){
if(threadIdx.x < 512)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[512 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
if(REDUCE_SIZE >= 512){
if(threadIdx.x < 256)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[256 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
if(REDUCE_SIZE >= 256){
if(threadIdx.x < 128)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[128 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
if(REDUCE_SIZE >= 128){
if(threadIdx.x < 64)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[64 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
unsigned FULL_MASK = __activemask();
if (REDUCE_SIZE >= 64) {
if(threadIdx.x < 32){
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[reduce_off + i*BLK_SIZE_X + 32]);
}
}
if(threadIdx.x < 32){
if (REDUCE_SIZE >= 32) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 16));
}
if (REDUCE_SIZE >= 16) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 8));
}
if (REDUCE_SIZE >= 8) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 4));
}
if (REDUCE_SIZE >= 4) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 2));
}
if (REDUCE_SIZE >= 2) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 1));
}
}
// shared shuffle
int4 *smem_v4 = (int4*)smem;
if (threadIdx.x == 0) {
#pragma unroll
for(int i = 0; i < THD_TILE_N_V4; i++){
smem_v4[i*BLK_SIZE_Y + threadIdx.y] = ((int4*)c)[i];
}
}
__syncthreads();
int tid = threadIdx.y*BLK_SIZE_X + threadIdx.x;
for(int thd_off = tid; thd_off < BLK_TILE_N_V4; thd_off += BLK_SIZE){
int out_off = blockIdx.x*BLK_TILE_N_V4 + thd_off;
bool in_output_range = out_off < pad_n_v4;
if(in_output_range){
int4 bias_data = bias!=NULL? ((int4*)bias)[out_off] : zero;
//TODO add bias
int4 out = add_v4<T>(smem_v4[thd_off], bias_data);
// fuse
if(fuse_param.has_activation) activation<T>(fuse_param.has_activation, out);
if(fuse_param.has_clip) clip<T>(out, fuse_param.clip_min, fuse_param.clip_max);
int concatV4_off = 0;
if(fuse_param.has_concat){
int concat_offset_v4 = fuse_param.concat_offset / T_NUMS_PER_INT4;
int concat_stride_v4 = fuse_param.concat_stride / T_NUMS_PER_INT4;
concatV4_off = concat_offset_v4 + blockIdx.y*concat_stride_v4;
out_off += concatV4_off;
}
((int4*)output)[out_off] = out;
}
}
}
template<typename T>
ppl::common::RetCode PPLCUDAGemvForwardImp(
const cudaStream_t &stream,
const int M,
const int N,
const int K,
const void* input,
const void* weight,
const void* bias,
void* output,
const ppl::nn::common::GemmParam ¶m,
void* temp_buffer,
const fuse_param_t &fuse_param)
{
if(!param.transB) return ppl::common::RC_UNSUPPORTED;
constexpr int ELEM_NUM_PR_LD = sizeof(int4)/sizeof(T);
constexpr int expect_blocks = 64;
//constexpr int MAX_BLK_SIZE = 256;
//constexpr int MAX_THD_TILE_N_V4 = 4;
int n_v4 = N / ELEM_NUM_PR_LD;
int blk_tile_n_v4 = DivUp(n_v4, expect_blocks/M);
#define LAUNCH_KERNEL(){ \
constexpr int BLK_TILE_N = BLK_SIZE_Y * THD_TILE_N_V4 * ELEM_NUM_PR_LD; \
constexpr int BLK_SIZE = BLK_SIZE_Y * BLK_SIZE_X; \
dim3 grid; \
grid.x = DivUp(N, BLK_TILE_N); \
grid.y = 1; grid.z = 1; \
dim3 threads = dim3(BLK_SIZE_X, BLK_SIZE_Y,1); \
gemv<T, BLK_TILE_N, THD_TILE_N_V4, BLK_SIZE><<<grid, threads, 0, stream>>>\
(output, input, weight, bias, K, N, fuse_param); \
}
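// CONFIG_KERNEL picks the per-thread tile (THD_TILE_N_V4) and the number of block rows
// (BLK_SIZE_Y) from how many int4 output columns each block has to cover, falling back
// to smaller tiles for narrow problems.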
#define CONFIG_KERNEL(_blk_tile_n_v4){ \
if(BLK_SIZE_X <= 64 && blk_tile_n_v4 >= 16){ \
constexpr int THD_TILE_N_V4 = 4; \
constexpr int BLK_SIZE_Y = 4; \
LAUNCH_KERNEL(); \
} else if(blk_tile_n_v4 >= 8){ \
constexpr int THD_TILE_N_V4 = 4; \
constexpr int BLK_SIZE_Y = 2; \
LAUNCH_KERNEL(); \
} else if(blk_tile_n_v4 >= 4){ \
constexpr int THD_TILE_N_V4 = 2; \
constexpr int BLK_SIZE_Y = 2; \
LAUNCH_KERNEL(); \
} else if(blk_tile_n_v4 >= 2){ \
constexpr int THD_TILE_N_V4 = 2; \
constexpr int BLK_SIZE_Y = 1; \
LAUNCH_KERNEL(); \
} else{ \
constexpr int THD_TILE_N_V4 = 1; \
constexpr int BLK_SIZE_Y = 1; \
LAUNCH_KERNEL(); \
} \
}
if (K >= 512){
constexpr int BLK_SIZE_X = 64;
CONFIG_KERNEL(blk_tile_n_v4);
}
else{
constexpr int BLK_SIZE_X = 32;
CONFIG_KERNEL(blk_tile_n_v4);
}
return ppl::common::RC_SUCCESS;
}
|
2b0a430a689b22bed1001da41e774d80e9cf7982.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
extern "C" {
__global__ void init( unsigned long long int* seed, hiprandState_t * state){
int id = threadIdx.x;
hiprand_init(*seed, id, 0, &state[id]);
}
__device__ void pi(const float &x, float *pars, float &p){
p = expf(-powf(fabsf(x*pars[2]), pars[1]));
}
__device__ void f(const float &x, float *pars, float &s){
s += x*sinf(x*pars[0]);
}
__global__ void mcmc(hiprandState_t* states, unsigned int * num_samples, float * Pars, int * npar,
float * Sigma, float * result){
int id = threadIdx.x;
hiprandState_t state = states[id];
unsigned int N = *num_samples;
float sigma = *Sigma;
float *pars = new float[*npar];
memcpy(pars, &Pars[*npar*id], *npar*sizeof(float));
float xi = hiprand_uniform(&state);
float xg = 0.0;
float s = 0.0;
float p_xi = 0.0;
float p_xg = 0.0;
pi(xi, pars, p_xi);
for(unsigned int i=0;i<N;i++){
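        // Random-walk Metropolis step: propose xg ~ Normal(xi, sigma) and accept it with
        // probability min(1, p(xg)/p(xi)); the integrand f is accumulated at every iteration.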
xg = sigma*hiprand_normal(&state)+xi;
pi(xg, pars, p_xg);
if (hiprand_uniform(&state)<(p_xg/p_xi)){
xi = xg;
p_xi = p_xg;
}
f(xi, pars, s);
}
result[id] = s/float(N);
    delete[] pars;  // pars was allocated with new[], so release it with delete[]
}
} | 2b0a430a689b22bed1001da41e774d80e9cf7982.cu | #include <curand.h>
#include <curand_kernel.h>
extern "C" {
__global__ void init( unsigned long long int* seed, curandState * state){
int id = threadIdx.x;
curand_init(*seed, id, 0, &state[id]);
}
__device__ void pi(const float &x, float *pars, float &p){
p = expf(-powf(fabsf(x*pars[2]), pars[1]));
}
__device__ void f(const float &x, float *pars, float &s){
s += x*sinf(x*pars[0]);
}
__global__ void mcmc(curandState* states, unsigned int * num_samples, float * Pars, int * npar,
float * Sigma, float * result){
int id = threadIdx.x;
curandState state = states[id];
unsigned int N = *num_samples;
float sigma = *Sigma;
float *pars = new float[*npar];
memcpy(pars, &Pars[*npar*id], *npar*sizeof(float));
float xi = curand_uniform(&state);
float xg = 0.0;
float s = 0.0;
float p_xi = 0.0;
float p_xg = 0.0;
pi(xi, pars, p_xi);
for(unsigned int i=0;i<N;i++){
xg = sigma*curand_normal(&state)+xi;
pi(xg, pars, p_xg);
if (curand_uniform(&state)<(p_xg/p_xi)){
xi = xg;
p_xi = p_xg;
}
f(xi, pars, s);
}
result[id] = s/float(N);
    delete[] pars;  // pars was allocated with new[], so release it with delete[]
}
} |
505c8fa05cea65b7efd179223a8dfe2208247188.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include "cu_errchk.h"
#include "cublas_nrm2.h"
/**
* http://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-nrm2
*/
void cublas_nrm2(hipblasHandle_t *handle,
int n,
void *x, int incx,
void *result,
int dtype)
{
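    /* dtype selects the precision: 0 = float, 1 = double, 2 = single-precision complex,
     * 3 = double-precision complex. Sketch of a call (illustrative names, assuming x is
     * device memory and result matches the handle's pointer mode):
     *   cublas_nrm2(&handle, n, d_x, 1, &host_result, 0);
     */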
switch(dtype) {
case 0:
gpuBlasErrchk(hipblasSnrm2(*handle, n,
static_cast<float*>(x), incx,
static_cast<float*>(result)));
break;
case 1:
gpuBlasErrchk(hipblasDnrm2(*handle, n,
static_cast<double*>(x), incx,
static_cast<double*>(result)));
break;
case 2:
gpuBlasErrchk(hipblasScnrm2(*handle, n,
static_cast<float2*>(x), incx,
static_cast<float*>(result)));
break;
case 3:
gpuBlasErrchk(hipblasDznrm2(*handle, n,
static_cast<double2*>(x), incx,
static_cast<double*>(result)));
break;
}
return;
}
| 505c8fa05cea65b7efd179223a8dfe2208247188.cu | #include <cuda.h>
#include <cublas_v2.h>
#include "cu_errchk.h"
#include "cublas_nrm2.h"
/**
* http://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-nrm2
*/
void cublas_nrm2(cublasHandle_t *handle,
int n,
void *x, int incx,
void *result,
int dtype)
{
switch(dtype) {
case 0:
gpuBlasErrchk(cublasSnrm2(*handle, n,
static_cast<float*>(x), incx,
static_cast<float*>(result)));
break;
case 1:
gpuBlasErrchk(cublasDnrm2(*handle, n,
static_cast<double*>(x), incx,
static_cast<double*>(result)));
break;
case 2:
gpuBlasErrchk(cublasScnrm2(*handle, n,
static_cast<float2*>(x), incx,
static_cast<float*>(result)));
break;
case 3:
gpuBlasErrchk(cublasDznrm2(*handle, n,
static_cast<double2*>(x), incx,
static_cast<double*>(result)));
break;
}
return;
}
|
c648f81bb5b93454ef0d18f66cd70b466ba40be6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _USE_MATH_DEFINES
#include <cmath>
#include "fdtd3d.h"
__global__ void E_update( int Nr, int Nth, int Nph,
float *Er, float *Eth, float *Eph,
float *nDr, float *nDth, float *nDph,
float *oDr, float *oDth, float *oDph, float eps)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = ( blockDim.y * blockIdx.y + threadIdx.y ) / Nph;
int k = ( blockDim.y * blockIdx.y + threadIdx.y ) % Nph;
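    // One thread per (r, theta, phi) cell: the radial index comes from the x grid
    // dimension, while theta and phi are folded into the y dimension and unpacked
    // with a divide/modulo by Nph.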
if( ((i >= 0 ) && (i < Nr)) && ((j >= 1) && (j < Nth)) && ((k >= 1) && (k < Nph)) ){
int idx = i*((Nth+1)*(Nph+1)) + j*(Nph+1) + k;
Er[idx] = Er[idx]
+ (nDr[idx] - oDr[idx])/eps;
oDr[idx] = nDr[idx];
}
if( ((i >= 1) && (i < Nr)) && ((j >= 0) && (j < Nth)) && ((k >= 1) && (k < Nph)) ){
int idx = i*(Nth*(Nph+1)) + j*(Nph+1) + k;
Eth[idx] = Eth[idx]
+ (nDth[idx] - oDth[idx])/eps;
oDth[idx] = nDth[idx];
}
if( ((i >= 1) && (i < Nr)) && ((j >= 1) && (j < Nth)) && ((k >= 0) && (k < Nph)) ){
int idx = i*((Nth+1)*Nph) + j*Nph + k;
Eph[idx] = Eph[idx]
+ (nDph[idx] - oDph[idx])/eps;
oDph[idx] = nDph[idx];
}
} | c648f81bb5b93454ef0d18f66cd70b466ba40be6.cu | #define _USE_MATH_DEFINES
#include <cmath>
#include "fdtd3d.h"
__global__ void E_update( int Nr, int Nth, int Nph,
float *Er, float *Eth, float *Eph,
float *nDr, float *nDth, float *nDph,
float *oDr, float *oDth, float *oDph, float eps)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = ( blockDim.y * blockIdx.y + threadIdx.y ) / Nph;
int k = ( blockDim.y * blockIdx.y + threadIdx.y ) % Nph;
if( ((i >= 0 ) && (i < Nr)) && ((j >= 1) && (j < Nth)) && ((k >= 1) && (k < Nph)) ){
int idx = i*((Nth+1)*(Nph+1)) + j*(Nph+1) + k;
Er[idx] = Er[idx]
+ (nDr[idx] - oDr[idx])/eps;
oDr[idx] = nDr[idx];
}
if( ((i >= 1) && (i < Nr)) && ((j >= 0) && (j < Nth)) && ((k >= 1) && (k < Nph)) ){
int idx = i*(Nth*(Nph+1)) + j*(Nph+1) + k;
Eth[idx] = Eth[idx]
+ (nDth[idx] - oDth[idx])/eps;
oDth[idx] = nDth[idx];
}
if( ((i >= 1) && (i < Nr)) && ((j >= 1) && (j < Nth)) && ((k >= 0) && (k < Nph)) ){
int idx = i*((Nth+1)*Nph) + j*Nph + k;
Eph[idx] = Eph[idx]
+ (nDph[idx] - oDph[idx])/eps;
oDph[idx] = nDph[idx];
}
} |
8e9e676db50e726c70733cea0792d55e72ae74f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void hello_cuda()
{
printf("Hello Cuda world\n");
} | 8e9e676db50e726c70733cea0792d55e72ae74f3.cu | #include "includes.h"
__global__ void hello_cuda()
{
printf("Hello Cuda world\n");
} |
a633a7652f424758e1538ea0d9a4ba2c736cdd5e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h> //Max MIN
#include <stdio.h>
#include <time.h>
#define tbp 512
#define nblocks 1
__global__ void kernel_min(int *a, int *d)
{
__shared__ int sdata[tbp]; //"static" shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = a[i];
__syncthreads();
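    // Tree reduction in shared memory: each pass halves the number of active threads
    // and keeps the smaller of the two compared values, so sdata[0] ends up holding
    // the block minimum.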
for(unsigned int s=tbp/2 ; s >= 1 ; s=s/2)
{
if(tid < s)
{
if(sdata[tid] >sdata[tid + s])
{
sdata[tid] = sdata[tid + s];
}
}
__syncthreads();
}
if(tid == 0 )
{
d[blockIdx.x] = sdata[0];
}
}
int main()
{
int i;
const int N=tbp*nblocks;
srand(time(NULL));
int *a;
a = (int*)malloc(N * sizeof(int));
int *d;
d = (int*)malloc(nblocks * sizeof(int));
int *dev_a, *dev_d;
hipMalloc((void **) &dev_a, N*sizeof(int));
hipMalloc((void **) &dev_d, nblocks*sizeof(int));
int mmm=100;
for( i = 0 ; i < N ; i++)
{
a[i] = rand()% 100 + 5;
//printf("%d ",a[i]);
if(mmm>a[i]) mmm=a[i];
}
printf("");
printf("");
printf("");
printf("");
hipMemcpy(dev_a , a, N*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_min) , dim3(nblocks),dim3(tbp), 0, 0, dev_a,dev_d);
hipMemcpy(d, dev_d, nblocks*sizeof(int),hipMemcpyDeviceToHost);
printf("cpu min %d, gpu_min = %d",mmm,d[0]);
hipFree(dev_a);
hipFree(dev_d);
return 0;
}
| a633a7652f424758e1538ea0d9a4ba2c736cdd5e.cu | #include <cuda.h> //Max MIN
#include <stdio.h>
#include <time.h>
#define tbp 512
#define nblocks 1
__global__ void kernel_min(int *a, int *d)
{
__shared__ int sdata[tbp]; //"static" shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = a[i];
__syncthreads();
for(unsigned int s=tbp/2 ; s >= 1 ; s=s/2)
{
if(tid < s)
{
if(sdata[tid] >sdata[tid + s])
{
sdata[tid] = sdata[tid + s];
}
}
__syncthreads();
}
if(tid == 0 )
{
d[blockIdx.x] = sdata[0];
}
}
int main()
{
int i;
const int N=tbp*nblocks;
srand(time(NULL));
int *a;
a = (int*)malloc(N * sizeof(int));
int *d;
d = (int*)malloc(nblocks * sizeof(int));
int *dev_a, *dev_d;
cudaMalloc((void **) &dev_a, N*sizeof(int));
cudaMalloc((void **) &dev_d, nblocks*sizeof(int));
int mmm=100;
for( i = 0 ; i < N ; i++)
{
a[i] = rand()% 100 + 5;
//printf("%d ",a[i]);
if(mmm>a[i]) mmm=a[i];
}
printf("");
printf("");
printf("");
printf("");
cudaMemcpy(dev_a , a, N*sizeof(int),cudaMemcpyHostToDevice);
kernel_min <<<nblocks,tbp>>>(dev_a,dev_d);
cudaMemcpy(d, dev_d, nblocks*sizeof(int),cudaMemcpyDeviceToHost);
printf("cpu min %d, gpu_min = %d",mmm,d[0]);
cudaFree(dev_a);
cudaFree(dev_d);
return 0;
}
|
46f954b577a36b98df5603868d1883a664efa5fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "SSS_inter.h"
#include <hip/hip_runtime.h>
#define gridsize 2048
#define blocksize 64
double get_time(void){
struct timeval tv;
double t;
gettimeofday(&tv,NULL);
t = tv.tv_sec * 1000 + tv.tv_usec /1000;
return t;
}
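/* Truncate the prolongation operator P: entries smaller in magnitude than
 * trunc_threshold times the extreme entry of the same sign in their row are dropped,
 * and the surviving entries are rescaled so the positive and negative row sums are
 * preserved. */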
void SSS_amg_interp_trunc(SSS_MAT *P, SSS_AMG_PARS *pars)
{
const int row = P->num_rows;
const int nnzold = P->num_nnzs;
const double eps_tr = pars->trunc_threshold;
// local variables
int num_nonzero = 0; // number of non zeros after truncation
double Min_neg, max_pos; // min negative and max positive entries
double Fac_neg, Fac_pos; // factors for negative and positive entries
double Sum_neg, TSum_neg; // sum and truncated sum of negative entries
double Sum_pos, TSum_pos; // sum and truncated sum of positive entries
int index1 = 0, index2 = 0, begin, end;
int i, j;
for (i = 0; i < row; ++i) {
begin = P->row_ptr[i];
end = P->row_ptr[i + 1];
P->row_ptr[i] = num_nonzero;
Min_neg = max_pos = 0;
Sum_neg = Sum_pos = 0;
TSum_neg = TSum_pos = 0;
// 1. Summations of positive and negative entries
for (j = begin; j < end; ++j) {
if (P->val[j] > 0) {
Sum_pos += P->val[j];
max_pos = SSS_max(max_pos, P->val[j]);
}
else if (P->val[j] < 0) {
Sum_neg += P->val[j];
Min_neg = SSS_MIN(Min_neg, P->val[j]);
}
}
max_pos *= eps_tr;
Min_neg *= eps_tr;
// 2. Set JA of truncated P
for (j = begin; j < end; ++j) {
if (P->val[j] >= max_pos) {
num_nonzero++;
P->col_idx[index1++] = P->col_idx[j];
TSum_pos += P->val[j];
}
else if (P->val[j] <= Min_neg) {
num_nonzero++;
P->col_idx[index1++] = P->col_idx[j];
TSum_neg += P->val[j];
}
}
// 3. Compute factors and set values of truncated P
if (TSum_pos > SMALLFLOAT) {
Fac_pos = Sum_pos / TSum_pos; // factor for positive entries
}
else {
Fac_pos = 1.0;
}
if (TSum_neg < -SMALLFLOAT) {
Fac_neg = Sum_neg / TSum_neg; // factor for negative entries
}
else {
Fac_neg = 1.0;
}
for (j = begin; j < end; ++j) {
if (P->val[j] >= max_pos)
P->val[index2++] = P->val[j] * Fac_pos;
else if (P->val[j] <= Min_neg)
P->val[index2++] = P->val[j] * Fac_neg;
}
}
// resize the truncated prolongation P
P->num_nnzs = P->row_ptr[row] = num_nonzero;
P->col_idx = (int *) SSS_realloc(P->col_idx, num_nonzero * sizeof(int));
P->val = (double *) SSS_realloc(P->val, num_nonzero * sizeof(double));
//Truncate prolongation
// printf("Truncate prolongation, nnz before: %10d, after: %10d\n",
// nnzold, num_nonzero);
}
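/* GPU port of the first phase of direct interpolation: one thread handles one row of A,
 * computing interpolation weights for fine-grid points (FGPT) and setting the identity
 * entry for coarse-grid points (CGPT). */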
__global__ void DIR_Step_1(int row,int *d_A_row_ptr,int *d_A_col_idx,double *d_A_val,int *d_vec,int *d_P_row_ptr,int *d_P_col_idx,double *d_P_val)
{
int tid = blockDim.x * blockIdx.x +threadIdx.x;
int begin_row,end_row;
//-----------------------------------------
double alpha, beta, aii = 0;
// a_minus and a_plus for Neighbors and Prolongation support
double amN, amP, apN, apP;
int IS_STRONG; // is the variable strong coupled to i?
int num_pcouple; // number of positive strong couplings
int j, k, l, index = 0, idiag;
//-------------------cuda----------------------
if(tid<row)
{
begin_row = d_A_row_ptr[tid];
end_row = d_A_row_ptr[tid + 1];
// find diagonal entry first!!!
for (idiag = begin_row; idiag < end_row; idiag++)
{
if (d_A_col_idx[idiag] == tid)
{
aii = d_A_val[idiag];
break;
}
}
if (d_vec[tid] == FGPT)
{
// fine grid nodes
amN = amP = apN = apP = 0.0;
num_pcouple = 0;
for (j = begin_row; j < end_row; ++j)
{
if (j == idiag) continue; // skip diagonal
// check a point strong-coupled to i or not
IS_STRONG = FALSE;
for (k = d_P_row_ptr[tid]; k < d_P_row_ptr[tid + 1]; ++k)
{
if (d_P_col_idx[k] == d_A_col_idx[j])
{
IS_STRONG = TRUE;
break;
}
}
if (d_A_val[j] > 0)
{
apN += d_A_val[j]; // sum up positive entries
if (IS_STRONG)
{
apP += d_A_val[j];
num_pcouple++;
}
}
else
{
amN += d_A_val[j]; // sum up negative entries
if (IS_STRONG) {
amP += d_A_val[j];
}
}
}
// set weight factors
alpha = amN / amP;
if (num_pcouple > 0) {
beta = apN / apP;
}
else {
beta = 0.0;
aii += apN;
}
// keep aii inside the loop to avoid floating pt error
for (j = d_P_row_ptr[tid]; j < d_P_row_ptr[tid + 1]; ++j)
{
k = d_P_col_idx[j];
for (l = d_A_row_ptr[tid]; l < d_A_row_ptr[tid + 1]; l++)
{
if (d_A_col_idx[l] == k) break;
}
if (d_A_val[l] > 0)
{
d_P_val[j] = -beta * d_A_val[l] / aii;
}
else
{
d_P_val[j] = -alpha * d_A_val[l] / aii;
}
}
} // end if vec
else if (d_vec[tid] == CGPT) { // coarse grid nodes
d_P_val[d_P_row_ptr[tid]] = 1.0;
}
}
}
__global__ void DIR_Step_2(int row,int *d_vec,int *d_cindex,int index)
{
int tid = blockDim.x * blockIdx.x +threadIdx.x;
index =tid;
if(tid<row)
{
if (d_vec[tid] == CGPT)
d_cindex[tid] = index++;
}
}
__global__ void DIR_Step_3(int p_nnz,int *d_P_col_idx,int *d_cindex)
{
int tid = blockDim.x * blockIdx.x +threadIdx.x;
int j=0;
if (tid <p_nnz)
{
j=d_P_col_idx[tid];
d_P_col_idx[tid] = d_cindex[j];
}
}
void interp_DIR_cuda(SSS_MAT *A, SSS_IVEC *vertices, SSS_MAT *P, SSS_AMG_PARS *pars)
{
int row = A->num_rows;
int index = 0;
int *vec = vertices->d;
// indices of C-nodes
int *cindex = (int *) SSS_calloc(row, sizeof(int));
int *d_cindex = NULL;
int *d_vec = NULL;
int *d_A_row_ptr = NULL;
int *d_A_col_idx = NULL;
double *d_A_val = NULL;
int *d_P_row_ptr = NULL;
int *d_P_col_idx = NULL;
double *d_P_val = NULL;
struct timeval ww, rr;
gettimeofday(&ww,NULL);
hipFree(0);
gettimeofday(&rr,NULL);
double ee = (rr.tv_sec - ww.tv_sec) * 1000.0 + (rr.tv_usec - ww.tv_usec) / 1000.0;
//printf("-------------cuda_warmup_time = %f ms -------------------\n",ee);
//---------------- cuda Malloc ----------
struct timeval cudamalloc_1, cudamalloc_2;
gettimeofday(&cudamalloc_1,NULL);
//vec cindex
hipMalloc((void **)&d_cindex,row * sizeof(int));
hipMalloc((void **)&d_vec,row * sizeof(int));
//A
hipMalloc((void **)&d_A_row_ptr,(A->num_rows+1) * sizeof(int));
hipMalloc((void **)&d_A_col_idx,A->num_nnzs * sizeof(int));
hipMalloc((void **)&d_A_val,A->num_nnzs * sizeof(double));
//P
hipMalloc( (void **)&d_P_row_ptr,(P->num_rows+1) * sizeof(int));
hipMalloc( (void **)&d_P_col_idx,P->num_nnzs * sizeof(int));
hipMalloc( (void **)&d_P_val,P->num_nnzs * sizeof(double));
gettimeofday(&cudamalloc_2,NULL);
double cudamalloc_3 = (cudamalloc_2.tv_sec - cudamalloc_1.tv_sec) * 1000.0 + (cudamalloc_2.tv_usec - cudamalloc_1.tv_usec) / 1000.0;
//printf("-------------cuda_malloc_time = %f ms -------------------\n",cudamalloc_3);
if (d_cindex == NULL || d_vec ==NULL || d_A_row_ptr == NULL|| d_A_col_idx == NULL|| d_A_val == NULL|| d_P_row_ptr == NULL|| d_P_col_idx == NULL || d_P_val == NULL)
{
printf("could't allocate GPU mem \n");
}
//-----------cuda Memcpy host_to_device----------
struct timeval hosttodevice_1, hosttodevice_2;
gettimeofday(&hosttodevice_1,NULL);
//vec cindex
hipMemcpy(d_cindex, cindex, row * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_vec, vec, row * sizeof(int), hipMemcpyHostToDevice);
//A
hipMemcpy(d_A_row_ptr, A->row_ptr, (A->num_rows+1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_A_col_idx, A->col_idx, A->num_nnzs * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_A_val, A->val, A->num_nnzs * sizeof(double), hipMemcpyHostToDevice);
//P
hipMemcpy(d_P_row_ptr, P->row_ptr, (P->num_rows+1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_P_col_idx, P->col_idx, P->num_nnzs * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_P_val, P->val, P->num_nnzs * sizeof(double), hipMemcpyHostToDevice);
gettimeofday(&hosttodevice_2,NULL);
double hosttodevice_3 = (hosttodevice_2.tv_sec - hosttodevice_1.tv_sec) * 1000.0 + (hosttodevice_2.tv_usec - hosttodevice_1.tv_usec) / 1000.0;
//printf("-------------cuda_host_to_device_time = %f ms -------------------\n",hosttodevice_3);
//--------------------cuda step1-----------------------
struct timeval cuda_step1_1, cuda_step1_2;
gettimeofday(&cuda_step1_1,NULL);
hipLaunchKernelGGL(( DIR_Step_1), dim3(gridsize),dim3(blocksize), 0, 0, row, d_A_row_ptr, d_A_col_idx, d_A_val, d_vec, d_P_row_ptr, d_P_col_idx, d_P_val);
hipDeviceSynchronize();
gettimeofday(&cuda_step1_2,NULL);
double cuda_step1_3 = (cuda_step1_2.tv_sec - cuda_step1_1.tv_sec) * 1000.0 + (cuda_step1_2.tv_usec - cuda_step1_1.tv_usec) / 1000.0;
printf("-------------cuda_step_1_time = %f ms -------------------\n",cuda_step1_3);
//-----------cuda Memcpy device_to_host----------
struct timeval devicetohost1, devicetohost2;
gettimeofday(&devicetohost1,NULL);
//vec cindex
hipMemcpy(vec,d_vec,row * sizeof(int),hipMemcpyDeviceToHost);
    hipMemcpy(cindex, d_cindex, row * sizeof(int), hipMemcpyDeviceToHost);  // cindex is a host array
//A
hipMemcpy(A->row_ptr,d_A_row_ptr,(A->num_rows+1)*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(A->col_idx,d_A_col_idx,A->num_nnzs*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(A->val,d_A_val,A->num_nnzs*sizeof(double),hipMemcpyDeviceToHost);
//P
hipMemcpy(P->row_ptr,d_P_row_ptr,(P->num_rows+1)*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(P->col_idx,d_P_col_idx,P->num_nnzs*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(P->val,d_P_val,P->num_nnzs*sizeof(double),hipMemcpyDeviceToHost);
gettimeofday(&devicetohost2,NULL);
double devicetohost3 = (devicetohost2.tv_sec - devicetohost1.tv_sec) * 1000.0 + (devicetohost2.tv_usec - devicetohost1.tv_usec) / 1000.0;
//printf("-------------cuda_device_to_host_time = %f ms -------------------\n",devicetohost3);
//-------------------------------hipFree------------------------
struct timeval cudafree_1, cudafree_2;
gettimeofday(&cudafree_1,NULL);
hipFree(d_cindex);
hipFree(d_vec);
hipFree(d_A_row_ptr);
hipFree(d_A_col_idx);
hipFree(d_A_val);
hipFree(d_P_row_ptr);
hipFree(d_P_col_idx);
hipFree(d_P_val);
gettimeofday(&cudafree_2,NULL);
double cudafree_3 = (cudafree_2.tv_sec - cudafree_1.tv_sec) * 1000.0 + (cudafree_2.tv_usec - cudafree_1.tv_usec) / 1000.0;
//printf("-------------cuda_free_time = %f ms -------------------\n",cudafree_3);
// Step 2. Generate coarse level indices and set values of P.Aj
int i,j;
for (index = i = 0; i < row; ++i)
{
if (vec[i] == CGPT)
cindex[i] = index++;
}
P->num_cols = index;
for (i = 0; i < P->num_nnzs; ++i)
{
j = P->col_idx[i];
P->col_idx[i] = cindex[j];
}
// clean up
SSS_free(cindex);
// Step 3. Truncate the prolongation operator to reduce cost
SSS_amg_interp_trunc(P, pars);
}
void interp_DIR(SSS_MAT * A, SSS_IVEC * vertices, SSS_MAT * P, SSS_AMG_PARS * pars)
{
int row = A->num_rows;
int *vec = vertices->d;
// local variables
int IS_STRONG; // is the variable strong coupled to i?
int num_pcouple; // number of positive strong couplings
int begin_row, end_row;
int i, j, k, l, index = 0, idiag;
// a_minus and a_plus for Neighbors and Prolongation support
double amN, amP, apN, apP;
double alpha, beta, aii = 0;
// indices of C-nodes
int *cindex = (int *) SSS_calloc(row, sizeof(int));
struct timeval cpu_step_1, cpu_step_2;
gettimeofday(&cpu_step_1,NULL);
// Step 1. Fill in values for interpolation operator P
for (i = 0; i < row; ++i)
{
begin_row = A->row_ptr[i];
end_row = A->row_ptr[i + 1];
// find diagonal entry first!!!
for (idiag = begin_row; idiag < end_row; idiag++)
{
if (A->col_idx[idiag] == i)
{
aii = A->val[idiag];
break;
}
}
if (vec[i] == FGPT)
{ // fine grid nodes
amN = amP = apN = apP = 0.0;
num_pcouple = 0;
for (j = begin_row; j < end_row; ++j)
{
if (j == idiag) continue; // skip diagonal
// check a point strong-coupled to i or not
IS_STRONG = FALSE;
for (k = P->row_ptr[i]; k < P->row_ptr[i + 1]; ++k)
{
if (P->col_idx[k] == A->col_idx[j])
{
IS_STRONG = TRUE;
break;
}
}
if (A->val[j] > 0)
{
apN += A->val[j]; // sum up positive entries
if (IS_STRONG)
{
apP += A->val[j];
num_pcouple++;
}
}
else
{
amN += A->val[j]; // sum up negative entries
if (IS_STRONG) {
amP += A->val[j];
}
}
}
// set weight factors
alpha = amN / amP;
if (num_pcouple > 0) {
beta = apN / apP;
}
else {
beta = 0.0;
aii += apN;
}
// keep aii inside the loop to avoid floating pt error
for (j = P->row_ptr[i]; j < P->row_ptr[i + 1]; ++j)
{
k = P->col_idx[j];
for (l = A->row_ptr[i]; l < A->row_ptr[i + 1]; l++)
{
if (A->col_idx[l] == k) break;
}
if (A->val[l] > 0)
{
P->val[j] = -beta * A->val[l] / aii;
}
else
{
P->val[j] = -alpha * A->val[l] / aii;
}
}
} // end if vec
else if (vec[i] == CGPT) { // coarse grid nodes
P->val[P->row_ptr[i]] = 1.0;
}
}
gettimeofday(&cpu_step_2,NULL);
double cpu_step_3 = (cpu_step_2.tv_sec - cpu_step_1.tv_sec) * 1000.0 + (cpu_step_2.tv_usec - cpu_step_1.tv_usec) / 1000.0;
printf("-------------cpu_step1_time = %f ms -------------------\n",cpu_step_3);
// Step 2. Generate coarse level indices and set values of P.Aj
double time4=get_time();
for (index = i = 0; i < row; ++i) {
if (vec[i] == CGPT)
cindex[i] = index++;
}
P->num_cols = index;
for (i = 0; i < P->num_nnzs; ++i) {
j = P->col_idx[i];
P->col_idx[i] = cindex[j];
}
double time5=get_time();
double time6=time5-time4;
// printf("step2 time = %lf\n",time6);
// clean up
SSS_free(cindex);
// Step 3. Truncate the prolongation operator to reduce cost
double time7=get_time();
SSS_amg_interp_trunc(P, pars);
double time8=get_time();
double time9=time8-time7;
// printf("step3 time = %lf\n",time9);
}
static void interp_STD(SSS_MAT * A, SSS_IVEC * vertices, SSS_MAT * P, SSS_IMAT * S, SSS_AMG_PARS * pars)
{
//8 faster
omp_set_num_threads(8);
const int row = A->num_rows;
int *vec = vertices->d;
// local variables
int i, j, k, l, m, index;
double alpha, factor, alN, alP;
double akk, akl, aik, aki;
// indices for coarse neighbor node for every node
int *cindex = (int *) SSS_calloc(row, sizeof(int));
// indices from column number to index in nonzeros in i-th row
int *rindi = (int *) SSS_calloc(2 * row, sizeof(int));
// indices from column number to index in nonzeros in k-th row
int *rindk = (int *) SSS_calloc(2 * row, sizeof(int));
// sums of strongly connected C neighbors
double *csum = (double *) SSS_calloc(row, sizeof(double));
// sums of all neighbors except ISPT
double *psum = (double *) SSS_calloc(row, sizeof(double));
// sums of all neighbors
double *nsum = (double *) SSS_calloc(row, sizeof(double));
// diagonal entries
double *diag = (double *) SSS_calloc(row, sizeof(double));
// coefficients hat a_ij for relevant CGPT of the i-th node
double *Ahat = (double *) SSS_calloc(row, sizeof(double));
// Step 0. Prepare diagonal, Cs-sum, and N-sum
SSS_iarray_set(row, cindex, -1);
SSS_blas_array_set(row, csum, 0.0);
SSS_blas_array_set(row, nsum, 0.0);
for (i = 0; i < row; i++) {
// set flags for strong-connected C nodes
//num = 8
//#pragma omp parallel for
for (j = S->row_ptr[i]; j < S->row_ptr[i + 1]; j++) {
k = S->col_idx[j];
if (vec[k] == CGPT) cindex[k] = i;
}
//#pragma omp parallel for
for (j = A->row_ptr[i]; j < A->row_ptr[i + 1]; j++) {
k = A->col_idx[j];
if (cindex[k] == i) csum[i] += A->val[j]; // strong C-couplings
if (k == i)
diag[i] = A->val[j];
else {
nsum[i] += A->val[j];
if (vec[k] != ISPT) {
psum[i] += A->val[j];
}
}
}
}
// Step 1. Fill in values for interpolation operator P
//#pragma omp parallel for
for (i = 0; i < row; i++) {
if (vec[i] == FGPT) {
alN = psum[i];
alP = csum[i];
// form the reverse indices for i-th row
//#pragma omp parallel for
for (j = A->row_ptr[i]; j < A->row_ptr[i + 1]; j++) rindi[A->col_idx[j]] = j;
//#pragma omp parallel for
// clean up Ahat for relevant nodes only
//#pragma omp parallel for
for (j = P->row_ptr[i]; j < P->row_ptr[i + 1]; j++) Ahat[P->col_idx[j]] = 0.0;
// set values of Ahat
Ahat[i] = diag[i];
//#pragma omp parallel for
for (j = S->row_ptr[i]; j < S->row_ptr[i + 1]; j++) {
k = S->col_idx[j];
aik = A->val[rindi[k]];
if (vec[k] == CGPT) {
Ahat[k] += aik;
}
else if (vec[k] == FGPT) {
akk = diag[k];
// form the reverse indices for k-th row
//#pragma omp parallel for
for (m = A->row_ptr[k]; m < A->row_ptr[k + 1]; m++) rindk[A->col_idx[m]] = m;
factor = aik / akk;
// visit the strong-connected C neighbors of k, compute
// Ahat in the i-th row, set aki if found
aki = 0.0;
//#pragma omp parallel for
for (m = A->row_ptr[k]; m < A->row_ptr[k + 1]; m++) {
if (A->col_idx[m] == i) {
aki = A->val[m];
Ahat[i] -= factor * aki;
}
}
//#pragma omp parallel for
for (m = S->row_ptr[k]; m < S->row_ptr[k + 1]; m++) {
l = S->col_idx[m];
akl = A->val[rindk[l]];
if (vec[l] == CGPT)
Ahat[l] -= factor * akl;
} // end for m
// compute Cs-sum and N-sum for Ahat
alN -= factor * (nsum[k] - aki + akk);
alP -= factor * csum[k];
} // end if vec[k]
} // end for j
// How about positive entries
if (P->row_ptr[i + 1] > P->row_ptr[i]) alpha = alN / alP;
//#pragma omp parallel for
for (j = P->row_ptr[i]; j < P->row_ptr[i + 1]; j++) {
k = P->col_idx[j];
P->val[j] = -alpha * Ahat[k] / Ahat[i];
}
}
else if (vec[i] == CGPT) {
P->val[P->row_ptr[i]] = 1.0;
}
} // end for i
// Step 2. Generate coarse level indices and set values of P.col_idx
//#pragma omp parallel for
for (index = i = 0; i < row; ++i) {
if (vec[i] == CGPT) cindex[i] = index++;
}
P->num_cols = index;
//#pragma omp parallel for
for (i = 0; i < P->row_ptr[P->num_rows]; ++i) {
j = P->col_idx[i];
P->col_idx[i] = cindex[j];
}
// clean up
SSS_free(cindex);
SSS_free(rindi);
SSS_free(rindk);
SSS_free(nsum);
SSS_free(psum);
SSS_free(csum);
SSS_free(diag);
SSS_free(Ahat);
// Step 3. Truncate the prolongation operator to reduce cost
SSS_amg_interp_trunc(P, pars);
}
void SSS_amg_interp(SSS_MAT *A, SSS_IVEC *vertices, SSS_MAT *P, SSS_IMAT *S, SSS_AMG_PARS *pars)
{
interp_type interp_type = pars->interp_type;
switch (interp_type) {
case intERP_DIR: // Direct interpolation
//interp_DIR(A, vertices, P, pars);
interp_DIR_cuda(A, vertices, P, pars);
break;
case intERP_STD: // Standard interpolation
interp_STD(A, vertices, P, S, pars);
break;
default:
SSS_exit_on_errcode(ERROR_AMG_interp_type, __FUNCTION__);
}
}
| 46f954b577a36b98df5603868d1883a664efa5fc.cu | #include "SSS_inter.h"
#include <cuda.h>
#define gridsize 2048
#define blocksize 64
double get_time(void){
struct timeval tv;
double t;
gettimeofday(&tv,NULL);
t = tv.tv_sec * 1000 + tv.tv_usec /1000;
return t;
}
void SSS_amg_interp_trunc(SSS_MAT *P, SSS_AMG_PARS *pars)
{
const int row = P->num_rows;
const int nnzold = P->num_nnzs;
const double eps_tr = pars->trunc_threshold;
// local variables
int num_nonzero = 0; // number of non zeros after truncation
double Min_neg, max_pos; // min negative and max positive entries
double Fac_neg, Fac_pos; // factors for negative and positive entries
double Sum_neg, TSum_neg; // sum and truncated sum of negative entries
double Sum_pos, TSum_pos; // sum and truncated sum of positive entries
int index1 = 0, index2 = 0, begin, end;
int i, j;
for (i = 0; i < row; ++i) {
begin = P->row_ptr[i];
end = P->row_ptr[i + 1];
P->row_ptr[i] = num_nonzero;
Min_neg = max_pos = 0;
Sum_neg = Sum_pos = 0;
TSum_neg = TSum_pos = 0;
// 1. Summations of positive and negative entries
for (j = begin; j < end; ++j) {
if (P->val[j] > 0) {
Sum_pos += P->val[j];
max_pos = SSS_max(max_pos, P->val[j]);
}
else if (P->val[j] < 0) {
Sum_neg += P->val[j];
Min_neg = SSS_MIN(Min_neg, P->val[j]);
}
}
max_pos *= eps_tr;
Min_neg *= eps_tr;
// 2. Set JA of truncated P
for (j = begin; j < end; ++j) {
if (P->val[j] >= max_pos) {
num_nonzero++;
P->col_idx[index1++] = P->col_idx[j];
TSum_pos += P->val[j];
}
else if (P->val[j] <= Min_neg) {
num_nonzero++;
P->col_idx[index1++] = P->col_idx[j];
TSum_neg += P->val[j];
}
}
// 3. Compute factors and set values of truncated P
if (TSum_pos > SMALLFLOAT) {
Fac_pos = Sum_pos / TSum_pos; // factor for positive entries
}
else {
Fac_pos = 1.0;
}
if (TSum_neg < -SMALLFLOAT) {
Fac_neg = Sum_neg / TSum_neg; // factor for negative entries
}
else {
Fac_neg = 1.0;
}
for (j = begin; j < end; ++j) {
if (P->val[j] >= max_pos)
P->val[index2++] = P->val[j] * Fac_pos;
else if (P->val[j] <= Min_neg)
P->val[index2++] = P->val[j] * Fac_neg;
}
}
// resize the truncated prolongation P
P->num_nnzs = P->row_ptr[row] = num_nonzero;
P->col_idx = (int *) SSS_realloc(P->col_idx, num_nonzero * sizeof(int));
P->val = (double *) SSS_realloc(P->val, num_nonzero * sizeof(double));
//Truncate prolongation
// printf("Truncate prolongation, nnz before: %10d, after: %10d\n",
// nnzold, num_nonzero);
}
__global__ void DIR_Step_1(int row,int *d_A_row_ptr,int *d_A_col_idx,double *d_A_val,int *d_vec,int *d_P_row_ptr,int *d_P_col_idx,double *d_P_val)
{
int tid = blockDim.x * blockIdx.x +threadIdx.x;
int begin_row,end_row;
    //-------------------- parameters ---------------------
double alpha, beta, aii = 0;
// a_minus and a_plus for Neighbors and Prolongation support
double amN, amP, apN, apP;
int IS_STRONG; // is the variable strong coupled to i?
int num_pcouple; // number of positive strong couplings
int j, k, l, index = 0, idiag;
//-------------------cuda----------------------
if(tid<row)
{
begin_row = d_A_row_ptr[tid];
end_row = d_A_row_ptr[tid + 1];
// find diagonal entry first!!!
for (idiag = begin_row; idiag < end_row; idiag++)
{
if (d_A_col_idx[idiag] == tid)
{
aii = d_A_val[idiag];
break;
}
}
if (d_vec[tid] == FGPT)
{
// fine grid nodes
amN = amP = apN = apP = 0.0;
num_pcouple = 0;
for (j = begin_row; j < end_row; ++j)
{
if (j == idiag) continue; // skip diagonal
// check a point strong-coupled to i or not
IS_STRONG = FALSE;
for (k = d_P_row_ptr[tid]; k < d_P_row_ptr[tid + 1]; ++k)
{
if (d_P_col_idx[k] == d_A_col_idx[j])
{
IS_STRONG = TRUE;
break;
}
}
if (d_A_val[j] > 0)
{
apN += d_A_val[j]; // sum up positive entries
if (IS_STRONG)
{
apP += d_A_val[j];
num_pcouple++;
}
}
else
{
amN += d_A_val[j]; // sum up negative entries
if (IS_STRONG) {
amP += d_A_val[j];
}
}
}
// set weight factors
alpha = amN / amP;
if (num_pcouple > 0) {
beta = apN / apP;
}
else {
beta = 0.0;
aii += apN;
}
// keep aii inside the loop to avoid floating pt error
for (j = d_P_row_ptr[tid]; j < d_P_row_ptr[tid + 1]; ++j)
{
k = d_P_col_idx[j];
for (l = d_A_row_ptr[tid]; l < d_A_row_ptr[tid + 1]; l++)
{
if (d_A_col_idx[l] == k) break;
}
if (d_A_val[l] > 0)
{
d_P_val[j] = -beta * d_A_val[l] / aii;
}
else
{
d_P_val[j] = -alpha * d_A_val[l] / aii;
}
}
} // end if vec
else if (d_vec[tid] == CGPT) { // coarse grid nodes
d_P_val[d_P_row_ptr[tid]] = 1.0;
}
}
}
__global__ void DIR_Step_2(int row,int *d_vec,int *d_cindex,int index)
{
int tid = blockDim.x * blockIdx.x +threadIdx.x;
index =tid;
if(tid<row)
{
if (d_vec[tid] == CGPT)
d_cindex[tid] = index++;
}
}
__global__ void DIR_Step_3(int p_nnz,int *d_P_col_idx,int *d_cindex)
{
int tid = blockDim.x * blockIdx.x +threadIdx.x;
int j=0;
if (tid <p_nnz)
{
j=d_P_col_idx[tid];
d_P_col_idx[tid] = d_cindex[j];
}
}
void interp_DIR_cuda(SSS_MAT *A, SSS_IVEC *vertices, SSS_MAT *P, SSS_AMG_PARS *pars)
{
int row = A->num_rows;
int index = 0;
int *vec = vertices->d;
// indices of C-nodes
int *cindex = (int *) SSS_calloc(row, sizeof(int));
int *d_cindex = NULL;
int *d_vec = NULL;
int *d_A_row_ptr = NULL;
int *d_A_col_idx = NULL;
double *d_A_val = NULL;
int *d_P_row_ptr = NULL;
int *d_P_col_idx = NULL;
double *d_P_val = NULL;
struct timeval ww, rr;
gettimeofday(&ww,NULL);
cudaFree(0);
gettimeofday(&rr,NULL);
double ee = (rr.tv_sec - ww.tv_sec) * 1000.0 + (rr.tv_usec - ww.tv_usec) / 1000.0;
//printf("-------------cuda_warmup_time = %f ms -------------------\n",ee);
//---------------- cuda Malloc ----------
struct timeval cudamalloc_1, cudamalloc_2;
gettimeofday(&cudamalloc_1,NULL);
//vec cindex
cudaMalloc((void **)&d_cindex,row * sizeof(int));
cudaMalloc((void **)&d_vec,row * sizeof(int));
//A
cudaMalloc((void **)&d_A_row_ptr,(A->num_rows+1) * sizeof(int));
cudaMalloc((void **)&d_A_col_idx,A->num_nnzs * sizeof(int));
cudaMalloc((void **)&d_A_val,A->num_nnzs * sizeof(double));
//P
cudaMalloc( (void **)&d_P_row_ptr,(P->num_rows+1) * sizeof(int));
cudaMalloc( (void **)&d_P_col_idx,P->num_nnzs * sizeof(int));
cudaMalloc( (void **)&d_P_val,P->num_nnzs * sizeof(double));
gettimeofday(&cudamalloc_2,NULL);
double cudamalloc_3 = (cudamalloc_2.tv_sec - cudamalloc_1.tv_sec) * 1000.0 + (cudamalloc_2.tv_usec - cudamalloc_1.tv_usec) / 1000.0;
//printf("-------------cuda_malloc_time = %f ms -------------------\n",cudamalloc_3);
if (d_cindex == NULL || d_vec ==NULL || d_A_row_ptr == NULL|| d_A_col_idx == NULL|| d_A_val == NULL|| d_P_row_ptr == NULL|| d_P_col_idx == NULL || d_P_val == NULL)
{
printf("could't allocate GPU mem \n");
}
//-----------cuda Memcpy host_to_device----------
struct timeval hosttodevice_1, hosttodevice_2;
gettimeofday(&hosttodevice_1,NULL);
//vec cindex
cudaMemcpy(d_cindex, cindex, row * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_vec, vec, row * sizeof(int), cudaMemcpyHostToDevice);
//A
cudaMemcpy(d_A_row_ptr, A->row_ptr, (A->num_rows+1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_A_col_idx, A->col_idx, A->num_nnzs * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_A_val, A->val, A->num_nnzs * sizeof(double), cudaMemcpyHostToDevice);
//P
cudaMemcpy(d_P_row_ptr, P->row_ptr, (P->num_rows+1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_P_col_idx, P->col_idx, P->num_nnzs * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_P_val, P->val, P->num_nnzs * sizeof(double), cudaMemcpyHostToDevice);
gettimeofday(&hosttodevice_2,NULL);
double hosttodevice_3 = (hosttodevice_2.tv_sec - hosttodevice_1.tv_sec) * 1000.0 + (hosttodevice_2.tv_usec - hosttodevice_1.tv_usec) / 1000.0;
//printf("-------------cuda_host_to_device_time = %f ms -------------------\n",hosttodevice_3);
//--------------------cuda step1-----------------------
struct timeval cuda_step1_1, cuda_step1_2;
gettimeofday(&cuda_step1_1,NULL);
DIR_Step_1<<<gridsize,blocksize>>>(row, d_A_row_ptr, d_A_col_idx, d_A_val, d_vec, d_P_row_ptr, d_P_col_idx, d_P_val);
cudaDeviceSynchronize();
gettimeofday(&cuda_step1_2,NULL);
double cuda_step1_3 = (cuda_step1_2.tv_sec - cuda_step1_1.tv_sec) * 1000.0 + (cuda_step1_2.tv_usec - cuda_step1_1.tv_usec) / 1000.0;
printf("-------------cuda_step_1_time = %f ms -------------------\n",cuda_step1_3);
//-----------cuda Memcpy device_to_host----------
struct timeval devicetohost1, devicetohost2;
gettimeofday(&devicetohost1,NULL);
//vec cindex
cudaMemcpy(vec,d_vec,row * sizeof(int),cudaMemcpyDeviceToHost);
    cudaMemcpy(cindex, d_cindex, row * sizeof(int), cudaMemcpyDeviceToHost);  // cindex is a host array
//A
cudaMemcpy(A->row_ptr,d_A_row_ptr,(A->num_rows+1)*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(A->col_idx,d_A_col_idx,A->num_nnzs*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(A->val,d_A_val,A->num_nnzs*sizeof(double),cudaMemcpyDeviceToHost);
//P
cudaMemcpy(P->row_ptr,d_P_row_ptr,(P->num_rows+1)*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(P->col_idx,d_P_col_idx,P->num_nnzs*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(P->val,d_P_val,P->num_nnzs*sizeof(double),cudaMemcpyDeviceToHost);
gettimeofday(&devicetohost2,NULL);
double devicetohost3 = (devicetohost2.tv_sec - devicetohost1.tv_sec) * 1000.0 + (devicetohost2.tv_usec - devicetohost1.tv_usec) / 1000.0;
//printf("-------------cuda_device_to_host_time = %f ms -------------------\n",devicetohost3);
//-------------------------------cudaFree------------------------
struct timeval cudafree_1, cudafree_2;
gettimeofday(&cudafree_1,NULL);
cudaFree(d_cindex);
cudaFree(d_vec);
cudaFree(d_A_row_ptr);
cudaFree(d_A_col_idx);
cudaFree(d_A_val);
cudaFree(d_P_row_ptr);
cudaFree(d_P_col_idx);
cudaFree(d_P_val);
gettimeofday(&cudafree_2,NULL);
double cudafree_3 = (cudafree_2.tv_sec - cudafree_1.tv_sec) * 1000.0 + (cudafree_2.tv_usec - cudafree_1.tv_usec) / 1000.0;
//printf("-------------cuda_free_time = %f ms -------------------\n",cudafree_3);
// Step 2. Generate coarse level indices and set values of P.Aj
int i,j;
for (index = i = 0; i < row; ++i)
{
if (vec[i] == CGPT)
cindex[i] = index++;
}
P->num_cols = index;
for (i = 0; i < P->num_nnzs; ++i)
{
j = P->col_idx[i];
P->col_idx[i] = cindex[j];
}
// clean up
SSS_free(cindex);
// Step 3. Truncate the prolongation operator to reduce cost
SSS_amg_interp_trunc(P, pars);
}
void interp_DIR(SSS_MAT * A, SSS_IVEC * vertices, SSS_MAT * P, SSS_AMG_PARS * pars)
{
int row = A->num_rows;
int *vec = vertices->d;
// local variables
int IS_STRONG; // is the variable strong coupled to i?
int num_pcouple; // number of positive strong couplings
int begin_row, end_row;
int i, j, k, l, index = 0, idiag;
// a_minus and a_plus for Neighbors and Prolongation support
double amN, amP, apN, apP;
double alpha, beta, aii = 0;
// indices of C-nodes
int *cindex = (int *) SSS_calloc(row, sizeof(int));
struct timeval cpu_step_1, cpu_step_2;
gettimeofday(&cpu_step_1,NULL);
// Step 1. Fill in values for interpolation operator P
for (i = 0; i < row; ++i)
{
begin_row = A->row_ptr[i];
end_row = A->row_ptr[i + 1];
// find diagonal entry first!!!
for (idiag = begin_row; idiag < end_row; idiag++)
{
if (A->col_idx[idiag] == i)
{
aii = A->val[idiag];
break;
}
}
if (vec[i] == FGPT)
{ // fine grid nodes
amN = amP = apN = apP = 0.0;
num_pcouple = 0;
for (j = begin_row; j < end_row; ++j)
{
if (j == idiag) continue; // skip diagonal
// check a point strong-coupled to i or not
IS_STRONG = FALSE;
for (k = P->row_ptr[i]; k < P->row_ptr[i + 1]; ++k)
{
if (P->col_idx[k] == A->col_idx[j])
{
IS_STRONG = TRUE;
break;
}
}
if (A->val[j] > 0)
{
apN += A->val[j]; // sum up positive entries
if (IS_STRONG)
{
apP += A->val[j];
num_pcouple++;
}
}
else
{
amN += A->val[j]; // sum up negative entries
if (IS_STRONG) {
amP += A->val[j];
}
}
}
// set weight factors
alpha = amN / amP;
if (num_pcouple > 0) {
beta = apN / apP;
}
else {
beta = 0.0;
aii += apN;
}
// keep aii inside the loop to avoid floating pt error
for (j = P->row_ptr[i]; j < P->row_ptr[i + 1]; ++j)
{
k = P->col_idx[j];
for (l = A->row_ptr[i]; l < A->row_ptr[i + 1]; l++)
{
if (A->col_idx[l] == k) break;
}
if (A->val[l] > 0)
{
P->val[j] = -beta * A->val[l] / aii;
}
else
{
P->val[j] = -alpha * A->val[l] / aii;
}
}
} // end if vec
else if (vec[i] == CGPT) { // coarse grid nodes
P->val[P->row_ptr[i]] = 1.0;
}
}
gettimeofday(&cpu_step_2,NULL);
double cpu_step_3 = (cpu_step_2.tv_sec - cpu_step_1.tv_sec) * 1000.0 + (cpu_step_2.tv_usec - cpu_step_1.tv_usec) / 1000.0;
printf("-------------cpu_step1_time = %f ms -------------------\n",cpu_step_3);
// Step 2. Generate coarse level indices and set values of P.Aj
double time4=get_time();
for (index = i = 0; i < row; ++i) {
if (vec[i] == CGPT)
cindex[i] = index++;
}
P->num_cols = index;
for (i = 0; i < P->num_nnzs; ++i) {
j = P->col_idx[i];
P->col_idx[i] = cindex[j];
}
double time5=get_time();
double time6=time5-time4;
// printf("step2 time = %lf\n",time6);
// clean up
SSS_free(cindex);
// Step 3. Truncate the prolongation operator to reduce cost
double time7=get_time();
SSS_amg_interp_trunc(P, pars);
double time8=get_time();
double time9=time8-time7;
// printf("step3 time = %lf\n",time9);
}
static void interp_STD(SSS_MAT * A, SSS_IVEC * vertices, SSS_MAT * P, SSS_IMAT * S, SSS_AMG_PARS * pars)
{
//8 faster
omp_set_num_threads(8);
const int row = A->num_rows;
int *vec = vertices->d;
// local variables
int i, j, k, l, m, index;
double alpha, factor, alN, alP;
double akk, akl, aik, aki;
// indices for coarse neighbor node for every node
int *cindex = (int *) SSS_calloc(row, sizeof(int));
// indices from column number to index in nonzeros in i-th row
int *rindi = (int *) SSS_calloc(2 * row, sizeof(int));
// indices from column number to index in nonzeros in k-th row
int *rindk = (int *) SSS_calloc(2 * row, sizeof(int));
// sums of strongly connected C neighbors
double *csum = (double *) SSS_calloc(row, sizeof(double));
// sums of all neighbors except ISPT
double *psum = (double *) SSS_calloc(row, sizeof(double));
// sums of all neighbors
double *nsum = (double *) SSS_calloc(row, sizeof(double));
// diagonal entries
double *diag = (double *) SSS_calloc(row, sizeof(double));
// coefficients hat a_ij for relevant CGPT of the i-th node
double *Ahat = (double *) SSS_calloc(row, sizeof(double));
// Step 0. Prepare diagonal, Cs-sum, and N-sum
SSS_iarray_set(row, cindex, -1);
SSS_blas_array_set(row, csum, 0.0);
SSS_blas_array_set(row, nsum, 0.0);
for (i = 0; i < row; i++) {
// set flags for strong-connected C nodes
//num = 8
//#pragma omp parallel for
for (j = S->row_ptr[i]; j < S->row_ptr[i + 1]; j++) {
k = S->col_idx[j];
if (vec[k] == CGPT) cindex[k] = i;
}
//#pragma omp parallel for
for (j = A->row_ptr[i]; j < A->row_ptr[i + 1]; j++) {
k = A->col_idx[j];
if (cindex[k] == i) csum[i] += A->val[j]; // strong C-couplings
if (k == i)
diag[i] = A->val[j];
else {
nsum[i] += A->val[j];
if (vec[k] != ISPT) {
psum[i] += A->val[j];
}
}
}
}
// Step 1. Fill in values for interpolation operator P
//#pragma omp parallel for
for (i = 0; i < row; i++) {
if (vec[i] == FGPT) {
alN = psum[i];
alP = csum[i];
// form the reverse indices for i-th row
//#pragma omp parallel for
for (j = A->row_ptr[i]; j < A->row_ptr[i + 1]; j++) rindi[A->col_idx[j]] = j;
//#pragma omp parallel for
// clean up Ahat for relevant nodes only
//#pragma omp parallel for
for (j = P->row_ptr[i]; j < P->row_ptr[i + 1]; j++) Ahat[P->col_idx[j]] = 0.0;
// set values of Ahat
Ahat[i] = diag[i];
//#pragma omp parallel for
for (j = S->row_ptr[i]; j < S->row_ptr[i + 1]; j++) {
k = S->col_idx[j];
aik = A->val[rindi[k]];
if (vec[k] == CGPT) {
Ahat[k] += aik;
}
else if (vec[k] == FGPT) {
akk = diag[k];
// form the reverse indices for k-th row
//#pragma omp parallel for
for (m = A->row_ptr[k]; m < A->row_ptr[k + 1]; m++) rindk[A->col_idx[m]] = m;
factor = aik / akk;
// visit the strong-connected C neighbors of k, compute
// Ahat in the i-th row, set aki if found
aki = 0.0;
//#pragma omp parallel for
for (m = A->row_ptr[k]; m < A->row_ptr[k + 1]; m++) {
if (A->col_idx[m] == i) {
aki = A->val[m];
Ahat[i] -= factor * aki;
}
}
//#pragma omp parallel for
for (m = S->row_ptr[k]; m < S->row_ptr[k + 1]; m++) {
l = S->col_idx[m];
akl = A->val[rindk[l]];
if (vec[l] == CGPT)
Ahat[l] -= factor * akl;
} // end for m
// compute Cs-sum and N-sum for Ahat
alN -= factor * (nsum[k] - aki + akk);
alP -= factor * csum[k];
} // end if vec[k]
} // end for j
// How about positive entries
if (P->row_ptr[i + 1] > P->row_ptr[i]) alpha = alN / alP;
//#pragma omp parallel for
for (j = P->row_ptr[i]; j < P->row_ptr[i + 1]; j++) {
k = P->col_idx[j];
P->val[j] = -alpha * Ahat[k] / Ahat[i];
}
}
else if (vec[i] == CGPT) {
P->val[P->row_ptr[i]] = 1.0;
}
} // end for i
// Step 2. Generate coarse level indices and set values of P.col_idx
//#pragma omp parallel for
for (index = i = 0; i < row; ++i) {
if (vec[i] == CGPT) cindex[i] = index++;
}
P->num_cols = index;
//#pragma omp parallel for
for (i = 0; i < P->row_ptr[P->num_rows]; ++i) {
j = P->col_idx[i];
P->col_idx[i] = cindex[j];
}
// clean up
SSS_free(cindex);
SSS_free(rindi);
SSS_free(rindk);
SSS_free(nsum);
SSS_free(psum);
SSS_free(csum);
SSS_free(diag);
SSS_free(Ahat);
// Step 3. Truncate the prolongation operator to reduce cost
SSS_amg_interp_trunc(P, pars);
}
void SSS_amg_interp(SSS_MAT *A, SSS_IVEC *vertices, SSS_MAT *P, SSS_IMAT *S, SSS_AMG_PARS *pars)
{
interp_type interp_type = pars->interp_type;
switch (interp_type) {
case intERP_DIR: // Direct interpolation
//interp_DIR(A, vertices, P, pars);
interp_DIR_cuda(A, vertices, P, pars);
break;
case intERP_STD: // Standard interpolation
interp_STD(A, vertices, P, S, pars);
break;
default:
SSS_exit_on_errcode(ERROR_AMG_interp_type, __FUNCTION__);
}
}
|
623dba535150e50e4739618121df41a4e29bd52e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2019 Saman Ashkiani
*
* findEmptyPerWarpLicensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <thrust/device_vector.h>
#include <time.h>
#include <unistd.h>
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <random>
#include <vector>
#include "coordinate.h"
#include "cuda_unordered_map.h"
constexpr size_t D = 7;
using KeyT = Coordinate<int32_t, D>;
using ValueT = uint32_t;
using HashFunc = CoordinateHashFunc<int32_t, D>;
struct DataTupleCPU {
std::vector<KeyT> keys;
std::vector<ValueT> values;
std::vector<uint8_t> masks;
void Resize(uint32_t size) {
keys.resize(size);
values.resize(size);
masks.resize(size);
}
void Shuffle(int64_t seed) {
std::mt19937 rng(seed);
std::vector<int> q_index(keys.size());
std::iota(q_index.begin(), q_index.end(), 0);
std::shuffle(q_index.begin(), q_index.end(), rng);
/* Technically this is not totally correct, as the rotated indices can
* be swapped again elsewhere */
for (int i = 0; i < keys.size(); i++) {
std::swap(keys[i], keys[q_index[i]]);
std::swap(values[i], values[q_index[i]]);
std::swap(masks[i], masks[q_index[i]]);
}
}
};
struct DataTupleThrust {
thrust::device_vector<KeyT> keys;
thrust::device_vector<ValueT> values;
thrust::device_vector<uint8_t> masks;
uint32_t size;
void Resize(uint32_t new_size) {
Free();
keys.resize(new_size);
values.resize(new_size);
masks.resize(new_size);
size = new_size;
}
void Upload(const DataTupleCPU &data, uint8_t only_keys = false) {
assert(size == data.keys.size());
keys = data.keys;
if (!only_keys) {
values = data.values;
masks = data.masks;
}
}
void Download(DataTupleCPU &data) {
assert(size == data.keys.size());
CHECK_CUDA(hipMemcpy(data.keys.data(),
thrust::raw_pointer_cast(keys.data()),
sizeof(KeyT) * size, hipMemcpyDeviceToHost));
CHECK_CUDA(hipMemcpy(data.values.data(),
thrust::raw_pointer_cast(values.data()),
sizeof(ValueT) * size, hipMemcpyDeviceToHost));
CHECK_CUDA(hipMemcpy(data.masks.data(),
thrust::raw_pointer_cast(masks.data()),
sizeof(uint8_t) * size, hipMemcpyDeviceToHost));
}
void Free() {
keys.clear();
values.clear();
masks.clear();
}
};
class TestDataHelperThrust {
public:
TestDataHelperThrust(const int keys_pool_size,
const float hit_keys_ratio,
const int64_t seed = 1)
: keys_pool_size_(keys_pool_size), seed_(seed) {
hit_keys_pool_size_ =
static_cast<uint32_t>(keys_pool_size_ * hit_keys_ratio);
keys_pool_.resize(keys_pool_size_);
values_pool_.resize(hit_keys_pool_size_);
GenerateDataPool();
}
void GenerateDataPool() {
/** keys[i in 0 : hit_keys_pool_size_] = i
keys[i in hit_keys_pool_size_ : keys_pool_size] = NOT_FOUND **/
std::mt19937 rng(seed_);
std::vector<uint32_t> index(keys_pool_size_ * D);
std::iota(index.begin(), index.end(), 0);
std::shuffle(index.begin(), index.end(), rng);
for (int32_t i = 0; i < keys_pool_size_; ++i) {
for (int d = 0; d < D; ++d) {
keys_pool_[i][d] = index[i * D + d];
}
if (i < hit_keys_pool_size_) {
values_pool_[i] = i;
}
}
}
/** Return a tuple:
@DataTupleGPU for insertion:
- subset of query, only the 'hit' part
@DataTupleGPU @DataTupleCPU for query:
- all the possible queries, including 'hit' and 'miss'
-@DataTupleGPU: keys initialized for query,
            values and masks uninitialized, reserved for return value
-@DataTupleCPU: gt for keys, values, and masks **/
std::tuple<DataTupleThrust, DataTupleThrust, DataTupleCPU> GenerateData(
uint32_t num_queries, float existing_ratio) {
uint32_t num_hit_queries =
static_cast<uint32_t>(num_queries * existing_ratio);
assert(num_queries <= keys_pool_size_ &&
"num_queries > keys_pool_size_, abort");
assert(num_hit_queries <= hit_keys_pool_size_ &&
"num_hit_queries > hit_keys_pool_size_, abort");
DataTupleCPU insert_data, query_data_gt;
query_data_gt.Resize(num_queries);
int i = 0;
for (; i < num_hit_queries; i++) {
query_data_gt.keys[i] = keys_pool_[i];
query_data_gt.values[i] = values_pool_[i];
query_data_gt.masks[i] = 1;
}
for (; i < num_queries; ++i) {
query_data_gt.keys[i] = keys_pool_[i];
query_data_gt.values[i] = 0;
query_data_gt.masks[i] = 0;
}
/* insertion */
insert_data.keys =
std::vector<KeyT>(query_data_gt.keys.begin(),
query_data_gt.keys.begin() + num_hit_queries);
insert_data.values = std::vector<ValueT>(
query_data_gt.values.begin(),
query_data_gt.values.begin() + num_hit_queries);
insert_data.masks = std::vector<uint8_t>(
query_data_gt.masks.begin(),
query_data_gt.masks.begin() + num_hit_queries);
/* shuffled queries */
insert_data.Shuffle(seed_);
query_data_gt.Shuffle(seed_);
DataTupleThrust insert_data_gpu, query_data_gpu;
insert_data_gpu.Resize(num_hit_queries);
query_data_gpu.Resize(num_queries);
insert_data_gpu.Upload(insert_data);
query_data_gpu.Upload(query_data_gt, /* only keys = */ true);
return std::make_tuple(insert_data_gpu, query_data_gpu,
std::move(query_data_gt));
}
static uint8_t CheckQueryResult(const std::vector<uint32_t> &values,
const std::vector<uint8_t> &masks,
const std::vector<uint32_t> &values_gt,
const std::vector<uint8_t> &masks_gt) {
int num_queries = values.size();
for (int i = 0; i < num_queries; i++) {
if (!masks_gt[i] && masks[i]) {
printf("### Wrong result at index %d: should be NOT "
"FOUND\n",
i);
return false;
}
if (masks_gt[i] && !masks[i]) {
printf("### Wrong result at index %d: should be FOUND\n", i);
return false;
}
if (masks_gt[i] && masks[i] && (values_gt[i] != values[i])) {
printf("### Wrong result at index %d: %d, but should be "
"%d\n",
i, values[i], values_gt[i]);
return false;
}
}
return true;
}
std::vector<KeyT> keys_pool_;
std::vector<ValueT> values_pool_;
int keys_pool_size_;
int hit_keys_pool_size_;
int64_t seed_;
};
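/* Build the hash table from the 'hit' subset of keys, then query a mix of hit and miss
 * keys and check the returned values and masks against the CPU ground truth. */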
int TestInsert(TestDataHelperThrust &data_generator) {
CudaTimer timer;
float time;
cuda::unordered_map<KeyT, ValueT> hash_table(data_generator.keys_pool_size_);
auto insert_query_data_tuple = data_generator.GenerateData(
data_generator.keys_pool_size_ / 2, 0.4f);
auto &insert_data_gpu = std::get<0>(insert_query_data_tuple);
timer.Start();
hash_table.Insert(insert_data_gpu.keys, insert_data_gpu.values);
time = timer.Stop();
printf("1) Hash table built in %.3f ms (%.3f M elements/s)\n", time,
double(insert_data_gpu.size) / (time * 1000.0));
printf("1) Load factor = %f\n", hash_table.ComputeLoadFactor());
auto &query_data_gpu = std::get<1>(insert_query_data_tuple);
auto &query_data_cpu_gt = std::get<2>(insert_query_data_tuple);
timer.Start();
auto pair = hash_table.Search(query_data_gpu.keys);
time = timer.Stop();
printf("2) Hash table searched in %.3f ms (%.3f M queries/s)\n", time,
double(query_data_cpu_gt.keys.size()) / (time * 1000.0));
DataTupleCPU query_data_cpu;
query_data_cpu.Resize(query_data_gpu.size);
query_data_gpu.values = pair.first;
query_data_gpu.masks = pair.second;
query_data_gpu.Download(query_data_cpu);
uint8_t query_correct = data_generator.CheckQueryResult(
query_data_cpu.values, query_data_cpu.masks,
query_data_cpu_gt.values, query_data_cpu_gt.masks);
if (!query_correct) return -1;
return 0;
}
int TestRemove(TestDataHelperThrust &data_generator) {
CudaTimer timer;
float time;
cuda::unordered_map<KeyT, ValueT, HashFunc> hash_table(
data_generator.keys_pool_size_);
auto insert_query_data_tuple = data_generator.GenerateData(
data_generator.keys_pool_size_ / 2, 1.0f);
auto &insert_data_gpu = std::get<0>(insert_query_data_tuple);
timer.Start();
hash_table.Insert(insert_data_gpu.keys, insert_data_gpu.values);
time = timer.Stop();
printf("1) Hash table built in %.3f ms (%.3f M elements/s)\n", time,
double(insert_data_gpu.size) / (time * 1000.0));
printf("1) Load factor = %f\n", hash_table.ComputeLoadFactor());
auto &query_data_gpu = std::get<1>(insert_query_data_tuple);
auto &query_data_cpu_gt = std::get<2>(insert_query_data_tuple);
timer.Start();
auto pair = hash_table.Search(query_data_gpu.keys);
time = timer.Stop();
printf("2) Hash table searched in %.3f ms (%.3f M queries/s)\n", time,
double(query_data_cpu_gt.keys.size()) / (time * 1000.0));
DataTupleCPU query_data_cpu;
query_data_cpu.Resize(query_data_gpu.size);
query_data_gpu.values = pair.first;
query_data_gpu.masks = pair.second;
query_data_gpu.Download(query_data_cpu);
uint8_t query_correct = data_generator.CheckQueryResult(
query_data_cpu.values, query_data_cpu.masks,
query_data_cpu_gt.values, query_data_cpu_gt.masks);
if (!query_correct) return -1;
/** Remove everything **/
timer.Start();
hash_table.Remove(query_data_gpu.keys);
time = timer.Stop();
printf("3) Hash table deleted in %.3f ms (%.3f M queries/s)\n", time,
double(query_data_gpu.size) / (time * 1000.0));
printf("3) Load factor = %f\n", hash_table.ComputeLoadFactor());
auto query_masks_gt_after_deletion =
std::vector<uint8_t>(query_data_cpu_gt.keys.size());
std::fill(query_masks_gt_after_deletion.begin(),
query_masks_gt_after_deletion.end(), 0);
timer.Start();
pair = hash_table.Search(query_data_gpu.keys);
time = timer.Stop();
printf("4) Hash table searched in %.3f ms (%.3f M queries/s)\n", time,
double(query_data_gpu.size) / (time * 1000.0));
query_data_gpu.values = pair.first;
query_data_gpu.masks = pair.second;
query_data_gpu.Download(query_data_cpu);
query_correct = data_generator.CheckQueryResult(
query_data_cpu.values, query_data_cpu.masks,
query_data_cpu_gt.values, query_masks_gt_after_deletion);
if (!query_correct) return -1;
return 0;
}
int TestConflict(TestDataHelperThrust &data_generator) {
CudaTimer timer;
float time;
cuda::unordered_map<KeyT, ValueT, HashFunc> hash_table(
data_generator.keys_pool_size_);
auto insert_query_data_tuple = data_generator.GenerateData(
data_generator.keys_pool_size_ / 2, 1.0f);
auto &insert_data_gpu = std::get<0>(insert_query_data_tuple);
timer.Start();
hash_table.Insert(insert_data_gpu.keys, insert_data_gpu.values);
time = timer.Stop();
printf("1) Hash table built in %.3f ms (%.3f M elements/s)\n", time,
double(insert_data_gpu.size) / (time * 1000.0));
printf("1) Load factor = %f\n", hash_table.ComputeLoadFactor());
auto &query_data_gpu = std::get<1>(insert_query_data_tuple);
auto &query_data_cpu_gt = std::get<2>(insert_query_data_tuple);
timer.Start();
auto pair = hash_table.Search(query_data_gpu.keys);
time = timer.Stop();
printf("2) Hash table searched in %.3f ms (%.3f M queries/s)\n", time,
double(query_data_cpu_gt.keys.size()) / (time * 1000.0));
DataTupleCPU query_data_cpu;
query_data_cpu.Resize(query_data_gpu.size);
query_data_gpu.values = pair.first;
query_data_gpu.masks = pair.second;
query_data_gpu.Download(query_data_cpu);
uint8_t query_correct = data_generator.CheckQueryResult(
query_data_cpu.values, query_data_cpu.masks,
query_data_cpu_gt.values, query_data_cpu_gt.masks);
if (!query_correct) return -1;
DataTupleCPU insert_data_cpu_duplicate;
insert_data_cpu_duplicate.Resize(insert_data_gpu.size);
insert_data_gpu.Download(insert_data_cpu_duplicate);
for (auto &v : insert_data_cpu_duplicate.values) {
v += 1;
}
insert_data_gpu.Upload(insert_data_cpu_duplicate);
timer.Start();
hash_table.Insert(insert_data_gpu.keys, insert_data_gpu.values);
time = timer.Stop();
printf("3) Hash table inserted in %.3f ms (%.3f M elements/s)\n", time,
double(insert_data_gpu.size) / (time * 1000.0));
printf("3) Load factor = %f\n", hash_table.ComputeLoadFactor());
timer.Start();
pair = hash_table.Search(query_data_gpu.keys);
time = timer.Stop();
printf("4) Hash table searched in %.3f ms (%.3f M queries/s)\n", time,
double(query_data_gpu.size) / (time * 1000.0));
query_data_gpu.values = pair.first;
query_data_gpu.masks = pair.second;
query_data_gpu.Download(query_data_cpu);
query_correct = data_generator.CheckQueryResult(
query_data_cpu.values, query_data_cpu.masks,
query_data_cpu_gt.values, query_data_cpu_gt.masks);
if (!query_correct) return -1;
return 0;
}
int main() {
const int key_value_pool_size = 1 << 20;
const float existing_ratio = 0.6f;
auto data_generator =
TestDataHelperThrust(key_value_pool_size, existing_ratio);
printf(">>> Test sequence: insert (0.5 valid) -> query\n");
assert(!TestInsert(data_generator) && "TestInsert failed.\n");
printf("TestInsert passed.\n");
printf(">>> Test sequence: insert (all valid) -> query -> delete -> "
"query\n");
assert(!TestRemove(data_generator) && "TestRemove failed.\n");
printf("TestRemove passed.\n");
printf(">>> Test sequence: insert (all valid) -> query -> insert (all "
"valid, duplicate) -> query\n");
assert(!TestConflict(data_generator) && "TestConflict failed.\n");
printf("TestConflict passed.\n");
return 0;
}
| 623dba535150e50e4739618121df41a4e29bd52e.cu | /*
* Copyright 2019 Saman Ashkiani
*
 * Licensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <thrust/device_vector.h>
#include <time.h>
#include <unistd.h>
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <numeric>
#include <random>
#include <vector>
#include "coordinate.h"
#include "cuda_unordered_map.h"
constexpr size_t D = 7;
using KeyT = Coordinate<int32_t, D>;
using ValueT = uint32_t;
using HashFunc = CoordinateHashFunc<int32_t, D>;
struct DataTupleCPU {
std::vector<KeyT> keys;
std::vector<ValueT> values;
std::vector<uint8_t> masks;
void Resize(uint32_t size) {
keys.resize(size);
values.resize(size);
masks.resize(size);
}
void Shuffle(int64_t seed) {
std::mt19937 rng(seed);
std::vector<int> q_index(keys.size());
std::iota(q_index.begin(), q_index.end(), 0);
std::shuffle(q_index.begin(), q_index.end(), rng);
/* Technically this is not totally correct, as the rotated indices can
* be swapped again elsewhere */
for (int i = 0; i < keys.size(); i++) {
std::swap(keys[i], keys[q_index[i]]);
std::swap(values[i], values[q_index[i]]);
std::swap(masks[i], masks[q_index[i]]);
}
}
};
struct DataTupleThrust {
thrust::device_vector<KeyT> keys;
thrust::device_vector<ValueT> values;
thrust::device_vector<uint8_t> masks;
uint32_t size;
void Resize(uint32_t new_size) {
Free();
keys.resize(new_size);
values.resize(new_size);
masks.resize(new_size);
size = new_size;
}
void Upload(const DataTupleCPU &data, uint8_t only_keys = false) {
assert(size == data.keys.size());
keys = data.keys;
if (!only_keys) {
values = data.values;
masks = data.masks;
}
}
void Download(DataTupleCPU &data) {
assert(size == data.keys.size());
CHECK_CUDA(cudaMemcpy(data.keys.data(),
thrust::raw_pointer_cast(keys.data()),
sizeof(KeyT) * size, cudaMemcpyDeviceToHost));
CHECK_CUDA(cudaMemcpy(data.values.data(),
thrust::raw_pointer_cast(values.data()),
sizeof(ValueT) * size, cudaMemcpyDeviceToHost));
CHECK_CUDA(cudaMemcpy(data.masks.data(),
thrust::raw_pointer_cast(masks.data()),
sizeof(uint8_t) * size, cudaMemcpyDeviceToHost));
}
void Free() {
keys.clear();
values.clear();
masks.clear();
}
};
class TestDataHelperThrust {
public:
TestDataHelperThrust(const int keys_pool_size,
const float hit_keys_ratio,
const int64_t seed = 1)
: keys_pool_size_(keys_pool_size), seed_(seed) {
hit_keys_pool_size_ =
static_cast<uint32_t>(keys_pool_size_ * hit_keys_ratio);
keys_pool_.resize(keys_pool_size_);
values_pool_.resize(hit_keys_pool_size_);
GenerateDataPool();
}
void GenerateDataPool() {
/** keys[i in 0 : hit_keys_pool_size_] = i
keys[i in hit_keys_pool_size_ : keys_pool_size] = NOT_FOUND **/
std::mt19937 rng(seed_);
std::vector<uint32_t> index(keys_pool_size_ * D);
std::iota(index.begin(), index.end(), 0);
std::shuffle(index.begin(), index.end(), rng);
for (int32_t i = 0; i < keys_pool_size_; ++i) {
for (int d = 0; d < D; ++d) {
keys_pool_[i][d] = index[i * D + d];
}
if (i < hit_keys_pool_size_) {
values_pool_[i] = i;
}
}
}
/** Return a tuple:
@DataTupleGPU for insertion:
- subset of query, only the 'hit' part
@DataTupleGPU @DataTupleCPU for query:
- all the possible queries, including 'hit' and 'miss'
-@DataTupleGPU: keys initialized for query,
           values and masks uninitialized, reserved for return value
-@DataTupleCPU: gt for keys, values, and masks **/
std::tuple<DataTupleThrust, DataTupleThrust, DataTupleCPU> GenerateData(
uint32_t num_queries, float existing_ratio) {
uint32_t num_hit_queries =
static_cast<uint32_t>(num_queries * existing_ratio);
assert(num_queries <= keys_pool_size_ &&
"num_queries > keys_pool_size_, abort");
assert(num_hit_queries <= hit_keys_pool_size_ &&
"num_hit_queries > hit_keys_pool_size_, abort");
DataTupleCPU insert_data, query_data_gt;
query_data_gt.Resize(num_queries);
int i = 0;
for (; i < num_hit_queries; i++) {
query_data_gt.keys[i] = keys_pool_[i];
query_data_gt.values[i] = values_pool_[i];
query_data_gt.masks[i] = 1;
}
for (; i < num_queries; ++i) {
query_data_gt.keys[i] = keys_pool_[i];
query_data_gt.values[i] = 0;
query_data_gt.masks[i] = 0;
}
/* insertion */
insert_data.keys =
std::vector<KeyT>(query_data_gt.keys.begin(),
query_data_gt.keys.begin() + num_hit_queries);
insert_data.values = std::vector<ValueT>(
query_data_gt.values.begin(),
query_data_gt.values.begin() + num_hit_queries);
insert_data.masks = std::vector<uint8_t>(
query_data_gt.masks.begin(),
query_data_gt.masks.begin() + num_hit_queries);
/* shuffled queries */
insert_data.Shuffle(seed_);
query_data_gt.Shuffle(seed_);
DataTupleThrust insert_data_gpu, query_data_gpu;
insert_data_gpu.Resize(num_hit_queries);
query_data_gpu.Resize(num_queries);
insert_data_gpu.Upload(insert_data);
query_data_gpu.Upload(query_data_gt, /* only keys = */ true);
return std::make_tuple(insert_data_gpu, query_data_gpu,
std::move(query_data_gt));
}
static uint8_t CheckQueryResult(const std::vector<uint32_t> &values,
const std::vector<uint8_t> &masks,
const std::vector<uint32_t> &values_gt,
const std::vector<uint8_t> &masks_gt) {
int num_queries = values.size();
for (int i = 0; i < num_queries; i++) {
if (!masks_gt[i] && masks[i]) {
printf("### Wrong result at index %d: should be NOT "
"FOUND\n",
i);
return false;
}
if (masks_gt[i] && !masks[i]) {
printf("### Wrong result at index %d: should be FOUND\n", i);
return false;
}
if (masks_gt[i] && masks[i] && (values_gt[i] != values[i])) {
printf("### Wrong result at index %d: %d, but should be "
"%d\n",
i, values[i], values_gt[i]);
return false;
}
}
return true;
}
std::vector<KeyT> keys_pool_;
std::vector<ValueT> values_pool_;
int keys_pool_size_;
int hit_keys_pool_size_;
int64_t seed_;
};
int TestInsert(TestDataHelperThrust &data_generator) {
CudaTimer timer;
float time;
cuda::unordered_map<KeyT, ValueT> hash_table(data_generator.keys_pool_size_);
auto insert_query_data_tuple = data_generator.GenerateData(
data_generator.keys_pool_size_ / 2, 0.4f);
auto &insert_data_gpu = std::get<0>(insert_query_data_tuple);
timer.Start();
hash_table.Insert(insert_data_gpu.keys, insert_data_gpu.values);
time = timer.Stop();
printf("1) Hash table built in %.3f ms (%.3f M elements/s)\n", time,
double(insert_data_gpu.size) / (time * 1000.0));
printf("1) Load factor = %f\n", hash_table.ComputeLoadFactor());
auto &query_data_gpu = std::get<1>(insert_query_data_tuple);
auto &query_data_cpu_gt = std::get<2>(insert_query_data_tuple);
timer.Start();
auto pair = hash_table.Search(query_data_gpu.keys);
time = timer.Stop();
printf("2) Hash table searched in %.3f ms (%.3f M queries/s)\n", time,
double(query_data_cpu_gt.keys.size()) / (time * 1000.0));
DataTupleCPU query_data_cpu;
query_data_cpu.Resize(query_data_gpu.size);
query_data_gpu.values = pair.first;
query_data_gpu.masks = pair.second;
query_data_gpu.Download(query_data_cpu);
uint8_t query_correct = data_generator.CheckQueryResult(
query_data_cpu.values, query_data_cpu.masks,
query_data_cpu_gt.values, query_data_cpu_gt.masks);
if (!query_correct) return -1;
return 0;
}
int TestRemove(TestDataHelperThrust &data_generator) {
CudaTimer timer;
float time;
cuda::unordered_map<KeyT, ValueT, HashFunc> hash_table(
data_generator.keys_pool_size_);
auto insert_query_data_tuple = data_generator.GenerateData(
data_generator.keys_pool_size_ / 2, 1.0f);
auto &insert_data_gpu = std::get<0>(insert_query_data_tuple);
timer.Start();
hash_table.Insert(insert_data_gpu.keys, insert_data_gpu.values);
time = timer.Stop();
printf("1) Hash table built in %.3f ms (%.3f M elements/s)\n", time,
double(insert_data_gpu.size) / (time * 1000.0));
printf("1) Load factor = %f\n", hash_table.ComputeLoadFactor());
auto &query_data_gpu = std::get<1>(insert_query_data_tuple);
auto &query_data_cpu_gt = std::get<2>(insert_query_data_tuple);
timer.Start();
auto pair = hash_table.Search(query_data_gpu.keys);
time = timer.Stop();
printf("2) Hash table searched in %.3f ms (%.3f M queries/s)\n", time,
double(query_data_cpu_gt.keys.size()) / (time * 1000.0));
DataTupleCPU query_data_cpu;
query_data_cpu.Resize(query_data_gpu.size);
query_data_gpu.values = pair.first;
query_data_gpu.masks = pair.second;
query_data_gpu.Download(query_data_cpu);
uint8_t query_correct = data_generator.CheckQueryResult(
query_data_cpu.values, query_data_cpu.masks,
query_data_cpu_gt.values, query_data_cpu_gt.masks);
if (!query_correct) return -1;
/** Remove everything **/
timer.Start();
hash_table.Remove(query_data_gpu.keys);
time = timer.Stop();
printf("3) Hash table deleted in %.3f ms (%.3f M queries/s)\n", time,
double(query_data_gpu.size) / (time * 1000.0));
printf("3) Load factor = %f\n", hash_table.ComputeLoadFactor());
auto query_masks_gt_after_deletion =
std::vector<uint8_t>(query_data_cpu_gt.keys.size());
std::fill(query_masks_gt_after_deletion.begin(),
query_masks_gt_after_deletion.end(), 0);
timer.Start();
pair = hash_table.Search(query_data_gpu.keys);
time = timer.Stop();
printf("4) Hash table searched in %.3f ms (%.3f M queries/s)\n", time,
double(query_data_gpu.size) / (time * 1000.0));
query_data_gpu.values = pair.first;
query_data_gpu.masks = pair.second;
query_data_gpu.Download(query_data_cpu);
query_correct = data_generator.CheckQueryResult(
query_data_cpu.values, query_data_cpu.masks,
query_data_cpu_gt.values, query_masks_gt_after_deletion);
if (!query_correct) return -1;
return 0;
}
int TestConflict(TestDataHelperThrust &data_generator) {
CudaTimer timer;
float time;
cuda::unordered_map<KeyT, ValueT, HashFunc> hash_table(
data_generator.keys_pool_size_);
auto insert_query_data_tuple = data_generator.GenerateData(
data_generator.keys_pool_size_ / 2, 1.0f);
auto &insert_data_gpu = std::get<0>(insert_query_data_tuple);
timer.Start();
hash_table.Insert(insert_data_gpu.keys, insert_data_gpu.values);
time = timer.Stop();
printf("1) Hash table built in %.3f ms (%.3f M elements/s)\n", time,
double(insert_data_gpu.size) / (time * 1000.0));
printf("1) Load factor = %f\n", hash_table.ComputeLoadFactor());
auto &query_data_gpu = std::get<1>(insert_query_data_tuple);
auto &query_data_cpu_gt = std::get<2>(insert_query_data_tuple);
timer.Start();
auto pair = hash_table.Search(query_data_gpu.keys);
time = timer.Stop();
printf("2) Hash table searched in %.3f ms (%.3f M queries/s)\n", time,
double(query_data_cpu_gt.keys.size()) / (time * 1000.0));
DataTupleCPU query_data_cpu;
query_data_cpu.Resize(query_data_gpu.size);
query_data_gpu.values = pair.first;
query_data_gpu.masks = pair.second;
query_data_gpu.Download(query_data_cpu);
uint8_t query_correct = data_generator.CheckQueryResult(
query_data_cpu.values, query_data_cpu.masks,
query_data_cpu_gt.values, query_data_cpu_gt.masks);
if (!query_correct) return -1;
DataTupleCPU insert_data_cpu_duplicate;
insert_data_cpu_duplicate.Resize(insert_data_gpu.size);
insert_data_gpu.Download(insert_data_cpu_duplicate);
for (auto &v : insert_data_cpu_duplicate.values) {
v += 1;
}
insert_data_gpu.Upload(insert_data_cpu_duplicate);
timer.Start();
hash_table.Insert(insert_data_gpu.keys, insert_data_gpu.values);
time = timer.Stop();
printf("3) Hash table inserted in %.3f ms (%.3f M elements/s)\n", time,
double(insert_data_gpu.size) / (time * 1000.0));
printf("3) Load factor = %f\n", hash_table.ComputeLoadFactor());
timer.Start();
pair = hash_table.Search(query_data_gpu.keys);
time = timer.Stop();
printf("4) Hash table searched in %.3f ms (%.3f M queries/s)\n", time,
double(query_data_gpu.size) / (time * 1000.0));
query_data_gpu.values = pair.first;
query_data_gpu.masks = pair.second;
query_data_gpu.Download(query_data_cpu);
query_correct = data_generator.CheckQueryResult(
query_data_cpu.values, query_data_cpu.masks,
query_data_cpu_gt.values, query_data_cpu_gt.masks);
if (!query_correct) return -1;
return 0;
}
int main() {
const int key_value_pool_size = 1 << 20;
const float existing_ratio = 0.6f;
auto data_generator =
TestDataHelperThrust(key_value_pool_size, existing_ratio);
printf(">>> Test sequence: insert (0.5 valid) -> query\n");
assert(!TestInsert(data_generator) && "TestInsert failed.\n");
printf("TestInsert passed.\n");
printf(">>> Test sequence: insert (all valid) -> query -> delete -> "
"query\n");
assert(!TestRemove(data_generator) && "TestRemove failed.\n");
printf("TestRemove passed.\n");
printf(">>> Test sequence: insert (all valid) -> query -> insert (all "
"valid, duplicate) -> query\n");
assert(!TestConflict(data_generator) && "TestConflict failed.\n");
printf("TestConflict passed.\n");
return 0;
}
|
f0c59667917a99611ac254358beb75d63753778d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/amp/update_loss_scaling_op.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, typename FoundNanInfFlagT>
__global__ void GpuUpdateLossScaling(
const FoundNanInfFlagT found_inf_data, const T* pre_loss_scaling_data,
const int* good_in_data, const int* bad_in_data,
const int incr_every_n_steps, const int decr_every_n_nan_or_inf,
const float incr_ratio, const float decr_ratio,
T* updated_loss_scaling_data, int* good_out_data, int* bad_out_data) {
Update<T>(found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
}
template <typename T>
__global__ void FusedFillIf(T** outs, const size_t xs_size,
const int64_t* starts, const T value,
const bool* has_inf) {
if (!(*has_inf)) return;
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
// copy starts array from global memory to shared memory
extern __shared__ int64_t s_starts[];
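  // s_starts holds xs_size + 1 prefix-sum entries (the last one is the total
  // element count), which is why the copy loop below uses an inclusive bound.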
for (int i = threadIdx.x; i <= xs_size; i += blockDim.x) {
s_starts[i] = starts[i];
}
__syncthreads();
const int64_t total_num = s_starts[xs_size];
int out_index = 0;
for (int64_t id = tid; id < total_num; id += blockDim.x * gridDim.x) {
// get the "out" index of "id"
// For example:
// id = 15, starts = [0, 10, 10, 20, 30]
// because 10 <= id < 20 ==>
// the id element locate in the 3rd tensor (notice the 2nd tensor size is 0)
int next_out_index = out_index;
while (id >= s_starts[next_out_index]) next_out_index++;
out_index = next_out_index - 1;
// get data pointer and index
T* out_data = outs[out_index];
int64_t idx = id - s_starts[out_index];
// set value
out_data[idx] = value;
}
}
template <typename T, bool IsFoundInfOnCPU>
class UpdateLossScalingFunctor<platform::CUDADeviceContext, T,
IsFoundInfOnCPU> {
public:
void operator()(const platform::CUDADeviceContext& dev_ctx,
const bool* found_inf_data, const T* pre_loss_scaling_data,
const int* good_in_data, const int* bad_in_data,
const int incr_every_n_steps,
const int decr_every_n_nan_or_inf, const float incr_ratio,
const float decr_ratio, T* updated_loss_scaling_data,
int* good_out_data, int* bad_out_data) const {
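    // The loss-scaling state is only a handful of scalars, so a one-block,
    // one-thread launch is sufficient. The two branches below differ only in
    // whether the found-inf flag is dereferenced on the host (passed by value)
    // or handed to the kernel as a device pointer.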
if (IsFoundInfOnCPU) {
hipLaunchKernelGGL(( GpuUpdateLossScaling<T>), dim3(1), dim3(1), 0, dev_ctx.stream(),
*found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
} else {
hipLaunchKernelGGL(( GpuUpdateLossScaling<T>), dim3(1), dim3(1), 0, dev_ctx.stream(),
found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
}
}
};
template <typename T>
class LazyZeros<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& dev_ctx,
const bool* found_inf_data,
const std::vector<const framework::Tensor*>& xs,
const std::vector<framework::Tensor*>& outs) const {
size_t xs_size = xs.size();
if (xs_size == 0) return;
const auto& cpu_place = platform::CPUPlace();
// alloc each tensor's start index and copy to device
auto h_in_starts_mem =
memory::Alloc(cpu_place, (xs_size + 1) * sizeof(int64_t));
int64_t* h_starts = reinterpret_cast<int64_t*>(h_in_starts_mem->ptr());
auto d_in_starts_mem =
memory::Alloc(dev_ctx, (xs_size + 1) * sizeof(int64_t));
int64_t* d_starts = reinterpret_cast<int64_t*>(d_in_starts_mem->ptr());
// the start index value of each tensor is
// the sum of previous tensor's size. For example:
// outs = [10, 0, 10, 10] ==> starts = [0, 10, 10, 20, 30]
h_starts[0] = 0;
for (int i = 0; i < xs_size; i++) {
h_starts[i + 1] = h_starts[i] + outs[i]->numel();
}
memory::Copy(dev_ctx.GetPlace(), d_starts, cpu_place, h_starts,
(xs_size + 1) * sizeof(int64_t), dev_ctx.stream());
// copy each tensor of "outs" data address array to device
auto h_out_addrs_mem = memory::Alloc(cpu_place, xs_size * sizeof(T*));
T** h_out_addrs = reinterpret_cast<T**>(h_out_addrs_mem->ptr());
auto d_out_addrs_mem = memory::Alloc(dev_ctx, xs_size * sizeof(T*));
T** d_out_addrs = reinterpret_cast<T**>(d_out_addrs_mem->ptr());
for (size_t i = 0; i < xs_size; ++i) {
h_out_addrs[i] = outs[i]->mutable_data<T>(dev_ctx.GetPlace());
}
memory::Copy(dev_ctx.GetPlace(), d_out_addrs, cpu_place, h_out_addrs,
xs_size * sizeof(T*), dev_ctx.stream());
// launch cuda kernel
int64_t total_num = h_starts[xs_size];
int64_t threads_per_block = ::min(static_cast<int64_t>(1024), total_num);
int64_t elements_per_block =
        threads_per_block * 50;  // each thread handles up to 50 elements
int64_t blocks_per_grid =
(total_num + elements_per_block - 1) / elements_per_block;
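    // For example, total_num = 100,000 gives threads_per_block = 1024,
    // elements_per_block = 51,200 and blocks_per_grid = 2; the grid-stride
    // loop in FusedFillIf then leaves each of the 2,048 threads with roughly
    // 50 elements.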
hipLaunchKernelGGL(( FusedFillIf<T>), dim3(blocks_per_grid), dim3(threads_per_block),
(xs_size + 1) * sizeof(int64_t), dev_ctx.stream(),
d_out_addrs, xs_size, d_starts, static_cast<T>(0), found_inf_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
using GPU = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(update_loss_scaling,
ops::UpdateLossScalingKernel<GPU, float>,
ops::UpdateLossScalingKernel<GPU, double>,
ops::UpdateLossScalingKernel<GPU, plat::float16>);
| f0c59667917a99611ac254358beb75d63753778d.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/amp/update_loss_scaling_op.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, typename FoundNanInfFlagT>
__global__ void GpuUpdateLossScaling(
const FoundNanInfFlagT found_inf_data, const T* pre_loss_scaling_data,
const int* good_in_data, const int* bad_in_data,
const int incr_every_n_steps, const int decr_every_n_nan_or_inf,
const float incr_ratio, const float decr_ratio,
T* updated_loss_scaling_data, int* good_out_data, int* bad_out_data) {
Update<T>(found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
}
template <typename T>
__global__ void FusedFillIf(T** outs, const size_t xs_size,
const int64_t* starts, const T value,
const bool* has_inf) {
if (!(*has_inf)) return;
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
// copy starts array from global memory to shared memory
extern __shared__ int64_t s_starts[];
for (int i = threadIdx.x; i <= xs_size; i += blockDim.x) {
s_starts[i] = starts[i];
}
__syncthreads();
const int64_t total_num = s_starts[xs_size];
int out_index = 0;
for (int64_t id = tid; id < total_num; id += blockDim.x * gridDim.x) {
// get the "out" index of "id"
// For example:
// id = 15, starts = [0, 10, 10, 20, 30]
// because 10 <= id < 20 ==>
// the id element locate in the 3rd tensor (notice the 2nd tensor size is 0)
int next_out_index = out_index;
while (id >= s_starts[next_out_index]) next_out_index++;
out_index = next_out_index - 1;
// get data pointer and index
T* out_data = outs[out_index];
int64_t idx = id - s_starts[out_index];
// set value
out_data[idx] = value;
}
}
template <typename T, bool IsFoundInfOnCPU>
class UpdateLossScalingFunctor<platform::CUDADeviceContext, T,
IsFoundInfOnCPU> {
public:
void operator()(const platform::CUDADeviceContext& dev_ctx,
const bool* found_inf_data, const T* pre_loss_scaling_data,
const int* good_in_data, const int* bad_in_data,
const int incr_every_n_steps,
const int decr_every_n_nan_or_inf, const float incr_ratio,
const float decr_ratio, T* updated_loss_scaling_data,
int* good_out_data, int* bad_out_data) const {
if (IsFoundInfOnCPU) {
GpuUpdateLossScaling<T><<<1, 1, 0, dev_ctx.stream()>>>(
*found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
} else {
GpuUpdateLossScaling<T><<<1, 1, 0, dev_ctx.stream()>>>(
found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
}
}
};
template <typename T>
class LazyZeros<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& dev_ctx,
const bool* found_inf_data,
const std::vector<const framework::Tensor*>& xs,
const std::vector<framework::Tensor*>& outs) const {
size_t xs_size = xs.size();
if (xs_size == 0) return;
const auto& cpu_place = platform::CPUPlace();
// alloc each tensor's start index and copy to device
auto h_in_starts_mem =
memory::Alloc(cpu_place, (xs_size + 1) * sizeof(int64_t));
int64_t* h_starts = reinterpret_cast<int64_t*>(h_in_starts_mem->ptr());
auto d_in_starts_mem =
memory::Alloc(dev_ctx, (xs_size + 1) * sizeof(int64_t));
int64_t* d_starts = reinterpret_cast<int64_t*>(d_in_starts_mem->ptr());
// the start index value of each tensor is
// the sum of previous tensor's size. For example:
// outs = [10, 0, 10, 10] ==> starts = [0, 10, 10, 20, 30]
h_starts[0] = 0;
for (int i = 0; i < xs_size; i++) {
h_starts[i + 1] = h_starts[i] + outs[i]->numel();
}
memory::Copy(dev_ctx.GetPlace(), d_starts, cpu_place, h_starts,
(xs_size + 1) * sizeof(int64_t), dev_ctx.stream());
// copy each tensor of "outs" data address array to device
auto h_out_addrs_mem = memory::Alloc(cpu_place, xs_size * sizeof(T*));
T** h_out_addrs = reinterpret_cast<T**>(h_out_addrs_mem->ptr());
auto d_out_addrs_mem = memory::Alloc(dev_ctx, xs_size * sizeof(T*));
T** d_out_addrs = reinterpret_cast<T**>(d_out_addrs_mem->ptr());
for (size_t i = 0; i < xs_size; ++i) {
h_out_addrs[i] = outs[i]->mutable_data<T>(dev_ctx.GetPlace());
}
memory::Copy(dev_ctx.GetPlace(), d_out_addrs, cpu_place, h_out_addrs,
xs_size * sizeof(T*), dev_ctx.stream());
// launch cuda kernel
int64_t total_num = h_starts[xs_size];
int64_t threads_per_block = std::min(static_cast<int64_t>(1024), total_num);
int64_t elements_per_block =
        threads_per_block * 50;  // each thread handles up to 50 elements
int64_t blocks_per_grid =
(total_num + elements_per_block - 1) / elements_per_block;
FusedFillIf<T><<<blocks_per_grid, threads_per_block,
(xs_size + 1) * sizeof(int64_t), dev_ctx.stream()>>>(
d_out_addrs, xs_size, d_starts, static_cast<T>(0), found_inf_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
using GPU = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(update_loss_scaling,
ops::UpdateLossScalingKernel<GPU, float>,
ops::UpdateLossScalingKernel<GPU, double>,
ops::UpdateLossScalingKernel<GPU, plat::float16>);
|
cdda0345b54909b9a099bb1d7b7b33541fc14a6c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <sys/time.h>
#include "pa2.h"
/* For accurate performance metrics, run with CUDA_LAUNCH_BLOCKING="1" */
__global__ void AATrans(mtxel *mtx, mtxel *dest, int dim, int maxcache)
{
int base = blockDim.x * blockIdx.x;
int t = base + threadIdx.x;
/* Calculate the column the thread is working in.
* We are only computing half the matrix,
* since the matrix is symmetric along the diagonal.
* For a 4x4 matrix, the thread assignment looks as follows:
* 1 2 3 4
* 2 5 6 7
* 3 6 8 9
* 4 7 9 A
*/
int c = floor((1+2*dim-sqrtf(1+4*dim+4*dim*dim-8*t))/2);
/* The row follows from the column */
int r = t - c * dim + c * (c - 1) / 2 + c;
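  /* Sanity check of the closed form (it inverts the fact that
   * c*dim - c*(c-1)/2 entries precede column c): for dim = 4 and t = 5,
   * sqrt(1 + 16 + 64 - 40) = sqrt(41) ~ 6.40, so c = floor((9 - 6.40)/2) = 1
   * and r = 5 - 4 + 0 + 1 = 2, i.e. the entry labelled 6 in the diagram above.
   */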
DBGPRINT("Thread %d Initial Position: (%d, %d) with "
"dim %d, blocksize %d, and cache %d\n",
t, r, c, dim, blockDim.x, maxcache);
int basec = floor((1+2*dim-sqrtf(1+4*dim+4*dim*dim-8*base))/2);
/* rowmem just gives fast access to a single row
* The first blksize arrays are for the rows of the matrix at r
* The second blksize arrays are for the rows of the matrix at c
*/
extern __shared__ mtxel rowmem[];
int totlen = min(dim, maxcache);
int run = totlen / blockDim.x;
run++;
int offset = run * threadIdx.x;
run++;
for(int i = offset; i - offset < run && i < totlen; i++)
rowmem[i] = mtx[c * dim + i];
__syncthreads();
/* Compute A A^T */
if(c >= 0 && c < dim &&
r >= 0 && r < dim) {
dest[c * dim + r] = 0.0;
/* Determine if the thread has access to the local memory */
if(basec == c) {
/* Move our current column into fast shared memory */
for(int k = 0; k < dim && k < maxcache; k++) {
if(rowmem[k] != mtx[c * dim + k])
printf("Column %d in block %d not copied correctly :(\n",
c, blockDim.x);
dest[c * dim + r] += mtx[r * dim + k] * rowmem[k];
}
for(int k = maxcache; k < dim; k++) {
dest[c * dim + r] += mtx[r * dim + k] * mtx[c * dim + k];
}
}
else {
for(int k = 0; k < dim; k++) {
dest[c * dim + r] += mtx[r * dim + k] * mtx[c * dim + k];
}
}
dest[r * dim + c] = dest[c * dim + r];
}
}
__global__ void AATransSmall(mtxel *mtx, mtxel *dest, int dim)
{
/* Naive implementation. Rather slow, even with small matrices */
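  /* One thread per output element: thread t computes entry
   * (row t % dim, column t / dim) of A * A^T. */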
int t = blockDim.x * blockIdx.x + threadIdx.x;
int c = t / dim;
int r = t % dim;
if(c >= 0 && c < dim && r >= 0 && r < dim) {
dest[c * dim + r] = 0.0;
for(int k = 0; k < dim; k++)
dest[c * dim + r] += mtx[r * dim + k] * mtx[c * dim + k];
}
}
hipDeviceProp_t dev;
void computeCUDA(mtxel *hostmtx, mtxel *dest, int dim)
{
if(dim == 1) {
printf("0.000000, ");
return;
}
mtxel *devmtx, *devdest;
hipMalloc(&devmtx, sizeof(mtxel[dim * dim]));
hipMalloc(&devdest, sizeof(mtxel[dim * dim]));
if(!devmtx || !devdest)
return;
hipMemset(devdest, 0, sizeof(mtxel[dim * dim]));
hipMemcpy(devmtx, hostmtx, sizeof(mtxel[dim * dim]), hipMemcpyHostToDevice);
/* blksize is the number of rows and columns a thread works with */
int blksize = 1;
/* We want to keep all processors busy, so use some number of
* blocks higher than the number of processors.
* 4 seems to be the magic number, after which performance doesn't
* significantly improve.
*/
int blocks = dim * (dim + 1) / 2;
/* Now calculate the size of the blocks each thread works with,
* and add one extra block for the common case
*/
while(blocks > dev.multiProcessorCount &&
blksize * 2 < dev.maxThreadsPerBlock) {
blksize *= 2;
blocks /= 2;
}
blocks++;
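  /* For instance, with dim = 1024 (524,800 lower-triangular entries) on a
   * hypothetical device with 16 multiprocessors and 1024 threads per block,
   * the loop stops at blksize = 512, leaving 524,800 / 512 = 1,025 blocks,
   * bumped to 1,026 by the increment above. */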
/* There are issues with using all the shared memory (not unexpected),
* so use a large fraction of it instead
*/
dev.sharedMemPerBlock = dev.sharedMemPerBlock * 3 / 4;
struct timeval t1, t2, elapsed;
gettimeofday(&t1, NULL);
/* Note that performance metrics must be collected
* with CUDA_LAUNCH_BLOCKING set
*/
hipLaunchKernelGGL(( AATrans) , dim3(blocks), dim3(blksize), dev.sharedMemPerBlock , 0,
devmtx, devdest, dim, dev.sharedMemPerBlock / sizeof(mtxel));
hipError_t err = hipGetLastError();
if(err != hipSuccess) {
printf("CUDA Error %d: %s\n", err, hipGetErrorString(err));
}
gettimeofday(&t2, NULL);
timersub(&t2, &t1, &elapsed);
printf("%d.%06d, ",
elapsed.tv_sec, elapsed.tv_usec);
hipMemcpy(dest, devdest, sizeof(mtxel[dim * dim]), hipMemcpyDeviceToHost);
hipFree(devmtx);
hipFree(devdest);
}
void checkCUBLAS(hipblasStatus_t err, const char *event)
{
switch(err) {
case HIPBLAS_STATUS_SUCCESS:
break;
default:
printf("Unknown error %d! %s\n", err, event);
}
}
void computeCUBLAS(mtxel *mtx, mtxel *dest, int dim)
{
hipblasStatus_t err;
mtxel *devmtx1, *devdest;
err = hipblasAlloc(dim * dim, sizeof(mtxel), (void **)&devmtx1);
checkCUBLAS(err, "Allocated dev matrix 1");
err = hipblasAlloc(dim * dim, sizeof(mtxel), (void **)&devdest);
checkCUBLAS(err, "Allocated dev dest matrix");
err = hipblasSetMatrix(dim, dim, sizeof(mtxel), (void *)mtx, dim, (void *)devmtx1, dim);
checkCUBLAS(err, "Set dev matrix 1");
struct timeval t1, t2, elapsed;
gettimeofday(&t1, NULL);
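  /* The host matrix is row-major while cuBLAS assumes column-major storage,
   * so the device buffer is effectively A^T; multiplying it against itself
   * with ops 'T','N' therefore yields A * A^T (symmetric, so the result reads
   * the same in either layout). */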
hipblasDgemm('T', 'N', dim, dim, dim, 1.0,
devmtx1, dim, devmtx1, dim, 0.0, devdest, dim);
err = hipblasGetError();
checkCUBLAS(err, "Multiplied matrix");
err = hipblasGetMatrix(dim, dim, sizeof(mtxel), (void *)devdest, dim, dest, dim);
gettimeofday(&t2, NULL);
timersub(&t2, &t1, &elapsed);
printf("%d.%06d, ",
elapsed.tv_sec, elapsed.tv_usec);
checkCUBLAS(err, "Got matrix");
hipblasFree(devmtx1);
hipblasFree(devdest);
}
int initCUDA()
{
/* Make certain we have a CUDA capable machine */
int count = 0;
hipGetDeviceCount(&count);
if(count == 0) {
return 1;
}
/* Find out some information about it.
* Require at least compute 2.0
*/
hipSetDevice(0);
hipGetDeviceProperties(&dev, 0);
if(dev.major < 2) {
return 2;
}
/* Make a call to a CUDA function so initialization time
* isn't included in our computeCUDA time calculation
*/
void *mem = NULL;
hipMalloc(&mem, 0);
if(mem)
hipFree(mem);
/* Similarly for CUBLAS */
hipblasInit();
return 0;
}
void shutdownCUDA()
{
hipblasShutdown();
}
| cdda0345b54909b9a099bb1d7b7b33541fc14a6c.cu |
#include <cuda.h>
#include <cublas.h>
#include <sys/time.h>
#include "pa2.h"
/* For accurate performance metrics, run with CUDA_LAUNCH_BLOCKING="1" */
__global__ void AATrans(mtxel *mtx, mtxel *dest, int dim, int maxcache)
{
int base = blockDim.x * blockIdx.x;
int t = base + threadIdx.x;
/* Calculate the column the thread is working in.
* We are only computing half the matrix,
* since the matrix is symmetric along the diagonal.
* For a 4x4 matrix, the thread assignment looks as follows:
* 1 2 3 4
* 2 5 6 7
* 3 6 8 9
* 4 7 9 A
*/
int c = floor((1+2*dim-sqrtf(1+4*dim+4*dim*dim-8*t))/2);
/* The row follows from the column */
int r = t - c * dim + c * (c - 1) / 2 + c;
DBGPRINT("Thread %d Initial Position: (%d, %d) with "
"dim %d, blocksize %d, and cache %d\n",
t, r, c, dim, blockDim.x, maxcache);
int basec = floor((1+2*dim-sqrtf(1+4*dim+4*dim*dim-8*base))/2);
/* rowmem just gives fast access to a single row
* The first blksize arrays are for the rows of the matrix at r
* The second blksize arrays are for the rows of the matrix at c
*/
extern __shared__ mtxel rowmem[];
int totlen = min(dim, maxcache);
int run = totlen / blockDim.x;
run++;
int offset = run * threadIdx.x;
run++;
for(int i = offset; i - offset < run && i < totlen; i++)
rowmem[i] = mtx[c * dim + i];
__syncthreads();
/* Compute A A^T */
if(c >= 0 && c < dim &&
r >= 0 && r < dim) {
dest[c * dim + r] = 0.0;
/* Determine if the thread has access to the local memory */
if(basec == c) {
/* Move our current column into fast shared memory */
for(int k = 0; k < dim && k < maxcache; k++) {
if(rowmem[k] != mtx[c * dim + k])
printf("Column %d in block %d not copied correctly :(\n",
c, blockDim.x);
dest[c * dim + r] += mtx[r * dim + k] * rowmem[k];
}
for(int k = maxcache; k < dim; k++) {
dest[c * dim + r] += mtx[r * dim + k] * mtx[c * dim + k];
}
}
else {
for(int k = 0; k < dim; k++) {
dest[c * dim + r] += mtx[r * dim + k] * mtx[c * dim + k];
}
}
dest[r * dim + c] = dest[c * dim + r];
}
}
__global__ void AATransSmall(mtxel *mtx, mtxel *dest, int dim)
{
/* Naive implementation. Rather slow, even with small matrices */
int t = blockDim.x * blockIdx.x + threadIdx.x;
int c = t / dim;
int r = t % dim;
if(c >= 0 && c < dim && r >= 0 && r < dim) {
dest[c * dim + r] = 0.0;
for(int k = 0; k < dim; k++)
dest[c * dim + r] += mtx[r * dim + k] * mtx[c * dim + k];
}
}
cudaDeviceProp dev;
void computeCUDA(mtxel *hostmtx, mtxel *dest, int dim)
{
if(dim == 1) {
printf("0.000000, ");
return;
}
mtxel *devmtx, *devdest;
cudaMalloc(&devmtx, sizeof(mtxel[dim * dim]));
cudaMalloc(&devdest, sizeof(mtxel[dim * dim]));
if(!devmtx || !devdest)
return;
cudaMemset(devdest, 0, sizeof(mtxel[dim * dim]));
cudaMemcpy(devmtx, hostmtx, sizeof(mtxel[dim * dim]), cudaMemcpyHostToDevice);
/* blksize is the number of rows and columns a thread works with */
int blksize = 1;
/* We want to keep all processors busy, so use some number of
* blocks higher than the number of processors.
* 4 seems to be the magic number, after which performance doesn't
* significantly improve.
*/
int blocks = dim * (dim + 1) / 2;
/* Now calculate the size of the blocks each thread works with,
* and add one extra block for the common case
*/
while(blocks > dev.multiProcessorCount &&
blksize * 2 < dev.maxThreadsPerBlock) {
blksize *= 2;
blocks /= 2;
}
blocks++;
/* There are issues with using all the shared memory (not unexpected),
* so use a large fraction of it instead
*/
dev.sharedMemPerBlock = dev.sharedMemPerBlock * 3 / 4;
struct timeval t1, t2, elapsed;
gettimeofday(&t1, NULL);
/* Note that performance metrics must be collected
* with CUDA_LAUNCH_BLOCKING set
*/
AATrans <<< blocks, blksize, dev.sharedMemPerBlock >>>
(devmtx, devdest, dim, dev.sharedMemPerBlock / sizeof(mtxel));
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess) {
printf("CUDA Error %d: %s\n", err, cudaGetErrorString(err));
}
gettimeofday(&t2, NULL);
timersub(&t2, &t1, &elapsed);
printf("%d.%06d, ",
elapsed.tv_sec, elapsed.tv_usec);
cudaMemcpy(dest, devdest, sizeof(mtxel[dim * dim]), cudaMemcpyDeviceToHost);
cudaFree(devmtx);
cudaFree(devdest);
}
void checkCUBLAS(cublasStatus_t err, const char *event)
{
switch(err) {
case CUBLAS_STATUS_SUCCESS:
break;
default:
printf("Unknown error %d! %s\n", err, event);
}
}
void computeCUBLAS(mtxel *mtx, mtxel *dest, int dim)
{
cublasStatus_t err;
mtxel *devmtx1, *devdest;
err = cublasAlloc(dim * dim, sizeof(mtxel), (void **)&devmtx1);
checkCUBLAS(err, "Allocated dev matrix 1");
err = cublasAlloc(dim * dim, sizeof(mtxel), (void **)&devdest);
checkCUBLAS(err, "Allocated dev dest matrix");
err = cublasSetMatrix(dim, dim, sizeof(mtxel), (void *)mtx, dim, (void *)devmtx1, dim);
checkCUBLAS(err, "Set dev matrix 1");
struct timeval t1, t2, elapsed;
gettimeofday(&t1, NULL);
cublasDgemm('T', 'N', dim, dim, dim, 1.0,
devmtx1, dim, devmtx1, dim, 0.0, devdest, dim);
err = cublasGetError();
checkCUBLAS(err, "Multiplied matrix");
err = cublasGetMatrix(dim, dim, sizeof(mtxel), (void *)devdest, dim, dest, dim);
gettimeofday(&t2, NULL);
timersub(&t2, &t1, &elapsed);
printf("%d.%06d, ",
elapsed.tv_sec, elapsed.tv_usec);
checkCUBLAS(err, "Got matrix");
cublasFree(devmtx1);
cublasFree(devdest);
}
int initCUDA()
{
/* Make certain we have a CUDA capable machine */
int count = 0;
cudaGetDeviceCount(&count);
if(count == 0) {
return 1;
}
/* Find out some information about it.
* Require at least compute 2.0
*/
cudaSetDevice(0);
cudaGetDeviceProperties(&dev, 0);
if(dev.major < 2) {
return 2;
}
/* Make a call to a CUDA function so initialization time
* isn't included in our computeCUDA time calculation
*/
void *mem = NULL;
cudaMalloc(&mem, 0);
if(mem)
cudaFree(mem);
/* Similarly for CUBLAS */
cublasInit();
return 0;
}
void shutdownCUDA()
{
cublasShutdown();
}
|
array_index_select.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2019 by Contributors
 * \file array/cuda/array_index_select.cu
* \brief Array index select GPU implementation
*/
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "../../cuda_utils.h"
namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {
template <typename DType, typename IdType>
__global__ void _IndexSelectKernel(const DType* array, const IdType* index,
int64_t length, DType* out) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int stride_x = gridDim.x * blockDim.x;
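  // Grid-stride loop: thread tx also handles tx + stride_x, tx + 2*stride_x,
  // ..., so any launch configuration covers the whole index range.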
while (tx < length) {
out[tx] = array[index[tx]];
tx += stride_x;
}
}
template<DLDeviceType XPU, typename DType, typename IdType>
NDArray IndexSelect(NDArray array, IdArray index) {
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
const DType* array_data = static_cast<DType*>(array->data);
const IdType* idx_data = static_cast<IdType*>(index->data);
const int64_t arr_len = array->shape[0];
const int64_t len = index->shape[0];
NDArray ret = NDArray::Empty({len}, array->dtype, array->ctx);
if (len == 0)
return ret;
DType* ret_data = static_cast<DType*>(ret->data);
const int nt = cuda::FindNumThreads(len);
const int nb = (len + nt - 1) / nt;
hipLaunchKernelGGL(( _IndexSelectKernel), dim3(nb), dim3(nt), 0, thr_entry->stream, array_data, idx_data, len, ret_data);
return ret;
}
template NDArray IndexSelect<kDLGPU, int32_t, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, int32_t, int64_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, int64_t, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, int64_t, int64_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, float, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, float, int64_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, double, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, double, int64_t>(NDArray, IdArray);
template <DLDeviceType XPU, typename DType>
DType IndexSelect(NDArray array, uint64_t index) {
auto device = runtime::DeviceAPI::Get(array->ctx);
DType ret = 0;
device->CopyDataFromTo(
static_cast<DType*>(array->data) + index, 0, &ret, 0,
sizeof(DType), array->ctx, DLContext{kDLCPU, 0},
array->dtype, nullptr);
return ret;
}
template int32_t IndexSelect<kDLGPU, int32_t>(NDArray array, uint64_t index);
template int64_t IndexSelect<kDLGPU, int64_t>(NDArray array, uint64_t index);
template uint32_t IndexSelect<kDLGPU, uint32_t>(NDArray array, uint64_t index);
template uint64_t IndexSelect<kDLGPU, uint64_t>(NDArray array, uint64_t index);
template float IndexSelect<kDLGPU, float>(NDArray array, uint64_t index);
template double IndexSelect<kDLGPU, double>(NDArray array, uint64_t index);
} // namespace impl
} // namespace aten
} // namespace dgl
| array_index_select.cu | /*!
* Copyright (c) 2019 by Contributors
* \file array/cpu/array_index_select.cu
* \brief Array index select GPU implementation
*/
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "../../cuda_utils.h"
namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {
template <typename DType, typename IdType>
__global__ void _IndexSelectKernel(const DType* array, const IdType* index,
int64_t length, DType* out) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int stride_x = gridDim.x * blockDim.x;
while (tx < length) {
out[tx] = array[index[tx]];
tx += stride_x;
}
}
template<DLDeviceType XPU, typename DType, typename IdType>
NDArray IndexSelect(NDArray array, IdArray index) {
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
const DType* array_data = static_cast<DType*>(array->data);
const IdType* idx_data = static_cast<IdType*>(index->data);
const int64_t arr_len = array->shape[0];
const int64_t len = index->shape[0];
NDArray ret = NDArray::Empty({len}, array->dtype, array->ctx);
if (len == 0)
return ret;
DType* ret_data = static_cast<DType*>(ret->data);
const int nt = cuda::FindNumThreads(len);
const int nb = (len + nt - 1) / nt;
_IndexSelectKernel<<<nb, nt, 0, thr_entry->stream>>>(array_data, idx_data, len, ret_data);
return ret;
}
template NDArray IndexSelect<kDLGPU, int32_t, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, int32_t, int64_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, int64_t, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, int64_t, int64_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, float, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, float, int64_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, double, int32_t>(NDArray, IdArray);
template NDArray IndexSelect<kDLGPU, double, int64_t>(NDArray, IdArray);
template <DLDeviceType XPU, typename DType>
DType IndexSelect(NDArray array, uint64_t index) {
auto device = runtime::DeviceAPI::Get(array->ctx);
DType ret = 0;
device->CopyDataFromTo(
static_cast<DType*>(array->data) + index, 0, &ret, 0,
sizeof(DType), array->ctx, DLContext{kDLCPU, 0},
array->dtype, nullptr);
return ret;
}
template int32_t IndexSelect<kDLGPU, int32_t>(NDArray array, uint64_t index);
template int64_t IndexSelect<kDLGPU, int64_t>(NDArray array, uint64_t index);
template uint32_t IndexSelect<kDLGPU, uint32_t>(NDArray array, uint64_t index);
template uint64_t IndexSelect<kDLGPU, uint64_t>(NDArray array, uint64_t index);
template float IndexSelect<kDLGPU, float>(NDArray array, uint64_t index);
template double IndexSelect<kDLGPU, double>(NDArray array, uint64_t index);
} // namespace impl
} // namespace aten
} // namespace dgl
|
6762e4bdffbfa776e633084d2995072b1d38f773.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/ztrtri_diag.cu normal z -> d, Tue Feb 9 16:05:34 2016
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named dtrtri_diag.cu to avoid name conflict with src/dtrtri.o
in the library. The actual kernels are in dtrtri_lower.cu and dtrtri_upper.cu
*/
#include "magma_internal.h"
#include "dtrtri.cuh"
/**
Inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in dtrsm.
Same as dtrtri_diag, but adds queue argument.
@ingroup magma_dblas3
********************************************************************/
/**
Purpose
-------
dtrtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA DOUBLE PRECISION array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
d_dinvA DOUBLE PRECISION array of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_dblas3
********************************************************************/
extern "C" void
magmablas_dtrtri_diag_q(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_ptr d_dinvA,
magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
hipMemset( d_dinvA, 0, magma_roundup( n, NB )*NB * sizeof(double) );
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
hipLaunchKernelGGL(( dtrtri_diag_lower_kernel)
, dim3(nblocks), dim3(IB), 0, queue->cuda_stream() ,
diag, n, dA, ldda, d_dinvA );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
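        // For example (illustrative sizes): with n = 256 and jb = 16, kb = 32,
        // npages = ceildiv(256, 32) = 8, threads = (4, 4), and
        // grid = ( 16/(4*4), 8*(16/16) ) = (1, 8).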
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_dgemm16_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_dgemm16_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_dgemm32_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_dgemm32_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_dgemm64_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_dgemm64_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_dgemm_above64_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_dgemm_above64_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_dgemm_above64_part3_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
hipLaunchKernelGGL(( dtrtri_diag_upper_kernel)
, dim3(nblocks), dim3(IB), 0, queue->cuda_stream() ,
diag, n, dA, ldda, d_dinvA );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_dgemm16_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_dgemm16_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_dgemm32_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_dgemm32_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_dgemm64_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_dgemm64_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_dgemm_above64_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_dgemm_above64_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_dgemm_above64_part3_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
| 6762e4bdffbfa776e633084d2995072b1d38f773.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/ztrtri_diag.cu normal z -> d, Tue Feb 9 16:05:34 2016
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named dtrtri_diag.cu to avoid name conflict with src/dtrtri.o
in the library. The actual kernels are in dtrtri_lower.cu and dtrtri_upper.cu
*/
#include "magma_internal.h"
#include "dtrtri.cuh"
/**
Inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in dtrsm.
Same as dtrtri_diag, but adds queue argument.
@ingroup magma_dblas3
********************************************************************/
/**
Purpose
-------
dtrtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA DOUBLE PRECISION array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
d_dinvA DOUBLE PRECISION array of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_dblas3
********************************************************************/
extern "C" void
magmablas_dtrtri_diag_q(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_ptr d_dinvA,
magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
cudaMemset( d_dinvA, 0, magma_roundup( n, NB )*NB * sizeof(double) );
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dtrtri_diag_lower_kernel
<<< nblocks, IB, 0, queue->cuda_stream() >>>
( diag, n, dA, ldda, d_dinvA );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
triple_dgemm16_part1_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_dgemm16_part2_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
triple_dgemm32_part1_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_dgemm32_part2_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
triple_dgemm64_part1_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_dgemm64_part2_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
default:
triple_dgemm_above64_part1_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_dgemm_above64_part2_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_dgemm_above64_part3_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
dtrtri_diag_upper_kernel
<<< nblocks, IB, 0, queue->cuda_stream() >>>
( diag, n, dA, ldda, d_dinvA );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
triple_dgemm16_part1_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_dgemm16_part2_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
triple_dgemm32_part1_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_dgemm32_part2_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
triple_dgemm64_part1_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_dgemm64_part2_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
default:
triple_dgemm_above64_part1_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_dgemm_above64_part2_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_dgemm_above64_part3_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
|
ccc4b76dc88c48af8d46ceefce306384e473c07d.hip | // !!! This is a file automatically generated by hipify!!!
/* ************************************************************************** */
/* */
/* ::: :::::::: */
/* gpu_errchk.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: jwalsh <[email protected]> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2017/06/04 15:30:53 by jwalsh #+# #+# */
/* Updated: 2017/06/05 10:57:14 by jwalsh ### ########.fr */
/* */
/* ************************************************************************** */
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include "rt.cuh"
/*
** Checks a cuda function return value for errors and exits with an error
** message.
*/
__host__
void gpu_errchk(int code)
{
int l;
const char *s;
s = hipGetErrorString((hipError_t)code);
l = strlen(s);
if (code != hipSuccess)
{
write(2, "\e[1;93mGPUassert: ", 17);
write(1, "test\n", 5);
write(2, s, l);
write(2, "\e[0m\n", 5);
exit(code);
}
}
| ccc4b76dc88c48af8d46ceefce306384e473c07d.cu | /* ************************************************************************** */
/* */
/* ::: :::::::: */
/* gpu_errchk.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: jwalsh <[email protected]> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2017/06/04 15:30:53 by jwalsh #+# #+# */
/* Updated: 2017/06/05 10:57:14 by jwalsh ### ########.fr */
/* */
/* ************************************************************************** */
#include <cuda_runtime.h>
#include <stdlib.h>
#include "rt.cuh"
/*
** Checks a cuda function return value for errors and exits with an error
** message.
*/
__host__
void gpu_errchk(int code)
{
int l;
const char *s;
s = cudaGetErrorString((cudaError_t)code);
l = strlen(s);
if (code != cudaSuccess)
{
write(2, "\e[1;93mGPUassert: ", 17);
write(1, "test\n", 5);
write(2, s, l);
write(2, "\e[0m\n", 5);
exit(code);
}
}
|
4c5af6cead3742c96810fe73eb781c6ed09bf068.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "IntegratorHPMCMonoImplicitGPU.cuh"
namespace hpmc
{
namespace detail
{
/*! \file IntegratorHPMCMonoImplicitGPU.cu
\brief Definition of CUDA kernels and drivers for IntegratorHPMCMonoImplicit
*/
//! Kernel to compute the configurational bias weights
__global__ void gpu_implicit_compute_weights_kernel(unsigned int n_overlaps,
unsigned int *d_n_success_forward,
unsigned int *d_n_overlap_shape_forward,
unsigned int *d_n_success_reverse,
unsigned int *d_n_overlap_shape_reverse,
float *d_lnb,
unsigned int *d_n_success_zero,
unsigned int *d_depletant_active_cell)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx >= n_overlaps)
return;
unsigned int n_success_forward = d_n_success_forward[idx];
// we use float for probability
float lnb(0.0);
if (n_success_forward != 0)
{
lnb = logf((Scalar)n_success_forward/(Scalar)d_n_overlap_shape_forward[idx]);
lnb -= logf((Scalar)d_n_success_reverse[idx]/(Scalar)d_n_overlap_shape_reverse[idx]);
}
else
{
// flag that the argument is zero
d_n_success_zero[d_depletant_active_cell[idx]] = 1;
}
// write out result
d_lnb[idx] = lnb;
}
//! Set up cuRAND for the maximum kernel parameters
__global__ void gpu_curand_implicit_setup(unsigned int n_rng,
unsigned int seed,
unsigned int timestep,
hiprandState_t *d_state)
{
// one active cell per thread block
unsigned int irng = blockIdx.x*blockDim.x + threadIdx.x;
if (irng >= n_rng) return;
hiprand_init((unsigned long long)seed+(unsigned long long)irng, (unsigned long long)timestep, 0, &d_state[irng]);
}
}; // end namespace detail
} // end namespace hpmc
| 4c5af6cead3742c96810fe73eb781c6ed09bf068.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "IntegratorHPMCMonoImplicitGPU.cuh"
namespace hpmc
{
namespace detail
{
/*! \file IntegratorHPMCMonoImplicitGPU.cu
\brief Definition of CUDA kernels and drivers for IntegratorHPMCMonoImplicit
*/
//! Kernel to compute the configurational bias weights
__global__ void gpu_implicit_compute_weights_kernel(unsigned int n_overlaps,
unsigned int *d_n_success_forward,
unsigned int *d_n_overlap_shape_forward,
unsigned int *d_n_success_reverse,
unsigned int *d_n_overlap_shape_reverse,
float *d_lnb,
unsigned int *d_n_success_zero,
unsigned int *d_depletant_active_cell)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx >= n_overlaps)
return;
unsigned int n_success_forward = d_n_success_forward[idx];
// we use float for probability
float lnb(0.0);
if (n_success_forward != 0)
{
lnb = logf((Scalar)n_success_forward/(Scalar)d_n_overlap_shape_forward[idx]);
lnb -= logf((Scalar)d_n_success_reverse[idx]/(Scalar)d_n_overlap_shape_reverse[idx]);
}
else
{
// flag that the argument is zero
d_n_success_zero[d_depletant_active_cell[idx]] = 1;
}
// write out result
d_lnb[idx] = lnb;
}
//! Set up cuRAND for the maximum kernel parameters
__global__ void gpu_curand_implicit_setup(unsigned int n_rng,
unsigned int seed,
unsigned int timestep,
curandState_t *d_state)
{
// one active cell per thread block
unsigned int irng = blockIdx.x*blockDim.x + threadIdx.x;
if (irng >= n_rng) return;
curand_init((unsigned long long)seed+(unsigned long long)irng, (unsigned long long)timestep, 0, &d_state[irng]);
}
}; // end namespace detail
} // end namespace hpmc
|
5b7bb92dd61ad95da87586ab211107d196218920.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2018 tsooBGX contributors
*/
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <tsoobgx/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "../common/common.h"
#include "../common/compressed_iterator.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/host_device_vector.h"
#include "../common/timer.h"
#include "../common/span.h"
#include "param.h"
#include "updater_gpu_common.cuh"
namespace tsoobgx {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public dmlc::Parameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
// number of rows in a single GPU batch
int gpu_batch_nrows;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(gpu_batch_nrows)
.set_lower_bound(-1)
.set_default(0)
.describe("Number of rows in a GPU batch, used for finding quantiles on GPU; "
"-1 to use all rows assignted to a GPU, and 0 to auto-deduce");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry() = default;
ExpandEntry(int nid, int depth, DeviceSplitCandidate split,
uint64_t timestamp)
: nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) {
return false;
}
if (param.max_depth > 0 && depth == param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false;
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
inline static bool DepthWise(ExpandEntry lhs, ExpandEntry rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(ExpandEntry lhs, ExpandEntry rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
// Find the gidx value for a given feature; return -1 if the feature is not found
__forceinline__ __device__ int BinarySearchRow(
bst_uint begin, bst_uint end,
common::CompressedIterator<uint32_t> data,
int const fidx_begin, int const fidx_end) {
bst_uint previous_middle = UINT32_MAX;
while (end != begin) {
auto middle = begin + (end - begin) / 2;
if (middle == previous_middle) {
break;
}
previous_middle = middle;
auto gidx = data[middle];
if (gidx >= fidx_begin && gidx < fidx_end) {
return gidx;
} else if (gidx < fidx_begin) {
begin = middle;
} else {
end = middle;
}
}
// Value is missing
return -1;
}
/** \brief Struct for accessing and manipulating an ellpack matrix on the
* device. Does not own underlying memory and may be trivially copied into
* kernels.*/
struct ELLPackMatrix {
common::Span<uint32_t> feature_segments;
/*! \brief minimum value for each feature. */
common::Span<bst_float> min_fvalue;
/*! \brief Cut. */
common::Span<bst_float> gidx_fvalue_map;
/*! \brief row length for ELLPack. */
size_t row_stride{0};
common::CompressedIterator<uint32_t> gidx_iter;
bool is_dense;
int null_gidx_value;
TSOOBGX_DEVICE size_t BinCount() const { return gidx_fvalue_map.size(); }
// Get a matrix element, uses binary search for look up
// Return NaN if missing
__device__ bst_float GetElement(size_t ridx, size_t fidx) const {
auto row_begin = row_stride * ridx;
auto row_end = row_begin + row_stride;
auto gidx = -1;
if (is_dense) {
gidx = gidx_iter[row_begin + fidx];
} else {
gidx =
BinarySearchRow(row_begin, row_end, gidx_iter, feature_segments[fidx],
feature_segments[fidx + 1]);
}
if (gidx == -1) {
return nan("");
}
return gidx_fvalue_map[gidx];
}
void Init(common::Span<uint32_t> feature_segments,
common::Span<bst_float> min_fvalue,
common::Span<bst_float> gidx_fvalue_map, size_t row_stride,
common::CompressedIterator<uint32_t> gidx_iter, bool is_dense,
int null_gidx_value) {
this->feature_segments = feature_segments;
this->min_fvalue = min_fvalue;
this->gidx_fvalue_map = gidx_fvalue_map;
this->row_stride = row_stride;
this->gidx_iter = gidx_iter;
this->is_dense = is_dense;
this->null_gidx_value = null_gidx_value;
}
};
// Loss change for a split with monotone constraints: the missing-value bucket
// is tried on both the left and the right side, and the better direction is
// reported through missing_left_out.
template <typename GradientPairT>
TSOOBGX_DEVICE float inline LossChangeMissing(
const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum,
const float& parent_gain, const GPUTrainingParam& param, int constraint,
const ValueConstraint& value_constraint,
bool& missing_left_out) { // NOLINT
float missing_left_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan + missing),
GradStats(parent_sum - (scan + missing)));
float missing_right_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan), GradStats(parent_sum - scan));
if (missing_left_gain >= missing_right_gain) {
missing_left_out = true;
return missing_left_gain - parent_gain;
} else {
missing_left_out = false;
return missing_right_gain - parent_gain;
}
}
/*!
 * \brief Sum all histogram bins belonging to one feature across the block.
 *
 * \tparam ReduceT BlockReduce type.
 * \tparam TempStorageT Cub shared memory storage type.
 *
 * \param feature_histogram Histogram bins of the feature being reduced.
 * \param temp_storage Shared memory for intermediate results.
 */
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT>
__device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram,
TempStorageT* temp_storage) {
__shared__ cub::Uninitialized<GradientSumT> uninitialized_sum;
GradientSumT& shared_sum = uninitialized_sum.Alias();
GradientSumT local_sum = GradientSumT();
// For loop sums features into one block size
auto begin = feature_histogram.data();
auto end = begin + feature_histogram.size();
for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
bool thread_active = itr + threadIdx.x < end;
// Scan histogram
GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT();
local_sum += bin;
}
local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, hipcub::Sum());
// Reduction result is stored in thread 0.
if (threadIdx.x == 0) {
shared_sum = local_sum;
}
__syncthreads();
return shared_sum;
}
/*! \brief Find the thread with best gain. */
template <int BLOCK_THREADS, typename ReduceT, typename ScanT,
typename MaxReduceT, typename TempStorageT, typename GradientSumT>
__device__ void EvaluateFeature(
int fidx, common::Span<const GradientSumT> node_histogram,
const ELLPackMatrix& matrix,
DeviceSplitCandidate* best_split, // shared memory storing best split
const DeviceNodeStats& node, const GPUTrainingParam& param,
TempStorageT* temp_storage, // temp memory for cub operations
int constraint, // monotonic_constraints
const ValueConstraint& value_constraint) {
// Use pointer from cut to indicate begin and end of bins for each feature.
  uint32_t gidx_begin = matrix.feature_segments[fidx]; // beginning bin
uint32_t gidx_end =
matrix.feature_segments[fidx + 1]; // end bin for i^th feature
// Sum histogram bins for current feature
GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>(
node_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage);
GradientSumT const parent_sum = GradientSumT(node.sum_gradients);
GradientSumT const missing = parent_sum - feature_sum;
float const null_gain = -std::numeric_limits<bst_float>::infinity();
SumCallbackOp<GradientSumT> prefix_op =
SumCallbackOp<GradientSumT>();
for (int scan_begin = gidx_begin; scan_begin < gidx_end;
scan_begin += BLOCK_THREADS) {
bool thread_active = (scan_begin + threadIdx.x) < gidx_end;
// Gradient value for current bin.
GradientSumT bin =
thread_active ? node_histogram[scan_begin + threadIdx.x] : GradientSumT();
ScanT(temp_storage->scan).ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op);
// Whether the gradient of missing values is put to the left side.
bool missing_left = true;
float gain = null_gain;
if (thread_active) {
gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param,
constraint, value_constraint, missing_left);
}
__syncthreads();
// Find thread with best gain
hipcub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
hipcub::KeyValuePair<int, float> best =
MaxReduceT(temp_storage->max_reduce).Reduce(tuple, hipcub::ArgMax());
__shared__ hipcub::KeyValuePair<int, float> block_max;
if (threadIdx.x == 0) {
block_max = best;
}
__syncthreads();
// Best thread updates split
if (threadIdx.x == block_max.key) {
int split_gidx = (scan_begin + threadIdx.x) - 1;
float fvalue;
if (split_gidx < static_cast<int>(gidx_begin)) {
fvalue = matrix.min_fvalue[fidx];
} else {
fvalue = matrix.gidx_fvalue_map[split_gidx];
}
GradientSumT left = missing_left ? bin + missing : bin;
GradientSumT right = parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue,
fidx, GradientPair(left), GradientPair(right), param);
}
__syncthreads();
}
}
template <int BLOCK_THREADS, typename GradientSumT>
__global__ void EvaluateSplitKernel(
common::Span<const GradientSumT>
node_histogram, // histogram for gradients
common::Span<const int> feature_set, // Selected features
DeviceNodeStats node,
ELLPackMatrix matrix,
GPUTrainingParam gpu_param,
common::Span<DeviceSplitCandidate> split_candidates, // resulting split
ValueConstraint value_constraint,
common::Span<int> d_monotonic_constraints) {
// KeyValuePair here used as threadIdx.x -> gain_value
using ArgMaxT = hipcub::KeyValuePair<int, float>;
using BlockScanT =
hipcub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>;
using MaxReduceT = hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS>;
using SumReduceT = hipcub::BlockReduce<GradientSumT, BLOCK_THREADS>;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
// Aligned && shared storage for best_split
__shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
DeviceSplitCandidate& best_split = uninitialized_split.Alias();
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
best_split = DeviceSplitCandidate();
}
__syncthreads();
// One block for each feature. Features are sampled, so fidx != blockIdx.x
int fidx = feature_set[blockIdx.x];
int constraint = d_monotonic_constraints[fidx];
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
fidx, node_histogram, matrix, &best_split, node, gpu_param, &temp_storage,
constraint, value_constraint);
__syncthreads();
if (threadIdx.x == 0) {
// Record best loss for each feature
split_candidates[blockIdx.x] = best_split;
}
}
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
thrust::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
dh::safe_cuda(hipMemsetAsync(
data_.data().get(), 0,
data_.size() * sizeof(typename decltype(data_)::value_type)));
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
thrust::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
dh::safe_cuda(hipSetDevice(device_id_));
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
dh::safe_cuda(hipMemsetAsync(data_.data().get() + used_size, 0,
n_bins_ * sizeof(GradientSumT)));
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
dh::safe_cuda(hipMemsetAsync(data_.data().get() + old_entry.second, 0,
n_bins_ * sizeof(GradientSumT)));
nidx_map_[nidx] = old_entry.second;
}
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
size_t new_required_memory = ::max(data_.size() * 2, HistogramSize());
if (data_.size() < new_required_memory) {
data_.resize(new_required_memory);
}
}
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
TSOOBGX_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Bin each input data entry, store the bin indices in compressed form.
template<typename std::enable_if<true, int>::type = 0>
__global__ void CompressBinEllpackKernel(
common::CompressedBufferWriter wr,
common::CompressedByteT* __restrict__ buffer, // gidx_buffer
const size_t* __restrict__ row_ptrs, // row offset of input data
const Entry* __restrict__ entries, // One batch of input data
const float* __restrict__ cuts, // HistCutMatrix::cut
const uint32_t* __restrict__ cut_rows, // HistCutMatrix::row_ptrs
size_t base_row, // batch_row_begin
size_t n_rows,
// row_ptr_begin: row_offset[base_row], the start position of base_row
size_t row_ptr_begin,
size_t row_stride,
unsigned int null_gidx_value) {
size_t irow = threadIdx.x + blockIdx.x * blockDim.x;
int ifeature = threadIdx.y + blockIdx.y * blockDim.y;
if (irow >= n_rows || ifeature >= row_stride) {
return;
}
int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]);
unsigned int bin = null_gidx_value;
if (ifeature < row_length) {
Entry entry = entries[row_ptrs[irow] - row_ptr_begin + ifeature];
int feature = entry.index;
float fvalue = entry.fvalue;
// {feature_cuts, ncuts} forms the array of cuts of `feature'.
const float *feature_cuts = &cuts[cut_rows[feature]];
int ncuts = cut_rows[feature + 1] - cut_rows[feature];
    // Assign the bin for the current entry,
    // s.t. fvalue < feature_cuts[bin].
bin = dh::UpperBound(feature_cuts, ncuts, fvalue);
if (bin >= ncuts) {
bin = ncuts - 1;
}
// Add the number of bins in previous features.
bin += cut_rows[feature];
}
// Write to gidx buffer.
wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature);
}
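// Accumulate the gradient histogram for one node: partial sums are built in
// shared memory per thread block, then flushed to the global node histogram
// with atomics.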
template <typename GradientSumT>
__global__ void SharedMemHistKernel(ELLPackMatrix matrix, const bst_uint* d_ridx,
GradientSumT* d_node_hist,
const GradientPair* d_gpair,
size_t segment_begin, size_t n_elements) {
extern __shared__ char smem[];
GradientSumT* smem_arr = reinterpret_cast<GradientSumT*>(smem); // NOLINT
for (auto i :
dh::BlockStrideRange(static_cast<size_t>(0), matrix.BinCount())) {
smem_arr[i] = GradientSumT();
}
__syncthreads();
for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) {
int ridx = d_ridx[idx / matrix.row_stride + segment_begin];
int gidx = matrix.gidx_iter[ridx * matrix.row_stride + idx % matrix.row_stride];
if (gidx != matrix.null_gidx_value) {
AtomicAddGpair(smem_arr + gidx, d_gpair[ridx]);
}
}
__syncthreads();
for (auto i :
dh::BlockStrideRange(static_cast<size_t>(0), matrix.BinCount())) {
AtomicAddGpair(d_node_hist + i, smem_arr[i]);
}
}
struct Segment {
size_t begin;
size_t end;
Segment() : begin{0}, end{0} {}
Segment(size_t begin, size_t end) : begin(begin), end(end) {
CHECK_GE(end, begin);
}
size_t Size() const { return end - begin; }
};
/** \brief Returns a one if the left node index is encountered, otherwise return
* zero. */
struct IndicateLeftTransform {
int left_nidx;
explicit IndicateLeftTransform(int left_nidx) : left_nidx(left_nidx) {}
__host__ __device__ __forceinline__ int operator()(const int& x) const {
return x == left_nidx ? 1 : 0;
}
};
/**
* \brief Optimised routine for sorting key value pairs into left and right
* segments. Based on a single pass of exclusive scan, uses iterators to
* redirect inputs and outputs.
*/
inline void SortPosition(dh::CubMemory* temp_memory, common::Span<int> position,
common::Span<int> position_out, common::Span<bst_uint> ridx,
common::Span<bst_uint> ridx_out, int left_nidx,
int right_nidx, int64_t* d_left_count,
hipStream_t stream = nullptr) {
auto d_position_out = position_out.data();
auto d_position_in = position.data();
auto d_ridx_out = ridx_out.data();
auto d_ridx_in = ridx.data();
auto write_results = [=] __device__(size_t idx, int ex_scan_result) {
int scatter_address;
if (d_position_in[idx] == left_nidx) {
scatter_address = ex_scan_result;
} else {
scatter_address = (idx - ex_scan_result) + *d_left_count;
}
d_position_out[scatter_address] = d_position_in[idx];
d_ridx_out[scatter_address] = d_ridx_in[idx];
}; // NOLINT
IndicateLeftTransform conversion_op(left_nidx);
hipcub::TransformInputIterator<int, IndicateLeftTransform, int*> in_itr(
d_position_in, conversion_op);
dh::DiscardLambdaItr<decltype(write_results)> out_itr(write_results);
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::ExclusiveSum(nullptr, temp_storage_bytes, in_itr, out_itr,
position.size(), stream);
temp_memory->LazyAllocate(temp_storage_bytes);
hipcub::DeviceScan::ExclusiveSum(temp_memory->d_temp_storage,
temp_memory->temp_storage_bytes, in_itr,
out_itr, position.size(), stream);
}
/*! \brief Count how many rows are assigned to left node. */
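// Uses a warp ballot so that only one lane per warp issues the atomicAdd of
// the popcount.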
__forceinline__ __device__ void CountLeft(int64_t* d_count, int val,
int left_nidx) {
#if __CUDACC_VER_MAJOR__ > 8
int mask = __activemask();
unsigned ballot = __ballot_sync(mask, val == left_nidx);
int leader = __ffs(mask) - 1;
if (threadIdx.x % 32 == leader) {
atomicAdd(reinterpret_cast<unsigned long long*>(d_count), // NOLINT
static_cast<unsigned long long>(__popc(ballot))); // NOLINT
}
#else
unsigned ballot = __ballot(val == left_nidx);
if (threadIdx.x % 32 == 0) {
atomicAdd(reinterpret_cast<unsigned long long*>(d_count), // NOLINT
static_cast<unsigned long long>(__popc(ballot))); // NOLINT
}
#endif
}
template <typename GradientSumT>
struct DeviceShard;
template <typename GradientSumT>
struct GPUHistBuilderBase {
public:
virtual void Build(DeviceShard<GradientSumT>* shard, int idx) = 0;
virtual ~GPUHistBuilderBase() = default;
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct DeviceShard {
int n_bins;
int device_id;
int shard_idx; // Position in the local array of shards
dh::BulkAllocator ba;
ELLPackMatrix ellpack_matrix;
/*! \brief Range of rows for each node. */
std::vector<Segment> ridx_segments;
DeviceHistogram<GradientSumT> hist;
/*! \brief row_ptr form HistCutMatrix. */
common::Span<uint32_t> feature_segments;
/*! \brief minimum value for each feature. */
common::Span<bst_float> min_fvalue;
/*! \brief Cut. */
common::Span<bst_float> gidx_fvalue_map;
/*! \brief global index of histogram, which is stored in ELLPack format. */
common::Span<common::CompressedByteT> gidx_buffer;
/*! \brief Row indices relative to this shard, necessary for sorting rows. */
dh::DoubleBuffer<bst_uint> ridx;
dh::DoubleBuffer<int> position;
/*! \brief Gradient pair for each row. */
common::Span<GradientPair> gpair;
common::Span<int> monotone_constraints;
common::Span<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
common::Span<GradientPair> node_sum_gradients_d;
/*! \brief row offset in SparsePage (the input data). */
thrust::device_vector<size_t> row_ptrs;
/*! \brief On-device feature set, only actually used on one of the devices */
thrust::device_vector<int> feature_set_d;
thrust::device_vector<int64_t>
left_counts; // Useful to keep a bunch of zeroed memory for sort position
/*! The row offset for this shard. */
bst_uint row_begin_idx;
bst_uint row_end_idx;
bst_uint n_rows;
TrainParam param;
bool prediction_cache_initialised;
dh::CubMemory temp_memory;
dh::PinnedMemory pinned_memory;
std::vector<hipStream_t> streams;
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
std::unique_ptr<GPUHistBuilderBase<GradientSumT>> hist_builder;
using ExpandQueue =
std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand;
  // TODO(canonizer): add support for multi-batch DMatrix here
DeviceShard(int _device_id, int shard_idx, bst_uint row_begin,
bst_uint row_end, TrainParam _param, uint32_t column_sampler_seed)
: device_id(_device_id),
shard_idx(shard_idx),
row_begin_idx(row_begin),
row_end_idx(row_end),
n_rows(row_end - row_begin),
n_bins(0),
param(std::move(_param)),
prediction_cache_initialised(false),
column_sampler(column_sampler_seed) {
monitor.Init(std::string("DeviceShard") + std::to_string(device_id));
}
/* Init row_ptrs and row_stride */
size_t InitRowPtrs(const SparsePage& row_batch) {
const auto& offset_vec = row_batch.offset.HostVector();
row_ptrs.resize(n_rows + 1);
thrust::copy(offset_vec.data() + row_begin_idx,
offset_vec.data() + row_end_idx + 1,
row_ptrs.begin());
auto row_iter = row_ptrs.begin();
// find the maximum row size for converting to ELLPack
auto get_size = [=] __device__(size_t row) {
return row_iter[row + 1] - row_iter[row];
}; // NOLINT
auto counting = thrust::make_counting_iterator(size_t(0));
using TransformT = thrust::transform_iterator<decltype(get_size),
decltype(counting), size_t>;
TransformT row_size_iter = TransformT(counting, get_size);
size_t row_stride = thrust::reduce(row_size_iter, row_size_iter + n_rows, 0,
thrust::maximum<size_t>());
return row_stride;
}
void InitCompressedData(
const common::HistCutMatrix& hmat, const SparsePage& row_batch, bool is_dense);
void CreateHistIndices(const SparsePage& row_batch, size_t row_stride, int null_gidx_value);
~DeviceShard() {
dh::safe_cuda(hipSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<hipStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(hipStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, int64_t num_columns) {
if (param.grow_policy == TrainParam::kLossGuide) {
qexpand.reset(new ExpandQueue(LossGuide));
} else {
qexpand.reset(new ExpandQueue(DepthWise));
}
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(hipSetDevice(device_id));
thrust::fill(
thrust::device_pointer_cast(position.Current()),
thrust::device_pointer_cast(position.Current() + position.Size()), 0);
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
if (left_counts.size() < 256) {
left_counts.resize(256);
} else {
dh::safe_cuda(hipMemsetAsync(left_counts.data().get(), 0,
sizeof(int64_t) * left_counts.size()));
}
thrust::sequence(
thrust::device_pointer_cast(ridx.CurrentSpan().data()),
thrust::device_pointer_cast(ridx.CurrentSpan().data() + ridx.Size()));
std::fill(ridx_segments.begin(), ridx_segments.end(), Segment(0, 0));
ridx_segments.front() = Segment(0, ridx.Size());
dh::safe_cuda(hipMemcpyAsync(
gpair.data(), dh_gpair->ConstDevicePointer(device_id),
gpair.size() * sizeof(GradientPair), hipMemcpyHostToHost));
SubsampleGradientPair(device_id, gpair, param.subsample, row_begin_idx);
hist.Reset();
}
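  // Evaluate candidate splits for the given nodes: one CUDA block per sampled
  // feature in EvaluateSplitKernel, then a cub reduction picks the best
  // feature split for each node.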
std::vector<DeviceSplitCandidate> EvaluateSplits(
std::vector<int> nidxs, const RegTree& tree,
size_t num_columns) {
dh::safe_cuda(hipSetDevice(device_id));
auto result = pinned_memory.GetSpan<DeviceSplitCandidate>(nidxs.size());
// Work out cub temporary memory requirement
GPUTrainingParam gpu_param(param);
DeviceSplitCandidateReduceOp op(gpu_param);
size_t temp_storage_bytes;
DeviceSplitCandidate*dummy = nullptr;
hipcub::DeviceReduce::Reduce(
nullptr, temp_storage_bytes, dummy,
dummy, num_columns, op,
DeviceSplitCandidate());
// size in terms of DeviceSplitCandidate
size_t cub_memory_size =
::ceil(static_cast<double>(temp_storage_bytes) /
sizeof(DeviceSplitCandidate));
// Allocate enough temporary memory
// Result for each nidx
// + intermediate result for each column
// + cub reduce memory
auto temp_span = temp_memory.GetSpan<DeviceSplitCandidate>(
nidxs.size() + nidxs.size() * num_columns +cub_memory_size*nidxs.size());
auto d_result_all = temp_span.subspan(0, nidxs.size());
auto d_split_candidates_all =
temp_span.subspan(d_result_all.size(), nidxs.size() * num_columns);
auto d_cub_memory_all =
temp_span.subspan(d_result_all.size() + d_split_candidates_all.size(),
cub_memory_size * nidxs.size());
auto& streams = this->GetStreams(nidxs.size());
for (auto i = 0ull; i < nidxs.size(); i++) {
auto nidx = nidxs[i];
auto p_feature_set = column_sampler.GetFeatureSet(tree.GetDepth(nidx));
p_feature_set->Shard(GPUSet(device_id, 1));
auto d_feature_set = p_feature_set->DeviceSpan(device_id);
auto d_split_candidates =
d_split_candidates_all.subspan(i * num_columns, d_feature_set.size());
DeviceNodeStats node(node_sum_gradients[nidx], nidx, param);
// One block for each feature
int constexpr kBlockThreads = 256;
hipLaunchKernelGGL(( EvaluateSplitKernel<kBlockThreads, GradientSumT>)
, dim3(uint32_t(d_feature_set.size())), dim3(kBlockThreads), 0, streams[i],
hist.GetNodeHistogram(nidx), d_feature_set, node, ellpack_matrix,
gpu_param, d_split_candidates, node_value_constraints[nidx],
monotone_constraints);
// Reduce over features to find best feature
auto d_result = d_result_all.subspan(i, 1);
auto d_cub_memory =
d_cub_memory_all.subspan(i * cub_memory_size, cub_memory_size);
size_t cub_bytes = d_cub_memory.size() * sizeof(DeviceSplitCandidate);
hipcub::DeviceReduce::Reduce(reinterpret_cast<void*>(d_cub_memory.data()),
cub_bytes, d_split_candidates.data(),
d_result.data(), d_split_candidates.size(), op,
DeviceSplitCandidate(), streams[i]);
}
dh::safe_cuda(hipMemcpy(result.data(), d_result_all.data(),
sizeof(DeviceSplitCandidate) * d_result_all.size(),
hipMemcpyDeviceToHost));
return std::vector<DeviceSplitCandidate>(result.begin(), result.end());
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
hist_builder->Build(this, nidx);
}
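  // Compute a child's histogram as the parent histogram minus the sibling
  // histogram, bin by bin.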
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, n_bins, [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
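  // After applying a split, assign every row in the node's segment to the
  // left or right child (missing values follow the default direction) and
  // partition the row index buffer into the two child segments.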
void UpdatePosition(int nidx, RegTree::Node split_node) {
    CHECK(!split_node.IsLeaf()) << "Node must not be leaf";
Segment segment = ridx_segments[nidx];
bst_uint* d_ridx = ridx.Current();
int* d_position = position.Current();
if (left_counts.size() <= nidx) {
left_counts.resize((nidx * 2) + 1);
}
int64_t* d_left_count = left_counts.data().get() + nidx;
auto d_matrix = this->ellpack_matrix;
// Launch 1 thread for each row
dh::LaunchN<1, 128>(
device_id, segment.Size(), [=] __device__(bst_uint idx) {
idx += segment.begin;
bst_uint ridx = d_ridx[idx];
bst_float element = d_matrix.GetElement(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(element)) {
new_position = split_node.DefaultChild();
} else {
if (element <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
CountLeft(d_left_count, new_position, split_node.LeftChild());
d_position[idx] = new_position;
});
// Overlap device to host memory copy (left_count) with sort
auto& streams = this->GetStreams(2);
auto tmp_pinned = pinned_memory.GetSpan<int64_t>(1);
dh::safe_cuda(hipMemcpyAsync(tmp_pinned.data(), d_left_count, sizeof(int64_t),
hipMemcpyDeviceToHost, streams[0]));
SortPositionAndCopy(segment, split_node.LeftChild(), split_node.RightChild(), d_left_count,
streams[1]);
dh::safe_cuda(hipStreamSynchronize(streams[0]));
int64_t left_count = tmp_pinned[0];
CHECK_LE(left_count, segment.Size());
CHECK_GE(left_count, 0);
ridx_segments[split_node.LeftChild()] =
Segment(segment.begin, segment.begin + left_count);
ridx_segments[split_node.RightChild()] =
Segment(segment.begin + left_count, segment.end);
}
/*! \brief Sort row indices according to position. */
void SortPositionAndCopy(const Segment& segment, int left_nidx,
int right_nidx, int64_t* d_left_count,
hipStream_t stream) {
SortPosition(
&temp_memory,
common::Span<int>(position.Current() + segment.begin, segment.Size()),
common::Span<int>(position.other() + segment.begin, segment.Size()),
common::Span<bst_uint>(ridx.Current() + segment.begin, segment.Size()),
common::Span<bst_uint>(ridx.other() + segment.begin, segment.Size()),
left_nidx, right_nidx, d_left_count, stream);
// Copy back key/value
const auto d_position_current = position.Current() + segment.begin;
const auto d_position_other = position.other() + segment.begin;
const auto d_ridx_current = ridx.Current() + segment.begin;
const auto d_ridx_other = ridx.other() + segment.begin;
dh::LaunchN(device_id, segment.Size(), stream, [=] __device__(size_t idx) {
d_position_current[idx] = d_position_other[idx];
d_ridx_current[idx] = d_ridx_other[idx];
});
}
// After tree update is finished, update the position of all training
  // instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree* p_tree) {
const auto d_nodes =
temp_memory.GetSpan<RegTree::Node>(p_tree->GetNodes().size());
dh::safe_cuda(hipMemcpy(d_nodes.data(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
hipMemcpyHostToDevice));
auto d_position = position.Current();
const auto d_ridx = ridx.Current();
auto d_matrix = this->ellpack_matrix;
dh::LaunchN(device_id, position.Size(), [=] __device__(size_t idx) {
auto position = d_position[idx];
auto node = d_nodes[position];
bst_uint ridx = d_ridx[idx];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetElement(ridx, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
d_position[idx] = position;
});
}
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(hipSetDevice(device_id));
if (!prediction_cache_initialised) {
dh::safe_cuda(hipMemcpyAsync(prediction_cache.data(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
hipMemcpyDefault));
}
prediction_cache_initialised = true;
CalcWeightTrainParam param_d(param);
dh::safe_cuda(
hipMemcpyAsync(node_sum_gradients_d.data(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
hipMemcpyHostToDevice));
auto d_position = position.Current();
auto d_ridx = ridx.Current();
auto d_node_sum_gradients = node_sum_gradients_d.data();
auto d_prediction_cache = prediction_cache.data();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(hipMemcpy(
out_preds_d, prediction_cache.data(),
prediction_cache.size() * sizeof(bst_float), hipMemcpyDefault));
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.StartCuda("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
shard_idx,
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
ellpack_matrix.BinCount() *
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
reducer->Synchronize(device_id);
monitor.StopCuda("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// If we are using a single GPU, build the histogram for the node with the
// fewest training instances
// If we are distributed, don't bother
if (reducer->IsSingleGPU()) {
bool fewer_right =
ridx_segments[nidx_right].Size() < ridx_segments[nidx_left].Size();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
nidx_parent, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(nidx_parent, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
GradStats left_stats;
left_stats.Add(candidate.split.left_sum);
GradStats right_stats;
right_stats.Add(candidate.split.right_sum);
GradStats parent_sum;
parent_sum.Add(left_stats);
parent_sum.Add(right_stats);
node_value_constraints.resize(tree.GetNodes().size());
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(param, parent_sum);
auto left_weight =
node_value_constraints[candidate.nid].CalcWeight(param, left_stats)*param.learning_rate;
auto right_weight =
node_value_constraints[candidate.nid].CalcWeight(param, right_stats)*param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.sum_hess);
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), left_stats, right_stats,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
}
void InitRoot(RegTree* p_tree, HostDeviceVector<GradientPair>* gpair_all,
dh::AllReducer* reducer, int64_t num_columns) {
constexpr int kRootNIdx = 0;
const auto &gpair = gpair_all->DeviceSpan(device_id);
dh::SumReduction(temp_memory, gpair, node_sum_gradients_d,
gpair.size());
reducer->AllReduceSum(
shard_idx, reinterpret_cast<float*>(node_sum_gradients_d.data()),
reinterpret_cast<float*>(node_sum_gradients_d.data()), 2);
reducer->Synchronize(device_id);
dh::safe_cuda(hipMemcpy(node_sum_gradients.data(),
node_sum_gradients_d.data(), sizeof(GradientPair),
hipMemcpyDeviceToHost));
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
p_tree->Stat(kRootNIdx).sum_hess = node_sum_gradients[kRootNIdx].GetHess();
auto weight = CalcWeight(param, node_sum_gradients[kRootNIdx]);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateSplits({kRootNIdx}, *p_tree, num_columns);
qexpand->push(
ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split.at(0), 0));
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
monitor.StartCuda("Reset");
this->Reset(gpair_all, p_fmat->Info().num_col_);
monitor.StopCuda("Reset");
monitor.StartCuda("InitRoot");
this->InitRoot(p_tree, gpair_all, reducer, p_fmat->Info().num_col_);
monitor.StopCuda("InitRoot");
auto timestamp = qexpand->size();
auto num_leaves = 1;
while (!qexpand->empty()) {
ExpandEntry candidate = qexpand->top();
qexpand->pop();
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.StartCuda("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.StopCuda("UpdatePosition");
monitor.StartCuda("BuildHist");
this->BuildHistLeftRight(candidate.nid, left_child_nidx, right_child_nidx, reducer);
monitor.StopCuda("BuildHist");
monitor.StartCuda("EvaluateSplits");
auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx},
*p_tree, p_fmat->Info().num_col_);
monitor.StopCuda("EvaluateSplits");
qexpand->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx), splits.at(0),
timestamp++));
qexpand->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
splits.at(1), timestamp++));
}
}
monitor.StartCuda("FinalisePosition");
this->FinalisePosition(p_tree);
monitor.StopCuda("FinalisePosition");
}
};
template <typename GradientSumT>
struct SharedMemHistBuilder : public GPUHistBuilderBase<GradientSumT> {
void Build(DeviceShard<GradientSumT>* shard, int nidx) override {
auto segment = shard->ridx_segments[nidx];
auto segment_begin = segment.begin;
auto d_node_hist = shard->hist.GetNodeHistogram(nidx);
auto d_ridx = shard->ridx.Current();
auto d_gpair = shard->gpair.data();
auto n_elements = segment.Size() * shard->ellpack_matrix.row_stride;
const size_t smem_size = sizeof(GradientSumT) * shard->ellpack_matrix.BinCount();
const int items_per_thread = 8;
const int block_threads = 256;
const int grid_size =
static_cast<int>(dh::DivRoundUp(n_elements,
items_per_thread * block_threads));
if (grid_size <= 0) {
return;
}
hipLaunchKernelGGL(( SharedMemHistKernel), dim3(grid_size), dim3(block_threads), smem_size, 0,
shard->ellpack_matrix, d_ridx, d_node_hist.data(), d_gpair,
segment_begin, n_elements);
}
};
template <typename GradientSumT>
struct GlobalMemHistBuilder : public GPUHistBuilderBase<GradientSumT> {
void Build(DeviceShard<GradientSumT>* shard, int nidx) override {
Segment segment = shard->ridx_segments[nidx];
auto d_node_hist = shard->hist.GetNodeHistogram(nidx).data();
bst_uint* d_ridx = shard->ridx.Current();
GradientPair* d_gpair = shard->gpair.data();
size_t const n_elements = segment.Size() * shard->ellpack_matrix.row_stride;
auto d_matrix = shard->ellpack_matrix;
dh::LaunchN(shard->device_id, n_elements, [=] __device__(size_t idx) {
int ridx = d_ridx[(idx / d_matrix.row_stride) + segment.begin];
// lookup the index (bin) of histogram.
int gidx = d_matrix.gidx_iter[ridx * d_matrix.row_stride + idx % d_matrix.row_stride];
if (gidx != d_matrix.null_gidx_value) {
AtomicAddGpair(d_node_hist + gidx, d_gpair[ridx]);
}
});
}
};
template <typename GradientSumT>
inline void DeviceShard<GradientSumT>::InitCompressedData(
const common::HistCutMatrix& hmat, const SparsePage& row_batch, bool is_dense) {
size_t row_stride = this->InitRowPtrs(row_batch);
n_bins = hmat.row_ptr.back();
int null_gidx_value = hmat.row_ptr.back();
int max_nodes =
param.max_leaves > 0 ? param.max_leaves * 2 : MaxNodesDepth(param.max_depth);
ba.Allocate(device_id,
&gpair, n_rows,
&ridx, n_rows,
&position, n_rows,
&prediction_cache, n_rows,
&node_sum_gradients_d, max_nodes,
&feature_segments, hmat.row_ptr.size(),
&gidx_fvalue_map, hmat.cut.size(),
&min_fvalue, hmat.min_val.size(),
&monotone_constraints, param.monotone_constraints.size());
dh::CopyVectorToDeviceSpan(gidx_fvalue_map, hmat.cut);
dh::CopyVectorToDeviceSpan(min_fvalue, hmat.min_val);
dh::CopyVectorToDeviceSpan(feature_segments, hmat.row_ptr);
dh::CopyVectorToDeviceSpan(monotone_constraints, param.monotone_constraints);
node_sum_gradients.resize(max_nodes);
ridx_segments.resize(max_nodes);
// allocate compressed bin data
int num_symbols = n_bins + 1;
// Required buffer size for storing data matrix in ELLPack format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows,
num_symbols);
CHECK(!(param.max_leaves == 0 && param.max_depth == 0))
<< "Max leaves and max depth cannot both be unconstrained for "
"gpu_hist.";
ba.Allocate(device_id, &gidx_buffer, compressed_size_bytes);
thrust::fill(
thrust::device_pointer_cast(gidx_buffer.data()),
thrust::device_pointer_cast(gidx_buffer.data() + gidx_buffer.size()), 0);
this->CreateHistIndices(row_batch, row_stride, null_gidx_value);
ellpack_matrix.Init(
feature_segments, min_fvalue,
gidx_fvalue_map, row_stride,
common::CompressedIterator<uint32_t>(gidx_buffer.data(), num_symbols),
is_dense, null_gidx_value);
// check if we can use shared memory for building histograms
  // (assuming we need at least 2 CTAs per SM to maintain decent latency
// hiding)
auto histogram_size = sizeof(GradientSumT) * hmat.row_ptr.back();
auto max_smem = dh::MaxSharedMemory(device_id);
if (histogram_size <= max_smem) {
hist_builder.reset(new SharedMemHistBuilder<GradientSumT>);
} else {
hist_builder.reset(new GlobalMemHistBuilder<GradientSumT>);
}
// Init histogram
hist.Init(device_id, hmat.NumBins());
}
template <typename GradientSumT>
inline void DeviceShard<GradientSumT>::CreateHistIndices(
const SparsePage& row_batch, size_t row_stride, int null_gidx_value) {
int num_symbols = n_bins + 1;
// bin and compress entries in batches of rows
size_t gpu_batch_nrows =
std::min
(dh::TotalMemory(device_id) / (16 * row_stride * sizeof(Entry)),
static_cast<size_t>(n_rows));
const std::vector<Entry>& data_vec = row_batch.data.HostVector();
thrust::device_vector<Entry> entries_d(gpu_batch_nrows * row_stride);
size_t gpu_nbatches = dh::DivRoundUp(n_rows, gpu_batch_nrows);
for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) {
size_t batch_row_begin = gpu_batch * gpu_batch_nrows;
size_t batch_row_end = (gpu_batch + 1) * gpu_batch_nrows;
if (batch_row_end > n_rows) {
batch_row_end = n_rows;
}
size_t batch_nrows = batch_row_end - batch_row_begin;
// number of entries in this batch.
size_t n_entries = row_ptrs[batch_row_end] - row_ptrs[batch_row_begin];
// copy data entries to device.
dh::safe_cuda
(hipMemcpy
(entries_d.data().get(), data_vec.data() + row_ptrs[batch_row_begin],
n_entries * sizeof(Entry), hipMemcpyDefault));
const dim3 block3(32, 8, 1); // 256 threads
const dim3 grid3(dh::DivRoundUp(n_rows, block3.x),
dh::DivRoundUp(row_stride, block3.y), 1);
hipLaunchKernelGGL(( CompressBinEllpackKernel), dim3(grid3), dim3(block3), 0, 0,
common::CompressedBufferWriter(num_symbols),
gidx_buffer.data(),
row_ptrs.data().get() + batch_row_begin,
entries_d.data().get(),
gidx_fvalue_map.data(), feature_segments.data(),
batch_row_begin, batch_nrows,
row_ptrs[batch_row_begin],
row_stride, null_gidx_value);
}
// free the memory that is no longer needed
row_ptrs.resize(0);
row_ptrs.shrink_to_fit();
entries_d.resize(0);
entries_d.shrink_to_fit();
}
template <typename GradientSumT>
class GPUHistMakerSpecialised{
public:
GPUHistMakerSpecialised() : initialised_{false}, p_last_fmat_{nullptr} {}
void Init(
const std::vector<std::pair<std::string, std::string>>& args) {
param_.InitAllowUnknown(args);
hist_maker_param_.InitAllowUnknown(args);
CHECK(param_.n_gpus != 0) << "Must have at least one device";
n_devices_ = param_.n_gpus;
dist_ = GPUDistribution::Block(GPUSet::All(param_.gpu_id, param_.n_gpus));
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.StartCuda("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (tsoobgx::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.StopCuda("Update");
}
void InitDataOnce(DMatrix* dmat) {
info_ = &dmat->Info();
int n_devices = dist_.Devices().Size();
device_list_.resize(n_devices);
for (int index = 0; index < n_devices; ++index) {
int device_id = dist_.Devices().DeviceId(index);
device_list_[index] = device_id;
}
reducer_.Init(device_list_);
auto batch_iter = dmat->GetRowBatches().begin();
const SparsePage& batch = *batch_iter;
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
// Create device shards
shards_.resize(n_devices);
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
dh::safe_cuda(hipSetDevice(dist_.Devices().DeviceId(idx)));
size_t start = dist_.ShardStart(info_->num_row_, idx);
size_t size = dist_.ShardSize(info_->num_row_, idx);
shard = std::unique_ptr<DeviceShard<GradientSumT>>(
new DeviceShard<GradientSumT>(dist_.Devices().DeviceId(idx), idx,
start, start + size, param_,
column_sampling_seed));
});
// Find the cuts.
monitor_.StartCuda("Quantiles");
common::DeviceSketch(batch, *info_, param_, &hmat_, hist_maker_param_.gpu_batch_nrows);
n_bins_ = hmat_.row_ptr.back();
monitor_.StopCuda("Quantiles");
auto is_dense = info_->num_nonzero_ == info_->num_row_ * info_->num_col_;
monitor_.StartCuda("BinningCompression");
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
dh::safe_cuda(hipSetDevice(shard->device_id));
shard->InitCompressedData(hmat_, batch, is_dense);
});
monitor_.StopCuda("BinningCompression");
++batch_iter;
CHECK(batch_iter.AtEnd()) << "External memory not supported";
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat) {
if (!initialised_) {
monitor_.StartCuda("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.StopCuda("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(const std::vector<RegTree>& local_trees) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_trees.front().Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree;
reference_tree.Load(&fs);
for (const auto& tree : local_trees) {
CHECK(tree == reference_tree);
}
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.StartCuda("InitData");
this->InitData(gpair, p_fmat);
monitor_.StopCuda("InitData");
std::vector<RegTree> trees(shards_.size());
for (auto& tree : trees) {
tree = *p_tree;
}
gpair->Reshard(dist_);
// Launch one thread for each device "shard" containing a subset of rows.
// Threads will cooperatively build the tree, synchronising over histograms.
// Each thread will redundantly build its own copy of the tree
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->UpdateTree(gpair, p_fmat, &trees.at(idx), &reducer_);
});
// All trees are expected to be identical
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(trees);
}
// Write the output tree
*p_tree = trees.front();
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (shards_.empty() || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.StartCuda("UpdatePredictionCache");
p_out_preds->Shard(dist_.Devices());
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
dh::safe_cuda(hipSetDevice(shard->device_id));
shard->UpdatePredictionCache(
p_out_preds->DevicePointer(shard->device_id));
});
monitor_.StopCuda("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
common::HistCutMatrix hmat_; // NOLINT
MetaInfo* info_; // NOLINT
std::vector<std::unique_ptr<DeviceShard<GradientSumT>>> shards_; // NOLINT
private:
bool initialised_;
int n_devices_;
int n_bins_;
GPUHistMakerTrainParam hist_maker_param_;
common::GHistIndexMatrix gmat_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_;
GPUDistribution dist_;
common::Monitor monitor_;
/*! List storing device id. */
std::vector<int> device_list_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Init(
const std::vector<std::pair<std::string, std::string>>& args) override {
hist_maker_param_.InitAllowUnknown(args);
float_maker_.reset();
double_maker_.reset();
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->Init(args);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->Init(args);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
TSOOBGX_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace tsoobgx
| 5b7bb92dd61ad95da87586ab211107d196218920.cu | /*!
* Copyright 2017-2018 tsooBGX contributors
*/
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <tsoobgx/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "../common/common.h"
#include "../common/compressed_iterator.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/host_device_vector.h"
#include "../common/timer.h"
#include "../common/span.h"
#include "param.h"
#include "updater_gpu_common.cuh"
namespace tsoobgx {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public dmlc::Parameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
// number of rows in a single GPU batch
int gpu_batch_nrows;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(gpu_batch_nrows)
.set_lower_bound(-1)
.set_default(0)
.describe("Number of rows in a GPU batch, used for finding quantiles on GPU; "
"-1 to use all rows assignted to a GPU, and 0 to auto-deduce");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry() = default;
ExpandEntry(int nid, int depth, DeviceSplitCandidate split,
uint64_t timestamp)
: nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) {
return false;
}
if (param.max_depth > 0 && depth == param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false;
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
inline static bool DepthWise(ExpandEntry lhs, ExpandEntry rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(ExpandEntry lhs, ExpandEntry rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
// Find a gidx value for a given feature; otherwise return -1 if not found
__forceinline__ __device__ int BinarySearchRow(
bst_uint begin, bst_uint end,
common::CompressedIterator<uint32_t> data,
int const fidx_begin, int const fidx_end) {
bst_uint previous_middle = UINT32_MAX;
while (end != begin) {
auto middle = begin + (end - begin) / 2;
if (middle == previous_middle) {
break;
}
previous_middle = middle;
auto gidx = data[middle];
if (gidx >= fidx_begin && gidx < fidx_end) {
return gidx;
} else if (gidx < fidx_begin) {
begin = middle;
} else {
end = middle;
}
}
// Value is missing
return -1;
}
/** \brief Struct for accessing and manipulating an ellpack matrix on the
* device. Does not own underlying memory and may be trivially copied into
* kernels.*/
struct ELLPackMatrix {
common::Span<uint32_t> feature_segments;
/*! \brief minimum value for each feature. */
common::Span<bst_float> min_fvalue;
/*! \brief Cut. */
common::Span<bst_float> gidx_fvalue_map;
/*! \brief row length for ELLPack. */
size_t row_stride{0};
common::CompressedIterator<uint32_t> gidx_iter;
bool is_dense;
int null_gidx_value;
TSOOBGX_DEVICE size_t BinCount() const { return gidx_fvalue_map.size(); }
// Get a matrix element, uses binary search for look up
// Return NaN if missing
__device__ bst_float GetElement(size_t ridx, size_t fidx) const {
auto row_begin = row_stride * ridx;
auto row_end = row_begin + row_stride;
auto gidx = -1;
if (is_dense) {
gidx = gidx_iter[row_begin + fidx];
} else {
gidx =
BinarySearchRow(row_begin, row_end, gidx_iter, feature_segments[fidx],
feature_segments[fidx + 1]);
}
if (gidx == -1) {
return nan("");
}
return gidx_fvalue_map[gidx];
}
void Init(common::Span<uint32_t> feature_segments,
common::Span<bst_float> min_fvalue,
common::Span<bst_float> gidx_fvalue_map, size_t row_stride,
common::CompressedIterator<uint32_t> gidx_iter, bool is_dense,
int null_gidx_value) {
this->feature_segments = feature_segments;
this->min_fvalue = min_fvalue;
this->gidx_fvalue_map = gidx_fvalue_map;
this->row_stride = row_stride;
this->gidx_iter = gidx_iter;
this->is_dense = is_dense;
this->null_gidx_value = null_gidx_value;
}
};
// Split gain computation honouring monotonic constraints: try placing the
// missing-value bucket on the left and on the right, return the larger loss
// change, and report the chosen side through missing_left_out.
template <typename GradientPairT>
TSOOBGX_DEVICE float inline LossChangeMissing(
const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum,
const float& parent_gain, const GPUTrainingParam& param, int constraint,
const ValueConstraint& value_constraint,
bool& missing_left_out) { // NOLINT
float missing_left_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan + missing),
GradStats(parent_sum - (scan + missing)));
float missing_right_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan), GradStats(parent_sum - scan));
if (missing_left_gain >= missing_right_gain) {
missing_left_out = true;
return missing_left_gain - parent_gain;
} else {
missing_left_out = false;
return missing_right_gain - parent_gain;
}
}
/*!
 * \brief Sum all histogram bins belonging to one feature with a block
 *        reduction; the result is broadcast to every thread in the block.
 *
 * \tparam ReduceT BlockReduce type.
 * \tparam TempStorageT Cub shared memory type.
 *
 * \param feature_histogram Histogram bins of the feature being evaluated.
 * \param temp_storage Shared memory for intermediate result.
 */
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT>
__device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram,
TempStorageT* temp_storage) {
__shared__ cub::Uninitialized<GradientSumT> uninitialized_sum;
GradientSumT& shared_sum = uninitialized_sum.Alias();
GradientSumT local_sum = GradientSumT();
// For loop sums features into one block size
auto begin = feature_histogram.data();
auto end = begin + feature_histogram.size();
for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
bool thread_active = itr + threadIdx.x < end;
// Scan histogram
GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT();
local_sum += bin;
}
local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, cub::Sum());
// Reduction result is stored in thread 0.
if (threadIdx.x == 0) {
shared_sum = local_sum;
}
__syncthreads();
return shared_sum;
}
/*! \brief Find the thread with best gain. */
template <int BLOCK_THREADS, typename ReduceT, typename ScanT,
typename MaxReduceT, typename TempStorageT, typename GradientSumT>
__device__ void EvaluateFeature(
int fidx, common::Span<const GradientSumT> node_histogram,
const ELLPackMatrix& matrix,
DeviceSplitCandidate* best_split, // shared memory storing best split
const DeviceNodeStats& node, const GPUTrainingParam& param,
TempStorageT* temp_storage, // temp memory for cub operations
int constraint, // monotonic_constraints
const ValueConstraint& value_constraint) {
// Use pointer from cut to indicate begin and end of bins for each feature.
  uint32_t gidx_begin = matrix.feature_segments[fidx];  // beginning bin
uint32_t gidx_end =
matrix.feature_segments[fidx + 1]; // end bin for i^th feature
// Sum histogram bins for current feature
GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>(
node_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage);
GradientSumT const parent_sum = GradientSumT(node.sum_gradients);
GradientSumT const missing = parent_sum - feature_sum;
float const null_gain = -std::numeric_limits<bst_float>::infinity();
SumCallbackOp<GradientSumT> prefix_op =
SumCallbackOp<GradientSumT>();
for (int scan_begin = gidx_begin; scan_begin < gidx_end;
scan_begin += BLOCK_THREADS) {
bool thread_active = (scan_begin + threadIdx.x) < gidx_end;
// Gradient value for current bin.
GradientSumT bin =
thread_active ? node_histogram[scan_begin + threadIdx.x] : GradientSumT();
ScanT(temp_storage->scan).ExclusiveScan(bin, bin, cub::Sum(), prefix_op);
// Whether the gradient of missing values is put to the left side.
bool missing_left = true;
float gain = null_gain;
if (thread_active) {
gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param,
constraint, value_constraint, missing_left);
}
__syncthreads();
// Find thread with best gain
cub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
cub::KeyValuePair<int, float> best =
MaxReduceT(temp_storage->max_reduce).Reduce(tuple, cub::ArgMax());
__shared__ cub::KeyValuePair<int, float> block_max;
if (threadIdx.x == 0) {
block_max = best;
}
__syncthreads();
// Best thread updates split
if (threadIdx.x == block_max.key) {
int split_gidx = (scan_begin + threadIdx.x) - 1;
float fvalue;
if (split_gidx < static_cast<int>(gidx_begin)) {
fvalue = matrix.min_fvalue[fidx];
} else {
fvalue = matrix.gidx_fvalue_map[split_gidx];
}
GradientSumT left = missing_left ? bin + missing : bin;
GradientSumT right = parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue,
fidx, GradientPair(left), GradientPair(right), param);
}
__syncthreads();
}
}
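// Evaluate all candidate splits for one node. The kernel is launched with one
// thread block per sampled feature; each block scans its feature's slice of
// the node histogram via EvaluateFeature and writes its best candidate to
// split_candidates[blockIdx.x]. The per-feature candidates are then reduced to
// a single best split by cub::DeviceReduce in DeviceShard::EvaluateSplits.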
template <int BLOCK_THREADS, typename GradientSumT>
__global__ void EvaluateSplitKernel(
common::Span<const GradientSumT>
node_histogram, // histogram for gradients
common::Span<const int> feature_set, // Selected features
DeviceNodeStats node,
ELLPackMatrix matrix,
GPUTrainingParam gpu_param,
common::Span<DeviceSplitCandidate> split_candidates, // resulting split
ValueConstraint value_constraint,
common::Span<int> d_monotonic_constraints) {
// KeyValuePair here used as threadIdx.x -> gain_value
using ArgMaxT = cub::KeyValuePair<int, float>;
using BlockScanT =
cub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>;
using MaxReduceT = cub::BlockReduce<ArgMaxT, BLOCK_THREADS>;
using SumReduceT = cub::BlockReduce<GradientSumT, BLOCK_THREADS>;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
// Aligned && shared storage for best_split
__shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
DeviceSplitCandidate& best_split = uninitialized_split.Alias();
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
best_split = DeviceSplitCandidate();
}
__syncthreads();
// One block for each feature. Features are sampled, so fidx != blockIdx.x
int fidx = feature_set[blockIdx.x];
int constraint = d_monotonic_constraints[fidx];
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
fidx, node_histogram, matrix, &best_split, node, gpu_param, &temp_storage,
constraint, value_constraint);
__syncthreads();
if (threadIdx.x == 0) {
// Record best loss for each feature
split_candidates[blockIdx.x] = best_split;
}
}
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
thrust::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
dh::safe_cuda(cudaMemsetAsync(
data_.data().get(), 0,
data_.size() * sizeof(typename decltype(data_)::value_type)));
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
thrust::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
dh::safe_cuda(cudaSetDevice(device_id_));
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
        // zero the newly assigned histogram (the memset size is in bytes)
dh::safe_cuda(cudaMemsetAsync(data_.data().get() + used_size, 0,
n_bins_ * sizeof(GradientSumT)));
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
dh::safe_cuda(cudaMemsetAsync(data_.data().get() + old_entry.second, 0,
n_bins_ * sizeof(GradientSumT)));
nidx_map_[nidx] = old_entry.second;
}
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
size_t new_required_memory = std::max(data_.size() * 2, HistogramSize());
if (data_.size() < new_required_memory) {
data_.resize(new_required_memory);
}
}
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
TSOOBGX_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Bin each input data entry, store the bin indices in compressed form.
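// Each thread handles one (row, position-within-row) pair; rows shorter than
// row_stride are padded with null_gidx_value, so every row occupies exactly
// row_stride symbols in the compressed ELLPack buffer.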
template<typename std::enable_if<true, int>::type = 0>
__global__ void CompressBinEllpackKernel(
common::CompressedBufferWriter wr,
common::CompressedByteT* __restrict__ buffer, // gidx_buffer
const size_t* __restrict__ row_ptrs, // row offset of input data
const Entry* __restrict__ entries, // One batch of input data
const float* __restrict__ cuts, // HistCutMatrix::cut
const uint32_t* __restrict__ cut_rows, // HistCutMatrix::row_ptrs
size_t base_row, // batch_row_begin
size_t n_rows,
// row_ptr_begin: row_offset[base_row], the start position of base_row
size_t row_ptr_begin,
size_t row_stride,
unsigned int null_gidx_value) {
size_t irow = threadIdx.x + blockIdx.x * blockDim.x;
int ifeature = threadIdx.y + blockIdx.y * blockDim.y;
if (irow >= n_rows || ifeature >= row_stride) {
return;
}
int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]);
unsigned int bin = null_gidx_value;
if (ifeature < row_length) {
Entry entry = entries[row_ptrs[irow] - row_ptr_begin + ifeature];
int feature = entry.index;
float fvalue = entry.fvalue;
// {feature_cuts, ncuts} forms the array of cuts of `feature'.
const float *feature_cuts = &cuts[cut_rows[feature]];
int ncuts = cut_rows[feature + 1] - cut_rows[feature];
// Assigning the bin in current entry.
// S.t.: fvalue < feature_cuts[bin]
bin = dh::UpperBound(feature_cuts, ncuts, fvalue);
if (bin >= ncuts) {
bin = ncuts - 1;
}
// Add the number of bins in previous features.
bin += cut_rows[feature];
}
// Write to gidx buffer.
wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature);
}
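// Histogram build with a shared-memory staging pass: each block accumulates
// into a private copy of the node histogram held in shared memory and then
// atomically merges that copy into the global node histogram. This requires
// the whole histogram to fit in shared memory; otherwise GlobalMemHistBuilder
// is used instead (see InitCompressedData).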
template <typename GradientSumT>
__global__ void SharedMemHistKernel(ELLPackMatrix matrix, const bst_uint* d_ridx,
GradientSumT* d_node_hist,
const GradientPair* d_gpair,
size_t segment_begin, size_t n_elements) {
extern __shared__ char smem[];
GradientSumT* smem_arr = reinterpret_cast<GradientSumT*>(smem); // NOLINT
for (auto i :
dh::BlockStrideRange(static_cast<size_t>(0), matrix.BinCount())) {
smem_arr[i] = GradientSumT();
}
__syncthreads();
for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) {
int ridx = d_ridx[idx / matrix.row_stride + segment_begin];
int gidx = matrix.gidx_iter[ridx * matrix.row_stride + idx % matrix.row_stride];
if (gidx != matrix.null_gidx_value) {
AtomicAddGpair(smem_arr + gidx, d_gpair[ridx]);
}
}
__syncthreads();
for (auto i :
dh::BlockStrideRange(static_cast<size_t>(0), matrix.BinCount())) {
AtomicAddGpair(d_node_hist + i, smem_arr[i]);
}
}
struct Segment {
size_t begin;
size_t end;
Segment() : begin{0}, end{0} {}
Segment(size_t begin, size_t end) : begin(begin), end(end) {
CHECK_GE(end, begin);
}
size_t Size() const { return end - begin; }
};
/** \brief Returns one if the left node index is encountered, otherwise returns
 * zero. */
struct IndicateLeftTransform {
int left_nidx;
explicit IndicateLeftTransform(int left_nidx) : left_nidx(left_nidx) {}
__host__ __device__ __forceinline__ int operator()(const int& x) const {
return x == left_nidx ? 1 : 0;
}
};
/**
* \brief Optimised routine for sorting key value pairs into left and right
* segments. Based on a single pass of exclusive scan, uses iterators to
* redirect inputs and outputs.
*/
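// Example with left_nidx == 1: position {1, 2, 1, 2} and ridx {a, b, c, d}
// scatter to position {1, 1, 2, 2} and ridx {a, c, b, d}. The exclusive scan
// of the "is left" flags gives each left row its output slot, while
// *d_left_count supplies the base offset for the rows that go right.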
inline void SortPosition(dh::CubMemory* temp_memory, common::Span<int> position,
common::Span<int> position_out, common::Span<bst_uint> ridx,
common::Span<bst_uint> ridx_out, int left_nidx,
int right_nidx, int64_t* d_left_count,
cudaStream_t stream = nullptr) {
auto d_position_out = position_out.data();
auto d_position_in = position.data();
auto d_ridx_out = ridx_out.data();
auto d_ridx_in = ridx.data();
auto write_results = [=] __device__(size_t idx, int ex_scan_result) {
int scatter_address;
if (d_position_in[idx] == left_nidx) {
scatter_address = ex_scan_result;
} else {
scatter_address = (idx - ex_scan_result) + *d_left_count;
}
d_position_out[scatter_address] = d_position_in[idx];
d_ridx_out[scatter_address] = d_ridx_in[idx];
}; // NOLINT
IndicateLeftTransform conversion_op(left_nidx);
cub::TransformInputIterator<int, IndicateLeftTransform, int*> in_itr(
d_position_in, conversion_op);
dh::DiscardLambdaItr<decltype(write_results)> out_itr(write_results);
size_t temp_storage_bytes = 0;
cub::DeviceScan::ExclusiveSum(nullptr, temp_storage_bytes, in_itr, out_itr,
position.size(), stream);
temp_memory->LazyAllocate(temp_storage_bytes);
cub::DeviceScan::ExclusiveSum(temp_memory->d_temp_storage,
temp_memory->temp_storage_bytes, in_itr,
out_itr, position.size(), stream);
}
/*! \brief Count how many rows are assigned to left node. */
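// Each warp takes a ballot over its active lanes and only the leader lane adds
// the popcount of that ballot to the counter, so a single atomicAdd is issued
// per warp instead of one per thread.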
__forceinline__ __device__ void CountLeft(int64_t* d_count, int val,
int left_nidx) {
#if __CUDACC_VER_MAJOR__ > 8
int mask = __activemask();
unsigned ballot = __ballot_sync(mask, val == left_nidx);
int leader = __ffs(mask) - 1;
if (threadIdx.x % 32 == leader) {
atomicAdd(reinterpret_cast<unsigned long long*>(d_count), // NOLINT
static_cast<unsigned long long>(__popc(ballot))); // NOLINT
}
#else
unsigned ballot = __ballot(val == left_nidx);
if (threadIdx.x % 32 == 0) {
atomicAdd(reinterpret_cast<unsigned long long*>(d_count), // NOLINT
static_cast<unsigned long long>(__popc(ballot))); // NOLINT
}
#endif
}
template <typename GradientSumT>
struct DeviceShard;
template <typename GradientSumT>
struct GPUHistBuilderBase {
public:
virtual void Build(DeviceShard<GradientSumT>* shard, int idx) = 0;
virtual ~GPUHistBuilderBase() = default;
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct DeviceShard {
int n_bins;
int device_id;
int shard_idx; // Position in the local array of shards
dh::BulkAllocator ba;
ELLPackMatrix ellpack_matrix;
/*! \brief Range of rows for each node. */
std::vector<Segment> ridx_segments;
DeviceHistogram<GradientSumT> hist;
  /*! \brief row_ptr from HistCutMatrix. */
common::Span<uint32_t> feature_segments;
/*! \brief minimum value for each feature. */
common::Span<bst_float> min_fvalue;
/*! \brief Cut. */
common::Span<bst_float> gidx_fvalue_map;
/*! \brief global index of histogram, which is stored in ELLPack format. */
common::Span<common::CompressedByteT> gidx_buffer;
/*! \brief Row indices relative to this shard, necessary for sorting rows. */
dh::DoubleBuffer<bst_uint> ridx;
dh::DoubleBuffer<int> position;
/*! \brief Gradient pair for each row. */
common::Span<GradientPair> gpair;
common::Span<int> monotone_constraints;
common::Span<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
common::Span<GradientPair> node_sum_gradients_d;
/*! \brief row offset in SparsePage (the input data). */
thrust::device_vector<size_t> row_ptrs;
/*! \brief On-device feature set, only actually used on one of the devices */
thrust::device_vector<int> feature_set_d;
thrust::device_vector<int64_t>
left_counts; // Useful to keep a bunch of zeroed memory for sort position
/*! The row offset for this shard. */
bst_uint row_begin_idx;
bst_uint row_end_idx;
bst_uint n_rows;
TrainParam param;
bool prediction_cache_initialised;
dh::CubMemory temp_memory;
dh::PinnedMemory pinned_memory;
std::vector<cudaStream_t> streams;
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
std::unique_ptr<GPUHistBuilderBase<GradientSumT>> hist_builder;
using ExpandQueue =
std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand;
  // TODO(canonizer): add support for multi-batch DMatrix here
DeviceShard(int _device_id, int shard_idx, bst_uint row_begin,
bst_uint row_end, TrainParam _param, uint32_t column_sampler_seed)
: device_id(_device_id),
shard_idx(shard_idx),
row_begin_idx(row_begin),
row_end_idx(row_end),
n_rows(row_end - row_begin),
n_bins(0),
param(std::move(_param)),
prediction_cache_initialised(false),
column_sampler(column_sampler_seed) {
monitor.Init(std::string("DeviceShard") + std::to_string(device_id));
}
/* Init row_ptrs and row_stride */
size_t InitRowPtrs(const SparsePage& row_batch) {
const auto& offset_vec = row_batch.offset.HostVector();
row_ptrs.resize(n_rows + 1);
thrust::copy(offset_vec.data() + row_begin_idx,
offset_vec.data() + row_end_idx + 1,
row_ptrs.begin());
auto row_iter = row_ptrs.begin();
// find the maximum row size for converting to ELLPack
auto get_size = [=] __device__(size_t row) {
return row_iter[row + 1] - row_iter[row];
}; // NOLINT
auto counting = thrust::make_counting_iterator(size_t(0));
using TransformT = thrust::transform_iterator<decltype(get_size),
decltype(counting), size_t>;
TransformT row_size_iter = TransformT(counting, get_size);
size_t row_stride = thrust::reduce(row_size_iter, row_size_iter + n_rows, 0,
thrust::maximum<size_t>());
return row_stride;
}
void InitCompressedData(
const common::HistCutMatrix& hmat, const SparsePage& row_batch, bool is_dense);
void CreateHistIndices(const SparsePage& row_batch, size_t row_stride, int null_gidx_value);
~DeviceShard() {
dh::safe_cuda(cudaSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<cudaStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, int64_t num_columns) {
if (param.grow_policy == TrainParam::kLossGuide) {
qexpand.reset(new ExpandQueue(LossGuide));
} else {
qexpand.reset(new ExpandQueue(DepthWise));
}
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(cudaSetDevice(device_id));
thrust::fill(
thrust::device_pointer_cast(position.Current()),
thrust::device_pointer_cast(position.Current() + position.Size()), 0);
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
if (left_counts.size() < 256) {
left_counts.resize(256);
} else {
dh::safe_cuda(cudaMemsetAsync(left_counts.data().get(), 0,
sizeof(int64_t) * left_counts.size()));
}
thrust::sequence(
thrust::device_pointer_cast(ridx.CurrentSpan().data()),
thrust::device_pointer_cast(ridx.CurrentSpan().data() + ridx.Size()));
std::fill(ridx_segments.begin(), ridx_segments.end(), Segment(0, 0));
ridx_segments.front() = Segment(0, ridx.Size());
dh::safe_cuda(cudaMemcpyAsync(
gpair.data(), dh_gpair->ConstDevicePointer(device_id),
        gpair.size() * sizeof(GradientPair), cudaMemcpyDefault));
SubsampleGradientPair(device_id, gpair, param.subsample, row_begin_idx);
hist.Reset();
}
std::vector<DeviceSplitCandidate> EvaluateSplits(
std::vector<int> nidxs, const RegTree& tree,
size_t num_columns) {
dh::safe_cuda(cudaSetDevice(device_id));
auto result = pinned_memory.GetSpan<DeviceSplitCandidate>(nidxs.size());
// Work out cub temporary memory requirement
GPUTrainingParam gpu_param(param);
DeviceSplitCandidateReduceOp op(gpu_param);
size_t temp_storage_bytes;
    DeviceSplitCandidate* dummy = nullptr;
cub::DeviceReduce::Reduce(
nullptr, temp_storage_bytes, dummy,
dummy, num_columns, op,
DeviceSplitCandidate());
// size in terms of DeviceSplitCandidate
size_t cub_memory_size =
std::ceil(static_cast<double>(temp_storage_bytes) /
sizeof(DeviceSplitCandidate));
// Allocate enough temporary memory
// Result for each nidx
// + intermediate result for each column
// + cub reduce memory
auto temp_span = temp_memory.GetSpan<DeviceSplitCandidate>(
        nidxs.size() + nidxs.size() * num_columns + cub_memory_size * nidxs.size());
auto d_result_all = temp_span.subspan(0, nidxs.size());
auto d_split_candidates_all =
temp_span.subspan(d_result_all.size(), nidxs.size() * num_columns);
auto d_cub_memory_all =
temp_span.subspan(d_result_all.size() + d_split_candidates_all.size(),
cub_memory_size * nidxs.size());
auto& streams = this->GetStreams(nidxs.size());
for (auto i = 0ull; i < nidxs.size(); i++) {
auto nidx = nidxs[i];
auto p_feature_set = column_sampler.GetFeatureSet(tree.GetDepth(nidx));
p_feature_set->Shard(GPUSet(device_id, 1));
auto d_feature_set = p_feature_set->DeviceSpan(device_id);
auto d_split_candidates =
d_split_candidates_all.subspan(i * num_columns, d_feature_set.size());
DeviceNodeStats node(node_sum_gradients[nidx], nidx, param);
// One block for each feature
int constexpr kBlockThreads = 256;
EvaluateSplitKernel<kBlockThreads, GradientSumT>
<<<uint32_t(d_feature_set.size()), kBlockThreads, 0, streams[i]>>>(
hist.GetNodeHistogram(nidx), d_feature_set, node, ellpack_matrix,
gpu_param, d_split_candidates, node_value_constraints[nidx],
monotone_constraints);
// Reduce over features to find best feature
auto d_result = d_result_all.subspan(i, 1);
auto d_cub_memory =
d_cub_memory_all.subspan(i * cub_memory_size, cub_memory_size);
size_t cub_bytes = d_cub_memory.size() * sizeof(DeviceSplitCandidate);
cub::DeviceReduce::Reduce(reinterpret_cast<void*>(d_cub_memory.data()),
cub_bytes, d_split_candidates.data(),
d_result.data(), d_split_candidates.size(), op,
DeviceSplitCandidate(), streams[i]);
}
dh::safe_cuda(cudaMemcpy(result.data(), d_result_all.data(),
sizeof(DeviceSplitCandidate) * d_result_all.size(),
cudaMemcpyDeviceToHost));
return std::vector<DeviceSplitCandidate>(result.begin(), result.end());
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
hist_builder->Build(this, nidx);
}
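  // A child's histogram equals its parent's histogram minus its sibling's, so
  // the second child of a split can be obtained with an element-wise
  // subtraction instead of another full histogram build.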
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, n_bins, [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
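  // Reassign every row in the split node's segment to the left or right child
  // by evaluating the split condition (missing values follow the default
  // direction), count how many rows go left, and compact the row indices so
  // that each child owns a contiguous segment.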
void UpdatePosition(int nidx, RegTree::Node split_node) {
    CHECK(!split_node.IsLeaf()) << "Node must not be a leaf";
Segment segment = ridx_segments[nidx];
bst_uint* d_ridx = ridx.Current();
int* d_position = position.Current();
if (left_counts.size() <= nidx) {
left_counts.resize((nidx * 2) + 1);
}
int64_t* d_left_count = left_counts.data().get() + nidx;
auto d_matrix = this->ellpack_matrix;
// Launch 1 thread for each row
dh::LaunchN<1, 128>(
device_id, segment.Size(), [=] __device__(bst_uint idx) {
idx += segment.begin;
bst_uint ridx = d_ridx[idx];
bst_float element = d_matrix.GetElement(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(element)) {
new_position = split_node.DefaultChild();
} else {
if (element <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
CountLeft(d_left_count, new_position, split_node.LeftChild());
d_position[idx] = new_position;
});
// Overlap device to host memory copy (left_count) with sort
auto& streams = this->GetStreams(2);
auto tmp_pinned = pinned_memory.GetSpan<int64_t>(1);
dh::safe_cuda(cudaMemcpyAsync(tmp_pinned.data(), d_left_count, sizeof(int64_t),
cudaMemcpyDeviceToHost, streams[0]));
SortPositionAndCopy(segment, split_node.LeftChild(), split_node.RightChild(), d_left_count,
streams[1]);
dh::safe_cuda(cudaStreamSynchronize(streams[0]));
int64_t left_count = tmp_pinned[0];
CHECK_LE(left_count, segment.Size());
CHECK_GE(left_count, 0);
ridx_segments[split_node.LeftChild()] =
Segment(segment.begin, segment.begin + left_count);
ridx_segments[split_node.RightChild()] =
Segment(segment.begin + left_count, segment.end);
}
/*! \brief Sort row indices according to position. */
void SortPositionAndCopy(const Segment& segment, int left_nidx,
int right_nidx, int64_t* d_left_count,
cudaStream_t stream) {
SortPosition(
&temp_memory,
common::Span<int>(position.Current() + segment.begin, segment.Size()),
common::Span<int>(position.other() + segment.begin, segment.Size()),
common::Span<bst_uint>(ridx.Current() + segment.begin, segment.Size()),
common::Span<bst_uint>(ridx.other() + segment.begin, segment.Size()),
left_nidx, right_nidx, d_left_count, stream);
// Copy back key/value
const auto d_position_current = position.Current() + segment.begin;
const auto d_position_other = position.other() + segment.begin;
const auto d_ridx_current = ridx.Current() + segment.begin;
const auto d_ridx_other = ridx.other() + segment.begin;
dh::LaunchN(device_id, segment.Size(), stream, [=] __device__(size_t idx) {
d_position_current[idx] = d_position_other[idx];
d_ridx_current[idx] = d_ridx_other[idx];
});
}
  // After the tree update is finished, update the position of all training
  // instances to their final leaf. This information is used later to update
  // the prediction cache.
void FinalisePosition(RegTree* p_tree) {
const auto d_nodes =
temp_memory.GetSpan<RegTree::Node>(p_tree->GetNodes().size());
dh::safe_cuda(cudaMemcpy(d_nodes.data(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
cudaMemcpyHostToDevice));
auto d_position = position.Current();
const auto d_ridx = ridx.Current();
auto d_matrix = this->ellpack_matrix;
dh::LaunchN(device_id, position.Size(), [=] __device__(size_t idx) {
auto position = d_position[idx];
auto node = d_nodes[position];
bst_uint ridx = d_ridx[idx];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetElement(ridx, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
d_position[idx] = position;
});
}
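  // Add each row's leaf weight, scaled by the learning rate, to its cached
  // prediction, using the final leaf positions computed by FinalisePosition.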
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(cudaSetDevice(device_id));
if (!prediction_cache_initialised) {
dh::safe_cuda(cudaMemcpyAsync(prediction_cache.data(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
cudaMemcpyDefault));
}
prediction_cache_initialised = true;
CalcWeightTrainParam param_d(param);
dh::safe_cuda(
cudaMemcpyAsync(node_sum_gradients_d.data(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
cudaMemcpyHostToDevice));
auto d_position = position.Current();
auto d_ridx = ridx.Current();
auto d_node_sum_gradients = node_sum_gradients_d.data();
auto d_prediction_cache = prediction_cache.data();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(cudaMemcpy(
out_preds_d, prediction_cache.data(),
prediction_cache.size() * sizeof(bst_float), cudaMemcpyDefault));
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.StartCuda("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
shard_idx,
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
ellpack_matrix.BinCount() *
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
reducer->Synchronize(device_id);
monitor.StopCuda("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// If we are using a single GPU, build the histogram for the node with the
// fewest training instances
// If we are distributed, don't bother
if (reducer->IsSingleGPU()) {
bool fewer_right =
ridx_segments[nidx_right].Size() < ridx_segments[nidx_left].Size();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
nidx_parent, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(nidx_parent, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
GradStats left_stats;
left_stats.Add(candidate.split.left_sum);
GradStats right_stats;
right_stats.Add(candidate.split.right_sum);
GradStats parent_sum;
parent_sum.Add(left_stats);
parent_sum.Add(right_stats);
node_value_constraints.resize(tree.GetNodes().size());
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(param, parent_sum);
auto left_weight =
node_value_constraints[candidate.nid].CalcWeight(param, left_stats)*param.learning_rate;
auto right_weight =
node_value_constraints[candidate.nid].CalcWeight(param, right_stats)*param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.sum_hess);
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), left_stats, right_stats,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
}
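  // Set up the root node: reduce the gradient pairs into the root gradient
  // sum, all-reduce it across shards, build and all-reduce the root histogram,
  // record the root weight, and push the first candidate split onto the
  // expand queue.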
void InitRoot(RegTree* p_tree, HostDeviceVector<GradientPair>* gpair_all,
dh::AllReducer* reducer, int64_t num_columns) {
constexpr int kRootNIdx = 0;
const auto &gpair = gpair_all->DeviceSpan(device_id);
dh::SumReduction(temp_memory, gpair, node_sum_gradients_d,
gpair.size());
reducer->AllReduceSum(
shard_idx, reinterpret_cast<float*>(node_sum_gradients_d.data()),
reinterpret_cast<float*>(node_sum_gradients_d.data()), 2);
reducer->Synchronize(device_id);
dh::safe_cuda(cudaMemcpy(node_sum_gradients.data(),
node_sum_gradients_d.data(), sizeof(GradientPair),
cudaMemcpyDeviceToHost));
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
p_tree->Stat(kRootNIdx).sum_hess = node_sum_gradients[kRootNIdx].GetHess();
auto weight = CalcWeight(param, node_sum_gradients[kRootNIdx]);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateSplits({kRootNIdx}, *p_tree, num_columns);
qexpand->push(
ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split.at(0), 0));
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
monitor.StartCuda("Reset");
this->Reset(gpair_all, p_fmat->Info().num_col_);
monitor.StopCuda("Reset");
monitor.StartCuda("InitRoot");
this->InitRoot(p_tree, gpair_all, reducer, p_fmat->Info().num_col_);
monitor.StopCuda("InitRoot");
auto timestamp = qexpand->size();
auto num_leaves = 1;
while (!qexpand->empty()) {
ExpandEntry candidate = qexpand->top();
qexpand->pop();
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.StartCuda("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.StopCuda("UpdatePosition");
monitor.StartCuda("BuildHist");
this->BuildHistLeftRight(candidate.nid, left_child_nidx, right_child_nidx, reducer);
monitor.StopCuda("BuildHist");
monitor.StartCuda("EvaluateSplits");
auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx},
*p_tree, p_fmat->Info().num_col_);
monitor.StopCuda("EvaluateSplits");
qexpand->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx), splits.at(0),
timestamp++));
qexpand->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
splits.at(1), timestamp++));
}
}
monitor.StartCuda("FinalisePosition");
this->FinalisePosition(p_tree);
monitor.StopCuda("FinalisePosition");
}
};
template <typename GradientSumT>
struct SharedMemHistBuilder : public GPUHistBuilderBase<GradientSumT> {
void Build(DeviceShard<GradientSumT>* shard, int nidx) override {
auto segment = shard->ridx_segments[nidx];
auto segment_begin = segment.begin;
auto d_node_hist = shard->hist.GetNodeHistogram(nidx);
auto d_ridx = shard->ridx.Current();
auto d_gpair = shard->gpair.data();
auto n_elements = segment.Size() * shard->ellpack_matrix.row_stride;
const size_t smem_size = sizeof(GradientSumT) * shard->ellpack_matrix.BinCount();
const int items_per_thread = 8;
const int block_threads = 256;
const int grid_size =
static_cast<int>(dh::DivRoundUp(n_elements,
items_per_thread * block_threads));
if (grid_size <= 0) {
return;
}
SharedMemHistKernel<<<grid_size, block_threads, smem_size>>>(
shard->ellpack_matrix, d_ridx, d_node_hist.data(), d_gpair,
segment_begin, n_elements);
}
};
template <typename GradientSumT>
struct GlobalMemHistBuilder : public GPUHistBuilderBase<GradientSumT> {
void Build(DeviceShard<GradientSumT>* shard, int nidx) override {
Segment segment = shard->ridx_segments[nidx];
auto d_node_hist = shard->hist.GetNodeHistogram(nidx).data();
bst_uint* d_ridx = shard->ridx.Current();
GradientPair* d_gpair = shard->gpair.data();
size_t const n_elements = segment.Size() * shard->ellpack_matrix.row_stride;
auto d_matrix = shard->ellpack_matrix;
dh::LaunchN(shard->device_id, n_elements, [=] __device__(size_t idx) {
int ridx = d_ridx[(idx / d_matrix.row_stride) + segment.begin];
// lookup the index (bin) of histogram.
int gidx = d_matrix.gidx_iter[ridx * d_matrix.row_stride + idx % d_matrix.row_stride];
if (gidx != d_matrix.null_gidx_value) {
AtomicAddGpair(d_node_hist + gidx, d_gpair[ridx]);
}
});
}
};
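// Allocate device storage for the shard, compress the input rows into the
// ELLPack buffer, and choose a histogram builder: the shared-memory variant if
// a whole node histogram fits in shared memory, the global-memory variant
// otherwise.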
template <typename GradientSumT>
inline void DeviceShard<GradientSumT>::InitCompressedData(
const common::HistCutMatrix& hmat, const SparsePage& row_batch, bool is_dense) {
size_t row_stride = this->InitRowPtrs(row_batch);
n_bins = hmat.row_ptr.back();
int null_gidx_value = hmat.row_ptr.back();
int max_nodes =
param.max_leaves > 0 ? param.max_leaves * 2 : MaxNodesDepth(param.max_depth);
ba.Allocate(device_id,
&gpair, n_rows,
&ridx, n_rows,
&position, n_rows,
&prediction_cache, n_rows,
&node_sum_gradients_d, max_nodes,
&feature_segments, hmat.row_ptr.size(),
&gidx_fvalue_map, hmat.cut.size(),
&min_fvalue, hmat.min_val.size(),
&monotone_constraints, param.monotone_constraints.size());
dh::CopyVectorToDeviceSpan(gidx_fvalue_map, hmat.cut);
dh::CopyVectorToDeviceSpan(min_fvalue, hmat.min_val);
dh::CopyVectorToDeviceSpan(feature_segments, hmat.row_ptr);
dh::CopyVectorToDeviceSpan(monotone_constraints, param.monotone_constraints);
node_sum_gradients.resize(max_nodes);
ridx_segments.resize(max_nodes);
// allocate compressed bin data
int num_symbols = n_bins + 1;
// Required buffer size for storing data matrix in ELLPack format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows,
num_symbols);
CHECK(!(param.max_leaves == 0 && param.max_depth == 0))
<< "Max leaves and max depth cannot both be unconstrained for "
"gpu_hist.";
ba.Allocate(device_id, &gidx_buffer, compressed_size_bytes);
thrust::fill(
thrust::device_pointer_cast(gidx_buffer.data()),
thrust::device_pointer_cast(gidx_buffer.data() + gidx_buffer.size()), 0);
this->CreateHistIndices(row_batch, row_stride, null_gidx_value);
ellpack_matrix.Init(
feature_segments, min_fvalue,
gidx_fvalue_map, row_stride,
common::CompressedIterator<uint32_t>(gidx_buffer.data(), num_symbols),
is_dense, null_gidx_value);
  // check if we can use shared memory for building histograms
  // (assuming we need at least 2 CTAs per SM to maintain decent latency hiding)
auto histogram_size = sizeof(GradientSumT) * hmat.row_ptr.back();
auto max_smem = dh::MaxSharedMemory(device_id);
if (histogram_size <= max_smem) {
hist_builder.reset(new SharedMemHistBuilder<GradientSumT>);
} else {
hist_builder.reset(new GlobalMemHistBuilder<GradientSumT>);
}
// Init histogram
hist.Init(device_id, hmat.NumBins());
}
template <typename GradientSumT>
inline void DeviceShard<GradientSumT>::CreateHistIndices(
const SparsePage& row_batch, size_t row_stride, int null_gidx_value) {
int num_symbols = n_bins + 1;
// bin and compress entries in batches of rows
size_t gpu_batch_nrows =
std::min
(dh::TotalMemory(device_id) / (16 * row_stride * sizeof(Entry)),
static_cast<size_t>(n_rows));
const std::vector<Entry>& data_vec = row_batch.data.HostVector();
thrust::device_vector<Entry> entries_d(gpu_batch_nrows * row_stride);
size_t gpu_nbatches = dh::DivRoundUp(n_rows, gpu_batch_nrows);
for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) {
size_t batch_row_begin = gpu_batch * gpu_batch_nrows;
size_t batch_row_end = (gpu_batch + 1) * gpu_batch_nrows;
if (batch_row_end > n_rows) {
batch_row_end = n_rows;
}
size_t batch_nrows = batch_row_end - batch_row_begin;
// number of entries in this batch.
size_t n_entries = row_ptrs[batch_row_end] - row_ptrs[batch_row_begin];
// copy data entries to device.
dh::safe_cuda
(cudaMemcpy
(entries_d.data().get(), data_vec.data() + row_ptrs[batch_row_begin],
n_entries * sizeof(Entry), cudaMemcpyDefault));
const dim3 block3(32, 8, 1); // 256 threads
const dim3 grid3(dh::DivRoundUp(n_rows, block3.x),
dh::DivRoundUp(row_stride, block3.y), 1);
CompressBinEllpackKernel<<<grid3, block3>>>
(common::CompressedBufferWriter(num_symbols),
gidx_buffer.data(),
row_ptrs.data().get() + batch_row_begin,
entries_d.data().get(),
gidx_fvalue_map.data(), feature_segments.data(),
batch_row_begin, batch_nrows,
row_ptrs[batch_row_begin],
row_stride, null_gidx_value);
}
// free the memory that is no longer needed
row_ptrs.resize(0);
row_ptrs.shrink_to_fit();
entries_d.resize(0);
entries_d.shrink_to_fit();
}
template <typename GradientSumT>
class GPUHistMakerSpecialised{
public:
GPUHistMakerSpecialised() : initialised_{false}, p_last_fmat_{nullptr} {}
void Init(
const std::vector<std::pair<std::string, std::string>>& args) {
param_.InitAllowUnknown(args);
hist_maker_param_.InitAllowUnknown(args);
CHECK(param_.n_gpus != 0) << "Must have at least one device";
n_devices_ = param_.n_gpus;
dist_ = GPUDistribution::Block(GPUSet::All(param_.gpu_id, param_.n_gpus));
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.StartCuda("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (tsoobgx::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
}
dh::safe_cuda(cudaGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.StopCuda("Update");
}
void InitDataOnce(DMatrix* dmat) {
info_ = &dmat->Info();
int n_devices = dist_.Devices().Size();
device_list_.resize(n_devices);
for (int index = 0; index < n_devices; ++index) {
int device_id = dist_.Devices().DeviceId(index);
device_list_[index] = device_id;
}
reducer_.Init(device_list_);
auto batch_iter = dmat->GetRowBatches().begin();
const SparsePage& batch = *batch_iter;
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
// Create device shards
shards_.resize(n_devices);
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
dh::safe_cuda(cudaSetDevice(dist_.Devices().DeviceId(idx)));
size_t start = dist_.ShardStart(info_->num_row_, idx);
size_t size = dist_.ShardSize(info_->num_row_, idx);
shard = std::unique_ptr<DeviceShard<GradientSumT>>(
new DeviceShard<GradientSumT>(dist_.Devices().DeviceId(idx), idx,
start, start + size, param_,
column_sampling_seed));
});
// Find the cuts.
monitor_.StartCuda("Quantiles");
common::DeviceSketch(batch, *info_, param_, &hmat_, hist_maker_param_.gpu_batch_nrows);
n_bins_ = hmat_.row_ptr.back();
monitor_.StopCuda("Quantiles");
auto is_dense = info_->num_nonzero_ == info_->num_row_ * info_->num_col_;
monitor_.StartCuda("BinningCompression");
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
dh::safe_cuda(cudaSetDevice(shard->device_id));
shard->InitCompressedData(hmat_, batch, is_dense);
});
monitor_.StopCuda("BinningCompression");
++batch_iter;
CHECK(batch_iter.AtEnd()) << "External memory not supported";
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat) {
if (!initialised_) {
monitor_.StartCuda("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.StopCuda("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(const std::vector<RegTree>& local_trees) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_trees.front().Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree;
reference_tree.Load(&fs);
for (const auto& tree : local_trees) {
CHECK(tree == reference_tree);
}
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.StartCuda("InitData");
this->InitData(gpair, p_fmat);
monitor_.StopCuda("InitData");
std::vector<RegTree> trees(shards_.size());
for (auto& tree : trees) {
tree = *p_tree;
}
gpair->Reshard(dist_);
// Launch one thread for each device "shard" containing a subset of rows.
// Threads will cooperatively build the tree, synchronising over histograms.
// Each thread will redundantly build its own copy of the tree
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
shard->UpdateTree(gpair, p_fmat, &trees.at(idx), &reducer_);
});
// All trees are expected to be identical
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(trees);
}
// Write the output tree
*p_tree = trees.front();
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (shards_.empty() || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.StartCuda("UpdatePredictionCache");
p_out_preds->Shard(dist_.Devices());
dh::ExecuteIndexShards(
&shards_,
[&](int idx, std::unique_ptr<DeviceShard<GradientSumT>>& shard) {
dh::safe_cuda(cudaSetDevice(shard->device_id));
shard->UpdatePredictionCache(
p_out_preds->DevicePointer(shard->device_id));
});
monitor_.StopCuda("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
common::HistCutMatrix hmat_; // NOLINT
MetaInfo* info_; // NOLINT
std::vector<std::unique_ptr<DeviceShard<GradientSumT>>> shards_; // NOLINT
private:
bool initialised_;
int n_devices_;
int n_bins_;
GPUHistMakerTrainParam hist_maker_param_;
common::GHistIndexMatrix gmat_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_;
GPUDistribution dist_;
common::Monitor monitor_;
/*! List storing device id. */
std::vector<int> device_list_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Init(
const std::vector<std::pair<std::string, std::string>>& args) override {
hist_maker_param_.InitAllowUnknown(args);
float_maker_.reset();
double_maker_.reset();
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->Init(args);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->Init(args);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
TSOOBGX_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace tsoobgx
|
c0ec9a7055571ea0f443aaf3149c5d435195fb33.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#if TORCH_HIP_VERSION >= 7050
#include <hip/hip_fp16.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/relu_op.h"
namespace caffe2 {
namespace {
__global__ void ReluKernelHalf(const int N, const half* X, half* Y) {
const half kZero = __float2half(0.0);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
Y[i] = __hgt(X[i], kZero) ? X[i] : kZero;
#else
Y[i] = (__half2float(X[i]) > 0) ? X[i] : kZero;
#endif
}
}
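// Vectorised variant that processes two half values per thread. On sm_53+ the
// __hgt2 comparison yields 1.0 or 0.0 per lane, so multiplying the mask by X
// zeroes the negative lanes; older architectures fall back to float math.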
__global__ void ReluKernelHalf2(const int N, const half2* X, half2* Y) {
const half2 kZero = __float2half2_rn(0.0);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
Y[i] = __hmul2(__hgt2(X[i], kZero), X[i]);
#else
float2 xx = __half22float2(X[i]);
Y[i] = __floats2half2_rn(xx.x > 0 ? xx.x : 0.f,
xx.y > 0 ? xx.y : 0.f);
#endif
}
}
__global__ void ReluGradientKernelHalf(
const int N, const half* Y, const half* dY, half* dX) {
const half kZero = __float2half(0.0);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
dX[i] = __hgt(Y[i], kZero) ? dY[i] : kZero;
#else
dX[i] = (__half2float(Y[i]) > 0) ? dY[i] : kZero;
#endif
}
}
} // namespace
template <>
bool ReluOp<float16, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
CAFFE_DCHECK_GT(X.size(), 0);
Y->ReshapeLike(X);
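  // Use the vectorised half2 kernel when the element count is even; otherwise
  // fall back to the scalar half kernel.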
if (X.size() % 2 == 0) {
hipLaunchKernelGGL(( ReluKernelHalf2), dim3(CAFFE_GET_BLOCKS(X.size() / 2)), dim3(CAFFE_CUDA_NUM_THREADS),
0, device_context_.cuda_stream(),
X.size() / 2, reinterpret_cast<const half2*>(X.data<float16>()),
reinterpret_cast<half2*>(Y->mutable_data<float16>()));
return true;
} else {
hipLaunchKernelGGL(( ReluKernelHalf), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS),
0, device_context_.cuda_stream(),
X.size(), reinterpret_cast<const half*>(X.data<float16>()),
reinterpret_cast<half*>(Y->mutable_data<float16>()));
return true;
}
}
template <>
bool ReluGradientOp<float16, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
CAFFE_DCHECK_GT(Y.size(), 0);
CAFFE_DCHECK_EQ(dY.size(), Y.size());
dX->ReshapeLike(Y);
hipLaunchKernelGGL(( ReluGradientKernelHalf), dim3(CAFFE_GET_BLOCKS(Y.size())), dim3(CAFFE_CUDA_NUM_THREADS),
0, device_context_.cuda_stream(),
Y.size(), reinterpret_cast<const half*>(Y.data<float16>()),
reinterpret_cast<const half*>(dY.data<float16>()),
reinterpret_cast<half*>(dX->mutable_data<float16>()));
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(ReluFp16, ReluOp<float16, CUDAContext>);
REGISTER_CUDA_OPERATOR(ReluFp16Gradient, ReluGradientOp<float16, CUDAContext>);
} // namespace
} // namespace caffe2
#endif // TORCH_HIP_VERSION >= 7050
| c0ec9a7055571ea0f443aaf3149c5d435195fb33.cu | #include <cuda.h>
#if CUDA_VERSION >= 7050
#include <cuda_fp16.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/relu_op.h"
namespace caffe2 {
namespace {
__global__ void ReluKernelHalf(const int N, const half* X, half* Y) {
const half kZero = __float2half(0.0);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
Y[i] = __hgt(X[i], kZero) ? X[i] : kZero;
#else
Y[i] = (__half2float(X[i]) > 0) ? X[i] : kZero;
#endif
}
}
__global__ void ReluKernelHalf2(const int N, const half2* X, half2* Y) {
const half2 kZero = __float2half2_rn(0.0);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
Y[i] = __hmul2(__hgt2(X[i], kZero), X[i]);
#else
float2 xx = __half22float2(X[i]);
Y[i] = __floats2half2_rn(xx.x > 0 ? xx.x : 0.f,
xx.y > 0 ? xx.y : 0.f);
#endif
}
}
__global__ void ReluGradientKernelHalf(
const int N, const half* Y, const half* dY, half* dX) {
const half kZero = __float2half(0.0);
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
dX[i] = __hgt(Y[i], kZero) ? dY[i] : kZero;
#else
dX[i] = (__half2float(Y[i]) > 0) ? dY[i] : kZero;
#endif
}
}
} // namespace
template <>
bool ReluOp<float16, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
CAFFE_DCHECK_GT(X.size(), 0);
Y->ReshapeLike(X);
if (X.size() % 2 == 0) {
ReluKernelHalf2<<<CAFFE_GET_BLOCKS(X.size() / 2), CAFFE_CUDA_NUM_THREADS,
0, device_context_.cuda_stream()>>>(
X.size() / 2, reinterpret_cast<const half2*>(X.data<float16>()),
reinterpret_cast<half2*>(Y->mutable_data<float16>()));
return true;
} else {
ReluKernelHalf<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS,
0, device_context_.cuda_stream()>>>(
X.size(), reinterpret_cast<const half*>(X.data<float16>()),
reinterpret_cast<half*>(Y->mutable_data<float16>()));
return true;
}
}
template <>
bool ReluGradientOp<float16, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
CAFFE_DCHECK_GT(Y.size(), 0);
CAFFE_DCHECK_EQ(dY.size(), Y.size());
dX->ReshapeLike(Y);
ReluGradientKernelHalf<<<CAFFE_GET_BLOCKS(Y.size()), CAFFE_CUDA_NUM_THREADS,
0, device_context_.cuda_stream()>>>(
Y.size(), reinterpret_cast<const half*>(Y.data<float16>()),
reinterpret_cast<const half*>(dY.data<float16>()),
reinterpret_cast<half*>(dX->mutable_data<float16>()));
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(ReluFp16, ReluOp<float16, CUDAContext>);
REGISTER_CUDA_OPERATOR(ReluFp16Gradient, ReluGradientOp<float16, CUDAContext>);
} // namespace
} // namespace caffe2
#endif // CUDA_VERSION >= 7050
|
0e6a4da34e26951820c130b128564803472b79ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************
*
* File: knapsack_gpu_randomstart_oahttslf.cu
* Author: Alex Stivala & Peter J Stuckey
* Created: April 2009
*
* $Id: knapsack_gpu_randomstart_nr_oahttslf.cu 4616 2013-01-25 01:07:43Z astivala $
*
* This is the GPU implementation using the oahttslf GPU lock free hash table.
* This version does not use recursion.
* This version has an additional feature in which we try
* stating some threads at random (i,w) positions in the problem rather
* than starting all at the top (solution) values of (i, w)
*
*
* Usage: knapsack_gpu_randomstart_oahttslf [-nvy] [-r threads] < problemspec
* -v: Verbose output
* -n: assume no name in the first line of the file
 * -y: show instrumentation summary line (like -t but one line summary)
*
* The problemspec is in the format generated by gen2.c from David Pisinger
* (http://www.diku.dk/hjemmesider/ansatte/pisinger/codes.html):
*
* numitems
* 1 profit_1 weight_1
* 2 profit_2 weight_2
* ...
* numitems profit_numitems weight_numitems
* capacity
*
* all profits and weights are positive integers.
*
*
* Requires CUDA 4.x and device with compute capability 2.x and higher
* as it uses atomic CAS on 64 bits.
* Also printf() from device function for debug.
 * It uses the CURAND library for pseudorandom number generation.
*
* Preprocessor symbols:
*
* DEBUG - compile in lots of debugging code.
* USE_SHARED - use shared memory not global for stacks
 * USE_HTTSLF - use httslf gpu kernel instead of oahttslf gpu kernel
* USE_INSTRUMENT - compile in (per-thread) instrumentation counts.
*
*****************************************************************************/
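/*
 * Added illustrative example (not part of the original distribution): a
 * minimal problemspec in the gen2.c format described above, assuming three
 * items with (profit, weight) pairs (60,10), (100,20), (120,30) and
 * capacity 50:
 *
 *   3
 *   1 60 10
 *   2 100 20
 *   3 120 30
 *   50
 *
 * For this instance the optimal profit is 220 (taking items 2 and 3).
 */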
#define NOTDEF_XXX_NORAND /* yes we DO want randomization */
#undef USE_SHARED
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <assert.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "cutil_inline.h"
#include "hiprand/hiprand_kernel.h"
// number of thread blocks
#define NUM_BLOCKS 512 //some sensible value for this? (65535 max)
// Number of threads per block
#define NUM_THREADS 32 //some sensible value for this? (1024 max)
// (these are defined prior to inclde of httslf_gpu_oahttslf_kernel.cu etc
// as they are used to define arrays for instrumentation if USE_INSTRUMENT)
#ifdef USE_HTTSLF
#undef ALLOW_DELETE /* never need to delete keys */
#include "httslf_gpu_kernel.cu"
#define HASHTABLE_TYPE httslf_entry_t**
#define INSERT_FUNCTION httslf_insert
#define LOOKUP_FUNCTION httslf_lookup
#else
#undef ALLOW_UPDATE /* never need to update a value once insterted */
#undef ALLOW_DELETE /* never need to delete keys */
#include "oahttslf_gpu_kernel.cu"
#define HASHTABLE_TYPE oahttslf_entry_t*
#define INSERT_FUNCTION oahttslf_insert
#define LOOKUP_FUNCTION oahttslf_lookup
#endif
#ifdef DEBUG
#define DEBUG_PRINT(x) printf x
#else
#define DEBUG_PRINT(x) /* nothing */
#endif
/*****************************************************************************
*
* global constants
*
*****************************************************************************/
#define MAXITEMS 1024 /* used for stack size */
// size of constant memory on device to store profit,weight for each item
#define MAX_KNAPSACK_ITEMS 512
/* dodgy: 0 is the OAHTTSLF_EMPTY_KEY and OAHTTSLF_EMPTY_VALUE value,
so if key or value is 0 set it to MAGIC_ZERO instead */
#define MAGIC_ZERO 0xffffffffffffffff
/*****************************************************************************
*
* type definitions
*
*****************************************************************************/
typedef unsigned long long uint64_t;
typedef unsigned int uint32_t;
typedef unsigned short uint16_t;
typedef unsigned char uint8_t;
/* definition of type for an item */
typedef struct item_s
{
unsigned int profit;
unsigned int weight;
} item_t;
typedef unsigned int counter_t;
typedef struct stats_s
{
counter_t reuse; /* number of times value already found in hashtable */
counter_t hashcount; /* number of times value computed & stored in ht */
} stats_t;
/*****************************************************************************
*
* device constant data
*
*****************************************************************************/
// array of weight and profit for each item. indexed 1..NUM_KNAPSACK_ITEMS,
// element 0 is unused
__constant__ item_t c_KNAPSACK_ITEMS[MAX_KNAPSACK_ITEMS+1];
const char *c_KNAPSACK_ITEMS_symbol = "c_KNAPSACK_ITEMS";
/*****************************************************************************
*
* device global data
*
*****************************************************************************/
__device__ volatile bool finished = false; // set when a thread has finished the computation
#ifdef USE_INSTRUMENT
/* instrumentation totals (summed over all threads) */
__device__ counter_t total_reuse = 0, total_hashcount = 0;
#endif
/*****************************************************************************
*
* device shared data
*
*****************************************************************************/
#ifdef USE_SHARED
//
// per-thread stacks, using shared memory for speed
//
// index as [ threadIdx.x * (MAXITEMS+1) + stackindex ]
#define STACKINDEX(i) (threadIdx.x * (MAXITEMS+1) + (i))
__shared__ bool stackc[NUM_THREADS * (MAXITEMS+1)]; /* call or answer */
__shared__ unsigned int stacki[NUM_THREADS * (MAXITEMS+1)];
__shared__ unsigned int stackw[NUM_THREADS * (MAXITEMS+1)];
#else
// instead of shared, use global memory allocated with hipMalloc
// (to avoid linker errors if size gets too large)
// index as [ thread_id * (MAXITEMS+1) + stackindex ]
// NB the macro below depends on local variable
// tid = blockIdx.x*blockDim.x+threadIdx.x
#define STACKINDEX(i) (tid * (MAXITEMS+1) + (i))
#define TOTAL_NUM_THREADS (NUM_THREADS * NUM_BLOCKS)
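/*
 * Added note: STACKINDEX flattens (thread, depth) into the 1-D arrays below;
 * e.g. for tid == 3 and stack depth 7 it selects element 3*(MAXITEMS+1) + 7,
 * so each of the TOTAL_NUM_THREADS threads owns a private stack of
 * MAXITEMS+1 slots.
 */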
__device__ bool *stackc; /* call or answer */
__device__ unsigned int *stacki;
__device__ unsigned int *stackw;
#endif /* USE_SHARED */
#ifdef USE_INSTRUMENT
/* per-thread instrumentation */
/* initialize with knapsack_reset_stats() */
__shared__ stats_t stats[NUM_THREADS];
#endif
/*****************************************************************************
*
* host static data
*
*****************************************************************************/
static bool printstats; /* whether to print call stats */
static bool verbose; /* verbose output */
static bool show_stats_summary = 0; /* -y summary instrumentation stats */
static unsigned int CAPACITY; /* total capacity for the problem */
static unsigned int NUM_KNAPSACK_ITEMS; /* number of items */
static item_t *KNAPSACK_ITEMS; /* array of item profits and weights (0 unused)*/
/*****************************************************************************
*
* host static functions
*
*****************************************************************************/
/*
* Read the input from stdin in the gen2.c format:
*
* numitems
* 1 profit_1 weight_1
* 2 profit_2 weight_2
* ...
* numitems profit_numitems weight_numitems
* capacity
*
* all profits and weights are positive integers.
*
* Parameters:
* None.
* Return value:
* None.
* Uses global data (write):
* KNAPSACK_ITEMS - allocates array, sets profit and weight for each item
* CAPACITY - sets capacity for problem
* NUM_KNAPSACK_ITEMS - number of items
*/
static void readdata(void)
{
unsigned int i,inum;
if (scanf("%d", &NUM_KNAPSACK_ITEMS) != 1)
{
fprintf(stderr, "ERROR reading number of items\n");
exit(EXIT_FAILURE);
}
if (!(KNAPSACK_ITEMS = (item_t *)malloc((NUM_KNAPSACK_ITEMS+1) * sizeof(item_t))))
{
fprintf(stderr,"malloc KNAPSACK_ITEMS failed\n");
exit(EXIT_FAILURE);
}
for (i = 1; i <= NUM_KNAPSACK_ITEMS; i++)
{
if(scanf("%d %d %d", &inum, &KNAPSACK_ITEMS[i].profit, &KNAPSACK_ITEMS[i].weight) != 3)
{
fprintf(stderr, "ERROR reading item %d\n", i);
exit(EXIT_FAILURE);
}
if (inum != i)
{
fprintf(stderr, "ERROR expecting item %d got %d\n", i, inum);
exit(EXIT_FAILURE);
}
}
if (scanf("%d", &CAPACITY) != 1)
{
fprintf(stderr, "ERROR reading capacity\n");
exit(EXIT_FAILURE);
}
}
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0.
(from GNU libc manual) */
int
timeval_subtract (struct timeval *result, struct timeval *x,
struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
/*****************************************************************************
*
* device functions
*
*****************************************************************************/
#ifdef USE_INSTRUMENT
/*
* knapsack_reset_stats()
*
 * Initialize the per-thread instrumentation counters in shared memory
*
* Parameters: None
* Return value: None
*/
__device__ void knapsack_reset_stats(void)
{
for (int i = threadIdx.x; i < NUM_THREADS; i += blockDim.x)
{
stats[i].reuse = 0;
stats[i].hashcount = 0;
}
__syncthreads();
}
/*
* knapsack_sum_stats()
*
* Sum up the per-thread counters
*
* Also instead of doing a proper efficient reduction just uses atomicAdd
 * (on globals), also requires compute 2.x (only debug/instrumentation,
* no need for efficiency here, just keep it simple).
*
* Parameters: None
 * Return value: None
*/
__device__ void knapsack_sum_stats(void)
{
unsigned int block_reuse = 0;
unsigned int block_hashcount = 0;
__syncthreads();
if (threadIdx.x == 0)
{
for (int i = 0; i < NUM_THREADS; i++)
{
block_reuse += stats[i].reuse;
block_hashcount += stats[i].hashcount;
}
atomicAdd(&total_reuse, block_reuse);
atomicAdd(&total_hashcount, block_hashcount);
}
}
#endif /* USE_INSTRUMENT */
/*
* insert_indices()
*
* Insert value for (i,j) into the hashtable
*
* Parameters:
* i,j - indices to build insertion key
* value - value to insert for the key
*
* Uses global data:
* devHashTable
*
* Return value:
* None.
*/
__device__ void insert_indices(unsigned int i, unsigned int j,
unsigned int value, HASHTABLE_TYPE devHashtable)
{
uint64_t key, val64;
#ifdef USE_HTTSLF
key = ((uint64_t)i << 32) | (j & 0xffffffff);
val64 = (uint64_t)value;
#else
key = (i == 0 && j == 0 ? MAGIC_ZERO :
((uint64_t)i << 32) | (j & 0xffffffff));
val64 = (value == 0 ? MAGIC_ZERO : (uint64_t)value);
#endif
#ifdef USE_INSTRUMENT
stats[threadIdx.x].hashcount++;
#endif
INSERT_FUNCTION(devHashtable, key, val64);
}
/*
* lookup_indices()
*
* Get the value for (i,j) from the hashtable
*
* Parameters:
* i,j - indices to build key for lookup
* pvalue - (OUTPUT) value for key, only set if true returned
*
* Uses global data:
* devHashTable (readonly)
*
* Return value:
* true if found, false otherwise
*/
__device__ bool lookup_indices(unsigned int i, unsigned int j,
unsigned int *pvalue, HASHTABLE_TYPE devHashtable)
{
uint64_t key;
uint64_t val64 = 0;
bool found;
#ifdef USE_HTTSLF
key = ((uint64_t)i << 32) | (j & 0xffffffff);
#else
key = (i == 0 && j == 0 ? MAGIC_ZERO :
((uint64_t)i << 32) | (j & 0xffffffff));
#endif
found = LOOKUP_FUNCTION(devHashtable, key, &val64);
  // avoid branch by using ternary operator to set to 0 if not found
  // since val64 is initialized to 0 (note this depends on LOOKUP_FUNCTION
  // not setting return parameter &val64 if not found)
*pvalue = ((val64 == MAGIC_ZERO) ? 0 : (unsigned int)val64);
return found;
}
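/*
 * Added illustrative note on the key packing used above: for (i=2, j=5) the
 * 64-bit key is ((uint64_t)2 << 32) | 5 == 0x0000000200000005. In the
 * oahttslf build the (0,0) pair (and a value of 0) is remapped to MAGIC_ZERO
 * so it cannot collide with the empty-slot sentinel 0.
 */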
/*
* lookk - lookup a computed value for (i,w) from the hash table
*
* Parameters:
* i,w - indices to build key for lookup
* pvalue - (OUTPUT) value for key, only set if true returned
* devHashtable - the hash table on the device
* Return value:
* The value p (profit) in the table for (i,w) or 0 if not there
*
*/
__device__ unsigned int lookk(unsigned int i, unsigned int w,
HASHTABLE_TYPE devHashtable)
{
unsigned int p = 0;
lookup_indices(i, w, &p, devHashtable);
return p;
}
/*
* dp_knapsack_nr()
*
* This version is multi-threaded, sharing hashtable used to
* store computed values between the threads.
* This version is not recursive, it maintains its own explicit stack.
* This function is called by dp_knapsack_thread()
* with identical instances running
* in several threads. This functino itself is not recursive
* (and never creates threads itself) and diverges as there is
* a random choice as to which of the paths we take first; we use
* parallelism to explore the search space concurrently with
* diverged paths due to this choice, but still reusing computed
* values by the shared lock-free hashtable.
*
*
* This version uses no bounding.
*
* Parameters: i - item index
* w - total weight
* state - CURAND state for random number generation
* devHashtable - the hash table
*
* global constant memory:
*
* c_KNAPSACK_ITEMS - array of profit and weight for each item
*
* global memory:
 * finished (readonly) - checked to see if computation is done
*
* Return value:
* value of d.p. at (i,w)
*
*
*/
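/*
 * Added sketch of the recurrence the stack machine below evaluates (derived
 * from the code itself, with profit_i/weight_i taken from c_KNAPSACK_ITEMS):
 *
 *   f(1,w) = profit_1 if w >= weight_1, else 0
 *   f(i,w) = max( f(i-1,w),
 *                 f(i-1, w - weight_i) + profit_i if w >= weight_i, else 0 )
 *
 * A "call" entry (c == true) pushes any missing subproblems; an "answer"
 * entry (c == false) combines their memoised values from the hash table.
 */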
__device__ unsigned int dp_knapsack_nr(unsigned int i, unsigned int w,
hiprandState_t *state, HASHTABLE_TYPE devHashtable)
{
int tid=blockIdx.x*blockDim.x+threadIdx.x;
hiprandState_t localState = state[tid];
unsigned int p;
bool c;
int top = 1;
unsigned int oi = i, ow = w;
#ifdef DEBUG
printf("knapsack_nr(%d,%d)\n",i,w);
#endif
stacki[STACKINDEX(top)] = i;
stackw[STACKINDEX(top)] = w;
stackc[STACKINDEX(top)] = true;
while(top) {
assert(threadIdx.x < NUM_THREADS);
assert(top <= MAXITEMS);
if (finished)
return 0;
i = stacki[STACKINDEX(top)];
w = stackw[STACKINDEX(top)];
c = stackc[STACKINDEX(top)];
top--;
#ifdef DEBUG
printf("knapsack_nr(%d,%d,%d)\n",i,w,c);
#endif
if (c) {
if (i == 1) {
p = (w >= c_KNAPSACK_ITEMS[i].weight ? c_KNAPSACK_ITEMS[i].profit : 0);
#ifdef DEBUG
printf("knapsack_nr(%d,%d,%d) = %d\n",i,w,c,p);
#endif
insert_indices(i, w, p, devHashtable);
}
else if (lookk(i-1,w,devHashtable) > 0 && lookk(i-1,w - c_KNAPSACK_ITEMS[i].weight,devHashtable) > 0) {
p = MAX(lookk(i-1,w,devHashtable),
(w >= c_KNAPSACK_ITEMS[i].weight ? lookk(i-1,w - c_KNAPSACK_ITEMS[i].weight,devHashtable) + c_KNAPSACK_ITEMS[i].profit : 0));
#ifdef DEBUG
printf("knapsack_nr(%d,%d,%d) = %d\n",i,w,c,p);
#endif
insert_indices(i, w, p, devHashtable);
}
else {
#ifdef NOTDEF_XXX_NORAND
if (hiprand(&localState) & 1) {
top++;
stacki[STACKINDEX(top)] = i;
stackw[STACKINDEX(top)] = w;
stackc[STACKINDEX(top)] = false;
if (i >= 1 && lookk(i-1,w,devHashtable) == 0) {
top++;
stacki[STACKINDEX(top)] = i-1;
stackw[STACKINDEX(top)] = w;
stackc[STACKINDEX(top)] = true;
}
if (i >= 1 && w >= c_KNAPSACK_ITEMS[i].weight && lookk(i-1,w-c_KNAPSACK_ITEMS[i].weight,devHashtable) == 0) {
top++;
stacki[STACKINDEX(top)] = i-1;
stackw[STACKINDEX(top)] = w - c_KNAPSACK_ITEMS[i].weight;
stackc[STACKINDEX(top)] = true;
}
}
else {
#endif /*NOTDEF_XXX_NORAND*/
top++;
stacki[STACKINDEX(top)] = i;
stackw[STACKINDEX(top)] = w;
stackc[STACKINDEX(top)] = false;
if (i >= 1 && w >= c_KNAPSACK_ITEMS[i].weight && lookk(i-1,w-c_KNAPSACK_ITEMS[i].weight,devHashtable) == 0) {
top++;
stacki[STACKINDEX(top)] = i-1;
stackw[STACKINDEX(top)] = w - c_KNAPSACK_ITEMS[i].weight;
stackc[STACKINDEX(top)] = true;
}
if (i >= 1 && lookk(i-1,w,devHashtable) == 0) {
top++;
stacki[STACKINDEX(top)] = i-1;
stackw[STACKINDEX(top)] = w;
stackc[STACKINDEX(top)] = true;
}
#ifdef NOTDEF_XXX_NORAND
}
#endif /*NOTDEF_XXX_NORAND*/
}
} else {
p = MAX(lookk(i-1,w,devHashtable),
(w >= c_KNAPSACK_ITEMS[i].weight ? lookk(i-1,w - c_KNAPSACK_ITEMS[i].weight,devHashtable) +c_KNAPSACK_ITEMS[i].profit : 0));
#ifdef DEBUG
printf("knapsack_nr(%d,%d,%d) = %d\n",i,w,c,p);
#endif
insert_indices(i, w, p, devHashtable);
}
}
insert_indices(oi, ow, p, devHashtable);
#ifdef DEBUG
printf("knapsack_nr(%d,%d) = %d\n",i,w,p);
#endif
state[tid] = localState;
return p;
}
/*****************************************************************************
*
* global functions
*
*****************************************************************************/
/*
* init_rng()
*
* Initialize CURAND pseudrandom number generator
* See CUDA Toolkit 4.1 CURAND Guide (p.21)
*
* Parameters:
* state - CURAND state for random number generation
* seed - seed for CURAND init
*
*/
__global__ void init_rng(hiprandState_t *state, unsigned long long seed)
{
int tid=blockIdx.x*blockDim.x+threadIdx.x;
  /* give each thread the same seed, different sequence number, no offset */
hiprand_init(seed, tid, 0, &state[tid]);
}
/*
* dp_knapsack_kernel()
*
 * Caller interface to the multithreaded version: just calls the actual
* device function
*
*
 * Parameters:
* i - item index to start at
* w - total capacity to start at
* p - (output) score for this product set
* state - CURAND state for random number generation
* devHashtable - the hash table
*
* global memory:
 * finished - set when computation is done to tell other threads to end
*/
__global__ void dp_knapsack_kernel(unsigned int i, unsigned int w,
unsigned int *p, hiprandState_t *state,
HASHTABLE_TYPE devHashtable)
{
int tid=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int profit = 0;
#ifdef USE_INSTRUMENT
reset_counters();
#endif
//DEBUG_PRINT(("(0) blockIdx.x = %d blockDim.x = %d threadIx = %d tid = %d\n",
// blockIdx.x, blockDim.x, threadIdx.x, tid));
#ifdef DEBUG
for (int j = 0; j < i; j++)
{
printf("item %d: weight = %d profit = %d\n", j, c_KNAPSACK_ITEMS[j].weight, c_KNAPSACK_ITEMS[j].profit);
}
#endif
/* start half the threads at a random point. These threads will be
* in a "forever" loop, we only care about threads actually computing
* the required solution terminating - when one of them terminates the
   * solution is found. The random threads just go and start at another
* random point if they finish first.
*/
bool randomstart = (tid > 31);
if (randomstart)
{
/* experimental "random" case: just keep starting at random points
* hoping we compute something helpful to the solution. The idea is
     * to avoid large growth in unnecessary recomputation (h/h_0) for large
* number of threads
* TODO more sensible choices of points, not totally random
*/
while (!finished)
{
hiprandState_t localState = state[tid];
unsigned int random_i = hiprand(&localState) % i;
unsigned int random_w = hiprand(&localState) % w;
state[tid] = localState;
(void)dp_knapsack_nr(random_i, random_w, state, devHashtable);
}
}
else
{
/* this thread starts at the (i,w) value to solve the actual problem */
profit = dp_knapsack_nr(i, w, state, devHashtable);
finished = true;
__threadfence();// FIXME do we need some sort of sync or threadfence here?
//DEBUG_PRINT(("SET profit = %d (tid = %d)\n", profit, tid));
if (profit != 0)
*p = profit;
}
#ifdef USE_INSTRUMENT
#ifdef USE_HTTSLF
httslf_sumcounters();
#else
oahttslf_sum_stats();
#endif /* USE_HTTSLF */
knapsack_sum_stats();
#endif /* USE_INSTRUMENT */
}
/*****************************************************************************
*
* host main
*
*****************************************************************************/
/*
* print usage message and exit
*
*/
static void usage(const char *program)
{
fprintf(stderr,
"Usage: %s [-ntvy] < problemspec\n"
" -n: assume no name in the first line of the file\n"
" -t: show statistics of operations\n"
" -v: Verbose output\n"
" -y: show instrumentatino summary line (like -t but one line summary)\n",
program);
exit(EXIT_FAILURE);
}
/*
* main
*/
int main(int argc, char *argv[])
{
int i = 0;
char flags[100];
int c;
int otime, ttime, etime;
unsigned int profit =0;
unsigned int *d_profit ;
struct rusage starttime,totaltime,runtime,endtime,opttime;
struct timeval start_timeval,end_timeval,elapsed_timeval;
unsigned int t;
char name[100];
int noname = 0;
hipError_t rc;
hiprandState_t *devStates;
unsigned int hTimer;
size_t stacksize;
HASHTABLE_TYPE devHashtable;
#ifndef NOTDEF_XXX_NORAND
fprintf(stderr, "NO RANDOMIZATION\n");
#endif
strcpy(flags, "[NONE]");
while ((c = getopt(argc, argv, "nvyt?")) != -1)
{
switch(c) {
case 'v':
/* verbose output */
verbose = 1;
break;
case 't':
/* show stats */
printstats = 1;
break;
case 'n':
/* no name on first line of input */
noname = 1;
break;
case 'y':
      /* show statistics summary line of instrumentation */
show_stats_summary = 1;
break;
default:
usage(argv[0]);
break;
}
if (i < (int)sizeof(flags)-1)
flags[i++] = c;
}
if (i > 0)
flags[i] = '\0';
/* we should have no command line parameters */
if (optind != argc)
usage(argv[0]);
// Pick the best GPU available, or if the developer selects one at the command line
int devID = cutilChooseCudaDevice(argc, argv);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, devID);
if (verbose)
fprintf(stderr, "> GPU Device has Compute Capabilities SM %d.%d\n\n", deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x20) {
fprintf(stderr, "device with compute capability 2.0 or better is required\n");
exit(1);
}
// start time AFTER first CUDA call so as not to count the annoying
// and apparently unavoidable approx. 4 second init overhead
gettimeofday(&start_timeval, NULL);
// We need L1 cache to store the stack (only applicable to sm_20 and higher)
if ((rc = hipFuncSetCacheConfig(dp_knapsack_kernel,
hipFuncCachePreferL1)) != hipSuccess)
{
fprintf(stderr, "hipFuncSetCacheConfig failed %d\n", rc);
exit(1);
}
// per-thread stacks, hard limit of total of 512KB
// local memory per thread (where stack is stored), so cannot use
// all of that for stack either
// see http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities
const int STACKSIZE = 1024; /* in bytes */
if ((rc = hipDeviceSetLimit(hipLimitStackSize, STACKSIZE)) != hipSuccess)
{
fprintf(stderr, "hipDeviceSetLimit failed %d\n",rc);
exit(1);
}
if ((rc = hipDeviceGetLimit(&stacksize, hipLimitStackSize)) != hipSuccess)
{
fprintf(stderr, "hipDeviceGetLimit failed %d\n",rc);
exit(1);
}
if (verbose)
fprintf(stderr, "cuda stack size = %.1f KB\n", (double)stacksize/1024);
if (noname)
strcpy(name,"[NONE]\n");
else
fgets(name,sizeof(name)-1,stdin);
getrusage(RUSAGE_SELF, &starttime);
readdata(); /* read into the KNAPSACK_ITEMS array and set CAPACITY, NUM_KNAPSACK_ITEMS */
if (NUM_KNAPSACK_ITEMS > MAX_KNAPSACK_ITEMS)
{
fprintf(stderr,
"num knapsack items %d exceeds %d, increase MAX_KNAPSACK_ITEMS\n",
NUM_KNAPSACK_ITEMS, MAX_KNAPSACK_ITEMS);
exit(1);
}
if (NUM_KNAPSACK_ITEMS > MAXITEMS)
{
fprintf(stderr, "number of items %d is too large, increase MAXITEMS\n",
NUM_KNAPSACK_ITEMS);
exit(1);
}
cutCreateTimer(&hTimer) ;
cutResetTimer(hTimer) ;
cutStartTimer(hTimer) ;
/* copy the knapsack items to device constant memory */
if ((rc = hipMemcpyToSymbol(c_KNAPSACK_ITEMS_symbol, KNAPSACK_ITEMS,
(1+NUM_KNAPSACK_ITEMS)*sizeof(item_t)))!= hipSuccess)
{
fprintf(stderr, "cudaMemcpyTosymbol failed %d\n", rc);
}
cutStopTimer(hTimer);
if (verbose)
fprintf(stderr, "copy %.1f KB of knapsack data to constant memory: %f ms\n",
            (double)(NUM_KNAPSACK_ITEMS*sizeof(item_t))/1024.0,
cutGetTimerValue(hTimer));
cutResetTimer(hTimer) ;
cutStartTimer(hTimer) ;
/* allocate space on device for random number generator state */
if ((rc = hipMalloc((void **)&devStates,
NUM_BLOCKS*NUM_THREADS*sizeof(hiprandState_t))) != hipSuccess)
{
fprintf(stderr, "hipMalloc devStates failed %d\n", rc);
exit(1);
}
/* initialize device random number generator */
hipLaunchKernelGGL(( init_rng), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, devStates, time(NULL));
if ((rc = hipGetLastError()) != hipSuccess)
{
fprintf(stderr, "init_rng kernel error %d\n", rc);
}
cutilDeviceSynchronize();
if ((rc = hipGetLastError()) != hipSuccess)
{
fprintf(stderr, "init_rng sync error %d\n", rc);
}
if (verbose)
fprintf(stderr, "allocate and initialize CURAND device RNG for %d threads: %f ms\n",
NUM_BLOCKS*NUM_THREADS, cutGetTimerValue(hTimer));
cutResetTimer(hTimer) ;
cutStartTimer(hTimer) ;
#ifdef USE_HTTSLF
httslf_entry_t **devCellpool;
gettimeofday(&start_timeval, NULL);
/* allocate cell pool on device */
size_t cell_size = sizeof(httslf_entry_t);
unsigned int devCellpool_num_items = 67108864 ; /* 2^26 */
if (verbose)
fprintf(stderr, "devCellpool_num_items = %u\n", devCellpool_num_items);
size_t devCellpool_size = cell_size * devCellpool_num_items;
if ((rc = hipMalloc((void **)&devCellpool, devCellpool_size)) != hipSuccess)
{
fprintf(stderr, "hipMalloc devCellpool failed %d\n", rc);
exit(1);
}
gettimeofday(&end_timeval, NULL);
timeval_subtract(&elapsed_timeval, &end_timeval, &start_timeval);
etime = 1000 * elapsed_timeval.tv_sec + elapsed_timeval.tv_usec/1000;
if (verbose)
fprintf(stderr, "hipMalloc %.1f MB cellpool elapsed time %d ms\n",
(double)devCellpool_size/(1024*1024), etime);
  /* set globals on device for cell pool alloc */
if ((rc = hipMemcpyToSymbol("cellpool", &devCellpool, sizeof(httslf_entry_t *))) != hipSuccess)
{
fprintf(stderr, "hipMemcpyToSymbol cellpool failed %d\n", rc);
exit(1);
}
  /* set constants on device for cell pool alloc */
if ((rc = hipMemcpyToSymbol((const char *)"total_num_cells", &devCellpool_num_items, sizeof(devCellpool_num_items))) != hipSuccess)
{
fprintf(stderr, "hipMemcpyToSymbol poolsize failed%d\n",rc);
exit(1);
}
gettimeofday(&start_timeval, NULL);
/* allocate hashtable on device */
if ((rc = hipMalloc((void **)&devHashtable,
HTTSLF_SIZE*sizeof(httslf_entry_t *))) != hipSuccess)
{
fprintf(stderr, "hipMalloc devHashtable failed %d\n", rc);
exit(1);
}
gettimeofday(&end_timeval, NULL);
timeval_subtract(&elapsed_timeval, &end_timeval, &start_timeval);
etime = 1000 * elapsed_timeval.tv_sec + elapsed_timeval.tv_usec/1000;
if (verbose)
fprintf(stderr, "hipMalloc %.1f MB hashtable elapsed time %d ms\n",
(double)HTTSLF_SIZE*sizeof(httslf_entry_t *)/(1024*1024), etime);
#else
/* allocate hashtable on device */
if ((rc = hipMalloc((void **)&devHashtable,
OAHTTSLF_SIZE*sizeof(oahttslf_entry_t))) != hipSuccess)
{
fprintf(stderr, "hipMalloc devHashtable failed %d\n", rc);
exit(1);
}
cutStopTimer(hTimer) ;
if (verbose)
fprintf(stderr, "hipMalloc %.1f MB hashtable elapsed time %d ms\n",
(double)OAHTTSLF_SIZE*sizeof(oahttslf_entry_t)/(1024*1024), cutGetTimerValue(hTimer));
#endif /* USE_HTTSLF*/
#ifndef USE_SHARED
/* allocate the per-thread stacks in device global memory */
/* hipMemcpyToSymbol() the pointers to allocated device memory to the
     global device pointers rather than passing as parameters for
convenience so code is same as using shared memory, just using macros */
bool *cuda_stackc; /* call or answer */
unsigned int *cuda_stacki;
unsigned int *cuda_stackw;
if ((rc = hipMalloc((void **)&cuda_stackc,
sizeof(bool) * TOTAL_NUM_THREADS * (MAXITEMS+1))) != hipSuccess)
{
fprintf(stderr, "hipMalloc stackc failed %d (%s)\n", rc, hipGetErrorString(rc));
exit(1);
}
if ((rc = hipMemcpyToSymbol("stackc", &cuda_stackc, sizeof(bool*))) != hipSuccess)
{
fprintf(stderr, "cudaMemcpyTosymbol stackc failed %d (%s)\n", rc,
hipGetErrorString(rc));
exit(1);
}
if ((rc = hipMalloc((void **)&cuda_stacki,
sizeof(unsigned int) * TOTAL_NUM_THREADS * (MAXITEMS+1))) != hipSuccess)
{
fprintf(stderr, "hipMalloc stacki failed %d (%s)\n", rc, hipGetErrorString(rc));
exit(1);
}
if ((rc = hipMemcpyToSymbol("stacki", &cuda_stacki, sizeof(unsigned int*))) != hipSuccess)
{
fprintf(stderr, "cudaMemcpyTosymbol stacki failed %d (%s)\n", rc,
hipGetErrorString(rc));
exit(1);
}
if ((rc = hipMalloc((void **)&cuda_stackw,
sizeof(unsigned int) * TOTAL_NUM_THREADS * (MAXITEMS+1))) != hipSuccess)
{
fprintf(stderr, "hipMalloc stackw failed %d (%s)\n", rc, hipGetErrorString(rc));
exit(1);
}
if ((rc = hipMemcpyToSymbol("stackw", &cuda_stackw, sizeof(unsigned int*))) != hipSuccess)
{
fprintf(stderr, "cudaMemcpyTosymbol stackw failed %d (%s)\n", rc,
hipGetErrorString(rc));
exit(1);
}
#endif /* USE_SHARED */
#ifdef USE_HTTSLF
gettimeofday(&start_timeval, NULL);
/* set hashtable to all empty keys/values */
hipLaunchKernelGGL(( httslf_reset), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, devHashtable);
if ((rc = hipGetLastError()) != hipSuccess)
{
fprintf(stderr, "httslf_reset kernel error %d\n", rc);
}
cutilDeviceSynchronize();
if ((rc = hipGetLastError()) != hipSuccess)
{
fprintf(stderr, "httslf_reset sync error %d (%s)\n", rc,hipGetErrorString(rc));
}
gettimeofday(&end_timeval, NULL);
timeval_subtract(&elapsed_timeval, &end_timeval, &start_timeval);
etime = 1000 * elapsed_timeval.tv_sec + elapsed_timeval.tv_usec/1000;
if (verbose)
fprintf(stderr, "httslf_reset elapsed time %d ms\n", etime);
#else
cutResetTimer(hTimer) ;
cutStartTimer(hTimer) ;
// Initialize the device memory
hipLaunchKernelGGL(( oahttslf_reset), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, devHashtable);
if ((rc = hipGetLastError()) != hipSuccess)
{
fprintf(stderr, "oahttslf_reset kernel error %d\n", rc);
}
cutilDeviceSynchronize();
if ((rc = hipGetLastError()) != hipSuccess)
{
fprintf(stderr, "oahttslf_reset sync error %d\n", rc);
}
cutStopTimer(hTimer) ;
if (verbose)
fprintf(stderr, "oahttslf_reset kernel elapsed time %d ms\n",
cutGetTimerValue(hTimer));
#endif /*USE_HTTSLF*/
if (hipMalloc((void **)&d_profit, sizeof(unsigned int)) != hipSuccess)
{
fprintf(stderr, "hipMalloc d_profit failed\n");
exit(1);
}
cutResetTimer(hTimer) ;
cutStartTimer(hTimer) ;
if (verbose)
fprintf(stderr, "NUM_BLOCKS = %d, NUM_THREADS = %d\n", NUM_BLOCKS,NUM_THREADS);
/* Run the kernel */
dim3 dimGrid(NUM_BLOCKS) ; // blocks
dim3 dimBlock(NUM_THREADS); // threads
hipLaunchKernelGGL(( dp_knapsack_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, NUM_KNAPSACK_ITEMS, CAPACITY, d_profit, devStates, devHashtable);
if ((rc = hipGetLastError()) != hipSuccess)
{
fprintf(stderr, "knapsack kernel error %d (%s)\n", rc, hipGetErrorString(rc));
}
cutilDeviceSynchronize();
if ((rc = hipGetLastError()) != hipSuccess)
{
fprintf(stderr, "knapsack sync error %d (%s)\n", rc, hipGetErrorString(rc));
}
cutStopTimer(hTimer) ;
if (verbose)
fprintf(stderr, "knapsack kernel time: %f ms\n", cutGetTimerValue(hTimer));
hipMemcpy(&profit, d_profit, sizeof(unsigned int), hipMemcpyDeviceToHost);
getrusage(RUSAGE_SELF, &endtime);
gettimeofday(&end_timeval, NULL);
timeval_subtract(&elapsed_timeval, &end_timeval, &start_timeval);
/* timeval_subtract(&endtime,&starttime,&runtime); */
runtime = endtime;
ttime = 1000 * runtime.ru_utime.tv_sec + runtime.ru_utime.tv_usec/1000
+ 1000 * runtime.ru_stime.tv_sec + runtime.ru_stime.tv_usec/1000;
etime = 1000 * elapsed_timeval.tv_sec + elapsed_timeval.tv_usec/1000;
#ifdef USE_INSTRUMENT
counter_t host_total_reuse, host_total_hashcount;
if ((rc = hipMemcpyFromSymbol(&host_total_reuse, "total_reuse",
sizeof(counter_t))) != hipSuccess)
{
fprintf(stderr, "hipMemcpyFromSymbol total_reuse failed %d (%s)\n",
rc, hipGetErrorString(rc));
exit(1);
}
if ((rc = hipMemcpyFromSymbol(&host_total_hashcount, "total_hashcount",
sizeof(counter_t))) != hipSuccess)
{
fprintf(stderr, "hipMemcpyFromSymbol total_hashcount failed %d (%s)\n",
rc, hipGetErrorString(rc));
exit(1);
}
#endif /* USE_INSTRUMENT */
printf("%u %d %d %d %d %s %s",
profit,
#ifdef USE_INSTRUMENT
host_total_reuse, host_total_hashcount,
#else
0, 0,
#endif
ttime, etime, flags, name);
if (show_stats_summary)
{
#ifdef USE_INSTRUMENT
#ifdef USE_HTTSLF
hipLaunchKernelGGL(( httslf_computestats), dim3(dimGrid), dim3(dimBlock), 0, 0, devHashtable);
cutilDeviceSynchronize();
if ((rc = hipGetLastError()) != hipSuccess)
{
fprintf(stderr, "httslf_computestats sync error %d (%s)\n", rc,hipGetErrorString(rc));
exit(1);
}
hipLaunchKernelGGL(( httslf_printstats), dim3(dimGrid), dim3(dimBlock), 0, 0, );
cutilDeviceSynchronize();
if ((rc = hipGetLastError()) != hipSuccess)
{
fprintf(stderr, "httslf_printstats sync error %d (%s)\n", rc,hipGetErrorString(rc));
exit(1);
}
#else
hipLaunchKernelGGL(( oahttslf_print_stats), dim3(dimGrid), dim3(dimBlock), 0, 0, );
cutilDeviceSynchronize();
if ((rc = hipGetLastError()) != hipSuccess)
{
fprintf(stderr, "oahttslf_print_stats sync error %d\n", rc);
}
#endif /* USE_HTTSLF */
unsigned int num_keys, total_retry_count;
if ((rc = hipMemcpyFromSymbol(&total_retry_count, "global_retry_count",
sizeof(counter_t))) != hipSuccess)
{
fprintf(stderr, "hipMemcpyFromSymbol total_retry_count failed %d (%s)\n",
rc, hipGetErrorString(rc));
exit(1);
}
if ((rc = hipMemcpyFromSymbol(&num_keys, "global_new_insert_count",
sizeof(counter_t))) != hipSuccess)
{
fprintf(stderr, "httslf_printstats sync error %d (%s)\n", rc,hipGetErrorString(rc));
fprintf(stderr, "hipMemcpyFromSymbol global_new_insert_count failed %d (%s)\n",
rc, hipGetErrorString(rc));
exit(1);
}
printf("INSTRUMENT hc=%lu,re=%lu,re/hc=%f,hn=%u,or=%ld\n",
host_total_hashcount, host_total_reuse,
(float)host_total_reuse / host_total_hashcount,
num_keys,
total_retry_count
);
#else
printf("COMPILED WITHOUT -DUSE_INSTRUMENT : NO STATS AVAIL\n");
#endif /* USE_INSTRUMENT */
}
/* clean up */
hipFree(devStates);
cutilDeviceReset();
free(KNAPSACK_ITEMS);
exit(0);
}
| 0e6a4da34e26951820c130b128564803472b79ce.cu | /*****************************************************************************
*
* File: knapsack_gpu_randomstart_oahttslf.cu
* Author: Alex Stivala & Peter J Stuckey
* Created: April 2009
*
* $Id: knapsack_gpu_randomstart_nr_oahttslf.cu 4616 2013-01-25 01:07:43Z astivala $
*
* This is the GPU implementation using the oahttslf GPU lock free hash table.
* This version does not use recursion.
* This version has an additional feature in which we try
* stating some threads at random (i,w) positions in the problem rather
* than starting all at the top (solution) values of (i, w)
*
*
* Usage: knapsack_gpu_randomstart_oahttslf [-nvy] [-r threads] < problemspec
* -v: Verbose output
* -n: assume no name in the first line of the file
 * -y: show instrumentation summary line (like -t but one line summary)
*
* The problemspec is in the format generated by gen2.c from David Pisinger
* (http://www.diku.dk/hjemmesider/ansatte/pisinger/codes.html):
*
* numitems
* 1 profit_1 weight_1
* 2 profit_2 weight_2
* ...
* numitems profit_numitems weight_numitems
* capacity
*
* all profits and weights are positive integers.
*
*
* Requires CUDA 4.x and device with compute capability 2.x and higher
* as it uses atomic CAS on 64 bits.
* Also printf() from device function for debug.
 * It uses the CURAND library for pseudorandom number generation.
*
* Preprocessor symbols:
*
* DEBUG - compile in lots of debugging code.
* USE_SHARED - use shared memory not global for stacks
 * USE_HTTSLF - use httslf gpu kernel instead of oahttslf gpu kernel
* USE_INSTRUMENT - compile in (per-thread) instrumentation counts.
*
*****************************************************************************/
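/*
 * Added illustrative example (not part of the original distribution): a
 * minimal problemspec in the gen2.c format described above, assuming three
 * items with (profit, weight) pairs (60,10), (100,20), (120,30) and
 * capacity 50:
 *
 *   3
 *   1 60 10
 *   2 100 20
 *   3 120 30
 *   50
 *
 * For this instance the optimal profit is 220 (taking items 2 and 3).
 */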
#define NOTDEF_XXX_NORAND /* yes we DO want randomization */
#undef USE_SHARED
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <assert.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "cutil_inline.h"
#include "curand_kernel.h"
// number of thread blocks
#define NUM_BLOCKS 512 //some sensible value for this? (65535 max)
// Number of threads per block
#define NUM_THREADS 32 //some sensible value for this? (1024 max)
// (these are defined prior to inclde of httslf_gpu_oahttslf_kernel.cu etc
// as they are used to define arrays for instrumentation if USE_INSTRUMENT)
#ifdef USE_HTTSLF
#undef ALLOW_DELETE /* never need to delete keys */
#include "httslf_gpu_kernel.cu"
#define HASHTABLE_TYPE httslf_entry_t**
#define INSERT_FUNCTION httslf_insert
#define LOOKUP_FUNCTION httslf_lookup
#else
#undef ALLOW_UPDATE /* never need to update a value once insterted */
#undef ALLOW_DELETE /* never need to delete keys */
#include "oahttslf_gpu_kernel.cu"
#define HASHTABLE_TYPE oahttslf_entry_t*
#define INSERT_FUNCTION oahttslf_insert
#define LOOKUP_FUNCTION oahttslf_lookup
#endif
#ifdef DEBUG
#define DEBUG_PRINT(x) printf x
#else
#define DEBUG_PRINT(x) /* nothing */
#endif
/*****************************************************************************
*
* global constants
*
*****************************************************************************/
#define MAXITEMS 1024 /* used for stack size */
// size of constant memory on device to store profit,weight for each item
#define MAX_KNAPSACK_ITEMS 512
/* dodgy: 0 is the OAHTTSLF_EMPTY_KEY and OAHTTSLF_EMPTY_VALUE value,
so if key or value is 0 set it to MAGIC_ZERO instead */
#define MAGIC_ZERO 0xffffffffffffffff
/*****************************************************************************
*
* type definitions
*
*****************************************************************************/
typedef unsigned long long uint64_t;
typedef unsigned int uint32_t;
typedef unsigned short uint16_t;
typedef unsigned char uint8_t;
/* definition of type for an item */
typedef struct item_s
{
unsigned int profit;
unsigned int weight;
} item_t;
typedef unsigned int counter_t;
typedef struct stats_s
{
counter_t reuse; /* number of times value already found in hashtable */
counter_t hashcount; /* number of times value computed & stored in ht */
} stats_t;
/*****************************************************************************
*
* device constant data
*
*****************************************************************************/
// array of weight and profit for each item. indexed 1..NUM_KNAPSACK_ITEMS,
// element 0 is unused
__constant__ item_t c_KNAPSACK_ITEMS[MAX_KNAPSACK_ITEMS+1];
const char *c_KNAPSACK_ITEMS_symbol = "c_KNAPSACK_ITEMS";
/*****************************************************************************
*
* device global data
*
*****************************************************************************/
__device__ volatile bool finished = false; // set when a thread has finished the computation
#ifdef USE_INSTRUMENT
/* instrumentation totals (summed over all threads) */
__device__ counter_t total_reuse = 0, total_hashcount = 0;
#endif
/*****************************************************************************
*
* device shared data
*
*****************************************************************************/
#ifdef USE_SHARED
//
// per-thread stacks, using shared memory for speed
//
// index as [ threadIdx.x * (MAXITEMS+1) + stackindex ]
#define STACKINDEX(i) (threadIdx.x * (MAXITEMS+1) + (i))
__shared__ bool stackc[NUM_THREADS * (MAXITEMS+1)]; /* call or answer */
__shared__ unsigned int stacki[NUM_THREADS * (MAXITEMS+1)];
__shared__ unsigned int stackw[NUM_THREADS * (MAXITEMS+1)];
#else
// instead of shared, use global memory allocated with cudaMalloc
// (to avoid linker errors if size gets too large)
// index as [ thread_id * (MAXITEMS+1) + stackindex ]
// NB the macro below depends on local variable
// tid = blockIdx.x*blockDim.x+threadIdx.x
#define STACKINDEX(i) (tid * (MAXITEMS+1) + (i))
#define TOTAL_NUM_THREADS (NUM_THREADS * NUM_BLOCKS)
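/*
 * Added note: STACKINDEX flattens (thread, depth) into the 1-D arrays below;
 * e.g. for tid == 3 and stack depth 7 it selects element 3*(MAXITEMS+1) + 7,
 * so each of the TOTAL_NUM_THREADS threads owns a private stack of
 * MAXITEMS+1 slots.
 */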
__device__ bool *stackc; /* call or answer */
__device__ unsigned int *stacki;
__device__ unsigned int *stackw;
#endif /* USE_SHARED */
#ifdef USE_INSTRUMENT
/* per-thread instrumentation */
/* initialize with knapsack_reset_stats() */
__shared__ stats_t stats[NUM_THREADS];
#endif
/*****************************************************************************
*
* host static data
*
*****************************************************************************/
static bool printstats; /* whether to print call stats */
static bool verbose; /* verbose output */
static bool show_stats_summary = 0; /* -y summary instrumentation stats */
static unsigned int CAPACITY; /* total capacity for the problem */
static unsigned int NUM_KNAPSACK_ITEMS; /* number of items */
static item_t *KNAPSACK_ITEMS; /* array of item profits and weights (0 unused)*/
/*****************************************************************************
*
* host static functions
*
*****************************************************************************/
/*
* Read the input from stdin in the gen2.c format:
*
* numitems
* 1 profit_1 weight_1
* 2 profit_2 weight_2
* ...
* numitems profit_numitems weight_numitems
* capacity
*
* all profits and weights are positive integers.
*
* Parameters:
* None.
* Return value:
* None.
* Uses global data (write):
* KNAPSACK_ITEMS - allocates array, sets profit and weight for each item
* CAPACITY - sets capacity for problem
* NUM_KNAPSACK_ITEMS - number of items
*/
static void readdata(void)
{
unsigned int i,inum;
if (scanf("%d", &NUM_KNAPSACK_ITEMS) != 1)
{
fprintf(stderr, "ERROR reading number of items\n");
exit(EXIT_FAILURE);
}
if (!(KNAPSACK_ITEMS = (item_t *)malloc((NUM_KNAPSACK_ITEMS+1) * sizeof(item_t))))
{
fprintf(stderr,"malloc KNAPSACK_ITEMS failed\n");
exit(EXIT_FAILURE);
}
for (i = 1; i <= NUM_KNAPSACK_ITEMS; i++)
{
if(scanf("%d %d %d", &inum, &KNAPSACK_ITEMS[i].profit, &KNAPSACK_ITEMS[i].weight) != 3)
{
fprintf(stderr, "ERROR reading item %d\n", i);
exit(EXIT_FAILURE);
}
if (inum != i)
{
fprintf(stderr, "ERROR expecting item %d got %d\n", i, inum);
exit(EXIT_FAILURE);
}
}
if (scanf("%d", &CAPACITY) != 1)
{
fprintf(stderr, "ERROR reading capacity\n");
exit(EXIT_FAILURE);
}
}
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0.
(from GNU libc manual) */
int
timeval_subtract (struct timeval *result, struct timeval *x,
struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
/*****************************************************************************
*
* device functions
*
*****************************************************************************/
#ifdef USE_INSTRUMENT
/*
* knapsack_reset_stats()
*
 * Initialize the per-thread instrumentation counters in shared memory
*
* Parameters: None
* Return value: None
*/
__device__ void knapsack_reset_stats(void)
{
for (int i = threadIdx.x; i < NUM_THREADS; i += blockDim.x)
{
stats[i].reuse = 0;
stats[i].hashcount = 0;
}
__syncthreads();
}
/*
* knapsack_sum_stats()
*
* Sum up the per-thread counters
*
* Also instead of doing a proper efficient reduction just uses atomicAdd
 * (on globals), also requires compute 2.x (only debug/instrumentation,
* no need for efficiency here, just keep it simple).
*
* Parameters: None
 * Return value: None
*/
__device__ void knapsack_sum_stats(void)
{
unsigned int block_reuse = 0;
unsigned int block_hashcount = 0;
__syncthreads();
if (threadIdx.x == 0)
{
for (int i = 0; i < NUM_THREADS; i++)
{
block_reuse += stats[i].reuse;
block_hashcount += stats[i].hashcount;
}
atomicAdd(&total_reuse, block_reuse);
atomicAdd(&total_hashcount, block_hashcount);
}
}
#endif /* USE_INSTRUMENT */
/*
* insert_indices()
*
* Insert value for (i,j) into the hashtable
*
* Parameters:
* i,j - indices to build insertion key
* value - value to insert for the key
*
* Uses global data:
* devHashTable
*
* Return value:
* None.
*/
__device__ void insert_indices(unsigned int i, unsigned int j,
unsigned int value, HASHTABLE_TYPE devHashtable)
{
uint64_t key, val64;
#ifdef USE_HTTSLF
key = ((uint64_t)i << 32) | (j & 0xffffffff);
val64 = (uint64_t)value;
#else
key = (i == 0 && j == 0 ? MAGIC_ZERO :
((uint64_t)i << 32) | (j & 0xffffffff));
val64 = (value == 0 ? MAGIC_ZERO : (uint64_t)value);
#endif
#ifdef USE_INSTRUMENT
stats[threadIdx.x].hashcount++;
#endif
INSERT_FUNCTION(devHashtable, key, val64);
}
/*
* lookup_indices()
*
* Get the value for (i,j) from the hashtable
*
* Parameters:
* i,j - indices to build key for lookup
* pvalue - (OUTPUT) value for key, only set if true returned
*
* Uses global data:
* devHashTable (readonly)
*
* Return value:
* true if found, false otherwise
*/
__device__ bool lookup_indices(unsigned int i, unsigned int j,
unsigned int *pvalue, HASHTABLE_TYPE devHashtable)
{
uint64_t key;
uint64_t val64 = 0;
bool found;
#ifdef USE_HTTSLF
key = ((uint64_t)i << 32) | (j & 0xffffffff);
#else
key = (i == 0 && j == 0 ? MAGIC_ZERO :
((uint64_t)i << 32) | (j & 0xffffffff));
#endif
found = LOOKUP_FUNCTION(devHashtable, key, &val64);
  // avoid branch by using ternary operator to set to 0 if not found
  // since val64 is initialized to 0 (note this depends on LOOKUP_FUNCTION
  // not setting return parameter &val64 if not found)
*pvalue = ((val64 == MAGIC_ZERO) ? 0 : (unsigned int)val64);
return found;
}
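/*
 * Added illustrative note on the key packing used above: for (i=2, j=5) the
 * 64-bit key is ((uint64_t)2 << 32) | 5 == 0x0000000200000005. In the
 * oahttslf build the (0,0) pair (and a value of 0) is remapped to MAGIC_ZERO
 * so it cannot collide with the empty-slot sentinel 0.
 */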
/*
* lookk - lookup a computed value for (i,w) from the hash table
*
* Parameters:
* i,w - indices to build key for lookup
* pvalue - (OUTPUT) value for key, only set if true returned
* devHashtable - the hash table on the device
* Return value:
* The value p (profit) in the table for (i,w) or 0 if not there
*
*/
__device__ unsigned int lookk(unsigned int i, unsigned int w,
HASHTABLE_TYPE devHashtable)
{
unsigned int p = 0;
lookup_indices(i, w, &p, devHashtable);
return p;
}
/*
* dp_knapsack_nr()
*
* This version is multi-threaded, sharing hashtable used to
* store computed values between the threads.
* This version is not recursive, it maintains its own explicit stack.
* This function is called by dp_knapsack_thread()
* with identical instances running
* in several threads. This functino itself is not recursive
* (and never creates threads itself) and diverges as there is
* a random choice as to which of the paths we take first; we use
* parallelism to explore the search space concurrently with
* diverged paths due to this choice, but still reusing computed
* values by the shared lock-free hashtable.
*
*
* This version uses no bounding.
*
* Parameters: i - item index
* w - total weight
* state - CURAND state for random number generation
* devHashtable - the hash table
*
* global constant memory:
*
* c_KNAPSACK_ITEMS - array of profit and weight for each item
*
* global memory:
 * finished (readonly) - checked to see if computation is done
*
* Return value:
* value of d.p. at (i,w)
*
*
*/
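/*
 * Added sketch of the recurrence the stack machine below evaluates (derived
 * from the code itself, with profit_i/weight_i taken from c_KNAPSACK_ITEMS):
 *
 *   f(1,w) = profit_1 if w >= weight_1, else 0
 *   f(i,w) = max( f(i-1,w),
 *                 f(i-1, w - weight_i) + profit_i if w >= weight_i, else 0 )
 *
 * A "call" entry (c == true) pushes any missing subproblems; an "answer"
 * entry (c == false) combines their memoised values from the hash table.
 */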
__device__ unsigned int dp_knapsack_nr(unsigned int i, unsigned int w,
curandState *state, HASHTABLE_TYPE devHashtable)
{
int tid=blockIdx.x*blockDim.x+threadIdx.x;
curandState localState = state[tid];
unsigned int p;
bool c;
int top = 1;
unsigned int oi = i, ow = w;
#ifdef DEBUG
printf("knapsack_nr(%d,%d)\n",i,w);
#endif
stacki[STACKINDEX(top)] = i;
stackw[STACKINDEX(top)] = w;
stackc[STACKINDEX(top)] = true;
while(top) {
assert(threadIdx.x < NUM_THREADS);
assert(top <= MAXITEMS);
if (finished)
return 0;
i = stacki[STACKINDEX(top)];
w = stackw[STACKINDEX(top)];
c = stackc[STACKINDEX(top)];
top--;
#ifdef DEBUG
printf("knapsack_nr(%d,%d,%d)\n",i,w,c);
#endif
if (c) {
if (i == 1) {
p = (w >= c_KNAPSACK_ITEMS[i].weight ? c_KNAPSACK_ITEMS[i].profit : 0);
#ifdef DEBUG
printf("knapsack_nr(%d,%d,%d) = %d\n",i,w,c,p);
#endif
insert_indices(i, w, p, devHashtable);
}
else if (lookk(i-1,w,devHashtable) > 0 && lookk(i-1,w - c_KNAPSACK_ITEMS[i].weight,devHashtable) > 0) {
p = MAX(lookk(i-1,w,devHashtable),
(w >= c_KNAPSACK_ITEMS[i].weight ? lookk(i-1,w - c_KNAPSACK_ITEMS[i].weight,devHashtable) + c_KNAPSACK_ITEMS[i].profit : 0));
#ifdef DEBUG
printf("knapsack_nr(%d,%d,%d) = %d\n",i,w,c,p);
#endif
insert_indices(i, w, p, devHashtable);
}
else {
#ifdef NOTDEF_XXX_NORAND
if (curand(&localState) & 1) {
top++;
stacki[STACKINDEX(top)] = i;
stackw[STACKINDEX(top)] = w;
stackc[STACKINDEX(top)] = false;
if (i >= 1 && lookk(i-1,w,devHashtable) == 0) {
top++;
stacki[STACKINDEX(top)] = i-1;
stackw[STACKINDEX(top)] = w;
stackc[STACKINDEX(top)] = true;
}
if (i >= 1 && w >= c_KNAPSACK_ITEMS[i].weight && lookk(i-1,w-c_KNAPSACK_ITEMS[i].weight,devHashtable) == 0) {
top++;
stacki[STACKINDEX(top)] = i-1;
stackw[STACKINDEX(top)] = w - c_KNAPSACK_ITEMS[i].weight;
stackc[STACKINDEX(top)] = true;
}
}
else {
#endif /*NOTDEF_XXX_NORAND*/
top++;
stacki[STACKINDEX(top)] = i;
stackw[STACKINDEX(top)] = w;
stackc[STACKINDEX(top)] = false;
if (i >= 1 && w >= c_KNAPSACK_ITEMS[i].weight && lookk(i-1,w-c_KNAPSACK_ITEMS[i].weight,devHashtable) == 0) {
top++;
stacki[STACKINDEX(top)] = i-1;
stackw[STACKINDEX(top)] = w - c_KNAPSACK_ITEMS[i].weight;
stackc[STACKINDEX(top)] = true;
}
if (i >= 1 && lookk(i-1,w,devHashtable) == 0) {
top++;
stacki[STACKINDEX(top)] = i-1;
stackw[STACKINDEX(top)] = w;
stackc[STACKINDEX(top)] = true;
}
#ifdef NOTDEF_XXX_NORAND
}
#endif /*NOTDEF_XXX_NORAND*/
}
} else {
p = MAX(lookk(i-1,w,devHashtable),
(w >= c_KNAPSACK_ITEMS[i].weight ? lookk(i-1,w - c_KNAPSACK_ITEMS[i].weight,devHashtable) +c_KNAPSACK_ITEMS[i].profit : 0));
#ifdef DEBUG
printf("knapsack_nr(%d,%d,%d) = %d\n",i,w,c,p);
#endif
insert_indices(i, w, p, devHashtable);
}
}
insert_indices(oi, ow, p, devHashtable);
#ifdef DEBUG
printf("knapsack_nr(%d,%d) = %d\n",i,w,p);
#endif
state[tid] = localState;
return p;
}
/*****************************************************************************
*
* global functions
*
*****************************************************************************/
/*
* init_rng()
*
* Initialize CURAND pseudrandom number generator
* See CUDA Toolkit 4.1 CURAND Guide (p.21)
*
* Parameters:
* state - CURAND state for random number generation
* seed - seed for CURAND init
*
*/
__global__ void init_rng(curandState *state, unsigned long long seed)
{
int tid=blockIdx.x*blockDim.x+threadIdx.x;
  /* give each thread the same seed, different sequence number, no offset */
curand_init(seed, tid, 0, &state[tid]);
}
/*
* dp_knapsack_kernel()
*
 * Caller interface to the multithreaded version: just calls the actual
* device function
*
*
 * Parameters:
* i - item index to start at
* w - total capacity to start at
* p - (output) score for this product set
* state - CURAND state for random number generation
* devHashtable - the hash table
*
* global memory:
 * finished - set when computation is done to tell other threads to end
*/
__global__ void dp_knapsack_kernel(unsigned int i, unsigned int w,
unsigned int *p, curandState *state,
HASHTABLE_TYPE devHashtable)
{
int tid=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int profit = 0;
#ifdef USE_INSTRUMENT
reset_counters();
#endif
//DEBUG_PRINT(("(0) blockIdx.x = %d blockDim.x = %d threadIx = %d tid = %d\n",
// blockIdx.x, blockDim.x, threadIdx.x, tid));
#ifdef DEBUG
for (int j = 0; j < i; j++)
{
printf("item %d: weight = %d profit = %d\n", j, c_KNAPSACK_ITEMS[j].weight, c_KNAPSACK_ITEMS[j].profit);
}
#endif
/* start half the threads at a random point. These threads will be
* in a "forever" loop, we only care about threads actually computing
* the required solution terminating - when one of them terminates the
   * solution is found. The random threads just go and start at another
* random point if they finish first.
*/
bool randomstart = (tid > 31);
if (randomstart)
{
/* experimental "random" case: just keep starting at random points
* hoping we compute something helpful to the solution. The idea is
     * to avoid large growth in unnecessary recomputation (h/h_0) for large
* number of threads
* TODO more sensible choices of points, not totally random
*/
while (!finished)
{
curandState localState = state[tid];
unsigned int random_i = curand(&localState) % i;
unsigned int random_w = curand(&localState) % w;
state[tid] = localState;
(void)dp_knapsack_nr(random_i, random_w, state, devHashtable);
}
}
else
{
/* this thread starts at the (i,w) value to solve the actual problem */
profit = dp_knapsack_nr(i, w, state, devHashtable);
finished = true;
__threadfence();// FIXME do we need some sort of sync or threadfence here?
//DEBUG_PRINT(("SET profit = %d (tid = %d)\n", profit, tid));
if (profit != 0)
*p = profit;
}
#ifdef USE_INSTRUMENT
#ifdef USE_HTTSLF
httslf_sumcounters();
#else
oahttslf_sum_stats();
#endif /* USE_HTTSLF */
knapsack_sum_stats();
#endif /* USE_INSTRUMENT */
}
/*****************************************************************************
*
* host main
*
*****************************************************************************/
/*
* print usage message and exit
*
*/
static void usage(const char *program)
{
fprintf(stderr,
"Usage: %s [-ntvy] < problemspec\n"
" -n: assume no name in the first line of the file\n"
" -t: show statistics of operations\n"
" -v: Verbose output\n"
" -y: show instrumentatino summary line (like -t but one line summary)\n",
program);
exit(EXIT_FAILURE);
}
/*
* main
*/
int main(int argc, char *argv[])
{
int i = 0;
char flags[100];
int c;
int otime, ttime, etime;
unsigned int profit =0;
unsigned int *d_profit ;
struct rusage starttime,totaltime,runtime,endtime,opttime;
struct timeval start_timeval,end_timeval,elapsed_timeval;
unsigned int t;
char name[100];
int noname = 0;
cudaError_t rc;
curandState *devStates;
unsigned int hTimer;
size_t stacksize;
HASHTABLE_TYPE devHashtable;
#ifndef NOTDEF_XXX_NORAND
fprintf(stderr, "NO RANDOMIZATION\n");
#endif
strcpy(flags, "[NONE]");
while ((c = getopt(argc, argv, "nvyt?")) != -1)
{
switch(c) {
case 'v':
/* verbose output */
verbose = 1;
break;
case 't':
/* show stats */
printstats = 1;
break;
case 'n':
/* no name on first line of input */
noname = 1;
break;
case 'y':
      /* show statistics summary line of instrumentation */
show_stats_summary = 1;
break;
default:
usage(argv[0]);
break;
}
if (i < (int)sizeof(flags)-1)
flags[i++] = c;
}
if (i > 0)
flags[i] = '\0';
/* we should have no command line parameters */
if (optind != argc)
usage(argv[0]);
// Pick the best GPU available, or if the developer selects one at the command line
int devID = cutilChooseCudaDevice(argc, argv);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, devID);
if (verbose)
fprintf(stderr, "> GPU Device has Compute Capabilities SM %d.%d\n\n", deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x20) {
fprintf(stderr, "device with compute capability 2.0 or better is required\n");
exit(1);
}
// start time AFTER first CUDA call so as not to count the annoying
// and apparently unavoidable approx. 4 second init overhead
gettimeofday(&start_timeval, NULL);
// We need L1 cache to store the stack (only applicable to sm_20 and higher)
if ((rc = cudaFuncSetCacheConfig(dp_knapsack_kernel,
cudaFuncCachePreferL1)) != cudaSuccess)
{
fprintf(stderr, "cudaFuncSetCacheConfig failed %d\n", rc);
exit(1);
}
// per-thread stacks, hard limit of total of 512KB
// local memory per thread (where stack is stored), so cannot use
// all of that for stack either
// see http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities
const int STACKSIZE = 1024; /* in bytes */
if ((rc = cudaDeviceSetLimit(cudaLimitStackSize, STACKSIZE)) != cudaSuccess)
{
fprintf(stderr, "cudaDeviceSetLimit failed %d\n",rc);
exit(1);
}
if ((rc = cudaDeviceGetLimit(&stacksize, cudaLimitStackSize)) != cudaSuccess)
{
fprintf(stderr, "cudaDeviceGetLimit failed %d\n",rc);
exit(1);
}
if (verbose)
fprintf(stderr, "cuda stack size = %.1f KB\n", (double)stacksize/1024);
if (noname)
strcpy(name,"[NONE]\n");
else
fgets(name,sizeof(name)-1,stdin);
getrusage(RUSAGE_SELF, &starttime);
readdata(); /* read into the KNAPSACK_ITEMS array and set CAPACITY, NUM_KNAPSACK_ITEMS */
if (NUM_KNAPSACK_ITEMS > MAX_KNAPSACK_ITEMS)
{
fprintf(stderr,
"num knapsack items %d exceeds %d, increase MAX_KNAPSACK_ITEMS\n",
NUM_KNAPSACK_ITEMS, MAX_KNAPSACK_ITEMS);
exit(1);
}
if (NUM_KNAPSACK_ITEMS > MAXITEMS)
{
fprintf(stderr, "number of items %d is too large, increase MAXITEMS\n",
NUM_KNAPSACK_ITEMS);
exit(1);
}
cutCreateTimer(&hTimer) ;
cutResetTimer(hTimer) ;
cutStartTimer(hTimer) ;
/* copy the knapsack items to device constant memory */
if ((rc = cudaMemcpyToSymbol(c_KNAPSACK_ITEMS_symbol, KNAPSACK_ITEMS,
(1+NUM_KNAPSACK_ITEMS)*sizeof(item_t)))!= cudaSuccess)
{
fprintf(stderr, "cudaMemcpyTosymbol failed %d\n", rc);
}
cutStopTimer(hTimer);
if (verbose)
fprintf(stderr, "copy %.1f KB of knapsack data to constant memory: %f ms\n",
(double)((1+NUM_KNAPSACK_ITEMS)*sizeof(item_t))/1024.0,
cutGetTimerValue(hTimer));
cutResetTimer(hTimer) ;
cutStartTimer(hTimer) ;
/* allocate space on device for random number generator state */
if ((rc = cudaMalloc((void **)&devStates,
NUM_BLOCKS*NUM_THREADS*sizeof(curandState))) != cudaSuccess)
{
fprintf(stderr, "cudaMalloc devStates failed %d\n", rc);
exit(1);
}
/* initialize device random number generator */
init_rng<<<NUM_BLOCKS, NUM_THREADS>>>(devStates, time(NULL));
if ((rc = cudaGetLastError()) != cudaSuccess)
{
fprintf(stderr, "init_rng kernel error %d\n", rc);
}
cutilDeviceSynchronize();
if ((rc = cudaGetLastError()) != cudaSuccess)
{
fprintf(stderr, "init_rng sync error %d\n", rc);
}
if (verbose)
fprintf(stderr, "allocate and initialize CURAND device RNG for %d threads: %f ms\n",
NUM_BLOCKS*NUM_THREADS, cutGetTimerValue(hTimer));
cutResetTimer(hTimer) ;
cutStartTimer(hTimer) ;
#ifdef USE_HTTSLF
httslf_entry_t **devCellpool;
gettimeofday(&start_timeval, NULL);
/* allocate cell pool on device */
size_t cell_size = sizeof(httslf_entry_t);
unsigned int devCellpool_num_items = 67108864 ; /* 2^26 */
if (verbose)
fprintf(stderr, "devCellpool_num_items = %u\n", devCellpool_num_items);
size_t devCellpool_size = cell_size * devCellpool_num_items;
if ((rc = cudaMalloc((void **)&devCellpool, devCellpool_size)) != cudaSuccess)
{
fprintf(stderr, "cudaMalloc devCellpool failed %d\n", rc);
exit(1);
}
gettimeofday(&end_timeval, NULL);
timeval_subtract(&elapsed_timeval, &end_timeval, &start_timeval);
etime = 1000 * elapsed_timeval.tv_sec + elapsed_timeval.tv_usec/1000;
if (verbose)
fprintf(stderr, "cudaMalloc %.1f MB cellpool elapsed time %d ms\n",
(double)devCellpool_size/(1024*1024), etime);
/* set globals on device for cell pool alloc */
if ((rc = cudaMemcpyToSymbol("cellpool", &devCellpool, sizeof(httslf_entry_t *))) != cudaSuccess)
{
fprintf(stderr, "cudaMemcpyToSymbol cellpool failed %d\n", rc);
exit(1);
}
/* set constants on device for cell pool alloc */
if ((rc = cudaMemcpyToSymbol((const char *)"total_num_cells", &devCellpool_num_items, sizeof(devCellpool_num_items))) != cudaSuccess)
{
fprintf(stderr, "cudaMemcpyToSymbol poolsize failed%d\n",rc);
exit(1);
}
gettimeofday(&start_timeval, NULL);
/* allocate hashtable on device */
if ((rc = cudaMalloc((void **)&devHashtable,
HTTSLF_SIZE*sizeof(httslf_entry_t *))) != cudaSuccess)
{
fprintf(stderr, "cudaMalloc devHashtable failed %d\n", rc);
exit(1);
}
gettimeofday(&end_timeval, NULL);
timeval_subtract(&elapsed_timeval, &end_timeval, &start_timeval);
etime = 1000 * elapsed_timeval.tv_sec + elapsed_timeval.tv_usec/1000;
if (verbose)
fprintf(stderr, "cudaMalloc %.1f MB hashtable elapsed time %d ms\n",
(double)HTTSLF_SIZE*sizeof(httslf_entry_t *)/(1024*1024), etime);
#else
/* allocate hashtable on device */
if ((rc = cudaMalloc((void **)&devHashtable,
OAHTTSLF_SIZE*sizeof(oahttslf_entry_t))) != cudaSuccess)
{
fprintf(stderr, "cudaMalloc devHashtable failed %d\n", rc);
exit(1);
}
cutStopTimer(hTimer) ;
if (verbose)
fprintf(stderr, "cudaMalloc %.1f MB hashtable elapsed time %d ms\n",
(double)OAHTTSLF_SIZE*sizeof(oahttslf_entry_t)/(1024*1024), cutGetTimerValue(hTimer));
#endif /* USE_HTTSLF*/
#ifndef USE_SHARED
/* allocate the per-thread stacks in device global memory */
/* cudaMemcpyToSymbol() the pointers to allocated device memory to the
   global device pointers rather than passing them as parameters, for
   convenience, so the code is the same as when using shared memory, just using macros */
bool *cuda_stackc; /* call or answer */
unsigned int *cuda_stacki;
unsigned int *cuda_stackw;
if ((rc = cudaMalloc((void **)&cuda_stackc,
sizeof(bool) * TOTAL_NUM_THREADS * (MAXITEMS+1))) != cudaSuccess)
{
fprintf(stderr, "cudaMalloc stackc failed %d (%s)\n", rc, cudaGetErrorString(rc));
exit(1);
}
if ((rc = cudaMemcpyToSymbol("stackc", &cuda_stackc, sizeof(bool*))) != cudaSuccess)
{
fprintf(stderr, "cudaMemcpyTosymbol stackc failed %d (%s)\n", rc,
cudaGetErrorString(rc));
exit(1);
}
if ((rc = cudaMalloc((void **)&cuda_stacki,
sizeof(unsigned int) * TOTAL_NUM_THREADS * (MAXITEMS+1))) != cudaSuccess)
{
fprintf(stderr, "cudaMalloc stacki failed %d (%s)\n", rc, cudaGetErrorString(rc));
exit(1);
}
if ((rc = cudaMemcpyToSymbol("stacki", &cuda_stacki, sizeof(unsigned int*))) != cudaSuccess)
{
fprintf(stderr, "cudaMemcpyTosymbol stacki failed %d (%s)\n", rc,
cudaGetErrorString(rc));
exit(1);
}
if ((rc = cudaMalloc((void **)&cuda_stackw,
sizeof(unsigned int) * TOTAL_NUM_THREADS * (MAXITEMS+1))) != cudaSuccess)
{
fprintf(stderr, "cudaMalloc stackw failed %d (%s)\n", rc, cudaGetErrorString(rc));
exit(1);
}
if ((rc = cudaMemcpyToSymbol("stackw", &cuda_stackw, sizeof(unsigned int*))) != cudaSuccess)
{
fprintf(stderr, "cudaMemcpyTosymbol stackw failed %d (%s)\n", rc,
cudaGetErrorString(rc));
exit(1);
}
#endif /* USE_SHARED */
#ifdef USE_HTTSLF
gettimeofday(&start_timeval, NULL);
/* set hashtable to all empty keys/values */
httslf_reset<<<NUM_BLOCKS, NUM_THREADS>>>(devHashtable);
if ((rc = cudaGetLastError()) != cudaSuccess)
{
fprintf(stderr, "httslf_reset kernel error %d\n", rc);
}
cutilDeviceSynchronize();
if ((rc = cudaGetLastError()) != cudaSuccess)
{
fprintf(stderr, "httslf_reset sync error %d (%s)\n", rc,cudaGetErrorString(rc));
}
gettimeofday(&end_timeval, NULL);
timeval_subtract(&elapsed_timeval, &end_timeval, &start_timeval);
etime = 1000 * elapsed_timeval.tv_sec + elapsed_timeval.tv_usec/1000;
if (verbose)
fprintf(stderr, "httslf_reset elapsed time %d ms\n", etime);
#else
cutResetTimer(hTimer) ;
cutStartTimer(hTimer) ;
// Initialize the device memory
oahttslf_reset<<<NUM_BLOCKS, NUM_THREADS>>>(devHashtable);
if ((rc = cudaGetLastError()) != cudaSuccess)
{
fprintf(stderr, "oahttslf_reset kernel error %d\n", rc);
}
cutilDeviceSynchronize();
if ((rc = cudaGetLastError()) != cudaSuccess)
{
fprintf(stderr, "oahttslf_reset sync error %d\n", rc);
}
cutStopTimer(hTimer) ;
if (verbose)
fprintf(stderr, "oahttslf_reset kernel elapsed time %d ms\n",
cutGetTimerValue(hTimer));
#endif /*USE_HTTSLF*/
if (cudaMalloc((void **)&d_profit, sizeof(unsigned int)) != cudaSuccess)
{
fprintf(stderr, "cudaMalloc d_profit failed\n");
exit(1);
}
cutResetTimer(hTimer) ;
cutStartTimer(hTimer) ;
if (verbose)
fprintf(stderr, "NUM_BLOCKS = %d, NUM_THREADS = %d\n", NUM_BLOCKS,NUM_THREADS);
/* Run the kernel */
dim3 dimGrid(NUM_BLOCKS) ; // blocks
dim3 dimBlock(NUM_THREADS); // threads
dp_knapsack_kernel<<<dimGrid, dimBlock>>>(NUM_KNAPSACK_ITEMS, CAPACITY, d_profit, devStates, devHashtable);
if ((rc = cudaGetLastError()) != cudaSuccess)
{
fprintf(stderr, "knapsack kernel error %d (%s)\n", rc, cudaGetErrorString(rc));
}
cutilDeviceSynchronize();
if ((rc = cudaGetLastError()) != cudaSuccess)
{
fprintf(stderr, "knapsack sync error %d (%s)\n", rc, cudaGetErrorString(rc));
}
cutStopTimer(hTimer) ;
if (verbose)
fprintf(stderr, "knapsack kernel time: %f ms\n", cutGetTimerValue(hTimer));
cudaMemcpy(&profit, d_profit, sizeof(unsigned int), cudaMemcpyDeviceToHost);
getrusage(RUSAGE_SELF, &endtime);
gettimeofday(&end_timeval, NULL);
timeval_subtract(&elapsed_timeval, &end_timeval, &start_timeval);
/* timeval_subtract(&endtime,&starttime,&runtime); */
runtime = endtime;
ttime = 1000 * runtime.ru_utime.tv_sec + runtime.ru_utime.tv_usec/1000
+ 1000 * runtime.ru_stime.tv_sec + runtime.ru_stime.tv_usec/1000;
etime = 1000 * elapsed_timeval.tv_sec + elapsed_timeval.tv_usec/1000;
#ifdef USE_INSTRUMENT
counter_t host_total_reuse, host_total_hashcount;
if ((rc = cudaMemcpyFromSymbol(&host_total_reuse, "total_reuse",
sizeof(counter_t))) != cudaSuccess)
{
fprintf(stderr, "cudaMemcpyFromSymbol total_reuse failed %d (%s)\n",
rc, cudaGetErrorString(rc));
exit(1);
}
if ((rc = cudaMemcpyFromSymbol(&host_total_hashcount, "total_hashcount",
sizeof(counter_t))) != cudaSuccess)
{
fprintf(stderr, "cudaMemcpyFromSymbol total_hashcount failed %d (%s)\n",
rc, cudaGetErrorString(rc));
exit(1);
}
#endif /* USE_INSTRUMENT */
printf("%u %d %d %d %d %s %s",
profit,
#ifdef USE_INSTRUMENT
host_total_reuse, host_total_hashcount,
#else
0, 0,
#endif
ttime, etime, flags, name);
if (show_stats_summary)
{
#ifdef USE_INSTRUMENT
#ifdef USE_HTTSLF
httslf_computestats<<<dimGrid, dimBlock>>>(devHashtable);
cutilDeviceSynchronize();
if ((rc = cudaGetLastError()) != cudaSuccess)
{
fprintf(stderr, "httslf_computestats sync error %d (%s)\n", rc,cudaGetErrorString(rc));
exit(1);
}
httslf_printstats<<<dimGrid, dimBlock>>>();
cutilDeviceSynchronize();
if ((rc = cudaGetLastError()) != cudaSuccess)
{
fprintf(stderr, "httslf_printstats sync error %d (%s)\n", rc,cudaGetErrorString(rc));
exit(1);
}
#else
oahttslf_print_stats<<<dimGrid, dimBlock>>>();
cutilDeviceSynchronize();
if ((rc = cudaGetLastError()) != cudaSuccess)
{
fprintf(stderr, "oahttslf_print_stats sync error %d\n", rc);
}
#endif /* USE_HTTSLF */
unsigned int num_keys, total_retry_count;
if ((rc = cudaMemcpyFromSymbol(&total_retry_count, "global_retry_count",
sizeof(counter_t))) != cudaSuccess)
{
fprintf(stderr, "cudaMemcpyFromSymbol total_retry_count failed %d (%s)\n",
rc, cudaGetErrorString(rc));
exit(1);
}
if ((rc = cudaMemcpyFromSymbol(&num_keys, "global_new_insert_count",
sizeof(counter_t))) != cudaSuccess)
{
fprintf(stderr, "httslf_printstats sync error %d (%s)\n", rc,cudaGetErrorString(rc));
fprintf(stderr, "cudaMemcpyFromSymbol global_new_insert_count failed %d (%s)\n",
rc, cudaGetErrorString(rc));
exit(1);
}
printf("INSTRUMENT hc=%lu,re=%lu,re/hc=%f,hn=%u,or=%ld\n",
host_total_hashcount, host_total_reuse,
(float)host_total_reuse / host_total_hashcount,
num_keys,
total_retry_count
);
#else
printf("COMPILED WITHOUT -DUSE_INSTRUMENT : NO STATS AVAIL\n");
#endif /* USE_INSTRUMENT */
}
/* clean up */
cudaFree(devStates);
cutilDeviceReset();
free(KNAPSACK_ITEMS);
exit(0);
}
|
c35b7dca0601aad78b3de02168c994554f5a6e2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
const size_t NTB = 256;
const size_t EXT = 8;
#define divCeil(a, b) (((a) + (b) - 1) / (b))
struct Ctx {
float *x, *y, *r;
size_t n;
};
__global__ void devDot(float *x, float *y, size_t n, float *r) {
__shared__ float rb[NTB];
size_t itb = threadIdx.x;
size_t i = blockIdx.x * blockDim.x * EXT + itb;
float s = 0.0;
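// each thread accumulates partial products for up to EXT elements, striding by blockDim.x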
for (size_t j = 0; j < EXT && i < n; j++, i += blockDim.x) {
s += x[i] * y[i];
}
rb[itb] = s;
__syncthreads();
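// tree reduction of the per-thread partial sums in shared memory; thread 0 writes the block result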
for (size_t i = NTB >> 1; i != 0; i >>= 1) {
if (itb < i) rb[itb] += rb[itb + i];
__syncthreads();
}
if (0 == itb) r[blockIdx.x] = rb[0];
}
extern "C" __declspec(dllexport) void getInputs(Ctx *ctx, float **px, float **py) {
*px = ctx->x;
*py = ctx->y;
}
extern "C" __declspec(dllexport) void init(Ctx **p, size_t n) {
Ctx *ctx = (Ctx *)malloc(sizeof(Ctx));
ctx->n = n;
size_t sz = sizeof(float) * n;
hipMallocManaged(&(ctx->x), sz);
hipMallocManaged(&(ctx->y), sz);
hipMallocManaged(&(ctx->r), sizeof(float) * divCeil(n, NTB) / EXT);
*p = ctx;
}
extern "C" __declspec(dllexport) void deinit(Ctx *ctx) {
hipFree(ctx->x);
hipFree(ctx->y);
hipFree(ctx->r);
free(ctx);
}
extern "C" __declspec(dllexport) void dot(Ctx *ctx, float *r) {
size_t nb = divCeil(ctx->n, NTB) / EXT;
float *rd = ctx->r;
hipLaunchKernelGGL(( devDot), dim3(nb), dim3(NTB), 0, 0, ctx->x, ctx->y, ctx->n, rd);
hipDeviceSynchronize();
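// finish the reduction on the host by summing the per-block partial results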
float s = 0.0;
for (size_t i = 0; i < nb; i++) s += rd[i];
*r = s;
} | c35b7dca0601aad78b3de02168c994554f5a6e2d.cu | const size_t NTB = 256;
const size_t EXT = 8;
#define divCeil(a, b) (((a) + (b) - 1) / (b))
struct Ctx {
float *x, *y, *r;
size_t n;
};
__global__ void devDot(float *x, float *y, size_t n, float *r) {
__shared__ float rb[NTB];
size_t itb = threadIdx.x;
size_t i = blockIdx.x * blockDim.x * EXT + itb;
float s = 0.0;
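// each thread accumulates partial products for up to EXT elements, striding by blockDim.x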
for (size_t j = 0; j < EXT && i < n; j++, i += blockDim.x) {
s += x[i] * y[i];
}
rb[itb] = s;
__syncthreads();
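// tree reduction of the per-thread partial sums in shared memory; thread 0 writes the block result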
for (size_t i = NTB >> 1; i != 0; i >>= 1) {
if (itb < i) rb[itb] += rb[itb + i];
__syncthreads();
}
if (0 == itb) r[blockIdx.x] = rb[0];
}
extern "C" __declspec(dllexport) void getInputs(Ctx *ctx, float **px, float **py) {
*px = ctx->x;
*py = ctx->y;
}
extern "C" __declspec(dllexport) void init(Ctx **p, size_t n) {
Ctx *ctx = (Ctx *)malloc(sizeof(Ctx));
ctx->n = n;
size_t sz = sizeof(float) * n;
cudaMallocManaged(&(ctx->x), sz);
cudaMallocManaged(&(ctx->y), sz);
cudaMallocManaged(&(ctx->r), sizeof(float) * divCeil(n, NTB) / EXT);
*p = ctx;
}
extern "C" __declspec(dllexport) void deinit(Ctx *ctx) {
cudaFree(ctx->x);
cudaFree(ctx->y);
cudaFree(ctx->r);
free(ctx);
}
extern "C" __declspec(dllexport) void dot(Ctx *ctx, float *r) {
size_t nb = divCeil(ctx->n, NTB) / EXT;
float *rd = ctx->r;
devDot<<<nb, NTB>>>(ctx->x, ctx->y, ctx->n, rd);
cudaDeviceSynchronize();
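// finish the reduction on the host by summing the per-block partial results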
float s = 0.0;
for (size_t i = 0; i < nb; i++) s += rd[i];
*r = s;
} |
59a633a9dd86c390fc13a7413365a9050a667a76.hip | // !!! This is a file automatically generated by hipify!!!
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/transform.h>
#pragma GCC diagnostic pop
#include "transform.hpp"
#include "error.hpp"
namespace ral {
namespace utilities {
struct length_to_end_functor {
template<typename T>
std::enable_if_t<std::is_integral<T>::value && !cudf::is_boolean<T>()>
operator()(cudf::mutable_column_view& length, const cudf::column_view & start, hipStream_t stream = 0)
{
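// in place: length[i] becomes start[i] + length[i], i.e. the end position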
thrust::transform(rmm::exec_policy(stream)->on(stream),
length.begin<T>(),
length.end<T>(),
start.begin<T>(),
length.begin<T>(),
[] __device__ (auto len_, auto start_){
return start_ + len_;
});
}
template<typename T>
std::enable_if_t<!std::is_integral<T>::value || cudf::is_boolean<T>()>
operator()(cudf::mutable_column_view& length, const cudf::column_view & start, hipStream_t stream = 0)
{
RAL_FAIL("Only integer types supported");
}
};
void transform_length_to_end(cudf::mutable_column_view& length, const cudf::column_view & start) {
RAL_EXPECTS(length.type() == start.type(), "Mismatched type between length and start columns");
cudf::type_dispatcher(length.type(), length_to_end_functor{}, length, start);
}
struct start_to_zero_based_indexing_functor {
template<typename T>
std::enable_if_t<std::is_integral<T>::value && !cudf::is_boolean<T>()>
operator()(cudf::mutable_column_view& start, hipStream_t stream = 0)
{
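// in place: subtract one from each start index to convert from 1-based to 0-based indexing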
thrust::transform(rmm::exec_policy(stream)->on(stream),
start.begin<T>(),
start.end<T>(),
start.begin<T>(),
[] __device__ (auto start_){
return start_ - 1;
});
}
template<typename T>
std::enable_if_t<!std::is_integral<T>::value || cudf::is_boolean<T>()>
operator()(cudf::mutable_column_view& start, hipStream_t stream = 0)
{
RAL_FAIL("Only integer types supported");
}
};
void transform_start_to_zero_based_indexing(cudf::mutable_column_view& start) {
cudf::type_dispatcher(start.type(), start_to_zero_based_indexing_functor{}, start);
}
} // namespace utilities
} // namespace ral
| 59a633a9dd86c390fc13a7413365a9050a667a76.cu | #pragma GCC diagnostic ignored "-Wunknown-pragmas"
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/transform.h>
#pragma GCC diagnostic pop
#include "transform.hpp"
#include "error.hpp"
namespace ral {
namespace utilities {
struct length_to_end_functor {
template<typename T>
std::enable_if_t<std::is_integral<T>::value && !cudf::is_boolean<T>()>
operator()(cudf::mutable_column_view& length, const cudf::column_view & start, cudaStream_t stream = 0)
{
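// in place: length[i] becomes start[i] + length[i], i.e. the end position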
thrust::transform(rmm::exec_policy(stream)->on(stream),
length.begin<T>(),
length.end<T>(),
start.begin<T>(),
length.begin<T>(),
[] __device__ (auto len_, auto start_){
return start_ + len_;
});
}
template<typename T>
std::enable_if_t<!std::is_integral<T>::value || cudf::is_boolean<T>()>
operator()(cudf::mutable_column_view& length, const cudf::column_view & start, cudaStream_t stream = 0)
{
RAL_FAIL("Only integer types supported");
}
};
void transform_length_to_end(cudf::mutable_column_view& length, const cudf::column_view & start) {
RAL_EXPECTS(length.type() == start.type(), "Mismatched type between length and start columns");
cudf::type_dispatcher(length.type(), length_to_end_functor{}, length, start);
}
struct start_to_zero_based_indexing_functor {
template<typename T>
std::enable_if_t<std::is_integral<T>::value && !cudf::is_boolean<T>()>
operator()(cudf::mutable_column_view& start, cudaStream_t stream = 0)
{
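// in place: subtract one from each start index to convert from 1-based to 0-based indexing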
thrust::transform(rmm::exec_policy(stream)->on(stream),
start.begin<T>(),
start.end<T>(),
start.begin<T>(),
[] __device__ (auto start_){
return start_ - 1;
});
}
template<typename T>
std::enable_if_t<!std::is_integral<T>::value || cudf::is_boolean<T>()>
operator()(cudf::mutable_column_view& start, cudaStream_t stream = 0)
{
RAL_FAIL("Only integer types supported");
}
};
void transform_start_to_zero_based_indexing(cudf::mutable_column_view& start) {
cudf::type_dispatcher(start.type(), start_to_zero_based_indexing_functor{}, start);
}
} // namespace utilities
} // namespace ral
|
4aae625c478ecede8887a66fd601b0bf2fcf0140.hip | // !!! This is a file automatically generated by hipify!!!
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/legacy/cudf_test_fixtures.h>
#include <utilities/legacy/cudf_utils.h>
#include <utilities/legacy/column_utils.hpp>
#include <cudf/cudf.h>
#include <thrust/device_vector.h>
#include <cstdlib>
#include <iostream>
#include <vector>
#include <chrono>
#include <map>
// uncomment to enable benchmarking gdf_column_concat
//#define ENABLE_CONCAT_BENCHMARK
template <typename T>
struct print {
__device__ void operator()(T x) { printf("%x ", x); }
};
template <typename ColumnType>
struct ColumnConcatTest : public GdfTest
{
ColumnConcatTest() {}
~ColumnConcatTest() {}
template <typename data_initializer_t, typename null_initializer_t>
void multicolumn_test(std::vector<cudf::size_type> column_sizes,
data_initializer_t data_init,
null_initializer_t null_init)
{
std::vector< std::vector<ColumnType> > the_columns(column_sizes.size());
for (size_t i = 0; i < column_sizes.size(); ++i)
initialize_vector(the_columns[i], column_sizes[i], data_init);
// This is just an alias to a gdf_column with a custom deleter that will free
// the data and valid fields when the unique_ptr goes out of scope
using gdf_col_pointer = typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
// Copies the random data from each host vector in the_columns to the device in a gdf_column.
// Each gdf_column's validity bit i will be initialized with the lambda
std::vector<gdf_col_pointer> gdf_columns = initialize_gdf_columns(the_columns, null_init);
std::vector<gdf_column*> raw_gdf_columns;
for(auto const & c : gdf_columns) {
raw_gdf_columns.push_back(c.get());
}
gdf_column **columns_to_concat = raw_gdf_columns.data();
int num_columns = raw_gdf_columns.size();
cudf::size_type total_size = 0;
for (auto sz : column_sizes) total_size += sz;
std::vector<ColumnType> output_data(total_size);
std::vector<cudf::valid_type> output_valid(gdf_valid_allocation_size(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
EXPECT_EQ( GDF_SUCCESS, gdf_column_concat(output_gdf_col.get(),
columns_to_concat,
num_columns) );
// make a concatenated reference
std::vector<ColumnType> ref_data;
for (size_t i = 0; i < the_columns.size(); ++i)
std::copy(the_columns[i].begin(), the_columns[i].end(), std::back_inserter(ref_data));
cudf::size_type ref_null_count = 0;
std::vector<cudf::valid_type> ref_valid(gdf_valid_allocation_size(total_size));
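// rebuild the expected validity mask over the concatenated index space and count the expected nulls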
for (cudf::size_type index = 0, col = 0, row = 0; index < total_size; ++index)
{
if (null_init(row, col)) cudf::util::turn_bit_on(ref_valid.data(), index);
else ref_null_count++;
if (++row >= column_sizes[col]) { row = 0; col++; }
}
auto ref_gdf_col = create_gdf_column(ref_data, ref_valid);
EXPECT_EQ(ref_null_count, ref_gdf_col->null_count);
EXPECT_TRUE(gdf_equal_columns(*ref_gdf_col.get(), *output_gdf_col.get()));
}
template <typename data_initializer_t, typename null_initializer_t>
void multicolumn_bench(std::vector<size_t> column_sizes,
data_initializer_t data_init,
null_initializer_t null_init)
{
std::vector< std::vector<ColumnType> > the_columns(column_sizes.size());
for (size_t i = 0; i < column_sizes.size(); ++i)
initialize_vector(the_columns[i], column_sizes[i], data_init);
std::vector<gdf_col_pointer> gdf_columns = initialize_gdf_columns(the_columns, null_init);
std::vector<gdf_column*> raw_gdf_columns;
for(auto const & c : gdf_columns) {
raw_gdf_columns.push_back(c.get());
}
gdf_column **columns_to_concat = raw_gdf_columns.data();
int num_columns = raw_gdf_columns.size();
cudf::size_type total_size = 0;
for (auto sz : column_sizes) total_size += sz;
std::vector<ColumnType> output_data(total_size);
std::vector<cudf::valid_type> output_valid(gdf_valid_allocation_size(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
auto start = std::chrono::high_resolution_clock::now();
EXPECT_EQ( GDF_SUCCESS, gdf_column_concat(output_gdf_col.get(),
columns_to_concat,
num_columns) );
int num = 100;
for (int i = 0; i < num; ++i) {
gdf_column_concat(output_gdf_col.get(), columns_to_concat, num_columns);
}
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end-start;
std::cout << "Time for " << num << " concats of " << num_columns << " columns of "
<< total_size << " total elements:\n";
std::cout << diff.count() << " s\n";
}
};
using TestTypes = ::testing::Types<int8_t, int16_t, int32_t, int64_t, float, double>;
TYPED_TEST_CASE(ColumnConcatTest, TestTypes);
TYPED_TEST(ColumnConcatTest, ZeroColumns){
EXPECT_EQ(GDF_INVALID_API_CALL, gdf_column_concat(nullptr, nullptr, 0));
}
TYPED_TEST(ColumnConcatTest, NegativeColumns){
EXPECT_EQ(GDF_INVALID_API_CALL, gdf_column_concat(nullptr, nullptr, -1));
}
TYPED_TEST(ColumnConcatTest, NullOutput){
gdf_column input{};
gdf_column * input_p = &input;
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(nullptr, &input_p, 1));
}
TYPED_TEST(ColumnConcatTest, NullInput){
gdf_column output{};
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(&output, nullptr, 1));
}
TYPED_TEST(ColumnConcatTest, NullFirstInputColumn){
gdf_column output{};
gdf_column * input_p = nullptr;
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(&output, &input_p, 1));
}
TYPED_TEST(ColumnConcatTest, OutputWrongSize){
cudf::size_type num_columns = 4;
std::vector<cudf::size_type> column_sizes{4, 1, 2, 3};
ASSERT_EQ(num_columns, static_cast<cudf::size_type>(column_sizes.size()));
cudf::size_type const total_size{
std::accumulate(column_sizes.begin(), column_sizes.end(), 0)};
std::vector<gdf_col_pointer> input_column_pointers(num_columns);
std::vector<gdf_column*> input_columns(num_columns, nullptr);
for (int i = 0; i < num_columns; ++i) {
cudf::size_type size = column_sizes[i];
std::vector<TypeParam> data(size);
std::vector<cudf::valid_type> valid(gdf_valid_allocation_size(size));
input_column_pointers[i] = create_gdf_column(data, valid);
input_columns[i] = input_column_pointers[i].get();
}
std::vector<TypeParam> output_data(total_size);
std::vector<cudf::valid_type> output_valid(gdf_valid_allocation_size(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
// test mismatched sizes
output_gdf_col->size = total_size - 1;
EXPECT_EQ(GDF_COLUMN_SIZE_MISMATCH, gdf_column_concat(output_gdf_col.get(), input_columns.data(), num_columns));
}
TYPED_TEST(ColumnConcatTest, NullInputData){
cudf::size_type num_columns = 4;
std::vector<cudf::size_type> column_sizes{4, 1, 2, 3};
ASSERT_EQ(num_columns, static_cast<cudf::size_type>(column_sizes.size()));
cudf::size_type const total_size{
std::accumulate(column_sizes.begin(), column_sizes.end(), 0)};
std::vector<TypeParam> output_data(total_size);
std::vector<cudf::valid_type> output_valid(gdf_valid_allocation_size(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
std::vector<gdf_column> cols(num_columns);
std::vector<gdf_column*> input_columns(num_columns, nullptr);
for (int i = 0; i < num_columns; ++i) {
cols[i].data = nullptr;
cols[i].valid = nullptr;
cols[i].size = column_sizes[i];
cols[i].dtype = output_gdf_col->dtype;
input_columns[i] = &cols[i];
}
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(output_gdf_col.get(), input_columns.data(), num_columns));
}
TYPED_TEST(ColumnConcatTest, RandomData) {
cudf::size_type column_size = 1005;
cudf::size_type null_interval = 17;
std::vector<cudf::size_type> column_sizes{column_size, column_size, column_size};
this->multicolumn_test(column_sizes,
[](int index){ return std::rand(); },
[null_interval](cudf::size_type row, cudf::size_type col) {
return (row % null_interval) != 0;
});
}
TYPED_TEST(ColumnConcatTest, DifferentLengthColumns) {
cudf::size_type null_interval = 2;
std::vector<cudf::size_type> column_sizes{13, 3, 5};
this->multicolumn_test(column_sizes,
[](int index){ return std::rand(); },
[null_interval](cudf::size_type row, cudf::size_type col) {
return (row % null_interval) != 0;
});
}
TYPED_TEST(ColumnConcatTest, DifferentLengthColumnsLimitedBits) {
std::vector<cudf::size_type> column_sizes{13, 3, 5};
auto limited_bits = [column_sizes](cudf::size_type row, cudf::size_type col){
return row < column_sizes[col];
};
this->multicolumn_test(
column_sizes, [](int index) { return std::rand(); }, limited_bits);
}
TYPED_TEST(ColumnConcatTest, MoreComplicatedColumns) {
std::vector<cudf::size_type> column_sizes{5, 1003, 17, 117};
auto bit_setter = [column_sizes](cudf::size_type row, cudf::size_type col) {
switch (col) {
case 0:
return (row % 2) != 0; // column 0 has odd bits set
case 1:
return row < column_sizes[col];
case 2:
return (row % 17) != 0;
case 3:
return row < 3;
}
return true;
};
this->multicolumn_test(column_sizes,
[](int index){ return std::rand(); },
bit_setter);
}
TYPED_TEST(ColumnConcatTest, EightByteColumns) {
std::vector<cudf::size_type> column_sizes{13, 3, 5};
auto limited_bits = [column_sizes](cudf::size_type row, cudf::size_type col){
return row < column_sizes[col];
};
this->multicolumn_test(column_sizes,
[](int index){ return std::rand(); },
limited_bits);
}
TYPED_TEST(ColumnConcatTest, SingleColumn){
std::vector<cudf::size_type> column_sizes{13};
this->multicolumn_test(column_sizes,
[](int index){ return std::rand(); },
[](cudf::size_type row, cudf::size_type col) {
return true;
});
}
#ifdef ENABLE_CONCAT_BENCHMARK
TYPED_TEST(ColumnConcatTest, Benchmark) {
size_t n = 42000000;
std::vector<size_t> column_sizes{n, n, n, n};
cudf::size_type null_interval = 17;
auto bit_setter = [null_interval](cudf::size_type row, cudf::size_type col) {
return (row % null_interval) != 0;
};
multicolumn_bench<TypeParam>(column_sizes,
[](int index){ return std::rand(); },
bit_setter);
}
#endif // ENABLE_CONCAT_BENCHMARK
TEST(ColumnByteWidth, TestByteWidth)
{
std::map<gdf_dtype, int> enum_to_type_size { {GDF_INT8, sizeof(int8_t)},
{GDF_INT16, sizeof(int16_t)},
{GDF_INT32, sizeof(int32_t)},
{GDF_INT64, sizeof(int64_t)},
{GDF_FLOAT32, sizeof(float)},
{GDF_FLOAT64, sizeof(double)},
{GDF_DATE32, sizeof(gdf_date32)},
{GDF_DATE64, sizeof(gdf_date64)},
{GDF_TIMESTAMP, sizeof(gdf_timestamp)},
{GDF_CATEGORY, sizeof(gdf_category)}
};
for(auto const& pair : enum_to_type_size)
{
int byte_width{0};
gdf_column col{};
col.dtype = pair.first;
ASSERT_NO_THROW(byte_width = cudf::byte_width(col));
EXPECT_EQ(pair.second, byte_width);
}
}
TEST(ColumnByteWidth, TestGdfTypeSize)
{
std::map<gdf_dtype, int> enum_to_type_size { {GDF_INT8, sizeof(int8_t)},
{GDF_INT16, sizeof(int16_t)},
{GDF_INT32, sizeof(int32_t)},
{GDF_INT64, sizeof(int64_t)},
{GDF_FLOAT32, sizeof(float)},
{GDF_FLOAT64, sizeof(double)},
{GDF_DATE32, sizeof(gdf_date32)},
{GDF_DATE64, sizeof(gdf_date64)},
{GDF_TIMESTAMP, sizeof(gdf_timestamp)},
{GDF_CATEGORY, sizeof(gdf_category)}
};
for(auto const& pair : enum_to_type_size)
{
EXPECT_EQ(pair.second, (int) cudf::size_of(pair.first));
}
}
| 4aae625c478ecede8887a66fd601b0bf2fcf0140.cu | #include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/legacy/cudf_test_fixtures.h>
#include <utilities/legacy/cudf_utils.h>
#include <utilities/legacy/column_utils.hpp>
#include <cudf/cudf.h>
#include <thrust/device_vector.h>
#include <cstdlib>
#include <iostream>
#include <vector>
#include <chrono>
#include <map>
// uncomment to enable benchmarking gdf_column_concat
//#define ENABLE_CONCAT_BENCHMARK
template <typename T>
struct print {
__device__ void operator()(T x) { printf("%x ", x); }
};
template <typename ColumnType>
struct ColumnConcatTest : public GdfTest
{
ColumnConcatTest() {}
~ColumnConcatTest() {}
template <typename data_initializer_t, typename null_initializer_t>
void multicolumn_test(std::vector<cudf::size_type> column_sizes,
data_initializer_t data_init,
null_initializer_t null_init)
{
std::vector< std::vector<ColumnType> > the_columns(column_sizes.size());
for (size_t i = 0; i < column_sizes.size(); ++i)
initialize_vector(the_columns[i], column_sizes[i], data_init);
// This is just an alias to a gdf_column with a custom deleter that will free
// the data and valid fields when the unique_ptr goes out of scope
using gdf_col_pointer = typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
// Copies the random data from each host vector in the_columns to the device in a gdf_column.
// Each gdf_column's validity bit i will be initialized with the lambda
std::vector<gdf_col_pointer> gdf_columns = initialize_gdf_columns(the_columns, null_init);
std::vector<gdf_column*> raw_gdf_columns;
for(auto const & c : gdf_columns) {
raw_gdf_columns.push_back(c.get());
}
gdf_column **columns_to_concat = raw_gdf_columns.data();
int num_columns = raw_gdf_columns.size();
cudf::size_type total_size = 0;
for (auto sz : column_sizes) total_size += sz;
std::vector<ColumnType> output_data(total_size);
std::vector<cudf::valid_type> output_valid(gdf_valid_allocation_size(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
EXPECT_EQ( GDF_SUCCESS, gdf_column_concat(output_gdf_col.get(),
columns_to_concat,
num_columns) );
// make a concatenated reference
std::vector<ColumnType> ref_data;
for (size_t i = 0; i < the_columns.size(); ++i)
std::copy(the_columns[i].begin(), the_columns[i].end(), std::back_inserter(ref_data));
cudf::size_type ref_null_count = 0;
std::vector<cudf::valid_type> ref_valid(gdf_valid_allocation_size(total_size));
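// rebuild the expected validity mask over the concatenated index space and count the expected nulls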
for (cudf::size_type index = 0, col = 0, row = 0; index < total_size; ++index)
{
if (null_init(row, col)) cudf::util::turn_bit_on(ref_valid.data(), index);
else ref_null_count++;
if (++row >= column_sizes[col]) { row = 0; col++; }
}
auto ref_gdf_col = create_gdf_column(ref_data, ref_valid);
EXPECT_EQ(ref_null_count, ref_gdf_col->null_count);
EXPECT_TRUE(gdf_equal_columns(*ref_gdf_col.get(), *output_gdf_col.get()));
}
template <typename data_initializer_t, typename null_initializer_t>
void multicolumn_bench(std::vector<size_t> column_sizes,
data_initializer_t data_init,
null_initializer_t null_init)
{
std::vector< std::vector<ColumnType> > the_columns(column_sizes.size());
for (size_t i = 0; i < column_sizes.size(); ++i)
initialize_vector(the_columns[i], column_sizes[i], data_init);
std::vector<gdf_col_pointer> gdf_columns = initialize_gdf_columns(the_columns, null_init);
std::vector<gdf_column*> raw_gdf_columns;
for(auto const & c : gdf_columns) {
raw_gdf_columns.push_back(c.get());
}
gdf_column **columns_to_concat = raw_gdf_columns.data();
int num_columns = raw_gdf_columns.size();
cudf::size_type total_size = 0;
for (auto sz : column_sizes) total_size += sz;
std::vector<ColumnType> output_data(total_size);
std::vector<cudf::valid_type> output_valid(gdf_valid_allocation_size(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
auto start = std::chrono::high_resolution_clock::now();
EXPECT_EQ( GDF_SUCCESS, gdf_column_concat(output_gdf_col.get(),
columns_to_concat,
num_columns) );
int num = 100;
for (int i = 0; i < num; ++i) {
gdf_column_concat(output_gdf_col.get(), columns_to_concat, num_columns);
}
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end-start;
std::cout << "Time for " << num << " concats of " << num_columns << " columns of "
<< total_size << " total elements:\n";
std::cout << diff.count() << " s\n";
}
};
using TestTypes = ::testing::Types<int8_t, int16_t, int32_t, int64_t, float, double>;
TYPED_TEST_CASE(ColumnConcatTest, TestTypes);
TYPED_TEST(ColumnConcatTest, ZeroColumns){
EXPECT_EQ(GDF_INVALID_API_CALL, gdf_column_concat(nullptr, nullptr, 0));
}
TYPED_TEST(ColumnConcatTest, NegativeColumns){
EXPECT_EQ(GDF_INVALID_API_CALL, gdf_column_concat(nullptr, nullptr, -1));
}
TYPED_TEST(ColumnConcatTest, NullOutput){
gdf_column input{};
gdf_column * input_p = &input;
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(nullptr, &input_p, 1));
}
TYPED_TEST(ColumnConcatTest, NullInput){
gdf_column output{};
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(&output, nullptr, 1));
}
TYPED_TEST(ColumnConcatTest, NullFirstInputColumn){
gdf_column output{};
gdf_column * input_p = nullptr;
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(&output, &input_p, 1));
}
TYPED_TEST(ColumnConcatTest, OutputWrongSize){
cudf::size_type num_columns = 4;
std::vector<cudf::size_type> column_sizes{4, 1, 2, 3};
ASSERT_EQ(num_columns, static_cast<cudf::size_type>(column_sizes.size()));
cudf::size_type const total_size{
std::accumulate(column_sizes.begin(), column_sizes.end(), 0)};
std::vector<gdf_col_pointer> input_column_pointers(num_columns);
std::vector<gdf_column*> input_columns(num_columns, nullptr);
for (int i = 0; i < num_columns; ++i) {
cudf::size_type size = column_sizes[i];
std::vector<TypeParam> data(size);
std::vector<cudf::valid_type> valid(gdf_valid_allocation_size(size));
input_column_pointers[i] = create_gdf_column(data, valid);
input_columns[i] = input_column_pointers[i].get();
}
std::vector<TypeParam> output_data(total_size);
std::vector<cudf::valid_type> output_valid(gdf_valid_allocation_size(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
// test mismatched sizes
output_gdf_col->size = total_size - 1;
EXPECT_EQ(GDF_COLUMN_SIZE_MISMATCH, gdf_column_concat(output_gdf_col.get(), input_columns.data(), num_columns));
}
TYPED_TEST(ColumnConcatTest, NullInputData){
cudf::size_type num_columns = 4;
std::vector<cudf::size_type> column_sizes{4, 1, 2, 3};
ASSERT_EQ(num_columns, static_cast<cudf::size_type>(column_sizes.size()));
cudf::size_type const total_size{
std::accumulate(column_sizes.begin(), column_sizes.end(), 0)};
std::vector<TypeParam> output_data(total_size);
std::vector<cudf::valid_type> output_valid(gdf_valid_allocation_size(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
std::vector<gdf_column> cols(num_columns);
std::vector<gdf_column*> input_columns(num_columns, nullptr);
for (int i = 0; i < num_columns; ++i) {
cols[i].data = nullptr;
cols[i].valid = nullptr;
cols[i].size = column_sizes[i];
cols[i].dtype = output_gdf_col->dtype;
input_columns[i] = &cols[i];
}
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(output_gdf_col.get(), input_columns.data(), num_columns));
}
TYPED_TEST(ColumnConcatTest, RandomData) {
cudf::size_type column_size = 1005;
cudf::size_type null_interval = 17;
std::vector<cudf::size_type> column_sizes{column_size, column_size, column_size};
this->multicolumn_test(column_sizes,
[](int index){ return std::rand(); },
[null_interval](cudf::size_type row, cudf::size_type col) {
return (row % null_interval) != 0;
});
}
TYPED_TEST(ColumnConcatTest, DifferentLengthColumns) {
cudf::size_type null_interval = 2;
std::vector<cudf::size_type> column_sizes{13, 3, 5};
this->multicolumn_test(column_sizes,
[](int index){ return std::rand(); },
[null_interval](cudf::size_type row, cudf::size_type col) {
return (row % null_interval) != 0;
});
}
TYPED_TEST(ColumnConcatTest, DifferentLengthColumnsLimitedBits) {
std::vector<cudf::size_type> column_sizes{13, 3, 5};
auto limited_bits = [column_sizes](cudf::size_type row, cudf::size_type col){
return row < column_sizes[col];
};
this->multicolumn_test(
column_sizes, [](int index) { return std::rand(); }, limited_bits);
}
TYPED_TEST(ColumnConcatTest, MoreComplicatedColumns) {
std::vector<cudf::size_type> column_sizes{5, 1003, 17, 117};
auto bit_setter = [column_sizes](cudf::size_type row, cudf::size_type col) {
switch (col) {
case 0:
return (row % 2) != 0; // column 0 has odd bits set
case 1:
return row < column_sizes[col];
case 2:
return (row % 17) != 0;
case 3:
return row < 3;
}
return true;
};
this->multicolumn_test(column_sizes,
[](int index){ return std::rand(); },
bit_setter);
}
TYPED_TEST(ColumnConcatTest, EightByteColumns) {
std::vector<cudf::size_type> column_sizes{13, 3, 5};
auto limited_bits = [column_sizes](cudf::size_type row, cudf::size_type col){
return row < column_sizes[col];
};
this->multicolumn_test(column_sizes,
[](int index){ return std::rand(); },
limited_bits);
}
TYPED_TEST(ColumnConcatTest, SingleColumn){
std::vector<cudf::size_type> column_sizes{13};
this->multicolumn_test(column_sizes,
[](int index){ return std::rand(); },
[](cudf::size_type row, cudf::size_type col) {
return true;
});
}
#ifdef ENABLE_CONCAT_BENCHMARK
TYPED_TEST(ColumnConcatTest, Benchmark) {
size_t n = 42000000;
std::vector<size_t> column_sizes{n, n, n, n};
cudf::size_type null_interval = 17;
auto bit_setter = [null_interval](cudf::size_type row, cudf::size_type col) {
return (row % null_interval) != 0;
};
multicolumn_bench<TypeParam>(column_sizes,
[](int index){ return std::rand(); },
bit_setter);
}
#endif // ENABLE_CONCAT_BENCHMARK
TEST(ColumnByteWidth, TestByteWidth)
{
std::map<gdf_dtype, int> enum_to_type_size { {GDF_INT8, sizeof(int8_t)},
{GDF_INT16, sizeof(int16_t)},
{GDF_INT32, sizeof(int32_t)},
{GDF_INT64, sizeof(int64_t)},
{GDF_FLOAT32, sizeof(float)},
{GDF_FLOAT64, sizeof(double)},
{GDF_DATE32, sizeof(gdf_date32)},
{GDF_DATE64, sizeof(gdf_date64)},
{GDF_TIMESTAMP, sizeof(gdf_timestamp)},
{GDF_CATEGORY, sizeof(gdf_category)}
};
for(auto const& pair : enum_to_type_size)
{
int byte_width{0};
gdf_column col{};
col.dtype = pair.first;
ASSERT_NO_THROW(byte_width = cudf::byte_width(col));
EXPECT_EQ(pair.second, byte_width);
}
}
TEST(ColumnByteWidth, TestGdfTypeSize)
{
std::map<gdf_dtype, int> enum_to_type_size { {GDF_INT8, sizeof(int8_t)},
{GDF_INT16, sizeof(int16_t)},
{GDF_INT32, sizeof(int32_t)},
{GDF_INT64, sizeof(int64_t)},
{GDF_FLOAT32, sizeof(float)},
{GDF_FLOAT64, sizeof(double)},
{GDF_DATE32, sizeof(gdf_date32)},
{GDF_DATE64, sizeof(gdf_date64)},
{GDF_TIMESTAMP, sizeof(gdf_timestamp)},
{GDF_CATEGORY, sizeof(gdf_category)}
};
for(auto const& pair : enum_to_type_size)
{
EXPECT_EQ(pair.second, (int) cudf::size_of(pair.first));
}
}
|
718f29a31033a5db4ce55f208f7c948020bb5048.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void shiftRightPixels(int16_t *bayImg, size_t width, size_t height, int bppMult)
{
int2 pixelCoord = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (pixelCoord.x < width && pixelCoord.y < height)
{
bayImg[pixelCoord.y * width + pixelCoord.x] >>= bppMult;
}
} | 718f29a31033a5db4ce55f208f7c948020bb5048.cu | #include "includes.h"
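// one thread per pixel: arithmetic right shift of each Bayer pixel by bppMult bits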
__global__ void shiftRightPixels(int16_t *bayImg, size_t width, size_t height, int bppMult)
{
int2 pixelCoord = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (pixelCoord.x < width && pixelCoord.y < height)
{
bayImg[pixelCoord.y * width + pixelCoord.x] >>= bppMult;
}
} |
f555018fb7016d81ff04580fb2472fad6b0b8906.hip | // !!! This is a file automatically generated by hipify!!!
/*
* test_smc.cu
*
* Created on: 1-Mar-2009
* Author: Owner
*/
#include <stdio.h>
#include <cutil.h>
#include "rng.h"
#include "gauss.h"
#include "output.h"
#include "kalman.h"
#include "matrix.h"
#include "fsv.h"
#include "smc_fsv.h"
#include "smc_lg.h"
#include "smc_usv.h"
#include "smc_mvlg.h"
#include "scan.h"
#include "usv.h"
void generate_data(float* xs, float* ys, int T, float sigma_x, float sigma_y) {
const int M = 32768;
float steps[M];
populate_randn(steps, M);
// xs_{-1} = 0;
xs[0] = steps[0];
ys[0] = xs[0] + steps[1];
for (int i = 1; i < T; i++) {
xs[i] = xs[i - 1] + steps[i * 2];
ys[i] = xs[i] + steps[i * 2 + 1];
}
}
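/* generate_data_mv: simulate a multivariate linear-Gaussian model,
 * x_t = scale_step * x_{t-1} + L_step * e_t and y_t = scale_like * x_t + L_like * v_t,
 * where L_step and L_like are Cholesky factors of cov_step and cov_like */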
template<class T>
void generate_data_mv(T* xs, T* ys, int Dx, int Dy, int total_time, T* scale_step, T* cov_step,
T* scale_like, T* cov_like) {
int Mx = max(total_time * Dx, 32768);
int My = max(total_time * Dy, 32768);
T* steps_x = (T*) malloc(Mx * sizeof(T));
T* steps_y = (T*) malloc(My * sizeof(T));
T* L_step = (T*) malloc(Dx * Dx * sizeof(T));
T* L_like = (T*) malloc(Dy * Dy * sizeof(T));
T* temp_x = (T*) malloc(Dx * sizeof(T));
T* temp_y = (T*) malloc(Dy * sizeof(T));
matrix_chol(cov_step, L_step, Dx);
matrix_chol(cov_like, L_like, Dy);
// matrix_print(L_step, Dx, Dx);
// matrix_print(L_like, Dy, Dy);
populate_randn(steps_x, Mx);
populate_randn(steps_y, My);
// xs_{-1} = 0;
matrix_times(L_step, steps_x, temp_x, Dx, Dx, Dx, 1);
vector_set(xs, temp_x, Dx);
matrix_times(scale_like, xs, ys, Dy, Dx, Dx, 1);
matrix_times(L_like, vector_get(steps_y, Dy, 0), temp_y, Dy, Dy, Dy, 1);
vector_add(ys, temp_y, ys, Dy);
for (int i = 1; i < total_time; i++) {
matrix_times(scale_step, vector_get(xs, Dx, i - 1), vector_get(xs, Dx, i), Dx, Dx, Dx, 1);
matrix_times(L_step, vector_get(steps_x, Dx, i), temp_x, Dx, Dx, Dx, 1);
vector_add(vector_get(xs, Dx, i), temp_x, vector_get(xs, Dx, i), Dx);
matrix_times(scale_like, vector_get(xs, Dx, i), vector_get(ys, Dy, i), Dy, Dx, Dx, 1);
matrix_times(L_like, vector_get(steps_y, Dy, i), temp_y, Dy, Dy, Dy, 1);
vector_add(vector_get(ys, Dy, i), temp_y, vector_get(ys, Dy, i), Dy);
}
free(L_step);
free(L_like);
free(temp_x);
free(temp_y);
free(steps_x);
free(steps_y);
}
void test_smc_lg_kalman(int T, float* ys_real, float sigma_like, float sigma_step) {
float ll_kalman;
float* kalman_xs = (float*) malloc(T * sizeof(float));
kalman(0.0, kalman_xs, ys_real, T, sigma_like, sigma_step, &ll_kalman);
to_file(kalman_xs, T, "xs_lg_kalman.txt");
printf("ll_lg_kalman = %f\n", ll_kalman);
free(kalman_xs);
}
void test_smc_lg(int N, int T, float* ys_real, float* h_args_l, float scale_step, float sigma_step,
int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* xs = (float*) malloc(N * T * sizeof(float));
hipMalloc((void**) &d_xs, N * T * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
hipMalloc((void**) &d_ws, N * sizeof(float));
hipMalloc((void**) &x_init, N * sizeof(float));
populate_randn_d(x_init, N);
float ll;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_lg(x_init, d_xs, d_ws, ys_real, N, T, h_args_l, scale_step, sigma_step, &ll, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_lg = %f\n", ll);
hipMemcpy(xs, d_xs, N * T * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(ws, d_ws, N * sizeof(float), hipMemcpyDeviceToHost);
to_file(xs, N * T, "xs_lg.txt");
to_file(ws, N, "ws_lg.txt");
free(ws);
hipFree(d_ws);
free(xs);
hipFree(d_xs);
hipFree(x_init);
}
void test_smc_lg_forget(int N, int T, float* ys_real, float* h_args_l, float scale_step,
float sigma_step, int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* xs = (float*) malloc(N * sizeof(float));
hipMalloc((void**) &d_xs, N * sizeof(float));
hipMalloc((void**) &x_init, N * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
hipMalloc((void**) &d_ws, N * sizeof(float));
populate_randn_d(x_init, N);
float ll_forget;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_forget_lg(x_init, d_xs, d_ws, ys_real, N, T, h_args_l, scale_step, sigma_step, &ll_forget,
nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_lg_forget = %f\n", ll_forget);
free(xs);
free(ws);
hipFree(d_ws);
hipFree(d_xs);
hipFree(x_init);
}
void test_smc_lg_ref(int N, int T, float* ys_real, float* h_args_l, float scale_step,
float sigma_step) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* xs = (float*) malloc(N * T * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
float* hx_init = (float*) malloc(N * sizeof(float));
populate_randn(hx_init, N);
float ll_ref;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_lg(hx_init, xs, ws, ys_real, N, T, h_args_l, scale_step, sigma_step, &ll_ref);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_lg_ref = %f\n", ll_ref);
to_file(xs, N * T, "xs_lg_ref.txt");
to_file(ws, N, "ws_lg_ref.txt");
free(ws);
free(hx_init);
free(xs);
}
void test_smc_lg_forget_ref(int N, int T, float* ys_real, float* h_args_l, float scale_step,
float sigma_step) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* xs = (float*) malloc(N * sizeof(float));
float* hx_init = (float*) malloc(N * sizeof(float));
populate_randn(hx_init, N);
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
hipMalloc((void**) &d_ws, N * sizeof(float));
float ll_forget_ref;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_forget_lg(hx_init, xs, ws, ys_real, N, T, h_args_l, scale_step, sigma_step,
&ll_forget_ref);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_lg_forget_ref = %f\n", ll_forget_ref);
free(ws);
hipFree(d_ws);
free(hx_init);
free(xs);
}
template<class T>
void test_smc_usv_ref(int N, int total_time, T* ys_real, T* h_args_l, T alpha, T sigma) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
T* xs = (float*) malloc(N * total_time * sizeof(float));
T* ws = (float*) malloc(N * sizeof(float));
float* hx_init = (float*) malloc(N * sizeof(float));
populate_randn(hx_init, N);
float ll_ref;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_usv(hx_init, xs, ws, ys_real, N, total_time, h_args_l, alpha, sigma, &ll_ref);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_usv_ref = %f\n", ll_ref);
char filename_xs[] = "xs_usv_ref.txt";
char filename_ws[] = "ws_usv_ref.txt";
to_file(xs, N * total_time, filename_xs);
to_file(ws, N, filename_ws);
free(ws);
free(hx_init);
free(xs);
}
template<class T>
void test_smc_usv_forget_ref(int N, int total_time, T* ys_real, T* h_args_l, T alpha, T sigma) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
T* xs = (float*) malloc(N * sizeof(float));
T* ws = (float*) malloc(N * sizeof(float));
float* hx_init = (float*) malloc(N * sizeof(float));
populate_randn(hx_init, N);
float ll_ref;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_forget_usv(hx_init, xs, ws, ys_real, N, total_time, h_args_l, alpha, sigma, &ll_ref);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_usv_forget_ref = %f\n", ll_ref);
// to_file(xs, N * total_time, "xs_usv_ref.txt");
// to_file(ws, N, "ws_usv_ref.txt");
free(ws);
free(hx_init);
free(xs);
}
void test_smc_usv(int N, int T, float* ys_real, float* h_args_l, float alpha, float sigma, int nb,
int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* xs = (float*) malloc(N * T * sizeof(float));
hipMalloc((void**) &d_xs, N * T * sizeof(float));
hipMalloc((void**) &x_init, N * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
hipMalloc((void**) &d_ws, N * sizeof(float));
populate_randn_d(x_init, N);
float ll;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_usv(x_init, d_xs, d_ws, ys_real, N, T, h_args_l, alpha, sigma, &ll, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_usv = %f\n", ll);
hipMemcpy(xs, d_xs, N * T * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(ws, d_ws, N * sizeof(float), hipMemcpyDeviceToHost);
to_file(xs, N * T, "xs_usv.txt");
to_file(ws, N, "ws_usv.txt");
free(xs);
free(ws);
hipFree(d_ws);
hipFree(d_xs);
hipFree(x_init);
}
void test_smc_usv_forget(int N, int T, float* ys_real, float* h_args_l, float alpha, float sigma,
int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* xs = (float*) malloc(N * sizeof(float));
hipMalloc((void**) &d_xs, N * sizeof(float));
hipMalloc((void**) &x_init, N * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
hipMalloc((void**) &d_ws, N * sizeof(float));
populate_randn_d(x_init, N);
float ll_forget;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_forget_usv(x_init, d_xs, d_ws, ys_real, N, T, h_args_l, alpha, sigma, &ll_forget, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_usv_forget = %f\n", ll_forget);
free(xs);
free(ws);
hipFree(d_ws);
hipFree(d_xs);
hipFree(x_init);
}
void test_smc_mvlg_forget(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step,
float* cov_step, float* h_args_l, int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* d_ys_real;
float* xs = (float*) malloc(N * Dx * sizeof(float));
hipMalloc((void**) &d_xs, N * Dx * sizeof(float));
hipMalloc((void**) &x_init, N * Dx * sizeof(float));
hipMalloc((void**) &d_ys_real, T * Dy * sizeof(float));
hipMemcpy(d_ys_real, ys_real, T * Dy * sizeof(float), hipMemcpyHostToDevice);
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
hipMalloc((void**) &d_ws, N * sizeof(float));
float* hx_init = (float*) malloc(N * Dx * sizeof(float));
matrix_zero(hx_init, N, Dx);
hipMemcpy(x_init, hx_init, N * Dx * sizeof(float), hipMemcpyHostToDevice);
free(hx_init);
float ll_forget_D;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_forget_mvlg(x_init, d_xs, d_ws, d_ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_D, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_mvlg_forget = %f\n", ll_forget_D);
free(ws);
hipFree(d_ws);
free(xs);
hipFree(d_xs);
hipFree(x_init);
hipFree(d_ys_real);
}
void test_smc_mvlg(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step,
float* cov_step, float* h_args_l, int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* d_ys_real;
float* xs = (float*) malloc(N * Dx * T * sizeof(float));
hipMalloc((void**) &d_xs, N * Dx * T * sizeof(float));
hipMalloc((void**) &x_init, N * Dx * sizeof(float));
hipMalloc((void**) &d_ys_real, T * Dy * sizeof(float));
hipMemcpy(d_ys_real, ys_real, T * Dy * sizeof(float), hipMemcpyHostToDevice);
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
hipMalloc((void**) &d_ws, N * sizeof(float));
float* hx_init = (float*) malloc(N * Dx * sizeof(float));
matrix_zero(hx_init, N, Dx);
hipMemcpy(x_init, hx_init, N * Dx * sizeof(float), hipMemcpyHostToDevice);
free(hx_init);
float ll_D;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_mvlg(x_init, d_xs, d_ws, d_ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step, &ll_D,
nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_mvlg = %f\n", ll_D);
hipMemcpy(xs, d_xs, N * Dx * T * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(ws, d_ws, N * sizeof(float), hipMemcpyDeviceToHost);
to_file(xs, N * Dx * T, "xs_mvlg.txt");
to_file(ws, N, "ws_mvlg.txt");
free(ws);
hipFree(d_ws);
free(xs);
hipFree(d_xs);
hipFree(x_init);
hipFree(d_ys_real);
}
template<class T>
void test_smc_mvlg_ref(int N, int Dx, int Dy, int total_time, T* ys_real, T* scale_step,
T* cov_step, T* h_args_l) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
T* x_init = (T*) malloc(N * Dx * sizeof(T));
T* xs = (T*) malloc(N * Dx * total_time * sizeof(T));
T* ws = (T*) malloc(N * sizeof(T));
matrix_zero(x_init, N, Dx);
T ll_ref;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_mvlg(x_init, xs, ws, ys_real, N, Dx, Dy, total_time, h_args_l, scale_step, cov_step,
&ll_ref);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_mvlg_ref = %f\n", ll_ref);
char filename_xs[] = "xs_mvlg_ref.txt";
char filename_ws[] = "ws_mvlg_ref.txt";
to_file(xs, N * Dx * total_time, filename_xs);
to_file(ws, N, filename_ws);
free(ws);
free(xs);
free(x_init);
}
void test_smc_mvlg_forget_ref(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step,
float* cov_step, float* h_args_l) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* x_init = (float*) malloc(N * Dx * sizeof(float));
float* xs = (float*) malloc(N * Dx * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
matrix_zero(x_init, N, Dx);
float ll_ref;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_forget_mvlg(x_init, xs, ws, ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_ref);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_mvlg_forget_ref = %f\n", ll_ref);
free(ws);
free(xs);
free(x_init);
}
template<class T>
void test_smc_mvlg_kalman(int Dx, int Dy, int total_time, T* ys_real, T* scale_step, T* cov_step,
T* scale_like, T* cov_like) {
T ll_kalman_D;
T* kalman_xs = (T*) malloc(Dx * total_time * sizeof(T));
T* init_xs = (T*) malloc(Dx * sizeof(T));
for (int i = 0; i < Dx; i++) {
init_xs[i] = 0;
}
kalman(init_xs, kalman_xs, ys_real, Dx, Dy, total_time, scale_step, cov_step, scale_like,
cov_like, &ll_kalman_D);
char filename[] = "xs_mvlg_kalman.txt";
to_file(kalman_xs, Dx * total_time, filename);
printf("ll_mvlg_kalman = %f\n", ll_kalman_D);
free(kalman_xs);
free(init_xs);
}
void test_smc_fsv_forget(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step,
float* cov_step, float* h_args_l, int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* d_ys_real;
float* xs = (float*) malloc(N * Dx * sizeof(float));
hipMalloc((void**) &d_xs, N * Dx * sizeof(float));
hipMalloc((void**) &x_init, N * Dx * sizeof(float));
hipMalloc((void**) &d_ys_real, T * Dy * sizeof(float));
hipMemcpy(d_ys_real, ys_real, T * Dy * sizeof(float), hipMemcpyHostToDevice);
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
hipMalloc((void**) &d_ws, N * sizeof(float));
// populate_randn_d(x_init, N * D);
float* hx_init = (float*) malloc(N * Dx * sizeof(float));
matrix_zero(hx_init, N, Dx);
hipMemcpy(x_init, hx_init, N * Dx * sizeof(float), hipMemcpyHostToDevice);
free(hx_init);
float ll_forget_fsv;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_forget_fsv(x_init, d_xs, d_ws, d_ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_fsv, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_fsv_forget = %f\n", ll_forget_fsv);
free(ws);
free(xs);
hipFree(d_xs);
hipFree(x_init);
hipFree(d_ys_real);
}
void test_smc_fsv(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step, float* cov_step,
float* h_args_l, int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* d_ys_real;
float* xs = (float*) malloc(N * Dx * T * sizeof(float));
hipMalloc((void**) &d_xs, N * Dx * T * sizeof(float));
hipMalloc((void**) &x_init, N * Dx * sizeof(float));
hipMalloc((void**) &d_ys_real, T * Dy * sizeof(float));
hipMemcpy(d_ys_real, ys_real, T * Dy * sizeof(float), hipMemcpyHostToDevice);
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
hipMalloc((void**) &d_ws, N * sizeof(float));
// populate_randn_d(x_init, N * D);
float* hx_init = (float*) malloc(N * Dx * sizeof(float));
matrix_zero(hx_init, N, Dx);
hipMemcpy(x_init, hx_init, N * Dx * sizeof(float), hipMemcpyHostToDevice);
free(hx_init);
float ll_forget_fsv;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_fsv(x_init, d_xs, d_ws, d_ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_fsv, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_fsv = %f\n", ll_forget_fsv);
hipMemcpy(xs, d_xs, N * Dx * T * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(ws, d_ws, N * sizeof(float), hipMemcpyDeviceToHost);
to_file(xs, N * Dx * T, "xs_fsv.txt");
to_file(ws, N, "ws_fsv.txt");
free(ws);
hipFree(d_ws);
free(xs);
hipFree(d_xs);
hipFree(x_init);
hipFree(d_ys_real);
}
void test_smc_fsv_ref(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step,
float* cov_step, float* h_args_l) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* x_init = (float*) malloc(N * Dx * sizeof(float));
float* xs = (float*) malloc(N * Dx * T * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
matrix_zero(x_init, N, Dx);
float ll_forget_fsv;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_fsv(x_init, xs, ws, ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_fsv);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_fsv_ref = %f\n", ll_forget_fsv);
to_file(xs, N * Dx * T, "xs_fsv.txt");
to_file(ws, N, "ws_fsv.txt");
free(ws);
free(xs);
free(x_init);
}
void test_smc_fsv_forget_ref(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step,
float* cov_step, float* h_args_l) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* x_init = (float*) malloc(N * Dx * sizeof(float));
float* xs = (float*) malloc(N * Dx * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
matrix_zero(x_init, N, Dx);
float ll_forget_fsv;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_forget_fsv(x_init, xs, ws, ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_fsv);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_fsv_forget_ref = %f\n", ll_forget_fsv);
free(ws);
free(xs);
free(x_init);
}
void test_smc_fsv_ref(int N, int Dx, int Dy, int T, double* ys_real, double* scale_step,
double* cov_step, double* h_args_l) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
double* x_init = (double*) malloc(N * Dx * sizeof(double));
double* xs = (double*) malloc(N * Dx * T * sizeof(double));
double* ws = (double*) malloc(N * sizeof(double));
matrix_zero(x_init, N, Dx);
double ll_forget_fsv;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_fsv(x_init, xs, ws, ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_fsv);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_fsv_ref = %f\n", ll_forget_fsv);
to_file(xs, N * Dx * T, "xs_fsv.txt");
to_file(ws, N, "ws_fsv.txt");
free(ws);
free(xs);
free(x_init);
}
void test_smc_fsv_forget_ref(int N, int Dx, int Dy, int T, double* ys_real, double* scale_step,
double* cov_step, double* h_args_l) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
double* x_init = (double*) malloc(N * Dx * sizeof(double));
double* xs = (double*) malloc(N * Dx * sizeof(double));
double* ws = (double*) malloc(N * sizeof(double));
matrix_zero(x_init, N, Dx);
double ll_forget_fsv;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_forget_fsv(x_init, xs, ws, ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_fsv);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_fsv_forget_ref = %f\n", ll_forget_fsv);
free(ws);
free(xs);
free(x_init);
}
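// test_1D: 1-D linear-Gaussian model. The Kalman filter prints the exact
// log-likelihood first, so the GPU SMC, forgetting and CPU reference
// estimates that follow can be checked against it.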
void test_1D(int N, int T, int nb, int nt) {
float sigma_like = 1.0;
float sigma_step = 1.0;
float scale_step = 1.0;
float* xs_real = (float*) malloc(T * sizeof(float));
float* ys_real = (float*) malloc(T * sizeof(float));
generate_data(xs_real, ys_real, T, sigma_step, sigma_like);
to_file(xs_real, T, "xs_real_lg.txt");
to_file(ys_real, T, "ys_real_lg.txt");
test_smc_lg_kalman(T, ys_real, sigma_like, sigma_step);
float h_args_l[2];
compute_c1_c2(sigma_like, h_args_l[0], h_args_l[1]);
test_smc_lg(N, T, ys_real, h_args_l, scale_step, sigma_step, nb, nt);
test_smc_lg_forget(N, T, ys_real, h_args_l, scale_step, sigma_step, nb, nt);
test_smc_lg_ref(N, T, ys_real, h_args_l, scale_step, sigma_step);
test_smc_lg_forget_ref(N, T, ys_real, h_args_l, scale_step, sigma_step);
free(xs_real);
free(ys_real);
}
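// test_usv: univariate stochastic volatility test; alpha, sigma and beta are
// presumably the volatility persistence, state noise scale and observation
// scale. Only the forgetting GPU filter and its CPU reference are enabled.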
void test_usv(int N, int T, int nb, int nt) {
float alpha = 0.9f;
float sigma = 1.0f;
float beta = 1.0f;
float* xs_real = (float*) malloc(T * sizeof(float));
float* ys_real = (float*) malloc(T * sizeof(float));
generate_data_usv(xs_real, ys_real, T, alpha, sigma, beta);
to_file(xs_real, T, "xs_real_usv.txt");
to_file(ys_real, T, "ys_real_usv.txt");
float h_args_l[1];
h_args_l[0] = beta;
// kill_rng();
// seed_rng(16384, 32, 128);
//
// test_smc_usv(N, T, ys_real, h_args_l, alpha, sigma, nb, nt);
kill_rng();
seed_rng(16384, 32, 128);
test_smc_usv_forget(N, T, ys_real, h_args_l, alpha, sigma, nb, nt);
// kill_rng();
// seed_rng(16384, 32, 128);
//
// test_smc_usv_ref(N, T, ys_real, h_args_l, alpha, sigma);
kill_rng();
seed_rng(16384, 32, 128);
test_smc_usv_forget_ref(N, T, ys_real, h_args_l, alpha, sigma);
// test_smc_lg_forget(N, T, ys_real, h_args_l, sigma_step, nb, nt);
//
// test_smc_lg_ref(N, T, ys_real, h_args_l, sigma_step);
//
// test_smc_lg_forget_ref(N, T, ys_real, h_args_l, sigma_step);
free(xs_real);
free(ys_real);
}
void test_2D(int N, int T, int nb, int nt) {
const int D = 2;
float scale_step[D * D] = { 0.5f, 0.0f, 0.0f, 0.5f };
float cov_step[D * D] = { 1.0f, 0.8f, 0.8f, 1.0f };
float scale_like[D * D] = { 1.0f, 0.0f, 0.0f, 1.0f };
float cov_like[D * D] = { 0.5f, 0.0f, 0.0f, 0.5f };
float* xs_real = (float*) malloc(T * D * sizeof(float));
float* ys_real = (float*) malloc(T * D * sizeof(float));
generate_data_mv(xs_real, ys_real, D, D, T, scale_step, cov_step, scale_like, cov_like);
to_file(xs_real, T * D, "xs_real_mvlg.txt");
to_file(ys_real, T * D, "ys_real_mvlg.txt");
test_smc_mvlg_kalman(D, D, T, ys_real, scale_step, cov_step, scale_like, cov_like);
float h_args_l[1 + D * D + D * D];
compute_c1_c2(cov_like, D, h_args_l[0], h_args_l + 1);
for (int i = 0; i < D * D; i++) {
h_args_l[1 + D * D + i] = scale_like[i];
}
test_smc_mvlg(N, D, D, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
test_smc_mvlg_forget(N, D, D, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
test_smc_mvlg_ref(N, D, D, T, ys_real, scale_step, cov_step, h_args_l);
test_smc_mvlg_forget_ref(N, D, D, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
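// test_3by5D: 3-D state / 5-D observation linear-Gaussian test. The RNG is
// reseeded with n_burn_filter before each filter variant so that every run
// consumes the same random stream and the estimates are directly comparable.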
void test_3by5D(int N, int T, int nb, int nt, int n_burn_filter) {
const int Dx = 3;
const int Dy = 5;
float scale_step[Dx * Dx] = { 0.5f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 0.5f };
float cov_step[Dx * Dx] = { 1.0f, 0.8f, 0.0f, 0.8f, 1.0f, 0.4f, 0.0f, 0.4f, 1.0f };
float scale_like[Dy * Dx] = { 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.4f, 0.3f,
0.3f, 0.2f, 0.5f, 0.3f };
float cov_like[Dy * Dy];
matrix_identity(cov_like, Dy);
matrix_times(cov_like, cov_like, 0.5f, Dy, Dy);
float* xs_real = (float*) malloc(T * Dx * sizeof(float));
float* ys_real = (float*) malloc(T * Dy * sizeof(float));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_mv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, scale_like, cov_like);
to_file(xs_real, T * Dx, "xs_real_mvlg.txt");
to_file(ys_real, T * Dy, "ys_real_mvlg.txt");
test_smc_mvlg_kalman(Dx, Dy, T, ys_real, scale_step, cov_step, scale_like, cov_like);
float h_args_l[1 + Dy * Dy + Dy * Dx];
compute_c1_c2(cov_like, Dy, h_args_l[0], h_args_l + 1);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[1 + Dy * Dy + i] = scale_like[i];
}
seed_rng(n_burn_filter, 32, 128);
test_smc_mvlg(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
seed_rng(n_burn_filter, 32, 128);
test_smc_mvlg_forget(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
seed_rng(n_burn_filter, 32, 128);
test_smc_mvlg_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
seed_rng(n_burn_filter, 32, 128);
test_smc_mvlg_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
void test_3by5D_double(int N, int T, int n_burn_filter) {
const int Dx = 3;
const int Dy = 5;
double scale_step[Dx * Dx] = { 0.5f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 0.5f };
double cov_step[Dx * Dx] = { 1.0f, 0.8f, 0.0f, 0.8f, 1.0f, 0.4f, 0.0f, 0.4f, 1.0f };
double scale_like[Dy * Dx] = { 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.4f,
0.3f, 0.3f, 0.2f, 0.5f, 0.3f };
double cov_like[Dy * Dy];
matrix_identity(cov_like, Dy);
matrix_times(cov_like, cov_like, 0.5, Dy, Dy);
double* xs_real = (double*) malloc(T * Dx * sizeof(double));
double* ys_real = (double*) malloc(T * Dy * sizeof(double));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_mv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, scale_like, cov_like);
to_file(xs_real, T * Dx, "xs_real_mvlg.txt");
to_file(ys_real, T * Dy, "ys_real_mvlg.txt");
test_smc_mvlg_kalman(Dx, Dy, T, ys_real, scale_step, cov_step, scale_like, cov_like);
double h_args_l[1 + Dy * Dy + Dy * Dx];
compute_c1_c2(cov_like, Dy, h_args_l[0], h_args_l + 1);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[1 + Dy * Dy + i] = scale_like[i];
}
seed_rng(n_burn_filter, 32, 128);
test_smc_mvlg_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
// seed_rng(8192, 32, 128);
//
// test_smc_mvlg_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step,
// h_args_l);
free(xs_real);
free(ys_real);
}
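// test_fsv: factor stochastic volatility test with Dx = 3 latent factors and
// Dy = 5 observed series. scale_step/cov_step drive the latent AR(1)
// log-volatilities, B is the factor loading matrix and Psi the idiosyncratic
// noise covariance (naming inferred from the fsv/smc_fsv headers).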
void test_fsv(int N, int T, int nb, int nt) {
const int Dx = 3;
const int Dy = 5;
float scale_step[Dx * Dx];
matrix_identity(scale_step, Dx);
matrix_times(scale_step, scale_step, 0.9f, Dx, Dx);
float cov_step[Dx * Dx] = { 0.5f, 0.2f, 0.1f, 0.2f, 0.5f, 0.2f, 0.1f, 0.2f, 0.5f };
float Psi[Dy * Dy];
matrix_identity(Psi, Dy);
matrix_times(Psi, Psi, 0.5f, Dy, Dy);
float B[Dy * Dx] = { 1.0f, 0.0f, 0.0f, 0.5f, 1.0f, 0.0f, 0.5f, 0.5f, 1.0f, 0.2f, 0.6f, 0.3f,
0.8f, 0.7f, 0.5f };
float* xs_real = (float*) malloc(T * Dx * sizeof(float));
float* ys_real = (float*) malloc(T * Dy * sizeof(float));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_fsv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, Psi, B);
printf("%f\n", xs_real[T - 1]);
to_file(xs_real, T * Dx, "fsv_xs_real.txt");
to_file(ys_real, T * Dy, "fsv_ys_real.txt");
float h_args_l[Dy * Dx + Dx * Dy + Dy * Dy];
matrix_transpose(B, h_args_l + Dy * Dx, Dy, Dx);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[i] = B[i];
}
for (int i = 0; i < Dy * Dy; i++) {
h_args_l[2 * Dy * Dx + i] = Psi[i];
}
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
void test_fsv_2_3(int N, int T, int nb, int nt) {
const int Dx = 2;
const int Dy = 3;
float scale_step[Dx * Dx];
matrix_identity(scale_step, Dx);
matrix_times(scale_step, scale_step, 0.9f, Dx, Dx);
float cov_step[Dx * Dx] = { 0.5f, 0.2f, 0.2f, 0.5f };
float Psi[Dy * Dy];
matrix_identity(Psi, Dy);
matrix_times(Psi, Psi, 0.5f, Dy, Dy);
float B[Dy * Dx] = { 1.0f, 0.0f, 0.3f, 0.7f, 0.6f, 0.4f };
float* xs_real = (float*) malloc(T * Dx * sizeof(float));
float* ys_real = (float*) malloc(T * Dy * sizeof(float));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_fsv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, Psi, B);
printf("%f\n", xs_real[T - 1]);
to_file(xs_real, T * Dx, "fsv_xs_real.txt");
to_file(ys_real, T * Dy, "fsv_ys_real.txt");
float h_args_l[Dy * Dx + Dx * Dy + Dy * Dy];
matrix_transpose(B, h_args_l + Dy * Dx, Dy, Dx);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[i] = B[i];
}
for (int i = 0; i < Dy * Dy; i++) {
h_args_l[2 * Dy * Dx + i] = Psi[i];
}
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
// kill_rng();
// seed_rng(8192, 32, 128);
//
// test_smc_fsv_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
void test_fsv_2_2(int N, int T, int nb, int nt) {
const int Dx = 2;
const int Dy = 2;
float scale_step[Dx * Dx];
matrix_identity(scale_step, Dx);
matrix_times(scale_step, scale_step, 0.9f, Dx, Dx);
float cov_step[Dx * Dx] = { 0.5f, 0.2f, 0.2f, 0.5f };
float Psi[Dy * Dy];
matrix_identity(Psi, Dy);
matrix_times(Psi, Psi, 0.5f, Dy, Dy);
float B[Dy * Dx] = { 1.0f, 0.0f, 0.3f, 0.7f };
float* xs_real = (float*) malloc(T * Dx * sizeof(float));
float* ys_real = (float*) malloc(T * Dy * sizeof(float));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_fsv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, Psi, B);
printf("%f\n", xs_real[T - 1]);
to_file(xs_real, T * Dx, "fsv_xs_real.txt");
to_file(ys_real, T * Dy, "fsv_ys_real.txt");
float h_args_l[Dy * Dx + Dx * Dy + Dy * Dy];
matrix_transpose(B, h_args_l + Dy * Dx, Dy, Dx);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[i] = B[i];
}
for (int i = 0; i < Dy * Dy; i++) {
h_args_l[2 * Dy * Dx + i] = Psi[i];
}
// kill_rng();
// seed_rng(8192, 32, 128);
//
// test_smc_fsv(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
// kill_rng();
// seed_rng(8192, 32, 128);
//
// test_smc_fsv_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
void test_fsv_1_1(int N, int T, int nb, int nt) {
const int Dx = 1;
const int Dy = 1;
float scale_step[Dx * Dx];
matrix_identity(scale_step, Dx);
matrix_times(scale_step, scale_step, 0.9f, Dx, Dx);
float cov_step[Dx * Dx] = { 0.5f };
float Psi[Dy * Dy];
matrix_identity(Psi, Dy);
matrix_times(Psi, Psi, 0.5f, Dy, Dy);
float B[Dy * Dx] = { 1.0f };
float* xs_real = (float*) malloc(T * Dx * sizeof(float));
float* ys_real = (float*) malloc(T * Dy * sizeof(float));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_fsv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, Psi, B);
printf("%f\n", xs_real[T - 1]);
to_file(xs_real, T * Dx, "fsv_xs_real.txt");
to_file(ys_real, T * Dy, "fsv_ys_real.txt");
float h_args_l[Dy * Dx + Dx * Dy + Dy * Dy];
matrix_transpose(B, h_args_l + Dy * Dx, Dy, Dx);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[i] = B[i];
}
for (int i = 0; i < Dy * Dy; i++) {
h_args_l[2 * Dy * Dx + i] = Psi[i];
}
// kill_rng();
// seed_rng(8192, 32, 128);
//
// test_smc_fsv(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
// kill_rng();
// seed_rng(8192, 32, 128);
//
// test_smc_fsv_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
// HOST ONLY
void test_fsv_double(int N, int T) {
const int Dx = 3;
const int Dy = 5;
double scale_step[Dx * Dx];
matrix_identity(scale_step, Dx);
matrix_times(scale_step, scale_step, 0.9, Dx, Dx);
double cov_step[Dx * Dx] = { 0.5, 0.2, 0.1, 0.2, 0.5, 0.2, 0.1, 0.2, 0.5 };
double Psi[Dy * Dy];
matrix_identity(Psi, Dy);
matrix_times(Psi, Psi, 0.5, Dy, Dy);
double
B[Dy * Dx] = { 1.0, 0.0, 0.0, 0.5, 1.0, 0.0, 0.5, 0.5, 1.0, 0.2, 0.6, 0.3, 0.8, 0.7,
0.5 };
double* xs_real = (double*) malloc(T * Dx * sizeof(double));
double* ys_real = (double*) malloc(T * Dy * sizeof(double));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_fsv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, Psi, B);
to_file(xs_real, T * Dx, "fsv_xs_real.txt");
to_file(ys_real, T * Dy, "fsv_ys_real.txt");
printf("%f\n", xs_real[T - 1]);
double h_args_l[Dy * Dx + Dx * Dy + Dy * Dy];
matrix_transpose(B, h_args_l + Dy * Dx, Dy, Dx);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[i] = B[i];
}
for (int i = 0; i < Dy * Dy; i++) {
h_args_l[2 * Dy * Dx + i] = Psi[i];
}
// kill_rng();
// seed_rng(8192, 32, 128);
//
// test_smc_fsv_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
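// main: N is the number of particles, T the number of time steps, and nb/nt
// the grid and block sizes passed to the GPU filters. seed_rng initialises
// the parallel RNG and scan_init presumably allocates the prefix-sum
// workspace used for resampling (inferred from usage).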
int main(int argc, char **argv) {
// int N = 8192;
// int N = 16384;
// int N = 32768;
// int N = 65536;
// int N = 262144;
// int N = 8192;
// int N = 16384;
// int N = 32768;
int N = 65536;
// int N = 131072;
int nb = 256;
int nt = 64;
int T = 200;
seed_rng(8192, 32, 128);
scan_init(N);
// test_1D(N, T, nb, nt);
// test_2D(N, T, nb, nt);
// test_3by5D(N, T, nb, nt);
// test_fsv(4096, T, nb, nt);
// test_fsv(8192, T, 128, nt);
// test_fsv(16384, T, nb, nt);
// test_fsv(32768, T, nb, nt);
// test_fsv(65536, T, nb, nt);
// test_fsv(131072, T, nb, nt);
test_fsv(N, T, nb, nt);
// test_fsv_double(N, T);
// test_3by5D(N, T, nb, nt, 8192*4);
//
// test_3by5D_double(N, T, 8192*4);
// test_1D(N, T, nb, nt);
// test_usv(N, T, nb, nt);
// test_fsv_2_3(N, T, nb, nt);
// test_fsv_2_2(N, T, nb, nt);
// test_fsv_1_1(N, T, nb, nt);
kill_rng();
scan_destroy();
}
| f555018fb7016d81ff04580fb2472fad6b0b8906.cu | /*
* test_smc.cu
*
* Created on: 1-Mar-2009
* Author: Owner
*/
#include <stdio.h>
#include <cutil.h>
#include "rng.h"
#include "gauss.h"
#include "output.h"
#include "kalman.h"
#include "matrix.h"
#include "fsv.h"
#include "smc_fsv.h"
#include "smc_lg.h"
#include "smc_usv.h"
#include "smc_mvlg.h"
#include "scan.h"
#include "usv.h"
void generate_data(float* xs, float* ys, int T, float sigma_x, float sigma_y) {
const int M = 32768;
float steps[M];
populate_randn(steps, M);
// xs_{-1} = 0;
xs[0] = steps[0];
ys[0] = xs[0] + steps[1];
for (int i = 1; i < T; i++) {
xs[i] = xs[i - 1] + steps[i * 2];
ys[i] = xs[i] + steps[i * 2 + 1];
}
}
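// generate_data_mv simulates the linear-Gaussian state-space model
//     x_t = scale_step * x_{t-1} + L_step * e_t,
//     y_t = scale_like * x_t + L_like * n_t,
// where L_step and L_like are Cholesky factors of cov_step and cov_like and
// e_t, n_t are independent standard-normal draws.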
template<class T>
void generate_data_mv(T* xs, T* ys, int Dx, int Dy, int total_time, T* scale_step, T* cov_step,
T* scale_like, T* cov_like) {
int Mx = max(total_time * Dx, 32768);
int My = max(total_time * Dy, 32768);
T* steps_x = (T*) malloc(Mx * sizeof(T));
T* steps_y = (T*) malloc(My * sizeof(T));
T* L_step = (T*) malloc(Dx * Dx * sizeof(T));
T* L_like = (T*) malloc(Dy * Dy * sizeof(T));
T* temp_x = (T*) malloc(Dx * sizeof(T));
T* temp_y = (T*) malloc(Dy * sizeof(T));
matrix_chol(cov_step, L_step, Dx);
matrix_chol(cov_like, L_like, Dy);
// matrix_print(L_step, Dx, Dx);
// matrix_print(L_like, Dy, Dy);
populate_randn(steps_x, Mx);
populate_randn(steps_y, My);
// xs_{-1} = 0;
matrix_times(L_step, steps_x, temp_x, Dx, Dx, Dx, 1);
vector_set(xs, temp_x, Dx);
matrix_times(scale_like, xs, ys, Dy, Dx, Dx, 1);
matrix_times(L_like, vector_get(steps_y, Dy, 0), temp_y, Dy, Dy, Dy, 1);
vector_add(ys, temp_y, ys, Dy);
for (int i = 1; i < total_time; i++) {
matrix_times(scale_step, vector_get(xs, Dx, i - 1), vector_get(xs, Dx, i), Dx, Dx, Dx, 1);
matrix_times(L_step, vector_get(steps_x, Dx, i), temp_x, Dx, Dx, Dx, 1);
vector_add(vector_get(xs, Dx, i), temp_x, vector_get(xs, Dx, i), Dx);
matrix_times(scale_like, vector_get(xs, Dx, i), vector_get(ys, Dy, i), Dy, Dx, Dx, 1);
matrix_times(L_like, vector_get(steps_y, Dy, i), temp_y, Dy, Dy, Dy, 1);
vector_add(vector_get(ys, Dy, i), temp_y, vector_get(ys, Dy, i), Dy);
}
free(L_step);
free(L_like);
free(temp_x);
free(temp_y);
free(steps_x);
free(steps_y);
}
void test_smc_lg_kalman(int T, float* ys_real, float sigma_like, float sigma_step) {
float ll_kalman;
float* kalman_xs = (float*) malloc(T * sizeof(float));
kalman(0.0, kalman_xs, ys_real, T, sigma_like, sigma_step, &ll_kalman);
to_file(kalman_xs, T, "xs_lg_kalman.txt");
printf("ll_lg_kalman = %f\n", ll_kalman);
free(kalman_xs);
}
void test_smc_lg(int N, int T, float* ys_real, float* h_args_l, float scale_step, float sigma_step,
int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* xs = (float*) malloc(N * T * sizeof(float));
cudaMalloc((void**) &d_xs, N * T * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
cudaMalloc((void**) &d_ws, N * sizeof(float));
cudaMalloc((void**) &x_init, N * sizeof(float));
populate_randn_d(x_init, N);
float ll;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_lg(x_init, d_xs, d_ws, ys_real, N, T, h_args_l, scale_step, sigma_step, &ll, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_lg = %f\n", ll);
cudaMemcpy(xs, d_xs, N * T * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(ws, d_ws, N * sizeof(float), cudaMemcpyDeviceToHost);
to_file(xs, N * T, "xs_lg.txt");
to_file(ws, N, "ws_lg.txt");
free(ws);
cudaFree(d_ws);
free(xs);
cudaFree(d_xs);
cudaFree(x_init);
}
void test_smc_lg_forget(int N, int T, float* ys_real, float* h_args_l, float scale_step,
float sigma_step, int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* xs = (float*) malloc(N * sizeof(float));
cudaMalloc((void**) &d_xs, N * sizeof(float));
cudaMalloc((void**) &x_init, N * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
cudaMalloc((void**) &d_ws, N * sizeof(float));
populate_randn_d(x_init, N);
float ll_forget;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_forget_lg(x_init, d_xs, d_ws, ys_real, N, T, h_args_l, scale_step, sigma_step, &ll_forget,
nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_lg_forget = %f\n", ll_forget);
free(xs);
free(ws);
cudaFree(d_ws);
cudaFree(d_xs);
cudaFree(x_init);
}
void test_smc_lg_ref(int N, int T, float* ys_real, float* h_args_l, float scale_step,
float sigma_step) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* xs = (float*) malloc(N * T * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
float* hx_init = (float*) malloc(N * sizeof(float));
populate_randn(hx_init, N);
float ll_ref;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_lg(hx_init, xs, ws, ys_real, N, T, h_args_l, scale_step, sigma_step, &ll_ref);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_lg_ref = %f\n", ll_ref);
to_file(xs, N * T, "xs_lg_ref.txt");
to_file(ws, N, "ws_lg_ref.txt");
free(ws);
free(hx_init);
free(xs);
}
void test_smc_lg_forget_ref(int N, int T, float* ys_real, float* h_args_l, float scale_step,
float sigma_step) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* xs = (float*) malloc(N * sizeof(float));
float* hx_init = (float*) malloc(N * sizeof(float));
populate_randn(hx_init, N);
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
cudaMalloc((void**) &d_ws, N * sizeof(float));
float ll_forget_ref;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_forget_lg(hx_init, xs, ws, ys_real, N, T, h_args_l, scale_step, sigma_step,
&ll_forget_ref);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_lg_forget_ref = %f\n", ll_forget_ref);
free(ws);
cudaFree(d_ws);
free(hx_init);
free(xs);
}
template<class T>
void test_smc_usv_ref(int N, int total_time, T* ys_real, T* h_args_l, T alpha, T sigma) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
    T* xs = (T*) malloc(N * total_time * sizeof(T));
    T* ws = (T*) malloc(N * sizeof(T));
float* hx_init = (float*) malloc(N * sizeof(float));
populate_randn(hx_init, N);
float ll_ref;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_usv(hx_init, xs, ws, ys_real, N, total_time, h_args_l, alpha, sigma, &ll_ref);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_usv_ref = %f\n", ll_ref);
char filename_xs[] = "xs_usv_ref.txt";
char filename_ws[] = "xs_usv_ref.txt";
to_file(xs, N * total_time, filename_xs);
to_file(ws, N, filename_ws);
free(ws);
free(hx_init);
free(xs);
}
template<class T>
void test_smc_usv_forget_ref(int N, int total_time, T* ys_real, T* h_args_l, T alpha, T sigma) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
    T* xs = (T*) malloc(N * sizeof(T));
    T* ws = (T*) malloc(N * sizeof(T));
float* hx_init = (float*) malloc(N * sizeof(float));
populate_randn(hx_init, N);
float ll_ref;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_forget_usv(hx_init, xs, ws, ys_real, N, total_time, h_args_l, alpha, sigma, &ll_ref);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_usv_forget_ref = %f\n", ll_ref);
// to_file(xs, N * total_time, "xs_usv_ref.txt");
// to_file(ws, N, "ws_usv_ref.txt");
free(ws);
free(hx_init);
free(xs);
}
void test_smc_usv(int N, int T, float* ys_real, float* h_args_l, float alpha, float sigma, int nb,
int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* xs = (float*) malloc(N * T * sizeof(float));
cudaMalloc((void**) &d_xs, N * T * sizeof(float));
cudaMalloc((void**) &x_init, N * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
cudaMalloc((void**) &d_ws, N * sizeof(float));
populate_randn_d(x_init, N);
float ll;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_usv(x_init, d_xs, d_ws, ys_real, N, T, h_args_l, alpha, sigma, &ll, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_usv = %f\n", ll);
cudaMemcpy(xs, d_xs, N * T * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(ws, d_ws, N * sizeof(float), cudaMemcpyDeviceToHost);
to_file(xs, N * T, "xs_usv.txt");
to_file(ws, N, "ws_usv.txt");
free(xs);
free(ws);
cudaFree(d_ws);
cudaFree(d_xs);
cudaFree(x_init);
}
void test_smc_usv_forget(int N, int T, float* ys_real, float* h_args_l, float alpha, float sigma,
int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* xs = (float*) malloc(N * sizeof(float));
cudaMalloc((void**) &d_xs, N * sizeof(float));
cudaMalloc((void**) &x_init, N * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
cudaMalloc((void**) &d_ws, N * sizeof(float));
populate_randn_d(x_init, N);
float ll_forget;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_forget_usv(x_init, d_xs, d_ws, ys_real, N, T, h_args_l, alpha, sigma, &ll_forget, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_usv_forget = %f\n", ll_forget);
free(xs);
free(ws);
cudaFree(d_ws);
cudaFree(d_xs);
cudaFree(x_init);
}
void test_smc_mvlg_forget(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step,
float* cov_step, float* h_args_l, int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* d_ys_real;
float* xs = (float*) malloc(N * Dx * sizeof(float));
cudaMalloc((void**) &d_xs, N * Dx * sizeof(float));
cudaMalloc((void**) &x_init, N * Dx * sizeof(float));
cudaMalloc((void**) &d_ys_real, T * Dy * sizeof(float));
cudaMemcpy(d_ys_real, ys_real, T * Dy * sizeof(float), cudaMemcpyHostToDevice);
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
cudaMalloc((void**) &d_ws, N * sizeof(float));
float* hx_init = (float*) malloc(N * Dx * sizeof(float));
matrix_zero(hx_init, N, Dx);
cudaMemcpy(x_init, hx_init, N * Dx * sizeof(float), cudaMemcpyHostToDevice);
free(hx_init);
float ll_forget_D;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_forget_mvlg(x_init, d_xs, d_ws, d_ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_D, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_mvlg_forget = %f\n", ll_forget_D);
free(ws);
cudaFree(d_ws);
free(xs);
cudaFree(d_xs);
cudaFree(x_init);
cudaFree(d_ys_real);
}
void test_smc_mvlg(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step,
float* cov_step, float* h_args_l, int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* d_ys_real;
float* xs = (float*) malloc(N * Dx * T * sizeof(float));
cudaMalloc((void**) &d_xs, N * Dx * T * sizeof(float));
cudaMalloc((void**) &x_init, N * Dx * sizeof(float));
cudaMalloc((void**) &d_ys_real, T * Dy * sizeof(float));
cudaMemcpy(d_ys_real, ys_real, T * Dy * sizeof(float), cudaMemcpyHostToDevice);
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
cudaMalloc((void**) &d_ws, N * sizeof(float));
float* hx_init = (float*) malloc(N * Dx * sizeof(float));
matrix_zero(hx_init, N, Dx);
cudaMemcpy(x_init, hx_init, N * Dx * sizeof(float), cudaMemcpyHostToDevice);
free(hx_init);
float ll_D;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_mvlg(x_init, d_xs, d_ws, d_ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step, &ll_D,
nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_mvlg = %f\n", ll_D);
cudaMemcpy(xs, d_xs, N * Dx * T * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(ws, d_ws, N * sizeof(float), cudaMemcpyDeviceToHost);
to_file(xs, N * Dx * T, "xs_mvlg.txt");
to_file(ws, N, "ws_mvlg.txt");
free(ws);
cudaFree(d_ws);
free(xs);
cudaFree(d_xs);
cudaFree(x_init);
cudaFree(d_ys_real);
}
template<class T>
void test_smc_mvlg_ref(int N, int Dx, int Dy, int total_time, T* ys_real, T* scale_step,
T* cov_step, T* h_args_l) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
T* x_init = (T*) malloc(N * Dx * sizeof(T));
T* xs = (T*) malloc(N * Dx * total_time * sizeof(T));
T* ws = (T*) malloc(N * sizeof(T));
matrix_zero(x_init, N, Dx);
T ll_ref;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_mvlg(x_init, xs, ws, ys_real, N, Dx, Dy, total_time, h_args_l, scale_step, cov_step,
&ll_ref);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_mvlg_ref = %f\n", ll_ref);
char filename_xs[] = "xs_mvlg_ref.txt";
char filename_ws[] = "ws_mvlg_ref.txt";
to_file(xs, N * Dx * total_time, filename_xs);
to_file(ws, N, filename_ws);
free(ws);
free(xs);
free(x_init);
}
void test_smc_mvlg_forget_ref(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step,
float* cov_step, float* h_args_l) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* x_init = (float*) malloc(N * Dx * sizeof(float));
float* xs = (float*) malloc(N * Dx * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
matrix_zero(x_init, N, Dx);
float ll_ref;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_forget_mvlg(x_init, xs, ws, ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_ref);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_mvlg_forget_ref = %f\n", ll_ref);
free(ws);
free(xs);
free(x_init);
}
template<class T>
void test_smc_mvlg_kalman(int Dx, int Dy, int total_time, T* ys_real, T* scale_step, T* cov_step,
T* scale_like, T* cov_like) {
T ll_kalman_D;
T* kalman_xs = (T*) malloc(Dx * total_time * sizeof(T));
T* init_xs = (T*) malloc(Dx * sizeof(T));
for (int i = 0; i < Dx; i++) {
init_xs[i] = 0;
}
kalman(init_xs, kalman_xs, ys_real, Dx, Dy, total_time, scale_step, cov_step, scale_like,
cov_like, &ll_kalman_D);
char filename[] = "xs_mvlg_kalman.txt";
to_file(kalman_xs, Dx * total_time, filename);
printf("ll_mvlg_kalman = %f\n", ll_kalman_D);
free(kalman_xs);
free(init_xs);
}
void test_smc_fsv_forget(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step,
float* cov_step, float* h_args_l, int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* d_ys_real;
float* xs = (float*) malloc(N * Dx * sizeof(float));
cudaMalloc((void**) &d_xs, N * Dx * sizeof(float));
cudaMalloc((void**) &x_init, N * Dx * sizeof(float));
cudaMalloc((void**) &d_ys_real, T * Dy * sizeof(float));
cudaMemcpy(d_ys_real, ys_real, T * Dy * sizeof(float), cudaMemcpyHostToDevice);
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
cudaMalloc((void**) &d_ws, N * sizeof(float));
// populate_randn_d(x_init, N * D);
float* hx_init = (float*) malloc(N * Dx * sizeof(float));
matrix_zero(hx_init, N, Dx);
cudaMemcpy(x_init, hx_init, N * Dx * sizeof(float), cudaMemcpyHostToDevice);
free(hx_init);
float ll_forget_fsv;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_forget_fsv(x_init, d_xs, d_ws, d_ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_fsv, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_fsv_forget = %f\n", ll_forget_fsv);
free(ws);
free(xs);
cudaFree(d_xs);
cudaFree(x_init);
cudaFree(d_ys_real);
}
void test_smc_fsv(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step, float* cov_step,
float* h_args_l, int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* d_xs;
float* x_init;
float* d_ys_real;
float* xs = (float*) malloc(N * Dx * T * sizeof(float));
cudaMalloc((void**) &d_xs, N * Dx * T * sizeof(float));
cudaMalloc((void**) &x_init, N * Dx * sizeof(float));
cudaMalloc((void**) &d_ys_real, T * Dy * sizeof(float));
cudaMemcpy(d_ys_real, ys_real, T * Dy * sizeof(float), cudaMemcpyHostToDevice);
float* ws = (float*) malloc(N * sizeof(float));
float* d_ws;
cudaMalloc((void**) &d_ws, N * sizeof(float));
// populate_randn_d(x_init, N * D);
float* hx_init = (float*) malloc(N * Dx * sizeof(float));
matrix_zero(hx_init, N, Dx);
cudaMemcpy(x_init, hx_init, N * Dx * sizeof(float), cudaMemcpyHostToDevice);
free(hx_init);
float ll_forget_fsv;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_fsv(x_init, d_xs, d_ws, d_ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_fsv, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_fsv = %f\n", ll_forget_fsv);
cudaMemcpy(xs, d_xs, N * Dx * T * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(ws, d_ws, N * sizeof(float), cudaMemcpyDeviceToHost);
to_file(xs, N * Dx * T, "xs_fsv.txt");
to_file(ws, N, "ws_fsv.txt");
free(ws);
cudaFree(d_ws);
free(xs);
cudaFree(d_xs);
cudaFree(x_init);
cudaFree(d_ys_real);
}
void test_smc_fsv_ref(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step,
float* cov_step, float* h_args_l) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* x_init = (float*) malloc(N * Dx * sizeof(float));
float* xs = (float*) malloc(N * Dx * T * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
matrix_zero(x_init, N, Dx);
float ll_forget_fsv;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_fsv(x_init, xs, ws, ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_fsv);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_fsv_ref = %f\n", ll_forget_fsv);
to_file(xs, N * Dx * T, "xs_fsv.txt");
to_file(ws, N, "ws_fsv.txt");
free(ws);
free(xs);
free(x_init);
}
void test_smc_fsv_forget_ref(int N, int Dx, int Dy, int T, float* ys_real, float* scale_step,
float* cov_step, float* h_args_l) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* x_init = (float*) malloc(N * Dx * sizeof(float));
float* xs = (float*) malloc(N * Dx * sizeof(float));
float* ws = (float*) malloc(N * sizeof(float));
matrix_zero(x_init, N, Dx);
float ll_forget_fsv;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_forget_fsv(x_init, xs, ws, ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_fsv);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_fsv_forget_ref = %f\n", ll_forget_fsv);
free(ws);
free(xs);
free(x_init);
}
void test_smc_fsv_ref(int N, int Dx, int Dy, int T, double* ys_real, double* scale_step,
double* cov_step, double* h_args_l) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
double* x_init = (double*) malloc(N * Dx * sizeof(double));
double* xs = (double*) malloc(N * Dx * T * sizeof(double));
double* ws = (double*) malloc(N * sizeof(double));
matrix_zero(x_init, N, Dx);
double ll_forget_fsv;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_fsv(x_init, xs, ws, ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_fsv);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_fsv_ref = %f\n", ll_forget_fsv);
to_file(xs, N * Dx * T, "xs_fsv.txt");
to_file(ws, N, "ws_fsv.txt");
free(ws);
free(xs);
free(x_init);
}
void test_smc_fsv_forget_ref(int N, int Dx, int Dy, int T, double* ys_real, double* scale_step,
double* cov_step, double* h_args_l) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
double* x_init = (double*) malloc(N * Dx * sizeof(double));
double* xs = (double*) malloc(N * Dx * sizeof(double));
double* ws = (double*) malloc(N * sizeof(double));
matrix_zero(x_init, N, Dx);
double ll_forget_fsv;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
smc_ref_forget_fsv(x_init, xs, ws, ys_real, N, Dx, Dy, T, h_args_l, scale_step, cov_step,
&ll_forget_fsv);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f, ", time);
printf("ll_fsv_forget_ref = %f\n", ll_forget_fsv);
free(ws);
free(xs);
free(x_init);
}
void test_1D(int N, int T, int nb, int nt) {
float sigma_like = 1.0;
float sigma_step = 1.0;
float scale_step = 1.0;
float* xs_real = (float*) malloc(T * sizeof(float));
float* ys_real = (float*) malloc(T * sizeof(float));
generate_data(xs_real, ys_real, T, sigma_step, sigma_like);
to_file(xs_real, T, "xs_real_lg.txt");
to_file(ys_real, T, "ys_real_lg.txt");
test_smc_lg_kalman(T, ys_real, sigma_like, sigma_step);
float h_args_l[2];
compute_c1_c2(sigma_like, h_args_l[0], h_args_l[1]);
test_smc_lg(N, T, ys_real, h_args_l, scale_step, sigma_step, nb, nt);
test_smc_lg_forget(N, T, ys_real, h_args_l, scale_step, sigma_step, nb, nt);
test_smc_lg_ref(N, T, ys_real, h_args_l, scale_step, sigma_step);
test_smc_lg_forget_ref(N, T, ys_real, h_args_l, scale_step, sigma_step);
free(xs_real);
free(ys_real);
}
void test_usv(int N, int T, int nb, int nt) {
float alpha = 0.9f;
float sigma = 1.0f;
float beta = 1.0f;
float* xs_real = (float*) malloc(T * sizeof(float));
float* ys_real = (float*) malloc(T * sizeof(float));
generate_data_usv(xs_real, ys_real, T, alpha, sigma, beta);
to_file(xs_real, T, "xs_real_usv.txt");
to_file(ys_real, T, "ys_real_usv.txt");
float h_args_l[1];
h_args_l[0] = beta;
// kill_rng();
// seed_rng(16384, 32, 128);
//
// test_smc_usv(N, T, ys_real, h_args_l, alpha, sigma, nb, nt);
kill_rng();
seed_rng(16384, 32, 128);
test_smc_usv_forget(N, T, ys_real, h_args_l, alpha, sigma, nb, nt);
// kill_rng();
// seed_rng(16384, 32, 128);
//
// test_smc_usv_ref(N, T, ys_real, h_args_l, alpha, sigma);
kill_rng();
seed_rng(16384, 32, 128);
test_smc_usv_forget_ref(N, T, ys_real, h_args_l, alpha, sigma);
// test_smc_lg_forget(N, T, ys_real, h_args_l, sigma_step, nb, nt);
//
// test_smc_lg_ref(N, T, ys_real, h_args_l, sigma_step);
//
// test_smc_lg_forget_ref(N, T, ys_real, h_args_l, sigma_step);
free(xs_real);
free(ys_real);
}
void test_2D(int N, int T, int nb, int nt) {
const int D = 2;
float scale_step[D * D] = { 0.5f, 0.0f, 0.0f, 0.5f };
float cov_step[D * D] = { 1.0f, 0.8f, 0.8f, 1.0f };
float scale_like[D * D] = { 1.0f, 0.0f, 0.0f, 1.0f };
float cov_like[D * D] = { 0.5f, 0.0f, 0.0f, 0.5f };
float* xs_real = (float*) malloc(T * D * sizeof(float));
float* ys_real = (float*) malloc(T * D * sizeof(float));
generate_data_mv(xs_real, ys_real, D, D, T, scale_step, cov_step, scale_like, cov_like);
to_file(xs_real, T * D, "xs_real_mvlg.txt");
to_file(ys_real, T * D, "ys_real_mvlg.txt");
test_smc_mvlg_kalman(D, D, T, ys_real, scale_step, cov_step, scale_like, cov_like);
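    // h_args_l packs the device-side likelihood arguments: a scalar c1 and a
    // D*D block c2 filled in by compute_c1_c2 from cov_like (presumably the
    // Gaussian normalising constant and precision term), followed by
    // scale_like. The layout is inferred from the offsets used below.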
float h_args_l[1 + D * D + D * D];
compute_c1_c2(cov_like, D, h_args_l[0], h_args_l + 1);
for (int i = 0; i < D * D; i++) {
h_args_l[1 + D * D + i] = scale_like[i];
}
test_smc_mvlg(N, D, D, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
test_smc_mvlg_forget(N, D, D, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
test_smc_mvlg_ref(N, D, D, T, ys_real, scale_step, cov_step, h_args_l);
test_smc_mvlg_forget_ref(N, D, D, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
void test_3by5D(int N, int T, int nb, int nt, int n_burn_filter) {
const int Dx = 3;
const int Dy = 5;
float scale_step[Dx * Dx] = { 0.5f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 0.5f };
float cov_step[Dx * Dx] = { 1.0f, 0.8f, 0.0f, 0.8f, 1.0f, 0.4f, 0.0f, 0.4f, 1.0f };
float scale_like[Dy * Dx] = { 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.4f, 0.3f,
0.3f, 0.2f, 0.5f, 0.3f };
float cov_like[Dy * Dy];
matrix_identity(cov_like, Dy);
matrix_times(cov_like, cov_like, 0.5f, Dy, Dy);
float* xs_real = (float*) malloc(T * Dx * sizeof(float));
float* ys_real = (float*) malloc(T * Dy * sizeof(float));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_mv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, scale_like, cov_like);
to_file(xs_real, T * Dx, "xs_real_mvlg.txt");
to_file(ys_real, T * Dy, "ys_real_mvlg.txt");
test_smc_mvlg_kalman(Dx, Dy, T, ys_real, scale_step, cov_step, scale_like, cov_like);
float h_args_l[1 + Dy * Dy + Dy * Dx];
compute_c1_c2(cov_like, Dy, h_args_l[0], h_args_l + 1);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[1 + Dy * Dy + i] = scale_like[i];
}
seed_rng(n_burn_filter, 32, 128);
test_smc_mvlg(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
seed_rng(n_burn_filter, 32, 128);
test_smc_mvlg_forget(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
seed_rng(n_burn_filter, 32, 128);
test_smc_mvlg_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
seed_rng(n_burn_filter, 32, 128);
test_smc_mvlg_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
void test_3by5D_double(int N, int T, int n_burn_filter) {
const int Dx = 3;
const int Dy = 5;
double scale_step[Dx * Dx] = { 0.5f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 0.5f };
double cov_step[Dx * Dx] = { 1.0f, 0.8f, 0.0f, 0.8f, 1.0f, 0.4f, 0.0f, 0.4f, 1.0f };
double scale_like[Dy * Dx] = { 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.4f,
0.3f, 0.3f, 0.2f, 0.5f, 0.3f };
double cov_like[Dy * Dy];
matrix_identity(cov_like, Dy);
matrix_times(cov_like, cov_like, 0.5, Dy, Dy);
double* xs_real = (double*) malloc(T * Dx * sizeof(double));
double* ys_real = (double*) malloc(T * Dy * sizeof(double));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_mv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, scale_like, cov_like);
to_file(xs_real, T * Dx, "xs_real_mvlg.txt");
to_file(ys_real, T * Dy, "ys_real_mvlg.txt");
test_smc_mvlg_kalman(Dx, Dy, T, ys_real, scale_step, cov_step, scale_like, cov_like);
double h_args_l[1 + Dy * Dy + Dy * Dx];
compute_c1_c2(cov_like, Dy, h_args_l[0], h_args_l + 1);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[1 + Dy * Dy + i] = scale_like[i];
}
seed_rng(n_burn_filter, 32, 128);
test_smc_mvlg_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
// seed_rng(8192, 32, 128);
//
// test_smc_mvlg_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step,
// h_args_l);
free(xs_real);
free(ys_real);
}
void test_fsv(int N, int T, int nb, int nt) {
const int Dx = 3;
const int Dy = 5;
float scale_step[Dx * Dx];
matrix_identity(scale_step, Dx);
matrix_times(scale_step, scale_step, 0.9f, Dx, Dx);
float cov_step[Dx * Dx] = { 0.5f, 0.2f, 0.1f, 0.2f, 0.5f, 0.2f, 0.1f, 0.2f, 0.5f };
float Psi[Dy * Dy];
matrix_identity(Psi, Dy);
matrix_times(Psi, Psi, 0.5f, Dy, Dy);
float B[Dy * Dx] = { 1.0f, 0.0f, 0.0f, 0.5f, 1.0f, 0.0f, 0.5f, 0.5f, 1.0f, 0.2f, 0.6f, 0.3f,
0.8f, 0.7f, 0.5f };
float* xs_real = (float*) malloc(T * Dx * sizeof(float));
float* ys_real = (float*) malloc(T * Dy * sizeof(float));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_fsv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, Psi, B);
printf("%f\n", xs_real[T - 1]);
to_file(xs_real, T * Dx, "fsv_xs_real.txt");
to_file(ys_real, T * Dy, "fsv_ys_real.txt");
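    // h_args_l layout for the FSV likelihood: B (Dy*Dx), then B transposed
    // (Dx*Dy), then Psi (Dy*Dy).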
float h_args_l[Dy * Dx + Dx * Dy + Dy * Dy];
matrix_transpose(B, h_args_l + Dy * Dx, Dy, Dx);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[i] = B[i];
}
for (int i = 0; i < Dy * Dy; i++) {
h_args_l[2 * Dy * Dx + i] = Psi[i];
}
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
void test_fsv_2_3(int N, int T, int nb, int nt) {
const int Dx = 2;
const int Dy = 3;
float scale_step[Dx * Dx];
matrix_identity(scale_step, Dx);
matrix_times(scale_step, scale_step, 0.9f, Dx, Dx);
float cov_step[Dx * Dx] = { 0.5f, 0.2f, 0.2f, 0.5f };
float Psi[Dy * Dy];
matrix_identity(Psi, Dy);
matrix_times(Psi, Psi, 0.5f, Dy, Dy);
float B[Dy * Dx] = { 1.0f, 0.0f, 0.3f, 0.7f, 0.6f, 0.4f };
float* xs_real = (float*) malloc(T * Dx * sizeof(float));
float* ys_real = (float*) malloc(T * Dy * sizeof(float));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_fsv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, Psi, B);
printf("%f\n", xs_real[T - 1]);
to_file(xs_real, T * Dx, "fsv_xs_real.txt");
to_file(ys_real, T * Dy, "fsv_ys_real.txt");
float h_args_l[Dy * Dx + Dx * Dy + Dy * Dy];
matrix_transpose(B, h_args_l + Dy * Dx, Dy, Dx);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[i] = B[i];
}
for (int i = 0; i < Dy * Dy; i++) {
h_args_l[2 * Dy * Dx + i] = Psi[i];
}
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
// kill_rng();
// seed_rng(8192, 32, 128);
//
// test_smc_fsv_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
void test_fsv_2_2(int N, int T, int nb, int nt) {
const int Dx = 2;
const int Dy = 2;
float scale_step[Dx * Dx];
matrix_identity(scale_step, Dx);
matrix_times(scale_step, scale_step, 0.9f, Dx, Dx);
float cov_step[Dx * Dx] = { 0.5f, 0.2f, 0.2f, 0.5f };
float Psi[Dy * Dy];
matrix_identity(Psi, Dy);
matrix_times(Psi, Psi, 0.5f, Dy, Dy);
float B[Dy * Dx] = { 1.0f, 0.0f, 0.3f, 0.7f };
float* xs_real = (float*) malloc(T * Dx * sizeof(float));
float* ys_real = (float*) malloc(T * Dy * sizeof(float));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_fsv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, Psi, B);
printf("%f\n", xs_real[T - 1]);
to_file(xs_real, T * Dx, "fsv_xs_real.txt");
to_file(ys_real, T * Dy, "fsv_ys_real.txt");
float h_args_l[Dy * Dx + Dx * Dy + Dy * Dy];
matrix_transpose(B, h_args_l + Dy * Dx, Dy, Dx);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[i] = B[i];
}
for (int i = 0; i < Dy * Dy; i++) {
h_args_l[2 * Dy * Dx + i] = Psi[i];
}
// kill_rng();
// seed_rng(8192, 32, 128);
//
// test_smc_fsv(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
// kill_rng();
// seed_rng(8192, 32, 128);
//
// test_smc_fsv_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
void test_fsv_1_1(int N, int T, int nb, int nt) {
const int Dx = 1;
const int Dy = 1;
float scale_step[Dx * Dx];
matrix_identity(scale_step, Dx);
matrix_times(scale_step, scale_step, 0.9f, Dx, Dx);
float cov_step[Dx * Dx] = { 0.5f };
float Psi[Dy * Dy];
matrix_identity(Psi, Dy);
matrix_times(Psi, Psi, 0.5f, Dy, Dy);
float B[Dy * Dx] = { 1.0f };
float* xs_real = (float*) malloc(T * Dx * sizeof(float));
float* ys_real = (float*) malloc(T * Dy * sizeof(float));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_fsv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, Psi, B);
printf("%f\n", xs_real[T - 1]);
to_file(xs_real, T * Dx, "fsv_xs_real.txt");
to_file(ys_real, T * Dy, "fsv_ys_real.txt");
float h_args_l[Dy * Dx + Dx * Dy + Dy * Dy];
matrix_transpose(B, h_args_l + Dy * Dx, Dy, Dx);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[i] = B[i];
}
for (int i = 0; i < Dy * Dy; i++) {
h_args_l[2 * Dy * Dx + i] = Psi[i];
}
// kill_rng();
// seed_rng(8192, 32, 128);
//
// test_smc_fsv(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l, nb, nt);
// kill_rng();
// seed_rng(8192, 32, 128);
//
// test_smc_fsv_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
// HOST ONLY
void test_fsv_double(int N, int T) {
const int Dx = 3;
const int Dy = 5;
double scale_step[Dx * Dx];
matrix_identity(scale_step, Dx);
matrix_times(scale_step, scale_step, 0.9, Dx, Dx);
double cov_step[Dx * Dx] = { 0.5, 0.2, 0.1, 0.2, 0.5, 0.2, 0.1, 0.2, 0.5 };
double Psi[Dy * Dy];
matrix_identity(Psi, Dy);
matrix_times(Psi, Psi, 0.5, Dy, Dy);
double
B[Dy * Dx] = { 1.0, 0.0, 0.0, 0.5, 1.0, 0.0, 0.5, 0.5, 1.0, 0.2, 0.6, 0.3, 0.8, 0.7,
0.5 };
double* xs_real = (double*) malloc(T * Dx * sizeof(double));
double* ys_real = (double*) malloc(T * Dy * sizeof(double));
kill_rng();
seed_rng(16384, 32, 128);
generate_data_fsv(xs_real, ys_real, Dx, Dy, T, scale_step, cov_step, Psi, B);
to_file(xs_real, T * Dx, "fsv_xs_real.txt");
to_file(ys_real, T * Dy, "fsv_ys_real.txt");
printf("%f\n", xs_real[T - 1]);
double h_args_l[Dy * Dx + Dx * Dy + Dy * Dy];
matrix_transpose(B, h_args_l + Dy * Dx, Dy, Dx);
for (int i = 0; i < Dy * Dx; i++) {
h_args_l[i] = B[i];
}
for (int i = 0; i < Dy * Dy; i++) {
h_args_l[2 * Dy * Dx + i] = Psi[i];
}
// kill_rng();
// seed_rng(8192, 32, 128);
//
// test_smc_fsv_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
kill_rng();
seed_rng(8192, 32, 128);
test_smc_fsv_forget_ref(N, Dx, Dy, T, ys_real, scale_step, cov_step, h_args_l);
free(xs_real);
free(ys_real);
}
int main(int argc, char **argv) {
// int N = 8192;
// int N = 16384;
// int N = 32768;
// int N = 65536;
// int N = 262144;
// int N = 8192;
// int N = 16384;
// int N = 32768;
int N = 65536;
// int N = 131072;
int nb = 256;
int nt = 64;
int T = 200;
seed_rng(8192, 32, 128);
scan_init(N);
// test_1D(N, T, nb, nt);
// test_2D(N, T, nb, nt);
// test_3by5D(N, T, nb, nt);
// test_fsv(4096, T, nb, nt);
// test_fsv(8192, T, 128, nt);
// test_fsv(16384, T, nb, nt);
// test_fsv(32768, T, nb, nt);
// test_fsv(65536, T, nb, nt);
// test_fsv(131072, T, nb, nt);
test_fsv(N, T, nb, nt);
// test_fsv_double(N, T);
// test_3by5D(N, T, nb, nt, 8192*4);
//
// test_3by5D_double(N, T, 8192*4);
// test_1D(N, T, nb, nt);
// test_usv(N, T, nb, nt);
// test_fsv_2_3(N, T, nb, nt);
// test_fsv_2_2(N, T, nb, nt);
// test_fsv_1_1(N, T, nb, nt);
kill_rng();
scan_destroy();
}
|
233da1f6ea90e187f4673aa647891ac1f8f832c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by kindr on 2021/5/2.
//
#include "misalignedRead.cuh"
#include "../../common/utils.cuh"
__global__
void misalignedAddOne(float *vec, size_t N, const size_t offset) {
unsigned idx = blockIdx.x * blockDim.x + threadIdx.x + offset;
if (idx < N) vec[idx] = vec[idx] + 1.f;
}
void misalignedRead(size_t nElement, size_t nThread, const size_t offset) {
float *vec;
size_t nBytes = nElement * sizeof(float);
hipMallocManaged(&vec, nBytes, hipMemAttachGlobal);
CHECK(hipGetLastError());
memset(vec, 0, nBytes);
size_t nBlock = (nElement + nThread - 1) / nThread;
hipLaunchKernelGGL(( misalignedAddOne), dim3(nBlock), dim3(nThread), 0, 0, vec, nElement, offset);
hipDeviceSynchronize();
CHECK(hipGetLastError());
bool isSame = true;
for (size_t i = offset; i < nElement; ++i) {
if (vec[i] != 1.f) {
isSame = false;
}
}
printf("isSame?: %s", isSame ? "true" : "false");
hipHostFree(vec);
}
| 233da1f6ea90e187f4673aa647891ac1f8f832c4.cu | //
// Created by kindr on 2021/5/2.
//
#include "misalignedRead.cuh"
#include "../../common/utils.cuh"
__global__
void misalignedAddOne(float *vec, size_t N, const size_t offset) {
unsigned idx = blockIdx.x * blockDim.x + threadIdx.x + offset;
if (idx < N) vec[idx] = vec[idx] + 1.f;
}
void misalignedRead(size_t nElement, size_t nThread, const size_t offset) {
float *vec;
size_t nBytes = nElement * sizeof(float);
cudaMallocManaged(&vec, nBytes, cudaMemAttachGlobal);
CHECK(cudaGetLastError());
memset(vec, 0, nBytes);
size_t nBlock = (nElement + nThread - 1) / nThread;
misalignedAddOne<<<nBlock, nThread>>>(vec, nElement, offset);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
bool isSame = true;
for (size_t i = offset; i < nElement; ++i) {
if (vec[i] != 1.f) {
isSame = false;
}
}
printf("isSame?: %s", isSame ? "true" : "false");
cudaFreeHost(vec);
}
|
3188730668429c2089cabf7a82fcf4dc52c38cab.hip | // !!! This is a file automatically generated by hipify!!!
/* -------------------------------------------------------------------------
* Implements gpusim::FingerprintDB CUDA enabled similarity
* scoring
*
* Copyright Schrodinger LLC, All Rights Reserved.
--------------------------------------------------------------------------- */
#include <hip/hip_runtime.h>
#include "fingerprintdb_cuda.h"
#include <iostream>
#include <cmath>
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <QtConcurrent/QtConcurrentRun>
#include <QDebug>
#include <QFuture>
#include <QMutex>
using std::make_shared;
using std::pair;
using std::vector;
using thrust::device_vector;
namespace gpusim
{
size_t get_gpu_free_memory(unsigned int device_index)
{
hipSetDevice(device_index);
size_t free, total;
hipMemGetInfo(&free, &total);
return free;
}
unsigned int get_gpu_count()
{
static int device_count = 0;
static bool initialized = false;
if(!initialized) {
hipGetDeviceCount(&device_count);
initialized = true;
}
return device_count;
}
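// get_next_gpu cycles round-robin over the available devices and returns the
// first candidate with more than required_memory bytes free; it throws if no
// device has enough free memory.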
unsigned int get_next_gpu(size_t required_memory)
{
static int next_device = 0;
for(unsigned int i=0; i<get_gpu_count(); i++) {
int gpu = next_device++ % get_gpu_count(); // Divide by 0 if called w/o GPU
        auto free = get_gpu_free_memory(gpu);
if(free > required_memory) {
return gpu;
}
}
throw std::runtime_error("Can't find a GPU with enough memory to copy data.");
return 0; // Never gets here, just for compiler happiness
}
typedef device_vector<int> DFingerprint;
/**
* @internal
* Functor used to perform tanimoto similarity on GPGPU via thrust::transform
*/
struct TanimotoFunctor {
const int* m_ref_fp;
const int m_fp_intsize;
const int* m_dbdata;
const float m_similarity_cutoff;
TanimotoFunctor(const DFingerprint& ref_fp, int fp_intsize,
const device_vector<int>& dbdata, float similarity_cutoff) :
m_ref_fp(ref_fp.data().get()),m_fp_intsize(fp_intsize),m_dbdata(dbdata.data().get()),
m_similarity_cutoff(similarity_cutoff)
{};
__device__ float
operator()(const int& fp_index) const
{
int total = 0;
int common = 0;
int offset = m_fp_intsize*fp_index;
for(int i=0; i<m_fp_intsize; i++) {
const int fp1 = m_ref_fp[i];
const int fp2 = m_dbdata[offset+i];
total += __popc(fp1) + __popc(fp2);
common += __popc(fp1 & fp2);
}
float score = static_cast<float>(common) / static_cast<float>(total-common);
return score >= m_similarity_cutoff ? score : 0;
};
};
class FingerprintDBPriv
{
public:
std::shared_ptr<device_vector<int> > d_data;
};
FingerprintDBStorage::FingerprintDBStorage(FingerprintDB* parent, std::vector<char>& fp_data,
int index_offset, int fp_bitcount) : m_parent(parent), m_index_offset(index_offset),
m_count(fp_data.size() / (fp_bitcount / CHAR_BIT))
{
const int* int_data = reinterpret_cast<const int*>(fp_data.data());
const size_t int_size = fp_data.size() / sizeof(int);
m_data.assign(int_data, int_data+int_size);
}
unsigned int FingerprintDBStorage::getOffsetIndex(unsigned int without_offset)
{
return without_offset + m_index_offset;
}
FingerprintDB::FingerprintDB(int fp_bitcount, int fp_count,
vector<vector<char> >& data,
vector<char*>& smiles_vector,
std::vector<char*>& ids_vector)
{
m_fp_intsize = fp_bitcount / (sizeof(int)*8); //ASSUMES INT-DIVISIBLE SIZE
m_total_count = fp_count;
int current_fp_count = 0;
for(auto& dataset : data) {
auto storage = make_shared<FingerprintDBStorage>(this, dataset,
current_fp_count, fp_bitcount);
storage->m_priv = make_shared<FingerprintDBPriv>();
m_storage.push_back(storage);
current_fp_count += storage->m_data.size() / m_fp_intsize;
}
if(current_fp_count != m_total_count) {
throw std::runtime_error("Mismatch between FP count and data, potential database corruption.");
}
m_total_data_size = static_cast<size_t>(m_total_count) *
static_cast<size_t>(m_fp_intsize)*sizeof(int);
qDebug() << "Database loaded with" << m_total_count << "molecules";
// Optimization, take the underlying storage of the incoming vectors,
// which won't be used again in calling code
m_smiles.swap(smiles_vector);
m_ids.swap(ids_vector);
}
void FingerprintDB::copyToGPU(unsigned int fold_factor)
{
m_fold_factor = fold_factor;
while(m_fp_intsize % m_fold_factor != 0) {
m_fold_factor++;
}
if(m_fold_factor == 1) {
for(const auto& storage : m_storage) {
storage->m_gpu_device = get_next_gpu(storage->m_data.size() * sizeof(int));
hipSetDevice(storage->m_gpu_device);
// Have to create vector where correct cuda device is set
storage->m_priv->d_data = make_shared< device_vector<int> >();
*(storage->m_priv->d_data) = storage->m_data;
}
} else {
for(const auto& storage : m_storage) {
auto folded_data = fold_data(storage->m_data);
storage->m_gpu_device = get_next_gpu(folded_data.size() * sizeof(int));
hipSetDevice(storage->m_gpu_device);
// Have to create vector where correct cuda device is set
storage->m_priv->d_data = make_shared<device_vector<int> >();
*(storage->m_priv->d_data) = folded_data;
}
}
}
void FingerprintDB::getStorageAndLocalIndex(unsigned int offset_index,
FingerprintDBStorage** storage, unsigned int* local_index) const
{
int slice_index_offset=0;
*storage = m_storage[0].get();
for(unsigned int i=1; i<m_storage.size(); i++) {
if(m_storage[i]->m_index_offset >= offset_index) break;
*storage = m_storage[i].get();
slice_index_offset = (*storage)->m_index_offset;
}
*local_index = offset_index - slice_index_offset;
}
Fingerprint FingerprintDB::getFingerprint(unsigned int index) const
{
Fingerprint output(m_fp_intsize);
FingerprintDBStorage* storage;
unsigned int local_index;
getStorageAndLocalIndex(index, &storage, &local_index);
unsigned int offset = local_index*m_fp_intsize;
for(int i=0; i<m_fp_intsize; i++) {
output[i] = storage->m_data[offset+i];
}
return output;
}
void FingerprintDB::search_storage(const Fingerprint& query,
const std::shared_ptr<FingerprintDBStorage>& storage,
vector<SortableResult>* sortable_results,
unsigned int return_count,
float similarity_cutoff) const
{
hipSetDevice(storage->m_gpu_device);
static QMutex mutex;
vector<int> indices;
std::vector<char*> results_smiles;
std::vector<char*> results_ids;
std::vector<float> results_scores;
device_vector<float> d_results_scores(storage->m_count);
device_vector<int> d_results_indices(storage->m_count);
try
{
// Fill indices [0->N), which will be sorted along with scores at end
thrust::sequence(d_results_indices.begin(), d_results_indices.end());
DFingerprint d_ref_fp;
if(m_fold_factor == 1) {
// Copy the query fingerprint up to the GPU
d_ref_fp = query;
} else {
auto folded = fold_data(query);
d_ref_fp = folded;
}
const int folded_fp_intsize = m_fp_intsize / m_fold_factor;
// Use Tanimoto to score similarity of all compounds to query fingerprint
thrust::transform(d_results_indices.begin(), d_results_indices.end(),
d_results_scores.begin(),
TanimotoFunctor(d_ref_fp, folded_fp_intsize, *(storage->m_priv->d_data),
similarity_cutoff));
auto indices_end = d_results_indices.end();
auto scores_end = d_results_scores.end();
if(similarity_cutoff > 0) {
indices_end = thrust::remove_if(d_results_indices.begin(),
d_results_indices.end(), d_results_scores.begin(),
thrust::logical_not<bool>());
scores_end = thrust::remove(d_results_scores.begin(),
d_results_scores.end(), 0);
}
unsigned int indices_size = std::distance(d_results_indices.begin(),
indices_end);
// Sort scores & indices vectors descending on score
thrust::sort_by_key(d_results_scores.begin(), scores_end,
d_results_indices.begin(), thrust::greater<float>());
int results_to_consider = 0;
results_to_consider = ::min(indices_size,
return_count*m_fold_factor*(int)std::log2(2*m_fold_factor));
indices.assign(d_results_indices.begin(),
d_results_indices.begin()+results_to_consider);
} catch(thrust::system_error e) {
qDebug() << "Error!" << e.what();
throw;
}
if(m_fold_factor == 1) { // If we don't fold, we can take exact GPU results
// Push top return_count results to CPU results vectors to be returned
for(auto index : indices) {
int offset_index = storage->getOffsetIndex(index);
results_smiles.push_back(m_smiles[offset_index]);
results_ids.push_back(m_ids[offset_index]);
}
results_scores.assign(d_results_scores.begin(),
d_results_scores.begin()+indices.size());
} else { // If we folded, we need to recalculate scores with full fingerprints
results_scores.resize(indices.size());
for(unsigned int i=0;i<indices.size();i++) {
int offset_index = storage->getOffsetIndex(indices[i]);
results_scores[i] = tanimoto_similarity_cpu(query,
getFingerprint(offset_index));
// Uncomment below to debug pre vs post folding scores
// qDebug() << results_scores[i] << " vs " << d_results_scores[i];
}
top_results_bubble_sort(indices, results_scores, return_count);
return_count = ::min((size_t)return_count, indices.size());
results_scores.resize(return_count);
for(unsigned int i=0;i<return_count;i++) {
// Check whether the re-scored similarity is too low
if(results_scores[i] < similarity_cutoff) {
results_scores.resize(i);
break;
}
results_ids.push_back(m_ids[storage->getOffsetIndex(indices[i])]);
results_smiles.push_back(m_smiles[storage->getOffsetIndex(indices[i])]);
}
}
mutex.lock();
for(unsigned int i=0; i<results_smiles.size(); i++) {
sortable_results->push_back(SortableResult(results_scores[i],
ResultData(results_smiles[i], results_ids[i])));
}
mutex.unlock();
}
void FingerprintDB::search(const Fingerprint& query,
std::vector<char*>& results_smiles,
std::vector<char*>& results_ids,
std::vector<float>& results_scores,
unsigned int return_count,
float similarity_cutoff) const
{
vector<SortableResult> sortable_results;
vector<QFuture<void> > futures;
for(auto& storage : m_storage) {
QFuture<void> future = QtConcurrent::run(this,
&FingerprintDB::search_storage, query, storage,
&sortable_results, return_count, similarity_cutoff);
futures.push_back(future);
}
for(auto& future : futures) {
future.waitForFinished();
}
std::sort(sortable_results.begin(), sortable_results.end());
std::reverse(sortable_results.begin(), sortable_results.end());
for(auto result : sortable_results) {
results_scores.push_back(result.first);
results_smiles.push_back(result.second.first);
results_ids.push_back(result.second.second);
}
int result_size = ::min((int)return_count, (int)results_scores.size());
results_scores.resize(result_size);
results_smiles.resize(result_size);
results_ids.resize(result_size);
}
/**
* @brief
* A CPU implementation of tanimoto similarity, meant purely for testing.
*/
float FingerprintDB::tanimoto_similarity_cpu(const Fingerprint& fp1,
const Fingerprint& fp2) const
{
int total = 0;
int common = 0;
for(int i=0; i<m_fp_intsize; i++) {
total += __builtin_popcount(fp1[i]) + __builtin_popcount(fp2[i]);
common += __builtin_popcount(fp1[i] & fp2[i]);
}
return (float)common / (float)(total-common);
}
size_t get_available_gpu_memory()
{
size_t free=0;
for(unsigned int gpu=0; gpu<get_gpu_count(); gpu++) {
auto lfree = get_gpu_free_memory(gpu);
free += lfree;
}
// Comment out below line to force-test folding:
// free = 100*1024*1024;
return free;
}
} // namespace gpusim
| 3188730668429c2089cabf7a82fcf4dc52c38cab.cu | /* -------------------------------------------------------------------------
* Implements gpusim::FingerprintDB CUDA enabled similarity
* scoring
*
* Copyright Schrodinger LLC, All Rights Reserved.
--------------------------------------------------------------------------- */
#include <cuda_runtime.h>
#include "fingerprintdb_cuda.h"
#include <iostream>
#include <cmath>
#include <algorithm>
#include <cuda_runtime_api.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <QtConcurrent/QtConcurrentRun>
#include <QDebug>
#include <QFuture>
#include <QMutex>
using std::make_shared;
using std::pair;
using std::vector;
using thrust::device_vector;
namespace gpusim
{
size_t get_gpu_free_memory(unsigned int device_index)
{
cudaSetDevice(device_index);
size_t free, total;
cudaMemGetInfo(&free, &total);
return free;
}
unsigned int get_gpu_count()
{
static int device_count = 0;
static bool initialized = false;
if(!initialized) {
cudaGetDeviceCount(&device_count);
initialized = true;
}
return device_count;
}
unsigned int get_next_gpu(size_t required_memory)
{
static int next_device = 0;
for(unsigned int i=0; i<get_gpu_count(); i++) {
int gpu = next_device++ % get_gpu_count(); // Divide by 0 if called w/o GPU
auto free = get_gpu_free_memory(i);
if(free > required_memory) {
return gpu;
}
}
throw std::runtime_error("Can't find a GPU with enough memory to copy data.");
return 0; // Never gets here, just for compiler happiness
}
typedef device_vector<int> DFingerprint;
/**
* @internal
* Functor used to perform tanimoto similarity on GPGPU via thrust::transform
*/
struct TanimotoFunctor {
const int* m_ref_fp;
const int m_fp_intsize;
const int* m_dbdata;
const float m_similarity_cutoff;
TanimotoFunctor(const DFingerprint& ref_fp, int fp_intsize,
const device_vector<int>& dbdata, float similarity_cutoff) :
m_ref_fp(ref_fp.data().get()),m_fp_intsize(fp_intsize),m_dbdata(dbdata.data().get()),
m_similarity_cutoff(similarity_cutoff)
{};
__device__ float
operator()(const int& fp_index) const
{
int total = 0;
int common = 0;
int offset = m_fp_intsize*fp_index;
for(int i=0; i<m_fp_intsize; i++) {
const int fp1 = m_ref_fp[i];
const int fp2 = m_dbdata[offset+i];
total += __popc(fp1) + __popc(fp2);
common += __popc(fp1 & fp2);
}
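        // Tanimoto coefficient: popcount(A & B) / popcount(A | B); since total is
        // popcount(A) + popcount(B), the denominator total - common equals |A | B|.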
float score = static_cast<float>(common) / static_cast<float>(total-common);
return score >= m_similarity_cutoff ? score : 0;
};
};
class FingerprintDBPriv
{
public:
std::shared_ptr<device_vector<int> > d_data;
};
FingerprintDBStorage::FingerprintDBStorage(FingerprintDB* parent, std::vector<char>& fp_data,
int index_offset, int fp_bitcount) : m_parent(parent), m_index_offset(index_offset),
m_count(fp_data.size() / (fp_bitcount / CHAR_BIT))
{
const int* int_data = reinterpret_cast<const int*>(fp_data.data());
const size_t int_size = fp_data.size() / sizeof(int);
m_data.assign(int_data, int_data+int_size);
}
unsigned int FingerprintDBStorage::getOffsetIndex(unsigned int without_offset)
{
return without_offset + m_index_offset;
}
FingerprintDB::FingerprintDB(int fp_bitcount, int fp_count,
vector<vector<char> >& data,
vector<char*>& smiles_vector,
std::vector<char*>& ids_vector)
{
m_fp_intsize = fp_bitcount / (sizeof(int)*8); //ASSUMES INT-DIVISIBLE SIZE
m_total_count = fp_count;
int current_fp_count = 0;
for(auto& dataset : data) {
auto storage = make_shared<FingerprintDBStorage>(this, dataset,
current_fp_count, fp_bitcount);
storage->m_priv = make_shared<FingerprintDBPriv>();
m_storage.push_back(storage);
current_fp_count += storage->m_data.size() / m_fp_intsize;
}
if(current_fp_count != m_total_count) {
throw std::runtime_error("Mismatch between FP count and data, potential database corruption.");
}
m_total_data_size = static_cast<size_t>(m_total_count) *
static_cast<size_t>(m_fp_intsize)*sizeof(int);
qDebug() << "Database loaded with" << m_total_count << "molecules";
// Optimization, take the underlying storage of the incoming vectors,
// which won't be used again in calling code
m_smiles.swap(smiles_vector);
m_ids.swap(ids_vector);
}
void FingerprintDB::copyToGPU(unsigned int fold_factor)
{
m_fold_factor = fold_factor;
while(m_fp_intsize % m_fold_factor != 0) {
m_fold_factor++;
}
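    // Grow the fold factor until it evenly divides the fingerprint size in ints,
    // so each folded fingerprint still occupies a whole number of ints.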
if(m_fold_factor == 1) {
for(const auto& storage : m_storage) {
storage->m_gpu_device = get_next_gpu(storage->m_data.size() * sizeof(int));
cudaSetDevice(storage->m_gpu_device);
// Have to create vector where correct cuda device is set
storage->m_priv->d_data = make_shared< device_vector<int> >();
*(storage->m_priv->d_data) = storage->m_data;
}
} else {
for(const auto& storage : m_storage) {
auto folded_data = fold_data(storage->m_data);
storage->m_gpu_device = get_next_gpu(folded_data.size() * sizeof(int));
cudaSetDevice(storage->m_gpu_device);
// Have to create vector where correct cuda device is set
storage->m_priv->d_data = make_shared<device_vector<int> >();
*(storage->m_priv->d_data) = folded_data;
}
}
}
void FingerprintDB::getStorageAndLocalIndex(unsigned int offset_index,
FingerprintDBStorage** storage, unsigned int* local_index) const
{
int slice_index_offset=0;
*storage = m_storage[0].get();
for(unsigned int i=1; i<m_storage.size(); i++) {
if(m_storage[i]->m_index_offset >= offset_index) break;
*storage = m_storage[i].get();
slice_index_offset = (*storage)->m_index_offset;
}
*local_index = offset_index - slice_index_offset;
}
Fingerprint FingerprintDB::getFingerprint(unsigned int index) const
{
Fingerprint output(m_fp_intsize);
FingerprintDBStorage* storage;
unsigned int local_index;
getStorageAndLocalIndex(index, &storage, &local_index);
unsigned int offset = local_index*m_fp_intsize;
for(int i=0; i<m_fp_intsize; i++) {
output[i] = storage->m_data[offset+i];
}
return output;
}
void FingerprintDB::search_storage(const Fingerprint& query,
const std::shared_ptr<FingerprintDBStorage>& storage,
vector<SortableResult>* sortable_results,
unsigned int return_count,
float similarity_cutoff) const
{
cudaSetDevice(storage->m_gpu_device);
static QMutex mutex;
vector<int> indices;
std::vector<char*> results_smiles;
std::vector<char*> results_ids;
std::vector<float> results_scores;
device_vector<float> d_results_scores(storage->m_count);
device_vector<int> d_results_indices(storage->m_count);
try
{
// Fill indices [0->N), which will be sorted along with scores at end
thrust::sequence(d_results_indices.begin(), d_results_indices.end());
DFingerprint d_ref_fp;
if(m_fold_factor == 1) {
// Copy the query fingerprint up to the GPU
d_ref_fp = query;
} else {
auto folded = fold_data(query);
d_ref_fp = folded;
}
const int folded_fp_intsize = m_fp_intsize / m_fold_factor;
// Use Tanimoto to score similarity of all compounds to query fingerprint
thrust::transform(d_results_indices.begin(), d_results_indices.end(),
d_results_scores.begin(),
TanimotoFunctor(d_ref_fp, folded_fp_intsize, *(storage->m_priv->d_data),
similarity_cutoff));
auto indices_end = d_results_indices.end();
auto scores_end = d_results_scores.end();
if(similarity_cutoff > 0) {
indices_end = thrust::remove_if(d_results_indices.begin(),
d_results_indices.end(), d_results_scores.begin(),
thrust::logical_not<bool>());
scores_end = thrust::remove(d_results_scores.begin(),
d_results_scores.end(), 0);
}
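        // Both remove passes drop the entries whose score was zeroed by the cutoff,
        // so the indices and scores vectors stay aligned element for element.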
unsigned int indices_size = std::distance(d_results_indices.begin(),
indices_end);
// Sort scores & indices vectors descending on score
thrust::sort_by_key(d_results_scores.begin(), scores_end,
d_results_indices.begin(), thrust::greater<float>());
int results_to_consider = 0;
results_to_consider = std::min(indices_size,
return_count*m_fold_factor*(int)std::log2(2*m_fold_factor));
indices.assign(d_results_indices.begin(),
d_results_indices.begin()+results_to_consider);
} catch(thrust::system_error e) {
qDebug() << "Error!" << e.what();
throw;
}
if(m_fold_factor == 1) { // If we don't fold, we can take exact GPU results
// Push top return_count results to CPU results vectors to be returned
for(auto index : indices) {
int offset_index = storage->getOffsetIndex(index);
results_smiles.push_back(m_smiles[offset_index]);
results_ids.push_back(m_ids[offset_index]);
}
results_scores.assign(d_results_scores.begin(),
d_results_scores.begin()+indices.size());
} else { // If we folded, we need to recalculate scores with full fingerprints
results_scores.resize(indices.size());
for(unsigned int i=0;i<indices.size();i++) {
int offset_index = storage->getOffsetIndex(indices[i]);
results_scores[i] = tanimoto_similarity_cpu(query,
getFingerprint(offset_index));
// Uncomment below to debug pre vs post folding scores
// qDebug() << results_scores[i] << " vs " << d_results_scores[i];
}
top_results_bubble_sort(indices, results_scores, return_count);
return_count = std::min((size_t)return_count, indices.size());
results_scores.resize(return_count);
for(unsigned int i=0;i<return_count;i++) {
// Check whether the re-scored similarity is too low
if(results_scores[i] < similarity_cutoff) {
results_scores.resize(i);
break;
}
results_ids.push_back(m_ids[storage->getOffsetIndex(indices[i])]);
results_smiles.push_back(m_smiles[storage->getOffsetIndex(indices[i])]);
}
}
mutex.lock();
for(unsigned int i=0; i<results_smiles.size(); i++) {
sortable_results->push_back(SortableResult(results_scores[i],
ResultData(results_smiles[i], results_ids[i])));
}
mutex.unlock();
}
void FingerprintDB::search(const Fingerprint& query,
std::vector<char*>& results_smiles,
std::vector<char*>& results_ids,
std::vector<float>& results_scores,
unsigned int return_count,
float similarity_cutoff) const
{
vector<SortableResult> sortable_results;
vector<QFuture<void> > futures;
for(auto& storage : m_storage) {
QFuture<void> future = QtConcurrent::run(this,
&FingerprintDB::search_storage, query, storage,
&sortable_results, return_count, similarity_cutoff);
futures.push_back(future);
}
for(auto& future : futures) {
future.waitForFinished();
}
std::sort(sortable_results.begin(), sortable_results.end());
std::reverse(sortable_results.begin(), sortable_results.end());
for(auto result : sortable_results) {
results_scores.push_back(result.first);
results_smiles.push_back(result.second.first);
results_ids.push_back(result.second.second);
}
int result_size = std::min((int)return_count, (int)results_scores.size());
results_scores.resize(result_size);
results_smiles.resize(result_size);
results_ids.resize(result_size);
}
/**
* @brief
* A CPU implementation of tanimoto similarity, meant purely for testing.
*/
float FingerprintDB::tanimoto_similarity_cpu(const Fingerprint& fp1,
const Fingerprint& fp2) const
{
int total = 0;
int common = 0;
for(int i=0; i<m_fp_intsize; i++) {
total += __builtin_popcount(fp1[i]) + __builtin_popcount(fp2[i]);
common += __builtin_popcount(fp1[i] & fp2[i]);
}
return (float)common / (float)(total-common);
}
size_t get_available_gpu_memory()
{
size_t free=0;
for(unsigned int gpu=0; gpu<get_gpu_count(); gpu++) {
auto lfree = get_gpu_free_memory(gpu);
free += lfree;
}
// Comment out below line to force-test folding:
// free = 100*1024*1024;
return free;
}
} // namespace gpusim
|
135e203f383cb23389886dd141ffda6e9cd0ad10.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO LICENSEE:
*
* This source code and/or documentation ("Licensed Deliverables") are
* subject to NVIDIA intellectual property rights under U.S. and
* international Copyright laws.
*
* These Licensed Deliverables contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a form of NVIDIA software license agreement by and
* between NVIDIA and Licensee ("License Agreement") or electronically
* accepted by Licensee. Notwithstanding any terms or conditions to
* the contrary in the License Agreement, reproduction or disclosure
* of the Licensed Deliverables to any third party without the express
* written consent of NVIDIA is prohibited.
*
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THESE LICENSED DELIVERABLES.
*
* U.S. Government End Users. These Licensed Deliverables are a
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
* 1995), consisting of "commercial computer software" and "commercial
* computer software documentation" as such terms are used in 48
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the Licensed Deliverables with
* only those rights set forth herein.
*
* Any use of the Licensed Deliverables in individual and commercial
* software must include, in the user documentation and internal
* comments to the code, the above Disclaimer and U.S. Government End
* Users Notice.
*/
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include "cublas_utils.h"
using data_type = hipComplex;
int main(int argc, char *argv[]) {
hipblasHandle_t cublasH = NULL;
hipStream_t stream = NULL;
const int m = 2;
const int n = 2;
const int k = 2;
const int lda = 2;
const int ldc = 2;
    /*
     *   A = | 1.1 + 1.2j | 3.5 + 3.6j |
     *       | 3.5 + 3.6j | 4.7 + 4.8j |
     */
const std::vector<data_type> A = {{1.1, 1.2}, {3.5, 3.6}, {3.5, 3.6}, {4.7, 4.8}};
std::vector<data_type> C(m * n);
const data_type alpha = {1.0, 1.0};
const data_type beta = {0.0, 0.0};
hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER;
hipblasOperation_t trans = HIPBLAS_OP_N;
data_type *d_A = nullptr;
data_type *d_C = nullptr;
printf("A\n");
print_matrix(n, k, A.data(), lda);
printf("=====\n");
/* step 1: create cublas handle, bind a stream */
CUBLAS_CHECK(hipblasCreate(&cublasH));
CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
CUBLAS_CHECK(hipblasSetStream(cublasH, stream));
/* step 2: copy data to device */
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size()));
CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_C), sizeof(data_type) * C.size()));
CUDA_CHECK(hipMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), hipMemcpyHostToDevice,
stream));
/* step 3: compute */
CUBLAS_CHECK(hipblasCsyrkEx(cublasH, uplo, trans, n, k, &alpha, d_A,
traits<data_type>::cuda_data_type, lda, &beta, d_C,
traits<data_type>::cuda_data_type, ldc));
/* step 4: copy data to host */
CUDA_CHECK(hipMemcpyAsync(C.data(), d_C, sizeof(data_type) * C.size(), hipMemcpyDeviceToHost,
stream));
CUDA_CHECK(hipStreamSynchronize(stream));
/*
* C = | -28.78 + 26.90j | -43.18 + 40.58j |
* | -28.78 + 26.90j | -43.18 + 40.58j |
*/
printf("C\n");
print_matrix(n, k, C.data(), ldc);
printf("=====\n");
/* free resources */
CUDA_CHECK(hipFree(d_A));
CUDA_CHECK(hipFree(d_C));
CUBLAS_CHECK(hipblasDestroy(cublasH));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| 135e203f383cb23389886dd141ffda6e9cd0ad10.cu | /*
* Copyright 2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO LICENSEE:
*
* This source code and/or documentation ("Licensed Deliverables") are
* subject to NVIDIA intellectual property rights under U.S. and
* international Copyright laws.
*
* These Licensed Deliverables contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a form of NVIDIA software license agreement by and
* between NVIDIA and Licensee ("License Agreement") or electronically
* accepted by Licensee. Notwithstanding any terms or conditions to
* the contrary in the License Agreement, reproduction or disclosure
* of the Licensed Deliverables to any third party without the express
* written consent of NVIDIA is prohibited.
*
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THESE LICENSED DELIVERABLES.
*
* U.S. Government End Users. These Licensed Deliverables are a
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
* 1995), consisting of "commercial computer software" and "commercial
* computer software documentation" as such terms are used in 48
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the Licensed Deliverables with
* only those rights set forth herein.
*
* Any use of the Licensed Deliverables in individual and commercial
* software must include, in the user documentation and internal
* comments to the code, the above Disclaimer and U.S. Government End
* Users Notice.
*/
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include "cublas_utils.h"
using data_type = cuComplex;
int main(int argc, char *argv[]) {
cublasHandle_t cublasH = NULL;
cudaStream_t stream = NULL;
const int m = 2;
const int n = 2;
const int k = 2;
const int lda = 2;
const int ldc = 2;
    /*
     *   A = | 1.1 + 1.2j | 3.5 + 3.6j |
     *       | 3.5 + 3.6j | 4.7 + 4.8j |
     */
const std::vector<data_type> A = {{1.1, 1.2}, {3.5, 3.6}, {3.5, 3.6}, {4.7, 4.8}};
std::vector<data_type> C(m * n);
const data_type alpha = {1.0, 1.0};
const data_type beta = {0.0, 0.0};
cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER;
cublasOperation_t trans = CUBLAS_OP_N;
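    /* With CUBLAS_FILL_MODE_UPPER only the upper triangle of C is referenced and
       updated; CUBLAS_OP_N selects the rank-k update C = alpha * A * A^T + beta * C. */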
data_type *d_A = nullptr;
data_type *d_C = nullptr;
printf("A\n");
print_matrix(n, k, A.data(), lda);
printf("=====\n");
/* step 1: create cublas handle, bind a stream */
CUBLAS_CHECK(cublasCreate(&cublasH));
CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
CUBLAS_CHECK(cublasSetStream(cublasH, stream));
/* step 2: copy data to device */
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size()));
CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_C), sizeof(data_type) * C.size()));
CUDA_CHECK(cudaMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), cudaMemcpyHostToDevice,
stream));
/* step 3: compute */
CUBLAS_CHECK(cublasCsyrkEx(cublasH, uplo, trans, n, k, &alpha, d_A,
traits<data_type>::cuda_data_type, lda, &beta, d_C,
traits<data_type>::cuda_data_type, ldc));
/* step 4: copy data to host */
CUDA_CHECK(cudaMemcpyAsync(C.data(), d_C, sizeof(data_type) * C.size(), cudaMemcpyDeviceToHost,
stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
/*
* C = | -28.78 + 26.90j | -43.18 + 40.58j |
* | -28.78 + 26.90j | -43.18 + 40.58j |
*/
printf("C\n");
print_matrix(n, k, C.data(), ldc);
printf("=====\n");
/* free resources */
CUDA_CHECK(cudaFree(d_A));
CUDA_CHECK(cudaFree(d_C));
CUBLAS_CHECK(cublasDestroy(cublasH));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
0079c929dd3ae6df242a339139a14dc71ec70175.hip | // !!! This is a file automatically generated by hipify!!!
/*
Accelerated Computing for Deep Learning
*/
#define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include "layer.h"
#include <hip/hip_runtime.h>
#include <cstdio>
#include <time.h>
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
// Define layers of CNN
static Layer l_input = Layer(0, 0, 28*28);
static Layer l_c1 = Layer(5*5, 6, 24*24*6);
static Layer l_s1 = Layer(4*4, 1, 6*6*6);
static Layer l_f = Layer(6*6*6, 10, 10);
static void learn();
static unsigned int classify(double data[28][28]);
static void test();
static double forward_pass(double data[28][28]);
static double back_pass();
static inline void loaddata()
{
mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
}
int main(int argc, const char **argv)
{
srand(time(NULL));
hipError_t err = hipInit(0);
if (err != hipSuccess) {
fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err);
return 1;
}
loaddata();
learn();
test();
return 0;
}
// Forward propagation of a single row in dataset
static double forward_pass(double data[28][28])
{
float input[28][28];
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[i][j] = data[i][j];
}
}
l_input.clear();
l_c1.clear();
l_s1.clear();
l_f.clear();
clock_t start, end;
start = clock();
	l_input.setOutput((float *)input); // Launch configuration changed to 128 blocks of 128 threads
hipLaunchKernelGGL(( fp_preact_c1), dim3(128), dim3(128), 0, 0, (float (*)[28])l_input.output, (float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight);
hipLaunchKernelGGL(( fp_bias_c1), dim3(128), dim3(128), 0, 0, (float (*)[24][24])l_c1.preact, l_c1.bias);
hipLaunchKernelGGL(( apply_step_function), dim3(128), dim3(128), 0, 0, l_c1.preact, l_c1.output, l_c1.O);
hipLaunchKernelGGL(( fp_preact_s1), dim3(128), dim3(128), 0, 0, (float (*)[24][24])l_c1.output, (float (*)[6][6])l_s1.preact, (float (*)[4][4])l_s1.weight);
hipLaunchKernelGGL(( fp_bias_s1), dim3(128), dim3(128), 0, 0, (float (*)[6][6])l_s1.preact, l_s1.bias);
hipLaunchKernelGGL(( apply_step_function), dim3(128), dim3(128), 0, 0, l_s1.preact, l_s1.output, l_s1.O);
hipLaunchKernelGGL(( fp_preact_f), dim3(128), dim3(128), 0, 0, (float (*)[6][6])l_s1.output, l_f.preact, (float (*)[6][6][6])l_f.weight);
hipLaunchKernelGGL(( fp_bias_f), dim3(128), dim3(128), 0, 0, l_f.preact, l_f.bias);
hipLaunchKernelGGL(( apply_step_function), dim3(128), dim3(128), 0, 0, l_f.preact, l_f.output, l_f.O);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Back propagation to update weights
static double back_pass()
{
clock_t start, end;
start = clock();
hipLaunchKernelGGL(( bp_weight_f), dim3(128), dim3(128), 0, 0, (float (*)[6][6][6])l_f.d_weight, l_f.d_preact, (float (*)[6][6])l_s1.output); // change 64 to 128
hipLaunchKernelGGL(( bp_bias_f), dim3(128), dim3(128), 0, 0, l_f.bias, l_f.d_preact);
hipLaunchKernelGGL(( bp_output_s1), dim3(128), dim3(128), 0, 0, (float (*)[6][6])l_s1.d_output, (float (*)[6][6][6])l_f.weight, l_f.d_preact);
hipLaunchKernelGGL(( bp_preact_s1), dim3(128), dim3(128), 0, 0, (float (*)[6][6])l_s1.d_preact, (float (*)[6][6])l_s1.d_output, (float (*)[6][6])l_s1.preact);
hipLaunchKernelGGL(( bp_weight_s1), dim3(128), dim3(128), 0, 0, (float (*)[4][4])l_s1.d_weight, (float (*)[6][6])l_s1.d_preact, (float (*)[24][24])l_c1.output);
hipLaunchKernelGGL(( bp_bias_s1), dim3(128), dim3(128), 0, 0, l_s1.bias, (float (*)[6][6])l_s1.d_preact);
hipLaunchKernelGGL(( bp_output_c1), dim3(128), dim3(128), 0, 0, (float (*)[24][24])l_c1.d_output, (float (*)[4][4])l_s1.weight, (float (*)[6][6])l_s1.d_preact);
hipLaunchKernelGGL(( bp_preact_c1), dim3(128), dim3(128), 0, 0, (float (*)[24][24])l_c1.d_preact, (float (*)[24][24])l_c1.d_output, (float (*)[24][24])l_c1.preact);
hipLaunchKernelGGL(( bp_weight_c1), dim3(128), dim3(128), 0, 0, (float (*)[5][5])l_c1.d_weight, (float (*)[24][24])l_c1.d_preact, (float (*)[28])l_input.output);
hipLaunchKernelGGL(( bp_bias_c1), dim3(128), dim3(128), 0, 0, l_c1.bias, (float (*)[24][24])l_c1.d_preact);
hipLaunchKernelGGL(( apply_grad), dim3(128), dim3(128), 0, 0, l_f.weight, l_f.d_weight, l_f.M * l_f.N);
hipLaunchKernelGGL(( apply_grad), dim3(128), dim3(128), 0, 0, l_s1.weight, l_s1.d_weight, l_s1.M * l_s1.N);
hipLaunchKernelGGL(( apply_grad), dim3(128), dim3(128), 0, 0, l_c1.weight, l_c1.d_weight, l_c1.M * l_c1.N);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Unfold the input layer
static void unfold_input(double input[28][28], double unfolded[24*24][5*5])
{
int a = 0;
(void)unfold_input;
for (int i = 0; i < 2; ++i)
for (int j = 0; j < 2; ++j) {
int b = 0;
for (int x = i; x < i + 2; ++x)
for (int y = j; y < j+2; ++y)
unfolded[a][b++] = input[x][y];
a++;
}
}
static void learn()
{
static hipblasHandle_t blas;
hipblasCreate(&blas);
float err;
	int iter = 200; //Increase the iteration count to get better performance
double time_taken = 0.0;
fprintf(stdout ,"Learning\n");
while (iter < 0 || iter-- > 0) {
err = 0.0f;
for (int i = 0; i < train_cnt; ++i) {
float tmp_err;
time_taken += forward_pass(train_set[i].data);
l_f.bp_clear();
l_s1.bp_clear();
l_c1.bp_clear();
// Euclid distance of train_set[i]
hipLaunchKernelGGL(( makeError), dim3(10), dim3(1), 0, 0, l_f.d_preact, l_f.output, train_set[i].label, 10);
hipblasSnrm2(blas, 10, l_f.d_preact, 1, &tmp_err);
err += tmp_err;
time_taken += back_pass();
}
err /= train_cnt;
fprintf(stdout, "error: %e, time_on_gpu: %lf\n", err, time_taken);
if (err < threshold) {
fprintf(stdout, "Training complete, error less than threshold\n\n");
break;
}
}
fprintf(stdout, "\n Time - %lf\n", time_taken);
}
// Returns label of given data (0-9)
static unsigned int classify(double data[28][28])
{
float res[10];
forward_pass(data);
unsigned int max = 0;
hipMemcpy(res, l_f.output, sizeof(float) * 10, hipMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (res[max] < res[i]) {
max = i;
}
}
return max;
}
// Perform forward propagation of test data
static void test()
{
int error = 0;
for (int i = 0; i < test_cnt; ++i) {
if (classify(test_set[i].data) != test_set[i].label) {
++error;
}
}
fprintf(stdout, "Error Rate: %.2lf%%\n",
double(error) / double(test_cnt) * 100.0);
}
| 0079c929dd3ae6df242a339139a14dc71ec70175.cu | /*
Accelerated Computing for Deep Learning
*/
#define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include "layer.h"
#include <cuda.h>
#include <cstdio>
#include <time.h>
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
// Define layers of CNN
static Layer l_input = Layer(0, 0, 28*28);
static Layer l_c1 = Layer(5*5, 6, 24*24*6);
static Layer l_s1 = Layer(4*4, 1, 6*6*6);
static Layer l_f = Layer(6*6*6, 10, 10);
static void learn();
static unsigned int classify(double data[28][28]);
static void test();
static double forward_pass(double data[28][28]);
static double back_pass();
static inline void loaddata()
{
mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
}
int main(int argc, const char **argv)
{
srand(time(NULL));
CUresult err = cuInit(0);
if (err != CUDA_SUCCESS) {
fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err);
return 1;
}
loaddata();
learn();
test();
return 0;
}
// Forward propagation of a single row in dataset
static double forward_pass(double data[28][28])
{
float input[28][28];
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[i][j] = data[i][j];
}
}
l_input.clear();
l_c1.clear();
l_s1.clear();
l_f.clear();
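	// Forward pass: 28x28 input -> 5x5 convolution with 6 feature maps (24x24x6)
	// -> 4x4 subsampling (6x6x6) -> fully connected layer with 10 outputs.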
clock_t start, end;
start = clock();
	l_input.setOutput((float *)input); // Launch configuration changed to 128 blocks of 128 threads
fp_preact_c1<<<128, 128>>>((float (*)[28])l_input.output, (float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight);
fp_bias_c1<<<128, 128>>>((float (*)[24][24])l_c1.preact, l_c1.bias);
apply_step_function<<<128, 128>>>(l_c1.preact, l_c1.output, l_c1.O);
fp_preact_s1<<<128, 128>>>((float (*)[24][24])l_c1.output, (float (*)[6][6])l_s1.preact, (float (*)[4][4])l_s1.weight);
fp_bias_s1<<<128, 128>>>((float (*)[6][6])l_s1.preact, l_s1.bias);
apply_step_function<<<128, 128>>>(l_s1.preact, l_s1.output, l_s1.O);
fp_preact_f<<<128, 128>>>((float (*)[6][6])l_s1.output, l_f.preact, (float (*)[6][6][6])l_f.weight);
fp_bias_f<<<128, 128>>>(l_f.preact, l_f.bias);
apply_step_function<<<128, 128>>>(l_f.preact, l_f.output, l_f.O);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Back propagation to update weights
static double back_pass()
{
clock_t start, end;
start = clock();
bp_weight_f<<<128, 128>>>((float (*)[6][6][6])l_f.d_weight, l_f.d_preact, (float (*)[6][6])l_s1.output); // change 64 to 128
bp_bias_f<<<128, 128>>>(l_f.bias, l_f.d_preact);
bp_output_s1<<<128, 128>>>((float (*)[6][6])l_s1.d_output, (float (*)[6][6][6])l_f.weight, l_f.d_preact);
bp_preact_s1<<<128, 128>>>((float (*)[6][6])l_s1.d_preact, (float (*)[6][6])l_s1.d_output, (float (*)[6][6])l_s1.preact);
bp_weight_s1<<<128, 128>>>((float (*)[4][4])l_s1.d_weight, (float (*)[6][6])l_s1.d_preact, (float (*)[24][24])l_c1.output);
bp_bias_s1<<<128, 128>>>(l_s1.bias, (float (*)[6][6])l_s1.d_preact);
bp_output_c1<<<128, 128>>>((float (*)[24][24])l_c1.d_output, (float (*)[4][4])l_s1.weight, (float (*)[6][6])l_s1.d_preact);
bp_preact_c1<<<128, 128>>>((float (*)[24][24])l_c1.d_preact, (float (*)[24][24])l_c1.d_output, (float (*)[24][24])l_c1.preact);
bp_weight_c1<<<128, 128>>>((float (*)[5][5])l_c1.d_weight, (float (*)[24][24])l_c1.d_preact, (float (*)[28])l_input.output);
bp_bias_c1<<<128, 128>>>(l_c1.bias, (float (*)[24][24])l_c1.d_preact);
apply_grad<<<128, 128>>>(l_f.weight, l_f.d_weight, l_f.M * l_f.N);
apply_grad<<<128, 128>>>(l_s1.weight, l_s1.d_weight, l_s1.M * l_s1.N);
apply_grad<<<128, 128>>>(l_c1.weight, l_c1.d_weight, l_c1.M * l_c1.N);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Unfold the input layer
static void unfold_input(double input[28][28], double unfolded[24*24][5*5])
{
int a = 0;
(void)unfold_input;
for (int i = 0; i < 2; ++i)
for (int j = 0; j < 2; ++j) {
int b = 0;
for (int x = i; x < i + 2; ++x)
for (int y = j; y < j+2; ++y)
unfolded[a][b++] = input[x][y];
a++;
}
}
static void learn()
{
static cublasHandle_t blas;
cublasCreate(&blas);
float err;
	int iter = 200; //Increase the iteration count to get better performance
double time_taken = 0.0;
fprintf(stdout ,"Learning\n");
while (iter < 0 || iter-- > 0) {
err = 0.0f;
for (int i = 0; i < train_cnt; ++i) {
float tmp_err;
time_taken += forward_pass(train_set[i].data);
l_f.bp_clear();
l_s1.bp_clear();
l_c1.bp_clear();
// Euclid distance of train_set[i]
makeError<<<10, 1>>>(l_f.d_preact, l_f.output, train_set[i].label, 10);
cublasSnrm2(blas, 10, l_f.d_preact, 1, &tmp_err);
err += tmp_err;
time_taken += back_pass();
}
err /= train_cnt;
fprintf(stdout, "error: %e, time_on_gpu: %lf\n", err, time_taken);
if (err < threshold) {
fprintf(stdout, "Training complete, error less than threshold\n\n");
break;
}
}
fprintf(stdout, "\n Time - %lf\n", time_taken);
}
// Returns label of given data (0-9)
static unsigned int classify(double data[28][28])
{
float res[10];
forward_pass(data);
unsigned int max = 0;
cudaMemcpy(res, l_f.output, sizeof(float) * 10, cudaMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (res[max] < res[i]) {
max = i;
}
}
return max;
}
// Perform forward propagation of test data
static void test()
{
int error = 0;
for (int i = 0; i < test_cnt; ++i) {
if (classify(test_set[i].data) != test_set[i].label) {
++error;
}
}
fprintf(stdout, "Error Rate: %.2lf%%\n",
double(error) / double(test_cnt) * 100.0);
}
|
679ac99f5cb0f733e84af1fbc7e460c810374ad7.hip | // !!! This is a file automatically generated by hipify!!!
/**
 * Copyright 2013 Andreas Schäfer
*
* Distributed under the Boost Software License, Version 1.0. (See accompanying
* file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
*/
#include <hip/hip_runtime.h>
#include "cell.h"
#include "util.h"
#include "update_lbm_classic.h"
#include "update_lbm_object_oriented.h"
#include "update_lbm_cuda_flat_array.h"
int main(int argc, char **argv)
{
if (argc != 2) {
std::cerr << "usage: " << argv[0] << " CUDA_DEVICE\n";
return 1;
}
std::stringstream s;
s << argv[1];
int cudaDevice;
s >> cudaDevice;
hipSetDevice(cudaDevice);
std::cout << "# test name ; dim ; performance\n";
benchmark_lbm_cuda_object_oriented().evaluate();
benchmark_lbm_cuda_classic().evaluate();
benchmark_lbm_cuda_flat_array().evaluate();
return 0;
}
| 679ac99f5cb0f733e84af1fbc7e460c810374ad7.cu | /**
* Copyright 2013 Andreas Schäfer
*
* Distributed under the Boost Software License, Version 1.0. (See accompanying
* file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt)
*/
#include <cuda.h>
#include "cell.h"
#include "util.h"
#include "update_lbm_classic.h"
#include "update_lbm_object_oriented.h"
#include "update_lbm_cuda_flat_array.h"
int main(int argc, char **argv)
{
if (argc != 2) {
std::cerr << "usage: " << argv[0] << " CUDA_DEVICE\n";
return 1;
}
std::stringstream s;
s << argv[1];
int cudaDevice;
s >> cudaDevice;
cudaSetDevice(cudaDevice);
std::cout << "# test name ; dim ; performance\n";
benchmark_lbm_cuda_object_oriented().evaluate();
benchmark_lbm_cuda_classic().evaluate();
benchmark_lbm_cuda_flat_array().evaluate();
return 0;
}
|
f3c85eaacb63f4c50072d180c65ca239b0c04dba.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaCompactMesh.h"
#include "CudaMesh.h"
void compactMesh(
int& out_numofpoint,
double*& out_pointlist,
double*& out_weightlist,
RealD& t_pointlist,
RealD& t_weightlist,
int& out_numoftriface,
int*& out_trifacelist,
double*& out_trifacecent,
IntD& t_trifacelist,
RealD& t_trifacecent,
TriStatusD& t_tristatus,
TetHandleD& t_tri2tetlist,
int& out_numoftet,
int& out_numoftet_indomain,
int*& out_tetlist,
tetstatus*& out_tetstatus,
IntD& t_tetlist,
TetStatusD& t_tetstatus
)
{
IntD t_sizes, t_indices, t_list;
RealD t_list1;
TetStatusD t_list2;
int numberofthreads, numberofblocks;
out_numofpoint = t_pointlist.size() / 3;
out_pointlist = new double[3 * out_numofpoint];
out_weightlist = new double[out_numofpoint];
hipMemcpy(out_pointlist, thrust::raw_pointer_cast(&t_pointlist[0]), 3 * out_numofpoint * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(out_weightlist, thrust::raw_pointer_cast(&t_weightlist[0]), out_numofpoint * sizeof(double), hipMemcpyDeviceToHost);
int last_triface = t_tristatus.size();
t_sizes.resize(last_triface);
t_indices.resize(last_triface);
thrust::fill(t_sizes.begin(), t_sizes.end(), 1);
thrust::replace_if(t_sizes.begin(), t_sizes.end(), t_tristatus.begin(), isEmptyTri(), 0);
thrust::exclusive_scan(t_sizes.begin(), t_sizes.end(), t_indices.begin());
out_numoftriface = thrust::reduce(t_sizes.begin(), t_sizes.end());
out_trifacelist = new int[3 * out_numoftriface];
out_trifacecent = new double[3 * out_numoftriface];
t_list.resize(3 * out_numoftriface);
t_list1.resize(3 * out_numoftriface);
numberofthreads = last_triface;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
kernelCompactTriface << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_trifacelist[0]),
thrust::raw_pointer_cast(&t_trifacecent[0]),
thrust::raw_pointer_cast(&t_tri2tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
thrust::raw_pointer_cast(&t_sizes[0]),
thrust::raw_pointer_cast(&t_indices[0]),
thrust::raw_pointer_cast(&t_list[0]),
thrust::raw_pointer_cast(&t_list1[0]),
numberofthreads
);
hipMemcpy(out_trifacelist, thrust::raw_pointer_cast(&t_list[0]), 3 * out_numoftriface * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(out_trifacecent, thrust::raw_pointer_cast(&t_list1[0]), 3 * out_numoftriface * sizeof(double), hipMemcpyDeviceToHost);
int last_tet = t_tetstatus.size();
t_sizes.resize(last_tet);
t_indices.resize(last_tet);
thrust::fill(t_sizes.begin(), t_sizes.end(), 1);
numberofthreads = last_tet;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
kernelCompactTet_Phase1 << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
thrust::raw_pointer_cast(&t_sizes[0]),
numberofthreads
);
thrust::exclusive_scan(t_sizes.begin(), t_sizes.end(), t_indices.begin());
out_numoftet = thrust::reduce(t_sizes.begin(), t_sizes.end());
out_tetlist = new int[4 * out_numoftet];
out_tetstatus = new tetstatus[out_numoftet];
t_list.resize(4 * out_numoftet);
t_list2.resize(out_numoftet);
kernelCompactTet_Phase2 << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
thrust::raw_pointer_cast(&t_sizes[0]),
thrust::raw_pointer_cast(&t_indices[0]),
thrust::raw_pointer_cast(&t_list[0]),
thrust::raw_pointer_cast(&t_list2[0]),
numberofthreads
);
hipMemcpy(out_tetlist, thrust::raw_pointer_cast(&t_list[0]), 4 * out_numoftet * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(out_tetstatus, thrust::raw_pointer_cast(&t_list2[0]), out_numoftet * sizeof(tetstatus), hipMemcpyDeviceToHost);
int numoftets_indomain = 0;
for (int i = 0; i < out_numoftet; i++)
{
if (out_tetstatus[i].isInDomain())
numoftets_indomain++;
}
out_numoftet_indomain = numoftets_indomain;
}
| f3c85eaacb63f4c50072d180c65ca239b0c04dba.cu | #include "CudaCompactMesh.h"
#include "CudaMesh.h"
void compactMesh(
int& out_numofpoint,
double*& out_pointlist,
double*& out_weightlist,
RealD& t_pointlist,
RealD& t_weightlist,
int& out_numoftriface,
int*& out_trifacelist,
double*& out_trifacecent,
IntD& t_trifacelist,
RealD& t_trifacecent,
TriStatusD& t_tristatus,
TetHandleD& t_tri2tetlist,
int& out_numoftet,
int& out_numoftet_indomain,
int*& out_tetlist,
tetstatus*& out_tetstatus,
IntD& t_tetlist,
TetStatusD& t_tetstatus
)
{
IntD t_sizes, t_indices, t_list;
RealD t_list1;
TetStatusD t_list2;
int numberofthreads, numberofblocks;
out_numofpoint = t_pointlist.size() / 3;
out_pointlist = new double[3 * out_numofpoint];
out_weightlist = new double[out_numofpoint];
cudaMemcpy(out_pointlist, thrust::raw_pointer_cast(&t_pointlist[0]), 3 * out_numofpoint * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(out_weightlist, thrust::raw_pointer_cast(&t_weightlist[0]), out_numofpoint * sizeof(double), cudaMemcpyDeviceToHost);
int last_triface = t_tristatus.size();
t_sizes.resize(last_triface);
t_indices.resize(last_triface);
thrust::fill(t_sizes.begin(), t_sizes.end(), 1);
thrust::replace_if(t_sizes.begin(), t_sizes.end(), t_tristatus.begin(), isEmptyTri(), 0);
thrust::exclusive_scan(t_sizes.begin(), t_sizes.end(), t_indices.begin());
out_numoftriface = thrust::reduce(t_sizes.begin(), t_sizes.end());
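	// Stream compaction: live trifaces are flagged 1 (empty ones replaced by 0),
	// the exclusive scan gives each survivor its output slot, and the reduction
	// yields the compacted face count.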
out_trifacelist = new int[3 * out_numoftriface];
out_trifacecent = new double[3 * out_numoftriface];
t_list.resize(3 * out_numoftriface);
t_list1.resize(3 * out_numoftriface);
numberofthreads = last_triface;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
kernelCompactTriface << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_trifacelist[0]),
thrust::raw_pointer_cast(&t_trifacecent[0]),
thrust::raw_pointer_cast(&t_tri2tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
thrust::raw_pointer_cast(&t_sizes[0]),
thrust::raw_pointer_cast(&t_indices[0]),
thrust::raw_pointer_cast(&t_list[0]),
thrust::raw_pointer_cast(&t_list1[0]),
numberofthreads
);
cudaMemcpy(out_trifacelist, thrust::raw_pointer_cast(&t_list[0]), 3 * out_numoftriface * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(out_trifacecent, thrust::raw_pointer_cast(&t_list1[0]), 3 * out_numoftriface * sizeof(double), cudaMemcpyDeviceToHost);
int last_tet = t_tetstatus.size();
t_sizes.resize(last_tet);
t_indices.resize(last_tet);
thrust::fill(t_sizes.begin(), t_sizes.end(), 1);
numberofthreads = last_tet;
numberofblocks = (ceil)((float)numberofthreads / BLOCK_SIZE);
kernelCompactTet_Phase1 << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
thrust::raw_pointer_cast(&t_sizes[0]),
numberofthreads
);
thrust::exclusive_scan(t_sizes.begin(), t_sizes.end(), t_indices.begin());
out_numoftet = thrust::reduce(t_sizes.begin(), t_sizes.end());
out_tetlist = new int[4 * out_numoftet];
out_tetstatus = new tetstatus[out_numoftet];
t_list.resize(4 * out_numoftet);
t_list2.resize(out_numoftet);
kernelCompactTet_Phase2 << <numberofblocks, BLOCK_SIZE >> > (
thrust::raw_pointer_cast(&t_tetlist[0]),
thrust::raw_pointer_cast(&t_tetstatus[0]),
thrust::raw_pointer_cast(&t_sizes[0]),
thrust::raw_pointer_cast(&t_indices[0]),
thrust::raw_pointer_cast(&t_list[0]),
thrust::raw_pointer_cast(&t_list2[0]),
numberofthreads
);
cudaMemcpy(out_tetlist, thrust::raw_pointer_cast(&t_list[0]), 4 * out_numoftet * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(out_tetstatus, thrust::raw_pointer_cast(&t_list2[0]), out_numoftet * sizeof(tetstatus), cudaMemcpyDeviceToHost);
int numoftets_indomain = 0;
for (int i = 0; i < out_numoftet; i++)
{
if (out_tetstatus[i].isInDomain())
numoftets_indomain++;
}
out_numoftet_indomain = numoftets_indomain;
}
|
cf567c884f5393eb57f7b9dc87181e3f5d8fc70b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8) {
if (comp <= (var_1 - (-1.8809E-35f - (+1.2500E-42f / var_2 + (-0.0f - var_3))))) {
if (comp > (var_4 * atanf(fmodf(cosf(-1.1861E-37f), -1.0219E-35f / asinf((var_5 - var_6 + +1.3468E-37f)))))) {
comp = asinf(atan2f(log10f(var_7 / expf(-1.2621E-35f)), -1.5984E-35f * (-1.0863E-36f / +1.2865E-5f + var_8)));
comp += tanhf(-0.0f);
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9);
hipDeviceSynchronize();
return 0;
}
| cf567c884f5393eb57f7b9dc87181e3f5d8fc70b.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8) {
if (comp <= (var_1 - (-1.8809E-35f - (+1.2500E-42f / var_2 + (-0.0f - var_3))))) {
if (comp > (var_4 * atanf(fmodf(cosf(-1.1861E-37f), -1.0219E-35f / asinf((var_5 - var_6 + +1.3468E-37f)))))) {
comp = asinf(atan2f(log10f(var_7 / expf(-1.2621E-35f)), -1.5984E-35f * (-1.0863E-36f / +1.2865E-5f + var_8)));
comp += tanhf(-0.0f);
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9);
cudaDeviceSynchronize();
return 0;
}
|
cb8c2295e287f6245a392b1513fd71ada5d20f66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Program to perform ADI time-marching on a regular 3D grid
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size for
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 32
#define BLOCK_Y 4
////////////////////////////////////////////////////////////////////////
// include kernel function
////////////////////////////////////////////////////////////////////////
#include <adi3d_kernel.h>
////////////////////////////////////////////////////////////////////////
// declare Gold routine
////////////////////////////////////////////////////////////////////////
void Gold_adi(int, int, int, float, float*, float*, float*, float*,
float*, float*, float*, float*, float*, float*, float*);
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
// 'h_' prefix - CPU (host) memory space
int NX=200, NY=200, NZ=200, REPEAT=10, i, j, k, ind, printout=0;
float *h_u1, *h_u2, *h_du,
*h_ax, *h_bx, *h_cx,
*h_ay, *h_by, *h_cy,
*h_az, *h_bz, *h_cz,
err, lam=1.0f;
// 'd_' prefix - GPU (device) memory space
float *d_u, *d_du,
*d_ax, *d_bx, *d_cx,
*d_ay, *d_by, *d_cy,
*d_az, *d_bz, *d_cz;
printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ);
if( NX>256 || NY>256 || NZ>256 ) {
printf("No dimension can exceed 256 due to hard-coded array sizes\n");
return -1;
}
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// allocate memory for arrays
h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_du = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_ax = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_bx = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_cx = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_ay = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_by = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_cy = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_az = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_bz = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_cz = (float *)malloc(sizeof(float)*NX*NY*NZ);
checkCudaErrors( hipMalloc((void **)&d_u, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_du, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_ax, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_bx, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_cx, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_ay, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_by, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_cy, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_az, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_bz, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_cz, sizeof(float)*NX*NY*NZ) );
// initialise u1
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1)
h_u1[ind] = 1.0f; // Dirichlet b.c.'s
else
h_u1[ind] = 0.0f;
}
}
}
// copy u1 to device
hipEventRecord(start);
checkCudaErrors(hipMemcpy(d_u, h_u1, sizeof(float)*NX*NY*NZ,
hipMemcpyHostToDevice));
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\nCopy u1 to device: %f (ms) \n", milli);
// Set up the execution configuration
dim3 dimGrid1(1+(NX-1)/BLOCK_X, 1+(NY-1)/BLOCK_Y);
dim3 dimBlock1(BLOCK_X,BLOCK_Y);
dim3 dimGrid2(1+(NX-1)/16, 1+(NY-1)/4);
dim3 dimBlock2(16,4);
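  // dimGrid1/dimBlock1 (BLOCK_X x BLOCK_Y = 32x4) are used by GPU_adi_rhs below;
  // the 16x4 configuration drives the GPU_adi_x/y/z sweeps.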
// Execute GPU kernel
hipEventRecord(start);
for (i = 1; i <= REPEAT; ++i) {
hipLaunchKernelGGL(( GPU_adi_rhs), dim3(dimGrid1), dim3(dimBlock1), 0, 0, NX, NY, NZ,
lam, d_u, d_du,
d_ax, d_bx, d_cx,
d_ay, d_by, d_cy,
d_az, d_bz, d_cz);
getLastCudaError("GPU_adi_rhs execution failed\n");
hipLaunchKernelGGL(( GPU_adi_x), dim3(dimGrid2), dim3(dimBlock2), 0, 0, NX, NY, NZ,
d_ax, d_bx, d_cx, d_du);
getLastCudaError("GPU_adi_x execution failed\n");
hipLaunchKernelGGL(( GPU_adi_y), dim3(dimGrid2), dim3(dimBlock2), 0, 0, NX, NY, NZ,
d_ay, d_by, d_cy, d_du);
getLastCudaError("GPU_adi_y execution failed\n");
hipLaunchKernelGGL(( GPU_adi_z), dim3(dimGrid2), dim3(dimBlock2), 0, 0, NX, NY, NZ, d_u,
d_az, d_bz, d_cz, d_du);
getLastCudaError("GPU_adi_z execution failed\n");
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\n%dx GPU_adi: %f (ms) \n", REPEAT, milli);
// Read back GPU results
hipEventRecord(start);
checkCudaErrors(hipMemcpy(h_u2, d_u, sizeof(float)*NX*NY*NZ,
hipMemcpyDeviceToHost) );
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\nCopy u2 to host: %f (ms) \n", milli);
// print out corner of array
if (printout) {
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u2[ind]);
}
printf("\n");
}
printf("\n");
}
}
// Gold (CPU reference) computation
hipEventRecord(start);
for (int i = 1; i <= REPEAT; ++i) {
Gold_adi(NX, NY, NZ,
lam, h_u1, h_du,
h_ax, h_bx, h_cx,
h_ay, h_by, h_cy,
h_az, h_bz, h_cz);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\n%dx Gold_adi: %f (ms) \n \n", REPEAT, milli);
// print out corner of array
if (printout) {
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u1[ind]);
}
printf("\n");
}
printf("\n");
}
}
// error check
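// accumulate the squared difference between the CPU (h_u1) and GPU (h_u2) solutions; the rms is printed below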
err = 0.0;
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]);
}
}
}
printf("\n rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ)));
// Release GPU and CPU memory
checkCudaErrors( hipFree(d_u) );
checkCudaErrors( hipFree(d_du) );
checkCudaErrors( hipFree(d_ax) );
checkCudaErrors( hipFree(d_bx) );
checkCudaErrors( hipFree(d_cx) );
checkCudaErrors( hipFree(d_ay) );
checkCudaErrors( hipFree(d_by) );
checkCudaErrors( hipFree(d_cy) );
checkCudaErrors( hipFree(d_az) );
checkCudaErrors( hipFree(d_bz) );
checkCudaErrors( hipFree(d_cz) );
free(h_u1);
free(h_u2);
free(h_du);
free(h_ax);
free(h_bx);
free(h_cx);
free(h_ay);
free(h_by);
free(h_cy);
free(h_az);
free(h_bz);
free(h_cz);
hipDeviceReset();
}
| cb8c2295e287f6245a392b1513fd71ada5d20f66.cu | //
// Program to perform ADI time-marching on a regular 3D grid
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 32
#define BLOCK_Y 4
////////////////////////////////////////////////////////////////////////
// include kernel function
////////////////////////////////////////////////////////////////////////
#include <adi3d_kernel.h>
////////////////////////////////////////////////////////////////////////
// declare Gold routine
////////////////////////////////////////////////////////////////////////
void Gold_adi(int, int, int, float, float*, float*, float*, float*,
float*, float*, float*, float*, float*, float*, float*);
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
// 'h_' prefix - CPU (host) memory space
int NX=200, NY=200, NZ=200, REPEAT=10, i, j, k, ind, printout=0;
float *h_u1, *h_u2, *h_du,
*h_ax, *h_bx, *h_cx,
*h_ay, *h_by, *h_cy,
*h_az, *h_bz, *h_cz,
err, lam=1.0f;
// 'd_' prefix - GPU (device) memory space
float *d_u, *d_du,
*d_ax, *d_bx, *d_cx,
*d_ay, *d_by, *d_cy,
*d_az, *d_bz, *d_cz;
printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ);
if( NX>256 || NY>256 || NZ>256 ) {
printf("No dimension can exceed 256 due to hard-coded array sizes\n");
return -1;
}
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// allocate memory for arrays
h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_du = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_ax = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_bx = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_cx = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_ay = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_by = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_cy = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_az = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_bz = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_cz = (float *)malloc(sizeof(float)*NX*NY*NZ);
checkCudaErrors( cudaMalloc((void **)&d_u, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_du, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_ax, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_bx, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_cx, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_ay, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_by, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_cy, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_az, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_bz, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_cz, sizeof(float)*NX*NY*NZ) );
// initialise u1
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1)
h_u1[ind] = 1.0f; // Dirichlet b.c.'s
else
h_u1[ind] = 0.0f;
}
}
}
// copy u1 to device
cudaEventRecord(start);
checkCudaErrors(cudaMemcpy(d_u, h_u1, sizeof(float)*NX*NY*NZ,
cudaMemcpyHostToDevice));
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\nCopy u1 to device: %f (ms) \n", milli);
// Set up the execution configuration
dim3 dimGrid1(1+(NX-1)/BLOCK_X, 1+(NY-1)/BLOCK_Y);
dim3 dimBlock1(BLOCK_X,BLOCK_Y);
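// the implicit x/y/z sweep kernels below use a separate 16x4 thread-block configuration (dimGrid2/dimBlock2)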
dim3 dimGrid2(1+(NX-1)/16, 1+(NY-1)/4);
dim3 dimBlock2(16,4);
// Execute GPU kernel
cudaEventRecord(start);
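// each pass launches the RHS kernel followed by the implicit x, y and z sweep kernels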
for (i = 1; i <= REPEAT; ++i) {
GPU_adi_rhs<<<dimGrid1, dimBlock1>>>(NX, NY, NZ,
lam, d_u, d_du,
d_ax, d_bx, d_cx,
d_ay, d_by, d_cy,
d_az, d_bz, d_cz);
getLastCudaError("GPU_adi_rhs execution failed\n");
GPU_adi_x<<<dimGrid2, dimBlock2>>>(NX, NY, NZ,
d_ax, d_bx, d_cx, d_du);
getLastCudaError("GPU_adi_x execution failed\n");
GPU_adi_y<<<dimGrid2, dimBlock2>>>(NX, NY, NZ,
d_ay, d_by, d_cy, d_du);
getLastCudaError("GPU_adi_y execution failed\n");
GPU_adi_z<<<dimGrid2, dimBlock2>>>(NX, NY, NZ, d_u,
d_az, d_bz, d_cz, d_du);
getLastCudaError("GPU_adi_z execution failed\n");
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\n%dx GPU_adi: %f (ms) \n", REPEAT, milli);
// Read back GPU results
cudaEventRecord(start);
checkCudaErrors(cudaMemcpy(h_u2, d_u, sizeof(float)*NX*NY*NZ,
cudaMemcpyDeviceToHost) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\nCopy u2 to host: %f (ms) \n", milli);
// print out corner of array
if (printout) {
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u2[ind]);
}
printf("\n");
}
printf("\n");
}
}
// Gold (CPU reference) computation
cudaEventRecord(start);
for (int i = 1; i <= REPEAT; ++i) {
Gold_adi(NX, NY, NZ,
lam, h_u1, h_du,
h_ax, h_bx, h_cx,
h_ay, h_by, h_cy,
h_az, h_bz, h_cz);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\n%dx Gold_adi: %f (ms) \n \n", REPEAT, milli);
// print out corner of array
if (printout) {
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u1[ind]);
}
printf("\n");
}
printf("\n");
}
}
// error check
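// accumulate the squared difference between the CPU (h_u1) and GPU (h_u2) solutions; the rms is printed below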
err = 0.0;
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]);
}
}
}
printf("\n rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ)));
// Release GPU and CPU memory
checkCudaErrors( cudaFree(d_u) );
checkCudaErrors( cudaFree(d_du) );
checkCudaErrors( cudaFree(d_ax) );
checkCudaErrors( cudaFree(d_bx) );
checkCudaErrors( cudaFree(d_cx) );
checkCudaErrors( cudaFree(d_ay) );
checkCudaErrors( cudaFree(d_by) );
checkCudaErrors( cudaFree(d_cy) );
checkCudaErrors( cudaFree(d_az) );
checkCudaErrors( cudaFree(d_bz) );
checkCudaErrors( cudaFree(d_cz) );
free(h_u1);
free(h_u2);
free(h_du);
free(h_ax);
free(h_bx);
free(h_cx);
free(h_ay);
free(h_by);
free(h_cy);
free(h_az);
free(h_bz);
free(h_cz);
cudaDeviceReset();
}
|
ffdf6c413279651bc9aa8dc6accfbe786f88ce3d.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iomanip>
#include <ios>
#include <iostream>
#include <stdexcept>
#include "cutlass/core_io.h"
#include "cublas_helpers.h"
#include "gemm_operation_profiler.h"
#include "gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
GemmOperationProfiler::GemmOperationProfiler(Options const& options)
: OperationProfiler(
options, library::OperationKind::kGemm,
{
{ArgumentTypeID::kEnumerated,
{"gemm_kind"},
"Variant of GEMM (gemm, batched, array, universal, "
"planar_complex, planar_complex_array)"},
{ArgumentTypeID::kInteger,
{"m", "problem-size::m"},
"M dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger,
{"n", "problem-size::n"},
"N dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger,
{"k", "problem-size::k"},
"K dimension of the GEMM problem space"},
{ArgumentTypeID::kTensor,
{"A"},
"Tensor storing the A operand"},
{ArgumentTypeID::kTensor,
{"B"},
"Tensor storing the B operand"},
{ArgumentTypeID::kTensor,
{"C"},
"Tensor storing the C operand"},
{ArgumentTypeID::kScalar,
{"alpha", "epilogue::alpha"},
"Epilogue scalar alpha"},
{ArgumentTypeID::kScalar,
{"beta", "epilogue::beta"},
"Epilogue scalar beta"},
{ArgumentTypeID::kInteger,
{"split_k_slices", "split-k-slices"},
"Number of partitions of K dimension"},
{ArgumentTypeID::kInteger,
{"batch_count", "batch-count"},
"Number of GEMMs computed in one batch"},
},
{library::Provider::kCUBLAS}) {
description_ =
" General matrix-matrix product. D = alpha * A*B + beta * C";
}
/// Destructor
GemmOperationProfiler::~GemmOperationProfiler() {}
/// Prints usage statement for the math function
void GemmOperationProfiler::print_usage(std::ostream& out) const {
out << "GEMM"
<< "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void GemmOperationProfiler::print_examples(std::ostream& out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size:\n"
<< " $ cutlass_profiler --operation=Gemm --m=1024 --n=1024 --k=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=Gemm --m=1024:4096:256 "
"--n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=Gemm "
"--accumulator-type=f16,f32\n\n"
<< "Run when A is f16 with column-major and B is any datatype with "
"row-major (For column major, use column, col, or n. For row major "
"use, row or t):\n"
<< " $ cutlass_profiler --operation=Gemm --A=f16:column --B=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=Gemm --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=Gemm "
"--dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=Gemm "
"--dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace "
"if results are incorrect (note that --cta-tile::k=32 is default "
"cta-tile size):\n"
<< " $ cutlass_profiler --operation=Gemm --cta_m=256 --cta_n=128 "
"--cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to gemm kernels with a quick functional test and "
"save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=Gemm \\ \n"
<< " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status GemmOperationProfiler::GemmProblem::parse(
library::GemmDescription const& operation_desc,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
if (!arg_as_int(this->m, "m", problem_space, problem)) {
// default value
this->m = 1024;
}
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->k, "k", problem_space, problem)) {
// default value
this->k = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space,
problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space,
problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.B, "B", problem_space,
problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space,
problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(this->alpha, operation_desc.element_epilogue, "alpha",
problem_space, problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue,
1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(this->beta, operation_desc.element_epilogue, "beta",
problem_space, problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->m), int(this->k)})
.front();
this->ldb = DeviceAllocation::get_packed_layout(
operation_desc.B.layout, {int(this->k), int(this->n)})
.front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->m), int(this->n)})
.front();
return Status::kSuccess;
}
/// Initializes a performance result
void GemmOperationProfiler::GemmProblem::initialize_result(
PerformanceResult& result,
library::GemmDescription const& operation_desc,
ProblemSpace const& problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "gemm_kind", problem_space,
library::to_string(operation_desc.gemm_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) +
":" + library::to_string(operation_desc.A.layout));
set_argument(result, "B", problem_space,
std::string(library::to_string(operation_desc.B.element)) +
":" + library::to_string(operation_desc.B.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) +
":" + library::to_string(operation_desc.C.layout));
set_argument(result, "m", problem_space, m);
set_argument(result, "n", problem_space, n);
set_argument(result, "k", problem_space, k);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status GemmOperationProfiler::initialize_configuration(
Options const& options, PerformanceReport& report,
DeviceContext& device_context, library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
library::GemmDescription const& operation_desc =
static_cast<library::GemmDescription const&>(
operation->description());
if (operation_desc.gemm_kind != library::GemmKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
gemm_workspace_.configuration.problem_size.m() = int(problem_.m);
gemm_workspace_.configuration.problem_size.n() = int(problem_.n);
gemm_workspace_.configuration.problem_size.k() = int(problem_.k);
gemm_workspace_.configuration.lda = problem_.lda;
gemm_workspace_.configuration.ldb = problem_.ldb;
gemm_workspace_.configuration.ldc = problem_.ldc;
gemm_workspace_.configuration.ldd = problem_.ldc;
// gemm_workspace_.configuration.split_k_slices =
// int(problem_.split_k_slices);
gemm_workspace_.configuration.batch_count = int(problem_.split_k_slices);
gemm_workspace_.arguments.A = nullptr;
gemm_workspace_.arguments.B = nullptr;
gemm_workspace_.arguments.C = nullptr;
gemm_workspace_.arguments.D = nullptr;
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc,
problem_space);
return operation->can_implement(&gemm_workspace_.configuration,
&gemm_workspace_.arguments);
}
/// Initializes the performance result
void GemmOperationProfiler::initialize_result_(
PerformanceResult& result, Options const& options,
library::GemmDescription const& operation_desc,
ProblemSpace const& problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc,
problem_space);
// Input bytes read and Output bytes written for the gemm problem
result.bytes = int64_t(library::sizeof_bits(operation_desc.A.element) *
problem_.m / 8) *
problem_.k +
int64_t(library::sizeof_bits(operation_desc.B.element) *
problem_.n / 8) *
problem_.k +
int64_t(library::sizeof_bits(operation_desc.C.element) *
problem_.m / 8) *
problem_.n;
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(problem_.beta.begin(), problem_.beta.end(),
[](uint8_t i) { return i == 0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
result.bytes += int64_t(library::sizeof_bits(operation_desc.C.element) *
problem_.m / 8) *
problem_.n;
}
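    // flops = 2 * (m*n*k + m*n): multiply-accumulates for the product plus the epilogue update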
result.flops = 2 * (problem_.m * problem_.n * problem_.k +
problem_.m * problem_.n);
result.runtime = 0;
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
result.flops *= 4;
break;
default:
break;
}
}
/// Initializes workspace
Status GemmOperationProfiler::initialize_workspace(
Options const& options, PerformanceReport& report,
DeviceContext& device_context, library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
library::GemmDescription const& operation_desc =
static_cast<library::GemmDescription const&>(
operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
gemm_workspace_.A = device_context.allocate_tensor(
options, "A", operation_desc.A.element, operation_desc.A.layout,
{int(problem_.m), int(problem_.k)}, {int(problem_.lda)});
gemm_workspace_.B = device_context.allocate_tensor(
options, "B", operation_desc.B.element, operation_desc.B.layout,
{int(problem_.k), int(problem_.n)}, {int(problem_.ldb)});
gemm_workspace_.C = device_context.allocate_tensor(
options, "C", operation_desc.C.element, operation_desc.C.layout,
{int(problem_.m), int(problem_.n)}, {int(problem_.ldc)});
gemm_workspace_.Computed = device_context.allocate_tensor(
"D", operation_desc.C.element, operation_desc.C.layout,
{int(problem_.m), int(problem_.n)}, {int(problem_.ldc)});
gemm_workspace_.Reference = device_context.allocate_tensor(
"Reference", operation_desc.C.element, operation_desc.C.layout,
{int(problem_.m), int(problem_.n)}, {int(problem_.ldc)});
gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(
&gemm_workspace_.configuration);
gemm_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(
&gemm_workspace_.configuration);
gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8,
workspace_size);
status = operation->initialize(
&gemm_workspace_.configuration,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kGemm;
results_.back().disposition = Disposition::kNotRun;
for (auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool GemmOperationProfiler::verify_cutlass(
Options const& options, PerformanceReport& report,
DeviceContext& device_context, library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&gemm_workspace_.arguments, gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
hipError_t result = hipDeviceSynchronize();
if (result != hipSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran, but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const& gemm_desc =
static_cast<library::GemmDescription const&>(
operation->description());
if (cublas_satisfies(gemm_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(options, report, device_context, operation,
problem_space, problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] =
Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for (auto& m : results_.back().verification_map) {
if (m.second == Disposition::kFailed ||
m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if (!is_any_verification_run_passed &&
m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if (is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool GemmOperationProfiler::verify_with_cublas_(
Options const& options, PerformanceReport& report,
DeviceContext& device_context, library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
#if CUTLASS_ENABLE_CUBLAS
library::GemmDescription const& gemm_desc =
static_cast<library::GemmDescription const&>(
operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
hipblasStatus_t status = handle.get_cublas_create_status();
if (status != HIPBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] =
Disposition::kFailed;
return true;
}
std::vector<hipblasGemmAlgo_t> algorithms;
detail::select_cublas_algorithms(algorithms, options, gemm_desc);
if (algorithms.empty()) {
// no algorithm selected
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to hipblasGemmEx()
//
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.Reference->data();
gemm_workspace_.arguments.D = gemm_workspace_.Reference->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode =
library::ScalarPointerMode::kHost;
detail::cublasGemmExDispatcher gemm_op(
gemm_desc, gemm_workspace_.configuration,
gemm_workspace_.arguments, algorithms.front());
if (gemm_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] =
Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = gemm_op(handle);
// Handle errors
if (status != HIPBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] =
Disposition::kFailed;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] =
compare_tensors(options, *gemm_workspace_.Computed,
*gemm_workspace_.Reference);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] ==
Disposition::kIncorrect) {
save_workspace(device_context, options, gemm_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
} catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] =
Disposition::kFailed;
}
#endif
// Return true means continue profiling
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool GemmOperationProfiler::profile(Options const& options,
PerformanceReport& report,
DeviceContext& device_context,
library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode =
library::ScalarPointerMode::kHost;
results_.back().status =
profile_cutlass_(results_.back().runtime, options, operation,
&gemm_workspace_.arguments,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| ffdf6c413279651bc9aa8dc6accfbe786f88ce3d.cu | /***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iomanip>
#include <ios>
#include <iostream>
#include <stdexcept>
#include "cutlass/core_io.h"
#include "cublas_helpers.h"
#include "gemm_operation_profiler.h"
#include "gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
GemmOperationProfiler::GemmOperationProfiler(Options const& options)
: OperationProfiler(
options, library::OperationKind::kGemm,
{
{ArgumentTypeID::kEnumerated,
{"gemm_kind"},
"Variant of GEMM (gemm, batched, array, universal, "
"planar_complex, planar_complex_array)"},
{ArgumentTypeID::kInteger,
{"m", "problem-size::m"},
"M dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger,
{"n", "problem-size::n"},
"N dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger,
{"k", "problem-size::k"},
"K dimension of the GEMM problem space"},
{ArgumentTypeID::kTensor,
{"A"},
"Tensor storing the A operand"},
{ArgumentTypeID::kTensor,
{"B"},
"Tensor storing the B operand"},
{ArgumentTypeID::kTensor,
{"C"},
"Tensor storing the C operand"},
{ArgumentTypeID::kScalar,
{"alpha", "epilogue::alpha"},
"Epilogue scalar alpha"},
{ArgumentTypeID::kScalar,
{"beta", "epilogue::beta"},
"Epilogue scalar beta"},
{ArgumentTypeID::kInteger,
{"split_k_slices", "split-k-slices"},
"Number of partitions of K dimension"},
{ArgumentTypeID::kInteger,
{"batch_count", "batch-count"},
"Number of GEMMs computed in one batch"},
},
{library::Provider::kCUBLAS}) {
description_ =
" General matrix-matrix product. D = alpha * A*B + beta * C";
}
/// Destructor
GemmOperationProfiler::~GemmOperationProfiler() {}
/// Prints usage statement for the math function
void GemmOperationProfiler::print_usage(std::ostream& out) const {
out << "GEMM"
<< "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void GemmOperationProfiler::print_examples(std::ostream& out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size:\n"
<< " $ cutlass_profiler --operation=Gemm --m=1024 --n=1024 --k=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=Gemm --m=1024:4096:256 "
"--n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=Gemm "
"--accumulator-type=f16,f32\n\n"
<< "Run when A is f16 with column-major and B is any datatype with "
"row-major (For column major, use column, col, or n. For row major "
"use, row or t):\n"
<< " $ cutlass_profiler --operation=Gemm --A=f16:column --B=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=Gemm --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=Gemm "
"--dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=Gemm "
"--dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace "
"if results are incorrect (note that --cta-tile::k=32 is default "
"cta-tile size):\n"
<< " $ cutlass_profiler --operation=Gemm --cta_m=256 --cta_n=128 "
"--cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to gemm kernels with a quick functional test and "
"save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=Gemm \\ \n"
<< " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status GemmOperationProfiler::GemmProblem::parse(
library::GemmDescription const& operation_desc,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
if (!arg_as_int(this->m, "m", problem_space, problem)) {
// default value
this->m = 1024;
}
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->k, "k", problem_space, problem)) {
// default value
this->k = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space,
problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space,
problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.B, "B", problem_space,
problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space,
problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(this->alpha, operation_desc.element_epilogue, "alpha",
problem_space, problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue,
1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(this->beta, operation_desc.element_epilogue, "beta",
problem_space, problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->m), int(this->k)})
.front();
this->ldb = DeviceAllocation::get_packed_layout(
operation_desc.B.layout, {int(this->k), int(this->n)})
.front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->m), int(this->n)})
.front();
return Status::kSuccess;
}
/// Initializes a performance result
void GemmOperationProfiler::GemmProblem::initialize_result(
PerformanceResult& result,
library::GemmDescription const& operation_desc,
ProblemSpace const& problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "gemm_kind", problem_space,
library::to_string(operation_desc.gemm_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) +
":" + library::to_string(operation_desc.A.layout));
set_argument(result, "B", problem_space,
std::string(library::to_string(operation_desc.B.element)) +
":" + library::to_string(operation_desc.B.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) +
":" + library::to_string(operation_desc.C.layout));
set_argument(result, "m", problem_space, m);
set_argument(result, "n", problem_space, n);
set_argument(result, "k", problem_space, k);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status GemmOperationProfiler::initialize_configuration(
Options const& options, PerformanceReport& report,
DeviceContext& device_context, library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
library::GemmDescription const& operation_desc =
static_cast<library::GemmDescription const&>(
operation->description());
if (operation_desc.gemm_kind != library::GemmKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
gemm_workspace_.configuration.problem_size.m() = int(problem_.m);
gemm_workspace_.configuration.problem_size.n() = int(problem_.n);
gemm_workspace_.configuration.problem_size.k() = int(problem_.k);
gemm_workspace_.configuration.lda = problem_.lda;
gemm_workspace_.configuration.ldb = problem_.ldb;
gemm_workspace_.configuration.ldc = problem_.ldc;
gemm_workspace_.configuration.ldd = problem_.ldc;
// gemm_workspace_.configuration.split_k_slices =
// int(problem_.split_k_slices);
gemm_workspace_.configuration.batch_count = int(problem_.split_k_slices);
gemm_workspace_.arguments.A = nullptr;
gemm_workspace_.arguments.B = nullptr;
gemm_workspace_.arguments.C = nullptr;
gemm_workspace_.arguments.D = nullptr;
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc,
problem_space);
return operation->can_implement(&gemm_workspace_.configuration,
&gemm_workspace_.arguments);
}
/// Initializes the performance result
void GemmOperationProfiler::initialize_result_(
PerformanceResult& result, Options const& options,
library::GemmDescription const& operation_desc,
ProblemSpace const& problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc,
problem_space);
// Input bytes read and Output bytes written for the gemm problem
result.bytes = int64_t(library::sizeof_bits(operation_desc.A.element) *
problem_.m / 8) *
problem_.k +
int64_t(library::sizeof_bits(operation_desc.B.element) *
problem_.n / 8) *
problem_.k +
int64_t(library::sizeof_bits(operation_desc.C.element) *
problem_.m / 8) *
problem_.n;
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(problem_.beta.begin(), problem_.beta.end(),
[](uint8_t i) { return i == 0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
result.bytes += int64_t(library::sizeof_bits(operation_desc.C.element) *
problem_.m / 8) *
problem_.n;
}
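    // flops = 2 * (m*n*k + m*n): multiply-accumulates for the product plus the epilogue update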
result.flops = 2 * (problem_.m * problem_.n * problem_.k +
problem_.m * problem_.n);
result.runtime = 0;
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
result.flops *= 4;
break;
default:
break;
}
}
/// Initializes workspace
Status GemmOperationProfiler::initialize_workspace(
Options const& options, PerformanceReport& report,
DeviceContext& device_context, library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
library::GemmDescription const& operation_desc =
static_cast<library::GemmDescription const&>(
operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
gemm_workspace_.A = device_context.allocate_tensor(
options, "A", operation_desc.A.element, operation_desc.A.layout,
{int(problem_.m), int(problem_.k)}, {int(problem_.lda)});
gemm_workspace_.B = device_context.allocate_tensor(
options, "B", operation_desc.B.element, operation_desc.B.layout,
{int(problem_.k), int(problem_.n)}, {int(problem_.ldb)});
gemm_workspace_.C = device_context.allocate_tensor(
options, "C", operation_desc.C.element, operation_desc.C.layout,
{int(problem_.m), int(problem_.n)}, {int(problem_.ldc)});
gemm_workspace_.Computed = device_context.allocate_tensor(
"D", operation_desc.C.element, operation_desc.C.layout,
{int(problem_.m), int(problem_.n)}, {int(problem_.ldc)});
gemm_workspace_.Reference = device_context.allocate_tensor(
"Reference", operation_desc.C.element, operation_desc.C.layout,
{int(problem_.m), int(problem_.n)}, {int(problem_.ldc)});
gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(
&gemm_workspace_.configuration);
gemm_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(
&gemm_workspace_.configuration);
gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8,
workspace_size);
status = operation->initialize(
&gemm_workspace_.configuration,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kGemm;
results_.back().disposition = Disposition::kNotRun;
for (auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool GemmOperationProfiler::verify_cutlass(
Options const& options, PerformanceReport& report,
DeviceContext& device_context, library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&gemm_workspace_.arguments, gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran, but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const& gemm_desc =
static_cast<library::GemmDescription const&>(
operation->description());
if (cublas_satisfies(gemm_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(options, report, device_context, operation,
problem_space, problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] =
Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for (auto& m : results_.back().verification_map) {
if (m.second == Disposition::kFailed ||
m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if (!is_any_verification_run_passed &&
m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if (is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool GemmOperationProfiler::verify_with_cublas_(
Options const& options, PerformanceReport& report,
DeviceContext& device_context, library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
#if CUTLASS_ENABLE_CUBLAS
library::GemmDescription const& gemm_desc =
static_cast<library::GemmDescription const&>(
operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
cublasStatus_t status = handle.get_cublas_create_status();
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] =
Disposition::kFailed;
return true;
}
std::vector<cublasGemmAlgo_t> algorithms;
detail::select_cublas_algorithms(algorithms, options, gemm_desc);
if (algorithms.empty()) {
// no algorithm selected
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublasGemmEx()
//
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.Reference->data();
gemm_workspace_.arguments.D = gemm_workspace_.Reference->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode =
library::ScalarPointerMode::kHost;
detail::cublasGemmExDispatcher gemm_op(
gemm_desc, gemm_workspace_.configuration,
gemm_workspace_.arguments, algorithms.front());
if (gemm_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] =
Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = gemm_op(handle);
// Handle errors
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] =
Disposition::kFailed;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] =
compare_tensors(options, *gemm_workspace_.Computed,
*gemm_workspace_.Reference);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] ==
Disposition::kIncorrect) {
save_workspace(device_context, options, gemm_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
} catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] =
Disposition::kFailed;
}
#endif
// Return true means continue profiling
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool GemmOperationProfiler::profile(Options const& options,
PerformanceReport& report,
DeviceContext& device_context,
library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode =
library::ScalarPointerMode::kHost;
results_.back().status =
profile_cutlass_(results_.back().runtime, options, operation,
&gemm_workspace_.arguments,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
93931a894faa92da4e2b3b9e4f1890b066c34301.hip | // !!! This is a file automatically generated by hipify!!!
#include"struct.h"
#include "init_cuda.h"
#include "cuda_aid.cuh"
/* The number of grid points should be divisible by the number of GPUs */
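// P2P requires compute capability >= 2 (Fermi or newer); on Windows the TCC driver must be in use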
inline bool IsGPUCapableP2P(hipDeviceProp_t *pProp)
{
#ifdef _WIN32
return (bool)(pProp->tccDriver ? true : false);
#else
return (bool)(pProp->major >= 2);
#endif
}
// CUDA includes
int prime[168]={2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997};
// find the largest factor of an integer that does not exceed 1024 (the maximum number of threads per CUDA block)
extern int factor_decompose_1024(GPU_INFO *gpu_info,long N){
long temp;
temp=N;
int decom[10000],index=0;
for(int i=0;i<168;i++){
while(temp%prime[i]==0){
temp=temp/prime[i];
decom[index++]=prime[i];
};
}
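// decom[0..index-1] now holds the prime factors of N (only primes up to 997 are tried)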
int elements[10000];
//printf("index=%d\n",index);
for(int i=0;i<index;i++) elements[i]=0;
int temp_1024=1;
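// enumerate products of subsets of up to 10 prime factors and keep the largest product not exceeding 1024
// e.g. for N = 200*200*200 = 2^9 * 5^6 the result is 1000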
for(int j=1;j<=10;j++){
elements[j-1]=1;
const size_t N_t = index;
std::vector<int> selectors(elements, elements + N_t);
do{
int combo=1;
for (size_t i = 0; i < selectors.size(); ++i){
if (selectors[i]){
//std::cout << decom[i] << ", ";
combo*=decom[i];
}
}
if(combo>temp_1024&&combo<=1024) temp_1024=combo;
if(combo==1024) break;
} while (prev_permutation(selectors.begin(), selectors.end()));
}
return temp_1024;
}
extern void factor_decompose(GPU_INFO *gpu_info,long N, int *Nx_a,int *Ny_a,int *Nz_a){
int Nx,Ny,Nz;
long temp;
temp=N;
int decom[10000],index=0;
for(int i=0;i<168;i++){
while(temp%prime[i]==0){
temp=temp/prime[i];
decom[index++]=prime[i];
};
}
//printf("%ld prime is ",N);
//for(int i=0;i<index;i++) printf(" %d ",decom[i]);
//printf("\n");
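// a nonzero remainder means N has a prime factor larger than 997, which this decomposition cannot handle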
if(temp!=1) {
printf("please give a \"good\" polymer number!\n");
exit(0);
}
if(index==1) {
Nx=N;
Ny=1;
Nz=1;
}
else if(index==2){
Nz=1;//decom[index-1]
Ny=decom[0];
Nx=decom[1];
//printf("%d %d\n",Nx,Ny);
}
else if(index>2){
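// three or more factors: pair factors from opposite ends of the (non-decreasing) factor list so Nx, Ny, Nz stay roughly balanced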
Nx=1;
Ny=1;
Nz=1;
if(index%2==0){
Nz=decom[index-1]*decom[0];
if((index-2)%4==0){
for(int i=0;i<(index-2)/4;i++){
Nx*=decom[i+1]*decom[index-1-i-1];
Ny*=decom[(index-2)/4+1+i]*decom[index-1-(index-2)/4-1-i];
}
//printf("%d %d %d\n",Nx,Ny,Nz);
}
else if((index-2)==2){
Ny=decom[1];
Nx=decom[2];
//printf("%d %d %d\n",Nx,Ny,Nz);
}
else {
Nz*=decom[1]*decom[2];
for(int i=0;i<(index-4)/4;i++){
Nx*=decom[i+3]*decom[index-1-i-1];
Ny*=decom[(index-2)/4+3+i]*decom[index-1-(index-2)/4-1-i];
}
//printf("%d %d %d\n",Nx,Ny,Nz);
}
}
else{
Nz=decom[index-1];
if((index-1)%4==0){
for(int i=0;i<(index-1)/4;i++){
Nx*=decom[i]*decom[index-1-i-1];
Ny*=decom[(index-1)/4+i]*decom[index-1-(index-1)/4-i-1];
}
//printf("%d: %d %d %d\n",index,Nx,Ny,Nz);
}
else if((index-1)==2){
Ny=decom[0];
Nx=decom[1];
//printf("%d %d %d\n",Nx,Ny,Nz);
}
else {
Nz*=decom[0]*decom[1];
for(int i=0;i<(index-3)/4;i++){
Nx*=decom[i*2+2]*decom[index-1-i*2-1];
Ny*=decom[i*2+3]*decom[index-3-i*2];
}
//printf("%d %d %d\n",Nx,Ny,Nz);
}
}
}
if(N==1) {
Nx=1;
Ny=1;
Nz=1;
}
if(Nx*Ny*Nz==N) {
*Nx_a=Nx;
*Ny_a=Ny;
*Nz_a=Nz;
}
else {
printf("Error Nx %d *Ny %d *Nz %d!= N %ld\n",Nx,Ny,Nz,N);
exit(0);
}
}
extern void init_cuda(GPU_INFO *gpu_info,int display){
int gpu_count;
int i,j;
hipDeviceProp_t prop[64];
int *gpuid;
int can_access_peer_0_1;
gpu_count=0;
gpuid=(int*)malloc(sizeof(int)*gpu_info->GPU_N);
//checkCudaErrors(hipGetDeviceCount(&gpu_info->GPU_N));
//if(gpu_info->GPU_N==8) gpu_info->GPU_N=4;//! Set the number of GPU
for (i=0; i < gpu_info->GPU_N; i++){
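// for each selected GPU: record its properties, then enable peer-to-peer access to every other selected GPU when supported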
checkCudaErrors(hipGetDeviceProperties(&gpu_info->prop[i], i));
checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[i]));
// Only boards based on Fermi can support P2P
gpuid[gpu_count++] = gpu_info->whichGPUs[i];
if(display==1){
printf("> GPU%d = \"%15s\" %s capable of Peer-to-Peer (P2P)\n", i, gpu_info->prop[i].name, (IsGPUCapableP2P(&prop[i]) ? "IS " : "NOT"));
printf("maxThreadsDim %d %d %d\n",gpu_info->prop[i].maxThreadsDim[0],gpu_info->prop[i].maxThreadsDim[1],gpu_info->prop[i].maxThreadsDim[2]);
printf("maxThreadsPerBlock %d\n",gpu_info->prop[i].maxThreadsPerBlock);
printf("> GPU%d = \"%15s\" %s capable of Peer-to-Peer (P2P)\n", i, prop[i].name, (IsGPUCapableP2P(&prop[i]) ? "IS " : "NOT"));
}
//gpu_info->thread=gpu_info->prop[i].maxThreadsPerBlock;
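// Enable bidirectional peer-to-peer access between every pair of GPUs that supports it,
// presumably so that later device-to-device transfers avoid staging through the host.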
for(j=0;j<gpu_info->GPU_N;j++){
if(i!=j){
checkCudaErrors(hipDeviceCanAccessPeer(&can_access_peer_0_1, gpu_info->whichGPUs[i], gpu_info->whichGPUs[j]));
if(can_access_peer_0_1) {
checkCudaErrors(hipDeviceEnablePeerAccess(gpu_info->whichGPUs[j], 0));
}
}
}
}
}
extern void initialize_cufft(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){
int Dim[3];
int i,j,k;
int rank = 3;
int Nx=cufft_info->Nx;
int Ny=cufft_info->Ny;
int Nz=cufft_info->Nz;
long NxNyNz=Nx*Ny*Nz,ijk;
cufft_info->NxNyNz=NxNyNz;
cufft_info->Nxh1=Nx/2+1;
cufft_info->Nxh1NyNz=cufft_info->Nxh1*Ny*Nz;
cufft_info->batch=cufft_info->Num_SCFT/gpu_info->GPU_N; //! number of SCFT per GPU
int batch=cufft_info->batch;
gpu_info->thread=factor_decompose_1024(gpu_info,cufft_info->Nx*cufft_info->Ny*cufft_info->Nz);
gpu_info->thread_sur=factor_decompose_1024(gpu_info,cufft_info->Nxh1*cufft_info->Ny*cufft_info->Nz);
//printf("gpu_info->thread_sur %d\n",gpu_info->thread_sur);
double dx,dy,dz;
char comment[200];
double ksq,ds0;
ds0=cufft_info->ds0;
cufft_info->ds2=cufft_info->ds0/2;
cufft_info->dfB=1-cufft_info->fA;
cufft_info->NsA = ((int)(cufft_info->fA/cufft_info->ds0+1.0e-8));
cufft_info->dNsB = ((int)(cufft_info->dfB/cufft_info->ds0+1.0e-8));
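// NsA and dNsB are (int)(fA/ds0) and (int)((1-fA)/ds0) with a small epsilon guard,
// presumably the number of chain-contour steps for the A and B blocks.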
//!----------- Initialize GPU memory settings. ------------------------------------------------------
int nGPUs = gpu_info->GPU_N;
cufft_info->kxyzdz_cu=(double **)malloc(sizeof(double*)*nGPUs);
printf("Wonderful We have successfully initialized GPU setting.\n");
//-----------! Initialize CUFFT settings. ------------------------------------------------------
dim3 grid(cufft_info->Nx,cufft_info->Ny,cufft_info->Nz),block(1,1,1);
Dim[0]=Nz;Dim[1]=Ny;Dim[2]=Nx;
cufft_info->plan_forward=(hipfftHandle *)malloc(sizeof(hipfftHandle)*gpu_info->GPU_N);
cufft_info->plan_backward=(hipfftHandle *)malloc(sizeof(hipfftHandle)*gpu_info->GPU_N);
for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index]));
checkCudaErrors(hipfftCreate(&cufft_info->plan_forward[gpu_index]));
checkCudaErrors(hipfftCreate(&cufft_info->plan_backward[gpu_index]));
if(rank==3){
checkCudaErrors(hipfftPlanMany (&cufft_info->plan_forward[gpu_index], rank, Dim, NULL, 1, 1, NULL, 1, 1, HIPFFT_D2Z, batch));
checkCudaErrors(hipfftPlanMany (&cufft_info->plan_backward[gpu_index], rank, Dim, NULL, 1, 1, NULL, 1, 1, HIPFFT_Z2D, batch));
}
else if(rank==2) {
checkCudaErrors(hipfftPlanMany (&cufft_info->plan_forward[gpu_index], rank, Dim, NULL, 1, 1, NULL, 1, 1, HIPFFT_D2Z, batch));
checkCudaErrors(hipfftPlanMany (&cufft_info->plan_backward[gpu_index], rank, Dim, NULL, 1, 1, NULL, 1, 1, HIPFFT_Z2D, batch));
}
}
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed [ ]");
printf("Wonderful We have successfully initialized cufft setting.\n");
//-----------! Allocate and initialize arrays on the CPU. ------------------------------------------------------
cufft_info->wa=(double*)malloc(sizeof(double)*NxNyNz*cufft_info->Num_SCFT);
cufft_info->wb=(double*)malloc(sizeof(double)*NxNyNz*cufft_info->Num_SCFT);
cufft_info->pha=(double*)malloc(sizeof(double)*NxNyNz*cufft_info->Num_SCFT);
cufft_info->phb=(double*)malloc(sizeof(double)*NxNyNz*cufft_info->Num_SCFT);
cufft_info->kx=(double *)malloc(sizeof(double)*Nx);
cufft_info->ky=(double *)malloc(sizeof(double)*Ny);
cufft_info->kz=(double *)malloc(sizeof(double)*Nz);
cufft_info->dx=cufft_info->lx/(double)Nx;
cufft_info->dy=cufft_info->ly/(double)Ny;
cufft_info->dz=cufft_info->lz/(double)Nz;
dx=cufft_info->dx;
dy=cufft_info->dy;
dz=cufft_info->dz;
cufft_info->kxyzdz=(double *)malloc(sizeof(double)*NxNyNz);
for(i=0;i<=Nx/2-1;i++)cufft_info->kx[i]=2*Pi*i*1.0/Nx/dx;
for(i=Nx/2;i<Nx;i++)cufft_info->kx[i]=2*Pi*(i-Nx)*1.0/dx/Nx;
for(i=0;i<Nx;i++)cufft_info->kx[i]*=cufft_info->kx[i];
for(i=0;i<=Ny/2-1;i++)cufft_info->ky[i]=2*Pi*i*1.0/Ny/dy;
for(i=Ny/2;i<Ny;i++)cufft_info->ky[i]=2*Pi*(i-Ny)*1.0/dy/Ny;
for(i=0;i<Ny;i++)cufft_info->ky[i]*=cufft_info->ky[i];
for(i=0;i<=Nz/2-1;i++)cufft_info->kz[i]=2*Pi*i*1.0/Nz/dz;
for(i=Nz/2;i<Nz;i++)cufft_info->kz[i]=2*Pi*(i-Nz)*1.0/dz/Nz;
for(i=0;i<Nz;i++)cufft_info->kz[i]*=cufft_info->kz[i];
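// Precompute exp(-ds0*|k|^2) at every grid point; kx/ky/kz already hold squared wavenumber
// components, so ksq below is |k|^2. This is presumably the spectral diffusion factor used
// in the pseudo-spectral chain-propagator step.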
for(k=0;k<Nz;k++)
for(j=0;j<Ny;j++)
for(i=0;i<Nx;i++)
{
ijk=(long)((k*Ny+j)*Nx+i);
ksq=cufft_info->kx[i]+cufft_info->ky[j]+cufft_info->kz[k];
cufft_info->kxyzdz[ijk]=exp(-ds0*ksq);
}
gpu_info->stream=(hipStream_t*)malloc( sizeof(hipStream_t)*gpu_info->GPU_N);
FILE *fp;
if(cufft_info->intag==1024){
for(int i=0;i<cufft_info->Num_SCFT;i++){
sprintf(comment,"phi_%d.dat",i+1);
if((fp=fopen(comment,"r"))==NULL){
printf("Configuration file %s does not exist; please check your directory.\n",comment);
exit(1); // avoid reading from a NULL FILE* below
}
fgets(comment,200,fp);
fgets(comment,200,fp);
for(long ijk=0;ijk<cufft_info->NxNyNz;ijk++){
fscanf(fp,"%lg %lg %lg %lg\n",&cufft_info->pha[ijk+i*NxNyNz],&cufft_info->phb[ijk+i*NxNyNz],&cufft_info->wa[ijk+i*NxNyNz],&cufft_info->wb[ijk+i*NxNyNz]);
}
fclose(fp);
}
}
cufft_info->wa_cu.resize(gpu_info->GPU_N);
cufft_info->wb_cu.resize(gpu_info->GPU_N);
cufft_info->pha_cu.resize(gpu_info->GPU_N);
cufft_info->phb_cu.resize(gpu_info->GPU_N);
cufft_info->qa_cu.resize(gpu_info->GPU_N);
cufft_info->qb_cu.resize(gpu_info->GPU_N);
cufft_info->qca_cu.resize(gpu_info->GPU_N);
cufft_info->qcb_cu.resize(gpu_info->GPU_N);
cufft_info->ql.resize(gpu_info->GPU_N);
cufft_info->ffl.resize(gpu_info->GPU_N);
cufft_info->wdz_cu.resize(gpu_info->GPU_N);
cufft_info->qInt_cu.resize(gpu_info->GPU_N);
cufft_info->device_in.resize(gpu_info->GPU_N);
cufft_info->device_out.resize(gpu_info->GPU_N);
printf("Wonderful We have successfully initialized CPU setting.\n");
//-----------! Allocate and initialize memory on each GPU. ------------------------------------------------------
for (i=0; i < gpu_info->GPU_N; i++){
checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[i]));
checkCudaErrors(hipStreamCreate(&gpu_info->stream[i]));
checkCudaErrors(hipfftSetStream(cufft_info->plan_forward[i], gpu_info->stream[i]));
checkCudaErrors(hipfftSetStream(cufft_info->plan_backward[i], gpu_info->stream[i]));
checkCudaErrors(hipMallocManaged((void**)&(cufft_info->kxyzdz_cu[i]), sizeof(double)* NxNyNz));
checkCudaErrors(hipMemcpy(cufft_info->kxyzdz_cu[i], cufft_info->kxyzdz,sizeof(double)*NxNyNz,hipMemcpyHostToDevice));
checkCudaErrors(hipMallocManaged(&(cufft_info->wa_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(hipMemcpy(cufft_info->wa_cu[i], cufft_info->wa+cufft_info->NxNyNz*batch*i,sizeof(double)*cufft_info->NxNyNz*batch,hipMemcpyHostToDevice));
checkCudaErrors(hipMallocManaged(&(cufft_info->wb_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(hipMemcpy(cufft_info->wb_cu[i], cufft_info->wb+cufft_info->NxNyNz*batch*i,sizeof(double)*cufft_info->NxNyNz*batch,hipMemcpyHostToDevice));
checkCudaErrors(hipMallocManaged(&(cufft_info->pha_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(hipMallocManaged(&(cufft_info->phb_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(hipMallocManaged(&(cufft_info->qInt_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(hipMallocManaged(&(cufft_info->wdz_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(hipMallocManaged(&(cufft_info->qa_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch*(cufft_info->NsA+1)));//cufft_info->NsA (cufft_info->NsA+1)*
checkCudaErrors(hipMallocManaged(&(cufft_info->qca_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch*(cufft_info->NsA+1)));
checkCudaErrors(hipMallocManaged(&(cufft_info->qb_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch*(cufft_info->dNsB+1)));
checkCudaErrors(hipMallocManaged(&(cufft_info->qcb_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch*(cufft_info->dNsB+1)));
checkCudaErrors(hipMallocManaged(&(cufft_info->ql[i]), sizeof(double)*batch));
checkCudaErrors(hipMallocManaged(&(cufft_info->ffl[i]), sizeof(double)*batch));
checkCudaErrors(hipMallocManaged(&(cufft_info->device_in[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(hipMallocManaged(&(cufft_info->device_out[i]), sizeof(hipfftDoubleComplex)* cufft_info->Nxh1NyNz*batch));
checkCudaErrors(hipDeviceSynchronize());
}
printf("Wonderful We have successfully initialized all the data.\n");
}
extern void finalize_cufft(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){
int i,j;
int can_access_peer_0_1;
//! free memory on the GPUs
for (i=0; i < gpu_info->GPU_N; i++){
checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[i]));
checkCudaErrors(hipfftDestroy(cufft_info->plan_forward[i]));
checkCudaErrors(hipfftDestroy(cufft_info->plan_backward[i]));
checkCudaErrors(hipFree(cufft_info->kxyzdz_cu[i]));
checkCudaErrors(hipFree(cufft_info->qa_cu[i]));
checkCudaErrors(hipFree(cufft_info->qb_cu[i]));
checkCudaErrors(hipFree(cufft_info->qca_cu[i]));
checkCudaErrors(hipFree(cufft_info->qcb_cu[i]));
checkCudaErrors(hipFree(cufft_info->pha_cu[i]));
checkCudaErrors(hipFree(cufft_info->phb_cu[i]));
checkCudaErrors(hipFree(cufft_info->wa_cu[i]));
checkCudaErrors(hipFree(cufft_info->wb_cu[i]));
checkCudaErrors(hipFree(cufft_info->wdz_cu[i]));
checkCudaErrors(hipFree(cufft_info->device_in[i]));
checkCudaErrors(hipFree(cufft_info->device_out[i]));
for(j=0;j<gpu_info->GPU_N;j++){
if(i!=j){
checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[i]));
checkCudaErrors(hipDeviceCanAccessPeer(&can_access_peer_0_1, i, j));
if(can_access_peer_0_1) {
checkCudaErrors(hipDeviceDisablePeerAccess(gpu_info->whichGPUs[j]));
}// end if can_access_peer_0_1
}// end i!=j
}//! end loop j
hipDeviceSynchronize();
}//! end loop i
//! free memory on the CPU
free(cufft_info->wa);
free(cufft_info->wb);
free(cufft_info->pha);
free(cufft_info->phb);
free(cufft_info->kx);
free(cufft_info->ky);
free(cufft_info->kz);
free(gpu_info->stream);
free(cufft_info->kxyzdz);
free(gpu_info->whichGPUs);
printf("Wonderful We have successfully evaculate all the memery on GPU and CPU \n");
hipDeviceReset();
}
/*
extern void test(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){
int index;
long ijk;
long NxNyNz=cufft_info->NxNyNz;
long Nxh1NyNz=cufft_info->Nxh1NyNz;
dim3 gridDim(1,1,1),blockDim(1,1,1);
for(index=0;index<gpu_info->GPU_N;index++){///gpu_info->GPU_N
checkCudaErrors(hipSetDevice(index));
for(ijk=0;ijk<cufft_info->NxNyNz*cufft_info->batch;ijk++){
if(ijk<cufft_info->NxNyNz)
cufft_info->device_in[index][ijk]=ijk;
else
cufft_info->device_in[index][ijk]=ijk-NxNyNz;
}
checkCudaErrors(hipDeviceSynchronize());
}
for(index=0;index<gpu_info->GPU_N;index++){
checkCudaErrors(hipSetDevice(index));
checkCudaErrors(hipfftExecD2Z(cufft_info->plan_forward[index],cufft_info->device_in[index],cufft_info->device_out[index]));
}
for(index=0;index<gpu_info->GPU_N;index++){///gpu_info->GPU_N
checkCudaErrors(hipSetDevice(index));
checkCudaErrors(hipDeviceSynchronize());
}
getLastCudaError("Kernel execution failed [ ]");
for(index=0;index<4;index++)
//display_GPU_Complex_data<<<gridDim,blockDim>>>((hipfftDoubleComplex*)cufft_info->device_out[index],index);
for(ijk=0;ijk<10;ijk++)
printf("%g %g\n",cufft_info->device_out[index][ijk+Nxh1NyNz].x,cufft_info->device_out[index][ijk+Nxh1NyNz].y);
}
extern void com_to_com1d(GPU_INFO *gpu_info,data_assem *data_test){
hipfftHandle plan;
hipfftComplex *data_in,*data_out;
int BATCH=1;
hipMalloc((void**)&data_in,sizeof(hipfftComplex)*data_test->Nx);
checkCudaErrors(hipMalloc((void**)&data_out,sizeof(hipfftComplex)*data_test->Nx));
hipMemcpy(data_in,data_test->data_com_in,sizeof(hipfftComplex)*data_test->Nx,hipMemcpyHostToDevice);
checkCudaErrors(hipfftPlan1d(&plan,data_test->Nx,HIPFFT_C2C,BATCH));
hipfftExecC2C(plan,data_in,data_out,HIPFFT_FORWARD);
hipMemcpy(data_test->data_com_out,data_out,sizeof(hipfftComplex)*data_test->Nx,hipMemcpyDeviceToHost);
printf("dd %g %g\n",data_test->data_com_out[0].x,data_test->data_com_out[0].y);
hipFree(data_in);
hipFree(data_out);
hipfftDestroy(plan);
}
*/
/*
extern void D1_MultipleGPU(GPU_INFO *gpu_info,data_assem *data_test,int N){
hipfftHandle plan_input;
hipfftResult result;
result = hipfftCreate(&plan_input);
int nGPUs = 4, whichGPUs[4];
whichGPUs[0] = 0; whichGPUs[1] = 1;whichGPUs[2] = 2;whichGPUs[3] = 3;
dim3 gridDim(1,1),blockDim(10,10);
printf("grid size on x=%d y=%d z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("block size on x=%d y=%d z=%d\n",blockDim.x,blockDim.y,blockDim.z);
result = cufftXtSetGPUs (plan_input, nGPUs, whichGPUs);
if(result!=HIPFFT_SUCCESS){
printf("failed to set GPU\n");
}
size_t worksize[4]; hipfftComplex *host_data_input, *host_data_output;
int nx = 1024,ny=8,nz=8, batch = 1, rank = 3, n[3];
n[0] = nx;
n[1]=ny;
n[2]=nz;
int size_of_data = sizeof(hipfftComplex) * nx *ny*nz* batch;
host_data_input = (hipfftComplex*)malloc(size_of_data);
host_data_output = (hipfftComplex*)malloc(size_of_data);
printf("length is %d\n",nx);
//initialize_1d_data (nx, batch, rank, n, inembed, &istride, &idist, onembed, &ostride, &odist, host_data_input, host_data_output);
for(int i=0;i<nx*ny*nz;i++){
host_data_input[i].x=i;
host_data_input[i].y=0;
}
printf("finish initial\n");
checkCufft( hipfftMakePlanMany (plan_input, rank, n, NULL, 1, nx, NULL, 1, nx, HIPFFT_C2C, batch, worksize));
//result=hipfftMakePlan1d(plan_input, nx, HIPFFT_C2C, batch, worksize);
// cufftXtMalloc() - Malloc data on multiple GPUs
cudaLibXtDesc *device_data_input, *device_data_output;
result = cufftXtMalloc (plan_input, &device_data_input, CUFFT_XT_FORMAT_INPLACE);
if(result!=HIPFFT_SUCCESS){
printf("failed 1\n");
}
result = cufftXtMalloc (plan_input, &device_data_output, CUFFT_XT_FORMAT_INPLACE);
printf("%zu %zu \n", device_data_input->descriptor->size[0],device_data_input->descriptor->size[1]);
printf("%zu %zu \n", worksize[0],worksize[1]);
hipSetDevice(0);
//display_GPU_Complex_data<<<1,10>>>((hipfftDoubleComplex*)device_data_input->descriptor->data[0]);
if(result!=HIPFFT_SUCCESS){
printf("failed 2\n");
}
// // cufftXtMemcpy() - Copy data from host to multiple GPUs
result = cufftXtMemcpy (plan_input, device_data_input, host_data_input, CUFFT_COPY_HOST_TO_DEVICE);
// // cufftXtExecDescriptorC2C() - Execute FFT on multiple GPUs
//hipSetDevice(0);
result = cufftXtExecDescriptorC2C (plan_input, device_data_input, device_data_input, HIPFFT_FORWARD);
printf("finish memcpy \n");
// // cufftXtMemcpy() - Copy the data to natural order on GPUs
result = cufftXtMemcpy (plan_input, device_data_output, device_data_input, CUFFT_COPY_DEVICE_TO_DEVICE);
hipSetDevice(0);
//display_GPU_Complex_data<<<gridDim,blockDim>>>((hipfftComplex*)device_data_output->descriptor->data[0],N);
hipDeviceSynchronize();
if(result!=HIPFFT_SUCCESS){
printf("failed copy data from device to device\n");
}
printf("problem 1\n");
// // cufftXtMemcpy() - Copy natural order data from multiple GPUs to host
result = cufftXtMemcpy (plan_input, host_data_output, device_data_input, CUFFT_COPY_DEVICE_TO_HOST);
for(int i=0;i<8;i++){
printf("%g %g\n",host_data_output[i].x,host_data_output[i].y);
}
// // Print output and check results int output_return = output_1d_results (nx, batch, host_data_input, host_data_output); //
// cufftXtFree() - Free GPU memory
result = cufftXtFree(device_data_input);
result = cufftXtFree(device_data_output);
// // hipfftDestroy() - Destroy FFT plan
result = hipfftDestroy(plan_input);
free(host_data_input);
free(host_data_output);
}
*/
/*
FILE *dp;
double *kxkykz,testD;
kxkykz=(double *)malloc(sizeof(double)*cufft_info->NxNyNz);
dp=fopen("kxyzdz.dat","r");
testD=0;
for(ijk=0;ijk<cufft_info->NxNyNz;ijk++){
fscanf(dp,"%lg\n",&kxkykz[ijk]);
testD+=(kxkykz[ijk]-cufft_info->kxyzdz[ijk])*(kxkykz[ijk]-cufft_info->kxyzdz[ijk]);
}
printf("compare %g\n",testD);
*/
| 93931a894faa92da4e2b3b9e4f1890b066c34301.cu |
#include"struct.h"
#include "init_cuda.h"
#include "cuda_aid.cuh"
/* The number of grid points should be divisible by the number of GPUs */
inline bool IsGPUCapableP2P(cudaDeviceProp *pProp)
{
#ifdef _WIN32
return (bool)(pProp->tccDriver ? true : false);
#else
return (bool)(pProp->major >= 2);
#endif
}
// CUDA includes
int prime[168]={2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997};
// find the largest factor of an integer that does not exceed 1024 (the maximum number of threads per block in CUDA)
extern int factor_decompose_1024(GPU_INFO *gpu_info,long N){
long temp;
temp=N;
int decom[10000],index=0;
for(int i=0;i<168;i++){
while(temp%prime[i]==0){
temp=temp/prime[i];
decom[index++]=prime[i];
};
}
int elements[10000];
//printf("index=%d\n",index);
for(int i=0;i<index;i++) elements[i]=0;
int temp_1024=1;
for(int j=1;j<=10;j++){
elements[j-1]=1;
const size_t N_t = index;
std::vector<int> selectors(elements, elements + N_t);
do{
int combo=1;
for (size_t i = 0; i < selectors.size(); ++i){
if (selectors[i]){
//std::cout << decom[i] << ", ";
combo*=decom[i];
}
}
if(combo>temp_1024&&combo<=1024) temp_1024=combo;
if(combo==1024) break;
} while (prev_permutation(selectors.begin(), selectors.end()));
}
return temp_1024;
}
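// factor_decompose_1024() above returns the largest divisor of N that does not exceed 1024
// (e.g. 96 -> 96, 2048 -> 1024); it is used later to set gpu_info->thread, presumably the per-block
// thread count. factor_decompose() below splits N into Nx*Ny*Nz from N's prime factorization,
// pairing small and large prime factors so the three dimensions stay roughly balanced.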
extern void factor_decompose(GPU_INFO *gpu_info,long N, int *Nx_a,int *Ny_a,int *Nz_a){
int Nx,Ny,Nz;
long temp;
temp=N;
int decom[10000],index=0;
for(int i=0;i<168;i++){
while(temp%prime[i]==0){
temp=temp/prime[i];
decom[index++]=prime[i];
};
}
//printf("%ld prime is ",N);
//for(int i=0;i<index;i++) printf(" %d ",decom[i]);
//printf("\n");
if(temp!=1) {
printf("please give a \"good\" polymer number!\n");
exit(0);
}
if(index==1) {
Nx=N;
Ny=1;
Nz=1;
}
else if(index==2){
Nz=1;//decom[index-1]
Ny=decom[0];
Nx=decom[1];
//printf("%d %d\n",Nx,Ny);
}
else if(index>2){
Nx=1;
Ny=1;
Nz=1;
if(index%2==0){
Nz=decom[index-1]*decom[0];
if((index-2)%4==0){
for(int i=0;i<(index-2)/4;i++){
Nx*=decom[i+1]*decom[index-1-i-1];
Ny*=decom[(index-2)/4+1+i]*decom[index-1-(index-2)/4-1-i];
}
//printf("%d %d %d\n",Nx,Ny,Nz);
}
else if((index-2)==2){
Ny=decom[1];
Nx=decom[2];
//printf("%d %d %d\n",Nx,Ny,Nz);
}
else {
Nz*=decom[1]*decom[2];
for(int i=0;i<(index-4)/4;i++){
Nx*=decom[i+3]*decom[index-1-i-1];
Ny*=decom[(index-2)/4+3+i]*decom[index-1-(index-2)/4-1-i];
}
//printf("%d %d %d\n",Nx,Ny,Nz);
}
}
else{
Nz=decom[index-1];
if((index-1)%4==0){
for(int i=0;i<(index-1)/4;i++){
Nx*=decom[i]*decom[index-1-i-1];
Ny*=decom[(index-1)/4+i]*decom[index-1-(index-1)/4-i-1];
}
//printf("%d: %d %d %d\n",index,Nx,Ny,Nz);
}
else if((index-1)==2){
Ny=decom[0];
Nx=decom[1];
//printf("%d %d %d\n",Nx,Ny,Nz);
}
else {
Nz*=decom[0]*decom[1];
for(int i=0;i<(index-3)/4;i++){
Nx*=decom[i*2+2]*decom[index-1-i*2-1];
Ny*=decom[i*2+3]*decom[index-3-i*2];
}
//printf("%d %d %d\n",Nx,Ny,Nz);
}
}
}
if(N==1) {
Nx=1;
Ny=1;
Nz=1;
}
if(Nx*Ny*Nz==N) {
*Nx_a=Nx;
*Ny_a=Ny;
*Nz_a=Nz;
}
else {
printf("Error Nx %d *Ny %d *Nz %d!= N %ld\n",Nx,Ny,Nz,N);
exit(0);
}
}
extern void init_cuda(GPU_INFO *gpu_info,int display){
int gpu_count;
int i,j;
cudaDeviceProp prop[64];
int *gpuid;
int can_access_peer_0_1;
gpu_count=0;
gpuid=(int*)malloc(sizeof(int)*gpu_info->GPU_N);
//checkCudaErrors(cudaGetDeviceCount(&gpu_info->GPU_N));
//if(gpu_info->GPU_N==8) gpu_info->GPU_N=4;//! Set the number of GPU
for (i=0; i < gpu_info->GPU_N; i++){
checkCudaErrors(cudaGetDeviceProperties(&gpu_info->prop[i], i));
checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[i]));
// Only boards based on Fermi can support P2P
gpuid[gpu_count++] = gpu_info->whichGPUs[i];
if(display==1){
printf("> GPU%d = \"%15s\" %s capable of Peer-to-Peer (P2P)\n", i, gpu_info->prop[i].name, (IsGPUCapableP2P(&prop[i]) ? "IS " : "NOT"));
printf("maxThreadsDim %d %d %d\n",gpu_info->prop[i].maxThreadsDim[0],gpu_info->prop[i].maxThreadsDim[1],gpu_info->prop[i].maxThreadsDim[2]);
printf("maxThreadsPerBlock %d\n",gpu_info->prop[i].maxThreadsPerBlock);
printf("> GPU%d = \"%15s\" %s capable of Peer-to-Peer (P2P)\n", i, prop[i].name, (IsGPUCapableP2P(&prop[i]) ? "IS " : "NOT"));
}
//gpu_info->thread=gpu_info->prop[i].maxThreadsPerBlock;
for(j=0;j<gpu_info->GPU_N;j++){
if(i!=j){
checkCudaErrors(cudaDeviceCanAccessPeer(&can_access_peer_0_1, gpu_info->whichGPUs[i], gpu_info->whichGPUs[j]));
if(can_access_peer_0_1) {
checkCudaErrors(cudaDeviceEnablePeerAccess(gpu_info->whichGPUs[j], 0));
}
}
}
}
}
extern void initialize_cufft(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){
int Dim[3];
int i,j,k;
int rank = 3;
int Nx=cufft_info->Nx;
int Ny=cufft_info->Ny;
int Nz=cufft_info->Nz;
long NxNyNz=Nx*Ny*Nz,ijk;
cufft_info->NxNyNz=NxNyNz;
cufft_info->Nxh1=Nx/2+1;
cufft_info->Nxh1NyNz=cufft_info->Nxh1*Ny*Nz;
cufft_info->batch=cufft_info->Num_SCFT/gpu_info->GPU_N; //! number of SCFT per GPU
int batch=cufft_info->batch;
gpu_info->thread=factor_decompose_1024(gpu_info,cufft_info->Nx*cufft_info->Ny*cufft_info->Nz);
gpu_info->thread_sur=factor_decompose_1024(gpu_info,cufft_info->Nxh1*cufft_info->Ny*cufft_info->Nz);
//printf("gpu_info->thread_sur %d\n",gpu_info->thread_sur);
double dx,dy,dz;
char comment[200];
double ksq,ds0;
ds0=cufft_info->ds0;
cufft_info->ds2=cufft_info->ds0/2;
cufft_info->dfB=1-cufft_info->fA;
cufft_info->NsA = ((int)(cufft_info->fA/cufft_info->ds0+1.0e-8));
cufft_info->dNsB = ((int)(cufft_info->dfB/cufft_info->ds0+1.0e-8));
//!----------- Initialize GPU memory settings. ------------------------------------------------------
int nGPUs = gpu_info->GPU_N;
cufft_info->kxyzdz_cu=(double **)malloc(sizeof(double*)*nGPUs);
printf("Wonderful We have successfully initialized GPU setting.\n");
//-----------! Initialize CUFFT settings. ------------------------------------------------------
dim3 grid(cufft_info->Nx,cufft_info->Ny,cufft_info->Nz),block(1,1,1);
Dim[0]=Nz;Dim[1]=Ny;Dim[2]=Nx;
cufft_info->plan_forward=(cufftHandle *)malloc(sizeof(cufftHandle)*gpu_info->GPU_N);
cufft_info->plan_backward=(cufftHandle *)malloc(sizeof(cufftHandle)*gpu_info->GPU_N);
for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){
checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index]));
checkCudaErrors(cufftCreate(&cufft_info->plan_forward[gpu_index]));
checkCudaErrors(cufftCreate(&cufft_info->plan_backward[gpu_index]));
if(rank==3){
checkCudaErrors(cufftPlanMany (&cufft_info->plan_forward[gpu_index], rank, Dim, NULL, 1, 1, NULL, 1, 1, CUFFT_D2Z, batch));
checkCudaErrors(cufftPlanMany (&cufft_info->plan_backward[gpu_index], rank, Dim, NULL, 1, 1, NULL, 1, 1, CUFFT_Z2D, batch));
}
else if(rank==2) {
checkCudaErrors(cufftPlanMany (&cufft_info->plan_forward[gpu_index], rank, Dim, NULL, 1, 1, NULL, 1, 1, CUFFT_D2Z, batch));
checkCudaErrors(cufftPlanMany (&cufft_info->plan_backward[gpu_index], rank, Dim, NULL, 1, 1, NULL, 1, 1, CUFFT_Z2D, batch));
}
}
cudaDeviceSynchronize();
getLastCudaError("Kernel execution failed [ ]");
printf("Wonderful We have successfully initialized cufft setting.\n");
//-----------! Allocate and initialize arrays on the CPU. ------------------------------------------------------
cufft_info->wa=(double*)malloc(sizeof(double)*NxNyNz*cufft_info->Num_SCFT);
cufft_info->wb=(double*)malloc(sizeof(double)*NxNyNz*cufft_info->Num_SCFT);
cufft_info->pha=(double*)malloc(sizeof(double)*NxNyNz*cufft_info->Num_SCFT);
cufft_info->phb=(double*)malloc(sizeof(double)*NxNyNz*cufft_info->Num_SCFT);
cufft_info->kx=(double *)malloc(sizeof(double)*Nx);
cufft_info->ky=(double *)malloc(sizeof(double)*Ny);
cufft_info->kz=(double *)malloc(sizeof(double)*Nz);
cufft_info->dx=cufft_info->lx/(double)Nx;
cufft_info->dy=cufft_info->ly/(double)Ny;
cufft_info->dz=cufft_info->lz/(double)Nz;
dx=cufft_info->dx;
dy=cufft_info->dy;
dz=cufft_info->dz;
cufft_info->kxyzdz=(double *)malloc(sizeof(double)*NxNyNz);
for(i=0;i<=Nx/2-1;i++)cufft_info->kx[i]=2*Pi*i*1.0/Nx/dx;
for(i=Nx/2;i<Nx;i++)cufft_info->kx[i]=2*Pi*(i-Nx)*1.0/dx/Nx;
for(i=0;i<Nx;i++)cufft_info->kx[i]*=cufft_info->kx[i];
for(i=0;i<=Ny/2-1;i++)cufft_info->ky[i]=2*Pi*i*1.0/Ny/dy;
for(i=Ny/2;i<Ny;i++)cufft_info->ky[i]=2*Pi*(i-Ny)*1.0/dy/Ny;
for(i=0;i<Ny;i++)cufft_info->ky[i]*=cufft_info->ky[i];
for(i=0;i<=Nz/2-1;i++)cufft_info->kz[i]=2*Pi*i*1.0/Nz/dz;
for(i=Nz/2;i<Nz;i++)cufft_info->kz[i]=2*Pi*(i-Nz)*1.0/dz/Nz;
for(i=0;i<Nz;i++)cufft_info->kz[i]*=cufft_info->kz[i];
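// Precompute exp(-ds0*|k|^2) at every grid point; kx/ky/kz already hold squared wavenumber
// components, so ksq below is |k|^2. This is presumably the spectral diffusion factor used
// in the pseudo-spectral chain-propagator step.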
for(k=0;k<Nz;k++)
for(j=0;j<Ny;j++)
for(i=0;i<Nx;i++)
{
ijk=(long)((k*Ny+j)*Nx+i);
ksq=cufft_info->kx[i]+cufft_info->ky[j]+cufft_info->kz[k];
cufft_info->kxyzdz[ijk]=exp(-ds0*ksq);
}
gpu_info->stream=(cudaStream_t*)malloc( sizeof(cudaStream_t)*gpu_info->GPU_N);
FILE *fp;
if(cufft_info->intag==1024){
for(int i=0;i<cufft_info->Num_SCFT;i++){
sprintf(comment,"phi_%d.dat",i+1);
if((fp=fopen(comment,"r"))==NULL){
printf("Configuration file %s does not exist; please check your directory.\n",comment);
exit(1); // avoid reading from a NULL FILE* below
}
fgets(comment,200,fp);
fgets(comment,200,fp);
for(long ijk=0;ijk<cufft_info->NxNyNz;ijk++){
fscanf(fp,"%lg %lg %lg %lg\n",&cufft_info->pha[ijk+i*NxNyNz],&cufft_info->phb[ijk+i*NxNyNz],&cufft_info->wa[ijk+i*NxNyNz],&cufft_info->wb[ijk+i*NxNyNz]);
}
fclose(fp);
}
}
cufft_info->wa_cu.resize(gpu_info->GPU_N);
cufft_info->wb_cu.resize(gpu_info->GPU_N);
cufft_info->pha_cu.resize(gpu_info->GPU_N);
cufft_info->phb_cu.resize(gpu_info->GPU_N);
cufft_info->qa_cu.resize(gpu_info->GPU_N);
cufft_info->qb_cu.resize(gpu_info->GPU_N);
cufft_info->qca_cu.resize(gpu_info->GPU_N);
cufft_info->qcb_cu.resize(gpu_info->GPU_N);
cufft_info->ql.resize(gpu_info->GPU_N);
cufft_info->ffl.resize(gpu_info->GPU_N);
cufft_info->wdz_cu.resize(gpu_info->GPU_N);
cufft_info->qInt_cu.resize(gpu_info->GPU_N);
cufft_info->device_in.resize(gpu_info->GPU_N);
cufft_info->device_out.resize(gpu_info->GPU_N);
printf("Wonderful We have successfully initialized CPU setting.\n");
//-----------! Allocate and initialize memory on each GPU. ------------------------------------------------------
for (i=0; i < gpu_info->GPU_N; i++){
checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[i]));
checkCudaErrors(cudaStreamCreate(&gpu_info->stream[i]));
checkCudaErrors(cufftSetStream(cufft_info->plan_forward[i], gpu_info->stream[i]));
checkCudaErrors(cufftSetStream(cufft_info->plan_backward[i], gpu_info->stream[i]));
checkCudaErrors(cudaMallocManaged((void**)&(cufft_info->kxyzdz_cu[i]), sizeof(double)* NxNyNz));
checkCudaErrors(cudaMemcpy(cufft_info->kxyzdz_cu[i], cufft_info->kxyzdz,sizeof(double)*NxNyNz,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMallocManaged(&(cufft_info->wa_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(cudaMemcpy(cufft_info->wa_cu[i], cufft_info->wa+cufft_info->NxNyNz*batch*i,sizeof(double)*cufft_info->NxNyNz*batch,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMallocManaged(&(cufft_info->wb_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(cudaMemcpy(cufft_info->wb_cu[i], cufft_info->wb+cufft_info->NxNyNz*batch*i,sizeof(double)*cufft_info->NxNyNz*batch,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMallocManaged(&(cufft_info->pha_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(cudaMallocManaged(&(cufft_info->phb_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(cudaMallocManaged(&(cufft_info->qInt_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(cudaMallocManaged(&(cufft_info->wdz_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(cudaMallocManaged(&(cufft_info->qa_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch*(cufft_info->NsA+1)));//cufft_info->NsA (cufft_info->NsA+1)*
checkCudaErrors(cudaMallocManaged(&(cufft_info->qca_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch*(cufft_info->NsA+1)));
checkCudaErrors(cudaMallocManaged(&(cufft_info->qb_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch*(cufft_info->dNsB+1)));
checkCudaErrors(cudaMallocManaged(&(cufft_info->qcb_cu[i]), sizeof(double)* cufft_info->NxNyNz*batch*(cufft_info->dNsB+1)));
checkCudaErrors(cudaMallocManaged(&(cufft_info->ql[i]), sizeof(double)*batch));
checkCudaErrors(cudaMallocManaged(&(cufft_info->ffl[i]), sizeof(double)*batch));
checkCudaErrors(cudaMallocManaged(&(cufft_info->device_in[i]), sizeof(double)* cufft_info->NxNyNz*batch));
checkCudaErrors(cudaMallocManaged(&(cufft_info->device_out[i]), sizeof(cufftDoubleComplex)* cufft_info->Nxh1NyNz*batch));
checkCudaErrors(cudaDeviceSynchronize());
}
printf("Wonderful We have successfully initialized all the data.\n");
}
extern void finalize_cufft(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){
int i,j;
int can_access_peer_0_1;
//! free memory on the GPUs
for (i=0; i < gpu_info->GPU_N; i++){
checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[i]));
checkCudaErrors(cufftDestroy(cufft_info->plan_forward[i]));
checkCudaErrors(cufftDestroy(cufft_info->plan_backward[i]));
checkCudaErrors(cudaFree(cufft_info->kxyzdz_cu[i]));
checkCudaErrors(cudaFree(cufft_info->qa_cu[i]));
checkCudaErrors(cudaFree(cufft_info->qb_cu[i]));
checkCudaErrors(cudaFree(cufft_info->qca_cu[i]));
checkCudaErrors(cudaFree(cufft_info->qcb_cu[i]));
checkCudaErrors(cudaFree(cufft_info->pha_cu[i]));
checkCudaErrors(cudaFree(cufft_info->phb_cu[i]));
checkCudaErrors(cudaFree(cufft_info->wa_cu[i]));
checkCudaErrors(cudaFree(cufft_info->wb_cu[i]));
checkCudaErrors(cudaFree(cufft_info->wdz_cu[i]));
checkCudaErrors(cudaFree(cufft_info->device_in[i]));
checkCudaErrors(cudaFree(cufft_info->device_out[i]));
for(j=0;j<gpu_info->GPU_N;j++){
if(i!=j){
checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[i]));
checkCudaErrors(cudaDeviceCanAccessPeer(&can_access_peer_0_1, i, j));
if(can_access_peer_0_1) {
checkCudaErrors(cudaDeviceDisablePeerAccess(gpu_info->whichGPUs[j]));
}// end if can_access_peer_0_1
}// end i!=j
}//! end loop j
cudaDeviceSynchronize();
}//! end loop i
//! free memory on the CPU
free(cufft_info->wa);
free(cufft_info->wb);
free(cufft_info->pha);
free(cufft_info->phb);
free(cufft_info->kx);
free(cufft_info->ky);
free(cufft_info->kz);
free(gpu_info->stream);
free(cufft_info->kxyzdz);
free(gpu_info->whichGPUs);
printf("Wonderful We have successfully evaculate all the memery on GPU and CPU \n");
cudaDeviceReset();
}
/*
extern void test(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){
int index;
long ijk;
long NxNyNz=cufft_info->NxNyNz;
long Nxh1NyNz=cufft_info->Nxh1NyNz;
dim3 gridDim(1,1,1),blockDim(1,1,1);
for(index=0;index<gpu_info->GPU_N;index++){///gpu_info->GPU_N
checkCudaErrors(cudaSetDevice(index));
for(ijk=0;ijk<cufft_info->NxNyNz*cufft_info->batch;ijk++){
if(ijk<cufft_info->NxNyNz)
cufft_info->device_in[index][ijk]=ijk;
else
cufft_info->device_in[index][ijk]=ijk-NxNyNz;
}
checkCudaErrors(cudaDeviceSynchronize());
}
for(index=0;index<gpu_info->GPU_N;index++){
checkCudaErrors(cudaSetDevice(index));
checkCudaErrors(cufftExecD2Z(cufft_info->plan_forward[index],cufft_info->device_in[index],cufft_info->device_out[index]));
}
for(index=0;index<gpu_info->GPU_N;index++){///gpu_info->GPU_N
checkCudaErrors(cudaSetDevice(index));
checkCudaErrors(cudaDeviceSynchronize());
}
getLastCudaError("Kernel execution failed [ ]");
for(index=0;index<4;index++)
//display_GPU_Complex_data<<<gridDim,blockDim>>>((cufftDoubleComplex*)cufft_info->device_out[index],index);
for(ijk=0;ijk<10;ijk++)
printf("%g %g\n",cufft_info->device_out[index][ijk+Nxh1NyNz].x,cufft_info->device_out[index][ijk+Nxh1NyNz].y);
}
extern void com_to_com1d(GPU_INFO *gpu_info,data_assem *data_test){
cufftHandle plan;
cufftComplex *data_in,*data_out;
int BATCH=1;
cudaMalloc((void**)&data_in,sizeof(cufftComplex)*data_test->Nx);
checkCudaErrors(cudaMalloc((void**)&data_out,sizeof(cufftComplex)*data_test->Nx));
cudaMemcpy(data_in,data_test->data_com_in,sizeof(cufftComplex)*data_test->Nx,cudaMemcpyHostToDevice);
checkCudaErrors(cufftPlan1d(&plan,data_test->Nx,CUFFT_C2C,BATCH));
cufftExecC2C(plan,data_in,data_out,CUFFT_FORWARD);
cudaMemcpy(data_test->data_com_out,data_out,sizeof(cufftComplex)*data_test->Nx,cudaMemcpyDeviceToHost);
printf("dd %g %g\n",data_test->data_com_out[0].x,data_test->data_com_out[0].y);
cudaFree(data_in);
cudaFree(data_out);
cufftDestroy(plan);
}
*/
/*
extern void D1_MultipleGPU(GPU_INFO *gpu_info,data_assem *data_test,int N){
cufftHandle plan_input;
cufftResult result;
result = cufftCreate(&plan_input);
int nGPUs = 4, whichGPUs[4];
whichGPUs[0] = 0; whichGPUs[1] = 1;whichGPUs[2] = 2;whichGPUs[3] = 3;
dim3 gridDim(1,1),blockDim(10,10);
printf("grid size on x=%d y=%d z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("block size on x=%d y=%d z=%d\n",blockDim.x,blockDim.y,blockDim.z);
result = cufftXtSetGPUs (plan_input, nGPUs, whichGPUs);
if(result!=CUFFT_SUCCESS){
printf("failed to set GPU\n");
}
size_t worksize[4]; cufftComplex *host_data_input, *host_data_output;
int nx = 1024,ny=8,nz=8, batch = 1, rank = 3, n[3];
n[0] = nx;
n[1]=ny;
n[2]=nz;
int size_of_data = sizeof(cufftComplex) * nx *ny*nz* batch;
host_data_input = (cufftComplex*)malloc(size_of_data);
host_data_output = (cufftComplex*)malloc(size_of_data);
printf("length is %d\n",nx);
//initialize_1d_data (nx, batch, rank, n, inembed, &istride, &idist, onembed, &ostride, &odist, host_data_input, host_data_output);
for(int i=0;i<nx*ny*nz;i++){
host_data_input[i].x=i;
host_data_input[i].y=0;
}
printf("finish initial\n");
checkCufft( cufftMakePlanMany (plan_input, rank, n, NULL, 1, nx, NULL, 1, nx, CUFFT_C2C, batch, worksize));
//result=cufftMakePlan1d(plan_input, nx, CUFFT_C2C, batch, worksize);
// cufftXtMalloc() - Malloc data on multiple GPUs
cudaLibXtDesc *device_data_input, *device_data_output;
result = cufftXtMalloc (plan_input, &device_data_input, CUFFT_XT_FORMAT_INPLACE);
if(result!=CUFFT_SUCCESS){
printf("failed 1\n");
}
result = cufftXtMalloc (plan_input, &device_data_output, CUFFT_XT_FORMAT_INPLACE);
printf("%zu %zu \n", device_data_input->descriptor->size[0],device_data_input->descriptor->size[1]);
printf("%zu %zu \n", worksize[0],worksize[1]);
cudaSetDevice(0);
//display_GPU_Complex_data<<<1,10>>>((cufftDoubleComplex*)device_data_input->descriptor->data[0]);
if(result!=CUFFT_SUCCESS){
printf("failed 2\n");
}
// // cufftXtMemcpy() - Copy data from host to multiple GPUs
result = cufftXtMemcpy (plan_input, device_data_input, host_data_input, CUFFT_COPY_HOST_TO_DEVICE);
// // cufftXtExecDescriptorC2C() - Execute FFT on multiple GPUs
//cudaSetDevice(0);
result = cufftXtExecDescriptorC2C (plan_input, device_data_input, device_data_input, CUFFT_FORWARD);
printf("finish memcpy \n");
// // cufftXtMemcpy() - Copy the data to natural order on GPUs
result = cufftXtMemcpy (plan_input, device_data_output, device_data_input, CUFFT_COPY_DEVICE_TO_DEVICE);
cudaSetDevice(0);
//display_GPU_Complex_data<<<gridDim,blockDim>>>((cufftComplex*)device_data_output->descriptor->data[0],N);
cudaDeviceSynchronize();
if(result!=CUFFT_SUCCESS){
printf("failed copy data from device to device\n");
}
printf("problem 1\n");
// // cufftXtMemcpy() - Copy natural order data from multiple GPUs to host
result = cufftXtMemcpy (plan_input, host_data_output, device_data_input, CUFFT_COPY_DEVICE_TO_HOST);
for(int i=0;i<8;i++){
printf("%g %g\n",host_data_output[i].x,host_data_output[i].y);
}
// // Print output and check results int output_return = output_1d_results (nx, batch, host_data_input, host_data_output); //
// cufftXtFree() - Free GPU memory
result = cufftXtFree(device_data_input);
result = cufftXtFree(device_data_output);
// // cufftDestroy() - Destroy FFT plan
result = cufftDestroy(plan_input);
free(host_data_input);
free(host_data_output);
}
*/
/*
FILE *dp;
double *kxkykz,testD;
kxkykz=(double *)malloc(sizeof(double)*cufft_info->NxNyNz);
dp=fopen("kxyzdz.dat","r");
testD=0;
for(ijk=0;ijk<cufft_info->NxNyNz;ijk++){
fscanf(dp,"%lg\n",&kxkykz[ijk]);
testD+=(kxkykz[ijk]-cufft_info->kxyzdz[ijk])*(kxkykz[ijk]-cufft_info->kxyzdz[ijk]);
}
printf("compare %g\n",testD);
*/
|
essential_matrix.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <iostream>
#include <chrono>
#include <tuple>
#include <ATen/ATen.h>
#include "common.h"
#include "polish_E.cu"
#include "kernel_functions.cu"
/*
* CUDA macros, constants and functions
*/
const int subset_size = 5;
const unsigned long long seed = 1234;
#define CudaErrorCheck(ans) {__CudaErrorCheck((ans), __FILE__, __LINE__);}
void __CudaErrorCheck(hipError_t code, const char* file, int line) {
if (code != hipSuccess) {
std::cout << "CUDA Error (" << file << ":" << line << "): "
<< hipGetErrorString(code) << std::endl;
exit(code);
}
}
/*
* Decompose essential matrix into angles
*/
at::Tensor EssentialMatrixDecompose(
at::Tensor Emat) {
double *E_ptr = Emat.data_ptr<double>();
Ematrix E;
double parameter[5];
memcpy(E, E_ptr, 3 * 3 * sizeof(double));
Edecomp(E, parameter);
at::Tensor parameters = at::empty(5, Emat.options());
double *parameter_ptr = parameters.data_ptr<double>();
memcpy(parameter_ptr, parameter, 5 * sizeof(double));
return parameters;
}
/*
* Decompose the essential matrix via SVD, returning the orthogonal factors U and V.
*/
std::tuple<at::Tensor, at::Tensor> EssentialMatrixDecomposeUV(
at::Tensor Emat) {
double *E_ptr = Emat.data_ptr<double>();
Ematrix E;
Ematrix U;
Ematrix V;
memcpy(E, E_ptr, 3 * 3 * sizeof(double));
Edecomp(E, U, V);
at::Tensor Umat = at::empty(3 * 3, Emat.options());
at::Tensor Vmat = at::empty(3 * 3, Emat.options());
double *U_ptr = Umat.data_ptr<double>();
memcpy(U_ptr, U, 3 * 3 * sizeof(double));
double *V_ptr = Vmat.data_ptr<double>();
memcpy(V_ptr, V, 3 * 3 * sizeof(double));
Umat.resize_({3,3});
Vmat.resize_({3,3});
auto t = std::make_tuple(Umat, Vmat);
return t;
}
/*
* Five point algorithm cuda optimization using robust cost functions
*/
at::Tensor EssentialMatrixOptimise(
at::Tensor input1, // input 1 has size nx2, type double
at::Tensor input2,
at::Tensor initial_essential_matrix,
const double delta,
const double alpha,
const int max_iterations) {
auto clock_begin = std::chrono::steady_clock::now();
const int num_points = input1.size(0);
// Input data pointers
double *input1_ptr = input1.data_ptr<double>();
double *input2_ptr = input2.data_ptr<double>();
double *essential_matrix_ptr = initial_essential_matrix.data_ptr<double>();
Ematrix E_in;
memcpy(E_in, essential_matrix_ptr, 3 * 3 * sizeof(double));
polish_E_robust_parametric(E_in, input1_ptr, input2_ptr, num_points, delta, alpha, max_iterations);
at::Tensor E_out = at::empty(3 * 3, initial_essential_matrix.options());
double* outptr = E_out.data_ptr<double>();
memcpy(outptr, E_in, 3 * 3 * sizeof(double));
E_out.resize_({3,3});
// std::cout << "Runtime (Optimise): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin).count() << "s" << std::endl;
return E_out; //E_optimised
}
/*
* Five point algorithm cuda initialization
*/
at::Tensor EssentialMatrixInitialise(
at::Tensor input1, // input 1 has size nx2, type double
at::Tensor input2,
const int num_test_points, // 10
const int num_ransac_test_points, // 1000
const int num_ransac_iterations, // number of iterations to run RANSAC
const double inlier_threshold) {
auto clock_begin = std::chrono::steady_clock::now();
const int num_points = input1.size(0);
const int num_threads_per_block = 64;
const int num_blocks = 8;
const int num_threads = num_blocks * num_threads_per_block;
// CUDA Setup
// Set GPU to use
// int device = 0;
hipSetDevice(input1.get_device());
// hipSetDevice(input1.get_device());
// Input data pointer (on GPU)
double *input1_ptr = input1.data_ptr<double>();
double *input2_ptr = input2.data_ptr<double>();
int *num_inliers;
double (*essential_matrices)[3][3];
hiprandState_t* state;
CudaErrorCheck(hipMallocManaged((void **) &num_inliers, num_threads * sizeof(int)));
CudaErrorCheck(hipMallocManaged((void **) &essential_matrices, num_threads * 3 * 3 * sizeof(double)));
CudaErrorCheck(hipMallocManaged((void **) &state, num_threads * sizeof(hiprandState_t)));
// Copy constants to device constant memory
CudaErrorCheck(hipMemcpyToSymbol(c_num_points, &num_points, sizeof(int)));
CudaErrorCheck(hipMemcpyToSymbol(c_num_test_points, &num_test_points, sizeof(int)));
CudaErrorCheck(hipMemcpyToSymbol(c_ransac_num_test_points, &num_ransac_test_points, sizeof(int)));
CudaErrorCheck(hipMemcpyToSymbol(c_ransac_num_iterations, &num_ransac_iterations, sizeof(int)));
CudaErrorCheck(hipMemcpyToSymbol(c_inlier_threshold, &inlier_threshold, sizeof(double)));
// Generate random states, one for each thread
hipLaunchKernelGGL(( SetupRandomState), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, seed, state);
auto clock_begin_kernel = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( EstimateEssentialMatrix<subset_size>), dim3(num_blocks), dim3(num_threads_per_block), 0, 0,
input1_ptr, // Two sets of matching points
input2_ptr, // (flattened 2D arrays)
state, // Random number generator state
num_inliers, // Number of inliers per thread
essential_matrices); // Essential matrices per thread
CudaErrorCheck(hipPeekAtLastError()); // Check for kernel launch error
CudaErrorCheck(hipDeviceSynchronize()); // Check for kernel execution error
// std::cout << "Runtime (Initialise, Kernel Only): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin_kernel).count() << "s" << std::endl;
int ind_max = distance(num_inliers, max_element(num_inliers, num_inliers + num_threads));
// cout << "The largest element is " << ind_max << '\n';
cout << "The number of inliers: " << num_inliers[ind_max] << '\n';
at::Tensor E_out = at::empty(3 * 3, input1.options());
double* dataptr = E_out.data_ptr<double>();
CudaErrorCheck(hipMemcpy(dataptr, &essential_matrices[ind_max], sizeof(essential_matrices[ind_max]), hipMemcpyDeviceToDevice));
CudaErrorCheck(hipFree(num_inliers));
CudaErrorCheck(hipFree(essential_matrices));
CudaErrorCheck(hipFree(state));
E_out.resize_({3, 3});
// std::cout << "Runtime (Initialise): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin).count() << "s" << std::endl;
return E_out;
}
/*
* Five point algorithm cuda initialization
*/
std::tuple<at::Tensor, at::Tensor,int> ProjectionMatrixRansac(
at::Tensor input1, // input 1 has size nx2, type double
at::Tensor input2,
const int num_test_points, // 10
const int num_ransac_test_points, // 1000
const int num_ransac_iterations, // number of iterations to run RANSAC
const double inlier_threshold) {
// auto clock_begin = std::chrono::steady_clock::now();
const int num_points = input1.size(0);
const int num_threads_per_block = 64;
const int num_blocks = 8;
const int num_threads = num_blocks * num_threads_per_block;
// CUDA Setup
// Set GPU to use
// int device = 0;
// CudaErrorCheck(hipSetDevice(device));
hipSetDevice(input1.get_device());
// hipSetDevice(input1.get_device());
// Input data pointer (on GPU)
double *input1_ptr = input1.data_ptr<double>();
double *input2_ptr = input2.data_ptr<double>();
int *num_inliers;
double (*essential_matrices)[3][3];
double (*projection_matrices)[3][4];
hiprandState_t* state;
CudaErrorCheck(hipMallocManaged((void **) &num_inliers, num_threads * sizeof(int)));
CudaErrorCheck(hipMallocManaged((void **) &essential_matrices, num_threads * 3 * 3 * sizeof(double)));
CudaErrorCheck(hipMallocManaged((void **) &projection_matrices, num_threads * 3 * 4 * sizeof(double)));
CudaErrorCheck(hipMallocManaged((void **) &state, num_threads * sizeof(hiprandState_t)));
// Copy constants to device constant memory
CudaErrorCheck(hipMemcpyToSymbol(c_num_points, &num_points, sizeof(int)));
CudaErrorCheck(hipMemcpyToSymbol(c_num_test_points, &num_test_points, sizeof(int)));
CudaErrorCheck(hipMemcpyToSymbol(c_ransac_num_test_points, &num_ransac_test_points, sizeof(int)));
CudaErrorCheck(hipMemcpyToSymbol(c_ransac_num_iterations, &num_ransac_iterations, sizeof(int)));
CudaErrorCheck(hipMemcpyToSymbol(c_inlier_threshold, &inlier_threshold, sizeof(double)));
// Generate random states, one for each thread
hipLaunchKernelGGL(( SetupRandomState), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, seed, state);
// auto clock_begin_kernel = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( EstimateProjectionMatrix<subset_size>), dim3(num_blocks), dim3(num_threads_per_block), 0, 0,
input1_ptr, // Two sets of matching points
input2_ptr, // (flattened 2D arrays)
state, // Random number generator state
num_inliers, // Number of inliers per thread
essential_matrices, // Essential matrices per thread
projection_matrices); // Projection matrices per thread
CudaErrorCheck(hipPeekAtLastError()); // Check for kernel launch error
CudaErrorCheck(hipDeviceSynchronize()); // Check for kernel execution error
// std::cout << "Runtime (Initialise, Kernel Only): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin_kernel).count() << "s" << std::endl;
int ind_max = distance(num_inliers, max_element(num_inliers, num_inliers + num_threads));
// cout << "The largest element is " << ind_max << '\n';
// cout << "The number of inliers: " << num_inliers[ind_max] << '\n';
at::Tensor E_out = at::empty(3 * 3, input1.options());
at::Tensor P_out = at::empty(3 * 4, input1.options());
double* dataptr = E_out.data_ptr<double>();
double* dataptr_p = P_out.data_ptr<double>();
CudaErrorCheck(hipMemcpy(dataptr, &essential_matrices[ind_max], sizeof(essential_matrices[ind_max]), hipMemcpyDeviceToDevice));
CudaErrorCheck(hipMemcpy(dataptr_p, &projection_matrices[ind_max], sizeof(projection_matrices[ind_max]), hipMemcpyDeviceToDevice));
const int Max_inlier = num_inliers[ind_max];
E_out.resize_({3, 3});
P_out.resize_({3, 4});
CudaErrorCheck(hipFree(num_inliers));
CudaErrorCheck(hipFree(essential_matrices));
CudaErrorCheck(hipFree(projection_matrices));
CudaErrorCheck(hipFree(state));
// std::cout << "Runtime (Initialise): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin).count() << "s" << std::endl;
auto t = std::make_tuple(E_out, P_out, Max_inlier);
return t;
}
| essential_matrix.cu | #include <vector>
#include <iostream>
#include <chrono>
#include <tuple>
#include <ATen/ATen.h>
#include "common.h"
#include "polish_E.cu"
#include "kernel_functions.cu"
/*
* CUDA macros, constants and functions
*/
const int subset_size = 5;
const unsigned long long seed = 1234;
#define CudaErrorCheck(ans) {__CudaErrorCheck((ans), __FILE__, __LINE__);}
void __CudaErrorCheck(cudaError_t code, const char* file, int line) {
if (code != cudaSuccess) {
std::cout << "CUDA Error (" << file << ":" << line << "): "
<< cudaGetErrorString(code) << std::endl;
exit(code);
}
}
/*
* Decompose essential matrix into angles
*/
at::Tensor EssentialMatrixDecompose(
at::Tensor Emat) {
double *E_ptr = Emat.data_ptr<double>();
Ematrix E;
double parameter[5];
memcpy(E, E_ptr, 3 * 3 * sizeof(double));
Edecomp(E, parameter);
at::Tensor parameters = at::empty(5, Emat.options());
double *parameter_ptr = parameters.data_ptr<double>();
memcpy(parameter_ptr, parameter, 5 * sizeof(double));
return parameters;
}
/*
* Decompose the essential matrix via SVD, returning the orthogonal factors U and V.
*/
std::tuple<at::Tensor, at::Tensor> EssentialMatrixDecomposeUV(
at::Tensor Emat) {
double *E_ptr = Emat.data_ptr<double>();
Ematrix E;
Ematrix U;
Ematrix V;
memcpy(E, E_ptr, 3 * 3 * sizeof(double));
Edecomp(E, U, V);
at::Tensor Umat = at::empty(3 * 3, Emat.options());
at::Tensor Vmat = at::empty(3 * 3, Emat.options());
double *U_ptr = Umat.data_ptr<double>();
memcpy(U_ptr, U, 3 * 3 * sizeof(double));
double *V_ptr = Vmat.data_ptr<double>();
memcpy(V_ptr, V, 3 * 3 * sizeof(double));
Umat.resize_({3,3});
Vmat.resize_({3,3});
auto t = std::make_tuple(Umat, Vmat);
return t;
}
/*
* Five point algorithm cuda optimization using robust cost functions
*/
at::Tensor EssentialMatrixOptimise(
at::Tensor input1, // input 1 has size nx2, type double
at::Tensor input2,
at::Tensor initial_essential_matrix,
const double delta,
const double alpha,
const int max_iterations) {
auto clock_begin = std::chrono::steady_clock::now();
const int num_points = input1.size(0);
// Input data pointers
double *input1_ptr = input1.data_ptr<double>();
double *input2_ptr = input2.data_ptr<double>();
double *essential_matrix_ptr = initial_essential_matrix.data_ptr<double>();
Ematrix E_in;
memcpy(E_in, essential_matrix_ptr, 3 * 3 * sizeof(double));
polish_E_robust_parametric(E_in, input1_ptr, input2_ptr, num_points, delta, alpha, max_iterations);
at::Tensor E_out = at::empty(3 * 3, initial_essential_matrix.options());
double* outptr = E_out.data_ptr<double>();
memcpy(outptr, E_in, 3 * 3 * sizeof(double));
E_out.resize_({3,3});
// std::cout << "Runtime (Optimise): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin).count() << "s" << std::endl;
return E_out; //E_optimised
}
/*
* Five point algorithm cuda initialization
*/
at::Tensor EssentialMatrixInitialise(
at::Tensor input1, // input 1 has size nx2, type double
at::Tensor input2,
const int num_test_points, // 10
const int num_ransac_test_points, // 1000
const int num_ransac_iterations, // number of iterations to run RANSAC
const double inlier_threshold) {
auto clock_begin = std::chrono::steady_clock::now();
const int num_points = input1.size(0);
const int num_threads_per_block = 64;
const int num_blocks = 8;
const int num_threads = num_blocks * num_threads_per_block;
// CUDA Setup
// Set GPU to use
// int device = 0;
cudaSetDevice(input1.get_device());
// cudaSetDevice(input1.get_device());
// Input data pointer (on GPU)
double *input1_ptr = input1.data_ptr<double>();
double *input2_ptr = input2.data_ptr<double>();
int *num_inliers;
double (*essential_matrices)[3][3];
curandState* state;
CudaErrorCheck(cudaMallocManaged((void **) &num_inliers, num_threads * sizeof(int)));
CudaErrorCheck(cudaMallocManaged((void **) &essential_matrices, num_threads * 3 * 3 * sizeof(double)));
CudaErrorCheck(cudaMallocManaged((void **) &state, num_threads * sizeof(curandState)));
// Copy constants to device constant memory
CudaErrorCheck(cudaMemcpyToSymbol(c_num_points, &num_points, sizeof(int)));
CudaErrorCheck(cudaMemcpyToSymbol(c_num_test_points, &num_test_points, sizeof(int)));
CudaErrorCheck(cudaMemcpyToSymbol(c_ransac_num_test_points, &num_ransac_test_points, sizeof(int)));
CudaErrorCheck(cudaMemcpyToSymbol(c_ransac_num_iterations, &num_ransac_iterations, sizeof(int)));
CudaErrorCheck(cudaMemcpyToSymbol(c_inlier_threshold, &inlier_threshold, sizeof(double)));
// Generate random states, one for each thread
SetupRandomState<<<num_blocks, num_threads_per_block>>>(seed, state);
auto clock_begin_kernel = std::chrono::steady_clock::now();
EstimateEssentialMatrix<subset_size><<<num_blocks, num_threads_per_block>>>(
input1_ptr, // Two sets of matching points
input2_ptr, // (flattened 2D arrays)
state, // Random number generator state
num_inliers, // Number of inliers per thread
essential_matrices); // Essential matrices per thread
CudaErrorCheck(cudaPeekAtLastError()); // Check for kernel launch error
CudaErrorCheck(cudaDeviceSynchronize()); // Check for kernel execution error
// std::cout << "Runtime (Initialise, Kernel Only): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin_kernel).count() << "s" << std::endl;
int ind_max = distance(num_inliers, max_element(num_inliers, num_inliers + num_threads));
// cout << "The largest element is " << ind_max << '\n';
cout << "The number of inliers: " << num_inliers[ind_max] << '\n';
at::Tensor E_out = at::empty(3 * 3, input1.options());
double* dataptr = E_out.data_ptr<double>();
CudaErrorCheck(cudaMemcpy(dataptr, &essential_matrices[ind_max], sizeof(essential_matrices[ind_max]), cudaMemcpyDeviceToDevice));
CudaErrorCheck(cudaFree(num_inliers));
CudaErrorCheck(cudaFree(essential_matrices));
CudaErrorCheck(cudaFree(state));
E_out.resize_({3, 3});
// std::cout << "Runtime (Initialise): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin).count() << "s" << std::endl;
return E_out;
}
/*
* Five point algorithm cuda initialization
*/
std::tuple<at::Tensor, at::Tensor,int> ProjectionMatrixRansac(
at::Tensor input1, // input 1 has size nx2, type double
at::Tensor input2,
const int num_test_points, // 10
const int num_ransac_test_points, // 1000
const int num_ransac_iterations, // number of iterations to run RANSAC
const double inlier_threshold) {
// auto clock_begin = std::chrono::steady_clock::now();
const int num_points = input1.size(0);
const int num_threads_per_block = 64;
const int num_blocks = 8;
const int num_threads = num_blocks * num_threads_per_block;
// CUDA Setup
// Set GPU to use
// int device = 0;
// CudaErrorCheck(cudaSetDevice(device));
cudaSetDevice(input1.get_device());
// cudaSetDevice(input1.get_device());
// Input data pointer (on GPU)
double *input1_ptr = input1.data_ptr<double>();
double *input2_ptr = input2.data_ptr<double>();
int *num_inliers;
double (*essential_matrices)[3][3];
double (*projection_matrices)[3][4];
curandState* state;
CudaErrorCheck(cudaMallocManaged((void **) &num_inliers, num_threads * sizeof(int)));
CudaErrorCheck(cudaMallocManaged((void **) &essential_matrices, num_threads * 3 * 3 * sizeof(double)));
CudaErrorCheck(cudaMallocManaged((void **) &projection_matrices, num_threads * 3 * 4 * sizeof(double)));
CudaErrorCheck(cudaMallocManaged((void **) &state, num_threads * sizeof(curandState)));
// Copy constants to device constant memory
CudaErrorCheck(cudaMemcpyToSymbol(c_num_points, &num_points, sizeof(int)));
CudaErrorCheck(cudaMemcpyToSymbol(c_num_test_points, &num_test_points, sizeof(int)));
CudaErrorCheck(cudaMemcpyToSymbol(c_ransac_num_test_points, &num_ransac_test_points, sizeof(int)));
CudaErrorCheck(cudaMemcpyToSymbol(c_ransac_num_iterations, &num_ransac_iterations, sizeof(int)));
CudaErrorCheck(cudaMemcpyToSymbol(c_inlier_threshold, &inlier_threshold, sizeof(double)));
// Generate random states, one for each thread
SetupRandomState<<<num_blocks, num_threads_per_block>>>(seed, state);
// auto clock_begin_kernel = std::chrono::steady_clock::now();
EstimateProjectionMatrix<subset_size><<<num_blocks, num_threads_per_block>>>(
input1_ptr, // Two sets of matching points
input2_ptr, // (flattened 2D arrays)
state, // Random number generator state
num_inliers, // Number of inliers per thread
essential_matrices, // Essential matrices per thread
projection_matrices); // Projection matrices per thread
CudaErrorCheck(cudaPeekAtLastError()); // Check for kernel launch error
CudaErrorCheck(cudaDeviceSynchronize()); // Check for kernel execution error
// std::cout << "Runtime (Initialise, Kernel Only): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin_kernel).count() << "s" << std::endl;
int ind_max = distance(num_inliers, max_element(num_inliers, num_inliers + num_threads));
// cout << "The largest element is " << ind_max << '\n';
// cout << "The number of inliers: " << num_inliers[ind_max] << '\n';
at::Tensor E_out = at::empty(3 * 3, input1.options());
at::Tensor P_out = at::empty(3 * 4, input1.options());
double* dataptr = E_out.data_ptr<double>();
double* dataptr_p = P_out.data_ptr<double>();
CudaErrorCheck(cudaMemcpy(dataptr, &essential_matrices[ind_max], sizeof(essential_matrices[ind_max]), cudaMemcpyDeviceToDevice));
CudaErrorCheck(cudaMemcpy(dataptr_p, &projection_matrices[ind_max], sizeof(projection_matrices[ind_max]), cudaMemcpyDeviceToDevice));
const int Max_inlier = num_inliers[ind_max];
E_out.resize_({3, 3});
P_out.resize_({3, 4});
CudaErrorCheck(cudaFree(num_inliers));
CudaErrorCheck(cudaFree(essential_matrices));
CudaErrorCheck(cudaFree(projection_matrices));
CudaErrorCheck(cudaFree(state));
// std::cout << "Runtime (Initialise): " << std::chrono::duration<double>(std::chrono::steady_clock::now() - clock_begin).count() << "s" << std::endl;
auto t = std::make_tuple(E_out, P_out, Max_inlier);
return t;
}
|
3c351c5663f541ee01acb6f84c8e4c16851958b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
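// Element-wise asinh: each thread handles one (gid_0, gid_1) entry of an sd-by-fd matrix;
// offset_a/offset_b and ld_a/ld_b give each buffer's starting offset and leading dimension
// (a column-major layout is assumed in this description).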
__global__ void ge_asinh (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(asinh)(a[offset_a + gid_0 + gid_1 * ld_a]);
}
} | 3c351c5663f541ee01acb6f84c8e4c16851958b3.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
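// Element-wise asinh: for each (gid_0, gid_1) in an sd-by-fd view, b = asinh(a), honoring the per-matrix offsets and leading dimensions.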
__global__ void ge_asinh (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(asinh)(a[offset_a + gid_0 + gid_1 * ld_a]);
}
} |
2faf63de3fcd3039a128fbbfb6212d9f620865a5.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
template<typename scalar_t>
struct CompareLTFunctor {
__device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
return a < b;
}
};
void lt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "lt_cuda", [&]() {
gpu_kernel_with_scalars(iter, CompareLTFunctor<scalar_t>());
});
}
REGISTER_DISPATCH(lt_stub, &lt_kernel_cuda);
}} // namespace at::native
| 2faf63de3fcd3039a128fbbfb6212d9f620865a5.cu | #include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
template<typename scalar_t>
struct CompareLTFunctor {
__device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
return a < b;
}
};
void lt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "lt_cuda", [&]() {
gpu_kernel_with_scalars(iter, CompareLTFunctor<scalar_t>());
});
}
REGISTER_DISPATCH(lt_stub, &lt_kernel_cuda);
}} // namespace at::native
|
e21f12ad16d6655f045a659808c75b11632d45f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <UnitTest++.h>
#include <iostream>
#include <cstdio>
#include "Material.hh"
#include "GPUUtilityFunctions.hh"
#include "SimpleVector.hh"
using namespace MonteRay;
struct CrossSection{
int zaid;
constexpr auto ZAID() const {return zaid;}
constexpr gpuFloatType_t getAWR() const {return 1.0;}
constexpr gpuFloatType_t getTotalXS(gpuFloatType_t) const {return 1.0;}
};
struct CrossSectionList{
SimpleVector<CrossSection> xs_vec;
const CrossSection& getXSByZAID(int ZAID) const {
auto loc = std::find_if(xs_vec.begin(), xs_vec.end(),
[ZAID](auto&& xs){ return xs.ZAID() == ZAID; } );
if (loc == xs_vec.end()) {
throw std::runtime_error("Attempted to access CrossSection with ZAID " + std::to_string(ZAID) +
" in CrossSectionList but it doesn't exist.");
}
return *loc;
}
};
class MaterialFixture{
public:
using Material_t = Material<CrossSection>;
CrossSectionList xsList;
Material_t mat;
MaterialFixture(){
using XS = CrossSection;
xsList = CrossSectionList{{XS{1001}, XS{2004}, XS{6012}}};
auto mb = Material_t::make_builder(xsList);
mb.addIsotope(2.0, 1001);
mb.addIsotope(3.0, 2004);
mb.addIsotope(5.0, 6012);
mat = mb.build();
}
};
SUITE( Material_tester ) {
constexpr double close = 1.0E-6;
TEST_FIXTURE( MaterialFixture, builder ) {
CHECK_CLOSE(mat.fraction(0), 0.2, close);
CHECK_CLOSE(mat.fraction(1), 0.3, close);
CHECK_CLOSE(mat.fraction(2), 0.5, close);
CHECK_EQUAL(1001, mat.xs(0).ZAID());
CHECK_EQUAL(2004, mat.xs(1).ZAID());
CHECK_EQUAL(6012, mat.xs(2).ZAID());
CHECK_CLOSE(mat.atomicWeight(), neutron_molar_mass, close);
CHECK_EQUAL(mat.numIsotopes(), 3);
}
TEST_FIXTURE( MaterialFixture, testing) {
CHECK_CLOSE(mat.fraction(0), 0.2, close);
CHECK_CLOSE(mat.fraction(1), 0.3, close);
CHECK_CLOSE(mat.fraction(2), 0.5, close);
CHECK_EQUAL(1001, mat.xs(0).ZAID());
CHECK_EQUAL(2004, mat.xs(1).ZAID());
CHECK_EQUAL(6012, mat.xs(2).ZAID());
CHECK_CLOSE(mat.atomicWeight(), neutron_molar_mass, close);
CHECK_EQUAL(mat.numIsotopes(), 3);
}
TEST_FIXTURE( MaterialFixture, TotalXS ) {
gpuFloatType_t E = 1.0;
gpuFloatType_t density = 2.0;
CHECK_CLOSE(mat.getMicroTotalXS(E), 1.0, close);
CHECK_CLOSE(mat.getTotalXS(E, density), mat.getMicroTotalXS(E) * density * AvogadroBarn / mat.atomicWeight(), close);
#ifdef __HIPCC__
int* zaid;
hipMallocManaged(&zaid, sizeof(int));
gpuFloatType_t* micro;
hipMallocManaged(&micro, sizeof(gpuFloatType_t));
gpuFloatType_t* macro;
hipMallocManaged(&macro, sizeof(gpuFloatType_t));
auto matl = mat;
auto func = [=] __device__ () {
*zaid = matl.xs(1).ZAID();
*micro = matl.getMicroTotalXS(E);
*macro = matl.getTotalXS(E, density);
};
hipLaunchKernelGGL(( d_invoker), dim3(1), dim3(1), 0, 0, func);
hipDeviceSynchronize();
CHECK_EQUAL(*zaid, 2004);
CHECK_CLOSE(*micro, 1.0, close);
CHECK_CLOSE(*macro, mat.getMicroTotalXS(E) * density * AvogadroBarn / mat.atomicWeight(), close);
hipFree(zaid);
hipFree(micro);
hipFree(macro);
#endif
}
TEST_FIXTURE ( MaterialFixture, write_and_read ){
std::stringstream stream;
mat.write(stream);
Material_t::Builder<CrossSectionList> mat_builder(xsList);
mat_builder.read(stream);
auto newMat = mat_builder.build();
CHECK_EQUAL(newMat.atomicWeight(), mat.atomicWeight());
CHECK_EQUAL(newMat.numIsotopes(), mat.numIsotopes());
for (size_t i = 0; i < newMat.numIsotopes(); i++){
CHECK_EQUAL(newMat.fraction(i), mat.fraction(i));
CHECK_EQUAL(newMat.xs(i).getAWR(), mat.xs(i).getAWR());
}
}
}
| e21f12ad16d6655f045a659808c75b11632d45f6.cu | #include <UnitTest++.h>
#include <iostream>
#include <cstdio>
#include "Material.hh"
#include "GPUUtilityFunctions.hh"
#include "SimpleVector.hh"
using namespace MonteRay;
struct CrossSection{
int zaid;
constexpr auto ZAID() const {return zaid;}
constexpr gpuFloatType_t getAWR() const {return 1.0;}
constexpr gpuFloatType_t getTotalXS(gpuFloatType_t) const {return 1.0;}
};
struct CrossSectionList{
SimpleVector<CrossSection> xs_vec;
const CrossSection& getXSByZAID(int ZAID) const {
auto loc = std::find_if(xs_vec.begin(), xs_vec.end(),
[ZAID](auto&& xs){ return xs.ZAID() == ZAID; } );
if (loc == xs_vec.end()) {
throw std::runtime_error("Attempted to access CrossSection with ZAID " + std::to_string(ZAID) +
" in CrossSectionList but it doesn't exist.");
}
return *loc;
}
};
class MaterialFixture{
public:
using Material_t = Material<CrossSection>;
CrossSectionList xsList;
Material_t mat;
MaterialFixture(){
using XS = CrossSection;
xsList = CrossSectionList{{XS{1001}, XS{2004}, XS{6012}}};
auto mb = Material_t::make_builder(xsList);
mb.addIsotope(2.0, 1001);
mb.addIsotope(3.0, 2004);
mb.addIsotope(5.0, 6012);
mat = mb.build();
}
};
SUITE( Material_tester ) {
constexpr double close = 1.0E-6;
TEST_FIXTURE( MaterialFixture, builder ) {
CHECK_CLOSE(mat.fraction(0), 0.2, close);
CHECK_CLOSE(mat.fraction(1), 0.3, close);
CHECK_CLOSE(mat.fraction(2), 0.5, close);
CHECK_EQUAL(1001, mat.xs(0).ZAID());
CHECK_EQUAL(2004, mat.xs(1).ZAID());
CHECK_EQUAL(6012, mat.xs(2).ZAID());
CHECK_CLOSE(mat.atomicWeight(), neutron_molar_mass, close);
CHECK_EQUAL(mat.numIsotopes(), 3);
}
TEST_FIXTURE( MaterialFixture, testing) {
CHECK_CLOSE(mat.fraction(0), 0.2, close);
CHECK_CLOSE(mat.fraction(1), 0.3, close);
CHECK_CLOSE(mat.fraction(2), 0.5, close);
CHECK_EQUAL(1001, mat.xs(0).ZAID());
CHECK_EQUAL(2004, mat.xs(1).ZAID());
CHECK_EQUAL(6012, mat.xs(2).ZAID());
CHECK_CLOSE(mat.atomicWeight(), neutron_molar_mass, close);
CHECK_EQUAL(mat.numIsotopes(), 3);
}
TEST_FIXTURE( MaterialFixture, TotalXS ) {
gpuFloatType_t E = 1.0;
gpuFloatType_t density = 2.0;
CHECK_CLOSE(mat.getMicroTotalXS(E), 1.0, close);
CHECK_CLOSE(mat.getTotalXS(E, density), mat.getMicroTotalXS(E) * density * AvogadroBarn / mat.atomicWeight(), close);
#ifdef __CUDACC__
int* zaid;
cudaMallocManaged(&zaid, sizeof(int));
gpuFloatType_t* micro;
cudaMallocManaged(&micro, sizeof(gpuFloatType_t));
gpuFloatType_t* macro;
cudaMallocManaged(&macro, sizeof(gpuFloatType_t));
auto matl = mat;
auto func = [=] __device__ () {
*zaid = matl.xs(1).ZAID();
*micro = matl.getMicroTotalXS(E);
*macro = matl.getTotalXS(E, density);
};
d_invoker<<<1, 1>>>(func);
cudaDeviceSynchronize();
CHECK_EQUAL(*zaid, 2004);
CHECK_CLOSE(*micro, 1.0, close);
CHECK_CLOSE(*macro, mat.getMicroTotalXS(E) * density * AvogadroBarn / mat.atomicWeight(), close);
cudaFree(zaid);
cudaFree(micro);
cudaFree(macro);
#endif
}
TEST_FIXTURE ( MaterialFixture, write_and_read ){
std::stringstream stream;
mat.write(stream);
Material_t::Builder<CrossSectionList> mat_builder(xsList);
mat_builder.read(stream);
auto newMat = mat_builder.build();
CHECK_EQUAL(newMat.atomicWeight(), mat.atomicWeight());
CHECK_EQUAL(newMat.numIsotopes(), mat.numIsotopes());
for (size_t i = 0; i < newMat.numIsotopes(); i++){
CHECK_EQUAL(newMat.fraction(i), mat.fraction(i));
CHECK_EQUAL(newMat.xs(i).getAWR(), mat.xs(i).getAWR());
}
}
}
|
df97f0dc53681cee155f7687fa375b9c38eeae86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* yolo_layer.cu
*
* This code was originally written by wang-xinyu under MIT license.
* I took it from:
*
* https://github.com/wang-xinyu/tensorrtx/tree/master/yolov4
*
* and made necessary modifications.
*
* - JK Jung
*/
#include "trt_yolo/yolo_layer.h"
using namespace Yolo;
namespace
{
// Write values into buffer
template <typename T>
void write(char*& buffer, const T& val)
{
*reinterpret_cast<T*>(buffer) = val;
buffer += sizeof(T);
}
// Read values from buffer
template <typename T>
void read(const char*& buffer, T& val)
{
val = *reinterpret_cast<const T*>(buffer);
buffer += sizeof(T);
}
} // namespace
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(int yolo_width, int yolo_height, int num_anchors, float* anchors, int num_classes, int input_width, int input_height, float scale_x_y, int new_coords)
{
mYoloWidth = yolo_width;
mYoloHeight = yolo_height;
mNumAnchors = num_anchors;
memcpy(mAnchorsHost, anchors, num_anchors * 2 * sizeof(float));
mNumClasses = num_classes;
mInputWidth = input_width;
mInputHeight = input_height;
mScaleXY = scale_x_y;
mNewCoords = new_coords;
CHECK(hipMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float)));
CHECK(hipMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), hipMemcpyHostToDevice));
}
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mThreadCount);
read(d, mYoloWidth);
read(d, mYoloHeight);
read(d, mNumAnchors);
memcpy(mAnchorsHost, d, MAX_ANCHORS * 2 * sizeof(float));
d += MAX_ANCHORS * 2 * sizeof(float);
read(d, mNumClasses);
read(d, mInputWidth);
read(d, mInputHeight);
read(d, mScaleXY);
read(d, mNewCoords);
CHECK(hipMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float)));
CHECK(hipMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), hipMemcpyHostToDevice));
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
char* d = static_cast<char*>(buffer), *a = d;
write(d, mThreadCount);
write(d, mYoloWidth);
write(d, mYoloHeight);
write(d, mNumAnchors);
memcpy(d, mAnchorsHost, MAX_ANCHORS * 2 * sizeof(float));
d += MAX_ANCHORS * 2 * sizeof(float);
write(d, mNumClasses);
write(d, mInputWidth);
write(d, mInputHeight);
write(d, mScaleXY);
write(d, mNewCoords);
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mThreadCount) + \
sizeof(mYoloWidth) + sizeof(mYoloHeight) + \
sizeof(mNumAnchors) + MAX_ANCHORS * 2 * sizeof(float) + \
sizeof(mNumClasses) + \
sizeof(mInputWidth) + sizeof(mInputHeight) + \
sizeof(mScaleXY) + sizeof(mNewCoords);
}
int YoloLayerPlugin::initialize()
{
return 0;
}
void YoloLayerPlugin::terminate()
{
CHECK(hipFree(mAnchors));
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
assert(index == 0);
assert(nbInputDims == 1);
assert(inputs[0].d[0] == (mNumClasses + 5) * mNumAnchors);
assert(inputs[0].d[1] == mYoloHeight);
assert(inputs[0].d[2] == mYoloWidth);
// output detection results to the channel dimension
int totalsize = mYoloWidth * mYoloHeight * mNumAnchors * sizeof(Detection) / sizeof(float);
return Dims3(totalsize, 1, 1);
}
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext()
{
}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin *p = new YoloLayerPlugin(mYoloWidth, mYoloHeight, mNumAnchors, (float*) mAnchorsHost, mNumClasses, mInputWidth, mInputHeight, mScaleXY, mNewCoords);
p->setPluginNamespace(mPluginNamespace);
return p;
}
inline __device__ float sigmoidGPU(float x) { return 1.0f / (1.0f + __expf(-x)); }
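// Sigmoid stretched by scale_x_y about 0.5: the output range widens from [0, 1] to [-(s-1)/2, 1+(s-1)/2].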
inline __device__ float scale_sigmoidGPU(float x, float s)
{
return s * sigmoidGPU(x) - (s - 1.0f) * 0.5f;
}
// CalDetection(): This kernel processes 1 yolo layer calculation. It
// distributes calculations so that 1 GPU thread would be responsible
// for each grid/anchor combination.
// NOTE: The output values (x, y, w, h) are between 0.0 and 1.0
// (relative to the original image width and height).
__global__ void CalDetection(const float *input, float *output,
int batch_size,
int yolo_width, int yolo_height,
int num_anchors, const float *anchors,
int num_classes, int input_w, int input_h,
float scale_x_y)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
Detection* det = ((Detection*) output) + idx;
int total_grids = yolo_width * yolo_height;
if (idx >= batch_size * total_grids * num_anchors) return;
int info_len = 5 + num_classes;
//int batch_idx = idx / (total_grids * num_anchors);
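// Each (batch, anchor) group owns info_len contiguous planes of total_grids values; cur_input points at this thread's grid cell in the group's first plane.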
int group_idx = idx / total_grids;
int anchor_idx = group_idx % num_anchors;
const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids);
int class_id;
float max_cls_logit = -CUDART_INF_F; // minus infinity
for (int i = 5; i < info_len; ++i) {
float l = *(cur_input + i * total_grids);
if (l > max_cls_logit) {
max_cls_logit = l;
class_id = i - 5;
}
}
float max_cls_prob = sigmoidGPU(max_cls_logit);
float box_prob = sigmoidGPU(*(cur_input + 4 * total_grids));
//if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH)
// return;
int row = (idx % total_grids) / yolo_width;
int col = (idx % total_grids) % yolo_width;
det->bbox[0] = (col + scale_sigmoidGPU(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1]
det->bbox[1] = (row + scale_sigmoidGPU(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1]
det->bbox[2] = __expf(*(cur_input + 2 * total_grids)) * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1]
det->bbox[3] = __expf(*(cur_input + 3 * total_grids)) * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1]
det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left
det->bbox[1] -= det->bbox[3] / 2;
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
inline __device__ float scale(float x, float s)
{
return s * x - (s - 1.0f) * 0.5f;
}
inline __device__ float square(float x)
{
return x * x;
}
__global__ void CalDetection_NewCoords(const float *input, float *output,
int batch_size,
int yolo_width, int yolo_height,
int num_anchors, const float *anchors,
int num_classes, int input_w, int input_h,
float scale_x_y)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
Detection* det = ((Detection*) output) + idx;
int total_grids = yolo_width * yolo_height;
if (idx >= batch_size * total_grids * num_anchors) return;
int info_len = 5 + num_classes;
//int batch_idx = idx / (total_grids * num_anchors);
int group_idx = idx / total_grids;
int anchor_idx = group_idx % num_anchors;
const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids);
int class_id;
float max_cls_prob = -CUDART_INF_F; // minus infinity
for (int i = 5; i < info_len; ++i) {
float l = *(cur_input + i * total_grids);
if (l > max_cls_prob) {
max_cls_prob = l;
class_id = i - 5;
}
}
float box_prob = *(cur_input + 4 * total_grids);
//if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH)
// return;
int row = (idx % total_grids) / yolo_width;
int col = (idx % total_grids) % yolo_width;
det->bbox[0] = (col + scale(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1]
det->bbox[1] = (row + scale(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1]
det->bbox[2] = square(*(cur_input + 2 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1]
det->bbox[3] = square(*(cur_input + 3 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1]
det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left
det->bbox[1] -= det->bbox[3] / 2;
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
void YoloLayerPlugin::forwardGpu(const float* const* inputs, float* output, hipStream_t stream, int batchSize)
{
int num_elements = batchSize * mNumAnchors * mYoloWidth * mYoloHeight;
//CHECK(hipMemset(output, 0, num_elements * sizeof(Detection)));
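// One thread per (batch, anchor, grid cell) slot; the block count is rounded up so all num_elements detections are covered.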
if (mNewCoords) {
hipLaunchKernelGGL(( CalDetection_NewCoords), dim3((num_elements + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, stream,
inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY);
} else {
hipLaunchKernelGGL(( CalDetection), dim3((num_elements + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, stream,
inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t stream)
{
forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
assert(!strcmp(name, getPluginName()));
const PluginField* fields = fc->fields;
int yolo_width, yolo_height, num_anchors = 0;
float anchors[MAX_ANCHORS * 2];
int num_classes, input_multiplier, new_coords = 0;
float scale_x_y = 1.0;
for (int i = 0; i < fc->nbFields; ++i)
{
const char* attrName = fields[i].name;
if (!strcmp(attrName, "yoloWidth"))
{
assert(fields[i].type == PluginFieldType::kINT32);
yolo_width = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "yoloHeight"))
{
assert(fields[i].type == PluginFieldType::kINT32);
yolo_height = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "numAnchors"))
{
assert(fields[i].type == PluginFieldType::kINT32);
num_anchors = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "numClasses"))
{
assert(fields[i].type == PluginFieldType::kINT32);
num_classes = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "inputMultiplier"))
{
assert(fields[i].type == PluginFieldType::kINT32);
input_multiplier = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "anchors")){
assert(num_anchors > 0 && num_anchors <= MAX_ANCHORS);
assert(fields[i].type == PluginFieldType::kFLOAT32);
memcpy(anchors, static_cast<const float*>(fields[i].data), num_anchors * 2 * sizeof(float));
}
else if (!strcmp(attrName, "scaleXY"))
{
assert(fields[i].type == PluginFieldType::kFLOAT32);
scale_x_y = *(static_cast<const float*>(fields[i].data));
}
else if (!strcmp(attrName, "newCoords"))
{
assert(fields[i].type == PluginFieldType::kINT32);
new_coords = *(static_cast<const int*>(fields[i].data));
}
else
{
std::cerr << "Unknown attribute: " << attrName << std::endl;
assert(0);
}
}
assert(yolo_width > 0 && yolo_height > 0);
assert(anchors[0] > 0.0f && anchors[1] > 0.0f);
assert(num_classes > 0);
assert(input_multiplier == 8 || input_multiplier == 16 || input_multiplier == 32);
assert(scale_x_y >= 1.0);
YoloLayerPlugin* obj = new YoloLayerPlugin(yolo_width, yolo_height, num_anchors, anchors, num_classes, yolo_width * input_multiplier, yolo_height * input_multiplier, scale_x_y, new_coords);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
REGISTER_TENSORRT_PLUGIN(YoloPluginCreator);
} // namespace nvinfer1
| df97f0dc53681cee155f7687fa375b9c38eeae86.cu | /*
* yolo_layer.cu
*
* This code was originally written by wang-xinyu under MIT license.
* I took it from:
*
* https://github.com/wang-xinyu/tensorrtx/tree/master/yolov4
*
* and made necessary modifications.
*
* - JK Jung
*/
#include "trt_yolo/yolo_layer.h"
using namespace Yolo;
namespace
{
// Write values into buffer
template <typename T>
void write(char*& buffer, const T& val)
{
*reinterpret_cast<T*>(buffer) = val;
buffer += sizeof(T);
}
// Read values from buffer
template <typename T>
void read(const char*& buffer, T& val)
{
val = *reinterpret_cast<const T*>(buffer);
buffer += sizeof(T);
}
} // namespace
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(int yolo_width, int yolo_height, int num_anchors, float* anchors, int num_classes, int input_width, int input_height, float scale_x_y, int new_coords)
{
mYoloWidth = yolo_width;
mYoloHeight = yolo_height;
mNumAnchors = num_anchors;
memcpy(mAnchorsHost, anchors, num_anchors * 2 * sizeof(float));
mNumClasses = num_classes;
mInputWidth = input_width;
mInputHeight = input_height;
mScaleXY = scale_x_y;
mNewCoords = new_coords;
CHECK(cudaMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float)));
CHECK(cudaMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), cudaMemcpyHostToDevice));
}
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mThreadCount);
read(d, mYoloWidth);
read(d, mYoloHeight);
read(d, mNumAnchors);
memcpy(mAnchorsHost, d, MAX_ANCHORS * 2 * sizeof(float));
d += MAX_ANCHORS * 2 * sizeof(float);
read(d, mNumClasses);
read(d, mInputWidth);
read(d, mInputHeight);
read(d, mScaleXY);
read(d, mNewCoords);
CHECK(cudaMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float)));
CHECK(cudaMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), cudaMemcpyHostToDevice));
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
char* d = static_cast<char*>(buffer), *a = d;
write(d, mThreadCount);
write(d, mYoloWidth);
write(d, mYoloHeight);
write(d, mNumAnchors);
memcpy(d, mAnchorsHost, MAX_ANCHORS * 2 * sizeof(float));
d += MAX_ANCHORS * 2 * sizeof(float);
write(d, mNumClasses);
write(d, mInputWidth);
write(d, mInputHeight);
write(d, mScaleXY);
write(d, mNewCoords);
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mThreadCount) + \
sizeof(mYoloWidth) + sizeof(mYoloHeight) + \
sizeof(mNumAnchors) + MAX_ANCHORS * 2 * sizeof(float) + \
sizeof(mNumClasses) + \
sizeof(mInputWidth) + sizeof(mInputHeight) + \
sizeof(mScaleXY) + sizeof(mNewCoords);
}
int YoloLayerPlugin::initialize()
{
return 0;
}
void YoloLayerPlugin::terminate()
{
CHECK(cudaFree(mAnchors));
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
assert(index == 0);
assert(nbInputDims == 1);
assert(inputs[0].d[0] == (mNumClasses + 5) * mNumAnchors);
assert(inputs[0].d[1] == mYoloHeight);
assert(inputs[0].d[2] == mYoloWidth);
// output detection results to the channel dimension
int totalsize = mYoloWidth * mYoloHeight * mNumAnchors * sizeof(Detection) / sizeof(float);
return Dims3(totalsize, 1, 1);
}
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext()
{
}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin *p = new YoloLayerPlugin(mYoloWidth, mYoloHeight, mNumAnchors, (float*) mAnchorsHost, mNumClasses, mInputWidth, mInputHeight, mScaleXY, mNewCoords);
p->setPluginNamespace(mPluginNamespace);
return p;
}
inline __device__ float sigmoidGPU(float x) { return 1.0f / (1.0f + __expf(-x)); }
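// Sigmoid stretched by scale_x_y about 0.5: the output range widens from [0, 1] to [-(s-1)/2, 1+(s-1)/2].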
inline __device__ float scale_sigmoidGPU(float x, float s)
{
return s * sigmoidGPU(x) - (s - 1.0f) * 0.5f;
}
// CalDetection(): This kernel processes 1 yolo layer calculation. It
// distributes calculations so that 1 GPU thread would be responsible
// for each grid/anchor combination.
// NOTE: The output values (x, y, w, h) are between 0.0 and 1.0
// (relative to the original image width and height).
__global__ void CalDetection(const float *input, float *output,
int batch_size,
int yolo_width, int yolo_height,
int num_anchors, const float *anchors,
int num_classes, int input_w, int input_h,
float scale_x_y)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
Detection* det = ((Detection*) output) + idx;
int total_grids = yolo_width * yolo_height;
if (idx >= batch_size * total_grids * num_anchors) return;
int info_len = 5 + num_classes;
//int batch_idx = idx / (total_grids * num_anchors);
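// Each (batch, anchor) group owns info_len contiguous planes of total_grids values; cur_input points at this thread's grid cell in the group's first plane.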
int group_idx = idx / total_grids;
int anchor_idx = group_idx % num_anchors;
const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids);
int class_id;
float max_cls_logit = -CUDART_INF_F; // minus infinity
for (int i = 5; i < info_len; ++i) {
float l = *(cur_input + i * total_grids);
if (l > max_cls_logit) {
max_cls_logit = l;
class_id = i - 5;
}
}
float max_cls_prob = sigmoidGPU(max_cls_logit);
float box_prob = sigmoidGPU(*(cur_input + 4 * total_grids));
//if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH)
// return;
int row = (idx % total_grids) / yolo_width;
int col = (idx % total_grids) % yolo_width;
det->bbox[0] = (col + scale_sigmoidGPU(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1]
det->bbox[1] = (row + scale_sigmoidGPU(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1]
det->bbox[2] = __expf(*(cur_input + 2 * total_grids)) * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1]
det->bbox[3] = __expf(*(cur_input + 3 * total_grids)) * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1]
det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left
det->bbox[1] -= det->bbox[3] / 2;
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
inline __device__ float scale(float x, float s)
{
return s * x - (s - 1.0f) * 0.5f;
}
inline __device__ float square(float x)
{
return x * x;
}
__global__ void CalDetection_NewCoords(const float *input, float *output,
int batch_size,
int yolo_width, int yolo_height,
int num_anchors, const float *anchors,
int num_classes, int input_w, int input_h,
float scale_x_y)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
Detection* det = ((Detection*) output) + idx;
int total_grids = yolo_width * yolo_height;
if (idx >= batch_size * total_grids * num_anchors) return;
int info_len = 5 + num_classes;
//int batch_idx = idx / (total_grids * num_anchors);
int group_idx = idx / total_grids;
int anchor_idx = group_idx % num_anchors;
const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids);
int class_id;
float max_cls_prob = -CUDART_INF_F; // minus infinity
for (int i = 5; i < info_len; ++i) {
float l = *(cur_input + i * total_grids);
if (l > max_cls_prob) {
max_cls_prob = l;
class_id = i - 5;
}
}
float box_prob = *(cur_input + 4 * total_grids);
//if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH)
// return;
int row = (idx % total_grids) / yolo_width;
int col = (idx % total_grids) % yolo_width;
det->bbox[0] = (col + scale(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1]
det->bbox[1] = (row + scale(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1]
det->bbox[2] = square(*(cur_input + 2 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1]
det->bbox[3] = square(*(cur_input + 3 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1]
det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left
det->bbox[1] -= det->bbox[3] / 2;
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
void YoloLayerPlugin::forwardGpu(const float* const* inputs, float* output, cudaStream_t stream, int batchSize)
{
int num_elements = batchSize * mNumAnchors * mYoloWidth * mYoloHeight;
//CHECK(cudaMemset(output, 0, num_elements * sizeof(Detection)));
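// One thread per (batch, anchor, grid cell) slot; the block count is rounded up so all num_elements detections are covered.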
if (mNewCoords) {
CalDetection_NewCoords<<<(num_elements + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>>
(inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY);
} else {
CalDetection<<<(num_elements + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>>
(inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream)
{
forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
assert(!strcmp(name, getPluginName()));
const PluginField* fields = fc->fields;
int yolo_width, yolo_height, num_anchors = 0;
float anchors[MAX_ANCHORS * 2];
int num_classes, input_multiplier, new_coords = 0;
float scale_x_y = 1.0;
for (int i = 0; i < fc->nbFields; ++i)
{
const char* attrName = fields[i].name;
if (!strcmp(attrName, "yoloWidth"))
{
assert(fields[i].type == PluginFieldType::kINT32);
yolo_width = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "yoloHeight"))
{
assert(fields[i].type == PluginFieldType::kINT32);
yolo_height = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "numAnchors"))
{
assert(fields[i].type == PluginFieldType::kINT32);
num_anchors = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "numClasses"))
{
assert(fields[i].type == PluginFieldType::kINT32);
num_classes = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "inputMultiplier"))
{
assert(fields[i].type == PluginFieldType::kINT32);
input_multiplier = *(static_cast<const int*>(fields[i].data));
}
else if (!strcmp(attrName, "anchors")){
assert(num_anchors > 0 && num_anchors <= MAX_ANCHORS);
assert(fields[i].type == PluginFieldType::kFLOAT32);
memcpy(anchors, static_cast<const float*>(fields[i].data), num_anchors * 2 * sizeof(float));
}
else if (!strcmp(attrName, "scaleXY"))
{
assert(fields[i].type == PluginFieldType::kFLOAT32);
scale_x_y = *(static_cast<const float*>(fields[i].data));
}
else if (!strcmp(attrName, "newCoords"))
{
assert(fields[i].type == PluginFieldType::kINT32);
new_coords = *(static_cast<const int*>(fields[i].data));
}
else
{
std::cerr << "Unknown attribute: " << attrName << std::endl;
assert(0);
}
}
assert(yolo_width > 0 && yolo_height > 0);
assert(anchors[0] > 0.0f && anchors[1] > 0.0f);
assert(num_classes > 0);
assert(input_multiplier == 8 || input_multiplier == 16 || input_multiplier == 32);
assert(scale_x_y >= 1.0);
YoloLayerPlugin* obj = new YoloLayerPlugin(yolo_width, yolo_height, num_anchors, anchors, num_classes, yolo_width * input_multiplier, yolo_height * input_multiplier, scale_x_y, new_coords);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
REGISTER_TENSORRT_PLUGIN(YoloPluginCreator);
} // namespace nvinfer1
|
efbdaa20b2f1d8dab304cb960e974b22cc5c32c6.hip | // !!! This is a file automatically generated by hipify!!!
#include <THH/THHTensorMath.h>
#include <THH/THHGeneral.h>
#include <THH/THHBlas.h>
#include <THH/THHTensorCopy.h>
#include <THH/THHNumerics.cuh>
#include <THH/THHTensor.hpp>
#include <THH/THHStorage.hpp>
#include <THH/generic/THHTensorMathBlas.hip>
#include <THH/THHGenerateAllTypes.h>
#include <THH/generic/THHTensorMathBlas.hip>
#include <THH/THHGenerateBFloat16Type.h>
| efbdaa20b2f1d8dab304cb960e974b22cc5c32c6.cu | #include <THC/THCTensorMath.h>
#include <THC/THCGeneral.h>
#include <THC/THCBlas.h>
#include <THC/THCTensorCopy.h>
#include <THC/THCNumerics.cuh>
#include <THC/THCTensor.hpp>
#include <THC/THCStorage.hpp>
#include <THC/generic/THCTensorMathBlas.cu>
#include <THC/THCGenerateAllTypes.h>
#include <THC/generic/THCTensorMathBlas.cu>
#include <THC/THCGenerateBFloat16Type.h>
|